# Crawling_Script.py
import time
import requests
from bs4 import BeautifulSoup
import pymysql

# Run pip3 install requests, pip3 install pymysql, pip3 install beautifulsoup4 before
# proceeding (pip works in place of pip3).
# time.sleep() is called between requests so the site does not mistake the crawl for an attack.
conn = pymysql.connect(host="localhost", user="root", password='abcde12345abcde', db='db_recipe', charset='utf8')
curs = conn.cursor(pymysql.cursors.DictCursor)
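# The script assumes db_recipe already contains a `recipe` table matching the INSERT
# statement below. A minimal sketch of that schema (column types and sizes are
# assumptions; the step text is truncated to 998 characters before insertion, so
# VARCHAR(1000) fits; the misspelled column name `ingrediant` is kept because the
# INSERT uses it):
#
#   CREATE TABLE recipe (
#       menu       VARCHAR(255),
#       ingrediant TEXT,
#       recipe     VARCHAR(1000)
#   );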
for pagenum in range(1, 35):  # Number of list pages to crawl; each page contains roughly 40 recipe links.
    print(pagenum)
    page = requests.get('https://www.10000recipe.com/recipe/list.html?order=reco&page=' + str(pagenum))
    soup = BeautifulSoup(page.content, 'html.parser')
    anchors = soup.find_all("a", {"class": "common_sp_link"})
    for anch in anchors:
        time.sleep(7)  # Throttle so the site does not block the crawler.
        print(pagenum, anch.get('href'))
        page2 = requests.get('https://www.10000recipe.com' + anch.get('href'))
        soup2 = BeautifulSoup(page2.content, 'html.parser')
        # Not every recipe page uses the same markup, so any of these lookups can fail.
        res1 = soup2.find('div', 'view2_summary st3')
        if not res1:
            continue
        res1 = res1.find('h3')
        db_menu = res1.get_text()

        # Collect the ingredient list; skip the recipe if the ingredient block is missing.
        ingList = []
        res2 = soup2.find('div', 'ready_ingre3')
        try:
            for ultag in res2.find_all('ul'):
                for litag in ultag.find_all('li'):
                    tli = litag.get_text().replace(' ', '').split('\n')
                    ingList.append(tli[0])
            db_ingr = ','.join(ingList)
        except AttributeError:
            continue
        # Concatenate the cooking steps; divs classed ('view_step_cont…', 'media') hold the step text.
        db_recipe = ''
        res3 = soup2.find('div', {"class": "view_step"})
        if not res3:
            continue
        for stepdiv in res3.find_all('div'):
            if not stepdiv.has_attr('class'):
                continue
            if 'view_step_cont' in stepdiv['class'][0]:
                if len(stepdiv['class']) < 2 or stepdiv['class'][1] != 'media':
                    continue
                db_recipe += stepdiv.get_text()
        if len(db_recipe) < 50:  # Discard recipes whose step text is too short to be useful.
            continue
        db_recipe = db_recipe[0:998]  # Truncate to fit the recipe column.
        sql = 'INSERT INTO recipe(menu, ingrediant, recipe) VALUES (%s, %s, %s)'
        curs.execute(sql, (db_menu, db_ingr, db_recipe))
        conn.commit()
conn.close()
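
# A hedged sketch, not part of the original crawl loop: a fetch helper with a
# User-Agent header, timeout, and simple retry, since the comment above notes the
# site may treat the crawl as an attack. The header value, timeout, and retry count
# are assumptions; swapping the bare requests.get(...) calls above for fetch(...)
# would make the crawler more tolerant of transient errors and basic bot filtering.
def fetch(url, retries=3):
    headers = {'User-Agent': 'Mozilla/5.0 (compatible; recipe-crawler)'}
    for attempt in range(retries):
        try:
            resp = requests.get(url, headers=headers, timeout=10)
            resp.raise_for_status()
            return resp
        except requests.RequestException:
            time.sleep(5 * (attempt + 1))  # Back off a little longer on each retry.
    return None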