#!/usr/bin/env python3
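"""Download YouTube comments without using the YouTube API.

Scrapes the all_comments page and its comment_ajax endpoint and yields
one dict per comment. Run it directly and it prompts for a video URL,
an output filename and a comment limit.
"""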
import sys
import time
import json
import io
from urllib.parse import urlparse, parse_qs

import requests
import lxml.html
from lxml.cssselect import CSSSelector

YOUTUBE_COMMENTS_URL = 'https://www.youtube.com/all_comments?v={youtube_id}'
YOUTUBE_COMMENTS_AJAX_URL = 'https://www.youtube.com/comment_ajax'
USER_AGENT = ('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
              '(KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36')
def find_value(html, key, num_chars=2):
    """Extract the quoted value that follows `key` in the raw HTML."""
    pos_begin = html.find(key) + len(key) + num_chars
    pos_end = html.find('"', pos_begin)
    return html[pos_begin:pos_end]
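# A minimal sketch of what find_value does (the snippet is hypothetical,
# not a real YouTube page):
#
#     snippet = 'foo data-token="abc123" bar'
#     find_value(snippet, 'data-token')  # -> 'abc123'
#
# The default num_chars=2 skips the '="' between the key and its value.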
def extract_comments(html):
    """Yield one dict per comment found in a chunk of comment HTML."""
    tree = lxml.html.fromstring(html)
    item_sel = CSSSelector('.comment-item')
    text_sel = CSSSelector('.comment-text-content')
    time_sel = CSSSelector('.time')
    author_sel = CSSSelector('.user-name')
    for item in item_sel(tree):
        yield {'cid': item.get('data-cid'),
               'text': text_sel(item)[0].text_content(),
               'time': time_sel(item)[0].text_content().strip(),
               'author': author_sel(item)[0].text_content()}
def extract_reply_cids(html):
    """Return the ids of comments that have a 'View all X replies' link."""
    tree = lxml.html.fromstring(html)
    sel = CSSSelector('.comment-replies-header > .load-comments')
    return [i.get('data-cid') for i in sel(tree)]
def ajax_request(session, url, params, data, retries=10, sleep=20):
    """POST with retries; return (page_token, html) or None if all retries fail."""
    for _ in range(retries):
        response = session.post(url, params=params, data=data)
        if response.status_code == 200:
            response_dict = json.loads(response.text)
            return response_dict.get('page_token'), response_dict['html_content']
        time.sleep(sleep)
    return None
def download_comments(youtube_id, sleep=1):
    """Yield every comment (and reply) on the given video, oldest page first."""
    session = requests.Session()
    session.headers['User-Agent'] = USER_AGENT

    # Get the YouTube page with the initial comments
    response = session.get(YOUTUBE_COMMENTS_URL.format(youtube_id=youtube_id))
    html = response.text
    reply_cids = extract_reply_cids(html)

    seen_cids = set()
    for comment in extract_comments(html):
        seen_cids.add(comment['cid'])
        yield comment

    page_token = find_value(html, 'data-token')
    session_token = find_value(html, 'XSRF_TOKEN', 4)

    first_iteration = True
    # Get the remaining comments (the same as pressing the 'Show more' button)
    while page_token:
        data = {'video_id': youtube_id,
                'session_token': session_token}
        params = {'action_load_comments': 1,
                  'order_by_time': True,
                  'filter': youtube_id}
        if first_iteration:
            params['order_menu'] = True
        else:
            data['page_token'] = page_token

        response = ajax_request(session, YOUTUBE_COMMENTS_AJAX_URL, params, data)
        if not response:
            break
        page_token, html = response

        reply_cids += extract_reply_cids(html)
        for comment in extract_comments(html):
            if comment['cid'] not in seen_cids:
                seen_cids.add(comment['cid'])
                yield comment

        first_iteration = False
        time.sleep(sleep)

    # Get replies (the same as pressing the 'View all X replies' link)
    for cid in reply_cids:
        data = {'comment_id': cid,
                'video_id': youtube_id,
                'can_reply': 1,
                'session_token': session_token}
        params = {'action_load_replies': 1,
                  'order_by_time': True,
                  'filter': youtube_id,
                  'tab': 'inbox'}

        response = ajax_request(session, YOUTUBE_COMMENTS_AJAX_URL, params, data)
        if not response:
            break
        _, html = response

        for comment in extract_comments(html):
            if comment['cid'] not in seen_cids:
                seen_cids.add(comment['cid'])
                yield comment
        time.sleep(sleep)
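# download_comments is a generator; a minimal usage sketch (the video id
# below is a placeholder, not a real one):
#
#     for comment in download_comments('XXXXXXXXXXX'):
#         print(comment['author'], comment['text'])
#
# Each yielded comment is a dict with 'cid', 'text', 'time' and 'author' keys.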
def goto_Menu(result_list):
    """Print every comment whose text contains the Korean keyword '상무'
    (roughly 'managing director'); print 'Not Found' if none match."""
    found = False
    for comment in result_list:
        if '상무' in str(comment['text']):
            print(comment['text'])
            found = True
    if not found:
        print('Not Found')
# Parse the video id out of whatever the user pasted
def video_id(value):
    """Accept a full YouTube link and return just the video id, or None."""
    query = urlparse(value)
    if query.hostname == 'youtu.be':
        return query.path[1:]
    if query.hostname in ('www.youtube.com', 'youtube.com'):
        if query.path == '/watch':
            p = parse_qs(query.query)
            return p['v'][0]
        if query.path.startswith('/embed/'):
            return query.path.split('/')[2]
        if query.path.startswith('/v/'):
            return query.path.split('/')[2]
    # no recognised URL form
    return None
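# Illustrative inputs (XXXXXXXXXXX stands in for a real 11-character id):
#
#     video_id('https://youtu.be/XXXXXXXXXXX')                -> 'XXXXXXXXXXX'
#     video_id('https://www.youtube.com/watch?v=XXXXXXXXXXX') -> 'XXXXXXXXXXX'
#     video_id('https://www.youtube.com/embed/XXXXXXXXXXX')   -> 'XXXXXXXXXXX'
#     video_id('not a youtube link')                          -> None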
def main():
    # Original argparse CLI kept for reference; this version reads the same
    # values interactively with input() instead of command-line arguments.
    # parser = argparse.ArgumentParser(add_help=False, description='Download Youtube comments without using the Youtube API')
    # parser.add_argument('--help', '-h', action='help', default=argparse.SUPPRESS, help='Show this help message and exit')
    # parser.add_argument('--youtubeid', '-y', help='ID of Youtube video for which to download the comments')
    # parser.add_argument('--output', '-o', help='Output filename (output format is line delimited JSON)')
    # parser.add_argument('--limit', '-l', type=int, help='Limit the number of comments')

    # Accept a full link and keep only the video id
    youtube_id = video_id(input('Enter a YouTube URL: '))
    output = input('Enter the output filename: ')
    limit_input = input('Enter the comment limit: ')

    try:
        result_list = []
        # An empty limit defaults to 100 comments
        if limit_input == '':
            limit_input = 100
        limit = int(limit_input)

        if not youtube_id or not output:
            raise ValueError('Please enter valid input values')

        print('Downloading Youtube comments for video:', youtube_id)
        count = 0
        choice = input('Save to file - 0 / Do not save - 1: ')
        if choice == '0':
            # Write one JSON object per line (line-delimited JSON)
            with io.open(output, 'w', encoding='utf8') as fp:
                for comment in download_comments(youtube_id):
                    # json.dumps returns str on Python 3, so no bytes check is needed
                    print(json.dumps(comment, ensure_ascii=False), file=fp)
                    count += 1
                    sys.stdout.write('Downloaded %d comment(s)\r' % count)
                    sys.stdout.flush()
                    if limit and count >= limit:
                        break
            print('\nDone!')
        else:
            for comment in download_comments(youtube_id):
                result_list.append(comment)
                print(comment)
                count += 1
                if limit and count >= limit:
                    break
            print('\nDone!')
            return result_list
            # goto_Menu(result_list)
    except Exception as e:
        print('Error:', str(e))
        sys.exit(1)
if __name__ == '__main__':
    main()