Commit 3a61d8f4 authored by litaolemo's avatar litaolemo

update

parent 4dfed1b9
# crawler
1. 部署在BJ-PaaS-test-nvwa001/srv/apps/
2. 创建/使用虚拟环境：`conda activate crawler_env` 激活，`conda deactivate` 退出
\ No newline at end of file
......@@ -6,8 +6,7 @@ Created on Thu Jun 14 17:09:09 2018
"""
from elasticsearch import Elasticsearch
es_framework = Elasticsearch(hosts='192.168.17.11', port=80,
http_auth=('crawler', 'XBcasfo8dgfs'))
es_framework = Elasticsearch(hosts='172.16.32.37', port=9200)
index_target_releaser = 'target_releasers'
doc_type_target_releaser = 'doc'
......
......@@ -768,25 +768,41 @@ class Crawler_toutiao():
return video_image_url
def get_web_article_info(self, article_id):
    """Fetch article metadata from Toutiao's mobile info endpoint.

    Requests ``https://m.toutiao.com/i<article_id>/info/`` with
    desktop-Chrome headers and flattens the interesting fields of the
    JSON payload into a plain dict.

    :param article_id: Toutiao article id (numeric string or int).
    :return: dict with title, high_quality_flag, play/comment/favorite
             counters, releaser follower count, release_time in epoch
             milliseconds, and content; title/content have CR/LF stripped
             so they are safe for single-line storage.
    """
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "Cookie": "tt_webid=6851788569271944719",
        "Host": "m.toutiao.com",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "none",
        "Sec-Fetch-User": "?1",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36",
    }
    # Referer must point at the article itself or the endpoint may reject us.
    headers["Referer"] = "https://m.toutiao.com/i%s" % article_id
    url = "https://m.toutiao.com/i{0}/info/?i={1}".format(article_id, article_id)
    requests_res = retry_get_url(url, headers=headers, proxies=0)
    res_json = requests_res.json()
    res_dic = {
        "title": res_json["data"].get("title").replace("\r", "").replace("\n", ""),
        'high_quality_flag': int(res_json["data"].get('high_quality_flag')),
        "play_count": int(res_json["data"].get('impression_count')),
        "comment_count": res_json["data"].get("comment_count"),
        # NOTE(review): the diff view this was recovered from elided some
        # fields between comment_count and favorite_count — restore them
        # from the full source file.
        "favorite_count": res_json["data"].get("digg_count"),
        'releaser_followers_count': res_json["data"].get("follower_count"),
        'release_time': int(res_json["data"].get('publish_time') * 1e3),
        "content": res_json["data"].get("content").replace("\r", "").replace("\n", ""),
    }
    return res_dic
......@@ -1761,13 +1777,19 @@ class Crawler_toutiao():
output_to_es_register = kwargs.get("output_to_es_register")
output_to_es_raw = kwargs.get("output_to_es_raw")
es_index = kwargs.get("es_index")
doc_type = kwargs.get("doc_type")
for res in self.releaser_page(url, proxies_num=kwargs.get("proxies_num")):
video_time = res["release_time"]
# print(res)
if video_time:
if start_time < video_time:
if video_time < end_time:
print(res)
try:
res["fetch_time"] = datetime.datetime.fromtimestamp(res.get("fetch_time") / 1000).strftime('%Y-%m-%d %H:%M:%S')
res["release_time"] = datetime.datetime.fromtimestamp(res.get("release_time") / 1000).strftime('%Y-%m-%d %H:%M:%S')
except:
pass
data_lis.append(res)
if len(data_lis) >= 100:
......@@ -1779,7 +1801,7 @@ class Crawler_toutiao():
output_to_es_register=output_to_es_register,
output_to_es_raw=output_to_es_raw,
es_index=es_index,
doc_type=doc_type)
)
data_lis.clear()
else:
......@@ -1789,100 +1811,64 @@ class Crawler_toutiao():
else:
continue
count_false = 0
for res in self.App_releaser_page_all(url, proxies_num=kwargs.get("proxies_num")):
video_time = res["release_time"]
print(video_time)
if video_time:
if start_time < video_time:
if video_time < end_time:
data_lis.append(res)
if len(data_lis) >= 100:
output_result(result_Lst=data_lis,
platform=self.platform,
output_to_file=output_to_file,
filepath=filepath,
push_to_redis=push_to_redis,
output_to_es_register=output_to_es_register,
output_to_es_raw=output_to_es_raw,
es_index=es_index,
doc_type=doc_type)
data_lis.clear()
else:
count_false += 1
if count_false > 5:
break
else:
continue
if data_lis != []:
output_result(result_Lst=data_lis,
platform=self.platform,
output_to_file=output_to_file,
filepath=filepath,
push_to_redis=push_to_redis,
output_to_es_register=output_to_es_register,
output_to_es_raw=output_to_es_raw,
es_index=es_index,
doc_type=doc_type)
# for res in self.App_releaser_page_all(url, proxies_num=kwargs.get("proxies_num")):
# video_time = res["release_time"]
# print(video_time)
# if video_time:
# if start_time < video_time:
# if video_time < end_time:
# data_lis.append(res)
#
# if len(data_lis) >= 100:
# output_result(result_Lst=data_lis,
# platform=self.platform,
# output_to_file=output_to_file,
# filepath=filepath,
# push_to_redis=push_to_redis,
# output_to_es_register=output_to_es_register,
# output_to_es_raw=output_to_es_raw,
# es_index=es_index,
# doc_type=doc_type)
# data_lis.clear()
#
# else:
# count_false += 1
# if count_false > 5:
# break
# else:
# continue
#
# if data_lis != []:
# output_result(result_Lst=data_lis,
# platform=self.platform,
# output_to_file=output_to_file,
# filepath=filepath,
# push_to_redis=push_to_redis,
# output_to_es_register=output_to_es_register,
# output_to_es_raw=output_to_es_raw,
# es_index=es_index,
# doc_type=doc_type)
# import pandas as pd
# data = pd.DataFrame(data_lis)
# s = datetime.datetime.now()
# ss = str(s)[0:19].replace(' ', '-').replace(':', '-')
# res = data.to_csv('%s%sall_s1.csv' % ("all_", ss), encoding='gb18030',
# # columns=columns
# )
if __name__ == '__main__':
data_lis = [
# "https://www.toutiao.com/c/user/5839829632/#mid=5839829632",
'http://m.365yg.com/video/app/user/home/?to_user_id=58914711545&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=50002654647&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=72306985675&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=50290733206&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=79471761049&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=70924955541&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=63729812076&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=64172069815&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=55470957474&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=66196307775&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=4336397515&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=54206087856&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=61744078692&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=5834270582&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=73541277002&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=59225198862&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=75042514157&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=67484010334&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=63027790441&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=64406518419&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=67368895643&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=57058365381&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=54713142371&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=3109782351&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=5870734316&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=63933376369&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=75755755283&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=50899907502&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=54825408087&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=73551586417&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=50538471193&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=60874721257&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=51611052789&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=81986651686&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=6927313695&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=3244888003&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=67104215936&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=71538765457&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=67907307056&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=81940366588&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=62906682978&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=4338388624&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=60008826118&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=74862652408&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=51487578351&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=56061102173&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=71588475370&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=6378865391&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=6967151530&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=69432588162&format=html',
'http://m.365yg.com/video/app/user/home/?to_user_id=62187743919&format=html', ]
"https://profile.zjurl.cn/rogue/ugc/profile/?version_code=7.7.9&version_name=70709&user_id=103497952048&media_id=1609675594821640&request_source=1&active_tab=dongtai&device_id=65&app_name=news_article",
"https://profile.zjurl.cn/rogue/ugc/profile/?version_code=7.7.9&version_name=70709&user_id=51218680623&media_id=51210905535&request_source=1&active_tab=dongtai&device_id=65&app_name=news_article",
"https://profile.zjurl.cn/rogue/ugc/profile/?version_code=7.7.9&version_name=70709&user_id=5547176384&media_id=5547176384&request_source=1&active_tab=dongtai&device_id=65&app_name=news_article",
"https://profile.zjurl.cn/rogue/ugc/profile/?version_code=7.7.9&version_name=70709&user_id=64781639962&media_id=1574600923716622&request_source=1&active_tab=dongtai&device_id=65&app_name=news_article",
"https://profile.zjurl.cn/rogue/ugc/profile/?version_code=7.7.9&version_name=70709&user_id=5784214021&media_id=5806157501&request_source=1&active_tab=dongtai&device_id=65&app_name=news_article",
"https://profile.zjurl.cn/rogue/ugc/profile/?version_code=7.7.9&version_name=70709&user_id=79111609720&media_id=1586021722311694&request_source=1&active_tab=dongtai&device_id=65&app_name=news_article",
"https://profile.zjurl.cn/rogue/ugc/profile/?version_code=7.7.9&version_name=70709&user_id=5576607099&media_id=5575391553&request_source=1&active_tab=dongtai&device_id=65&app_name=news_article",
]
# data_lis = ["https://www.toutiao.com/c/user/6911429466/#mid=6911254049"]
# data_lis = ["https://www.toutiao.com/c/user/6113709817/#mid=6113817558","https://www.toutiao.com/c/user/3688888283/#mid=3689528443","https://www.toutiao.com/c/user/4188615746/#mid=4273783271"]
# data_lis = ["https://www.toutiao.com/c/user/6027579671/#mid=6217730861","http://www.toutiao.com/c/user/61621115551/#mid=1569627833237506"]
......@@ -1894,9 +1880,8 @@ if __name__ == '__main__':
# res = test.video_page("https://www.ixigua.com/i6701478014242259463/")
# print(res)
for url in data_lis:
test.releaser_page_by_time(1582272540000, 1595302556249, url, output_to_es_raw=True,
es_index='crawler-data-raw',
doc_type='doc', releaser_page_num_max=2,
test.releaser_page_by_time(1595088000000, 1595319362610, url, output_to_es_raw=True,
es_index='crawler-data-raw', releaser_page_num_max=2,
proxies_num=0
)
# test.get_releaser_follower_num(url)
......
......@@ -75,18 +75,16 @@ def output_result(result_Lst, platform,
push_to_redis=False,
batch_str=None,
release_time_lower_bdr=None,
es_index=index_site_crawler,
doc_type=doc_type_site_crawler):
es_index=index_site_crawler):
# write data into es crawler-raw index
if output_to_es_raw:
bulk_write_into_es(result_Lst, es_index, doc_type)
bulk_write_into_es(result_Lst, es_index)
# write data into es crawler-url-register index
if output_to_es_register:
data_Lst_reg = form_data_Lst_for_url_register(result_Lst)
bulk_write_into_es(data_Lst_reg,
index=index_url_register,
doc_type=doc_type_url_register,
construct_id=True,
platform=platform
)
......@@ -148,7 +146,6 @@ def get_ill_encoded_str_posi(UnicodeEncodeError_msg):
def bulk_write_into_es(dict_Lst,
index,
doc_type,
construct_id=False,
platform=None):
bulk_write_body = ''
......@@ -186,15 +183,15 @@ def bulk_write_into_es(dict_Lst,
if construct_id and platform is not None:
doc_id = construct_id_for_url_register(platform, line['url'])
action_str = ('{ "index" : { "_index" : "%s", "_type" : "%s", "_id" : "%s" } }'
% (index, doc_type, doc_id))
% (index, doc_id))
else:
action_str = ('{ "index" : { "_index" : "%s", "_type" : "%s" } }'
% (index, doc_type))
% (index))
data_str = json.dumps(line, ensure_ascii=False)
line_body = action_str + '\n' + data_str + '\n'
bulk_write_body += line_body
if write_counter%1000 == 0 or write_counter == len(dict_Lst):
print('Writing into es %s/%s %d/%d' % (index, doc_type,
print('Writing into es %s %d/%d' % (index,
write_counter,
len(dict_Lst)))
if bulk_write_body != '':
......
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 17 10:15:01 2018
@author: hanye
"""
from es_hy_sign_in import es
import sys
import datetime
def cal_monthly_net_inc_test_range(fetch_year, fetch_month, fetch_day,
                                   fetch_hour=0,
                                   fetch_time_seg_hours=24,
                                   doc_type_target='daily-url',
                                   search_body_logic_part=None,
                                   threads_num=5,
                                   logger_name='calculate_monthly_net_inc',
                                   f_log=sys.stdout):
    """Run one fetch-time window of the monthly-net-increase test query.

    Builds a fetch-time window of ``fetch_time_seg_hours`` hours starting at
    ``fetch_year-fetch_month-fetch_day fetch_hour:00``, pairs it with a
    release-time window of [window start - 180 days, window start + 365 days),
    runs a single filtered search (with a ``date_histogram`` aggregation on
    ``release_time``) against the 'short-video-production' index, and writes
    all diagnostics to ``f_log``.  Returns None; output is the log only.

    NOTE(review): indentation was reconstructed from a flattened view —
    verify against the original file.

    :param fetch_year: calendar year of the window start.
    :param fetch_month: calendar month of the window start.
    :param fetch_day: calendar day of the window start.
    :param fetch_hour: hour of day the window starts at.
    :param fetch_time_seg_hours: window length in hours.
    :param doc_type_target: NOTE(review): accepted and printed, but the
        search below hard-codes doc_type 'daily-url-2018-06-30' — confirm
        which one is intended.
    :param search_body_logic_part: optional extra bool-query logic; a
        'filter' entry is appended to the filter list, anything else is
        merged into the bool clause.
    :param threads_num: only echoed to the log here.
    :param logger_name: only echoed to the log here.
    :param f_log: writable text stream for all diagnostics.
    """
    # Echo every argument so the log is self-describing.
    print('************In func:', file=f_log)
    print('fetch_year:', fetch_year, file=f_log)
    print('fetch_month:', fetch_month, file=f_log)
    print('fetch_day:', fetch_day, file=f_log)
    print('fetch_hour:', fetch_hour, file=f_log)
    print('fetch_time_seg_hours:', fetch_time_seg_hours, file=f_log)
    print('doc_type_target:', doc_type_target, file=f_log)
    print('search_body_logic_part:', search_body_logic_part, file=f_log)
    print('threads_num:', threads_num, file=f_log)
    print('logger_name:', logger_name, file=f_log)
    date_passed_in = datetime.datetime(year=fetch_year, month=fetch_month,
                                       day=fetch_day, hour=fetch_hour)
    # Fetch-time window as epoch milliseconds: [start, start + seg_hours).
    fetch_time_start_ts = int(date_passed_in.timestamp()*1e3)
    fetch_time_end_ts = int((date_passed_in
                             + datetime.timedelta(seconds=fetch_time_seg_hours*3600)
                             ).timestamp()*1e3)
    fetch_time_start_iso = datetime.datetime.fromtimestamp(
        int(fetch_time_start_ts/1e3)).isoformat()
    fetch_time_end_iso = datetime.datetime.fromtimestamp(
        int(fetch_time_end_ts/1e3)).isoformat()
    print('fetch_time_start_iso:', fetch_time_start_iso, file=f_log)
    print('fetch_time_end_iso:', fetch_time_end_iso, file=f_log)
    # Release-time window: 180 days back to 365 days forward (epoch ms).
    release_time_start_ts = int((date_passed_in - datetime.timedelta(days=180))
                                .timestamp()*1000)
    release_time_end_ts = int((date_passed_in + datetime.timedelta(days=365))
                              .timestamp()*1000)
    release_time_start_T = datetime.datetime.fromtimestamp(release_time_start_ts/1e3)
    release_time_end_T = datetime.datetime.fromtimestamp(release_time_end_ts/1e3)
    print('release_time_start:', release_time_start_T, file=f_log)
    print('release_time_end:', release_time_end_T, file=f_log)
    # size=2 fetches only a sample of hits; the aggregation carries the data.
    search_body = {
        "query": {
            "bool": {
                "filter": [
                    {"range": {"release_time": {
                        "gte": release_time_start_ts,
                        "lt": release_time_end_ts}}
                     },
                    {"range": {"fetch_time": {
                        "gte": fetch_time_start_ts,
                        "lt": fetch_time_end_ts}}
                     }
                ],
            }
        },
        "size": 2,
        "aggs": {
            "release_time_distribution": {
                "date_histogram": {
                    "field": "release_time",
                    "interval": "day",
                    "time_zone": "Asia/Shanghai"
                }
            }
        }
    }
    # Merge caller-supplied logic into the bool query.
    if search_body_logic_part is not None:
        if 'filter' in search_body_logic_part:
            search_body['query']['bool']['filter'].append(search_body_logic_part['filter'])
        else:
            search_body['query']['bool'].update(search_body_logic_part)
    else:
        pass
    # Crude JSON rendering (quote swap) for logging purposes only.
    search_body_str = search_body.__str__().replace('\'', '"')
    print('search_body:', search_body_str, file=f_log)
    search_resp = es.search(index='short-video-production',
                            doc_type='daily-url-2018-06-30',
                            body=search_body)
    total_hit = search_resp['hits']['total']
    print('search hits:', total_hit, file=f_log)
# ---- module-level driver: re-run the MNI calculation over one month ----
# NOTE(review): indentation reconstructed from a flattened view — verify
# nesting against the original file.
runday = datetime.datetime.now()
# Determine the last day of the month to (re)process: if today is the 1st,
# that is yesterday; otherwise it is the last day of the previous month.
if runday.day == 1:
    last_day_in_the_month_T = runday - datetime.timedelta(days=1)
else:
    if runday.month == 1:
        # Previous month is December of the previous year (always 31 days).
        month_pre = 12
        year_pre = runday.year - 1
        last_day_in_the_month_T = datetime.datetime(year_pre, month_pre, 31)
    else:
        month_pre = runday.month - 1
        year_pre = runday.year
        # First day of current month minus one day == last day of previous month.
        last_day_in_the_month_T = (datetime.datetime(year_pre, runday.month, 1)
                                   - datetime.timedelta(days=1))
last_day_in_the_month_str = last_day_in_the_month_T.isoformat()[:10]
# Per-month doc_type name, e.g. 'daily-url-2018-06-30'.
doc_type_monthly = 'daily-url-%s' % last_day_in_the_month_str
first_day_in_next_month_T = last_day_in_the_month_T + datetime.timedelta(days=1)
year_start = last_day_in_the_month_T.year
month_start = last_day_in_the_month_T.month
cal_month_str = datetime.datetime.strftime(last_day_in_the_month_T, '%b%Y')
cal_month_T = last_day_in_the_month_T
logger_name = 'calculate_MNI_for_missed'
thread_num = 5
# Restrict the run to documents that still lack a monthly_cal_base field.
search_body_for_missed_MNI_logic_part = {
    "must_not": [
        {"exists": {"field": "monthly_cal_base"}}
    ]
}
cal_day_T = datetime.datetime(year_start, month_start, 2)
step_hours = 4
# Timestamped log file; ':' is not valid in Windows file names.
log_fn = 'test_range_for_MNI_missed_cal_%s.log' % datetime.datetime.now().isoformat()[:19].replace(':', '-')
log_pth = r'D:\CSM\Docs\Projects\短视频\code\write-data-into-es\test'
f_log = open(log_pth+'/'+log_fn, 'w', encoding='utf-8')
# Sweep the month in 4-hour fetch windows, from day 2 of the month up to
# one day past the month end.
while cal_day_T <= first_day_in_next_month_T+datetime.timedelta(days=1):
    print(cal_day_T)
    cal_monthly_net_inc_test_range(cal_day_T.year, cal_day_T.month, cal_day_T.day,
                                   fetch_hour=cal_day_T.hour,
                                   fetch_time_seg_hours=step_hours,
                                   doc_type_target=doc_type_monthly,
                                   search_body_logic_part=search_body_for_missed_MNI_logic_part,
                                   threads_num=thread_num,
                                   logger_name=logger_name,
                                   f_log=f_log)
    cal_day_T = cal_day_T + datetime.timedelta(seconds=step_hours*3600)
f_log.close()
# encoding: utf-8
'''
@author: zhangjian
@time: 2018/11/22 15:10
'''
# One-off migration: for CCR 'haokan' daily-url docs whose url contains
# "nid", re-index each doc under a rebuilt canonical Baidu short-video url
# with data_provider changed to "CCR_2".
# NOTE(review): indentation below was reconstructed from a flattened view —
# verify nesting (especially the flush blocks) against the original file.
from elasticsearch import Elasticsearch
from elasticsearch import exceptions
from elasticsearch.helpers import scan
from urllib.parse import quote
import json
import pandas as pd
import datetime
import copy

hosts = '192.168.17.11'
port = 80
user = 'zhouyujiang'
passwd = '8tM9JDN2LVxM'
http_auth = (user, passwd)
es = Elasticsearch(hosts=hosts, port=port, http_auth=http_auth)
# Read from and write back to the same index/doc_type.
sv_index = 'short-video-production'
sv_doc = 'daily-url'
wr_index = 'short-video-production'
wr_doc = 'daily-url'
platfrom_list = [
    'haokan',
]
count = 0
bulk_all_body = ''
for platfrom in platfrom_list:
    # CCR docs for this platform whose url contains the phrase "nid".
    search_body = {
        "query": {
            "bool": {
                "filter": [
                    {"term": {"platform.keyword": platfrom}},
                    {"term": {"data_provider.keyword": "CCR"}}
                ],
                "must": [
                    {
                        "match_phrase": {
                            "url": "nid"
                        }
                    }
                ]
            }
        }
    }
    search_resp = scan(client=es, index=sv_index, doc_type=sv_doc, query=search_body)
    for line in search_resp:
        count += 1
        one_dict = {}
        # daily-url docs are keyed by url + fetch date; rebuild that suffix
        # from the stored epoch-millisecond fetch_time.
        fetch_time_int = int(line['_source']['fetch_time'] / 1000)
        fetch_time_H = datetime.datetime.fromtimestamp(fetch_time_int)
        year = fetch_time_H.year
        month = fetch_time_H.month
        day = fetch_time_H.day
        date_str = '_'+str(year)+'-'+str(month)+'-'+str(day)
        one_dict.update(line['_source'])
        vid = line['_source']['video_id']
        # Canonical Baidu short-video landing url carrying the nid payload.
        url = 'https://sv.baidu.com/videoui/page/videoland?context=' + quote('{\"nid\":\"sv_' + vid + "\"}")
        new_id = url+date_str
        # print(new_id)
        one_dict.update({"data_provider": "CCR_2",
                         "url": url})
        bulk_head = '{"index": {"_id":"%s"}}' % new_id
        data_str = json.dumps(one_dict, ensure_ascii=False)
        bulk_one_body = bulk_head + '\n' + data_str + '\n'
        bulk_all_body += bulk_one_body
        # Flush every 1000 docs to bound the bulk request size.
        if count % 1000 == 0:
            eror_dic = es.bulk(index=wr_index, doc_type=wr_doc,
                               body=bulk_all_body, request_timeout=200)
            bulk_all_body = ''
            if eror_dic['errors'] is True:
                print(eror_dic['items'])
                # NOTE(review): bulk_all_body was already reset above, so this
                # prints an empty string — likely meant to dump the failed body.
                print(bulk_all_body)
            print(count)
# Flush whatever remains after the loops (count not a multiple of 1000).
if bulk_all_body != '':
    eror_dic = es.bulk(body=bulk_all_body,
                       index=wr_index,
                       doc_type=wr_doc,
                       request_timeout=200)
    if eror_dic['errors'] is True:
        print(eror_dic)
    bulk_all_body = ''
print("end")
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment