Commit 20082ec7 authored by 李小芳

Update code

__pycache__/
*.py[co]
.gm_rpcd.log/
.idea
settings_local.py
.gm_rpcd.develop_conf.xml
celerybeat-schedule
celerybeat-schedule.db
celerybeat.pid
_build/
.env/
.vscode/
request.log
env/
venv/
*migrations/
develop_conf.xml
FROM ccr.ccs.tencentyun.com/gm-base/gm-alpine-ffmpeg:v1.1
MAINTAINER wph [wangpenghong@igengmei.com]
COPY ./requirements.txt /tmp
RUN apk add --no-cache --virtual .build-deps \
bzip2-dev \
coreutils \
dpkg-dev dpkg \
expat-dev \
findutils \
gcc \
gdbm-dev \
libc-dev \
libffi-dev \
libnsl-dev \
libressl-dev \
libtirpc-dev \
linux-headers \
make \
ncurses-dev \
pax-utils \
readline-dev \
sqlite-dev \
tcl-dev \
tk \
tk-dev \
xz-dev \
zlib-dev \
# business-specific dependencies and build tools
linux-headers \
python3-dev \
librdkafka-dev \
mariadb-client \
mariadb-dev \
git \
openssh \
\
&& apk add --no-cache jpeg-dev zlib-dev freetype-dev lcms2-dev openjpeg-dev tiff-dev tk-dev tcl-dev \
# skip the host-key confirmation on the first ssh connection
&& echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config \
&& apk add --no-cache mariadb-connector-c-dev libxml2-dev libxslt-dev librdkafka-dev \
&& pip install --no-cache-dir -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com -r /tmp/requirements.txt \
&& mkdir -p /tmp/video_convert \
&& mkdir -p /data/log/mentha/app
ENV GM_RPCD_DEPLOY_CONF_PATH "/srv/apps/mimas/deploy_prod.xml"
COPY . /srv/apps/mentha/
WORKDIR /srv/apps/mentha/
RUN grep master requirements.txt > /tmp/gm-requirements.txt \
&& pip install --no-deps --upgrade -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com -r /tmp/gm-requirements.txt \
&& apk del .build-deps
CMD gunicorn gm_rpcd.wsgi:application -w 5 -k gevent -b 0.0.0.0:8000 --worker-tmp-dir /dev/shm
@Library('gm-pipeline-library') _
pipeline {
agent any
options {
// Add timestamps to console output
timestamps()
// Disallow concurrent executions of the Pipeline
disableConcurrentBuilds()
// On failure, retry the entire Pipeline the specified number of times.
retry(1)
}
parameters {
choice(name: 'CACHE', choices: ['', '--no-cache'], description: 'Whether docker build uses the cache; the default uses it, pass --no-cache to disable')
}
environment {
// Image Tag branch.time.hash
TAG = dockerTag()
// Image Full Tag
IMAGE = "${DOCKER_REGISTRY}/gm-backend/mentha:$TAG"
}
stages {
stage("Begin") {
steps {
dingNotify "before"
}
}
stage('Check Code') {
//when { branch 'test' }
steps {
script {
currentBuild.displayName = "Check-Code"
checkCode "3.6"
}
}
}
stage('Build Images') {
steps {
script {
currentBuild.displayName = "Build-Image"
}
sh "docker build . ${params.CACHE} -t $IMAGE"
sh "docker push $IMAGE"
}
}
}
post {
always {
script {
def stage = "${currentBuild.displayName}"
if (stage == "Check-Code" && fileExists("./flake.output.txt")) {
def description = readFile("./flake.output.txt")
dingNotify "after", "${currentBuild.currentResult}", "${description}"
} else {
dingNotify "after", "${currentBuild.currentResult}"
}
}
recordIssues enabledForFailure: true, tools: [flake8()]
}
}
}
1. Modify the sqitch config: add target_db
2. cd sqls; sqitch deploy -t target_db
3. python manage.py init_tasks
#!/usr/bin/env python
# encoding=utf-8
from __future__ import absolute_import
import os
import raven
from celery import Celery
from django.conf import settings
from raven.contrib.celery import register_signal, register_logger_signal
# set the default Django settings module for the 'celery' program.
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings.settings')
class _Celery(Celery):
"""wrap for celery.Celery."""
def on_configure(self):
# check if sentry settings provided
if not settings.SENTRY_CELERY_ENDPOINT:
return
client = raven.Client(settings.SENTRY_CELERY_ENDPOINT)
# register a custom filter to filter out duplicate logs
register_logger_signal(client)
# hook into the Celery error handler
register_signal(client)
app = _Celery('mimas_tasks')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
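# Worker invocation sketch (the module path of this Celery app depends on the
# project layout; "celery_app" below is only an illustrative placeholder):
#   celery -A celery_app worker -l info
#   celery -A celery_app beat   # if celerybeat-schedule entries are used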
<?xml version="1.0" encoding="utf-8"?>
<gm_rpcd_config>
<info config_name="app" version="1.0"/>
<config name="application_name" value="mimas"/>
<config name="statuses" value="system:status" />
<config name="request_info_extractor" value="system:MimasRequestInfoExtractor" />
<config name="service_list">
<element value="mimas"/>
<element value="qa"/>
<element value="topic"/>
<element value="diary"/>
</config>
<config name="initializer_list">
<element value="init_django"/>
<element value="user_hierarchy.views"/>
<element value="qa.views"/>
<element value="hera.views"/>
<element value="hera.queries"/>
<element value="live.views"/>
<element value="talos.views"/>
<element value="data_sync.views"/>
<element value="search.views"/>
<element value="communal.views"/>
</config>
</gm_rpcd_config>
#!/bin/sh
cd docs
make html
cd ..
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Collection of commonly used helpers
import redis
from django.conf import settings
class RedisWrapper(object):
"""redis proxy add prefix automatically."""
def __init__(self, conf):
self.__pool = redis.ConnectionPool(**conf)
self.redis = redis.StrictRedis(connection_pool=self.__pool)
# methods listed here have their return values wrapped (bytes decoded to str)
_hacked_methods = [
'set', 'get', 'mget', 'setex', 'hget', 'hset', 'hincrby', 'hdel', 'hgetall',
'smembers', 'sadd', 'incr', 'incrby', 'delete', 'expire', 'decr',
'lpush', 'lrange', 'lrem', 'llen', 'sadd', 'srem', 'scard',
'sismember', 'rpop', 'keys', 'rpush', 'lpop', 'sscan_iter',
'hmget', 'hmset'
]
def __getattribute__(self, name):
"""
redis method hack.
:param name:
:return:
"""
try:
return super(RedisWrapper, self).__getattribute__(name)
except AttributeError:
f = getattr(self.redis, name)
if name in RedisWrapper._hacked_methods:
def wrapper(k, *args, **kwargs):
data = f(k, *args, **kwargs)
# bug fix for py35: json.loads does not accept bytes
if type(data) == bytes:
data = data.decode()
return data
return wrapper
return f
personalize_push_cache = RedisWrapper(settings.DEMETER_PUSH_REDIS)
doris_ctr_cache = RedisWrapper(settings.DORIS_REDIS)
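# Usage sketch for RedisWrapper (hypothetical keys/values, not part of this
# module): methods listed in _hacked_methods get their bytes results decoded.
#   personalize_push_cache.set("demo:key", "1")
#   personalize_push_cache.get("demo:key")    # -> "1" as str, not b"1"
#   doris_ctr_cache.hset("demo:hash", "field", "value")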
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from django.conf import settings
from django.core.management import BaseCommand
from django.db import connection, connections
from django.db.models import Count
from qa.models import Answer, AnswerVote
class Command(BaseCommand):
""" python django_manage.py answer_like_num_update --year= --month= --day="""
def add_arguments(self, parser):
parser.add_argument(
'--year',
help=u'year of the target date (%Y-%m-%d)'
)
parser.add_argument(
'--month',
help=u'month of the target date (%Y-%m-%d)'
)
parser.add_argument(
'--day',
help=u'day of the target date (%Y-%m-%d)'
)
def handle(self, *args, **options):
# fetch the fake vote data for the specified date
year = options.get('year')
month = options.get('month')
day = options.get('day')
if not all([year, month, day]):
return
year, month, day = int(year), int(month), int(day)
cursor = connection.cursor()
connections.close_all()
start_time = datetime.today().replace(year=year, month=month, day=day, hour=0)
end_time = datetime.today().replace(year=year, month=month, day=day, hour=23)
# count the fake votes created on the specified date
answer_map_vote_num = list(AnswerVote.objects.using(settings.HERA_READ_DB).filter(
create_time__lte=end_time, create_time__gte=start_time, is_fake=True,
).values('answer_id').annotate(count=Count('id')).values_list('answer_id', 'count'))
answer_ids = [item[0] for item in answer_map_vote_num]
# the answers' existing like counts
answer_like_num = Answer.objects.using(settings.SLAVE_DB_NAME).filter(
id__in=answer_ids,
).values_list('id', 'like_num')
answer_like_num_dict = {item[0]: item[1] for item in answer_like_num}
sql = """UPDATE api_answer SET like_num = CASE id {} END WHERE id IN {}"""
sub_sql = """WHEN {} THEN {} """
sqls = []
len_answer = len(answer_map_vote_num)
ids = []
sub_sql_list = []
for index, (answer_id, like_num) in enumerate(answer_map_vote_num):
# WHEN 25 THEN 25 WHEN 14 THEN 245
count = answer_like_num_dict.get(answer_id, 0) + like_num
sub_sql_list.append(
sub_sql.format(answer_id, count)
)
ids.append(answer_id)
index += 1
if index % 500 == 0 or index == len_answer:
sqls.append(
sql.format("".join(sub_sql_list), tuple(ids))
)
ids = []
sub_sql_list = []
for sql in sqls:
cursor.execute(sql)
cursor.fetchall()
print("Done")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import time
import json
import random
from threading import Thread
from queue import Queue
from django.core.management import BaseCommand
from django.conf import settings
from gm_types.doris import (
CONTENT_AGGRE_CARD_TYPE,
)
from gm_types.gaia import (
TOPIC_TYPE,
)
from gm_types.mimas import (
QUICK_SEARCH_CONTENT_DIVISION,
)
from utils.rpc import get_rpc_invoker
from communal.models import QuickSearchContentKeyword
from qa.models import (
Answer,
Question
)
from talos.models.topic import Problem
from talos.models.tractate import Tractate
rpc_client = get_rpc_invoker()
is_finished = False
def sync_content_quick_search(content_ids, content_type):
"""
Sync quick-search keywords for the given content.
:param content_ids:
:param content_type:
:return:
"""
def keywords_check(keywords):
"""
Keyword validation: each keyword keeps only one valid position from its index list.
:param keywords:
:return:
"""
_index_map_set = set()
_result = []
for item in keywords:
keyword_index_list = list(map(tuple, item.pop("index", [])))
_index_map_diff = set(keyword_index_list) - _index_map_set
if not _index_map_diff:
continue
# record these positions as used
_index_map_set.update(set(keyword_index_list))
item["index"] = sorted(_index_map_diff, key=keyword_index_list.index)[0]  # note: this is a tuple
_result.append(item)
return _result
mapping_dic = {
QUICK_SEARCH_CONTENT_DIVISION.TRACTATE: CONTENT_AGGRE_CARD_TYPE.TRACTATE,
QUICK_SEARCH_CONTENT_DIVISION.TOPIC: CONTENT_AGGRE_CARD_TYPE.DIARY,
QUICK_SEARCH_CONTENT_DIVISION.QUESTION: CONTENT_AGGRE_CARD_TYPE.QA,
QUICK_SEARCH_CONTENT_DIVISION.ANSWER: CONTENT_AGGRE_CARD_TYPE.ANSWER,
QUICK_SEARCH_CONTENT_DIVISION.ARTICLE: CONTENT_AGGRE_CARD_TYPE.ARTICLE,
}
per_num = 100
success_create_num = 0
if not content_ids:
return
in_sql_ids = set(QuickSearchContentKeyword.objects.filter(
content_id__in=content_ids,
content_type=content_type
).using(settings.SLAVE_DB_NAME).values_list("content_id", flat=True))
need_update_content_ids = list(set(content_ids) - in_sql_ids)
for i in range(int(math.ceil(len(need_update_content_ids)/per_num))):
bulk_create_list = []
stn = i * per_num
_ids = need_update_content_ids[stn: stn + per_num]
time.sleep(0.5 * random.random())  # give the rpc interface a short rest (*^▽^*)
try:
key_words_dic = rpc_client["doris/search/get_content_search_highlight"](
content_ids=_ids,
content_type=mapping_dic.get(content_type)
).unwrap()
except Exception:
print("doris rpc error")
key_words_dic = {}
keywords_dic = json.loads(key_words_dic.get("content_highlight_list") or '{}')
if keywords_dic:
for content_id in _ids:
keywords = keywords_dic.get(str(content_id), [])
if not keywords:
continue
_data = {
"content_id": content_id,
"content_type": content_type,
"keywords": json.dumps(keywords_check(keywords)),
}
bulk_create_list.append(_data)
if bulk_create_list:
# validate once more right before the bulk create
_in_sql_ids = set(QuickSearchContentKeyword.objects.filter(
content_id__in=_ids,
content_type=content_type
).values_list("content_id", flat=True))
print("filter can create ids in sql")
bulk_create_list = list(filter(lambda item: item["content_id"] not in _in_sql_ids, bulk_create_list))
QuickSearchContentKeyword.objects.bulk_create([
QuickSearchContentKeyword(**item) for item in bulk_create_list
])
success_create_num += len(bulk_create_list)
return success_create_num
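# Illustrative behaviour of keywords_check above (hypothetical payload): when
# two keywords claim the same highlight position, only the first one keeps it.
#   keywords_check([
#       {"keyword": "a", "index": [[0, 2]]},
#       {"keyword": "b", "index": [[0, 2], [5, 7]]},
#   ])
#   # -> [{"keyword": "a", "index": (0, 2)}, {"keyword": "b", "index": (5, 7)}]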
class ContentSearchThreadBase(Thread):
def __init__(self, name, queue, model, content_type, step=1000):
super(ContentSearchThreadBase, self).__init__(name=name)
self.queue = queue
self.model = model
self.content_type = content_type
self.step = step
@property
def get_ids(self):
filters = {
"is_online": True,
}
if self.content_type == QUICK_SEARCH_CONTENT_DIVISION.TOPIC:
filters.update({
"topic_type__in": [TOPIC_TYPE.ASK, TOPIC_TYPE.SHARE, TOPIC_TYPE.TOPIC],
})
elif self.content_type == QUICK_SEARCH_CONTENT_DIVISION.ARTICLE:
filters.update({
"topic_type__in": [TOPIC_TYPE.USER_ARTICLE, TOPIC_TYPE.COLUMN_ARTICLE],
})
return self.model.objects.filter(**filters).only('id').using(settings.SLAVE_DB_NAME)
# return self.model.objects.filter(**filters).only('id')
@property
def get_min_id(self):
return self.get_ids.first().id
@property
def get_max_id(self):
return self.get_ids.last().id
class Producer(ContentSearchThreadBase):
"""
Producer: enqueues (start_id, end_id) ranges.
"""
def run(self):
global is_finished
count = self.get_ids.count()
min_id = self.get_min_id
print("{} total_count {}".format(self.content_type, count))
transfer_id = min_id
for i in range(int(math.ceil(count/self.step))):
nexts_data = self.get_ids.filter(pk__gte=transfer_id).order_by("id")[:self.step]
if not nexts_data:
break
next_id = list(nexts_data.values_list("id", flat=True))[-1]
print("%s is producing %s to the queue!" % (self.getName(), (transfer_id, next_id)))
self.queue.put((transfer_id, next_id))
transfer_id = next_id + 1
is_finished = True
print("%s finished!" % self.getName())
class Consumer(ContentSearchThreadBase):
"""
Consumer: pulls id ranges off the queue and syncs them.
"""
def executive_logic(self, start_pk_id, end_pk_id):
"""
Core execution logic for one id range.
:return:
"""
content_ids = list(self.get_ids.filter(pk__range=[start_pk_id, end_pk_id]).values_list("id", flat=True))
nums = sync_content_quick_search(content_ids, self.content_type)
return nums
def run(self):
global is_finished
while True:
if is_finished and self.queue.empty():
break
try:
start_id, end_id = self.queue.get(timeout=0.2)
print("%s is consuming. %s in the queue is consumed!" % (self.getName(), (start_id, end_id)))
except:
continue
nums = self.executive_logic(start_id, end_id)
print("bulk_create_nums", nums)
print("%s finished!" % self.getName())
class Command(BaseCommand):
"""
python django_manage.py initialization_content_quick_search_data --content_type
Backfill of the content quick-search keyword data.
"""
_input_content_type_map_dic = {
"topic": (Problem, QUICK_SEARCH_CONTENT_DIVISION.TOPIC),
"article": (Problem, QUICK_SEARCH_CONTENT_DIVISION.ARTICLE),
"question": (Question, QUICK_SEARCH_CONTENT_DIVISION.QUESTION),
"answer": (Answer, QUICK_SEARCH_CONTENT_DIVISION.ANSWER),
"tractate": (Tractate, QUICK_SEARCH_CONTENT_DIVISION.TRACTATE),
}
def add_arguments(self, parser):
parser.add_argument(
'--content_type',
help=u'content type (single choice): article/topic/question/answer/tractate ...'
)
def handle(self, *args, **options):
_input_content_type = options["content_type"]
model_, content_type = self._input_content_type_map_dic.get(_input_content_type, (None, None))
if not model_:
print(u'Please provide a valid content_type')
return
print("START!")
start_time = time.time()
content_queue = Queue()
producer = Producer("producer_{}".format(content_type), content_queue, model_, content_type)
consumer = Consumer("Consumer_{}".format(content_type), content_queue, model_, content_type)
producer.start()
consumer.start()
producer.join()
consumer.join()
end_time = time.time()
print("ALL threads finished!")
print("END")
print("total_time: %s s" % int(math.ceil(end_time - start_time)))
from collections import defaultdict
import time
from django.core.management import BaseCommand
from django.conf import settings
from gm_types.gaia import TAG_TYPE, TAG_V3_TYPE
from talos.models.diary import (
Diary,
DiaryTagV3,
DiaryTag,
)
from talos.models.topic import (
Problem,
ProblemTag,
ProblemTagV3,
)
from qa.models.answer import (
Answer,
AnswerTagV3,
Question,
QuestionTagV3,
AnswerTag,
QuestionTag,
)
from tags.models.tag import (
TagV3,
TagMapOldTag,
Tag,
)
CONTENT_TYPE_MAP = {
"diary": (Diary, DiaryTagV3, "diary_id", DiaryTag, 'tag_id'),
"topic": (Problem, ProblemTagV3, "problem_id", ProblemTag, 'tag_id'),
"question": (Question, QuestionTagV3, "question_id", QuestionTag, 'tag'),
"answer": (Answer, AnswerTagV3, "answer_id", AnswerTag, 'tag'),
}
class TagTool(object):
def __new__(cls, content_type):
cls.model, cls.tag3_model, cls.field, cls.tag_model, cls.tag_name = CONTENT_TYPE_MAP.get(content_type)
cls.operation_tag1_ids = getattr(cls, 'get_operation_tag1')()
cls.operation_tag3_ids = getattr(cls, 'get_operation_tag3')()
cls.operation_tag3_map_tag1 = getattr(cls, 'operation_tag1_map_tag3')()
cls.create_info = []
return cls
@classmethod
def get_operation_tag1(cls):
"""所有1.0运营标签"""
all_operation_tag1 = set(Tag.objects.using(settings.ZHENGXING_DB).filter(
tag_type=TAG_TYPE.YUNYING, is_online=True,
).values_list('id', flat=True))
return all_operation_tag1
@classmethod
def get_operation_tag3(cls):
"""所有3.0运营标签 包括交易运营、社区运营"""
operation_tag3_ids = set(TagV3.objects.using(settings.ZHENGXING_DB).filter(
tag_type__in=[TAG_V3_TYPE.EXCHANGE, TAG_V3_TYPE.COMMUNITY], is_online=True
).values_list('id', flat=True))
return operation_tag3_ids
@classmethod
def operation_tag1_map_tag3(cls):
"""1.0运营标签与3.0标签的映射关系"""
tags = TagMapOldTag.objects.using(settings.ZHENGXING_DB).filter(
old_tag_id__in=cls.operation_tag1_ids
).values_list('tag_id', 'old_tag_id')
tag_map_old_tag = defaultdict()
for tag_id, old_tag_id in tags:
if not tag_id or tag_id not in cls.operation_tag3_ids:
continue
tag_map_old_tag[old_tag_id] = tag_id
return dict(tag_map_old_tag)
@classmethod
def get_operation_tag1_content_map(cls, operation_tag_ids):
"""1.0运营标签与内容关联关系"""
tag_content_map = defaultdict(set)
query = {"{}__in".format(cls.tag_name): list(operation_tag_ids)}
fields = [cls.field, cls.tag_name]
tags = cls.tag_model.objects.using(settings.SLAVE_DB_NAME).filter(**query).values_list(
*fields
)
for content_id, tag_id in tags:
tag_content_map[tag_id].add(content_id)
return dict(tag_content_map)
@classmethod
def get_operation_tag3_content_map(cls, tag1_ids):
"""3.0运营标签与内容关联关系"""
tag1_content_map = cls.get_operation_tag1_content_map(tag1_ids)
if not tag1_content_map:
return
all_map = []
tag3_ids = []
for tag1_id in tag1_ids:
tag3_id = cls.operation_tag3_map_tag1.get(tag1_id)
content_ids = tag1_content_map.get(tag1_id)
if not all([tag3_id, content_ids]): continue
tag3_ids.append(tag3_id)
all_map.extend([(tag3_id, content_id) for content_id in content_ids])
fields = ['tag_v3_id', cls.field]
tag3_content_map = set(cls.tag3_model.objects.using(settings.SLAVE_DB_NAME).filter(
tag_v3_id__in=tag3_ids
).values_list(*fields))
create_tag3_content_map = set(all_map) - tag3_content_map
fields = [{cls.field: content_id, 'tag_v3_id': tag_id} for tag_id, content_id in create_tag3_content_map]
cls.create_info.extend([cls.tag3_model(
**item
)for item in fields])
return cls.create_info
class Command(BaseCommand):
"""python django_manage.py operation_tag_map --content_type="""
def add_arguments(self, parser):
parser.add_argument(
'--content_type',
help=u'content type (single choice): diary/topic/question/answer ...'
)
def handle(self, *args, **options):
begin = time.time()
content_type = options['content_type']
if content_type not in CONTENT_TYPE_MAP:
print("内容参数有误,请重新输入")
return
t = TagTool(content_type=content_type)
operation_tag1_ids = list(t.operation_tag1_ids)
tag3_model = t.tag3_model
while operation_tag1_ids:
if not operation_tag1_ids:
break
tag1_ids = operation_tag1_ids[:10]
t.get_operation_tag3_content_map(tag1_ids)
operation_tag1_ids = operation_tag1_ids[10:]
create_info = t.create_info
print('number of relations to create', len(create_info))
while create_info:
if not create_info:
break
tag3_model.objects.bulk_create(create_info[:2000])
create_info = create_info[2000:]
print('Done cost {}s'.format(int(time.time() - begin)))
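# Example of the mapping chain built above (hypothetical ids): a v1.0 operation
# tag 842 mapped to v3.0 tag 17 and attached to answers 100 and 101 yields the
# candidate relations {(17, 100), (17, 101)}; pairs already present in the v3
# relation table are subtracted before the bulk_create.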
from collections import defaultdict
from django.core.management import BaseCommand
from gm_types.mimas import FAKE_REPLY_BAND_TYPE
from communal.models.fake_reply import FakeReplyPool, FakeReplyConfig, FakeReplyConfigMapReply
from utils.execel import ExcelReader
missing_reply = {
1508: '激光美肤kyc',
1502: '唇部美化kyc',
}
class Command(BaseCommand):
"""
python django_manage.py reply_pool_add_reply
Add new fake (irrigation) replies.
"""
def handle(self, *args, **options):
excel = ExcelReader('/tmp/reply_pool.xlsx')
new_add_reply = defaultdict(list)
replies = []
for c in range(1, excel.column_number+1):
data = excel.read_column(c)
tag_id, tag_name = data[0], data[1]
for i in data[2:]:
if i:
i = i.replace("\t", "").replace('"', "")
if i:
new_add_reply[tag_id].append(i)
replies.append(
FakeReplyPool(
content=i
)
)
FakeReplyPool.objects.bulk_create(replies)
print('New fake replies created successfully')
# bind the new replies to their tag configs
for tag_id, replies in new_add_reply.items():
config = FakeReplyConfig.objects.filter(
tag_id=int(tag_id), band_type=FAKE_REPLY_BAND_TYPE.TAG,
).first()
if not config:
continue
band_list = []
for content in replies:
reply = FakeReplyPool.objects.filter(content=content).first()
if not reply:
continue
band_list.append(
FakeReplyConfigMapReply(
config_id=config.id,
reply_id=reply.id,
)
)
FakeReplyConfigMapReply.objects.bulk_create(band_list)
print('DONE')
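# Expected layout of /tmp/reply_pool.xlsx as read above (inferred from the
# read_column calls, so treat as an assumption): one tag per column, with
# row 1 = tag_id, row 2 = tag_name and the remaining rows the reply texts.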
from collections import defaultdict
from cached_property import cached_property
import time
from django.core.management import BaseCommand
from django.conf import settings
from gm_types.gaia import TAG_TYPE, TAG_V3_TYPE
from talos.models.diary import (
Diary,
DiaryTagV3,
DiaryTag,
)
from talos.models.topic import (
Problem,
ProblemTag,
ProblemTagV3,
)
from qa.models.answer import (
Answer,
AnswerTagV3,
Question,
QuestionTagV3,
AnswerTag,
QuestionTag,
)
from tags.models.tag import (
TagV3,
TagMapOldTag,
Tag,
)
CONTENT_TYPE_MAP = {
"diary": (Diary, DiaryTagV3, "diary_id", DiaryTag, 'tag_id'),
"topic": (Problem, ProblemTagV3, "problem_id", ProblemTag, 'tag_id'),
"question": (Question, QuestionTagV3, "question_id", QuestionTag, 'tag'),
"answer": (Answer, AnswerTagV3, "answer_id", AnswerTag, 'tag'),
}
class TagTool(object):
def __init__(self, content_type):
self.content_model, self.relation_model_v3, self.content_field_id, self.relation_model_v1, self.tag_v1_field = CONTENT_TYPE_MAP.get(content_type)
@cached_property
def tags_v1(self):
"""所有1.0运营标签"""
tag_types = [TAG_TYPE.YUNYING]
# tag_types = [TAG_TYPE.CITY, TAG_TYPE.PROVINCE, TAG_TYPE.COUNTRY]
tag1_ids = set(
Tag.objects.using(settings.ZHENGXING_DB).filter(
tag_type__in=tag_types, is_online=True).values_list('id', flat=True)
)
return tag1_ids
@cached_property
def get_tag3(self):
"""所有3.0运营标签 包括交易运营、社区运营"""
tag_types = [TAG_V3_TYPE.EXCHANGE, TAG_V3_TYPE.COMMUNITY]
# tag_types = [TAG_V3_TYPE.CITY, TAG_V3_TYPE.PROVINCE, TAG_V3_TYPE.COUNTRY]
tag3_ids = set(
TagV3.objects.using(settings.ZHENGXING_DB).filter(
tag_type__in=tag_types, is_online=True).values_list('id', flat=True)
)
return tag3_ids
@cached_property
def tag1_map_tag3(self):
"""1.0运营标签与3.0标签的映射关系"""
maps = TagMapOldTag.objects.using(settings.ZHENGXING_DB).filter(
old_tag_id__in=self.tags_v1).values_list('tag_id', 'old_tag_id')
tag_map_old_tag = {}
for tag_id, old_tag_id in maps:
if not tag_id or tag_id not in self.get_tag3:
continue
tag_map_old_tag[old_tag_id] = tag_id
return tag_map_old_tag
def get_tag1_content_map(self, tag_ids):
"""1.0运营标签与内容关联关系"""
tag_content_map = defaultdict(set)
query = {"{}__in".format(self.tag_v1_field): list(tag_ids)}
fields = [self.content_field_id, self.tag_v1_field]
tags = self.relation_model_v1.objects.using(settings.SLAVE_DB_NAME).filter(**query).values_list(
*fields
)
for content_id, tag_id in tags:
tag_content_map[tag_id].add(content_id)
return dict(tag_content_map)
def get_tag3_content_map(self, tag1_ids):
"""3.0运营标签与内容关联关系"""
tag1_content_map = self.get_tag1_content_map(tag1_ids)
if not tag1_content_map:
return []
all_relations = []
tag3_ids = []
for tag1_id in tag1_ids:
tag3_id = self.tag1_map_tag3.get(tag1_id)
content_ids = tag1_content_map.get(tag1_id)
if not all([tag3_id, content_ids]):
continue
tag3_ids.append(tag3_id)
all_relations.extend([(tag3_id, content_id) for content_id in content_ids])
fields = ['tag_v3_id', self.content_field_id]
exist_relations = set(
self.relation_model_v3.objects.using(settings.SLAVE_DB_NAME).filter(
tag_v3_id__in=tag3_ids).values_list(*fields)
)
print("所有关系:", all_relations)
print("已经存在的关系:", exist_relations)
need_create = set(all_relations) - exist_relations
creation = [{self.content_field_id: content_id, 'tag_v3_id': tag_id} for tag_id, content_id in need_create]
print("需要创建的关系", creation)
return [
self.relation_model_v3(**item)
for item in creation
]
class Command(BaseCommand):
"""python django_manage.py operation_tag_map --content_type="""
def add_arguments(self, parser):
parser.add_argument(
'--content_type',
help=u'content type (single choice): diary/topic/question/answer ...'
)
def handle(self, *args, **options):
begin = time.time()
content_type = options['content_type']
if content_type not in CONTENT_TYPE_MAP:
print("内容参数有误,请重新输入")
return
t = TagTool(content_type=content_type)
tag1_ids = list(t.tags_v1)
create_info = []
while tag1_ids:
if not tag1_ids:
break
create_info.extend(t.get_tag3_content_map(tag1_ids[:10]))
tag1_ids = tag1_ids[10:]
print('number of relations to create', len(create_info))
while create_info:
if not create_info:
break
t.relation_model_v3.objects.bulk_create(create_info[:500])
create_info = create_info[500:]
print('Done cost {}s'.format(int(time.time() - begin)))
from collections import defaultdict
from cached_property import cached_property
import time
from django.core.management import BaseCommand
from django.conf import settings
from gm_types.gaia import TAG_TYPE, TAG_V3_TYPE
from talos.models.diary import (
Diary,
DiaryTagV3,
DiaryTag,
)
from talos.models.topic import (
Problem,
ProblemTag,
ProblemTagV3,
)
from talos.models.tractate import (
Tractate,
TractateTagV3,
TractateTag,
)
from qa.models.answer import (
Answer,
AnswerTagV3,
Question,
QuestionTagV3,
AnswerTag,
QuestionTag,
)
from tags.models.tag import (
TagV3,
TagMapOldTag,
Tag,
)
CONTENT_TYPE_MAP = {
"diary": (Diary, DiaryTagV3, "diary_id", DiaryTag, 'tag_id'),
"topic": (Problem, ProblemTagV3, "problem_id", ProblemTag, 'tag_id'),
"question": (Question, QuestionTagV3, "question_id", QuestionTag, 'tag'),
"answer": (Answer, AnswerTagV3, "answer_id", AnswerTag, 'tag'),
"tractate": (Tractate, TractateTagV3, "tractate_id", TractateTag, 'tag_id'),
}
class TagTool(object):
def __init__(self, content_type):
self.content_model, self.relation_model_v3, self.content_field_id, self.relation_model_v1, self.tag_v1_field = CONTENT_TYPE_MAP.get(content_type)
@cached_property
def tags_v1(self):
return [
1, 3, 5, 7, 9, 10, 11, 12, 992, 1024, 1080, 2212, 2214, 9180, 12711, 13, 21, 24, 26, 55, 65, 67, 81, 158,
237, 766, 842, 874, 884, 896, 898, 929, 936, 938, 939, 962, 971, 1233, 2448, 3285, 5724, 6418, 6698, 6933,
6948, 12262, 85, 215, 792, 872, 7179
]
@cached_property
def get_tag3(self):
"""所有3.0运营标签 包括交易运营、社区运营"""
tag_types = [TAG_V3_TYPE.COMMUNITY]
tag3_ids = set(
TagV3.objects.using(settings.ZHENGXING_DB).filter(is_online=True).values_list('id', flat=True)
)
return tag3_ids
@cached_property
def tag1_map_tag3(self):
"""1.0运营标签与3.0标签的映射关系"""
maps = TagMapOldTag.objects.using(settings.ZHENGXING_DB).filter(
old_tag_id__in=self.tags_v1).values_list('tag_id', 'old_tag_id')
tag_map_old_tag = {}
for tag_id, old_tag_id in maps:
if not tag_id or tag_id not in self.get_tag3:
continue
tag_map_old_tag[old_tag_id] = tag_id
return tag_map_old_tag
def get_tag1_content_map(self, tag_ids):
"""1.0运营标签与内容关联关系"""
tag_content_map = defaultdict(set)
query = {"{}__in".format(self.tag_v1_field): list(tag_ids)}
fields = [self.content_field_id, self.tag_v1_field]
tags = self.relation_model_v1.objects.using(settings.SLAVE_DB_NAME).filter(**query).values_list(
*fields
)
for content_id, tag_id in tags:
tag_content_map[tag_id].add(content_id)
return dict(tag_content_map)
def get_tag3_content_map(self, tag1_ids):
"""3.0运营标签与内容关联关系"""
tag1_content_map = self.get_tag1_content_map(tag1_ids)
if not tag1_content_map:
return []
all_relations = []
tag3_ids = []
for tag1_id in tag1_ids:
tag3_id = self.tag1_map_tag3.get(tag1_id)
content_ids = tag1_content_map.get(tag1_id)
if not all([tag3_id, content_ids]):
continue
tag3_ids.append(tag3_id)
all_relations.extend([(tag3_id, content_id) for content_id in content_ids])
fields = ['tag_v3_id', self.content_field_id]
exist_relations = set(
self.relation_model_v3.objects.using(settings.SLAVE_DB_NAME).filter(
tag_v3_id__in=tag3_ids).values_list(*fields)
)
print("所有关系:", all_relations)
print("已经存在的关系:", exist_relations)
need_create = set(all_relations) - exist_relations
creation = [{self.content_field_id: content_id, 'tag_v3_id': tag_id} for tag_id, content_id in need_create]
print("需要创建的关系", creation)
return [
self.relation_model_v3(**item)
for item in creation
]
class Command(BaseCommand):
"""python django_manage.py tag_migration_v2 --content_type="""
def add_arguments(self, parser):
parser.add_argument(
'--content_type',
help=u'content type (single choice): diary/topic/question/answer ...'
)
def handle(self, *args, **options):
begin = time.time()
content_type = options['content_type']
if content_type not in CONTENT_TYPE_MAP:
print("内容参数有误,请重新输入")
return
t = TagTool(content_type=content_type)
tag1_ids = list(t.tags_v1)
create_info = []
while tag1_ids:
if not tag1_ids:
break
create_info.extend(t.get_tag3_content_map(tag1_ids[:10]))
tag1_ids = tag1_ids[10:]
print('number of relations to create', len(create_info))
while create_info:
if not create_info:
break
t.relation_model_v3.objects.bulk_create(create_info[:500])
create_info = create_info[500:]
print('Done cost {}s'.format(int(time.time() - begin)))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .quick_search import *
from .hot_content import *
""" 灌水评论配置"""
from django.db import models
from gm_types.mimas import FAKE_REPLY_BAND_TYPE
class FakeReplyPool(models.Model):
class Meta:
verbose_name = '灌水评论池'
db_table = 'api_fake_replies'
content = models.CharField(max_length=256, null=False, verbose_name=u'评论内容')
is_online = models.BooleanField(verbose_name="是否有效", default=True)
class FakeReplyConfig(models.Model):
class Meta:
verbose_name = '标签、大组配置'
db_table = 'api_fake_reply_tag'
band_type = models.SmallIntegerField(verbose_name='绑定类型', choices=FAKE_REPLY_BAND_TYPE)
tag_id = models.IntegerField(verbose_name='标签ID', default=0)
tag_attr_id = models.IntegerField(verbose_name='属性标签ID', default=0)
tag_group_id = models.IntegerField(verbose_name='标签组ID', default=0)
is_online = models.BooleanField(verbose_name="是否有效", default=True)
class FakeReplyConfigMapReply(models.Model):
class Meta:
verbose_name = '标签、大组绑定评论'
db_table = 'api_fake_config_map_reply'
config_id = models.IntegerField(verbose_name='配置ID', db_index=True)
reply_id = models.IntegerField(verbose_name='灌水评论ID', db_index=True)
is_online = models.BooleanField(verbose_name="是否有效", default=True)
from .hot_content import *
from django.db import models
from gm_types.push import PUSH_CONTENT_TYPE
class HotContent(models.Model):
"""热门内容,来源: 数据组统计 目前用于个性化push"""
class Meta:
verbose_name = '热门内容'
db_table = 'api_hot_content'
unique_together = ('date_index', 'content_type', 'content_id')
date_index = models.DateField(u'统计日期', auto_now_add=True)
content_id = models.IntegerField(u'内容id, 多个内容id拼接的字符串')
content_type = models.CharField(u"内容类型", max_length=32, choices=PUSH_CONTENT_TYPE)
click_num = models.IntegerField(u'点击数量')
is_push = models.BooleanField(u'是否已经推送', default=False)
from .personalize_tag import PersonalizeActionTag
# coding: utf-8
from django.db import models
from communal.cache.push import personalize_push_cache
class PersonalizeActionTag(models.Model):
"""
User behaviour tags: ordered by time, the tags of the 10 most recent actions are kept.
http://wiki.wanmeizhensuo.com/pages/viewpage.action?pageId=36560964
"""
class Meta:
verbose_name = '用户行为标签'
db_table = 'api_personalize_action_tag'
unique_together = ('date_index', 'device_id')
date_index = models.DateField(verbose_name='统计日期', auto_now_add=True)
device_id = models.CharField(verbose_name='设备id', max_length=64)
tag_info = models.CharField(verbose_name='标签相关信息', max_length=512)
'''
tag_info data format:
id,id:time#id,id:time
'''
#
# def parse_personalize_action_task(date_index=None):
# CACHE_KEY_PERSONALIZE_ACTION_TAG_INFO = 'demeter:push:action_tag_push:action_tag_info'
#
# def format_action_tag_info(tag_info):
# result = []
# for _item in tag_info.split('#'):
# tags, action_time = _item.split(':')
# if tags:
# result.append(dict(
# tags=tags,
# action_date=action_time
# ))
# return result
#
# personalize_action_dict = {}
# import datetime
# if not date_index:
# date_index = str(datetime.date.today())
#
# first_item = PersonalizeActionTag.objects.filter(date_index=date_index).first()
# last_item = PersonalizeActionTag.objects.filter(date_index=date_index).last()
#
# search_offset = 100
# split_ids = [(i, i+search_offset) for i in range(first_item.id, last_item.id, search_offset)]
# for (start_id, end_id) in split_ids:
# res = PersonalizeActionTag.objects.filter(id__range=(start_id, end_id)).values(
# "tag_info", "device_id"
# )
# for obj in res:
# action_tag_info = format_action_tag_info(obj['tag_info'])
# if action_tag_info:
# personalize_action_dict[obj['device_id']] = action_tag_info
#
# for device_id, tag_info in personalize_action_dict.items():
# personalize_push_cache.hmset(CACHE_KEY_PERSONALIZE_ACTION_TAG_INFO, device_id, tag_info)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .quick_search import (
QuickSearchContentKeyword,
)
# quick-search keywords for article content
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db import models
from django.utils import timezone
from gm_types.mimas import (
QUICK_SEARCH_CONTENT_DIVISION,
)
class QuickSearchContentKeyword(models.Model):
class Meta:
verbose_name = '内容详情页快速搜索关键字'
db_table = 'api_quick_search_content_keyword'
index_together = ['content_id', 'content_type']
content_id = models.IntegerField(verbose_name="内容ID")
content_type = models.CharField(verbose_name="内容类型", max_length=128, choices=QUICK_SEARCH_CONTENT_DIVISION)
keywords = models.TextField(verbose_name="关键字,json数据") # eg: '[{"keyword": "", "keyword_type": ''}]'
create_time = models.DateTimeField(verbose_name="创建时间", default=timezone.now)
update_time = models.DateTimeField(verbose_name="更新时间", default=timezone.now)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .user_manager import user_manager
from .tag_manager import tag_manager
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from talos.services import (
TagService,
TagV3Service,
AgileTagService,
)
from utils.base_manager import BaseManager
class TagManager(BaseManager):
"""
Aggregated tag helpers exposed to callers.
"""
def _convert_tag_info(self, tag):
"""
Format old (v1) tag data.
:param tag:
:return:
"""
return {
"id": tag.id,
"tag_id": tag.id,
"name": tag.name,
"tag_name": tag.name,
"tag_type": tag.tag_type,
}
def _convert_agile_tag_info(self, agile_tag):
"""
Format new (agile) tag data.
:param agile_tag:
:return:
"""
return {
"id": agile_tag.id,
"tag_id": agile_tag.id,
"name": agile_tag.name,
"tag_name": agile_tag.name,
# "recommends_type": agile_tag.recommends_type, # 推荐类型
"attribute": agile_tag.attribute, # 标签属性
}
def get_tags_info_by_ids(self, tag_ids):
"""
Fetch tag info by old tag ids.
:param tag_ids:
:return: {}
"""
tag_ids = self.filter_ids(tag_ids)
if not tag_ids:
return {}
tags_info = TagService.get_tags_by_tag_ids(ids=tag_ids)
result = {}
for tag in tags_info:
result[tag.id] = self._convert_tag_info(tag)
return result
def get_agile_tag_info_by_ids(self, tag_ids):
"""
Fetch agile tag data by agile tag ids.
:param tag_ids:
:return:
"""
agile_tag_ids = self.filter_ids(tag_ids)
agile_tag_info_dic = AgileTagService.get_agile_tags_by_agile_tag_ids(agile_tag_ids)
result = {}
for k, v in agile_tag_info_dic.items():
result[k] = self._convert_agile_tag_info(v)
return result
def get_tags_v3_info_by_ids(self, tag_ids):
"""
Fetch tag v3.0 data.
:param tag_ids:
:return:
"""
result = {}
if not tag_ids:
return result
tag_v3_infos = TagV3Service.get_tags_by_tag_v3_ids(tag_v3_ids=tag_ids)
for tag_v3_id, tag_v3_obj in tag_v3_infos.items():
result[tag_v3_id] = TagV3Service.format_tag_v3(tag_v3_obj=tag_v3_obj)
return result
tag_manager = TagManager()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from talos.services import (
UserConvertService,
)
from social.models import SocialInfo
from utils.base_manager import BaseManager
class UserManager(BaseManager):
def get_user_all_fans(self, user_id):
"""
Fetch all fans of a user.
:param user_id:
:return:
"""
social_info = SocialInfo.social_info(user_id)
fans_ids = []
for fans_id in social_info.get_fans_iterator():
fans_ids.append(fans_id)
return fans_ids
def _get_user_following_info(self, viewer_user_id, author_ids):
"""
Fetch the viewer's follow state towards the given authors.
:param viewer_user_id: the viewing user
:param author_ids: list of author ids
:return:
"""
follow_rels = {}
if viewer_user_id:
social_info = SocialInfo(uid=viewer_user_id)
follow_rels = social_info.is_following_users(author_ids)
return follow_rels
def _get_user_info_by_user_ids(self, user_ids, simple=True):
"""
Fetch user info.
:param user_ids:
:param simple:
:return:
"""
user_info_dic = UserConvertService.get_user_info_by_user_ids(user_ids, simple=simple)
return user_info_dic
def convert_user_info(self, viewer_user_id, author_ids, simple=True):
"""
Convert user info.
:param viewer_user_id: the viewer
:param author_ids: the authors
:param simple: switch between the full and the slim user payload
:return:
"""
author_ids = self.filter_ids(author_ids)
user_infos = self._get_user_info_by_user_ids(author_ids, simple=simple)
follow_rels = self._get_user_following_info(viewer_user_id, author_ids)
result = {}
for k, _data in user_infos.items():
_data.update({
"is_following": follow_rels.get(k, False),
})
result[k] = _data
return result
user_manager = UserManager()
import random
from itertools import chain
from django.conf import settings
from communal.models.fake_reply import FakeReplyConfig, FakeReplyPool, FakeReplyConfigMapReply
from talos.cache.base import fake_cache
from talos.rpc import get_current_rpc_invoker
call_rpc = get_current_rpc_invoker()
class FakeCacheService(object):
@classmethod
def generate_cache_key(cls, content_type, content_id):
return "{}:{}".format(content_type, content_id)
@classmethod
def cache_reply(cls, content_type, content_id, reply_id):
key = cls.generate_cache_key(content_type, content_id)
fake_cache.sadd(key, reply_id)
@classmethod
def get_all_cached_ids(cls, content_type, content_id):
key = cls.generate_cache_key(content_type, content_id)
ids = fake_cache.smembers(key)
return set(map(int, ids))
class FakeService(object):
@classmethod
def get_fake_reply_ids_by_tag_ids(cls, tag_ids):
"""
Fetch fake reply ids by tag / tag group.
:param tag_ids:
:return:
"""
if not tag_ids:
return []
config_ids = list(FakeReplyConfig.objects.using(settings.SLAVE_DB_NAME).filter(
tag_id__in=tag_ids, is_online=True).values_list('id', flat=True))
reply_ids = list(FakeReplyConfigMapReply.objects.using(settings.SLAVE_DB_NAME).filter(
config_id__in=config_ids).values_list('reply_id', flat=True))
comment_ids = list(FakeReplyPool.objects.using(settings.SLAVE_DB_NAME).filter(
id__in=reply_ids, is_online=True,
).values_list('id', flat=True))
if not comment_ids:
try:
tag_category_map = call_rpc["api/tag_v3/list_group_by_tags"](tag_ids=tag_ids).unwrap()
except:
return []
tag_category_ids = [item.get('id', 0) for item in chain(*tag_category_map.values())]
config_ids = list(FakeReplyConfig.objects.filter(
tag_group_id__in=tag_category_ids, is_online=True).values_list('id', flat=True))
reply_ids = list(FakeReplyConfigMapReply.objects.using(settings.SLAVE_DB_NAME).filter(
config_id__in=config_ids).values_list('reply_id', flat=True))
comment_ids = list(FakeReplyPool.objects.filter(
id__in=reply_ids, is_online=True).values_list('id', flat=True))
return comment_ids
@classmethod
def get_comment_by_tag_ids(cls, tag_ids, content_type, content_id):
"""
Fetch a fake (irrigation) reply via the new tags.
:return:
"""
related_replies = cls.get_fake_reply_ids_by_tag_ids(tag_ids)
cached_ids = FakeCacheService.get_all_cached_ids(content_type, content_id)
cached_ids = set(map(int, cached_ids))
not_used_ids = list(set(related_replies) - cached_ids)
if not not_used_ids:
return None
reply_id = random.choice(not_used_ids)
reply = FakeReplyPool.objects.get(id=reply_id)
FakeCacheService.cache_reply(content_type, content_id, reply_id)
return reply.content
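# Usage sketch (hypothetical ids/types): pick one not-yet-used fake reply for a
# piece of content; the chosen reply id is cached so it is not reused there.
#   text = FakeService.get_comment_by_tag_ids(
#       tag_ids=[1024], content_type="answer", content_id=123)
#   # returns the reply text, or None once every candidate has been used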
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .push_task import *
from .irrigation_task import *
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from celery import shared_task
from gm_types.mimas import COMMUNITY_CONTENT_TYPE, FAKE_TYPE
from talos.services import (
TagService,
TagV3Service,
)
from talos.logger import irrigation_logger
from utils.rpc import (
rpc_client,
logging_exception,
)
@shared_task
def irrigation_tasks(card_id, card_user_id, card_type, others):
"""
Async task that triggers irrigation (fake engagement).
:param card_id: data id
:param card_user_id: id of the creating author
:param card_type: data type
:param others: other fields, dict() {}
:return:
"""
return
#
# tags = TagService.get_tags_by_tag_ids(others.get("tag_ids", []))
# tags_v3 = TagV3Service.get_tags_by_tag_v3_ids(others.get("tag_v3_ids", []))
# tag_names = [tag.name for tag in tags]
# tag_v3_names = [tag.name for tag in tags_v3.values()]
#
# _rpc_url = "vest/moment/vest_irrigation"
# _kw = {
# "card_id": card_id,
# "card_type": card_type,
# "card_user_id": card_user_id,
# "content_level": others.get("content_level", 0),
# "tag_names": tag_names,
# "tagv3_names": tag_v3_names,
# "create_time": others.get("create_time", ""),
# }
#
# try:
# rpc_client[_rpc_url](**_kw).unwrap()
# call_state = True
# except:
# logging_exception()
# call_state = False
#
# irrigation_logger.info(json.dumps({
# "step": 2,
# "type": "irrigation",
# "resume": "Call irrigation service!",
# "rpc_url": _rpc_url,
# "params": _kw,
# "call_state": call_state,
# "call_state_desc": "Call Irrigation Server. OK!!!" if call_state else "Call irrigation service ERROR!!!"
# }))
@shared_task
def create_fake_task(business_id, business_type, business_level, author_id):
"""异步创建灌水任务"""
_rpc_url = "demeter/fake/task/product"
_kw = {
"business_id": business_id,
"business_type": business_type,
"level": business_level,
"author_id": author_id,
}
try:
rpc_client[_rpc_url](**_kw).unwrap()
except:
logging_exception()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import random
import datetime
from celery import shared_task
from gm_types.push import PUSH_INFO_TYPE
from talos.cache.gaia import push_cache
from talos.logger import push_logger, info_logger
from utils.push import push_task_to_user_multi
from utils.rpc import (
rpc_client,
logging_exception,
)
from communal.models.push.personalize_tag import PersonalizeActionTag
from communal.cache.push import personalize_push_cache
@shared_task
def push_control_task(user_ids, platform, extra, alert, eta, push_type, labels, others=None):
"""
Common push entry point.
:return:
"""
push_task_to_user_multi(
user_ids=user_ids,
platform=platform,
extra=extra,
alert=alert,
eta=eta,
push_type=push_type,
labels=labels,
others=others
)
@shared_task
def aggregation_push_trigger_task(cache_name, default_cache_key, unit_time=60 * 60, sole_sign=""):
"""
Aggregated push: delayed trigger task.
:param cache_name: cache name
:param default_cache_key: default cache key
:param unit_time: unit time window (seconds)
:param sole_sign: identifier used to correlate log entries
:return:
"""
push_logger.info(json.dumps({
"subordinate": "aggregation_push",
"sole_sign": sole_sign,
"resume": "aggregation_push_trigger_task.",
"params": {
"cache_name": cache_name,
"default_cache_key": default_cache_key,
"unit_time": unit_time,
},
}))
# read the aggregated data from the cache
for cache_key, item in push_cache.hscan_iter(cache_name, count=100):
if cache_key == default_cache_key:
continue
if item:
push_data = json.loads(item)
for push_type, record_ids in push_data.items():
countdown = random.choice(range(10, unit_time - 60 * 5))  # pick the trigger delay
_kw = {
"user_ids": [cache_key],
"action_type": push_type,
"record_ids": record_ids,
"sole_sign": sole_sign,
}
# trigger the follow-up async task
task_ = aggregation_push_timing_task.apply_async(
kwargs=_kw,
countdown=countdown
)
push_logger.info(json.dumps({
"subordinate": "aggregation_push",
"sole_sign": sole_sign,
"resume": "aggregation_push_timing_task.",
"task_id": task_.id,
"params": _kw,
"countdown": countdown,
}))
@shared_task
def aggregation_push_timing_task(**kwargs):
"""
Aggregated push: single-task trigger.
:return:
"""
from communal.tools.push.push_aggregation import AggregationPushService
params = AggregationPushService.build_push_params(**kwargs)
if params:
push_logger.info(json.dumps({
"subordinate": "aggregation_push",
"sole_sign": kwargs.get("sole_sign", ""),
"resume": "aggregation_push builder params.",
"build_params": kwargs,
"push_params": params,
}))
push_task_to_user_multi(**kwargs)
@shared_task
def intelligent_push_task(content_id, user_ids, push_type, extra, platform=None, alert='', others=None, labels={}):
"""
Unified push management; pushes are sent through demeter.
:param content_id: content id
:param user_ids: list of users that receive the push
:param push_type: push type
:param platform: push channel, Android / iOS
:param extra: dict
:param alert: object
:param others: object
:param labels: object
:return:
"""
_rpc_url = "demeter/push/user/community_push"
kwargs = {
"content_id": content_id,
"user_ids": user_ids,
"push_type": push_type,
"platform": platform,
"extra": extra,
"alert": alert,
"labels": labels,
}
if others:
kwargs.update({'others': others})
try:
rpc_client[_rpc_url](**kwargs).unwrap()
info_logger.info('invoke demeter push {}:{}'.format(content_id, push_type))
except:
logging_exception()
@shared_task
def parse_personalize_action_task(date_index=None):
push_logger.info(json.dumps(dict(msg='parse_personalize_action_task start!')))
if not date_index:
date_index = str(datetime.date.today() - datetime.timedelta(days=1))
cache_key_personalize_action_tag_info = 'demeter:push:action_tag_push:action_tag_info_{}'.format(date_index)
first_item = PersonalizeActionTag.objects.filter(date_index=date_index).first()
last_item = PersonalizeActionTag.objects.filter(date_index=date_index).last()
search_offset = 100
split_ids = [(i, i + search_offset) for i in range(first_item.id, last_item.id+1, search_offset)]
for (start_id, end_id) in split_ids:
res = PersonalizeActionTag.objects.filter(id__range=(start_id, end_id)).values(
"tag_info", "device_id"
)
for obj in res:
# personalize_action_dict[obj['device_id']] = obj['tag_info']
personalize_push_cache.hset(
cache_key_personalize_action_tag_info,
obj['device_id'],
obj['tag_info']
)
push_logger.info(json.dumps(dict(msg='parse_personalize_action_task finish!')))
@shared_task
def add_device_to_city_map_for_push():
push_logger.info(json.dumps(dict(msg='add_device_to_city_map_for_push start!')))
cache_key_demeter_device_2_city = 'demeter:push:device_2_city_map'
date_index = str(datetime.date.today() - datetime.timedelta(days=1))
first_item = PersonalizeActionTag.objects.filter(date_index=date_index).first()
last_item = PersonalizeActionTag.objects.filter(date_index=date_index).last()
search_offset = 200
split_ids = [(i, i + search_offset) for i in range(first_item.id, last_item.id+1, search_offset)]
for (start_id, end_id) in split_ids:
device_list = list(PersonalizeActionTag.objects.filter(id__range=(start_id, end_id)).values_list(
"device_id", flat=True
))
res = rpc_client['api/device/get_device_city'](
device_id_list=device_list
).unwrap()
for device_id in res:
personalize_push_cache.hset(
cache_key_demeter_device_2_city,
device_id,
res[device_id]
)
push_logger.info(json.dumps(dict(msg='add_device_to_city_map_for_push finish!')))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .push import (
PushService,
)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .push import PushService
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import uuid
from django.conf import settings
from communal.normal_manager import user_manager
from .push_base import (
push_logger,
PushServiceBase,
)
from .push_timely import TimelyPushService
from .push_aggregation import AggregationPushService
class PushService(PushServiceBase):
"""
Externally used push service methods.
"""
@staticmethod
def get_user_all_fans(user_id):
"""
Fetch all fans of the user.
:param user_id:
:return:
"""
return user_manager.get_user_all_fans(user_id)
@staticmethod
def filter_valid_user_ids(user_ids, filter_user_id):
"""
Fetch the valid user ids.
e.g. when a user likes their own content, the push must filter out the author themselves
:param user_ids:
:param filter_user_id:
:return:
"""
return [uid for uid in user_ids if uid != filter_user_id]
@classmethod
def push(cls, user_ids, action_type, is_timely_push=True, **kwargs):
"""
Public entry point for pushes.
:param user_ids:
:param action_type:
:param is_timely_push: whether this is a real-time push
:param kwargs:
:return:
"""
sole_sign = uuid.uuid4().hex  # unique marker, currently used to correlate log entries
push_logger.info(json.dumps({
"step": 1,
"sole_sign": sole_sign,
"resume": "Call the push method. Specific parameters",
"push_users": user_ids,
"action_type": action_type,
"is_timely_push": is_timely_push,
"other_params": kwargs,
}))
# filter out excluded users + self
_user_ids = [uid for uid in user_ids if uid not in [settings.SUOZHANG_UID, ]]
if not _user_ids:
return
now = cls.get_now_datetime()
times_str_dic = cls.current_times(now) or {}  # formatted time strings
# real-time push
if is_timely_push:
# outside the allowed push window; TODO: may later be merged into aggregated push
if not cls.in_push_limit_time(now):
return
TimelyPushService.push(
user_ids=_user_ids,
action_type=action_type,
times_str_dic=times_str_dic,
sole_sign=sole_sign,
**kwargs
)
else:  # aggregated push
AggregationPushService.aggregation_push_data(
user_ids=user_ids,
action_type=action_type,
now=now,
sole_sign=sole_sign,
**kwargs
)
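# Usage sketch (hypothetical arguments): within the allowed hours a timely push
# goes straight to TimelyPushService, while is_timely_push=False routes the
# event into the aggregation cache for a later batched push.
#   PushService.push(user_ids=[42], action_type="answer_vote",
#                    is_timely_push=False, source_id=1001)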
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# aggregated push
import datetime
import json
from utils.common import big_data_iter
from .push_base import (
hour,
minute,
push_cache,
push_logger,
PushServiceBase,
get_datetime_to_str,
)
from .push_builder import PushEvents
class AggregationPushService(PushServiceBase):
"""
聚合推送 service
"""
_default_cache_name = "aggregation_push"
ratelimit_unit_times = 2
push_handlers = PushEvents.aggregation_push_handlers
# < ----------- 用户 缓存名 获取 begin ------------ >
@classmethod
def _get_aggregation_push_normal_time_cache_name(cls, push_types, times):
"""
获取正常时间段内 需要聚合推送的name
:param push_types: 推送类型
:param times: 字符串类型的格式化时间
:return:
"""
_cache_name = "{d_cache_name}_normal_types_{push_types}_times_{times}".format(
d_cache_name=cls._default_cache_name,
push_types=":".join(push_types),
times=times
)
return _cache_name
@classmethod
def _get_aggregation_push_abnormal_times_info(cls, push_types, now):
"""
获取非正常时间段的缓存name + expire_time
:param push_types:
:return:
"""
if now.hour >= cls.end_push_hour: # 22 - 23 点
start_date = now.date()
end_date = start_date + datetime.timedelta(days=1)
elif now.hour < cls.start_push_hour: # 0 - 8 点
end_date = now.date()
start_date = end_date - datetime.timedelta(days=1)
_name = "{d_cache_name}_abnormal_types_{push_types}_times_{times}".format(
d_cache_name=cls._default_cache_name,
push_types=":".join(push_types),
times="{}_{}".format(
get_datetime_to_str(datetime.datetime.combine(start_date, datetime.time(hour=cls.end_push_hour))),
get_datetime_to_str(datetime.datetime.combine(end_date, datetime.time(hour=cls.start_push_hour)))
)
)
eta = datetime.datetime.combine(end_date, datetime.time(cls.start_push_hour))
return {
"cache_name": _name,
"eta": eta,
}
# < ----------- 用户 缓存名 获取 end ------------ >
@classmethod
def _create_cache(cls, cache_name):
"""
缓存创建
:param cache_name: 缓存key
:return:
"""
if not push_cache.exists(cache_name):
push_cache.hincrby(cache_name, cls._default_cache_name)
push_cache.expire(cache_name, hour * 26) # 设置超时时间
cls._trigger_task_switch(cache_name) # 触发异步任务开关 加锁
return True
return False
# < ----------- 用户 聚合推送 缓存创建 + 异步任务触发 begin ------------ >
@staticmethod
def _trigger_task_switch(cache_name):
"""
触发异步任务开关
:return:
"""
_cache_name = "{}_task_switch".format(cache_name) # 任务触发开关,缓存名 区分下
return push_cache.set(_cache_name, 1, ex=60, nx=True)
@classmethod
def _trigger_task(cls, cache_name, eta, unit_time, **kwargs):
"""
异步任务触发 只会触发有效时间内的任务
:param cache_name: 缓存名字
:param eta: 延迟时间
:return:
"""
sole_sign = kwargs.get("sole_sign", "")
from communal.tasks import aggregation_push_trigger_task
task_ = aggregation_push_trigger_task.apply_async(
args=(
cache_name,
cls._default_cache_name,
unit_time,
sole_sign,
),
eta=eta - datetime.timedelta(hours=8) # utc时间 ε=(´ο`*)))唉
)
return task_.id
@classmethod
def get_cache_name_and_trigger_task(cls, push_types, now, unit_time=60 * 60, **kwargs):
"""
获取缓存name + 触发任务
:return:
"""
sole_sign = kwargs.get("sole_sign", "")
if cls.in_push_limit_time(now): # 在正常推送时间内 9 - 22小时
hour_str = (cls.current_times(now) or {}).get("hour_str", "")
_cache_name = cls._get_aggregation_push_normal_time_cache_name(push_types, hour_str)
eta = cls.expire_time(unit_time=unit_time, need_datetime=True)
else: # 时间段问题 今日22时 - 次日8时内数据 由定时任务在9点触发
_abnormal_info = cls._get_aggregation_push_abnormal_times_info(
push_types=push_types,
now=now
)
_cache_name = _abnormal_info.get("cache_name", "")
eta = _abnormal_info.get("eta", None)
_status = cls._create_cache(_cache_name) # 检测创建状态
# 业务逻辑触发异步任务
if _status and cls._trigger_task_switch(_cache_name):
task_id = cls._trigger_task(
_cache_name,
eta=eta,
unit_time=unit_time,
sole_sign=sole_sign
)
need_trigger_task = True
else:
task_id = ""
need_trigger_task = False
push_logger.info(json.dumps({
"aggregation_step": 4,
"sole_sign": sole_sign,
"subordinate": "aggregation_push",
"resume": "Aggregation Push. get_cache_name_and_trigger_task",
"push_types": push_types,
"cache_name": _cache_name,
"now": now.strftime("%Y%m%d %H:%M:%S"),
"need_trigger_task": need_trigger_task,
"Triggered task": {
"task_id": task_id,
"parameters": {
"cache_name": _cache_name,
"default_cache_key": cls._default_cache_name,
"unit_time": unit_time,
},
"eta": eta and eta.strftime("%Y%m%d %H:%M:%S") or eta,
},
}))
return _cache_name
# < ----------- 用户 聚合推送 缓存创建 + 异步任务触发 end ------------ >
# < ----------- 用户 聚合推送数据汇总 begin ------------ >
@classmethod
def aggregation_push_data(cls, user_ids, action_type, **kwargs):
"""
Aggregate push information.
Writes the data that needs to be aggregated into redis, via hash operations:
name   - the time, truncated to the hour
key    - user_id
value  - json string {push_action_type: [source_id, source_id]}
---> {push type: [row ids in the corresponding table]} --> e.g. {answer vote: [AnswerVote.id]}
:param user_ids:
:param action_type:
:param kwargs:
:return:
"""
_source_id = kwargs.get("source_id", 0)
now = kwargs.pop("now", cls.get_now_datetime()) # 当前时间
sole_sign = kwargs.get("sole_sign", "") # 唯一标识
push_logger.info(json.dumps({
"aggregation_step": 2,
"sole_sign": sole_sign,
"resume": "Aggregation Push. Trigger parameters.",
"now": now.strftime("%Y%m%d %H:%M:%S"),
"user_ids": user_ids,
"action_type": action_type,
"other_params": kwargs,
}))
if all([user_ids, action_type, _source_id]):
# 获取异步任务触发时间
delay_time = hour * 1
_push_types = []
for item in PushEvents.aggregation_push_rule.values():
aggregation_push_types = sorted(item.get("aggregation_push_types", [])) # 这里做了一次排序!!!
if action_type in aggregation_push_types:
delay_time = item.get("delay_times", hour * 1)
_push_types = aggregation_push_types
break
push_logger.info(json.dumps({
"aggregation_step": 3,
"sole_sign": sole_sign,
"resume": "Aggregation Push. action_type conditions satisfied.",
"action_type": action_type,
"aggregation_push_types": _push_types,
"delay_times": delay_time,
}))
# 获取缓存名
_cache_name = cls.get_cache_name_and_trigger_task(
push_types=_push_types,
now=now,
unit_time=delay_time,
sole_sign=sole_sign # 这是个日志的标识,没多大意义
)
# 聚合数据 处理
for user_ids in big_data_iter(user_ids, fetch_num=200):
vs = push_cache.hmget(_cache_name, user_ids)
write_to_cache_dic = {}
for user_id, v in zip(user_ids, vs):
if v:
action_type_record_dic = json.loads(v) # 从缓存中获取
else:
action_type_record_dic = {}
if action_type not in action_type_record_dic:
action_type_record_dic[action_type] = []
action_type_record_dic[action_type].append(_source_id)
write_to_cache_dic[user_id] = json.dumps(action_type_record_dic)
# 写入缓存
push_cache.hmset(_cache_name, write_to_cache_dic)
push_logger.info(json.dumps({
"aggregation_step": 5,
"sole_sign": sole_sign,
"resume": "Aggregation Push. write in redis OK.",
"cache_name": _cache_name,
"action_type": action_type,
"user_ids": user_ids,
}))
# < ----------- 用户 聚合推送数据汇总 end ------------ >
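# Cache layout written by aggregation_push_data above (illustrative values,
# restating the docstring): a redis hash named per push-type group and hour
# bucket, keyed by user id, each value a JSON map of push type -> source ids,
# e.g. key "42" -> '{"answer_vote": [1001, 1002]}'.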
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# real-time push
import json
from django.conf import settings
from .push_base import (
push_logger,
PushServiceBase,
)
from .push_builder import PushEvents
class TimelyPushService(PushServiceBase):
"""
Real-time push service.
"""
_default_cache_name = "timely_push"
ratelimit_unit_times = 2  # default number of pushes per unit time
ratelimit_whole_rate_limit = settings.PUSH_RATE_LIMIT_SETTINGS.get("total_count", 10)  # total pushes per day
push_handlers = PushEvents.timely_push_handlers
# < ----------- push begin ------------ >
@classmethod
def push(cls, user_ids, action_type, **kwargs):
"""
Real-time push.
:return:
"""
params = cls.build_push_params(user_ids, action_type, **kwargs)
if params:
from communal.tasks import push_control_task
task_ = push_control_task.delay(**params)
push_logger.info(json.dumps({
"step": 6,
"sole_sign": kwargs.get("sole_sign", ""),
"resume": "push information completed! timely push tasks delay.",
"params": params,
"task_id": task_.id,
}))
# < ----------- push end ------------ >
# coding: utf-8
import json
from communal.cache.push import personalize_push_cache as push_cache
from talos.rpc import get_current_rpc_invoker
rpc_invoker = get_current_rpc_invoker()
class Tag2GroupTopic(object):
"""标签映射到小组话题"""
CACHE_KEY_ACTION_TAG_2_GROUP_MAP = 'mimas:communal:tag2group_map:{}'
CACHE_KEY_ACTION_TAG_2_TOPIC_MAP = 'mimas:communal:tag2topic_map:{}'
@classmethod
def get_topic_ids_with_tag_v3(cls, tag_v3_id):
if not tag_v3_id:
return []
cache_key = cls.CACHE_KEY_ACTION_TAG_2_TOPIC_MAP.format(tag_v3_id)
topic_ids = push_cache.get(cache_key)
if not topic_ids:
res = rpc_invoker["api/topic/list_by_tag_v3_ids"](tag_ids=[tag_v3_id]).unwrap()
topic_ids = res.get(str(tag_v3_id), [])
push_cache.set(cache_key, json.dumps(topic_ids), 3600)
return topic_ids
return json.loads(topic_ids)
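# Usage sketch (the tag id is hypothetical): the first call misses the cache, resolves the
# mapping via the "api/topic/list_by_tag_v3_ids" RPC and caches it for an hour; later calls
# within that hour are served from redis.
#   topic_ids = Tag2GroupTopic.get_topic_ids_with_tag_v3(1024)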
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .push import *
from .content_quick_search import *
from .fake import *
from .group_reply import *
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
from gm_rpcd.all import bind, RPCDFaultException
from communal.models import QuickSearchContentKeyword
@bind("mimas/quick_search/get_keywords")
def content_quick_search_get_keywords(content_ids, content_type):
"""
Fetch the quick-search keywords for the given content. This interface only accepts ids of a single content type per call.
:param content_ids:
:param content_type:
:return:
"""
result = {}
keywords = QuickSearchContentKeyword.objects.filter(
content_id__in=content_ids,
content_type=content_type
).values_list("content_id", "keywords")
for content_id, keyword in keywords:
result.update({
str(content_id): json.loads(keyword) or [],
})
return result
@bind("mimas/quick_search/update_keywords")
def content_quick_search_update_keywords(content_id, content_type, keywords):
"""
Update the quick-search keywords of a piece of content.
:param content_id:
:param content_type:
:param keywords:
:return:
"""
obj, _ = QuickSearchContentKeyword.objects.update_or_create(
content_id=content_id,
content_type=content_type,
defaults={
"keywords": json.dumps(keywords),
"update_time": datetime.datetime.now(),
}
)
return {
"id": obj.id,
}
@bind("mimas/quick_search/bulk_update_keywords")
def content_quick_search_bulk_update_keywords(bulk_update_list):
"""
Bulk-update quick-search keywords.
:param bulk_update_list:
:return:
"""
result = []
for item in bulk_update_list:
obj, _ = QuickSearchContentKeyword.objects.update_or_create(
content_id=item.get("content_id", 0),
content_type=item.get("content_type", ""),
defaults={
"keywords": json.dumps(item.get("keywords", [])),
"update_time": datetime.datetime.now(),
}
)
result.append(obj.id)
return result
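# Illustrative payloads for the endpoints above (field values are made up; the endpoint
# names are the ones declared in the @bind decorators):
#   mimas/quick_search/get_keywords         {"content_ids": [1, 2], "content_type": "answer"}
#   mimas/quick_search/update_keywords      {"content_id": 1, "content_type": "answer", "keywords": ["kw1", "kw2"]}
#   mimas/quick_search/bulk_update_keywords {"bulk_update_list": [{"content_id": 1, "content_type": "answer", "keywords": []}]}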
from gm_types.mimas import FAKE_REPLY_BAND_TYPE, COMMUNITY_CONTENT_TYPE, FAKE_TYPE
from communal.services.fake.fake_behavior import FakeBehaviorService
from talos.rpc import bind
from talos.services.other import get_user_lastest_device_app_version_by_user_id
from utils.common import is_version_gray
@bind('mimas/fake/create')
def fake_create(business_id, business_type, business_level, fake_type, fake_user_id, author_id):
"""
Seed fake interactions (fake replies / votes / quality marks).
:param business_id:
:param business_type: COMMUNITY_CONTENT_TYPE
:param business_level: 2,3,4,5,6
:param fake_type: FAKE_TYPE
:param fake_user_id:
:param author_id:
:return:
"""
version = get_user_lastest_device_app_version_by_user_id(author_id)
# whether the push should jump to the vote list on the message page
jump_to_vote_list = is_version_gray(version, '7.29.0')
if business_type == COMMUNITY_CONTENT_TYPE.ANSWER:
if fake_type == FAKE_TYPE.COMMENT:
FakeBehaviorService.answer_reply(int(business_id), int(fake_user_id), author_id)
elif fake_type == FAKE_TYPE.VOTE:
FakeBehaviorService.answer_vote(int(business_id), int(fake_user_id), author_id, jump_to_vote_list=jump_to_vote_list)
elif fake_type == FAKE_TYPE.QUALITY:
FakeBehaviorService.answer_quality(int(business_id), int(fake_user_id), author_id)
elif business_type == COMMUNITY_CONTENT_TYPE.TOPIC:
if fake_type == FAKE_TYPE.COMMENT:
FakeBehaviorService.topic_reply(int(business_id), int(fake_user_id), author_id)
elif fake_type == FAKE_TYPE.VOTE:
FakeBehaviorService.topic_vote(int(business_id), int(fake_user_id), author_id, jump_to_vote_list=jump_to_vote_list)
elif business_type == COMMUNITY_CONTENT_TYPE.TRACTATE:
if fake_type == FAKE_TYPE.COMMENT:
FakeBehaviorService.tractate_reply(int(business_id), int(fake_user_id), author_id)
elif fake_type == FAKE_TYPE.VOTE:
FakeBehaviorService.tractate_vote(int(business_id), int(fake_user_id), author_id, jump_to_vote_list=jump_to_vote_list)
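# Illustrative call (all values are hypothetical): fake a vote on a tractate; whether the
# push jumps to the vote list depends on the author's app-version gray check above.
#   mimas/fake/create {"business_id": 10, "business_type": COMMUNITY_CONTENT_TYPE.TRACTATE,
#                      "business_level": 3, "fake_type": FAKE_TYPE.VOTE,
#                      "fake_user_id": 20, "author_id": 30}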
from qa.service import AnswerReplyService
from talos.rpc import bind
from talos.services.tractate import TractateReplyService
from utils.group_routine import GroupRoutine
@bind('mimas/group/reply_count')
def get_group_reply_count(diary_ids, answer_ids, tractate_ids):
"""统计话题、小组下关联内容的评论总数"""
total_count = 0
routine = GroupRoutine()
for answer_id in answer_ids:
routine.submit('a_' + str(answer_id), AnswerReplyService.get_reply_count, answer_id)
for tractate_id in tractate_ids:
routine.submit('t_' + str(tractate_id), TractateReplyService.get_reply_count_by_tractate_id, tractate_id)
routine.go()
for answer_id in answer_ids:
total_count += routine.results.get('a_' + str(answer_id), 0)
for tractate_id in tractate_ids:
total_count += routine.results.get('t_' + str(tractate_id), 0)
return {
'total_count': total_count
}
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from gm_rpcd.all import bind, RPCDFaultException
from gm_types.push import PUSH_CONTENT_TYPE
from communal.tools import PushService
from communal.tools.push.push_builder import CtrPushContentFormat
from communal.models.hot_content import HotContent
from utils.rpc import logging_exception
@bind("mimas/communal/push_service")
def mimas_call_push_logic(user_ids, action_type, is_timely_push, **kwargs):
"""
Push entry point: filter the user ids and delegate to PushService.push.
:param user_ids:
:param action_type:
:param is_timely_push:
:param kwargs:
:return:
"""
valid_user_ids = PushService.filter_valid_user_ids(user_ids, kwargs.pop("filter_user_id", 0))
PushService.push(
user_ids=valid_user_ids,
action_type=action_type,
is_timely_push=is_timely_push,
**kwargs
)
@bind('mimas/ctr/push_info/format')
def ctr_push_info_format(content_id, push_type, content_type, tag_name=''):
"""
Build the payload for a personalized (CTR) push.
:param content_id: id of the content to push
:param push_type: type of the pushed content
:param content_type: push type enum (PUSH_CONTENT_TYPE)
:param tag_name: name of the tag associated with the push
:return: the assembled push payload
"""
push_info = {}
try:
if content_type == PUSH_CONTENT_TYPE.ANSWER:
push_info = CtrPushContentFormat.ctr_answer(content_id, push_type)
elif content_type == PUSH_CONTENT_TYPE.QUESTION:
push_info = CtrPushContentFormat.ctr_question(content_id, push_type, tag_name)
elif content_type == PUSH_CONTENT_TYPE.DIARY:
push_info = CtrPushContentFormat.ctr_diary(content_id, push_type)
elif content_type == PUSH_CONTENT_TYPE.TRACTATE:
push_info = CtrPushContentFormat.ctr_tractate(content_id, push_type)
except Exception:
logging_exception()
return push_info
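# Usage sketch (ids and push_type are hypothetical; content_type values come from
# PUSH_CONTENT_TYPE). An unmatched content_type or a formatting error returns {}.
#   mimas/ctr/push_info/format {"content_id": 1, "push_type": "ctr", "content_type": PUSH_CONTENT_TYPE.DIARY}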
@bind('mimas/ctr/push_content_ids')
def get_ctr_content_by_ids(content_type, date_object=None):
"""demeter调用,返回按照点击数排序的内容id"""
result = {
'content_ids': []
}
if not content_type:
return result
if not date_object:
date_object = datetime.now().date() + timedelta(days=-1)
hot_contents = HotContent.objects.filter(
date_index=date_object, content_type=content_type
).values_list('content_id', 'click_num')
sorted_contents = sorted(hot_contents, key=lambda item: item[1], reverse=True)
result['content_ids'] = [data[0] for data in sorted_contents][:30]
return result
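# Return shape (from the code above): {"content_ids": [...]} with at most 30 ids of the
# requested content_type, taken from the previous day's HotContent rows (unless a
# date_object is passed) and ordered by click_num descending.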
# -*- coding: UTF-8 -*-
\ No newline at end of file
from .connection import index_data_source, pk_data_source
from .transfer import get_answers
from .transfer import get_answers_rpc
from .transfer import get_answer_ids
# -*- coding: UTF-8 -*-
from django.conf import settings
from gm_dbmw_api.descriptor import source, relation
from gm_types.mq.dbmw import DBMWEndPoints
pk_data_source = source.GroupedSource.of(
source.MySQLTableSource(
logical_database_id=settings.LOGICAL_DATABASE_ID,
table_name='api_answer',
value_columns_all=True,
key_columns=['id']
)
)
index_data_source = pk_data_source
from .connection import index_data_source, pk_data_source
from .transfer import get_articles
# -*- coding: UTF-8 -*-
from django.conf import settings
from gm_dbmw_api.descriptor import source, relation
from gm_types.mq.dbmw import DBMWEndPoints
pk_data_source = source.GroupedSource.of(
source.MySQLTableSource(
logical_database_id=settings.LOGICAL_DATABASE_ID,
table_name='api_article',
value_columns_all=True,
key_columns=['id']
)
)
index_data_source = pk_data_source
from talos.models.topic.column import Article
from talos.models.topic.recommendsocre import ArticleRelatedRecommendScore
import logging
from utils.rpc import get_rpc_invoker
from talos.models.topic.topic import Problem
from qa.models.toutiao import by_content_type_id_get_keywords, get_content_star_keywords, get_content_title_keywords, \
get_content_star_first_keyword
from tags.services.tag import (get_tagv3_analysis_info, get_tagv3_ids_by_tagv3_names,
get_first_demand_ids_by_name, get_second_demand_ids_by_name,
get_first_position_ids_by_name, get_second_position_ids_by_name,
get_first_solution_ids_by_name, get_second_solution_ids_by_name)
rpc = get_rpc_invoker()
def get_articles(pks):
articles = Article.objects.filter(id__in=pks)
rs = ArticleRelatedRecommendScore.objects.filter(id__in=pks).values('id', 'score')
rs_dict = {item['id']: item['score'] for item in list(rs)}
data = []
for article in articles:
item = {}
item['id'] = article.id
item['article_id'] = article.article_id
item['smart_rank'] = article.smart_rank()
item["is_online"] = Problem.objects.filter(id=article.article_id).values_list("is_online", flat=True).first()
item['recommend_score'] = rs_dict.get(article.id, 0)
item["content_keyword"] = by_content_type_id_get_keywords(id=article.id, content_type="article")
item["content_star_keyword"] = get_content_star_keywords(id=article.id, content_type="article")
item["content_star_first_keyword"] = get_content_star_first_keyword(id=article.id, content_type="article")
# item["title_keyword"] = get_content_title_keywords(id=article.id, content_type="article")
topic = article.topic()
if topic:
item['title'] = topic.title
item['content'] = topic.answer_richtext
tags = topic.tags
tags_id_list = [tag.id for tag in tags]
item['tag_ids'] = tags_id_list
item['tags'] = [tag.name for tag in tags]
# new tags
fresh_tag_result = rpc["api/agile_tag/tuple_new_tags"](old_tag_ids=tags_id_list)
fresh_tag_id_list = list()
fresh_tag_name_list = list()
for fresh_tag_id, fresh_tag_name in fresh_tag_result.unwrap():
fresh_tag_id_list.append(fresh_tag_id)
fresh_tag_name_list.append(fresh_tag_name)
item["fresh_tag_ids"] = fresh_tag_id_list
item["fresh_tags"] = fresh_tag_name_list
item["ask"] = topic.ask
item['user'] = {
"id": topic.user.id
}
item["content_pre"] = topic.answer_richtext
item['title_pre'] = topic.title
(need_refresh_data, second_demands_list, second_solutions_list, second_positions_list,
second_demands_ids_list,
second_solutions_ids_list, second_positions_ids_list,
first_demands_ids_list, first_solutions_ids_list, first_positions_ids_list, first_demands_list,
first_solutions_list, first_positions_list,
project_tags_list, project_tags_ids_list, first_classify_ids_list, first_classify_names_list,
second_classify_ids_list, second_classify_names_list) = get_tagv3_analysis_info(content_id=item["id"],
content_type="article")
if need_refresh_data:
item["tags_v3"] = list(project_tags_list)
item["first_demands"] = list(first_demands_list)
item["second_demands"] = list(second_demands_list)
item["first_solutions"] = list(first_solutions_list)
item["second_solutions"] = list(second_solutions_list)
item["positions"] = list(first_positions_list)
item["second_positions"] = list(second_positions_list)
item["tagv3_ids"] = list(project_tags_ids_list)
item["first_demands_ids"] = list(first_demands_ids_list)
item["second_demands_ids"] = list(second_demands_ids_list)
item["first_solutions_ids"] = list(first_solutions_ids_list)
item["second_solutions_ids"] = list(second_solutions_ids_list)
item["first_positions_ids"] = list(first_positions_ids_list)
item["second_positions_ids"] = list(second_positions_ids_list)
item["first_classify_ids"] = list(first_classify_ids_list)
item["first_classify_names"] = list(first_classify_names_list)
item["second_classify_ids"] = list(second_classify_ids_list)
item["second_classify_names"] = list(second_classify_names_list)
logging.info("get data:%s" % data)
data.append(item)
return data
"""日记本相关任务。
1、日记回复数更新
2、日记点赞数更新
"""
\ No newline at end of file
from talos.portal import update_diary_extra_info
update_extra_info = update_diary_extra_info
from .transfer import get_soft_article
from .connection import pk_data_source,index_data_source
\ No newline at end of file
# -*- coding: UTF-8 -*-
from django.conf import settings
from gm_dbmw_api.descriptor import source
pk_data_source = source.GroupedSource.of(
source.MySQLTableSource(
logical_database_id=settings.LOGICAL_DATABASE_ID,
table_name='api_doctor_softarticle',
value_columns_all=True,
key_columns=['id']
)
)
index_data_source = pk_data_source
import logging
from talos.models.soft_article.soft_article import SoftArticle, SoftArticleRelation
from data_sync.utils import to_epoch, tzlc
from qa.models.toutiao import by_content_type_id_get_keywords, get_content_star_keywords, get_content_title_keywords, get_content_star_first_keyword
def get_soft_article(pks):
doctor_tractates = SoftArticle.objects.filter(id__in=pks)
data = []
for doctor_tractate in doctor_tractates:
item = {}
item['id'] = doctor_tractate.id
item["content"] = doctor_tractate.content
item['is_video'] = doctor_tractate.get_is_video
item["doctor_id"] = doctor_tractate.doctor_id # TODO mapping改成string
# item["content_pre"] = doctor_tractate.content
item["is_online"] = doctor_tractate.is_online
item["status"] = doctor_tractate.status
item["platform"] = doctor_tractate.platform
item["content_level"] = doctor_tractate.content_level
item["create_time"] = tzlc(doctor_tractate.create_time)
item["tractate_tag_list"] = doctor_tractate.get_tag_list_sort_article
item["content_keyword"] = by_content_type_id_get_keywords(id=doctor_tractate.id, content_type="doctortopic")
# item["content_star_keyword"] = get_content_star_keywords(id=doctor_tractate.id, content_type="doctortopic")
# item["content_star_first_keyword"] = get_content_star_first_keyword(id=doctor_tractate.id, content_type="doctortopic")
# item["title_keyword"] = get_content_title_keywords(id=doctor_tractate.id, content_type="doctortopic")
data.append(item)
logging.info("get data:%s" % data)
return data
# -*- coding: UTF-8 -*-
from multiprocessing import Pool
from functools import partial
from django.core.management.base import BaseCommand
from data_sync.tasks import write_to_es
from data_sync.type_info import get_type_info_map
def process(dtype, pks):
write_to_es(dtype, pks)
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('-t', '--type', dest='dtype')
parser.add_argument('-p', '--procs', dest='parallel', type=int)
def handle(self, *args, **options):
t = options['dtype']
p = options['parallel'] or 2
pool = Pool(processes=p)
dt = get_type_info_map()[t]
pks = list(dt.model.objects.all().values_list('id', flat=True))
# chunk size; guard against an empty table or more processes than rows
n = max(1, len(pks) // p)
_process = partial(process, t)
pool.map(_process, [pks[i:i + n] for i in range(0, len(pks), n)])
pool.close()
pool.join()
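# Example invocation (a sketch: the actual command name comes from this file's name under
# management/commands/, which is not shown here, and "answer" assumes such a key exists in
# get_type_info_map()):
#   python manage.py <command_name> -t answer -p 4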
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .connection import index_data_source, pk_data_source
from .transfer import get_live_notice, get_activity, get_article, get_live_stream,get_video_tractate,get_word_tractate
# -*- coding: UTF-8 -*-
from django.conf import settings
from gm_dbmw_api.descriptor import source, relation
from gm_types.mq.dbmw import DBMWEndPoints
pk_data_source = source.GroupedSource.of(
source.MySQLTableSource(
logical_database_id=settings.LOGICAL_DATABASE_ID,
table_name='api_live_stream',
value_columns_all=True,
key_columns=['id']
)
)
index_data_source = pk_data_source
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .connection import index_data_source, pk_data_source
from .transfer import get_qa_tops
# -*- coding: UTF-8 -*-
from django.conf import settings
from gm_dbmw_api.descriptor import source, relation
from gm_types.mq.dbmw import DBMWEndPoints
pk_data_source = source.GroupedSource.of(
source.MySQLTableSource(
logical_database_id=settings.LOGICAL_DATABASE_ID,
table_name='api_answer_top',
value_columns_all=True,
key_columns=['id']
)
)
index_data_source = pk_data_source
# -*- coding: UTF-8 -*-
\ No newline at end of file
from .transfer import get_subscript_article
from .connection import pk_data_source, index_data_source
\ No newline at end of file
from .util import get_problems
from .connection import pk_data_source, index_data_source
from .transfer import get_tractate
from .connection import pk_data_source,index_data_source
\ No newline at end of file