Commit 0b8e2572 authored by lixiaofang's avatar lixiaofang

最新 mimas 代码

parents
<component name="ProjectCodeStyleConfiguration">
  <code_scheme name="Project" version="173">
    <!-- Deduplicated: the DBN-PSQL and DBN-SQL sections each appeared twice
         with byte-identical content. Duplicate sibling sections add nothing
         and which copy wins is tool-dependent, so only one of each is kept. -->
    <DBN-PSQL>
      <case-options enabled="false">
        <option name="KEYWORD_CASE" value="lower" />
        <option name="FUNCTION_CASE" value="lower" />
        <option name="PARAMETER_CASE" value="lower" />
        <option name="DATATYPE_CASE" value="lower" />
        <option name="OBJECT_CASE" value="preserve" />
      </case-options>
      <formatting-settings enabled="false" />
    </DBN-PSQL>
    <DBN-SQL>
      <case-options enabled="false">
        <option name="KEYWORD_CASE" value="lower" />
        <option name="FUNCTION_CASE" value="lower" />
        <option name="PARAMETER_CASE" value="lower" />
        <option name="DATATYPE_CASE" value="lower" />
        <option name="OBJECT_CASE" value="preserve" />
      </case-options>
      <formatting-settings enabled="false">
        <option name="STATEMENT_SPACING" value="one_line" />
        <option name="CLAUSE_CHOP_DOWN" value="chop_down_if_statement_long" />
        <option name="ITERATION_ELEMENTS_WRAPPING" value="chop_down_if_not_single" />
      </formatting-settings>
    </DBN-SQL>
  </code_scheme>
</component>
\ No newline at end of file
This diff is collapsed.
<component name="ProjectDictionaryState">
<dictionary name="lxrent" />
</component>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="TestRunnerService">
<option name="PROJECT_TEST_RUNNER" value="Unittests" />
</component>
</module>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 2.7 (gaia_env)" project-jdk-type="Python SDK" />
<component name="PyCharmProfessionalAdvertiser">
<option name="shown" value="true" />
</component>
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/mentha.iml" filepath="$PROJECT_DIR$/.idea/mentha.iml" />
</modules>
</component>
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ChangeListManager">
<list default="true" id="50d0c2bb-024a-4978-bd06-1e5ecc60eec4" name="Default Changelist" comment="">
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
</list>
<option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="FUSProjectUsageTrigger">
<session id="-757420138">
<usages-collector id="statistics.file.extensions.open">
<counts>
<entry key="py" value="3" />
</counts>
</usages-collector>
<usages-collector id="statistics.file.types.open">
<counts>
<entry key="Python" value="3" />
</counts>
</usages-collector>
<usages-collector id="statistics.lifecycle.project">
<counts>
<entry key="project.open.time.35" value="1" />
<entry key="project.opened" value="1" />
</counts>
</usages-collector>
</session>
</component>
<component name="FileEditorManager">
<leaf SIDE_TABS_SIZE_LIMIT_KEY="300">
<file pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/data_sync/tractate/transfer.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="375">
<caret line="31" column="35" selection-start-line="31" selection-start-column="35" selection-end-line="31" selection-end-column="35" />
<folding>
<element signature="e#0#14#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
</file>
<file pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/talos/models/tractate/tractate.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="555">
<caret line="63" column="89" selection-start-line="63" selection-start-column="89" selection-end-line="63" selection-end-column="89" />
</state>
</provider>
</entry>
</file>
<file pinned="false" current-in-tab="true">
<entry file="file://$USER_HOME$/gaia_env/lib/python2.7/site-packages/gm_types/mimas/tractate.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="209">
<caret line="29" selection-start-line="29" selection-end-line="36" selection-end-column="28" />
</state>
</provider>
</entry>
</file>
</leaf>
</component>
<component name="FindInProjectRecents">
<findStrings>
<find>write_to_es</find>
<find>/data/log</find>
<find>mimas-dbmw</find>
<find>diary_</find>
<find>diary_portrait</find>
<find>DORIS_DB_NAME</find>
<find>principal_page</find>
<find>dbmw</find>
</findStrings>
</component>
<component name="Git.Settings">
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
</component>
<component name="IdeDocumentHistory">
<option name="CHANGED_PATHS">
<list>
<option value="$PROJECT_DIR$/settings/log_settings.py" />
<option value="$PROJECT_DIR$/data_sync/management/commands/trans_data_source.py" />
</list>
</option>
</component>
<component name="ProjectFrameBounds" extendedState="1">
<option name="y" value="23" />
<option name="width" value="1280" />
<option name="height" value="723" />
</component>
<component name="ProjectLevelVcsManager" settingsEditedManually="true" />
<component name="ProjectView">
<navigator proportions="" version="1">
<foldersAlwaysOnTop value="true" />
</navigator>
<panes>
<pane id="ProjectPane">
<subPane>
<expand>
<path>
<item name="mentha" type="b2602c69:ProjectViewProjectNode" />
<item name="mentha" type="462c0819:PsiDirectoryNode" />
</path>
<path>
<item name="mentha" type="b2602c69:ProjectViewProjectNode" />
<item name="mentha" type="462c0819:PsiDirectoryNode" />
<item name="data_sync" type="462c0819:PsiDirectoryNode" />
</path>
<path>
<item name="mentha" type="b2602c69:ProjectViewProjectNode" />
<item name="mentha" type="462c0819:PsiDirectoryNode" />
<item name="data_sync" type="462c0819:PsiDirectoryNode" />
<item name="tractate" type="462c0819:PsiDirectoryNode" />
</path>
</expand>
<select />
</subPane>
</pane>
<pane id="Scope" />
</panes>
</component>
<component name="RunDashboard">
<option name="ruleStates">
<list>
<RuleState>
<option name="name" value="ConfigurationTypeDashboardGroupingRule" />
</RuleState>
<RuleState>
<option name="name" value="StatusDashboardGroupingRule" />
</RuleState>
</list>
</option>
</component>
<component name="SvnConfiguration">
<configuration />
</component>
<component name="TaskManager">
<task active="true" id="Default" summary="Default task">
<changelist id="50d0c2bb-024a-4978-bd06-1e5ecc60eec4" name="Default Changelist" comment="" />
<created>1576030190402</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1576030190402</updated>
</task>
<servers />
</component>
<component name="ToolWindowManager">
<frame x="0" y="23" width="1280" height="723" extended-state="1" />
<editor active="true" />
<layout>
<window_info active="true" content_ui="combo" id="Project" order="0" visible="true" weight="0.13741064" />
<window_info id="Structure" order="1" side_tool="true" weight="0.25" />
<window_info id="DB Browser" order="2" />
<window_info id="Favorites" order="3" side_tool="true" />
<window_info anchor="bottom" id="Message" order="0" />
<window_info anchor="bottom" id="Find" order="1" />
<window_info anchor="bottom" id="Run" order="2" />
<window_info anchor="bottom" id="Debug" order="3" weight="0.4" />
<window_info anchor="bottom" id="Cvs" order="4" weight="0.25" />
<window_info anchor="bottom" id="Inspection" order="5" weight="0.4" />
<window_info anchor="bottom" id="TODO" order="6" />
<window_info anchor="bottom" id="Version Control" order="7" show_stripe_button="false" />
<window_info anchor="bottom" id="DB Execution Console" order="8" />
<window_info anchor="bottom" id="Terminal" order="9" visible="true" weight="0.32894737" />
<window_info anchor="bottom" id="Python Console" order="10" />
<window_info anchor="bottom" id="Event Log" order="11" side_tool="true" />
<window_info anchor="right" id="Commander" internal_type="SLIDING" order="0" type="SLIDING" weight="0.4" />
<window_info anchor="right" id="Ant Build" order="1" weight="0.25" />
<window_info anchor="right" content_ui="combo" id="Hierarchy" order="2" weight="0.25" />
</layout>
</component>
<component name="VcsContentAnnotationSettings">
<option name="myLimit" value="2678400000" />
</component>
<component name="XDebuggerManager">
<breakpoint-manager>
<breakpoints>
<line-breakpoint enabled="true" suspend="THREAD" type="python-line">
<url>file://$PROJECT_DIR$/data_sync/principal_page/__init__.py</url>
<line>3</line>
<option name="timeStamp" value="1" />
</line-breakpoint>
</breakpoints>
</breakpoint-manager>
</component>
<component name="editorHistoryManager">
<entry file="file://$PROJECT_DIR$/data_sync/tractate/transfer.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="375">
<caret line="31" column="35" selection-start-line="31" selection-start-column="35" selection-end-line="31" selection-end-column="35" />
<folding>
<element signature="e#0#14#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/talos/models/tractate/tractate.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="555">
<caret line="63" column="89" selection-start-line="63" selection-start-column="89" selection-end-line="63" selection-end-column="89" />
</state>
</provider>
</entry>
<entry file="file://$USER_HOME$/gaia_env/lib/python2.7/site-packages/gm_types/mimas/tractate.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="209">
<caret line="29" selection-start-line="29" selection-end-line="36" selection-end-column="28" />
</state>
</provider>
</entry>
</component>
</project>
\ No newline at end of file
FROM ccr.ccs.tencentyun.com/gm-base/gm-alpine-ffmpeg:v1.1

# MAINTAINER has been deprecated since Docker 1.13; use a LABEL instead.
LABEL maintainer="wangpenghong@igengmei.com"

COPY ./requirements.txt /tmp

# Build toolchain (removed at the end via `apk del .build-deps`) plus business
# dependencies and install tools. Packages that were listed twice in the
# original (linux-headers, tcl-dev/tk-dev, librdkafka-dev) are installed once.
RUN apk add --no-cache --virtual .build-deps \
        bzip2-dev \
        coreutils \
        dpkg-dev dpkg \
        expat-dev \
        findutils \
        gcc \
        gdbm-dev \
        libc-dev \
        libffi-dev \
        libnsl-dev \
        libressl-dev \
        libtirpc-dev \
        linux-headers \
        make \
        ncurses-dev \
        pax-utils \
        readline-dev \
        sqlite-dev \
        tcl-dev \
        tk \
        tk-dev \
        xz-dev \
        zlib-dev \
        # business dependencies and install tools
        python3-dev \
        librdkafka-dev \
        mariadb-client \
        mariadb-dev \
        git \
        openssh \
    && apk add --no-cache jpeg-dev zlib-dev freetype-dev lcms2-dev openjpeg-dev tiff-dev \
    # Skip the interactive host-key confirmation on the first ssh connection.
    && echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config \
    && apk add --no-cache mariadb-connector-c-dev libxml2-dev libxslt-dev \
    && pip install --no-cache-dir -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com -r /tmp/requirements.txt \
    && mkdir -p /tmp/video_convert \
    && mkdir -p /data/log/mentha/app

ENV GM_RPCD_DEPLOY_CONF_PATH "/srv/apps/mentha/deploy_prod.xml"

COPY . /srv/apps/mentha/
WORKDIR /srv/apps/mentha/

# Re-install the in-house "master"-pinned packages without dependency
# resolution, then drop the build toolchain to keep the image small.
RUN grep master requirements.txt > /tmp/gm-requirements.txt \
    && pip install --no-deps --upgrade -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com -r /tmp/gm-requirements.txt \
    && apk del .build-deps

CMD gunicorn gm_rpcd.wsgi:application -w 5 -k gevent -b 0.0.0.0:8000 --worker-tmp-dir /dev/shm
// Shared library that provides the dingNotify and dockerTag helpers.
@Library('gm-pipeline-library') _

pipeline {
    agent any

    options {
        // Console output add timestamps
        timestamps()
        // Disallow concurrent executions of the Pipeline
        disableConcurrentBuilds()
        // On failure, retry the entire Pipeline the specified number of times.
        retry(1)
    }

    parameters {
        choice(name: 'CACHE', choices: ['', '--no-cache'], description: 'docker build 是否使用cache,默认使用,不使用为--no-cache')
    }

    environment {
        // Image Tag branch.time.hash
        TAG = dockerTag()
        // Image Full Tag
        IMAGE = "${DOCKER_REGISTRY}/gm-backend/mentha:$TAG"
    }

    stages {
        stage("Begin") {
            steps {
                // DingTalk notification before the build starts.
                dingNotify "before"
            }
        }
        stage('Build Images') {
            parallel {
                stage('Build Service') {
                    steps {
                        sh "docker build . ${params.CACHE} -t $IMAGE"
                        sh "docker push $IMAGE"
                    }
                }
            }
        }
    }

    post {
        always {
            // DingTalk notification with the final build result.
            dingNotify "after", "${currentBuild.currentResult}"
        }
    }
}
1. Modify the sqitch config: add target_db.
2. cd sqls; sqitch deploy -t target_db
3. python manage.py init_tasks
\ No newline at end of file
# !/usr/bin/env python
# encoding=utf-8
from __future__ import absolute_import
import os
import raven
from celery import Celery
from django.conf import settings
from raven.contrib.celery import register_signal, register_logger_signal
# set the default Django settings module for the 'celery' program.
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings.settings')
class _Celery(Celery):
"""wrap for celery.Celery."""
def on_configure(self):
# check if sentry settings provided
if not settings.SENTRY_CELERY_ENDPOINT:
return
client = raven.Client(settings.SENTRY_CELERY_ENDPOINT)
# register a custom filter to filter out duplicate logs
register_logger_signal(client)
# hook into the Celery error handler
register_signal(client)
app = _Celery('mimas_tasks')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
<?xml version="1.0" encoding="utf-8"?>
<gm_rpcd_config>
<info config_name="app" version="1.0"/>
<config name="application_name" value="mimas"/>
<config name="statuses" value="system:status" />
<config name="request_info_extractor" value="system:MimasRequestInfoExtractor" />
<config name="service_list">
<element value="mimas"/>
<element value="qa"/>
<element value="topic"/>
<element value="diary"/>
</config>
<config name="initializer_list">
<element value="init_django"/>
<element value="user_hierarchy.views"/>
<element value="qa.views"/>
<element value="hera.views"/>
<element value="hera.queries"/>
<element value="live.views"/>
<element value="talos.views"/>
<element value="data_sync.views"/>
<element value="search.views"/>
</config>
</gm_rpcd_config>
#!/bin/sh
# Build the Sphinx HTML documentation located in ./docs, then return
# to the repository root.
cd docs
make html
cd ..
# -*- coding: UTF-8 -*-
\ No newline at end of file
from .connection import index_data_source, pk_data_source
from .transfer import get_answers
from .transfer import get_answers_rpc
from .transfer import get_answer_ids
# -*- coding: UTF-8 -*-
from django.conf import settings
from gm_dbmw_api.descriptor import source, relation
from gm_types.mq.dbmw import DBMWEndPoints

# Primary-key driven dbmw source over the api_answer table; the index
# source is simply the same grouped source reused unchanged.
pk_data_source = source.GroupedSource.of(
    source.MySQLTableSource(
        logical_database_id=settings.LOGICAL_DATABASE_ID,
        table_name='api_answer',
        value_columns_all=True,
        key_columns=['id'],
    )
)

index_data_source = pk_data_source
from qa.models.answer import Answer, Question
from qa.models.recommendscore import AnswerRelatedRecommendScore
from data_sync.utils import to_epoch, tzlc
from qa.models.answer import QuestionTag
from utils.rpc import get_rpc_invoker
from qa.models.toutiao import by_contentz_type_id_get_keywords, get_content_star_keywords, get_content_title_keywords, get_content_star_first_keyword
rpc = get_rpc_invoker()
def get_answers(pks):
    """Assemble ES sync documents for the Answer ids in ``pks``.

    Returns a list of dicts; answers whose related Answer/Question row is
    missing are silently skipped (best-effort sync).
    """
    answers = Answer.objects.filter(id__in=pks)
    rs = AnswerRelatedRecommendScore.objects.filter(answer_id__in=pks).values(
        'answer_id', 'score'
    )
    # BUG FIX: iterating a .values() queryset yields dicts, so the previous
    # `{k: v for (k, v) in rs}` unpacked each dict into its two *keys*
    # ('answer_id', 'score') instead of building an id -> score map, and
    # every answer silently fell back to recommend_score 0.
    rs_dict = {r['answer_id']: r['score'] for r in rs}
    data = []
    for answer in answers:
        try:
            item = {}
            item['id'] = answer.id
            item['good_click'] = answer.get_good_click
            item['user_id'] = answer.user_id
            item['question_id'] = answer.question.id
            item['smart_rank'] = answer.smart_rank()
            item['smart_rank_v2'] = answer.smart_rank_v2()
            item['title'] = answer.question.title
            item['desc'] = answer.question.content
            item['question_type'] = answer.question.question_type
            item['answer'] = answer.content
            item['content_length'] = len(item['answer'])
            item['has_picture'] = answer.get_has_picture()
            item["is_online"] = bool(answer.is_online and answer.question.is_online)
            item['recommend_score'] = rs_dict.get(answer.id, 0)
            # The tag id list was computed twice in the original; once is enough.
            tags_id_list = [t.tag_id for t in answer.question.tags]
            item['tag_ids'] = tags_id_list
            item['tag_name'] = QuestionTag.get_name_list(tags_id_list)
            item['tag_name_analyze'] = QuestionTag.get_name_list(tags_id_list)
            item["hot_score"] = answer.get_hot_score_answer()
            # Map legacy tag ids to the new ("fresh") tag system via RPC.
            fresh_tag_result = rpc["api/agile_tag/tuple_new_tags"](old_tag_ids=tags_id_list)
            fresh_tag_id_list = []
            fresh_tag_name_list = []
            for fresh_tag_id, fresh_tag_name in fresh_tag_result.unwrap():
                fresh_tag_id_list.append(fresh_tag_id)
                fresh_tag_name_list.append(fresh_tag_name)
            item["fresh_tag_ids"] = fresh_tag_id_list
            item["fresh_tag_name"] = fresh_tag_name_list
            item['content_level'] = answer.level
            item['content_type'] = answer.content_type
            item['create_time'] = tzlc(answer.create_time)
            item['create_time_epoch'] = to_epoch(tzlc(answer.create_time))
            item['is_recommend'] = answer.is_recommend
            item['title_pre'] = answer.question.title
            item['answer_pre'] = answer.content
            item['desc_pre'] = answer.question.content
            item["last_any_reply_time"] = tzlc(answer.get_last_any_reply_time())
            item["content_keyword"] = by_contentz_type_id_get_keywords(id=answer.id, content_type="answer")
            item["content_star_keyword"] = get_content_star_keywords(id=answer.id, content_type="answer")
            item["content_star_first_keyword"] = get_content_star_first_keyword(id=answer.id, content_type="answer")
            data.append(item)
        except (Answer.DoesNotExist, Question.DoesNotExist):
            # Best-effort: skip answers whose related rows disappeared.
            pass
    return data
def get_answer_ids(pks):
    """Collect the ids of all online answers belonging to the questions in
    ``pks`` and hand them to get_answers_rpc."""
    collected = []
    for question in Question.objects.filter(id__in=pks):
        online_ids = question.answers.filter(is_online=True).values_list('id', flat=True)
        collected.extend(online_ids)
    return get_answers_rpc(collected)
def get_answers_rpc(pks):
    """Like get_answers, but wraps every document as {'key': ..., 'value': ...}
    for RPC/dbmw-style delivery and carries a reduced field set."""
    answers = Answer.objects.filter(id__in=pks)
    rs = AnswerRelatedRecommendScore.objects.filter(answer_id__in=pks).values(
        'answer_id', 'score'
    )
    # BUG FIX: same mistake as get_answers — iterating .values() yields dicts,
    # so `for (k, v) in rs` produced {'answer_id': 'score'} rather than an
    # id -> score mapping.
    rs_dict = {r['answer_id']: r['score'] for r in rs}
    data = []
    for answer in answers:
        try:
            item = {}
            item['id'] = answer.id
            item['user_id'] = answer.user_id
            item['question_id'] = answer.question.id
            item['smart_rank'] = answer.smart_rank()
            item['title'] = answer.question.title
            item['desc'] = answer.question.content
            item['question_type'] = answer.question.question_type
            item['answer'] = answer.content
            item["is_online"] = bool(answer.is_online and answer.question.is_online)
            item['recommend_score'] = rs_dict.get(answer.id, 0)
            item['tag_ids'] = [t.tag_id for t in answer.question.tags]
            item['content_level'] = answer.level
            item['content_type'] = answer.content_type
            # BUG FIX: datetime.timestamp() does not exist on Python 2.7 (the
            # project interpreter per .idea config) and would raise
            # AttributeError here; store the localized datetime exactly as
            # get_answers does, keeping create_time_epoch for the numeric form.
            item['create_time'] = tzlc(answer.create_time)
            item['create_time_epoch'] = to_epoch(tzlc(answer.create_time))
            item['is_recommend'] = answer.is_recommend
            key = {'id': answer.id}
            value = {'value': item, 'key': key}
            data.append(value)
        except (Answer.DoesNotExist, Question.DoesNotExist):
            # Best-effort: skip answers whose related rows disappeared.
            pass
    return data
from .connection import index_data_source, pk_data_source
from .transfer import get_articles
# -*- coding: UTF-8 -*-
from django.conf import settings
from gm_dbmw_api.descriptor import source, relation
from gm_types.mq.dbmw import DBMWEndPoints
pk_data_source = source.GroupedSource.of(
source.MySQLTableSource(
logical_database_id=settings.LOGICAL_DATABASE_ID,
table_name='api_article',
value_columns_all=True,
key_columns=['id']
)
)
index_data_source = pk_data_source
from talos.models.topic.column import Article
from talos.models.topic.recommendsocre import ArticleRelatedRecommendScore
import logging
from utils.rpc import get_rpc_invoker
from talos.models.topic.topic import Problem
from qa.models.toutiao import by_contentz_type_id_get_keywords, get_content_star_keywords, get_content_title_keywords, get_content_star_first_keyword
rpc = get_rpc_invoker()
def get_articles(pks):
    """Assemble ES sync documents for the Article rows whose ids are in ``pks``.

    Topic-derived fields (title, content, tags, ...) are only present when the
    article still resolves to a topic.
    """
    articles = Article.objects.filter(id__in=pks)
    rs = ArticleRelatedRecommendScore.objects.filter(id__in=pks).values('id', 'score')
    rs_dict = {row['id']: row['score'] for row in rs}
    data = []
    for article in articles:
        item = {}
        item['id'] = article.id
        item['article_id'] = article.article_id
        item['smart_rank'] = article.smart_rank()
        item["is_online"] = Problem.objects.filter(id=article.article_id).values_list("is_online", flat=True).first()
        item['recommend_score'] = rs_dict.get(article.id, 0)
        item["content_keyword"] = by_contentz_type_id_get_keywords(id=article.id, content_type="article")
        item["content_star_keyword"] = get_content_star_keywords(id=article.id, content_type="article")
        item["content_star_first_keyword"] = get_content_star_first_keyword(id=article.id, content_type="article")
        topic = article.topic()
        if topic:
            item['title'] = topic.title
            item['content'] = topic.answer_richtext
            tags = topic.tags
            tags_id_list = [tag.id for tag in tags]
            item['tag_ids'] = tags_id_list
            item['tags'] = [tag.name for tag in tags]
            # Map legacy tag ids to the new ("fresh") tag system via RPC.
            fresh_tag_result = rpc["api/agile_tag/tuple_new_tags"](old_tag_ids=tags_id_list)
            fresh_tag_id_list = []
            fresh_tag_name_list = []
            for fresh_tag_id, fresh_tag_name in fresh_tag_result.unwrap():
                fresh_tag_id_list.append(fresh_tag_id)
                fresh_tag_name_list.append(fresh_tag_name)
            item["fresh_tag_ids"] = fresh_tag_id_list
            item["fresh_tags"] = fresh_tag_name_list
            item["ask"] = topic.ask
            item['user'] = {"id": topic.user.id}
            item["content_pre"] = topic.answer_richtext
            item['title_pre'] = topic.title
        # NOTE(review): the original appended/logged with indentation lost in
        # this dump; appending unconditionally matches every sibling transfer
        # function — confirm against VCS history.
        data.append(item)
    # PERF FIX: the log statement previously rendered the whole accumulated
    # list on every iteration (O(n^2) log volume); log the result once.
    logging.info("get data:%s" % data)
    return data
"""日记本相关任务。
1、日记回复数更新
2、日记点赞数更新
"""
\ No newline at end of file
from talos.portal import update_diary_extra_info
update_extra_info = update_diary_extra_info
from .transfer import get_soft_article
from .connection import pk_data_source,index_data_source
\ No newline at end of file
# -*- coding: UTF-8 -*-
from django.conf import settings
from gm_dbmw_api.descriptor import source
pk_data_source = source.GroupedSource.of(
source.MySQLTableSource(
logical_database_id=settings.LOGICAL_DATABASE_ID,
table_name='api_doctor_softarticle',
value_columns_all=True,
key_columns=['id']
)
)
index_data_source = pk_data_source
import logging
from talos.models.soft_article.soft_article import SoftArticle, SoftArticleRelation
from data_sync.utils import to_epoch, tzlc
from qa.models.toutiao import by_contentz_type_id_get_keywords, get_content_star_keywords, get_content_title_keywords, get_content_star_first_keyword
def get_soft_article(pks):
    """Assemble ES sync documents for doctor soft articles
    (api_doctor_softarticle rows) whose ids are in ``pks``."""
    data = []
    for tractate in SoftArticle.objects.filter(id__in=pks):
        doc = {}
        doc['id'] = tractate.id
        doc["content"] = tractate.content
        doc['is_video'] = tractate.get_is_video
        doc["doctor_id"] = tractate.doctor_id  # TODO: change the ES mapping to string
        doc["is_online"] = tractate.is_online
        doc["status"] = tractate.status
        doc["platform"] = tractate.platform
        doc["content_level"] = tractate.content_level
        doc["create_time"] = tzlc(tractate.create_time)
        doc["tractate_tag_list"] = tractate.get_tag_list_sort_article
        doc["content_keyword"] = by_contentz_type_id_get_keywords(id=tractate.id, content_type="doctortopic")
        data.append(doc)
    # NOTE(review): indentation was lost in the source dump; the log is placed
    # after the loop (it directly precedes `return` in the original).
    logging.info("get data:%s" % data)
    return data
# -*- coding: UTF-8 -*-
from multiprocessing import Pool
from functools import partial

from django.core.management.base import BaseCommand

from data_sync.tasks import write_to_es
from data_sync.type_info import get_type_info_map


def process(dtype, pks):
    """Worker entry point: push the given primary keys of ``dtype`` to ES."""
    write_to_es(dtype, pks)


class Command(BaseCommand):
    """manage.py command that bulk-syncs every row of one data type into ES
    using a process pool (-t type name, -p number of worker processes)."""

    def add_arguments(self, parser):
        parser.add_argument('-t', '--type', dest='dtype')
        parser.add_argument('-p', '--procs', dest='parallel', type=int)

    def handle(self, *args, **options):
        t = options['dtype']
        p = options['parallel'] or 2
        pool = Pool(processes=p)
        dt = get_type_info_map()[t]
        pks = list(dt.model.objects.all().values_list('id', flat=True))
        if not pks:
            # Nothing to sync; the old code crashed here (see below).
            return
        # BUG FIX: int(len(pks) / p) is 0 whenever len(pks) < p, and
        # range(0, len(pks), 0) raises "range() arg 3 must not be zero".
        # Clamp the chunk size to at least 1.
        n = max(1, len(pks) // p)
        _process = partial(process, t)
        pool.map(_process, [pks[i:i + n] for i in range(0, len(pks), n)])
This diff is collapsed.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .connection import index_data_source, pk_data_source
from .transfer import get_live_notice, get_activity, get_article, get_live_stream,get_video_tractate,get_word_tractate
# -*- coding: UTF-8 -*-
from django.conf import settings
from gm_dbmw_api.descriptor import source, relation
from gm_types.mq.dbmw import DBMWEndPoints
pk_data_source = source.GroupedSource.of(
source.MySQLTableSource(
logical_database_id=settings.LOGICAL_DATABASE_ID,
table_name='api_live_stream',
value_columns_all=True,
key_columns=['id']
)
)
index_data_source = pk_data_source
import logging
import time
from gm_types.gaia import TOPIC_TYPE
from gm_types.mimas import SOFT_ARTICLE_TYPE
from live.models import LiveStream, ZhiboConfig, LiveChannel
from talos.models.topic.activity import Activity
from talos.models.soft_article import SoftArticle
from talos.services.user import UserService
from talos.services.doctor import DoctorService
from data_sync.utils import tzlc
from talos.models.topic import Problem
# 直播
def get_live_stream(pks):
    """Principal-page documents for live-stream playbacks, principal_type=1."""
    lives = LiveStream.objects.filter(id__in=pks)
    _channel_ids = list(filter(None, lives.values_list("channel_id", flat=True).distinct()))
    person_ids = set(LiveChannel.objects.filter(pk__in=_channel_ids).values_list("person_id", flat=True))
    user_ids = UserService._get_user_data_by_person_ids(list(person_ids))
    data = []
    for live in lives:
        item = {}
        item["id"] = live.id
        person_id = live.get_live_stream_user_id()
        user_obj = user_ids.get(person_id, None)
        user_id = user_obj.id if user_obj is not None else None
        item["doctor_id"] = DoctorService.get_doctor_by_user_id_v1(user_id)
        item["user_id"] = user_id
        item["is_online"] = live.is_finish
        item["principal_type"] = 1
        try:
            start_time = tzlc(live.created_time)
            item["start_time"] = int(time.mktime(start_time.timetuple()))
        except Exception:
            # FIX: narrowed from a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt; the best-effort behaviour is kept.
            pass
        item["end_time"] = None
        item["zhibo_time"] = None
        item["show_order"] = 999
        item["article_type"] = None
        item["status"] = None
        item["live_status"] = live.status
        item["hera_is_online"] = None
        # NOTE(review): the value is True when a topic EXISTS, which
        # contradicts the name "topic_isnull". Preserved as-is because
        # downstream consumers may depend on it — confirm intent before
        # renaming or inverting.
        item["topic_isnull"] = True if live.topic else False
        item["topic_is_online"] = live.get_topic_is_online()
        data.append(item)
    # NOTE(review): log placed once after the loop (indentation was lost in
    # the source dump) — confirm against VCS history.
    logging.info("get data:%s" % data)
    return data
# 预告
def get_live_notice(pks):
    """Principal-page documents for live-stream announcements (ZhiboConfig),
    principal_type=3."""
    notices = ZhiboConfig.objects.filter(id__in=pks)
    data = []
    for notice in notices:
        item = {}
        item["id"] = notice.id
        item["doctor_id"] = DoctorService.get_doctor_by_user_id_v1(notice.anchor_user_id)
        item["user_id"] = notice.anchor_user_id
        item["is_online"] = notice.is_online
        item["principal_type"] = 3
        try:
            start_time = tzlc(notice.start_time)
            item["start_time"] = int(time.mktime(start_time.timetuple()))
            end_time = tzlc(notice.end_time)
            item["end_time"] = int(time.mktime(end_time.timetuple()))
            zhibo_time = tzlc(notice.zhibo_time)
            item["zhibo_time"] = int(time.mktime(zhibo_time.timetuple()))
        except Exception:
            # FIX: narrowed from a bare `except:`; on failure some/all of the
            # three time fields may simply be absent from the document, which
            # matches the original behaviour.
            pass
        item["show_order"] = 999
        item["article_type"] = None
        item["status"] = None
        item["live_status"] = None
        item["hera_is_online"] = None
        item["topic_isnull"] = None
        item["topic_is_online"] = None
        data.append(item)
    logging.info("get data:%s" % data)
    return data
# 免费招募
def get_activity(pks):
    """Principal-page documents for free-recruit activities, principal_type=2."""
    activities = Activity.objects.filter(id__in=pks)
    data = []
    for activity in activities:
        item = {}
        item["id"] = activity.id
        item["doctor_id"] = activity.doctor_id
        item["user_id"] = ''
        item["is_online"] = activity.is_online
        item["principal_type"] = 2
        try:
            start_time = tzlc(activity.start_time)
            item["start_time"] = int(time.mktime(start_time.timetuple()))
            end_time = tzlc(activity.end_time)
            item["end_time"] = int(time.mktime(end_time.timetuple()))
        except Exception:
            # FIX: narrowed from a bare `except:`; best-effort time fields.
            pass
        item["zhibo_time"] = None
        item["show_order"] = 999
        item["article_type"] = None
        item["status"] = None
        item["live_status"] = None
        item["hera_is_online"] = None
        item["topic_isnull"] = None
        item["topic_is_online"] = None
        data.append(item)
    logging.info("get data:%s" % data)
    return data
# 视频贴
def get_video_tractate(pks):
    """Principal-page documents for doctor video posts
    (SOFT_ARTICLE_TYPE.VIDEO), principal_type=4."""
    softarticles = SoftArticle.objects.filter(id__in=pks, article_type=SOFT_ARTICLE_TYPE.VIDEO)
    data = []
    for softarticle in softarticles:
        item = {}
        item["id"] = softarticle.id
        item["doctor_id"] = softarticle.doctor_id
        item["user_id"] = ""
        item["is_online"] = softarticle.is_online
        item["principal_type"] = 4
        try:
            start_time = tzlc(softarticle.create_time)
            item["start_time"] = int(time.mktime(start_time.timetuple()))
        except Exception:
            # FIX: narrowed from a bare `except:`; best-effort start_time.
            pass
        item["end_time"] = None
        item["zhibo_time"] = None
        item["show_order"] = softarticle.show_order
        item["article_type"] = softarticle.article_type
        item["status"] = softarticle.status
        item["live_status"] = None
        item["hera_is_online"] = softarticle.hera_is_online
        item["topic_isnull"] = None
        item["topic_is_online"] = None
        data.append(item)
    logging.info("get data:%s" % data)
    return data
# 文本帖
def get_word_tractate(pks):
    """Principal-page documents for doctor text posts
    (SOFT_ARTICLE_TYPE.ORDINARY), principal_type=5."""
    softarticles = SoftArticle.objects.filter(id__in=pks, article_type=SOFT_ARTICLE_TYPE.ORDINARY)
    data = []
    for softarticle in softarticles:
        item = {}
        item["id"] = softarticle.id
        item["doctor_id"] = softarticle.doctor_id
        item["user_id"] = ""
        item["is_online"] = softarticle.is_online
        item["principal_type"] = 5
        try:
            start_time = tzlc(softarticle.create_time)
            item["start_time"] = int(time.mktime(start_time.timetuple()))
        except Exception:
            # FIX: narrowed from a bare `except:`; best-effort start_time.
            pass
        item["end_time"] = None
        item["zhibo_time"] = None
        item["show_order"] = softarticle.show_order
        item["article_type"] = softarticle.article_type
        item["status"] = softarticle.status
        item["live_status"] = None
        item["hera_is_online"] = softarticle.hera_is_online
        item["topic_isnull"] = None
        item["topic_is_online"] = None
        data.append(item)
    logging.info("get data:%s" % data)
    return data
# Column articles
def get_article(pks):
    """Serialize column/user articles (Problem rows with COLUMN_ARTICLE or
    USER_ARTICLE topic type) into flat dicts for the mixed-content index."""
    articles = Problem.objects.filter(
        id__in=pks,
        topic_type__in=[TOPIC_TYPE.COLUMN_ARTICLE, TOPIC_TYPE.USER_ARTICLE]
    )
    data = []
    for article in articles:
        item = {}
        item["id"] = article.id
        item["user_id"] = article.user_id
        item["doctor_id"] = UserService.get_doctor_id_from_user_id(article.user_id)
        logging.info("get doctor_id:%s" % item["doctor_id"])
        item["is_online"] = article.is_online
        item["principal_type"] = 6  # document-type discriminator for this index
        try:
            start_time = tzlc(article.created_time)
            item["start_time"] = int(time.mktime(start_time.timetuple()))
        except Exception:
            # Best effort: leave "start_time" out on bad/missing created_time.
            # Narrowed from a bare "except:" (which also caught SystemExit/
            # KeyboardInterrupt) and logged for visibility.
            logging.exception("bad created_time for Problem %s", article.id)
        item["end_time"] = None
        item["zhibo_time"] = None
        item["show_order"] = 999
        item["article_type"] = None
        item["status"] = None
        item["live_status"] = None
        item["hera_is_online"] = None
        item["topic_isnull"] = None
        item["topic_is_online"] = None
        data.append(item)
    logging.info("get data:%s" % data)
    return data
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .connection import index_data_source, pk_data_source
from .transfer import get_qa_tops
# -*- coding: UTF-8 -*-
from django.conf import settings
from gm_dbmw_api.descriptor import source, relation
from gm_types.mq.dbmw import DBMWEndPoints
# Change source for answer-top documents: a single MySQL binlog source on
# api_answer_top, keyed by id.
pk_data_source = source.GroupedSource.of(
    source.MySQLTableSource(
        logical_database_id=settings.LOGICAL_DATABASE_ID,
        table_name='api_answer_top',
        value_columns_all=True,
        key_columns=['id']
    )
)
# Full index rebuilds iterate the same source.
index_data_source = pk_data_source
from collections import defaultdict
from data_sync.utils import to_epoch, tzlc
from qa.models.answer import AnswerTop, QuestionTag
from utils.rpc import get_rpc_invoker
# Module-level RPC client shared by all calls in this transfer module.
rpc = get_rpc_invoker()


def get_qa_tops(pks):
    """Serialize AnswerTop rows (ids in ``pks``) for indexing.

    Each result carries the raw pin record, its timestamps both localized
    and as epoch seconds, the tag ids of the pinned question, and the
    closure-expanded tag id set fetched over RPC.
    """
    results = []
    queryset = AnswerTop.objects.filter(id__in=pks)
    # ids of the pinned questions, with NULLs dropped
    question_ids = list(filter(None, queryset.values_list("question_id", flat=True)))
    # question_id -> [tag id, ...]
    q_top_tags = defaultdict(list)
    for q_id, t_id in QuestionTag.objects.filter(question_id__in=question_ids).values_list("question_id", "tag"):
        q_top_tags[q_id].append(t_id)
    for qa_top in queryset:
        _c_time = tzlc(qa_top.create_time)
        _s_time = tzlc(qa_top.start_time)
        _e_time = tzlc(qa_top.end_time)
        _data = {
            "id": qa_top.id,
            "question_id": qa_top.question_id,
            "answer_id": qa_top.answer_id,
            "top_type": qa_top.top_type,
            "enable": qa_top.enable,
            "create_time": _c_time,
            "start_time": _s_time,
            "end_time": _e_time,
            "create_time_epoch": to_epoch(_c_time),
            "start_time_epoch": to_epoch(_s_time),
            "end_time_epoch": to_epoch(_e_time),
        }
        tag_ids = q_top_tags.get(qa_top.question_id, [])
        _data["tag_ids"] = tag_ids
        # presumably a lazy RPC handle; resolved via unwrap() in the loop
        # below — TODO confirm against the invoker implementation
        _data['closure_tag_ids'] = rpc['api/tag/closure_tags'](tag_ids=tag_ids) if tag_ids else []
        results.append(_data)
    for result in results:
        # anything that is not already a plain list is an unresolved RPC
        # result: unwrap it and keep just the tag ids
        if not isinstance(result['closure_tag_ids'], list):
            result['closure_tag_ids'] = list(map(lambda tag: tag['id'], result['closure_tag_ids'].unwrap()))
    return results
# -*- coding: UTF-8 -*-
\ No newline at end of file
# -*- coding: UTF-8 -*-
from django.conf import settings
from gm_dbmw_api.descriptor import source, relation
from gm_types.mq.dbmw import DBMWEndPoints
# Composite change source for question documents. A re-index of a question is
# triggered by: its own row, its answers, tag changes (via the question-tag
# link), its author's doctor row (incl. punishment events), and explicit MQ
# question-trigger messages.
pk_data_source = source.GroupedSource.of(
    # the api_question row itself
    source.MySQLTableSource(
        logical_database_id=settings.LOGICAL_DATABASE_ID,
        table_name='api_question',
        value_columns_all=True,
        key_columns=['id']
    ),
    # answers, grouped back to their question
    source.MySQLTableSource(
        logical_database_id=settings.LOGICAL_DATABASE_ID,
        table_name='api_answer',
        key_columns=['question_id'],
        value_columns_all=True,
    ),
    # tag MQ events, mapped to questions through api_questiontag
    source.RelatedSource(
        source=source.GMMQSource(
            endpoint=DBMWEndPoints.GAIA_TAG,
        ),
        relation=relation.MySQLTableRelation(
            logical_database_id=settings.LOGICAL_DATABASE_ID,
            table_name='api_questiontag',
            from_columns=['tag_id'],
            to_columns=['question_id'],
        )
    ),
    # doctor changes (row updates and punishment MQ events), mapped through
    # api_doctor.user_id to the question author
    source.RelatedSource(
        source=source.GroupedSource.of(
            source.MySQLTableSource(
                logical_database_id=settings.DBMW_LOGICAL_DATABASE_ID,
                table_name="api_doctor",
                key_columns=['user_id'],
                # counters excluded so stat churn does not trigger re-index
                value_columns_excluded=['view_num', 'reply_num', 'new_pm_num', 'reply_pm_num', 'no_reply_pm_num',
                                        'share_topic_num', 'last_answered_time'],
                value_columns_all=True,
            ),
            source.RelatedSource(
                source=source.GMMQSource(
                    endpoint=DBMWEndPoints.PUNISHMENT_TRIGGER,
                ),
                relation=relation.MySQLTableRelation(
                    logical_database_id=settings.DBMW_LOGICAL_DATABASE_ID,
                    table_name="api_doctor",
                    from_columns=["id"],
                    to_columns=["user_id"],
                )
            )
        ),
        relation=relation.MySQLTableRelation(
            logical_database_id=settings.LOGICAL_DATABASE_ID,
            table_name='api_question',
            from_columns=['user_id'],
            to_columns=['id'],
        )
    ),
    # explicit re-index requests pushed over MQ
    source.GMMQSource(
        endpoint=DBMWEndPoints.QUESTION_TRIGGER,
    )
)
# Full index rebuilds iterate the same composite source.
index_data_source = pk_data_source
This diff is collapsed.
import datetime
import json
import six
import uuid
from django.db import models
from django.apps import apps
from django.http import JsonResponse
from gm_types.utils.enum import EnumMeta
def schema_view(app_label=None, db_table=None):
    """Retrieve model schema.

    Builds a JSON-serializable description (name, nullability, choices,
    default, index/unique flags, ...) of every concrete field of the models
    selected by the optional ``app_label`` / ``db_table`` filters.
    """
    modelCls = get_models(app_label, db_table)
    schemas = []
    for model in modelCls:
        schema = {
            "app_label": model._meta.app_label,
            "db_table": model._meta.db_table,
            "fields": [],
        }
        for field in model._meta.get_fields():
            # skip reverse/virtual relations; only concrete columns matter
            if not field.concrete:
                continue
            # normalize the default into something JSON-serializable
            if field.default is models.fields.NOT_PROVIDED:
                default = "NOT_PROVIDED"
            elif isinstance(field.default, datetime.datetime):
                default = field.default.strftime("%Y-%m-%d %H:%M:%S")
            elif isinstance(field.default, datetime.time):
                default = field.default.strftime("%H:%M:%S")
            elif type(field.default) == type(uuid.uuid4):
                # plain-function default (e.g. uuid4): report its name
                default = field.default.__name__.upper()
            else:
                try:
                    json.dumps(field.default)
                    default = field.default
                except TypeError:
                    default = "NOT_SERIALIZABLE"
            # enum-backed choices are flattened to their raw values
            if type(field.choices) == EnumMeta:
                choices = [v[0] for v in field.choices]
            else:
                choices = field.choices
            schema["fields"].append({
                "name": field.name,
                "null": field.null,
                "blank": field.blank,
                "choices": choices,
                "db_column": field.db_column,
                "db_index": field.db_index,
                "db_tablespace": field.db_tablespace,
                "default": default,
                "unique": field.unique,
                "primary_key": field.primary_key,
                "verbose_name": field.verbose_name if isinstance(field.verbose_name, six.string_types) else "",
                "help_text": field.help_text if isinstance(field.help_text, six.string_types) else "",
            })
        # sanity check: raises TypeError if anything above is not serializable
        json.dumps(schema)
        schemas.append(schema)
    return schemas
def get_models(app_label=None, db_table=None):
    """Collect Django model classes, optionally filtered by app label and/or
    db table name. May return a list or a lazy filter object."""
    try:
        # legacy API; raises AttributeError on Django versions where
        # django.db.models.get_models was removed
        mCls = models.get_models()
        if app_label:
            f = lambda m: m._meta.app_label == app_label
            mCls = filter(f, mCls)
    except AttributeError:
        # fall back to the app registry
        if app_label:
            mDict = apps.all_models[app_label]
            mCls = mDict.values()
        else:
            mCls = []
            for app in apps.all_models:
                mCls.extend(list(apps.all_models[app].values()))
    if db_table:
        f = lambda m: m._meta.db_table == db_table
        mCls = filter(f, mCls)
    return mCls
from .transfer import get_subscript_article
from .connection import pk_data_source, index_data_source
\ No newline at end of file
# -*- coding: UTF-8 -*-
from django.conf import settings
from gm_dbmw_api.descriptor import source
# Change source for subscript-article documents: a single MySQL binlog source
# on api_tractate, keyed by id.
pk_data_source = source.GroupedSource.of(
    source.MySQLTableSource(
        logical_database_id=settings.LOGICAL_DATABASE_ID,
        table_name='api_tractate',
        value_columns_all=True,
        key_columns=['id']
    )
)
# Full index rebuilds iterate the same source.
index_data_source = pk_data_source
import logging
from datetime import datetime
from talos.services.subscript_article import SubscriptArticleService
from data_sync.utils import tzlc
def get_subscript_article(pks):
    """Serialize subscript articles (ids in ``pks``) into flat dicts for
    indexing, with tag and category ids/names resolved per article."""
    articles = SubscriptArticleService.list_by_ids(article_ids=pks)
    data = []
    for article in articles:
        item = {}
        item['id'] = article["id"]
        item["title"] = article["title"]
        item["name"] = article["name"]
        # item["url"] = article["url"]
        # item["image_url"] = article["image_url"]
        # item["share_img"] = article["share_img"]
        item["share_content"] = article["share_content"]
        item["is_online"] = article["is_online"]
        item["is_header_article"] = article["is_header_article"]
        item["platform"] = article["platform"]
        item["ordering"] = article["ordering"]
        # timestamps arrive as epoch seconds; keep both the localized
        # datetime and the raw value
        item["create_time"] = tzlc(datetime.fromtimestamp(article["create_time"]))
        item["create_time_val"] = article["create_time"]
        item["update_time"] = tzlc(datetime.fromtimestamp(article["update_time"]))
        item["update_time_val"] = article["update_time"]
        # tag_list entries are objects; categories are dicts
        tag_list = SubscriptArticleService.list_tags_by_id(article["id"])
        item["tags"] = [item.id for item in tag_list]
        item["tag_names"] = [item.name for item in tag_list]
        categories = SubscriptArticleService.list_category_by_id(article["id"])
        item["categories"] = [item["id"] for item in categories]
        item["category_names"] = [item["name"] for item in categories]
        data.append(item)
    return data
# -*- coding: UTF-8 -*-
from celery import shared_task
from django.conf import settings
import datetime
from data_sync.type_info import get_type_info_map
from data_sync.diary.tasks import update_extra_info
from data_sync.user.user import sync_user_level
from qa.models.answer import Answer
from search.models import MixIndex, MixIndexTag
from talos.models.topic import Problem, Article
from gm_types.gaia import INDEX_CARD_TYPE
from gm_types.mimas.qa import CONTENT_CLASS
import logging
@shared_task
def write_to_es(es_type, pk_list):
    """Celery task: index the rows of ``es_type`` whose pks are in
    ``pk_list`` into ES (duplicates removed first)."""
    pk_list = list(frozenset(pk_list))
    type_info_map = get_type_info_map()
    type_info = type_info_map[es_type]
    logging.info("get pk_list:%s" % pk_list)
    type_info.insert_table_by_pk_list(
        index_prefix=settings.ES_INDEX_PREFIX,
        pk_list=pk_list,
    )
class AnswerScore(object):
    '''
    Compute the ranking score of an answer. The rules mirror
    data_sync.question.tran2es.Score, see
    http://wiki.wanmeizhensuo.com/pages/viewpage.action?pageId=4441797
    '''

    @classmethod
    def get_score(cls, answer):
        """0.8 * content + 0.2 * social, decayed by question/answer age;
        floored at 0.0."""
        now = datetime.datetime.now()
        # only real (non-fake) votes count toward the social score
        vote_num = answer.answervote_set.filter(is_fake=False).count()
        content_score = cls.get_answer_content_score(answer.level)
        social_score = cls.get_social_score(vote_num, answer.replys.count())
        # BUG FIX: the original used timedelta.seconds, which discards whole
        # days — the age penalty silently reset every 24 hours. Use
        # total_seconds() so the full elapsed time decays the score.
        time_score = (now - answer.question.create_time).total_seconds() / 3600 * 0.03 * 0.7 + \
                     (now - answer.create_time).total_seconds() / 3600 * 0.06 * 1.5
        answer_score = 0.8 * content_score + 0.2 * social_score - time_score
        return max(0.0, answer_score)

    @staticmethod
    def get_answer_content_score(level):
        """Map a content-quality level to a stepped score (0..100)."""
        if level < 2:
            return 0
        elif level < 3:
            return 5
        elif level < 4:
            return 10
        elif level < 5:
            return 70
        else:
            return 100

    @staticmethod
    def get_social_score(likes_num, reply_num):
        """Weighted mix of like and reply scores (0.4 / 0.6)."""
        likes_score = AnswerScore.get_likes_score(likes_num)
        reply_score = AnswerScore.get_reply_score(reply_num)
        return 0.4 * likes_score + 0.6 * reply_score

    @staticmethod
    def get_likes_score(likes_num):
        """Stepped score for the number of likes (10..100)."""
        if likes_num <= 5:
            return 10
        elif likes_num <= 20:
            return 20
        elif likes_num <= 50:
            return 30
        elif likes_num <= 70:
            return 60
        elif likes_num <= 100:
            return 70
        else:
            return 100

    @staticmethod
    def get_reply_score(reply_num):
        """Stepped score for the number of replies (10..100)."""
        if reply_num <= 5:
            return 10
        elif reply_num <= 20:
            return 20
        elif reply_num <= 50:
            return 30
        elif reply_num <= 70:
            return 60
        elif reply_num <= 100:
            return 70
        else:
            return 100
@shared_task
def update_knowledge(model_type, pk_list):
    '''
    Sync knowledge data into the MixIndex tables.
    :param model_type: "answer" or "article"
    :param pk_list: primary keys of the changed rows
    :return:
    '''
    if not pk_list:
        return
    if model_type == 'answer':
        answers = Answer.objects.filter(id__in=pk_list)
        answer_in_mix = MixIndex.objects.filter(original_id__in=pk_list, original_type=INDEX_CARD_TYPE.ANSWER)
        answer_in_mix_dict = {mix.original_id: mix for mix in answer_in_mix}
        for item in answers:
            new_tags = item.question.tags
            if item.id in answer_in_mix_dict:
                # answer already in the mixed index: drop it if it no longer
                # qualifies, otherwise refresh score + tags
                mix_obj = answer_in_mix_dict[item.id]
                if item.level < CONTENT_CLASS.FINE or not item.is_online or not item.question.is_online:
                    mix_obj.delete()
                    continue
                # recompute the score (only when recommended; otherwise keep it)
                answer_score = AnswerScore.get_score(item) if item.is_recommend else mix_obj.answer_score
                mix_obj.answer_score = answer_score
                mix_obj.answer_is_recommend = item.is_recommend
                mix_obj.save()
                # refresh tag relations
                new_tags_set = set()
                old_tags_dict = {tag.tag_id: tag for tag in mix_obj.mixindextag_set.all()}
                for _tag in new_tags:
                    if not _tag.tag_id in old_tags_dict:
                        # add a missing tag relation
                        MixIndexTag.objects.create(mix_index=mix_obj, tag_id=_tag.tag_id)
                    new_tags_set.add(_tag.tag_id)
                # delete relations for tags no longer attached
                remove_tag = old_tags_dict.keys() - new_tags_set
                if remove_tag:
                    for t_id in remove_tag:
                        old_tags_dict[t_id].delete()
            else:
                # answer not yet in the index: insert it if it qualifies
                if item.level < CONTENT_CLASS.FINE or not item.is_online or not item.question.is_online:
                    continue
                answer_score = AnswerScore.get_score(item) if item.is_recommend else 0.0
                # create a new MixIndex record
                mix_index_obj = MixIndex.objects.create(original_id=item.id, original_type=INDEX_CARD_TYPE.ANSWER,
                                                        answer_score=answer_score,
                                                        original_create_time=item.create_time,
                                                        answer_is_recommend=item.is_recommend)
                insert_lst = []
                for n_tag in new_tags:
                    insert_lst.append(MixIndexTag(mix_index=mix_index_obj, tag_id=n_tag.tag_id))
                MixIndexTag.objects.bulk_create(insert_lst)
    elif model_type == 'article':
        articles = Article.objects.filter(id__in=pk_list)
        articles_in_mix = MixIndex.objects.filter(original_id__in=pk_list, original_type=INDEX_CARD_TYPE.ARTICLE)
        articles_in_mix_dict = {mix.original_id: mix for mix in articles_in_mix}
        for item in articles:
            # tags come from the linked Problem; if the article is offline the
            # branch below just continues, so tags are not updated in that case
            new_tags = Problem.objects.get(id=item.article_id).problemtag_set.all()
            if item.id in articles_in_mix_dict:
                mix_obj = articles_in_mix_dict[item.id]
                if not item.is_online:
                    mix_obj.delete()  # NOTE(review): confirm FK rows are removed too
                    continue
                # refresh tag relations
                new_tags_set = set()
                old_tags_dict = {tag.tag_id: tag for tag in mix_obj.mixindextag_set.all()}
                for _tag in new_tags:
                    if not _tag.tag_id in old_tags_dict:
                        # add a missing tag relation
                        MixIndexTag.objects.create(mix_index=mix_obj, tag_id=_tag.tag_id)
                    new_tags_set.add(_tag.tag_id)
                # delete relations for tags no longer attached
                remove_tag = old_tags_dict.keys() - new_tags_set
                if remove_tag:
                    for t_id in remove_tag:
                        old_tags_dict[t_id].delete()
            else:
                if not item.is_online:
                    continue
                # create a new MixIndex record
                mix_index_obj = MixIndex.objects.create(original_id=item.id, original_type=INDEX_CARD_TYPE.ARTICLE,
                                                        original_create_time=item.created_time)
                insert_lst = []
                for n_tag in new_tags:
                    insert_lst.append(MixIndexTag(mix_index=mix_index_obj, tag_id=n_tag.tag_id))
                MixIndexTag.objects.bulk_create(insert_lst)
@shared_task
def sync_diary_extra_info(diary_ids: list):
    """Sync diary-related statistics.

    Given the changed diary ids, updates the statistics table for vote
    totals; post totals and reply totals are maintained elsewhere
    (api_diary carries the reply-count field).
    """
    if not diary_ids:
        return
    update_extra_info(diary_ids)
@shared_task
def sync_user_level_to_gaia(user_ids: list):
    """Sync user level information to gaia."""
    sync_user_level(user_ids)
from .util import get_problems
from .connection import pk_data_source, index_data_source
# coding=utf-8
from django.conf import settings
from gm_dbmw_api.descriptor import source, relation
from gm_types.mq.dbmw import DBMWEndPoints
# Composite change source for problem (topic) documents. A re-index of a
# problem is triggered by: its own row, its tag links (and the tags behind
# them), its replies, votes and ranking scores, its diary (plus the diary's
# service and doctor, incl. punishment events), its video row, and explicit
# MQ topic-trigger messages.
pk_data_source = source.GroupedSource.of(
    # the api_problem row itself
    source.MySQLTableSource(
        logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
        table_name="api_problem",
        key_columns=['id'],
        value_columns_excluded=[],
        value_columns_all=True,
    ),
    # problem<->tag link rows, plus api_tag rows reached through that link
    source.RelatedSource(
        source=source.GroupedSource.of(
            source.MySQLTableSource(
                logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
                table_name="api_problemtag",
                key_columns=['id'],
                value_columns_excluded=[],
                value_columns_all=True,
            ),
            source.RelatedSource(
                source=source.MySQLTableSource(
                    logical_database_id=settings.DBMW_LOGICAL_DATABASE_ID,
                    table_name="api_tag",
                    key_columns=['id'],
                    value_columns_excluded=[],
                    value_columns_all=True,
                ),
                relation=relation.MySQLTableRelation(
                    logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
                    table_name="api_problemtag",
                    from_columns=["tag_id"],
                    to_columns=["id"],
                )
            )
        ),
        relation=relation.MySQLTableRelation(
            logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
            table_name="api_problemtag",
            from_columns=["id"],
            to_columns=["problem_id"],
        )
    ),
    # replies, mapped to their problem
    source.RelatedSource(
        source=source.MySQLTableSource(
            logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
            table_name="api_topicreply",
            key_columns=['id'],
            value_columns_excluded=[],
            value_columns_all=True,
        ),
        relation=relation.MySQLTableRelation(
            logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
            table_name="api_topicreply",
            from_columns=["id"],
            to_columns=["problem_id"],
        )
    ),
    # votes, mapped to their topic
    source.RelatedSource(
        source=source.MySQLTableSource(
            logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
            table_name="api_topicvote",
            key_columns=['id'],
            value_columns_excluded=[],
            value_columns_all=True,
        ),
        relation=relation.MySQLTableRelation(
            logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
            table_name="api_topicvote",
            from_columns=["id"],
            to_columns=["topic_id"],
        )
    ),
    # ranking scores, mapped to their topic
    source.RelatedSource(
        source=source.MySQLTableSource(
            logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
            table_name="api_topicrankingscore",
            key_columns=['id'],
            value_columns_excluded=[],
            value_columns_all=True,
        ),
        relation=relation.MySQLTableRelation(
            logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
            table_name="api_topicrankingscore",
            from_columns=["id"],
            to_columns=["topic_id"],
        )
    ),
    # the linked diary, and through it the diary's service and doctor
    source.RelatedSource(
        source=source.GroupedSource.of(
            source.MySQLTableSource(
                logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
                table_name="api_diary",
                key_columns=['id'],
                value_columns_excluded=[],
                value_columns_all=True,
            ),
            source.RelatedSource(
                source=source.MySQLTableSource(
                    logical_database_id=settings.DBMW_LOGICAL_DATABASE_ID,
                    table_name="api_service",
                    key_columns=['id'],
                    value_columns_excluded=[],
                    value_columns_all=True,
                ),
                relation=relation.MySQLTableRelation(
                    logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
                    table_name="api_diary",
                    from_columns=["service_id"],
                    to_columns=["id"],
                )
            ),
            source.RelatedSource(
                source=source.GroupedSource.of(
                    source.MySQLTableSource(
                        logical_database_id=settings.DBMW_LOGICAL_DATABASE_ID,
                        table_name="api_doctor",
                        key_columns=['id'],
                        # counters excluded so stat churn does not re-index
                        value_columns_excluded=['view_num', 'reply_num', 'new_pm_num', 'reply_pm_num', 'no_reply_pm_num',
                                                'share_topic_num', 'last_answered_time'],
                        value_columns_all=True,
                    ),
                    source.GMMQSource(
                        endpoint=DBMWEndPoints.PUNISHMENT_TRIGGER,
                    )
                ),
                relation=relation.MySQLTableRelation(
                    logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
                    table_name="api_diary",
                    from_columns=["doctor_id"],
                    to_columns=["id"],
                )
            )
        ),
        relation=relation.MySQLTableRelation(
            logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
            table_name="api_problem",
            from_columns=["diary_id"],
            to_columns=["id"],
        )
    ),
    # the topic's video row
    source.RelatedSource(
        source=source.MySQLTableSource(
            logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
            table_name="api_topic_video",
            key_columns=['id'],
            value_columns_excluded=[],
            value_columns_all=True,
        ),
        relation=relation.MySQLTableRelation(
            logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
            table_name="api_topic_video",
            from_columns=["id"],
            to_columns=["topic_id"],
        )
    ),
    # explicit re-index requests pushed over MQ
    source.GMMQSource(
        endpoint=DBMWEndPoints.TOPIC_TRIGGER,
    )
)
# Full index rebuilds only need to iterate the problem table itself.
index_data_source = source.MySQLTableSource(
    logical_database_id=settings.MIMAS_LOGICAL_DATABASE_ID,
    table_name="api_problem",
    key_columns=['id'],
    value_columns_excluded=[],
    value_columns_all=True,
)
# coding=utf-8
from talos.models.topic import Problem, TopicRankingScore
from data_sync.utils import tzlc
from gm_types.gaia import TOPIC_TYPE
from utils.rpc import get_rpc_invoker
def get_problems(pks):
    """
    Serialize Problem rows (pks) for indexing, merging in extra per-problem
    data fetched in one batched RPC call.
    :param pks: problem primary keys
    :return: list of document dicts, one per problem
    """
    r = get_rpc_invoker()
    problems = Problem.objects.filter(pk__in=pks)
    data = []
    params = []
    for p in problems:
        tids = [pt.tag_id for pt in p.problemtag_set.all()]
        params.append({
            "pid": p.id, "uid": p.user_id, "tids": tids,
            "sid": p.diary and p.diary.service_id,
            "did": p.diary and p.diary.doctor_id,
        })
        data.append(get_problem(p))
    # the RPC returns extras positionally aligned with params/data, hence
    # the length assertion and index-wise merge below
    extras = r['api/dbmw/get_problem_extra'](params=params).unwrap()
    assert len(extras) == len(data)
    for idx, topic in enumerate(data):
        topic['user'].update(extras[idx].pop('user', {}))
        topic.update(extras[idx])
    return data
def get_problem(instance):
    """Build the index document for a single Problem (topic) instance."""
    p = instance
    res = {
        'id': p.id,
        'diary_id': p.diary_id,
        'doctor_num': p.doctor_num,
        'created_time': tzlc(p.created_time),
        'content': p.answer,
        'is_public': p.is_public,
        'is_online': p.is_online,
        'is_elite': p.selected_qa,
        'is_sink': p.is_sink,
        'is_topic': p.is_topic,
        'pgc_category': p.pgc_classfy.id if p.pgc_classfy else None,
        'flag': p.flag,
        'topic_type': p.topic_type,
        'private_status': p.private_status,
        'title': p.ask,
        'title2': p.title,
        'channel_headline': p.channel_headline,
        'user': {
            'id': p.user_id,
            'last_name': p.user.nickname,
        },
        'content_level': -1  # content level of the linked diary; -1 = none
    }
    # one-to-one video relation: accessing it raises when absent
    try:
        p.video
        res['has_video'] = True
    except Problem.video.RelatedObjectDoesNotExist:
        res['has_video'] = False
    if p.topic_type in (TOPIC_TYPE.ASK, TOPIC_TYPE.TOPIC):
        res['ranking_popularity'] = TopicRankingScore.get_pop_score(topic=p)
    else:
        res['ranking_popularity'] = 0.0
    if p.doctor_num > 0:
        res['has_doctor_reply'] = True  # whether any doctor has replied
    else:
        res['has_doctor_reply'] = False
    if p.images.count() > 0:
        res['has_image'] = True  # whether the topic has images
    else:
        res['has_image'] = False
    if p.diary:
        if (not p.diary.is_online):
            # a topic linked to an offline diary goes offline too
            res['is_online'] = False
        if p.diary.is_sink:
            # a topic linked to a sunk diary sinks too
            res['is_sink'] = True
        res['content_level'] = p.diary.content_level
    problem_replys = p.topicreply_set.all()
    res['replys_num'] = problem_replys.count()
    res['replys'] = [{
        'id': r.id,
        'content': r.content,
    } for r in problem_replys]
    last_update_time = p.last_modified
    res['last_update_time'] = tzlc(last_update_time)
    # latest reply time falls back to creation time when there are no
    # replies or when sorting fails
    try:
        if res['replys_num'] > 0:
            problem_replys_sorted = sorted(list(problem_replys), key=lambda item: item.reply_date, reverse=True)
            res['latest_reply_time'] = tzlc(problem_replys_sorted[0].reply_date)
        else:
            res['latest_reply_time'] = res['created_time']
    except:
        res['latest_reply_time'] = res['created_time']
    # popularity = replies + votes
    popularity = 0
    popularity += problem_replys.count()
    popularity += p.votes.count()
    res['popularity'] = popularity
    res['topic_score'] = [{
        'user_id': ts.user.id,
        'score': ts.score,
    } for ts in p.topic_score.all()]
    res['topic_vote_count'] = p.vote_amount
    return res
from .transfer import get_tractate
from .connection import pk_data_source,index_data_source
\ No newline at end of file
# -*- coding: UTF-8 -*-
from django.conf import settings
from gm_dbmw_api.descriptor import source
# Change source for tractate documents: a single MySQL binlog source on
# api_tractate, keyed by id.
pk_data_source = source.GroupedSource.of(
    source.MySQLTableSource(
        logical_database_id=settings.LOGICAL_DATABASE_ID,
        table_name='api_tractate',
        value_columns_all=True,
        key_columns=['id']
    )
)
# Full index rebuilds iterate the same source.
index_data_source = pk_data_source
import logging
from gm_types.mimas import TRACTATE_STATUS
from talos.models.tractate.tractate import Tractate
from talos.services.user import UserService
from data_sync.utils import to_epoch, tzlc
from qa.models.toutiao import by_contentz_type_id_get_keywords, get_content_star_keywords, get_content_title_keywords, get_content_star_first_keyword
def get_tractate(pks):
    """Serialize Tractate rows (ids in ``pks``) into index documents,
    including author name, tag data, keywords and derived scores."""
    tractates = Tractate.objects.filter(id__in=pks)
    data = []
    # batch-fetch authors to avoid one user lookup per tractate
    user_ids = list(set(tractates.values_list("user_id", flat=True)))
    user_infos = UserService.get_users_by_user_ids(user_ids)
    for tractate in tractates:
        item = {}
        user = user_infos.get(tractate.user_id, None)
        user_name = user and user.nickname or ""
        # Doctor/organization accounts carry a suffix such as "xxx 医生" /
        # "xxx 机构"; keep only the part before the first whitespace then.
        # (idiom cleanup of the original find()>=0 / flag-variable loop)
        if any(marker in user_name for marker in (" 医生", " 机构")):
            user_name = user_name.split()[0]
        item['id'] = tractate.id
        item["user_id"] = tractate.user_id
        item["content"] = tractate.content
        item["content_pre"] = tractate.content
        item["is_online"] = tractate.is_online
        item["status"] = tractate.status
        item["platform"] = tractate.platform
        item["content_level"] = tractate.content_level
        item["is_excellent"] = tractate.is_excellent
        item["create_time"] = tzlc(tractate.create_time)
        item["last_modified"] = tzlc(tractate.last_modified)
        item["audit_time"] = tzlc(tractate.audit_time)
        item["tractate_score"] = tractate.get_tractate_score
        item['good_click'] = tractate.get_goodclick
        item["good_click_tractate_score"] = tractate.get_good_click_tractate_score
        item["tractate_tag_list"] = tractate.get_tag_list
        # post_time only exists once the tractate passed audit
        item["post_time"] = tzlc(tractate.audit_time) if tractate.status == TRACTATE_STATUS.AUDIT_SUCCESS else None
        item["author"] = user_name
        tag_list = tractate.get_tag_list
        item["tractate_tag"] = tractate.get_tag(tag_list)
        item["tractate_tag_name"] = tractate.get_tag_names(tag_list)
        item["is_video"] = tractate.get_is_video
        item["tractate_tag_name_content"] = tractate.get_tag_names_content(tag_list)
        item["hot_score"] = tractate.get_hot_score()
        # fresh (new-style) tags
        fresh_tag_list = tractate.get_fresh_tag_list
        item["fresh_tractate_tag_list"] = fresh_tag_list
        item["fresh_tractate_tag_name"] = tractate.get_fresh_tag_names(fresh_tag_list)
        item["fresh_tractate_tag_name_content"] = tractate.get_tag_names_content(fresh_tag_list)
        item["last_any_reply_time"] = tzlc(tractate.get_tractate_last_any_reply_time())
        item["is_office"] = tractate.get_user_info_office()
        item["content_keyword"] = by_contentz_type_id_get_keywords(id=tractate.id, content_type="usertopic")
        item["content_star_keyword"] = get_content_star_keywords(id=tractate.id, content_type="usertopic")
        item["content_star_first_keyword"] = get_content_star_first_keyword(id=tractate.id, content_type="usertopic")
        # item["title_keyword"] = get_content_title_keywords(id=tractate.id, content_type="usertopic")
        data.append(item)
    logging.info("get data:%s" % data)
    return data
This diff is collapsed.
"""用户相关的操作"""
__all__ = [
'sync_user_level',
]
from user_hierarchy.portal import sync_user_level
# coding=utf-8
from datetime import datetime
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan, bulk
from django.conf import settings
from pytz import timezone
def es_index_adapt(index_prefix, doc_type, rw=None):
    """Build the adapted ES index name: "<prefix>-<doc_type>[-<rw>]".

    ``rw`` selects the read/write alias suffix and must be None, 'read'
    or 'write'.
    """
    assert rw in [None, 'read', 'write']
    parts = [index_prefix, doc_type]
    if rw:
        parts.append(rw)
    return '-'.join(parts)
def tzlc(dt, truncate_to_sec=True):
    """Localize a naive datetime to the project timezone.

    None passes through; microseconds are dropped unless
    ``truncate_to_sec`` is False.
    """
    if dt is None:
        return None
    value = dt.replace(microsecond=0) if truncate_to_sec else dt
    return timezone(settings.TIME_ZONE).localize(value)
def to_epoch(dt):
    """Seconds since the Unix epoch for a timezone-aware datetime."""
    epoch = datetime(1970, 1, 1, tzinfo=timezone('UTC'))
    return (dt - epoch).total_seconds()
def reindex(es, old_index='', new_index='', chunk_size=500):
    """Copy every document from ``old_index`` to ``new_index`` via scan+bulk.

    Returns the result of elasticsearch.helpers.bulk.
    """
    docs = scan(client=es, scroll='1m', index=old_index)

    def _change_op(docs):
        # retarget each scanned document at the new index as a "create" op
        for d in docs:
            d['_op_type'] = 'create'
            d['_index'] = new_index
            yield d
    return bulk(client=es, actions=_change_op(docs), chunk_size=chunk_size)
def create_index(es, index_prefix, doc_type):
    """Create the base index for ``doc_type`` if missing.

    Returns True when the index was created, False if it already existed.
    """
    indices = es.indices
    index_name = es_index_adapt(
        index_prefix=index_prefix,
        doc_type=doc_type
    )
    if indices.exists(index=index_name):
        return False
    indices.create(index=index_name)
    return True
def delete_index(es, index_prefix, doc_type):
    """Delete the base index for ``doc_type`` if present.

    Returns True when the index was deleted, False if it did not exist.
    """
    indices = es.indices
    index_name = es_index_adapt(
        index_prefix=index_prefix,
        doc_type=doc_type
    )
    if not indices.exists(index=index_name):
        return False
    indices.delete(index=index_name)
    return True
def load_mapping(doc_type):
    """Load ../mapping/<doc_type>.json relative to this module, stripping
    "//" line comments before parsing.

    NOTE(review): the regex also strips "//" occurring inside JSON string
    values (e.g. "http://..."), so mapping files must avoid such values —
    confirm against the existing mapping files.
    """
    import os.path
    import re
    import json
    mapping_file_path = os.path.join(
        os.path.dirname(__file__),
        '..', 'mapping', '%s.json' % (doc_type,))
    mapping = ''
    with open(mapping_file_path, 'r') as f:
        for line in f:
            # strip comments
            mapping += re.sub(r'//.*$', '', line)
    mapping = json.loads(mapping)
    return mapping
def put_mapping(es, index_prefix, doc_type, mapping, delete=False):
    """Put ``mapping`` for ``doc_type`` on the write alias of the index,
    adapting the mapping body to the server's ES major version.

    When ``delete`` is True and the type already exists, its mapping is
    removed first.
    """
    cl = es.indices
    index = es_index_adapt(
        index_prefix=index_prefix,
        doc_type=doc_type,
        rw='write'
    )
    # delete the existing mapping first if the type exists and delete was requested
    if cl.exists_type(index=index, doc_type=doc_type) and delete:
        cl.delete_mapping(index=index, doc_type=doc_type)

    # there is no "index_analyzer" option in ver 2.x
    # but only "analyzer" and "search_analyzer"
    def _mapping_opt_modify(o):
        # recursively rename "analyzer" to "index_analyzer" wherever a
        # "search_analyzer" sibling exists (pre-2.x dialect)
        if 'analyzer' in o and 'search_analyzer' in o:
            o['index_analyzer'] = o['analyzer']
            del o['analyzer']
        for v in o.values():
            if isinstance(v, dict):
                _mapping_opt_modify(v)
        return o

    # mapping body needs to be modified
    # as there is a difference between ver 1.x and 2.x
    def _mapping_modify(es, doc_type, mp):
        es_info = es.info()
        es_ver = es_info['version']['number']
        from distutils.version import LooseVersion
        if LooseVersion(es_ver) < LooseVersion('2.0.0'):
            # pre-2.x expects the body wrapped under the type name
            return {
                doc_type: _mapping_opt_modify(mp)
            }
        else:
            return mp
    import copy
    # work on a copy so the caller's mapping dict is never mutated
    mapping_copy = copy.deepcopy(mapping)
    mapping_copy = _mapping_modify(es, doc_type, mapping_copy)
    return cl.put_mapping(index=index, doc_type=doc_type, body=mapping_copy)
def alias_shift(es, alias, old_index, new_index):
    """Atomically move ``alias`` from ``old_index`` to ``new_index``.

    No-op unless both indices exist.
    """
    indices = es.indices
    if not (indices.exists(index=old_index) and indices.exists(index=new_index)):
        return
    actions = {
        'actions': [
            {'remove': {'index': old_index, 'alias': alias}},
            {'add': {'index': new_index, 'alias': alias}},
        ]
    }
    indices.update_aliases(body=actions)
def init_alias(es, index_prefix, doc_type):
    """Point both the read and write aliases of ``doc_type`` at its base
    index (initial alias setup)."""
    cl = es.indices
    index = es_index_adapt(
        index_prefix=index_prefix,
        doc_type=doc_type
    )
    index_read = es_index_adapt(
        index_prefix=index_prefix,
        doc_type=doc_type,
        rw='read',
    )
    index_write = es_index_adapt(
        index_prefix=index_prefix,
        doc_type=doc_type,
        rw='write',
    )
    content = {
        'actions': [
            {'add': {'index': index, 'alias': index_read}},
            {'add': {'index': index, 'alias': index_write}},
        ]
    }
    cl.update_aliases(body=content)
def area_tag_id_filter(prefix_list, value):
    """Build an ES bool/should filter matching ``value`` against every area
    tag-id field (city, province, country) under each prefix."""
    fields = (
        'city_tag_id',
        'city_province_tag_id',
        'city_province_country_tag_id',
    )
    clauses = []
    for prefix in prefix_list:
        for field in fields:
            clauses.append({'term': {prefix + field: value}})
    return {'bool': {'should': clauses}}
# lazily-created, module-level shared Elasticsearch client
__es_instance = None


def get_es_instance():
    """Return the shared Elasticsearch client, creating it on first use
    from settings.ES_HOSTS (sniffing disabled)."""
    global __es_instance
    if __es_instance is not None:
        return __es_instance
    init_args = {
        'sniff_on_start': False,
        'sniff_on_connection_fail': False,
    }
    new_hosts = settings.ES_HOSTS
    __es_instance = Elasticsearch(hosts=new_hosts, **init_args)
    return __es_instance
from gm_rpcd.all import bind
from social.models import UserFollow
from .schema import schema_view
from data_sync.answer import get_answers_rpc
from data_sync.answer import get_answer_ids
@bind('mimas/sync/user_follow')
def sync_user_follow(items):
    """
    Upsert UserFollow rows from a list of dicts; each dict's 'id' selects
    the row and the remaining keys become its fields.
    :param items: list of dicts, each containing an 'id' key
    :return: None
    """
    for obj in items:
        fid = obj.pop('id')
        UserFollow.objects.update_or_create(id=fid, defaults=obj)
@bind('mimas/meta/schema')
def get_schema(app_label=None, db_table=None):
    """RPC: model schema description, optionally filtered by app/table."""
    return schema_view(app_label, db_table)
@bind('mimas/sync/answer')
def answer(ids):
    """RPC: serialized answers for a whitespace-separated id string."""
    result = get_answers_rpc(ids.split())
    return result
@bind('mimas/sync/question_answer')
def question_answer(ids):
    """RPC: answer ids for a whitespace-separated question-id string."""
    result = get_answer_ids(ids.split())
    return result
import sys
from gm_rpcd.commands.utils import add_cwd_to_path
from gm_rpcd.internals.utils import serve
def main(args):
    """Run a development gm_rpcd server.

    The optional first argument selects the bind address, either
    "host:port" or a bare "port"; defaults to 127.0.0.1:9000.
    """
    add_cwd_to_path()
    from gm_rpcd.internals.configuration import config
    # must be configured (and frozen) before serving
    config.is_develop_mode = True
    config.freeze()
    host = '127.0.0.1'
    port = 9000
    try:
        first_arg = args[0]
    except IndexError:
        pass  # no argument given: keep the defaults
    else:
        if ':' in first_arg:
            host, port = first_arg.split(':')
            port = int(port)
        else:
            port = int(first_arg)
    print('Serving on {}:{}'.format(host, port))
    serve(host=host, port=port)


if __name__ == '__main__':
    main(sys.argv[1:])
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line
# (e.g. make html SPHINXOPTS="-W").
SPHINXOPTS    =
SPHINXBUILD   = python -msphinx
SPHINXPROJ    = mimas
SOURCEDIR     = .
BUILDDIR      = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<gm_rpcd_config>
<info config_name="app" version="1.0"/>
<config name="application_name" value="mimas"/>
<config name="statuses" value="system:status" />
<config name="request_info_extractor" value="system:MimasRequestInfoExtractor" />
<config name="service_list">
<element value="mimas"/>
<element value="qa"/>
<element value="topic"/>
<element value="diary"/>
</config>
<config name="initializer_list">
<element value="init_django"/>
<element value="user_hierarchy.views"/>
<element value="qa.views"/>
<element value="hera.views"/>
<element value="hera.queries"/>
<element value="live.views"/>
<element value="talos.views"/>
<element value="data_sync.views"/>
<element value="search.views"/>
</config>
</gm_rpcd_config>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mimas documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 16 04:54:21 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))  # make the project root importable for autodoc
import django
# Bootstrap Django so autodoc can import view modules that touch the ORM.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings.settings')
django.setup()
from gm_rpcd.commands.utils import add_cwd_to_path
from gm_rpcd.internals.configuration import config
# Force develop mode and freeze the rpcd config so importing view modules
# (which register rpcd handlers) succeeds during the docs build.
config.is_develop_mode = True
config.freeze()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.httpdomain',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'mimas'
copyright = '2017, Community Team'
author = 'Community Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'mimasdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mimas.tex', 'mimas Documentation',
'Community Team', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mimas', 'mimas Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mimas', 'mimas Documentation',
author, 'mimas', 'One line description of project.',
'Miscellaneous'),
]
.. mimas documentation master file, created by
sphinx-quickstart on Sat Sep 16 04:54:21 2017.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Mimas文档
=================================
.. toctree::
:maxdepth: 2
:caption: Contents:
user_hierarchy.rst
qa/index.rst
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=python -msphinx
)
set SOURCEDIR=.
set BUILDDIR=_build
set SPHINXPROJ=mimas
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The Sphinx module was not found. Make sure you have Sphinx installed,
echo.then set the SPHINXBUILD environment variable to point to the full
echo.path of the 'sphinx-build' executable. Alternatively you may add the
echo.Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
:end
popd
问答接口文档
=================================
.. automodule:: qa.views.answer
:members:
.. automodule:: qa.views.answer
:members:
:undoc-members:
用户等级权益接口文档
=================================
.. automodule:: user_hierarchy.views
:members:
from __future__ import absolute_import
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Author : RobertDing
# E-mail : robertdingx@gmail.com
# Date : 18/05/04 18:31:32
# Desc : diary
#
from gm_dataquery.db import DataSQLQuery, DB
from talos.models.diary.diaryfeed import DiaryFeedWeight
# @DB registers this DataSQLQuery subclass with gm_dataquery's dispatcher.
@DB
class DiaryFeedWeightDQ(DataSQLQuery):
    # Admin data-query endpoint for DiaryFeedWeight; framework defaults only
    # (no custom filters or create/update overrides).
    model = DiaryFeedWeight
# coding=utf-8
import json
from django.db.models import Q
from gm_dataquery.client import rpc
from qa.models import (
KolQuestion,
KolAnswer,
Question,
Answer,
TouTiaoTag,
ToutiaoRelation,
KolAnswerRelationTag,
)
from gm_dataquery.dataquery import DataBuilder, DataSQLQuery
from gm_dataquery.db import DB
from gm_types.mimas import ASSESSMENT_TYPE
from ..views.common import get_tag_id_by_name
class KolQuestionDB(DataBuilder):
    """Row serializer for KolQuestion admin queries.

    ``getval_<field>`` methods supply computed fields that the DataBuilder
    framework merges into each serialized row.
    """

    def getval_is_review(self, obj):
        # Reviewed once the crawled question has been linked to a real
        # Question record. (Idiom fix: was an if/else returning True/False.)
        return bool(obj.question_id)

    def getval_vast_name(self, obj):
        # Nickname of the user owning the linked Question; empty string
        # when no Question has been linked yet.
        if obj.question_id:
            return Question.objects.get(id=obj.question_id).user.nickname
        return ''
@DB
class KolQuestionDQ(DataSQLQuery):
    """Data-query endpoint for KolQuestion with review/link filters."""

    model = KolQuestion
    data_model = KolQuestionDB

    def update(self, updates, **kwargs):
        # Keep the related KolAnswer pointing at the same Question this
        # KolQuestion is being linked to, then delegate the field update.
        kol_obj = KolQuestion.objects.get(**kwargs)
        kolanswer = KolAnswer.objects.get(kol_question_id=kol_obj.original_id)
        kolanswer.question_id = updates['question_id']
        kolanswer.save()
        return super().update(updates, **kwargs)

    def filter_catch_time(self, srch_key, srch_val, regex=False):
        # Delegate to the framework's time-range parser.
        return self._qry_time_range(srch_key, srch_val, regex)

    def filter_is_review(self, srch_key, srch_val, regex=False):
        # "Reviewed" means a Question has been linked (question_id set);
        # '0' selects the unreviewed rows.
        q = Q(question_id__isnull=False)
        if srch_val == '0':
            return ~q
        return q

    def filter_user_id(self, srch_key, srch_val, regex=False):
        # BUG FIX: QuerySet has values_list(), not value_list() — the
        # original raised AttributeError whenever this filter ran.
        vast_id = Question.objects.filter(user__nickname=srch_val).values_list('user_id', flat=True)
        return Q(vast_id__in=vast_id)
class KolAnswerDB(DataBuilder):
    """Row serializer for KolAnswer admin queries."""

    def get_new_related_tag(self, kol_answer_id):
        # Tags explicitly attached via the KolAnswerRelationTag table.
        tag_ids = KolAnswerRelationTag.objects.filter(
            kol_answer_id=kol_answer_id).values_list("tag_id", flat=True)
        return list(tag_ids)

    def get_related_tag(self, tag_list):
        """Map crawled (toutiao) tag names to ids in our tag library.

        :param tag_list: list of crawled tag names.
        :return: de-duplicated list of mapped tag ids.
        """
        tag_id_list = []
        for tag_name in tag_list:
            try:
                related_tag_ids = list(
                    TouTiaoTag.objects.get(toutiao=tag_name, is_online=True).relationtags.values_list('tag', flat=True)
                )
            except Exception:
                # Unknown or offline tag names contribute nothing.
                related_tag_ids = []
            tag_id_list.extend(related_tag_ids)
        return list(set(tag_id_list))

    def getval_is_review(self, obj):
        # Reviewed once published as an Answer or a column Article.
        return bool(obj.answer_id or obj.article_id)

    def getval_vast_name(self, obj):
        # Resolve the reviewer's display name via the gaia user RPC.
        user = rpc.gaia.user
        user.fields = ['id', 'last_name']
        return user.get(id=obj.vast_id)['last_name']

    def getval_question_title(self, obj):
        # Title of the crawled question this answer belongs to.
        return KolQuestion.objects.get(original_id=obj.kol_question_id, platform=obj.platform).title

    def getval_content(self, obj, need_escape=False):
        return obj.content

    def getval_tags_id(self, obj):
        # Legacy path mapped crawled tag names through get_related_tag();
        # tags are now read from the explicit relation table instead.
        return self.get_new_related_tag(obj.id)

    def getval_kolanswer_has_title(self, obj):
        return bool(obj.title or obj.kol_question_id or obj.question_id)

    def getval_kol_source_question(self, obj):
        # BUG FIX: bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception (missing KolQuestion
        # still yields an empty title).
        try:
            title = KolQuestion.objects.get(original_id=obj.kol_question_id).title
        except Exception:
            title = ''
        return title
@DB
class KolAnswerDQ(DataSQLQuery):
    """Data-query endpoint for KolAnswer: review filters and tag sync."""

    model = KolAnswer
    data_model = KolAnswerDB

    def filter_catch_time(self, srch_key, srch_val, regex=False):
        return self._qry_time_range(srch_key, srch_val, regex)

    def filter_is_review(self, srch_key, srch_val, regex=False):
        # Reviewed == published as an Answer or an Article; '0' negates.
        q = Q(question_id__isnull=False) | Q(article_id__isnull=False)
        if srch_val == '0':
            return ~q
        return q

    def filter_kolanswer_has_title(self, srch_key, srch_val, regex=False):
        # Consistency fix: parameter was misspelled ``srch_bal``; the
        # framework calls these filter hooks positionally.
        q = Q(title__isnull=False) | Q(kol_question_id__isnull=False) | Q(question_id__isnull=False)
        if srch_val == "0":
            return ~q
        return q

    def filter_question_title(self, srch_key, srch_val, regex=False):
        questions = KolQuestion.objects.filter(title__contains=srch_val)
        return Q(kol_question_id__in=[x.original_id for x in questions])

    def filter_tags_id(self, srch_key, srch_val, regex=False):
        kol_answer_ids = KolAnswerRelationTag.objects.filter(tag_id=int(srch_val)).values_list('kol_answer_id', flat=True)
        return Q(id__in=kol_answer_ids)

    def update(self, updates, **kwargs):
        """Apply a review decision and synchronize the tag relations.

        This override saves directly and never calls the base update, so
        popped keys are simply discarded. Removed the unused ``image``,
        ``answer_id`` and ``is_star`` local bindings from the original.
        """
        updates.pop('answer_id', None)
        tags_id = updates.pop('tags_id', None)
        updates.pop('is_star', None)  # accepted but currently unused
        check_type = updates.get('check_type', '0')
        article_id = updates.pop('article_id', None)
        vast_id = updates.get('vast_id')
        obj = KolAnswer.objects.get(**kwargs)
        if tags_id:
            # Reconcile the relation table with the submitted tag set:
            # create missing links, delete ones no longer present.
            old_tags = set(KolAnswerRelationTag.objects.filter(kol_answer_id=obj.id).values_list('tag_id', flat=True))
            new_tags = set(map(int, tags_id))
            for tag in (new_tags - old_tags):
                KolAnswerRelationTag.objects.get_or_create(
                    kol_answer_id=obj.id,
                    tag_id=tag
                )
            KolAnswerRelationTag.objects.filter(
                kol_answer_id=obj.id,
                tag_id__in=(old_tags - new_tags)
            ).delete()
        if check_type == ASSESSMENT_TYPE.SPECIALCOLUMN:
            obj.check_type = check_type
            obj.vast_id = vast_id
            obj.article_id = article_id  # column type: store article id on approval
            obj.save()
            return {'article_id': article_id, 'type': check_type}
        obj.check_type = check_type
        obj.vast_id = vast_id
        obj.save()
        return {'answer_id': obj.answer_id, 'type': check_type}
from gm_dataquery.db import DB
from live.models import LiveStream, ZhiboConfig
from gm_dataquery.dataquery import DataBuilder, DataSQLQuery
# Row serializer for LiveStream; relies entirely on DataBuilder defaults.
class LiveStreamDB(DataBuilder):
    pass
@DB
class LiveStreamDQ(DataSQLQuery):
    # Data-query endpoint for LiveStream with default framework behavior.
    model = LiveStream
    data_model = LiveStreamDB
# Row serializer for ZhiboConfig; relies entirely on DataBuilder defaults.
class ZhiboConfigDB(DataBuilder):
    pass
@DB
class TouTiaoTagDQ(DataSQLQuery):
    # NOTE(review): class is named TouTiaoTagDQ but serves the ZhiboConfig
    # model — looks like a copy/paste leftover. Renaming might change how
    # @DB registers/exposes this endpoint, so it is left as-is; confirm
    # against the gm_dataquery registry before renaming.
    model = ZhiboConfig
    data_model = ZhiboConfigDB
This diff is collapsed.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from urllib.parse import urljoin
from django.conf import settings
from django.db.models import Q
from gm_dataquery.dataquery import DataBuilder, DataSQLQuery
from gm_dataquery.db import DB
from gm_dataquery.dict_mixin import to_dict
from talos.models.subscript_article import (
SubscriptArticle,
SubCategory,
SubscriptArticleTag,
SubscriptArticleCategory,
)
from talos.services.tag import TagService
class SubscriptArticleDB(DataBuilder):
    """Row serializer for SubscriptArticle: computed tag/category fields."""

    def getval_tag_ids(self, obj):
        # Ids of online tags attached to this article.
        qs = SubscriptArticleTag.objects.filter(article_id=obj.id, is_online=True)
        return list(qs.values_list("tag_id", flat=True))

    def getval_category_ids(self, obj):
        # Ids of online categories attached to this article.
        qs = SubscriptArticleCategory.objects.filter(article_id=obj.id, is_online=True)
        return list(qs.values_list("category_id", flat=True))

    def getval_tags_name(self, obj):
        # Human-readable tag names joined with the CJK enumeration comma.
        tag_infos = TagService.get_tags_by_tag_ids(self.getval_tag_ids(obj))
        return "、".join(tag.name for tag in tag_infos)

    def getval_categoy_name(self, obj):
        # (Method-name typo "categoy" kept: the framework maps the method
        # name to the exposed field name.)
        cates = SubCategory.objects.filter(
            id__in=self.getval_category_ids(obj), is_online=True)
        return "、".join(cate.name for cate in cates)
@DB
class SubscriptArticleDQ(DataSQLQuery):
    """Data-query endpoint for SubscriptArticle.

    ``update``/``create`` keep the tag and category relation tables in
    sync with the submitted id lists (soft-delete via ``is_online``).
    """

    model = SubscriptArticle
    data_model = SubscriptArticleDB

    def update(self, updates, **kwargs):
        """Update scalar fields, then reconcile tag/category relations.

        ``tag_ids``/``category_ids`` of ``None`` means "leave unchanged";
        an empty list detaches every relation.
        """
        tag_ids = updates.pop('tag_ids', None)
        if tag_ids is not None:
            tag_ids = [int(i) for i in tag_ids]
        category_ids = updates.pop('category_ids', None)
        if category_ids is not None:
            category_ids = [int(i) for i in category_ids]
        obj = SubscriptArticle.objects.get(**kwargs)
        update_fields = [
            "name", "title", "url", "image_url", "share_img", "share_content",
            "is_header_article", "ordering", "is_online"
        ]
        to_update = False
        for field in update_fields:
            value = updates.pop(field, None)
            if value is not None:
                to_update = True
                setattr(obj, field, value)
        if to_update:
            obj.save()
        # Reconcile tags: soft-delete removed, re-enable kept, create new.
        if tag_ids is not None:
            tags = list(SubscriptArticleTag.objects.filter(article_id=obj.id).values_list('tag_id', flat=True))
            rm_ids = set(tags) - set(tag_ids)
            if rm_ids:
                SubscriptArticleTag.objects.filter(article_id=obj.id, tag_id__in=rm_ids).update(is_online=False)
            _ids = set(tags) & set(tag_ids)
            if _ids:
                SubscriptArticleTag.objects.filter(article_id=obj.id, tag_id__in=_ids).update(is_online=True)
            new_ids = set(tag_ids) - set(tags)
            ats = [
                SubscriptArticleTag(article_id=obj.id, tag_id=tag_id)
                for tag_id in new_ids
            ]
            if ats:
                SubscriptArticleTag.objects.bulk_create(ats)
        # Reconcile categories with the same pattern (loop variable renamed
        # from the original's misleading ``tag_id``).
        if category_ids is not None:
            cids = list(SubscriptArticleCategory.objects.filter(article_id=obj.id).values_list('category_id', flat=True))
            rm_ids = set(cids) - set(category_ids)
            if rm_ids:
                SubscriptArticleCategory.objects.filter(article_id=obj.id, category_id__in=rm_ids).update(is_online=False)
            _ids = set(cids) & set(category_ids)
            if _ids:
                SubscriptArticleCategory.objects.filter(article_id=obj.id, category_id__in=_ids).update(is_online=True)
            new_ids = set(category_ids) - set(cids)
            acs = [
                SubscriptArticleCategory(article_id=obj.id, category_id=category_id)
                for category_id in new_ids
            ]
            if acs:
                SubscriptArticleCategory.objects.bulk_create(acs)
        return {'id': obj.id}

    def create(self, **kwargs):
        """Create an article plus its tag/category relation rows.

        BUG FIX: the original popped ``tag_ids``/``category_ids`` with a
        default of ``None`` and then iterated them unconditionally,
        raising TypeError whenever either key was absent. Missing keys
        now simply mean "no relations to create".
        """
        tag_ids = kwargs.pop('tag_ids', None) or []
        category_ids = kwargs.pop('category_ids', None) or []
        obj = SubscriptArticle.objects.create(**kwargs)
        ats = [
            SubscriptArticleTag(article_id=obj.id, tag_id=tag_id)
            for tag_id in tag_ids
        ]
        if ats:
            SubscriptArticleTag.objects.bulk_create(ats)
        acs = [
            SubscriptArticleCategory(article_id=obj.id, category_id=category_id)
            for category_id in category_ids
        ]
        if acs:
            SubscriptArticleCategory.objects.bulk_create(acs)
        return {'id': obj.id}
# Row serializer for SubCategory; relies entirely on DataBuilder defaults.
class SubCategoryDB(DataBuilder):
    pass
@DB
class SubCategoryDQ(DataSQLQuery):
    # Data-query endpoint for SubCategory with default framework behavior.
    model = SubCategory
    data_model = SubCategoryDB
This diff is collapsed.
This diff is collapsed.
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from ..queries import qa, kol, diary, tag, tractate, live, subscript_article
from .qa import *
\ No newline at end of file
This diff is collapsed.
This diff is collapsed.
import os
import django
# NOTE(review): bootstrapping Django at import time inside an app module is
# unusual; presumably needed when this module is imported outside of a
# configured Django process — confirm before removing.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings.settings')
django.setup()
from django.apps import AppConfig
class LiveConfig(AppConfig):
    # Registers the "live" Django application.
    name = 'live'
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
# coding=utf8
from __future__ import unicode_literals, print_function
default_app_config = 'qa.apps.QaConfig'
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment