# -*- coding: utf8 -*-
from __future__ import unicode_literals, absolute_import, print_function

from collections import defaultdict

from django.db.models import Q, Count
from gm_types.gaia import AUTHOR_TYPE, PROBLEM_FLAG_CHOICES
from gm_types.gaia import TOPIC_ORDER_IN_DIARY

from ..logger import info_logger
from ..models.topic import Problem, ProblemTag, ProblemTagV3, TopicImage, TopicReply
from social.models import SocialInfo
from talos.libs.datetime_utils import get_timestamp_or_none
from talos.libs.image_utils import get_full_path
from talos.models.diary.diary import Diary
from talos.models.topic.topicvote import TopicVote
from talos.rpc import get_objects_from_queryset_and_pk_list
from talos.services.tag import TagService
from talos.services.user import UserService
from talos.services.goods import GoodsService
from talos.services import UserConvertService, TagV3Service


def get_topicvote_info_for_user(topic_ids, user_id):
    """Return {topic_id: True} for every topic in topic_ids that user_id has voted on."""
    if not (topic_ids and user_id):
        return {}
    tvs = TopicVote.objects.filter(topic_id__in=topic_ids, user_id=user_id)
    if not tvs:
        return {}
    result = {}
    for tv in tvs:
        result[tv.topic_id] = True
    return result


class TopicListDataManager(object):
    """Get topic list data.

    TODO: clean fields; backend ship pc requires updates.
    """

    @staticmethod
    def _get_topic_author_info_by_user_ids(user_ids):
        """Get topic user_info.

        :param user_ids: topic author user_id
        :return: dict of user info as {
            user_id: {
                user_id,
                user_name,
                membership_level,
                portrait,
                topic_num_posted,
                vote_num_gained,
            }
        }
        """
        result = {}
        users = UserConvertService.get_user_info_by_user_ids(user_ids, simple=False)
        if not users:
            return result

        for k, v in users.items():
            v.update({
                'topic_num_posted': v.pop("topic_count", 0),
                'vote_num_gained': v.pop("vote_count", 0),
                'city': v.pop("city_name", ""),
            })
            result[k] = v

        return result

    @staticmethod
    def _get_compatible_fields():
        # for backwards compatibility only
        return {
            'author_type': AUTHOR_TYPE.USER,
            'comments': [],
            'diary_num': 0,
            'is_favord': False,
            'is_topic': False,
            'is_recommend': False,
            'is_recommend_all': False,
            'is_private': False,
            'interval': '',
            'patient_image': '',
            'patient_image_thumb': '',
            'pre_operation_images': [],
            'show_large_images': False,
            'topic_type_id': '0',
            'title_style_type': '',
        }

    @staticmethod
    def _get_images_and_images_raw_of_topic_ids(topic_ids):
        if not topic_ids:
            return {}
        image_map = {tid: {'images': [], 'images_raw': []} for tid in topic_ids}
        images = TopicImage.objects.filter(topic_id__in=topic_ids).order_by('id')
        for image in images:
            image_url = get_full_path(image.image_url)
            if image_url.strip():
                image_map[image.topic_id]['images'].append(image_url)
                image_map[image.topic_id]['images_raw'].append({
                    'image_url': image_url,
                    'taken_time': get_timestamp_or_none(image.taken_time),
                    'is_cover': image.is_cover,
                })
        return image_map
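
    # Illustrative sketch (not part of the original module; ids, URL and
    # timestamp are hypothetical): the batched helper above always returns one
    # entry per requested topic id, even when a topic has no images, e.g.
    #
    #   TopicListDataManager._get_images_and_images_raw_of_topic_ids([1, 2])
    #   # -> {1: {'images': ['https://.../a.jpg'],
    #   #         'images_raw': [{'image_url': 'https://.../a.jpg',
    #   #                         'taken_time': 1514736000,
    #   #                         'is_cover': True}]},
    #   #     2: {'images': [], 'images_raw': []}}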

    @staticmethod
    def _get_images_and_images_raw(topic_obj):
        """Return images info.

        :return: dict {
            'images': list of str,
            'images_raw': list of dict as {image_url: str, taken_time: int, is_cover: bool},
        }
        """
        result = {
            'images': [],
            'images_raw': [],
        }
        for image in topic_obj.images.order_by('id'):
            image_url = get_full_path(image.image_url)
            if image_url.strip():
                result['images'].append(image_url)
                result['images_raw'].append({
                    'image_url': image_url,
                    'taken_time': get_timestamp_or_none(image.taken_time),
                    'is_cover': image.is_cover,
                })
        return result

    @staticmethod
    def _get_diary_operation_time(topic_objs):
        result = {}
        t_d = {t.diary_id: t.id for t in topic_objs if t.diary_id}
        if not t_d:
            return result
        diary_ids = list(t_d.keys())
        ds = Diary.objects.filter(id__in=diary_ids).values_list('id', 'operation_time')
        for diary_id, operation_time in ds:
            topic_id = t_d[diary_id]
            result[topic_id] = get_timestamp_or_none(operation_time)
        return result

    @staticmethod
    def _get_vote_info(viewer_user_id, topics):
        topic_ids = [t.id for t in topics]
        vote_info = get_topicvote_info_for_user(topic_ids, viewer_user_id)
        return vote_info

    @staticmethod
    def _get_follow_info(viewer_user_id, author_user_ids):
        follow_rels = {}
        if viewer_user_id:
            social_info = SocialInfo(viewer_user_id)
            follow_rels = social_info.is_following_users(uids=author_user_ids)
        return follow_rels

    @staticmethod
    def get_topic_tagv3_info_by_topic_ids(topic_ids):
        """Get the tag_v3 info associated with each topic.

        :param topic_ids:
        :return:
        """
        result = {}
        if not topic_ids:
            return result

        topic_tag_v3_ids = ProblemTagV3.objects.filter(
            problem_id__in=topic_ids
        ).values_list("problem_id", "tag_v3_id")
        tag_v3_infos = TagV3Service.get_tags_by_tag_v3_ids(set(item[1] for item in topic_tag_v3_ids))

        for topic_id, tag_v3_id in topic_tag_v3_ids:
            if topic_id not in result:
                result[topic_id] = []
            tag_v3_info = tag_v3_infos.get(tag_v3_id, None)
            if tag_v3_info:
                result[topic_id].append(TagV3Service.format_tag_v3(tag_v3_info))

        return result

    @staticmethod
    def get_topic_tag_info_by_topic_ids(topic_ids):
        """Get the tag info associated with each topic.

        :param topic_ids:
        :return:
        """
        result = {}
        if not topic_ids:
            return result

        topic_tag_ids = ProblemTag.objects.filter(
            problem_id__in=topic_ids
        ).values_list("problem_id", "tag_id")
        tag_infos = TagService.get_tags_dict_by_tag_ids(set(item[1] for item in topic_tag_ids))

        for topic_id, tag_id in topic_tag_ids:
            if topic_id not in result:
                result[topic_id] = []
            tag_info = tag_infos.get(tag_id, None)
            if tag_info:
                result[topic_id].append(TagService.format_tag(tag_info))

        return result

    def get_topic_list_data_for_diary_detail_page(self, topics):
        return self._get_inner_data_by_topic_objs_v2(topics)
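
    # Illustrative sketch (not part of the original module; ids are
    # hypothetical): both tag helpers above map topic ids to lists of formatted
    # tags, so callers can batch once per page of topics, e.g.
    #
    #   tags_by_topic = TopicListDataManager.get_topic_tag_info_by_topic_ids([101, 102])
    #   tags_v3_by_topic = TopicListDataManager.get_topic_tagv3_info_by_topic_ids([101, 102])
    #   # values are lists of dicts built by TagService.format_tag /
    #   # TagV3Service.format_tag_v3; topics with no tags simply have no key.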

    def _get_inner_data_by_topic_objs(self, topics, need_live_reply=True):
        """Get the topic info that is internal to talos (i.e. needs no external services)."""
        result = []
        operation_time_dict = self._get_diary_operation_time(topics)
        for topic in topics:
            operation_time = operation_time_dict.get(topic.id)
            image_info = self._get_images_and_images_raw(topic)
            data = {
                # kept for talos/diary/get compatibility; drop once that API is removed
                'is_online': topic.is_online,
                'content': topic.content,
                'answer_richtext': topic.answer_richtext,
                'created_time': get_timestamp_or_none(topic.created_time),
                'last_modified_time': get_timestamp_or_none(topic.last_modified),
                'operation_date': get_timestamp_or_none(topic.operation_date),
                'is_mixed': topic.is_mixed,
                'images': image_info['images'],
                'images_raw': image_info['images_raw'],
                'doctor_num': topic.doctor_num,  # used by the doctor app
                'reply_num': topic.reply_num,
                'title': topic.get_title(),
                'topic_id': topic.id,  # for API compatibility
                'id': topic.id,  # for API compatibility
                'vote_num': topic.vote_amount,
                'view_num': topic.view_amount,
                'operation_time': operation_time,
                'replay': topic.get_live_replay_info(),
                'video': topic.get_video_info(),  # for API compatibility
                'activity_id': topic.activity and topic.activity.id or None,
                'activity_title': topic.activity and topic.activity.title or None,
                'operation_record': topic.operation_record or '',
                'topic_type': topic.topic_type,
                'operation_after_days': topic.operation_after_days,
            }
            if need_live_reply:
                data.update({'replay_app': topic.get_live_replay_info_to_app(None)})
            data.update(topic.get_video_info())  # for API compatibility
            data.update(self._get_compatible_fields())
            result.append(data)
        return result

    def _get_inner_data_by_topic_objs_v2(self, topics):
        """Get the topic info that is internal to talos (i.e. needs no external services).

        v2: drops the data that is not needed.
        """
        from live.models import LiveStream

        result = []
        topic_ids = [topic.id for topic in topics]
        image_map = self._get_images_and_images_raw_of_topic_ids(topic_ids)
        operation_time_dict = self._get_diary_operation_time(topics)
        vote_amount_map = Problem.get_vote_amount_of_topic_ids(topic_ids)
        view_amount_map = Problem.get_view_amount_of_topic_ids(topic_ids)
        stream_map = Problem.get_extra_obj_of_topic_ids(topic_ids, LiveStream)
        replay_map = Problem.get_live_replay_info_of_topic_ids(topic_ids, stream_map)
        video_map = Problem.get_video_info_of_topic_ids(topic_ids)

        for topic in topics:
            operation_time = operation_time_dict.get(topic.id)
            # fall back to empty lists so a missing map entry cannot raise KeyError
            image_info = image_map.get(topic.id) or {'images': [], 'images_raw': []}
            data = {
                # kept for talos/diary/get compatibility; drop once that API is removed
                'is_online': topic.is_online,
                'content': topic.content,
                'answer_richtext': topic.answer_richtext,
                'created_time': get_timestamp_or_none(topic.created_time),
                'last_modified_time': get_timestamp_or_none(topic.last_modified),
                'operation_date': get_timestamp_or_none(topic.operation_date),
                'is_mixed': topic.is_mixed,
                'images': image_info['images'],
                'images_raw': image_info['images_raw'],
                'doctor_num': topic.doctor_num,  # used by the doctor app
                'reply_num': topic.reply_num,
                'title': topic.get_title(),
                'topic_id': topic.id,  # for API compatibility
                'id': topic.id,  # for API compatibility
                'vote_num': vote_amount_map.get(topic.id, 0),
                'view_num': view_amount_map.get(topic.id, 0),
                'operation_time': operation_time,
                'replay': replay_map.get(topic.id, {}),
                'video': video_map.get(topic.id, {}),  # for API compatibility
                'activity_id': topic.activity and topic.activity.id or None,
                'activity_title': topic.activity and topic.activity.title or None,
                'topic_type': topic.topic_type,
            }
            data.update(video_map.get(topic.id, {}))  # for API compatibility
            data.update(self._get_compatible_fields())
            result.append(data)
        return result
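
    # Illustrative note (not part of the original module; ids are
    # hypothetical): the v2 variant above replaces the per-topic calls used by
    # _get_inner_data_by_topic_objs with one batched lookup per data source
    # (images, votes, views, replays, videos), so building N rows issues a
    # roughly constant number of queries instead of O(N). A hedged sketch of
    # the intended call, via the public wrapper:
    #
    #   manager = TopicListDataManager()
    #   rows = manager.get_topic_list_data_for_diary_detail_page(
    #       list(Problem.objects.filter(id__in=[101, 102, 103])))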

    def _generate_topic_data(self, topics, viewer_user_id, need_service_or_live_reply=True):
        result = []
        topic_tag_dict = defaultdict(list)
        tag_ids = set()
        topic_ids = [t.id for t in topics]
        topic_tag_ids = self._get_distinct_problem_tag_objs_by_topic_ids(topic_ids)
        topic_tag_v3_infos = self.get_topic_tagv3_info_by_topic_ids(topic_ids)
        service_ids = [t.diary.service_id for t in topics if t.diary_id and t.diary.service_id]
        service_mapping = (GoodsService.get_diary_show_info_by_service_ids(service_ids)
                           if need_service_or_live_reply else {})

        for tt in topic_tag_ids:
            tag_ids.add(tt.tag_id)
            topic_tag_dict[tt.problem_id].append(tt.tag_id)

        tags = TagService.get_tags_by_tag_ids(ids=list(tag_ids))
        tags_dict = {t.id: t for t in tags}

        topic_author_user_ids = [t.user_id for t in topics]
        topic_author_info_dict = self._get_topic_author_info_by_user_ids(
            user_ids=topic_author_user_ids
        )
        # follow_rels = self._get_follow_info(viewer_user_id, topic_author_user_ids)
        vote_info = self._get_vote_info(viewer_user_id, topics)

        topics_dict = {}
        topic_ids = []
        for t in topics:
            topics_dict[t.id] = t
            topic_ids.append(t.id)

        topic_infos = self._get_inner_data_by_topic_objs(topics, need_live_reply=need_service_or_live_reply)
        topic_infos = {t['topic_id']: t for t in topic_infos}

        for topic_id in topic_ids:
            topic = topics_dict.get(topic_id)
            if not topic:
                continue
            topic_author_info = topic_author_info_dict.get(topic.user_id)
            if not topic_author_info:
                continue

            tag_info = [
                tags_dict.get(tag_id)
                for tag_id in topic_tag_dict.get(topic.id, [])
                if tag_id in tags_dict
            ]
            tag_info = [{'name': t.name, 'tag_id': t.id, 'tag_type': t.tag_type, 'type': t.tag_type}
                        for t in tag_info]

            data = {
                'user': topic_author_info,
                'problem': {
                    'is_voted': vote_info.get(topic.id, False),
                    # 'is_following': follow_rels.get(topic.user_id, False),
                    'is_following': True,
                    'tags': tag_info,
                    "tags_v3": topic_tag_v3_infos.get(topic.id, []),
                },
            }
            topic_info = topic_infos[topic_id]
            data['problem'].update(topic_info)

            if topic.diary_id:
                # pull the service info linked via the diary: tags, city name, etc.
                service_info = service_mapping.get(topic.diary.service_id) or {}
                data['diary'] = {}
                topic_num = topic.diary.topic_num
                data['diary']['topic_num'] = topic_num
                data['diary']['city_name'] = service_info.get('city_name', '')
                data['diary']['tags'] = service_info.get('tags', [])
                data['update_time'] = get_timestamp_or_none(topic.created_time)
            else:
                data['diary'] = None
            result.append(data)
        return result

    @staticmethod
    def _get_distinct_problem_tag_objs_by_topic_ids(topic_ids):
        topic_tag_objs = ProblemTag.objects.filter(
            problem_id__in=topic_ids
        ).distinct()
        return topic_tag_objs

    def get_list_data_by_topic_objs(self, topic_objs, viewer_user_id=None, need_service_or_live_reply=True):
        if not topic_objs:
            return []
        return self._generate_topic_data(topics=topic_objs,
                                         viewer_user_id=viewer_user_id,
                                         need_service_or_live_reply=need_service_or_live_reply)

    def list_problems_by_ids(self, topic_ids):
        topics = get_objects_from_queryset_and_pk_list(
            Problem.objects.filter(is_online=True),
            topic_ids
        )
        return {
            topic.id: topic
            for topic in topics
        }
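
    # Illustrative usage sketch (not part of the original module; ids are
    # hypothetical):
    #
    #   manager = TopicListDataManager()
    #   topics_by_id = manager.list_problems_by_ids([101, 102])
    #   rows = manager.get_list_data_by_topic_objs(
    #       list(topics_by_id.values()), viewer_user_id=7)
    #   # rows is [] when no online topics match; otherwise each row carries
    #   # the 'user', 'problem' and 'diary' keys built by _generate_topic_data.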

    def get_list_data_by_topic_ids(self, topic_ids, viewer_user_id=None):
        """Get a list of topic data.

        NOTE: the return order is determined by the order of topic_ids.

        :param topic_ids: list of topic ids
        :param viewer_user_id: id of the user who is viewing the topic list
        """
        topics = get_objects_from_queryset_and_pk_list(
            Problem.objects.select_related('video').filter(is_online=True),
            topic_ids
        )
        if not topics:
            return []
        return self._generate_topic_data(topics=topics, viewer_user_id=viewer_user_id)

    def get_list_data_by_topic_objs_for_pc(self, topics, topic_sort, start_num, topic_count, topic_ids):
        result = []
        for t in topics:
            if topic_sort == TOPIC_ORDER_IN_DIARY.OLDEST:
                diary_num = start_num + topic_ids.index(t.id) + 1
            else:
                diary_num = topic_count - start_num - topic_ids.index(t.id)
            data = {
                'content': t.content,
                'id': t.id,
                'created_time': get_timestamp_or_none(t.created_time),
                'images': self._get_images_and_images_raw(t)['images'],
                'video': t.get_video_info(),
                'reply_num': t.reply_num,
                'vote_num': t.vote_amount,
                'operation_after_days': t.operation_after_days,
                'diary_num': diary_num,
            }
            result.append(data)
        return result

    def generate_info_for_index_banner_by_ids(self, topic_ids, viewer_user_id):
        _info = {}
        if not topic_ids:
            return _info

        topics = Problem.objects.filter(pk__in=topic_ids)
        vote_info = get_topicvote_info_for_user(topic_ids, viewer_user_id)
        topic_author_user_ids = [t.user_id for t in topics]
        topic_author_info_dict = self._get_topic_author_info_by_user_ids(
            user_ids=topic_author_user_ids
        )

        for topic in topics:
            topic_user_info = topic_author_info_dict.get(topic.user_id)
            if not topic_user_info:
                continue
            _data = {
                'is_voted': vote_info.get(topic.id, False),
                'vote_num': topic.vote_amount,
                'reply_num': topic.reply_num,
                'view_num': topic.view_num,
                'user_id': topic.user_id,
                'user_nickname': topic_user_info['user_name'],
                'user_portrait': topic_user_info['portrait'],
            }
            _info.update({str(topic.id): _data})
        return _info

    def get_topic_data_by_topics(self, topics):
        topic_ids = [t.id for t in topics]
        # Fetch the first two comments of each diary topic (newest first);
        # only top-level diary comments are included.
        comment_topic = TopicReply.objects.filter(
            problem_id__in=topic_ids, replied_topic_id=None, is_online=True
        ).order_by('-reply_date').values('id', 'problem_id', 'user_id', 'content', 'reply_date')
        comments_count_info = TopicReply.objects.filter(
            problem_id__in=topic_ids
        ).values('problem_id').annotate(count=Count('id'))

        comment_count = {}
        for comment_number in comments_count_info:
            _id = comment_number['problem_id']
            _count = comment_number['count']
            comment_count[_id] = _count

        limit_comment_number, comment_dic = 2, {}
        for comment in comment_topic:
            if len(comment_dic.get(comment['problem_id'], [])) >= limit_comment_number:
                continue
            comment_dic.setdefault(comment['problem_id'], []).append(comment)

        topics_data = self.get_topic_list_data_for_diary_detail_page(topics)
        for topic in topics_data:
            topic_id = topic['id']
            comments = comment_dic.get(topic_id, [])
            for comment in comments:
                comment.update({
                    "reply_date": get_timestamp_or_none(comment.get('reply_date')),
                })
            topic['comment_count'] = comment_count.get(topic_id, 0)
            topic['comments'] = comments
        return topics_data


topic_list_manager = TopicListDataManager()
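
# Illustrative usage sketch (not part of the original module; the import path
# and ids below are assumptions):
#
#   from talos.services.topic import topic_list_manager
#
#   rows = topic_list_manager.get_list_data_by_topic_ids([101, 102], viewer_user_id=7)
#   for row in rows:
#       print(row['problem']['topic_id'], row['problem']['is_voted'],
#             row['user']['user_name'])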