Movie Recommendation System

ITWYY · 2022-03-11


Tech stack: Python + Django + SQLite + Scrapy crawler

**Recommendation algorithms:** user-based collaborative filtering + item-based collaborative filtering

First, the Scrapy crawler collects a dataset of mainland-China red-education (patriotic) films from Douban. After the data is restructured and filtered, the two collaborative filtering algorithms produce recommendation results that are saved to an SQLite database and presented on the front end through the Django framework.
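
Both algorithms below consume ratings pulled out of the ORM into plain dicts. For reference, this is roughly the shape that UserCf expects (the usernames, ids, and marks here are invented purely for illustration):

all_user = {
    'alice': {'101': 5, '102': 3},  # {movie id (str): rating}
    'bob':   {'101': 4, '103': 2},
}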

Recommendation workflow:

[Figure: recommendation workflow diagram]

System running results:

[Six screenshots of the running system]

Recommendation algorithm implementation:

import operator
from math import sqrt

from django.db.models import Count, Q, Subquery
# The models below belong to the project itself; the exact import path depends
# on the app layout (the commented line is an assumption, not from the source):
# from movie.models import Movie, Rate, User, UserTagPrefer


class UserCf:

    # Keep a reference to the full rating data: {username: {movie id: rating}}
    def __init__(self, all_user):
        self.all_user = all_user

    # Return two users' rating dicts by username (debugging only)
    def getItems(self, username1, username2):
        return self.all_user[username1], self.all_user[username2]

    # Pearson correlation coefficient between two users
    def pearson(self, user1, user2):  # each argument is a dict: {movie id: rating}
        sum_xy = 0.0  # running sum of the products of the two users' ratings
        n = 0  # number of movies rated by both users
        sum_x = 0.0  # sum of user1's ratings
        sum_y = 0.0  # sum of user2's ratings
        sumX2 = 0.0  # sum of the squares of user1's ratings
        sumY2 = 0.0  # sum of the squares of user2's ratings
        for movie1, score1 in user1.items():
            if movie1 in user2.keys():  # only movies both users have rated count
                n += 1
                sum_xy += score1 * user2[movie1]
                sum_x += score1
                sum_y += user2[movie1]
                sumX2 += pow(score1, 2)
                sumY2 += pow(user2[movie1], 2)
        if n == 0:
            # no overlap between the two users, so no measurable correlation
            return 0
        molecule = sum_xy - (sum_x * sum_y) / n  # numerator
        denominator = sqrt((sumX2 - pow(sum_x, 2) / n) * (sumY2 - pow(sum_y, 2) / n))  # denominator
        if denominator == 0:
            return 0
        r = molecule / denominator
        return r
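
    # Written out, the loop above computes the sample Pearson correlation over
    # the n co-rated movies:
    #     r = (Σxy − Σx·Σy/n) / sqrt((Σx² − (Σx)²/n) · (Σy² − (Σy)²/n))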

    # Rank all other users by similarity to the current user, return the top n
    def nearest_user(self, current_user, n=1):
        distances = {}
        # {username: similarity}
        # iterate over the whole dataset
        for user, rate_set in self.all_user.items():
            # skip the current user
            if user != current_user:
                distance = self.pearson(self.all_user[current_user], self.all_user[user])
                # similarity between the two users
                distances[user] = distance
        closest_distance = sorted(
            distances.items(), key=operator.itemgetter(1), reverse=True
        )
        # the n most similar users
        # print("closest users:", closest_distance[:n])
        return closest_distance[:n]

    # Recommend movies to a user
    def recommend(self, username, n=3):
        recommend = {}
        nearest_user = self.nearest_user(username, n)
        for user, score in dict(nearest_user).items():  # the n most similar users
            for movies, scores in self.all_user[user].items():  # that neighbor's rated movies
                if movies not in self.all_user[username].keys():  # not yet seen by username
                    if movies not in recommend.keys():  # add to the recommendation list
                        recommend[movies] = scores * score  # neighbor's rating weighted by similarity
        # sort the recommendations by weighted score, highest first
        return sorted(recommend.items(), key=operator.itemgetter(1), reverse=True)


# Entry point for user-based recommendation
def recommend_by_user_id(user_id):
    user_prefer = UserTagPrefer.objects.filter(user_id=user_id).order_by('-score').values_list('tag_id', flat=True)
    current_user = User.objects.get(id=user_id)
    # If the current user has not rated anything, fall back to their chosen tags;
    # with no tags either, recommend the 15 most viewed movies
    if current_user.rate_set.count() == 0:
        if len(user_prefer) != 0:
            movie_list = Movie.objects.filter(tags__in=user_prefer)[:15]
        else:
            movie_list = Movie.objects.order_by("-num")[:15]
        return movie_list
    # Rank users by how many ratings they have given (most active first)
    users_rate = Rate.objects.values('user').annotate(mark_num=Count('user')).order_by('-mark_num')
    user_ids = [user_rate['user'] for user_rate in users_rate]
    user_ids.append(user_id)
    users = User.objects.filter(id__in=user_ids)
    all_user = {}
    for user in users:
        rates = user.rate_set.all()
        rate = {}
        # the user has rated movies: record them in rate and all_user
        if rates:
            for i in rates:
                rate.setdefault(str(i.movie.id), i.mark)
            all_user.setdefault(user.username, rate)
        else:
            # the user has never rated a movie: store an empty dict
            all_user.setdefault(user.username, {})

    user_cf = UserCf(all_user=all_user)
    recommend_list = [each[0] for each in user_cf.recommend(current_user.username, 15)]
    movie_list = list(Movie.objects.filter(id__in=recommend_list).order_by("-num")[:15])
    other_length = 15 - len(movie_list)
    if other_length > 0:
        fix_list = Movie.objects.filter(~Q(rate__user_id=user_id)).order_by('-collect')
        for fix in fix_list:
            if fix not in movie_list:
                movie_list.append(fix)
            if len(movie_list) >= 15:
                break
    return movie_list


# Compute the similarity between two movies
def similarity(movie1_id, movie2_id):
    movie1_set = Rate.objects.filter(movie_id=movie1_id)
    # number of users who rated movie1
    movie1_sum = movie1_set.count()
    # number of users who rated movie2
    movie2_sum = Rate.objects.filter(movie_id=movie2_id).count()
    # number of users who rated both movies
    common = Rate.objects.filter(user_id__in=Subquery(movie1_set.values('user_id')), movie=movie2_id).values('user_id').count()
    # nobody has rated one of the movies
    if movie1_sum == 0 or movie2_sum == 0:
        return 0
    similar_value = common / sqrt(movie1_sum * movie2_sum)
    return similar_value
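
# In math terms, similarity() above is the cosine similarity between the two
# movies' sets of raters: sim(m1, m2) = |U1 ∩ U2| / sqrt(|U1| · |U2|), where
# U1 and U2 are the sets of users who rated movie1 and movie2 respectively.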


# Entry point for item-based recommendation
def recommend_by_item_id(user_id, k=15):
    # the user's top three preferred tags
    user_prefer = UserTagPrefer.objects.filter(user_id=user_id).order_by('-score').values_list('tag_id', flat=True)
    user_prefer = list(user_prefer)[:3]
    current_user = User.objects.get(id=user_id)
    # If the current user has not rated anything, fall back to their chosen tags;
    # with no tags either, recommend the 15 most viewed movies
    if current_user.rate_set.count() == 0:
        if len(user_prefer) != 0:
            movie_list = Movie.objects.filter(tags__in=user_prefer)[:15]
        else:
            movie_list = Movie.objects.order_by("-num")[:15]
        return movie_list
    # Take 30 movies the user has not watched from their preferred tags, then
    # score each one against the movies the user has already rated
    un_watched = Movie.objects.filter(~Q(rate__user_id=user_id), tags__in=user_prefer).order_by('?')[:30]  # unwatched movies
    watched = Rate.objects.filter(user_id=user_id).values_list('movie_id', 'mark')
    distances = []
    # predicted interest in an unwatched movie: the sum over watched movies of
    # similarity(unwatched, watched) weighted by the user's rating
    for un_watched_movie in un_watched:
        total = 0
        for movie_id, mark in watched:
            total += similarity(un_watched_movie.id, movie_id) * mark
        distances.append((total, un_watched_movie))
    distances.sort(key=lambda x: x[0], reverse=True)
    recommend_list = []
    for mark, movie in distances:
        if len(recommend_list) >= k:
            break
        if movie not in recommend_list:
            recommend_list.append(movie)
    # TODO: if fewer than k recommendations are produced, fill the list with
    # popular unwatched movies, as recommend_by_user_id does
    return recommend_list


if __name__ == '__main__':
    similarity(2003, 2008)
    recommend_by_item_id(1)
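
UserCf itself has no Django dependency, so it can be sanity-checked with a hand-built rating dict. A minimal sketch with toy data (the usernames, ids, and marks below are made up for illustration):

toy_users = {
    'alice': {'1': 5, '2': 3},
    'bob':   {'1': 4, '2': 2, '3': 5},
    'carol': {'2': 4, '3': 1},
}
cf = UserCf(all_user=toy_users)
# expected: [('3', 5.0)] -- movie '3' from the most similar neighbor, bob
print(cf.recommend('alice', n=2))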

Douban movie crawler implementation:

import json
import random
import string

import scrapy
from scrapy import Request

# The item class comes from the project's items module; the import path below
# is an assumption about the app layout:
# from redmovie.items import RedmovieItem


class MovieSpider(scrapy.Spider):
    name = 'movie'
    allowed_domains = ['movie.douban.com']

    base_url = "https://movie.douban.com/j/new_search_subjects?sort=U&range=0,10&tags=&start={page}&genres={genres}&countries={countries}&limit=150"


    def start_requests(self):
        genres = "战争"
        countries = "中国大陆"
        bid = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(11))
        cookies = {'bid': bid}
        yield Request(self.base_url.format(page=0, genres=genres, countries=countries), callback=self.parse,
                      meta={'page': 0, 'genres': genres, 'countries': countries}, cookies=cookies)
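
    # Note: 'bid' is the random identifier cookie Douban normally assigns in a
    # browser; generating a fresh one per request appears to make the crawler
    # less likely to be blocked (an empirical workaround, not documented API).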


    def parse(self, response):
        result = json.loads(response.text)
        if len(result.get('data')) != 0:
            for node in result.get('data'):
                id = node.get('id')
                bid = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(11))
                cookies = {'bid': bid}
                yield Request(url='https://movie.douban.com/subject/{}/'.format(id), callback=self.parse_subject,
                              meta={'id': id}, cookies=cookies)

            # Pagination: the API serves 150 results per page; crawl up to start=600
            page = response.meta['page']
            if page < 600:
                page = page + 150
                genres = response.meta['genres']
                countries = response.meta['countries']
                yield Request(self.base_url.format(page=page, genres=genres, countries=countries), callback=self.parse,
                              meta={'page': page, 'genres': genres, 'countries': countries})

    def parse_subject(self, response):
        item = RedmovieItem()
        id = response.meta['id']
        # item['douban_movie_id'] = id

        # Movie title
        item['movie_name'] = response.xpath('//span[@property="v:itemreviewed"]/text()').extract()[0]

        # Directors
        directors = ';'.join(response.xpath('//*[@rel="v:directedBy"]/text()').extract())
        if len(directors) != 0:
            item['movie_directors'] = directors
        else:
            item['movie_directors'] = None

        # Country/region of production
        try:
            item['movie_country'] = response.xpath('//*[@id="info"]').re(r'制片国家/地区:</span>\s(.*)<br>')[0]
        except IndexError:
            item['movie_country'] = None

        # Release date
        Date = ';'.join(response.xpath('//span[@property="v:initialReleaseDate"]/text()').extract())
        if len(Date) != 0:
            item['movie_years'] = Date.split('(')[0]
        else:
            item['movie_years'] = None

        # Lead actors
        movie_leader = ';'.join(response.xpath('//*[@rel="v:starring"]/text()').extract())
        if len(movie_leader) != 0:
            item['movie_leader'] = movie_leader
        else:
            item['movie_leader'] = None

        # Number of raters
        item['movie_d_rate_nums'] = response.xpath('//span[@property="v:votes"]/text()').extract_first()

        # Douban rating
        item['movie_d_rate'] = response.xpath('//strong[@property="v:average"]/text()').extract_first()

        # Synopsis
        item['movie_intro'] = response.xpath('//span[@property="v:summary"]/text()').extract_first().strip()

        # Original cover image URL
        item['movie_origin_image_link'] = response.xpath('//*[@rel="v:image"]/@src').extract()[0]

        # Local path for the downloaded cover
        item['movie_image_link'] = "movie_cover/" + item['movie_name'] + ".jpg"

        # Douban page link
        item['movie_imdb_link'] = 'https://movie.douban.com/subject/{}/'.format(id)

        yield item

        # Optional fields that the final item does not use, kept commented out
        # for future reference:

        # # Scriptwriters
        # scriptwriter = ';'.join(
        #     response.xpath('//span[contains(text(),"编剧")]/..//span[@class="attrs"]/a/text()').extract())
        # item['scriptwriter'] = scriptwriter if len(scriptwriter) != 0 else None

        # # Genres
        # item['type'] = '/'.join(response.xpath('//span[@property="v:genre"]//text()').extract())

        # # Language
        # try:
        #     item['language'] = response.xpath('//*[@id="info"]').re(r'语言:</span>\s(.*)<br>')[0]
        # except IndexError:
        #     item['language'] = None

        # # Runtime
        # if response.xpath('//span[@property="v:runtime"]/text()').extract():
        #     item['runtime'] = response.xpath('//span[@property="v:runtime"]/text()').extract()[0]
        # elif response.xpath('//*[@id="info"]').re(r'片长:</span>\s(.*)<br>'):
        #     item['runtime'] = response.xpath('//*[@id="info"]').re(r'片长:</span>\s(.*)<br>')[0]
        # else:
        #     item['runtime'] = None

        # # IMDb link
        # try:
        #     item['IMDb'] = \
        #         response.xpath('//a[@rel="nofollow" and contains(@href, "www.imdb.com/title/")]/@href').extract()[0]
        # except IndexError:
        #     item['IMDb'] = None
