⏲️ Fix the oversized data payload produced when there are too many articles (#24)
parent ec87edc284
commit cfbf13bffa
friend_circle_lite/get_info.py
@@ -374,9 +374,12 @@ def marge_data_from_json_url(data, marge_json_url):
     print("Data merge complete; there are now %d articles" % len(data['article_data']))
     return data
 
+import requests
+
 def marge_errors_from_json_url(errors, marge_json_url):
     """
-    Fetch the error list from another remote JSON file and iterate over it, deleting any friend-link entry that is present in errors but absent from marge_errors.
+    Fetch the error list from another remote JSON file and iterate over it,
+    deleting any friend-link entry that is present in errors but absent from marge_errors.
 
     Parameters:
         errors (list): the list of error entries
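The hunk above moves import requests to module level because the rewritten function below calls requests.get directly, and the next hunk pins a 10-second timeout in place of the old shared headers/timeout globals. A minimal standalone sketch of that fetch guard (fetch_remote_json is a hypothetical name, not part of the repo):

    import requests

    def fetch_remote_json(url):
        # An explicit timeout turns an unresponsive host into an exception
        # instead of letting requests.get() block forever (it has no default timeout).
        try:
            response = requests.get(url, timeout=10)
            return response.json()  # raises a ValueError subclass on non-JSON bodies
        except Exception as e:
            print(f"Failed to fetch {url}: {e}")
            return None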
@@ -386,15 +389,49 @@ def marge_errors_from_json_url(errors, marge_json_url):
         list: the merged list of error entries
     """
     try:
-        response = requests.get(marge_json_url, headers=headers, timeout=timeout)
+        response = requests.get(marge_json_url, timeout=10)  # set a request timeout
         marge_errors = response.json()
     except Exception as e:
-        print(f"Failed to fetch link: {marge_json_url} , error: {e}")
+        print(f"Failed to fetch link: {marge_json_url}, error: {e}")
         return errors
 
-    print("Starting error merge; the original list has %d friends and the overseas list has %d friends" % (len(errors), len(marge_errors)))
-    for error in errors:
-        if error not in marge_errors:
-            errors.remove(error)
-    print("Error merge complete; there are now %d friends" % len(errors))
-    return errors
+    # Extract the URLs contained in marge_errors
+    marge_urls = {item[1] for item in marge_errors}
+
+    # Keep only the entries in errors whose URL also appears in marge_errors
+    filtered_errors = [error for error in errors if error[1] in marge_urls]
+
+    print("Error merge complete; kept %d friends" % len(filtered_errors))
+
+    return filtered_errors
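The rewrite above is more than cosmetic: the old loop removed items from errors while iterating over it, and mutating a list during iteration makes Python skip the element that follows each removal, so stale entries could survive. A minimal sketch of the difference, with hypothetical (name, url) tuples; only the item[1] URL indexing comes from the diff, the sample values are invented:

    errors = [("A", "https://a.example"), ("B", "https://b.example"), ("C", "https://c.example")]
    marge_errors = [("A", "https://a.example")]

    # Old approach: each remove() shifts the list left, so the iterator
    # skips the next element; ("C", ...) is never examined and survives.
    buggy = list(errors)
    for error in buggy:
        if error not in marge_errors:
            buggy.remove(error)
    print(buggy)     # [('A', 'https://a.example'), ('C', 'https://c.example')]

    # New approach: build the URL set once, then filter without mutating.
    marge_urls = {item[1] for item in marge_errors}
    filtered = [error for error in errors if error[1] in marge_urls]
    print(filtered)  # [('A', 'https://a.example')]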
+
+
+def deal_with_large_data(result):
+    """
+    Process the article data, keeping the first 200 articles plus any later articles whose authors appear in those first 200.
+
+    Parameters:
+        result (dict): dict containing the statistical data and the article data.
+
+    Returns:
+        dict: the processed data, containing only the articles to keep.
+    """
+    article_data = result.get("article_data", [])
+
+    # Check whether there are more than 200 articles
+    if len(article_data) > 200:
+        print("Large data set; starting to trim it")
+        # Collect the set of authors of the first 200 articles
+        first_200_authors = {article["author"] for article in article_data[:200]}
+
+        # From the 201st article onward, keep only articles by authors who appear in the first 200
+        filtered_articles = article_data[:200] + [
+            article for article in article_data[200:]
+            if article["author"] in first_200_authors
+        ]
+
+        # Update article_data in the result
+        result["article_data"] = filtered_articles
+        # Update the statistical data in the result
+        result["statistical_data"]["article_num"] = len(filtered_articles)
+        print("Processing complete; kept %d articles" % len(filtered_articles))
+
+    return result
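A quick sanity check of the trimming rule, assuming deal_with_large_data from the hunk above is importable; the field names (article_data, statistical_data, article_num, author) come from the diff, while the sample records are invented:

    # 200 articles by "alice", one by a brand-new author, then one more by "alice".
    result = {
        "statistical_data": {"article_num": 202},
        "article_data": (
            [{"author": "alice", "title": "post %d" % i} for i in range(200)]
            + [{"author": "mallory", "title": "stray post"}]
            + [{"author": "alice", "title": "late post"}]
        ),
    }

    trimmed = deal_with_large_data(result)

    # "mallory" is absent from the first 200 articles, so the stray post is
    # dropped; alice's late post is kept because she appears in the first 200.
    assert trimmed["statistical_data"]["article_num"] == 201
    assert all(article["author"] == "alice" for article in trimmed["article_data"])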
run.py
@@ -1,5 +1,5 @@
 # Import the check_feed and parse_feed functions
-from friend_circle_lite.get_info import fetch_and_process_data, sort_articles_by_time, marge_data_from_json_url, marge_errors_from_json_url
+from friend_circle_lite.get_info import fetch_and_process_data, sort_articles_by_time, marge_data_from_json_url, marge_errors_from_json_url, deal_with_large_data
 from friend_circle_lite.get_conf import load_config
 from rss_subscribe.push_article_update import get_latest_articles_from_link, extract_emails_from_issues
 from push_rss_update.send_email import send_emails
@@ -22,7 +22,7 @@ if config["spider_settings"]["enable"]:
     print("Data merge feature enabled; fetching overseas data from {marge_json_url} and merging".format(marge_json_url=marge_json_url + "/all.json"))
     result = marge_data_from_json_url(result, marge_json_url + "/all.json")
     lost_friends = marge_errors_from_json_url(lost_friends, marge_json_url + "/errors.json")
-
+    result = deal_with_large_data(result)
     sorted_result = sort_articles_by_time(result)
     with open("all.json", "w", encoding="utf-8") as f:
         json.dump(sorted_result, f, ensure_ascii=False, indent=2)
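Why the new call lands where it does: trimming runs after both merge calls, so the 200-article cap applies to the combined local-plus-overseas data set, and before sorting, so sort_articles_by_time and the json.dump below only handle the reduced list. Condensed from the hunk above, with step comments added:

    result = marge_data_from_json_url(result, marge_json_url + "/all.json")                   # 1. merge overseas articles
    lost_friends = marge_errors_from_json_url(lost_friends, marge_json_url + "/errors.json")  # 2. reconcile error lists
    result = deal_with_large_data(result)                                                     # 3. apply the 200-article cap
    sorted_result = sort_articles_by_time(result)                                             # 4. sort only what remains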