From cfbf13bffac55cc2a709cc6e292074b47fe734bd Mon Sep 17 00:00:00 2001
From: 柳神 <3162475700@qq.com>
Date: Tue, 29 Oct 2024 20:43:22 +0800
Subject: [PATCH] ⏲️ Fix the excessive data size caused by too many articles (#24)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../__pycache__/get_info.cpython-311.pyc | Bin 19339 -> 20985 bytes
 friend_circle_lite/get_info.py           |  55 +++++++++++++++---
 run.py                                   |   4 +-
 3 files changed, 48 insertions(+), 11 deletions(-)

diff --git a/friend_circle_lite/__pycache__/get_info.cpython-311.pyc b/friend_circle_lite/__pycache__/get_info.cpython-311.pyc
index 18248433d5a00bdcb593e73974c739492cb859a7..cce2a3a607a71ea8071beab711fd280b23266d3b 100644
Binary files a/friend_circle_lite/__pycache__/get_info.cpython-311.pyc and b/friend_circle_lite/__pycache__/get_info.cpython-311.pyc differ

diff --git a/friend_circle_lite/get_info.py b/friend_circle_lite/get_info.py
index e20a866..b6615e2 100644
--- a/friend_circle_lite/get_info.py
+++ b/friend_circle_lite/get_info.py
@@ -374,9 +374,12 @@ def marge_data_from_json_url(data, marge_json_url):
     print("Merge complete, there are now %d articles in total" % len(data['article_data']))
     return data
 
+import requests
+
 def marge_errors_from_json_url(errors, marge_json_url):
     """
-    Fetch error info from another remote JSON file and iterate over it, removing friend-link entries that are in errors but not in marge_errors.
+    Fetch error info from another remote JSON file and iterate over it,
+    removing friend-link entries that are in errors but not in marge_errors.
 
     Parameters:
     errors (list): list containing the error information
@@ -386,15 +389,49 @@ def marge_errors_from_json_url(errors, marge_json_url):
     list: the merged list of error information
     """
     try:
-        response = requests.get(marge_json_url, headers=headers, timeout=timeout)
+        response = requests.get(marge_json_url, timeout=10)  # set the request timeout
         marge_errors = response.json()
     except Exception as e:
-        print(f"Unable to fetch the link: {marge_json_url} , problem encountered: {e}")
+        print(f"Unable to fetch the link: {marge_json_url}, problem encountered: {e}")
         return errors
 
-    print("Starting to merge error info, the original error list has %d friends, the overseas error list has %d friends" % (len(errors), len(marge_errors)))
-    for error in errors:
-        if error not in marge_errors:
-            errors.remove(error)
-    print("Error info merge complete, now %d friends in total" % len(errors))
-    return errors
\ No newline at end of file
+    # Extract the URLs from marge_errors
+    marge_urls = {item[1] for item in marge_errors}
+
+    # Keep only the entries in errors whose URL also appears in marge_errors
+    filtered_errors = [error for error in errors if error[1] in marge_urls]
+
+    print("Error info merge complete, kept %d friends" % len(filtered_errors))
+    return filtered_errors
+
+def deal_with_large_data(result):
+    """
+    Process the article data, keeping the first 200 articles plus any later
+    articles whose authors already appear in those first 200.
+
+    Parameters:
+    result (dict): dictionary containing the statistics and the article data.
+
+    Returns:
+    dict: the processed data, containing only the articles that are needed.
+    """
+    article_data = result.get("article_data", [])
+
+    # Check whether there are more than 200 articles
+    if len(article_data) > 200:
+        print("Large amount of data, starting to process")
+        # Collect the set of authors of the first 200 articles
+        first_200_authors = {article["author"] for article in article_data[:200]}
+
+        # Starting from the 201st article, keep only articles by authors seen in the first 200
+        filtered_articles = article_data[:200] + [
+            article for article in article_data[200:]
+            if article["author"] in first_200_authors
+        ]
+
+        # Update article_data in the result
+        result["article_data"] = filtered_articles
+        # Update the statistics in the result
+        result["statistical_data"]["article_num"] = len(filtered_articles)
+        print("Data processing complete, kept %d articles" % len(filtered_articles))
+
+    return result

diff --git a/run.py b/run.py
index 35daf28..9691caf 100644
--- a/run.py
+++ b/run.py
@@ -1,5 +1,5 @@
 # Import the check_feed and parse_feed functions
-from friend_circle_lite.get_info import fetch_and_process_data, sort_articles_by_time, marge_data_from_json_url, marge_errors_from_json_url
+from friend_circle_lite.get_info import fetch_and_process_data, sort_articles_by_time, marge_data_from_json_url, marge_errors_from_json_url, deal_with_large_data
 from friend_circle_lite.get_conf import load_config
 from rss_subscribe.push_article_update import get_latest_articles_from_link, extract_emails_from_issues
 from push_rss_update.send_email import send_emails
@@ -22,7 +22,7 @@ if config["spider_settings"]["enable"]:
     print("Data merge enabled, fetching overseas data from {marge_json_url} and merging".format(marge_json_url=marge_json_url + "/all.json"))
     result = marge_data_from_json_url(result, marge_json_url + "/all.json")
     lost_friends = marge_errors_from_json_url(lost_friends, marge_json_url + "/errors.json")
-    
+    result = deal_with_large_data(result)
     sorted_result = sort_articles_by_time(result)
     with open("all.json", "w", encoding="utf-8") as f:
         json.dump(sorted_result, f, ensure_ascii=False, indent=2)
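
As a quick sanity check of the trimming behaviour introduced above, here is a minimal sketch. The import path and the field names (article_data, statistical_data, article_num, author) come from the patch itself; the sample articles are invented purely for illustration.

    # Assumes the patched friend_circle_lite/get_info.py is on the import path.
    from friend_circle_lite.get_info import deal_with_large_data

    # 250 fake articles: the first 200 by "alice"/"bob", the last 50 by "carol".
    result = {
        "statistical_data": {"article_num": 250},
        "article_data": (
            [{"author": "alice" if i % 2 else "bob", "title": f"post {i}"} for i in range(200)]
            + [{"author": "carol", "title": f"post {i}"} for i in range(200, 250)]
        ),
    }

    trimmed = deal_with_large_data(result)
    # "carol" never appears in the first 200 articles, so her 50 articles are
    # dropped and exactly 200 remain.
    print(trimmed["statistical_data"]["article_num"])  # -> 200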