A one-click script that automatically fetches the latest input-method dictionaries, simple enough for beginners

First, install the panel that will run the task automatically (the Qinglong panel); see its installation tutorial.
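If you do not already have it, the Qinglong panel is usually deployed with Docker; a typical install command (the image name whyour/qinglong, port 5700, and the mounted path are the commonly documented defaults, so check the project page for current values) looks like this:

docker run -dit \
  -v $PWD/ql/data:/ql/data \
  -p 5700:5700 \
  --name qinglong \
  --hostname qinglong \
  --restart unless-stopped \
  whyour/qinglong:latest

Once the container is up, the panel is reachable on port 5700 of your server, and the script below is added there as a scheduled task.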

The script is as follows. It is a Python script, so create a file named zx.py (filename plus extension) and paste the code in.

# cron: 0 9 * * *
# Qinglong task: runs every day at 9:00 AM

import requests
import os
from datetime import datetime
from bs4 import BeautifulSoup

today = datetime.now().strftime('%Y-%m-%d')
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
}

# Output directory inside the Qinglong container; adjust if your panel uses a different path
save_dir = "/ql/scripts/input_dicts"
os.makedirs(save_dir, exist_ok=True)

def fetch_baidu():
    print("正在抓取百度输入法词库...")
    url = "https://shurufa.baidu.com/dict/"
    res = requests.get(url, headers=headers, timeout=15)
    soup = BeautifulSoup(res.text, "html.parser")

    result = []
    # Each dictionary link points to a detail page; keep the name only if that page offers a .bdict download
    for a in soup.select("a[href^='/dict_inner']"):
        dict_url = "https://shurufa.baidu.com" + a['href']
        dict_name = a.get_text(strip=True)
        try:
            detail = requests.get(dict_url, headers=headers, timeout=15)
            detail_soup = BeautifulSoup(detail.text, "html.parser")
            down_a = detail_soup.select_one("a[href$='.bdict']")
            if down_a:
                result.append(dict_name)
        except requests.RequestException:
            continue

    filepath = os.path.join(save_dir, f"百度输入法{today}.txt")
    with open(filepath, "w", encoding="utf-8") as f:
        f.write("\n".join(result))
    print("百度词库保存成功:", filepath)

def fetch_sogou():
    print("正在抓取搜狗输入法词库...")
    url = "https://pinyin.sogou.com/dict/"
    res = requests.get(url, headers=headers, timeout=15)
    soup = BeautifulSoup(res.text, "html.parser")

    result = []
    for a in soup.select("div.dict_detail_title a"):
        dict_name = a.get_text(strip=True)
        result.append(dict_name)

    filepath = os.path.join(save_dir, f"搜狗输入法{today}.txt")
    with open(filepath, "w", encoding="utf-8") as f:
        f.write("\n".join(result))
    print("搜狗词库保存成功:", filepath)

def fetch_ifly():
    print("正在抓取讯飞输入法词库...")
    url = "https://srf.xunfei.cn/resource"
    res = requests.get(url, headers=headers, timeout=15)
    soup = BeautifulSoup(res.text, "html.parser")

    result = []
    for a in soup.select("a[href^='/resource/detail']"):
        dict_name = a.get_text(strip=True)
        result.append(dict_name)

    filepath = os.path.join(save_dir, f"讯飞输入法{today}.txt")
    with open(filepath, "w", encoding="utf-8") as f:
        f.write("\n".join(result))
    print("讯飞词库保存成功:", filepath)

if __name__ == "__main__":
    try:
        fetch_baidu()
    except Exception as e:
        print("百度抓取失败:", e)

    try:
        fetch_sogou()
    except Exception as e:
        print("搜狗抓取失败:", e)

    try:
        fetch_ifly()
    except Exception as e:
        print("讯飞抓取失败:", e)

You need to install the dependencies first:

pip3 install requests beautifulsoup4
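
The script above only records dictionary names. If you also want to save the .bdict file that fetch_baidu() detects on a detail page, a minimal sketch of an extra helper could look like the following (download_bdict is a hypothetical addition that reuses the script's headers and save_dir, and assumes the detail-page link points directly at the .bdict file):

def download_bdict(down_href, dict_name):
    # Detail pages may use relative links; prefix the site domain if needed
    if down_href.startswith("/"):
        down_href = "https://shurufa.baidu.com" + down_href
    resp = requests.get(down_href, headers=headers, timeout=30)
    resp.raise_for_status()
    # Store the raw .bdict file next to the name lists
    filepath = os.path.join(save_dir, f"{dict_name}.bdict")
    with open(filepath, "wb") as f:
        f.write(resp.content)
    return filepath

Inside fetch_baidu(), you would then call download_bdict(down_a['href'], dict_name) in the branch where down_a is found.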

The schedule above fetches every day at 9:00 AM. You can customise it by editing the cron expression (cron: 0 9 * * *). A run once a month, or once every three months, is a better choice than a daily one: the official sites only add one or two dictionaries per day, so fetching daily gains you very little.
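For example, using standard cron syntax (minute, hour, day of month, month, day of week), the two schedules suggested above would be:

# cron: 0 9 1 * *     9:00 AM on the 1st of every month
# cron: 0 9 1 */3 *   9:00 AM on the 1st of January, April, July and October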

The output files are saved in the script directory of your Qinglong panel (the save_dir path set at the top of the script).
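To check the results, open the panel's file manager, or list the directory from the host (assuming the container is named qinglong, as in the install command above):

docker exec -it qinglong ls /ql/scripts/input_dicts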
