PHP file display

Commit 0dfa2b995d (parent cb8dc2693c), committed 2025-04-26 12:41:01 +08:00
131 changed files with 65545 additions and 183 deletions


@@ -0,0 +1,105 @@
from hashlib import sha1
import hmac
import requests
import json
import urllib.parse
from datetime import datetime, timedelta


def dogecloud_api(api_path, data={}, json_mode=False):
    """
    Call the DogeCloud API.
    """
    access_key = '91fc21b83716ce4e'  # replace with your AccessKey
    secret_key = 'ea944e5745a1565f979df0a5457fe6e4'  # replace with your SecretKey
    body = ''
    mime = ''
    if json_mode:
        body = json.dumps(data)
        mime = 'application/json'
    else:
        body = urllib.parse.urlencode(data)
        mime = 'application/x-www-form-urlencoded'
    sign_str = api_path + "\n" + body
    signed_data = hmac.new(secret_key.encode('utf-8'), sign_str.encode('utf-8'), sha1)
    sign = signed_data.digest().hex()
    authorization = 'TOKEN ' + access_key + ':' + sign
    response = requests.post('https://api.dogecloud.com' + api_path, data=body, headers={
        'Authorization': authorization,
        'Content-Type': mime
    })
    return response.json()


def get_traffic_data(start_date, end_date):
    """
    Query CDN traffic data for the given date range.
    """
    api_path = "/cdn/stat/traffic.json"
    params = {
        "start_date": start_date,
        "end_date": end_date,
        "granularity": "day",  # daily granularity
        "area": "china",  # traffic within mainland China
        "domains": "down-cdn.ovofish.com"  # replace with your CDN domain
    }
    response = dogecloud_api(api_path, params)
    if response.get("code") == 200:
        return response["data"]
    else:
        raise Exception(f"API error: {response.get('msg')}")


def bytes_to_gb(bytes_value):
    """
    Convert bytes (B) to gigabytes (GB).
    """
    bytes_value /= 1024 ** 3
    # keep two decimal places
    bytes_value = round(bytes_value, 2)
    return bytes_value


def calculate_traffic_stats():
    """
    Compute this month's, today's, and yesterday's traffic.
    """
    today = datetime.now().date()
    yesterday = today - timedelta(days=1)
    first_day_of_month = today.replace(day=1)
    # query this month's traffic data
    month_data = get_traffic_data(first_day_of_month.strftime("%Y-%m-%d"), today.strftime("%Y-%m-%d"))
    this_month_traffic = sum(day["data"][0] for day in month_data["result"])  # total traffic this month
    # query today's and yesterday's traffic data
    today_data = get_traffic_data(today.strftime("%Y-%m-%d"), today.strftime("%Y-%m-%d"))
    yesterday_data = get_traffic_data(yesterday.strftime("%Y-%m-%d"), yesterday.strftime("%Y-%m-%d"))
    today_traffic = today_data["result"][0]["data"][0] if today_data["result"] else 0
    yesterday_traffic = yesterday_data["result"][0]["data"][0] if yesterday_data["result"] else 0
    # convert the traffic figures from bytes to GB
    return {
        "this_month_traffic_gb": bytes_to_gb(this_month_traffic),
        "today_traffic_gb": bytes_to_gb(today_traffic),
        "yesterday_traffic_gb": bytes_to_gb(yesterday_traffic)
    }


def save_to_json(data, filename="traffic_stats.json"):
    """
    Save the traffic statistics to a JSON file.
    """
    with open(filename, "w") as f:
        json.dump(data, f, indent=4)


if __name__ == "__main__":
    try:
        # compute the traffic statistics
        stats = calculate_traffic_stats()
        print("Traffic statistics (unit: GB)")
        print(json.dumps(stats, indent=4))
        # save the statistics to a JSON file
        save_to_json(stats)
        print("Traffic statistics saved to traffic_stats.json")
    except Exception as e:
        print(f"Error: {e}")


@@ -0,0 +1,156 @@
import requests
from bs4 import BeautifulSoup
import re
import json
import os
import hmac
import urllib.parse  # used by dogecloud_api() for form encoding
from hashlib import sha1

# configuration options
ENABLE_CDN_REFRESH = True  # set to False to disable CDN prefetching

print("Start downloading file links...")
# send a GET request to fetch the downloads page
url = "https://teamspeak.com/zh-CN/downloads/"
response = requests.get(url)
soup = BeautifulSoup(response.content, "html.parser")
# find all <a> tags and extract their links
links = soup.find_all("a", href=True)
# keep the links that point at "https://files.teamspeak-services.com"
file_links = [link["href"] for link in links if re.search(r"https://files.teamspeak-services.com", link["href"])]
# split the links by version (Ver6 support added)
ver3_links = [link for link in file_links if re.search(r"\/3\.\d+\.\d+\/", link)]
ver5_links = [link for link in file_links if re.search(r"\/(?:5\.\d+\.\d+|5\.\d+\.\d+\-\w+)/", link)]
ver6_links = [link for link in file_links if re.search(r"\/6\.\d+\.\d+.*\/", link)]  # new Ver6 pattern
# global flags
download_file = True
file_updated = False


# create a folder for each version and download its files
def save_links_to_folder(links, folder_name):
    global download_file, file_updated
    folder_path = os.path.join(os.getcwd(), "tsfile", folder_name)
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
    for link in links:
        file_name = link.split("/")[-1]
        file_path = os.path.join(folder_path, file_name)
        # check whether the file needs updating
        if os.path.exists(file_path):
            response = requests.head(link)
            remote_file_size = int(response.headers.get('Content-Length', 0))
            local_file_size = os.path.getsize(file_path)
            if remote_file_size == local_file_size:
                print(f"File {file_name} is already up to date.")
                continue
            else:
                os.remove(file_path)
                print(f"New version of {file_name} found, downloading...")
                file_updated = True
        # download the file
        with open(file_path, "wb") as file:
            response = requests.get(link)
            file.write(response.content)
        print(f"File {file_name} downloaded to {folder_name}")
        file_updated = True


# download the files for each version (Ver6 handling added)
save_links_to_folder(ver3_links, "Ver3")
save_links_to_folder(ver5_links, "Ver5")
save_links_to_folder(ver6_links, "Ver6")
# store the upstream links as a JSON file (Ver6 data added)
data = {
    "Ver3": ver3_links,
    "Ver5": ver5_links,
    "Ver6": ver6_links  # new Ver6 data
}
with open("file_links.json", "w") as json_file:
    json.dump(data, json_file, indent=4)
print("Links saved to file_links.json")
# build the local download links
base_download_url = "https://file-us.ovofish.com/tsfile/"


def generate_local_download_links(links, folder_name):
    local_download_links = [base_download_url + folder_name + "/" + link.split("/")[-1] for link in links]
    return local_download_links


ver3_local_download_links = generate_local_download_links(ver3_links, "Ver3")
ver5_local_download_links = generate_local_download_links(ver5_links, "Ver5")
ver6_local_download_links = generate_local_download_links(ver6_links, "Ver6")  # Ver6 link generation added
# merge all download links for CDN prefetching
all_local_links = ver3_local_download_links + ver5_local_download_links + ver6_local_download_links
# store the local download links as a JSON file (Ver6 data added)
local_download_links_data = {
    "Ver3": ver3_local_download_links,
    "Ver5": ver5_local_download_links,
    "Ver6": ver6_local_download_links  # new Ver6 data
}
with open("local_download_links.json", "w") as local_json_file:
    json.dump(local_download_links_data, local_json_file, indent=4)
print("Local download links saved to local_download_links.json")


# CDN refresh (now controlled by the ENABLE_CDN_REFRESH switch)
def dogecloud_api(api_path, data={}, json_mode=False):
    """DogeCloud API helper."""
    print('Calling the DogeCloud API...')
    access_key = ''  # fill in your Access Key
    secret_key = ''  # fill in your Secret Key
    if not access_key or not secret_key:
        print('Please fill in your DogeCloud Access Key and Secret Key.')
        return None
    body = json.dumps(data) if json_mode else urllib.parse.urlencode(data)
    mime = 'application/json' if json_mode else 'application/x-www-form-urlencoded'
    sign_str = api_path + "\n" + body
    signed_data = hmac.new(secret_key.encode('utf-8'), sign_str.encode('utf-8'), sha1)
    sign = signed_data.digest().hex()
    authorization = 'TOKEN ' + access_key + ':' + sign
    response = requests.post(
        'https://api.dogecloud.com' + api_path,
        data=body,
        headers={'Authorization': authorization, 'Content-Type': mime}
    )
    return response.json()


# decide whether to run CDN prefetching based on the configuration
if file_updated and ENABLE_CDN_REFRESH:
    print("Files updated, starting CDN prefetch...")
    # prefetch the individual files
    refresh_data = {
        'rtype': 'prefetch',
        'urls': json.dumps(all_local_links)
    }
    response = dogecloud_api('/cdn/refresh/add.json', refresh_data, json_mode=True)
    print("File prefetch response:", response)
    # refresh the directory
    path_refresh_data = {
        'rtype': 'path',
        'urls': 'https://file-us.ovofish.com/file/'
    }
    response = dogecloud_api('/cdn/refresh/add.json', path_refresh_data, json_mode=True)
    print("Directory refresh response:", response)
elif file_updated:
    print("Files updated, but CDN refresh is disabled.")
else:
    print("No files updated; nothing to do.")


@@ -1,149 +0,0 @@
import requests
from bs4 import BeautifulSoup
import re
import json
import os
import hmac
from hashlib import sha1

print("Start downloading file links...")
# send a GET request to fetch the downloads page
url = "https://teamspeak.com/zh-CN/downloads/"
response = requests.get(url)
soup = BeautifulSoup(response.content, "html.parser")
# find all <a> tags and extract their links
links = soup.find_all("a", href=True)
# keep the links that point at "https://files.teamspeak-services.com"
file_links = [link["href"] for link in links if re.search(r"https://files.teamspeak-services.com", link["href"])]
# split the links by version
ver3_links = [link for link in file_links if re.search(r"\/3\.\d+\.\d+\/", link)]
ver5_links = [link for link in file_links if re.search(r"\/(?:5\.\d+\.\d+|5\.\d+\.\d+\-\w+)/", link)]
# global flags
download_file = True
file_updated = False


# create a folder for each version and download its files
def save_links_to_folder(links, folder_name):
    folder_path = os.path.join(os.getcwd(), "file", folder_name)
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
    for link in links:
        file_name = link.split("/")[-1]
        file_path = os.path.join(folder_path, file_name)
        # check whether the file needs updating
        global download_file
        if os.path.exists(file_path):
            response = requests.head(link)
            remote_file_size = int(response.headers.get('Content-Length', 0))
            local_file_size = os.path.getsize(file_path)
            if remote_file_size == local_file_size:
                download_file = False
                print(f"File {file_name} is up to date; no download needed.")
            else:
                download_file = True
                os.remove(file_path)
        # download the file if needed
        if download_file:
            with open(file_path, "wb") as file:
                response = requests.get(link)
                file.write(response.content)
            print(f"File {file_name} downloaded to {folder_name}")


# download the files for each version
save_links_to_folder(ver3_links, "Ver3")
save_links_to_folder(ver5_links, "Ver5")
# store the upstream links as a JSON file
data = {"Ver3": ver3_links, "Ver5": ver5_links}
with open("file_links.json", "w") as json_file:
    json.dump(data, json_file, indent=4)
print("Links saved to file_links.json")


def downloadFile():
    global file_updated
    if download_file:
        print("download_file value:", download_file)
        file_updated = True
    else:
        print("download_file value:", download_file)
        file_updated = False


downloadFile()
print("Final value of file_updated:", file_updated)
# build the local download links
base_download_url = "https://file-teamspeak-download.lolicon.team/file/"


def generate_local_download_links(links, folder_name):
    local_download_links = [base_download_url + folder_name + "/" + link.split("/")[-1] for link in links]
    return local_download_links


ver3_local_download_links = generate_local_download_links(ver3_links, "Ver3")
ver5_local_download_links = generate_local_download_links(ver5_links, "Ver5")
# store the local download links as a JSON file
local_download_links_data = {"Ver3": ver3_local_download_links, "Ver5": ver5_local_download_links}
with open("local_download_links.json", "w") as local_json_file:
    json.dump(local_download_links_data, local_json_file, indent=4)
print("Local download links saved to local_download_links.json")
# check whether any file was updated
if file_updated:
    print("Files updated, starting CDN prefetch...")

    # prefetch the CDN
    def dogecloud_api(api_path, data={}, json_mode=False):
        print('Calling the DogeCloud API...')
        access_key = ''  # replace with your DogeCloud Access Key
        secret_key = ''  # replace with your DogeCloud Secret Key
        # skip everything below if the keys are missing
        if not access_key or not secret_key:
            print('Please fill in your DogeCloud Access Key and Secret Key.')
            return None
        body = ''
        mime = ''
        if json_mode:
            body = json.dumps(data)
            mime = 'application/json'
        else:
            body = urllib.parse.urlencode(data)  # note: urllib is never imported in this old version
            mime = 'application/x-www-form-urlencoded'
        sign_str = api_path + "\n" + body
        signed_data = hmac.new(secret_key.encode('utf-8'), sign_str.encode('utf-8'), sha1)
        sign = signed_data.digest().hex()
        authorization = 'TOKEN ' + access_key + ':' + sign
        response = requests.post('https://api.dogecloud.com' + api_path, data=body, headers={
            'Authorization': authorization,
            'Content-Type': mime
        })
        return response.json()

    api_path = '/cdn/refresh/add.json'
    data = {
        'rtype': 'prefetch',
        'urls': json.dumps(all_local_links)  # note: all_local_links is never defined in this old version
    }
    response = dogecloud_api(api_path, data, json_mode=True)
    print(response)
    data = {
        'rtype': 'path',
        'urls': 'https://file-teamspeak-download.lolicon.team/file/'
    }
    response = dogecloud_api(api_path, data, json_mode=True)
    print(response)
else:
    print("No files updated; no CDN prefetch needed.")


@@ -1,23 +1,5 @@
beautifulsoup4==4.12.3
bs4==0.0.2
certifi==2024.7.4
charset-normalizer==3.3.2
colorama==0.4.6
colorlog==6.8.2
hjson==3.1.0
idna==3.7
mcdreforged==2.13.1
packaging==24.1
parse==1.20.2
prompt_toolkit==3.0.47
psutil==6.0.0
py-cpuinfo==9.0.0
PySocks==1.7.1
requests==2.32.3
resolvelib==1.0.1
ruamel.yaml==0.18.6
ruamel.yaml.clib==0.2.8
soupsieve==2.6
typing_extensions==4.12.2
urllib3==2.2.2
wcwidth==0.2.13