Skip to content

Commit f07f372

Browse files
author
Taois
committed
feat: 发布新版本
1 parent 969fd5c commit f07f372

File tree

10 files changed

+248
-52
lines changed

10 files changed

+248
-52
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -138,6 +138,7 @@ dist
138138
/spider/js/百忙无果[].js
139139
/data/settings/link_data.json
140140
/yarn.lock
141+
/t4_daemon.pid
141142
/custom.json
142143
/index.json
143144
/vod_cache/

README.md

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
# drpyS(drpy-node)
22

33
nodejs作为服务端的drpy实现。全面升级异步写法
4-
~~积极开发中,每日一更~~,当前进度 `75%`
4+
~~积极开发中,每日一更~~,当前进度 `77%`
55
~~找工作中,随缘更新~~
66
上班当牛马,下班要带娃,阶段性佛系趁娃睡觉熬夜更新
77

@@ -25,6 +25,10 @@ nodejs作为服务端的drpy实现。全面升级异步写法
2525

2626
## 更新记录
2727

28+
### 20250818
29+
30+
更新至V1.2.14
31+
2832
### 20250817
2933

3034
更新至V1.2.13
@@ -37,14 +41,6 @@ nodejs作为服务端的drpy实现。全面升级异步写法
3741

3842
更新至V1.2.11
3943

40-
### 20250813
41-
42-
更新至V1.2.10
43-
44-
### 20250812
45-
46-
更新至V1.2.9
47-
4844
[点此查看完整更新记录](docs/updateRecord.md)
4945

5046
**注意事项**

docs/apiList.md

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,14 @@
11
## API列表(一部分,逐步完善)
22

3+
### ds
4+
35
- 获取定时任务列表 [/tasks](/tasks)
46
- 立即执行全部任务 [/execute-now/:taskName](/execute-now/)
57
- 立即执行钉钉消息任务 [/execute-now/dingtalk_test](/execute-now/dingtalk_test)
68
- 立即执行企业微信消息任务 [/execute-now/wechat_test](/execute-now/wechat_test)
79
- 立即执行吾爱论坛签到任务 [/execute-now/52pojie_sign](/execute-now/52pojie_sign) | [说明](./cron/52pojie_sign.md)
8-
- 获取指定任务信息 [/tasks/:taskName](/tasks/)
10+
- 获取指定任务信息 [/tasks/:taskName](/tasks/)
11+
12+
### hipy
13+
14+
- 检查python环境 [/health](/health)

docs/updateRecord.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,8 @@
66

77
1. 修复 cat源内import assets库处理逻辑错误,推荐环境变量开启 `CAT_DEBUG=1`
88
2. 重构py源执行逻辑,确保代码健壮性和py执行性能最强。
9+
3. 尝试修复 vercel没写文件权限导致的整个项目无法启动问题(vercel无法支持py源)
10+
4. 修复两个py源
911

1012
### 20250817
1113

package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "drpy-node",
3-
"version": "1.2.13",
3+
"version": "1.2.14",
44
"main": "index.js",
55
"type": "module",
66
"scripts": {

public/index.html

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
</head>
99
<body>
1010
<h1 id="drpysdrpy-node">drpyS(drpy-node)</h1>
11-
<p>nodejs作为服务端的drpy实现。全面升级异步写法<br><del>积极开发中,每日一更</del>,当前进度 <code>75%</code><br><del>找工作中,随缘更新</del><br>上班当牛马,下班要带娃,阶段性佛系趁娃睡觉熬夜更新</p>
11+
<p>nodejs作为服务端的drpy实现。全面升级异步写法<br><del>积极开发中,每日一更</del>,当前进度 <code>77%</code><br><del>找工作中,随缘更新</del><br>上班当牛马,下班要带娃,阶段性佛系趁娃睡觉熬夜更新</p>
1212
<ul>
1313
<li><a href="docs/apidoc.md">接口文档</a> | <a href="docs/apiList.md">接口列表如定时任务</a> | <a href="https://github.com/waifu-project/movie/pull/135">小猫影视-待对接T4</a></li>
1414
<li><a href="/config?pwd=dzyyds">本地配置接口-动态本地</a></li>
@@ -29,16 +29,14 @@ <h1 id="drpysdrpy-node">drpyS(drpy-node)</h1>
2929
<li><a href="/cat/index.html">在线猫ds源主页</a></li>
3030
</ul>
3131
<h2 id="更新记录">更新记录</h2>
32+
<h3 id="20250818">20250818</h3>
33+
<p>更新至V1.2.14</p>
3234
<h3 id="20250817">20250817</h3>
3335
<p>更新至V1.2.13</p>
3436
<h3 id="20250815">20250815</h3>
3537
<p>更新至V1.2.12</p>
3638
<h3 id="20250814">20250814</h3>
3739
<p>更新至V1.2.11</p>
38-
<h3 id="20250813">20250813</h3>
39-
<p>更新至V1.2.10</p>
40-
<h3 id="20250812">20250812</h3>
41-
<p>更新至V1.2.9</p>
4240
<p><a href="docs/updateRecord.md">点此查看完整更新记录</a></p>
4341
<p><strong>注意事项</strong></p>
4442
<p>总是有人遇到各种奇葩问题,像什么没弹幕,访问/config/1服务马上崩溃等等,能自行解决最好,解决不了我建议你使用下方安装教程

spider/py/base/spider.py

Lines changed: 21 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,6 @@ def initEnv(self, env=None):
118118
self._ENV = env
119119
self.t4_api = env.get('proxyUrl')
120120

121-
122121
def getProxyUrl(self):
123122
"""
124123
获取本地代理地址
@@ -147,12 +146,12 @@ def regStr(self, src, reg, group=1):
147146
return src
148147

149148
def custom_RegexGetText(self, Text, RegexText, Index, find_all=False):
150-
"""改进版:支持返回所有匹配结果或单个匹配"""
151-
if not find_all:
152-
match = re.search(RegexText, Text, re.M | re.S)
153-
return match.group(Index) if match else ""
154-
else:
155-
return [m.group(Index) for m in re.finditer(RegexText, Text, re.M | re.S)]
149+
"""改进版:支持返回所有匹配结果或单个匹配"""
150+
if not find_all:
151+
match = re.search(RegexText, Text, re.M | re.S)
152+
return match.group(Index) if match else ""
153+
else:
154+
return [m.group(Index) for m in re.finditer(RegexText, Text, re.M | re.S)]
156155

157156
# cGroup = re.compile('[\U00010000-\U0010ffff]')
158157
# clean = cGroup.sub('',rsp.text)
@@ -161,34 +160,38 @@ def cleanText(self, src):
161160
src)
162161
return clean
163162

164-
def fetch(self, url, data=None, headers={}, cookies="", timeout=5):
165-
if data is None:
166-
data = {}
167-
rsp = requests.get(url, params=data, headers=headers, cookies=cookies, timeout=timeout, verify=False)
163+
def fetch(self, url, params=None, headers=None, cookies=None, timeout=5, verify=True, allow_redirects=True):
164+
rsp = requests.get(url, params=params, headers=headers, cookies=cookies, timeout=timeout, verify=verify,
165+
allow_redirects=allow_redirects)
168166
rsp.encoding = 'utf-8'
169167
return rsp
170168

171-
def post(self, url, data, headers={}, cookies={}, timeout=5):
172-
rsp = requests.post(url, data=data, headers=headers, cookies=cookies, timeout=timeout, verify=False)
169+
def post(self, url, data, headers=None, cookies=None, timeout=5, verify=True, allow_redirects=True):
170+
rsp = requests.post(url, data=data, headers=headers, cookies=cookies, timeout=timeout, verify=verify,
171+
allow_redirects=allow_redirects)
173172
rsp.encoding = 'utf-8'
174173
return rsp
175174

176-
def postJson(self, url, json, headers={}, cookies={}, timeout=5):
177-
rsp = requests.post(url, json=json, headers=headers, cookies=cookies, timeout=timeout, verify=False)
175+
def postJson(self, url, json, headers=None, cookies=None, timeout=5, verify=True, allow_redirects=True):
176+
rsp = requests.post(url, json=json, headers=headers, cookies=cookies, timeout=timeout, verify=verify,
177+
allow_redirects=allow_redirects)
178178
rsp.encoding = 'utf-8'
179179
return rsp
180180

181-
def postBinary(self, url, data: dict, boundary=None, headers={}, cookies={}, timeout=5):
181+
def postBinary(self, url, data: dict, boundary=None, headers=None, cookies=None, timeout=5, verify=True,
182+
allow_redirects=True):
182183
if boundary is None:
183184
boundary = f'--dio-boundary-{int(time.time())}'
185+
if headers is None:
186+
headers = {}
184187
headers['Content-Type'] = f'multipart/form-data; boundary={boundary}'
185-
# print(headers)
186188
fields = []
187189
for key, value in data.items():
188190
fields.append((key, (None, value, None)))
189191
m = encode_multipart_formdata(fields, boundary=boundary)
190192
data = m[0]
191-
rsp = requests.post(url, data=data, headers=headers, cookies=cookies, timeout=timeout, verify=False)
193+
rsp = requests.post(url, data=data, headers=headers, cookies=cookies, timeout=timeout, verify=verify,
194+
allow_redirects=allow_redirects)
192195
rsp.encoding = 'utf-8'
193196
return rsp
194197

spider/py/懒懒视频.py

Lines changed: 16 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -8,14 +8,17 @@
88
})
99
"""
1010

11-
import sys,time,uuid,json,urllib3
12-
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
13-
sys.path.append('..')
11+
# -*- coding: utf-8 -*-
12+
# 本资源来源于互联网公开渠道,仅可用于个人学习爬虫技术。
13+
# 严禁将其用于任何商业用途,下载后请于 24 小时内删除,搜索结果均来自源站,本人不承担任何责任。
14+
1415
try:
15-
# from base.spider import Spider as BaseSpider
1616
from base.spider import BaseSpider
1717
except ImportError:
1818
from t4.base.spider import BaseSpider
19+
import sys,time,uuid,json,urllib3
20+
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
21+
sys.path.append('..')
1922

2023
class Spider(BaseSpider):
2124
device_id,cms_host,parses = '','',{}
@@ -45,7 +48,7 @@ class Spider(BaseSpider):
4548

4649
def init(self, extend=''):
4750
try:
48-
config = json.loads(extend)
51+
config = json.loads(self.extend)
4952
except (json.JSONDecodeError, TypeError):
5053
config = {}
5154
host = config.get('host', 'http://llsp2.洛阳it商城.com').rstrip('/')
@@ -105,22 +108,23 @@ def detailContent(self, ids):
105108
return {'list': [data]}
106109

107110
def playerContent(self, flag, id, vipflags):
108-
def_header = {'User-Agent': ' Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/929.36 (KHTML, like Gecko) Chrome/86.0.3347.284 Safari/709.36'}
109-
jx,url = 0,''
111+
header = {
112+
'User-Agent': ' Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/929.36 (KHTML, like Gecko) Chrome/86.0.3347.284 Safari/709.36'}
113+
jx, url = 0, ''
110114
play_from, raw_url = id.split('@')
111-
parses = self.parses.get(play_from,[])
115+
parses = self.parses.get(play_from, [])
112116
for i in parses:
113117
try:
114-
data = self.fetch(f'{i}{raw_url}',headers=self.headers2, verify=False).json()
115-
data = data['data']
118+
data = self.fetch(f'{i}{raw_url}', headers=self.headers2, verify=False).json()
119+
data = data['data']
116120
play_url = data['url']
117121
if play_url.startswith('http'):
118122
url = play_url
119-
header = data.get('header',def_header)
123+
header = data.get('header', header)
120124
break
121125
except Exception:
122126
continue
123-
return { 'jx': jx, 'parse': '0', 'url': url, 'header': def_header}
127+
return {'jx': jx, 'parse': '0', 'url': url, 'header': header}
124128

125129
def timestamp(self):
126130
return str(int(time.time() * 1000))

0 commit comments

Comments (0)