Update app.py
app.py CHANGED
@@ -5,17 +5,24 @@ import asyncio
 import datetime
 import sys
 import traceback
-from aiohttp import web
+from aiohttp import web, ClientTimeout, TCPConnector
 from urllib.parse import parse_qs
 from cachetools import TTLCache
-from functools import partial
 
 # Create a TTL cache: at most 1000 items, each valid for one hour
-cache = TTLCache(maxsize=1000, ttl=3600)
-
-async def fetch_url(url, session):
-    async with session.get(url) as response:
-        return await response.text()
+cache = TTLCache(maxsize=1000, ttl=1800)
+
+async def fetch_url(url, session, max_retries=3):
+    for attempt in range(max_retries):
+        try:
+            async with session.get(url, timeout=ClientTimeout(total=60)) as response:
+                response.raise_for_status()
+                return await response.text()
+        except aiohttp.ClientError as e:
+            print(f"Attempt {attempt + 1} failed: {str(e)}", flush=True)
+            if attempt == max_retries - 1:
+                raise
+            await asyncio.sleep(1)  # wait 1 second before retrying
 
 async def extract_and_transform_proxies(input_text):
     try:
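The rewritten fetch_url retries up to max_retries times on aiohttp.ClientError, sleeps one second between attempts, gives each attempt a 60-second budget via ClientTimeout(total=60), and re-raises only the final failure. One caveat, depending on the aiohttp version: a total-timeout expiry can surface as asyncio.TimeoutError, which this except clause would not catch. Below is a minimal, self-contained sketch of the same retry pattern; fail_twice is a hypothetical stand-in for the network call:

    import asyncio

    calls = {"n": 0}

    async def fail_twice():
        # Hypothetical stand-in for session.get(): errors on the first two calls.
        calls["n"] += 1
        if calls["n"] < 3:
            raise ConnectionError(f"transient failure #{calls['n']}")
        return "payload"

    async def fetch_with_retries(max_retries=3):
        for attempt in range(max_retries):
            try:
                return await fail_twice()
            except ConnectionError as e:
                print(f"Attempt {attempt + 1} failed: {e}", flush=True)
                if attempt == max_retries - 1:
                    raise  # retries exhausted: surface the last error
                await asyncio.sleep(1)  # back off before the next attempt

    print(asyncio.run(fetch_with_retries()))  # succeeds on the third attempt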
@@ -120,7 +127,7 @@ async def handle_request(request):
 
     try:
         print(f"Fetching URL: {url}", flush=True)
-        async with aiohttp.ClientSession() as session:
+        async with aiohttp.ClientSession(connector=TCPConnector(ssl=False)) as session:
             input_text = await fetch_url(url, session)
             print(f"URL content length: {len(input_text)}", flush=True)
             result = await extract_and_transform_proxies(input_text)
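TCPConnector(ssl=False) makes aiohttp skip TLS certificate verification for every request on that session; convenient when upstream hosts serve broken or self-signed certificates, at the cost of man-in-the-middle protection. A standalone sketch of the same session setup, with example.com as a placeholder URL:

    import asyncio

    import aiohttp
    from aiohttp import ClientTimeout, TCPConnector

    async def main():
        # ssl=False disables certificate verification for the whole session.
        async with aiohttp.ClientSession(connector=TCPConnector(ssl=False)) as session:
            async with session.get("https://example.com",
                                   timeout=ClientTimeout(total=60)) as resp:
                resp.raise_for_status()
                print(f"URL content length: {len(await resp.text())}")

    asyncio.run(main())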
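On the cache itself: ttl is in seconds, so ttl=1800 now expires entries after 30 minutes even though the comment above it still says one hour, and this diff does not show where cache is read or written. A quick runnable illustration of TTLCache's expiry semantics, using a deliberately short TTL:

    import time

    from cachetools import TTLCache

    cache = TTLCache(maxsize=1000, ttl=2)  # 2-second TTL, just for demonstration
    cache["proxies"] = ["node-1", "node-2"]
    print("proxies" in cache)  # True: entry is still fresh
    time.sleep(3)
    print("proxies" in cache)  # False: expired entries read as absent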