Download large files in Python with requests
- 2024-11-26 08:37:00
- admin (original)
Problem description:
Requests is a really nice library. I'd like to use it to download big files (>1GB). The problem is it's not possible to keep the whole file in memory; I need to read it in chunks. The following code has a problem:
import requests


def DownloadFile(url):
    local_filename = url.split('/')[-1]
    r = requests.get(url)
    f = open(local_filename, 'wb')
    for chunk in r.iter_content(chunk_size=512 * 1024):
        if chunk:  # filter out keep-alive new chunks
            f.write(chunk)
    f.close()
    return
For some reason it doesn't work this way; it still loads the whole response into memory before saving it to a file.
Solution 1:
With the following streaming code, Python memory usage stays bounded regardless of the size of the downloaded file:
def download_file(url):
    local_filename = url.split('/')[-1]
    # NOTE the stream=True parameter below
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                # If you have chunk encoded response uncomment if
                # and set chunk_size parameter to None.
                # if chunk:
                f.write(chunk)
    return local_filename
Note that the number of bytes returned by iter_content is not exactly the chunk_size; it is expected to be a fairly arbitrary number that is often far bigger, and to differ on every iteration.
See body-content-workflow and Response.iter_content for further reference.
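To see this for yourself, a small sketch like the one below (the URL is just a placeholder) prints the size of each chunk as it arrives; the values are typically close to chunk_size but are not guaranteed to match it exactly:

import requests

# Sketch: print the size of each chunk that iter_content yields,
# to observe that it does not always equal chunk_size.
with requests.get("https://example.com/some/large/file", stream=True) as r:
    r.raise_for_status()
    for i, chunk in enumerate(r.iter_content(chunk_size=8192)):
        print(i, len(chunk))
        if i >= 5:  # only sample the first few chunks
            break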
Solution 2:
It's much easier if you use Response.raw and shutil.copyfileobj():
import requests
import shutil


def download_file(url):
    local_filename = url.split('/')[-1]
    with requests.get(url, stream=True) as r:
        with open(local_filename, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    return local_filename
This streams the file to disk without using excessive memory, and the code is simple.
Note: according to the documentation, Response.raw will not decode the gzip and deflate transfer-encodings, so you will need to handle that yourself.
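If you do need the decoded content with this approach, one option (a sketch, assuming the server applies a Content-Encoding such as gzip) is to ask the underlying urllib3 response to decode while it streams by setting r.raw.decode_content = True before copying:

import requests
import shutil


def download_file_decoded(url):
    # Hypothetical variant of the function above: ask urllib3 to decode
    # gzip/deflate Content-Encoding on the fly while still streaming to disk.
    local_filename = url.split('/')[-1]
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        r.raw.decode_content = True  # decode Content-Encoding during reads
        with open(local_filename, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    return local_filename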
Solution 3:
Not exactly what the OP was asking, but... it's ridiculously easy to do this with urllib:
from urllib.request import urlretrieve
url = 'http://mirror.pnl.gov/releases/16.04.2/ubuntu-16.04.2-desktop-amd64.iso'
dst = 'ubuntu-16.04.2-desktop-amd64.iso'
urlretrieve(url, dst)
Or this way, if you want to save it to a temporary file:
from urllib.request import urlopen
from shutil import copyfileobj
from tempfile import NamedTemporaryFile
url = 'http://mirror.pnl.gov/releases/16.04.2/ubuntu-16.04.2-desktop-amd64.iso'
with urlopen(url) as fsrc, NamedTemporaryFile(delete=False) as fdst:
    copyfileobj(fsrc, fdst)
I watched the whole process with:
watch 'ps -p 18647 -o pid,ppid,pmem,rsz,vsz,comm,args; ls -al *.iso'
I saw the file growing, but memory usage stayed at 17 MB. Am I missing something?
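For what it's worth, urlretrieve also accepts a reporthook callback, which makes it easy to watch the download progress block by block without any external tools; a minimal sketch, reusing the url and dst variables from above:

from urllib.request import urlretrieve

def progress(block_num, block_size, total_size):
    # Called once per block read; print a rough percentage on one line.
    downloaded = block_num * block_size
    if total_size > 0:
        print(f"\r{min(downloaded / total_size, 1.0):.1%}", end="")

urlretrieve(url, dst, reporthook=progress)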
Solution 4:
Your chunk size could be too large; have you tried dropping it, maybe to 1024 bytes at a time? (Also, you could use with to tidy up the syntax.)
def DownloadFile(url):
    local_filename = url.split('/')[-1]
    r = requests.get(url)
    with open(local_filename, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
    return
Incidentally, how are you deducing that the response has been loaded into memory?
It sounds as though Python isn't flushing the data to the file; based on other SO questions, you could try f.flush() and os.fsync() to force the file write and free the memory:
with open(local_filename, 'wb') as f:
    for chunk in r.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            f.write(chunk)
            f.flush()
            os.fsync(f.fileno())
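On the question of how to tell whether the response really ends up in memory: one rough check (a sketch, Unix-only, and the URL is a placeholder) is to compare the process's peak resident set size before and after calling the download function:

import resource
import requests

def peak_rss_kb():
    # Peak resident set size of this process (KiB on Linux).
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

before = peak_rss_kb()
DownloadFile("https://example.com/some/large/file.iso")  # function from the question
print(f"Peak RSS grew by roughly {peak_rss_kb() - before} KiB")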
Solution 5:
Use the wget module for Python instead. Here is a snippet:
import wget
wget.download(url)
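If you want to control the destination path, the module also takes an out argument; a small sketch (the URL and filename below are placeholders):

import wget

# wget.download streams to disk and shows a progress bar by default.
url = "https://example.com/some/large/file.iso"
filename = wget.download(url, out="large_file.iso")
print(filename)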
Solution 6:
Based on Roman's most upvoted comment above, here is my implementation, including a "download as" and a "retries" mechanism:
import logging
import os
import time
from urllib.parse import urlparse

import requests

logger = logging.getLogger(__name__)


def download(url: str, file_path='', attempts=2):
    """Downloads a URL content into a file (with large file support by streaming)

    :param url: URL to download
    :param file_path: Local file name to contain the data downloaded
    :param attempts: Number of attempts
    :return: New file path. Empty string if the download failed
    """
    if not file_path:
        file_path = os.path.realpath(os.path.basename(url))
    logger.info(f'Downloading {url} content to {file_path}')
    url_sections = urlparse(url)
    if not url_sections.scheme:
        logger.debug('The given url is missing a scheme. Adding http scheme')
        url = f'http://{url}'
        logger.debug(f'New url: {url}')
    for attempt in range(1, attempts + 1):
        try:
            if attempt > 1:
                time.sleep(10)  # 10 seconds wait time between downloads
            with requests.get(url, stream=True) as response:
                response.raise_for_status()
                with open(file_path, 'wb') as out_file:
                    for chunk in response.iter_content(chunk_size=1024 * 1024):  # 1MB chunks
                        out_file.write(chunk)
                logger.info('Download finished successfully')
                return file_path
        except Exception as ex:
            logger.error(f'Attempt #{attempt} failed with error: {ex}')
    return ''
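A possible usage sketch (the URL is a placeholder, and the basic logging setup is just one way to make the logger calls visible):

import logging

logging.basicConfig(level=logging.INFO)

path = download('https://example.com/some/large/file.iso', attempts=3)
if path:
    print(f'Saved to {path}')
else:
    print('Download failed')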
Solution 7:
Here is an additional approach for the async chunked-download use case, without reading all the file content into memory.
It means that both reading from the URL and writing to the file are done with asyncio libraries (aiohttp to read from the URL and aiofiles to write the file).
The following code should work on Python 3.7 and later.
Just edit the SRC_URL and DEST_FILE variables before copying and pasting.
import aiofiles
import aiohttp
import asyncio


async def async_http_download(src_url, dest_file, chunk_size=65536):
    async with aiofiles.open(dest_file, 'wb') as fd:
        async with aiohttp.ClientSession() as session:
            async with session.get(src_url) as resp:
                async for chunk in resp.content.iter_chunked(chunk_size):
                    await fd.write(chunk)


SRC_URL = "/path/to/url"
DEST_FILE = "/path/to/file/on/local/machine"

asyncio.run(async_http_download(SRC_URL, DEST_FILE))
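One thing to be aware of with this approach (not mentioned in the original answer, so treat it as an assumption about your needs): aiohttp applies a default total timeout of around five minutes per request, which a very large download can exceed. Passing an explicit ClientTimeout is one way to lift it:

import aiofiles
import aiohttp
import asyncio


async def async_http_download_no_timeout(src_url, dest_file, chunk_size=65536):
    # Variant of the function above with the overall request timeout disabled.
    timeout = aiohttp.ClientTimeout(total=None)
    async with aiofiles.open(dest_file, 'wb') as fd:
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.get(src_url) as resp:
                async for chunk in resp.content.iter_chunked(chunk_size):
                    await fd.write(chunk)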
Solution 8:
Here is yet another option for downloading large files. This lets you stop and continue later (press the Enter key to stop), and it picks up where you left off if your connection drops.
import datetime
import os
import requests
import threading as th

keep_going = True

def key_capture_thread():
    global keep_going
    input()
    keep_going = False

pkey_capture = th.Thread(target=key_capture_thread, args=(), name='key_capture_process', daemon=True).start()

def download_file(url, local_filepath):
    # assumptions:
    #   headers contain Content-Length:
    #   headers contain Accept-Ranges: bytes
    #   stream is not encoded (otherwise start bytes are not known, unless this is stored separately)
    chunk_size = 1048576  # 1MB
    # chunk_size = 8096  # 8KB
    # chunk_size = 1024  # 1KB
    decoded_bytes_downloaded_this_session = 0
    start_time = datetime.datetime.now()
    if os.path.exists(local_filepath):
        decoded_bytes_downloaded = os.path.getsize(local_filepath)
    else:
        decoded_bytes_downloaded = 0
    with requests.Session() as s:
        with s.get(url, stream=True) as r:
            # check for required headers:
            if 'Content-Length' not in r.headers:
                print('STOP: request headers do not contain Content-Length')
                return
            if ('Accept-Ranges', 'bytes') not in r.headers.items():
                print('STOP: request headers do not contain Accept-Ranges: bytes')
                with s.get(url) as r:
                    print(str(r.content, encoding='iso-8859-1'))
                return
        content_length = int(r.headers['Content-Length'])
        if decoded_bytes_downloaded >= content_length:
            print('STOP: file already downloaded. decoded_bytes_downloaded>=r.headers[Content-Length]; {}>={}'.format(decoded_bytes_downloaded, r.headers['Content-Length']))
            return
        if decoded_bytes_downloaded > 0:
            s.headers['Range'] = 'bytes={}-{}'.format(decoded_bytes_downloaded, content_length - 1)  # range is inclusive
            print('Retrieving byte range (inclusive) {}-{}'.format(decoded_bytes_downloaded, content_length - 1))
        with s.get(url, stream=True) as r:
            r.raise_for_status()
            with open(local_filepath, mode='ab') as fwrite:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    decoded_bytes_downloaded += len(chunk)
                    decoded_bytes_downloaded_this_session += len(chunk)
                    time_taken: datetime.timedelta = (datetime.datetime.now() - start_time)
                    seconds_per_byte = time_taken.total_seconds() / decoded_bytes_downloaded_this_session
                    remaining_bytes = content_length - decoded_bytes_downloaded
                    remaining_seconds = seconds_per_byte * remaining_bytes
                    remaining_time = datetime.timedelta(seconds=remaining_seconds)
                    # print updated statistics here
                    fwrite.write(chunk)
                    if not keep_going:
                        break

output_folder = '/mnt/HDD1TB/DownloadsBIG'
# url = 'https://file-examples.com/storage/fea508993d645be1b98bfcf/2017/10/file_example_JPG_100kB.jpg'
# url = 'https://file-examples.com/storage/fe563fce08645a90397f28d/2017/10/file_example_JPG_2500kB.jpg'
url = 'https://ftp.ncbi.nlm.nih.gov/blast/db/nr.00.tar.gz'
local_filepath = os.path.join(output_folder, os.path.split(url)[-1])

download_file(url, local_filepath)
Solution 9:
requests is fine, but how about a socket solution?
def stream_(host):
    import socket
    import ssl
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        with context.wrap_socket(sock, server_hostname=host) as wrapped_socket:
            wrapped_socket.connect((socket.gethostbyname(host), 443))
            wrapped_socket.send(
                ("GET / HTTP/1.1\r\n"
                 "Host:thiscatdoesnotexist.com\r\n"
                 "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\r\n"
                 "\r\n").encode())

            resp = b""
            # read one byte at a time until the blank line that ends the response headers
            while resp[-4:] != b"\r\n\r\n":
                resp += wrapped_socket.recv(1)
            else:
                resp = resp.decode()
                content_length = int("".join(
                    [tag.split(" ")[1] for tag in resp.split("\r\n")
                     if "content-length" in tag.lower()]))
            image = b""
            while content_length > 0:
                data = wrapped_socket.recv(2048)
                if not data:
                    print("EOF")
                    break
                image += data
                content_length -= len(data)
            with open("image.jpeg", "wb") as file:
                file.write(image)
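A usage sketch for the function above (the host is the one hard-coded in the request headers; whether that site still serves an image at / is not guaranteed):

# Fetch the response body over the raw TLS socket and write it to image.jpeg.
stream_("thiscatdoesnotexist.com")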