Commit df12e8f
Reverted to 57278e1
DeekshithSH committed May 11, 2024
1 parent f95c0a5 commit df12e8f
Showing 2 changed files with 66 additions and 25 deletions.
34 changes: 25 additions & 9 deletions WebStreamer/server/stream_routes.py
@@ -74,20 +74,36 @@ async def media_streamer(request: web.Request, db_id: str):

     file_size = file_id.file_size
 
-    try:
-        offset = request.http_range.start or 0
-        limit = request.http_range.stop or file_size
-        if (limit > file_size) or (offset < 0) or (limit < offset):
-            raise ValueError("range not in acceptable format")
-    except ValueError:
+    if range_header:
+        from_bytes, until_bytes = range_header.replace("bytes=", "").split("-")
+        from_bytes = int(from_bytes)
+        until_bytes = int(until_bytes) if until_bytes else file_size - 1
+    else:
+        from_bytes = request.http_range.start or 0
+        until_bytes = (request.http_range.stop or file_size) - 1
+
+    logging.debug(f"from_bytes: {from_bytes} until_bytes: {until_bytes}")
+    if from_bytes < 10 and until_bytes > 200:
+        await db.increment_dl_count(file_id.org_id)
+
+    if (until_bytes > file_size) or (from_bytes < 0) or (until_bytes < from_bytes):
         return web.Response(
             status=416,
             body="416: Range not satisfiable",
             headers={"Content-Range": f"bytes */{file_size}"},
         )
 
+    chunk_size = 1024 * 1024
+    until_bytes = min(until_bytes, file_size - 1)
+
+    offset = from_bytes - (from_bytes % chunk_size)
+    first_part_cut = from_bytes - offset
+    last_part_cut = until_bytes % chunk_size + 1
+
+    req_length = until_bytes - from_bytes + 1
+    part_count = math.ceil(until_bytes / chunk_size) - math.floor(offset / chunk_size)
     body = tg_connect.yield_file(
-        file_id, offset, limit, multi_clients
+        file_id, offset, first_part_cut, last_part_cut, part_count, chunk_size, multi_clients
     )
 
     mime_type = file_id.mime_type
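The alignment math in this hunk deserves a worked example. Here is a minimal standalone sketch of the same arithmetic (plain Python; the 10 MiB file size and the byte range are illustrative assumptions, not values from the commit), checking that the trimmed first part, the full middle parts, and the trimmed last part add back up to the requested length:

```python
import math

# Illustrative numbers (assumed, not from the commit): a 10 MiB file
# and a range request that starts and ends mid-chunk.
file_size = 10 * 1024 * 1024
from_bytes, until_bytes = 1_500_000, 3_500_000
chunk_size = 1024 * 1024

# Same arithmetic as media_streamer above.
offset = from_bytes - (from_bytes % chunk_size)  # 1_048_576, chunk-aligned start
first_part_cut = from_bytes - offset             # 451_424 bytes skipped in part 1
last_part_cut = until_bytes % chunk_size + 1     # 354_273 bytes kept from the last part
req_length = until_bytes - from_bytes + 1        # 2_000_001 bytes requested
part_count = math.ceil(until_bytes / chunk_size) - math.floor(offset / chunk_size)  # 3

# Trimmed first part + full middle parts + trimmed last part must equal
# the requested length (this check assumes part_count >= 2).
served = (chunk_size - first_part_cut) + (part_count - 2) * chunk_size + last_part_cut
assert served == req_length
```

The same from_bytes/until_bytes/req_length values then feed the Content-Range and Content-Length headers changed in the next hunk.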
@@ -105,8 +121,8 @@ async def media_streamer(request: web.Request, db_id: str):
         body=body,
         headers={
             "Content-Type": f"{mime_type}",
-            "Content-Range": f"bytes {offset}-{limit}/{file_size}",
-            "Content-Length": str(limit - offset),
+            "Content-Range": f"bytes {from_bytes}-{until_bytes}/{file_size}",
+            "Content-Length": str(req_length),
             "Content-Disposition": f'{disposition}; filename="{file_name}"',
             "Accept-Ranges": "bytes",
         },
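For the header-driven path above, a small sketch of how the parsing and the 416 guard behave (standalone Python; the parse_range helper and the header strings are hypothetical, written only to mirror the commit's logic):

```python
FILE_SIZE = 10 * 1024 * 1024  # assumed 10 MiB file

def parse_range(range_header: str, file_size: int):
    # Mirrors the parsing in media_streamer: "bytes=START-END", END optional.
    from_bytes, until_bytes = range_header.replace("bytes=", "").split("-")
    return int(from_bytes), int(until_bytes) if until_bytes else file_size - 1

print(parse_range("bytes=0-1023", FILE_SIZE))    # (0, 1023)
print(parse_range("bytes=1048576-", FILE_SIZE))  # (1048576, 10485759), open-ended

from_bytes, until_bytes = parse_range("bytes=1048576-", FILE_SIZE)
if (until_bytes > FILE_SIZE) or (from_bytes < 0) or (until_bytes < from_bytes):
    headers = {"Content-Range": f"bytes */{FILE_SIZE}"}  # 416 path
else:
    headers = {                                          # 206 path
        "Content-Range": f"bytes {from_bytes}-{until_bytes}/{FILE_SIZE}",
        "Content-Length": str(until_bytes - from_bytes + 1),
    }
print(headers)
```

An open-ended "bytes=START-" request defaults until_bytes to file_size - 1, which is why the 206 headers can always state an exact end offset.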
57 changes: 41 additions & 16 deletions WebStreamer/utils/custom_dl.py
@@ -3,11 +3,9 @@
 import asyncio
 import logging
 # import heapq
-import math
 from typing import Dict, Union
-from WebStreamer.bot import StreamBot, work_loads
+from WebStreamer.bot import work_loads
 from pyrogram import Client, utils, raw
-from pyrogram.errors import FloodWait
 from .file_properties import get_file_ids
 from pyrogram.session import Session, Auth
 from pyrogram.errors import AuthBytesInvalid
@@ -166,7 +164,10 @@ async def yield_file(
         self,
         file_id: FileId,
         offset: int,
-        limit: int,
+        first_part_cut: int,
+        last_part_cut: int,
+        part_count: int,
+        chunk_size: int,
         multi_clients
     ) -> Union[str, None]:
         """
@@ -177,23 +178,47 @@
         client = multi_clients[file_id.index]
         work_loads[file_id.index] += 1
         logging.debug(f"Starting to yielding file with client {file_id.index}.")
-        try:
-            if offset < 0:
-                if file_id.file_size == 0:
-                    raise ValueError("Negative offsets are not supported for file ids, pass a Message object instead")
-                chunks = math.ceil(file_id.file_size / 1024 / 1024)
-                offset += chunks
-            async for chunk in client.get_file(file_id, file_id.file_size, limit, offset):
-                yield chunk
+        media_session = await self.generate_media_session(client, file_id)
+
+        current_part = 1
+
+        location = await self.get_location(file_id)
+
+        try:
+            r = await media_session.invoke(
+                raw.functions.upload.GetFile(
+                    location=location, offset=offset, limit=chunk_size
+                ),
+            )
+            if isinstance(r, raw.types.upload.File):
+                while True:
+                    chunk = r.bytes
+                    if not chunk:
+                        break
+                    elif part_count == 1:
+                        yield chunk[first_part_cut:last_part_cut]
+                    elif current_part == 1:
+                        yield chunk[first_part_cut:]
+                    elif current_part == part_count:
+                        yield chunk[:last_part_cut]
+                    else:
+                        yield chunk
+
+                    current_part += 1
+                    offset += chunk_size
+
+                    if current_part > part_count:
+                        break
+
+                    r = await media_session.invoke(
+                        raw.functions.upload.GetFile(
+                            location=location, offset=offset, limit=chunk_size
+                        ),
+                    )
+        except (TimeoutError, AttributeError):
+            pass
-        except FloodWait as e:
-            logging.error(f"{e} for client {client.username}")
-            raise e
         finally:
+            logging.debug(f"Finished yielding file with {current_part} parts.")
             work_loads[file_id.index] -= 1
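The four-way slice in this loop can be exercised without a Telegram session. A minimal simulation under stated assumptions: yield_range is a hypothetical stand-in for yield_file, an in-memory bytes object replaces the upload.GetFile responses, and the test ranges are chosen away from exact chunk boundaries:

```python
import math

def yield_range(data: bytes, from_bytes: int, until_bytes: int, chunk_size: int = 1024):
    # Same alignment math as media_streamer, same slicing as yield_file.
    offset = from_bytes - (from_bytes % chunk_size)
    first_part_cut = from_bytes - offset
    last_part_cut = until_bytes % chunk_size + 1
    part_count = math.ceil(until_bytes / chunk_size) - math.floor(offset / chunk_size)

    current_part = 1
    while True:
        chunk = data[offset:offset + chunk_size]  # stand-in for one GetFile response
        if not chunk:
            break
        elif part_count == 1:
            yield chunk[first_part_cut:last_part_cut]
        elif current_part == 1:
            yield chunk[first_part_cut:]
        elif current_part == part_count:
            yield chunk[:last_part_cut]
        else:
            yield chunk
        current_part += 1
        offset += chunk_size
        if current_part > part_count:
            break

data = bytes(range(256)) * 40  # 10_240 bytes of test data
assert b"".join(yield_range(data, 700, 3000)) == data[700:3001]  # multi-part range
assert b"".join(yield_range(data, 100, 200)) == data[100:201]    # single-part range
```

Each iteration fetches one fixed-size, chunk-aligned part; only the first and last parts are trimmed, so every middle chunk streams through untouched.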


@@ -204,4 +229,4 @@ async def clean_cache(self) -> None:
         while True:
             await asyncio.sleep(self.clean_timer)
             self.cached_file_ids.clear()
-            logging.debug("Cleaned the cache")
\ No newline at end of file
+            logging.debug("Cleaned the cache")
