Merge branch 'dev' into fix-regex-appid-appsecret

Joe Goldin authored 2023-03-16 15:56:24 -07:00; committed by GitHub
commit c5bbd11414
7 changed files with 51 additions and 33 deletions

pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "streamrip"
-version = "1.9.5"
+version = "1.9.6"
 description = "A fast, all-in-one music ripper for Qobuz, Deezer, Tidal, and SoundCloud"
 authors = ["nathom <nathanthomas707@gmail.com>"]
 license = "GPL-3.0-only"

rip/cli.py

@@ -20,7 +20,7 @@ logging.basicConfig(level="WARNING")
 logger = logging.getLogger("streamrip")

 outdated = False
-newest_version = __version__
+newest_version: Optional[str] = None


 class DownloadCommand(Command):
@@ -126,6 +126,7 @@ class DownloadCommand(Command):
         self.line("<error>Must pass arguments. See </><cmd>rip url -h</cmd>.")

         update_check.join()
         if outdated:
             import re
             import subprocess
@@ -814,7 +815,15 @@ def is_outdated():
     global newest_version
     r = requests.get("https://pypi.org/pypi/streamrip/json").json()
    newest_version = r["info"]["version"]
-    outdated = newest_version != __version__
+
+    # Compare versions
+    curr_version_parsed = map(int, __version__.split("."))
+    newest_version_parsed = map(int, newest_version.split("."))
+    outdated = False
+    for c, n in zip(curr_version_parsed, newest_version_parsed):
+        outdated = c < n
+        if c != n:
+            break


 def main():
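
Note on the version check: the old `outdated = newest_version != __version__` flagged an update whenever the installed version merely differed from PyPI, including when the local copy was newer. The replacement compares the dotted components numerically, left to right, stopping at the first difference. A standalone sketch of the same logic (the function name and test values are illustrative, not part of the commit; it matches the loop when both versions have the same number of components):

    def is_version_outdated(current: str, remote: str) -> bool:
        # Tuples compare lexicographically, element by element, which matches
        # the first-difference-wins loop in the diff above.
        return tuple(map(int, current.split("."))) < tuple(map(int, remote.split(".")))

    assert is_version_outdated("1.9.5", "1.9.6")
    assert not is_version_outdated("1.9.10", "1.9.9")  # string comparison gets this wrong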

rip/config.toml

@@ -153,6 +153,9 @@ folder_format = "{albumartist} - {title} ({year}) [{container}] [{bit_depth}B-{sampling_rate}kHz]"
 track_format = "{tracknumber}. {artist} - {title}{explicit}"

 # Only allow printable ASCII characters in filenames.
 restrict_characters = false
+# Truncate the filename if it is greater than 120 characters
+# Setting this to false may cause downloads to fail on some systems
+truncate = true

 # Last.fm playlists are downloaded by searching for the titles of the tracks
@@ -169,4 +172,4 @@ progress_bar = "dainty"

 [misc]
 # Metadata to identify this config file. Do not change.
-version = "1.9.2"
+version = "1.9.6"

rip/core.py

@@ -216,6 +216,7 @@ class RipCore(list):
         concurrency = session["downloads"]["concurrency"]
         return {
             "restrict_filenames": filepaths["restrict_characters"],
+            "truncate_filenames": filepaths["truncate"],
             "parent_folder": session["downloads"]["folder"],
             "folder_format": filepaths["folder_format"],
             "track_format": filepaths["track_format"],
@@ -312,7 +313,8 @@ class RipCore(list):
             try:
                 item.download(**arguments)
                 for item_id in item.downloaded_ids:
-                    self.db.add([item_id])
+                    # Add items row by row
+                    self.db.add((item_id,))
             except NonStreamable as e:
                 e.print(item)
                 self.failed_db.add((item.client.source, item.type, item.id))
@@ -815,7 +817,7 @@ class RipCore(list):
         info = []
         words = re.compile(r"[\w\s]+")
-        title_tags = re.compile('title="([^"]+)"')
+        title_tags = re.compile(r'<a\s+href="[^"]+"\s+title="([^"]+)"')

         def essence(s):
             s = re.sub(r"&#\d+;", "", s)  # remove HTML entities
@@ -823,7 +825,7 @@
             return "".join(words.findall(s))

         def get_titles(s):
-            titles = title_tags.findall(s)[2:]
+            titles = title_tags.findall(s)  # [2:]
             for i in range(0, len(titles) - 1, 2):
                 info.append((essence(titles[i]), essence(titles[i + 1])))
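
Context for the two regex hunks: `title_tags` scrapes artist/title pairs out of raw last.fm playlist HTML. The old pattern matched every `title="…"` attribute on the page, which is why the first two matches were discarded with `[2:]`; the new pattern matches only `title` attributes on `<a href=…>` tags, so the slice is retired (kept behind a comment). A small illustration with made-up markup:

    import re

    html = '<meta title="page"><a href="/a" title="Artist"><a href="/t" title="Track">'
    old = re.compile('title="([^"]+)"')
    new = re.compile(r'<a\s+href="[^"]+"\s+title="([^"]+)"')
    assert old.findall(html) == ["page", "Artist", "Track"]  # stray first match
    assert new.findall(html) == ["Artist", "Track"]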
@@ -850,13 +852,14 @@ class RipCore(list):
         if remaining_tracks > 0:
             with concurrent.futures.ThreadPoolExecutor(max_workers=15) as executor:
-                last_page = int(remaining_tracks // 50) + int(
-                    remaining_tracks % 50 != 0
+                last_page = (
+                    1 + int(remaining_tracks // 50) + int(remaining_tracks % 50 != 0)
                 )
+                logger.debug("Fetching up to page %d", last_page)
                 futures = [
                     executor.submit(requests.get, f"{url}?page={page}")
-                    for page in range(1, last_page + 1)
+                    for page in range(2, last_page + 1)
                 ]

                 for future in concurrent.futures.as_completed(futures):
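
The pagination hunk fixes an off-by-one: the first page of results is fetched before this block, so `remaining_tracks` counts tracks beyond the first 50. The old code computed the number of extra pages but then requested pages starting from 1, re-fetching page 1 and never reaching the true last page. The new code computes the final page index directly and fetches from page 2 onward. A quick check of the arithmetic (helper name is illustrative, assuming 50 tracks per page):

    def pages_to_fetch(remaining_tracks: int) -> list:
        # final page index = 1 (already fetched) + full pages + 1 if a partial page remains
        last_page = 1 + remaining_tracks // 50 + (remaining_tracks % 50 != 0)
        return list(range(2, last_page + 1))

    assert pages_to_fetch(0) == []
    assert pages_to_fetch(50) == [2]
    assert pages_to_fetch(51) == [2, 3]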

streamrip/__init__.py

@@ -1,5 +1,5 @@
 """streamrip: the all in one music downloader."""

-__version__ = "1.9.5"
+__version__ = "1.9.6"

 from . import clients, constants, converter, downloadtools, media

streamrip/media.py

@@ -29,12 +29,7 @@ from pathvalidate import sanitize_filepath

 from . import converter
 from .clients import Client, DeezloaderClient
-from .constants import (
-    ALBUM_KEYS,
-    FLAC_MAX_BLOCKSIZE,
-    FOLDER_FORMAT,
-    TRACK_FORMAT,
-)
+from .constants import ALBUM_KEYS, FLAC_MAX_BLOCKSIZE, FOLDER_FORMAT, TRACK_FORMAT
 from .downloadtools import DownloadPool, DownloadStream
 from .exceptions import (
     InvalidQuality,
@@ -1516,7 +1511,9 @@ class Album(Tracklist, Media):
         parent_folder = kwargs.get("parent_folder", "StreamripDownloads")
         if self.folder_format:
             self.folder = self._get_formatted_folder(
-                parent_folder, restrict=kwargs.get("restrict_filenames", False)
+                parent_folder,
+                restrict=kwargs.get("restrict_filenames", False),
+                truncate=kwargs.get("truncate_filenames", True),
             )
         else:
             self.folder = parent_folder
@@ -1525,7 +1522,7 @@ class Album(Tracklist, Media):
         self.download_message()

-        cover_path = (
+        cover_path: Optional[str] = (
             _choose_and_download_cover(
                 self.cover_urls,
                 kwargs.get("embed_cover_size", "large"),
@@ -1660,7 +1657,9 @@ class Album(Tracklist, Media):
         logger.debug("Formatter: %s", fmt)
         return fmt

-    def _get_formatted_folder(self, parent_folder: str, restrict: bool = False) -> str:
+    def _get_formatted_folder(
+        self, parent_folder: str, restrict: bool = False, truncate: bool = True
+    ) -> str:
         """Generate the folder name for this album.

         :param parent_folder:
@@ -1675,8 +1674,8 @@
             self._get_formatter(),
             restrict=restrict,
         )
-        if len(formatted_folder) > 120:
-            formatted_folder = f"{formatted_folder[:120]}..."
+        if truncate and len(formatted_folder) > 120:
+            formatted_folder = formatted_folder[:120]

         return os.path.join(parent_folder, formatted_folder)
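
These hunks complete the path of the new `truncate` option: the config value becomes `truncate_filenames` in the download arguments (RipCore hunk above), which `Album.download` forwards to `_get_formatted_folder`. The truncation itself also changes: the old code cut to 120 characters and then appended "...", yielding a 123-character name; the new code cuts to 120 exactly, and only when truncation is enabled. A one-line sketch of the new behavior (function name is illustrative):

    def truncate_folder_name(name: str, truncate: bool = True, limit: int = 120) -> str:
        # hard cap at `limit` characters, no "..." suffix, skipped when truncate=False
        return name[:limit] if truncate and len(name) > limit else name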
@@ -1840,13 +1839,13 @@ class Playlist(Tracklist, Media):
                 self.append(Track(self.client, id=track["id"]))
         else:
             for track in tracklist:
-                # TODO: This should be managed with .m3u files and alike. Arbitrary
-                # tracknumber tags might cause conflicts if the playlist files are
-                # inside of a library folder
                 meta = TrackMetadata(track=track, source=self.client.source)
-                cover_url = get_cover_urls(track["album"], self.client.source)[
-                    kwargs.get("embed_cover_size", "large")
-                ]
+                cover_urls = get_cover_urls(track["album"], self.client.source)
+                cover_url = (
+                    cover_urls[kwargs.get("embed_cover_size", "large")]
+                    if cover_urls is not None
+                    else None
+                )

                 self.append(
                     Track(
@@ -2052,7 +2051,7 @@ class Artist(Tracklist, Media):
         else:
             self.folder = parent_folder

-        logger.debug("Artist folder: %s", folder)
+        logger.debug("Artist folder: %s", self.folder)

         logger.debug("Length of tracklist %d", len(self))
         logger.debug("Filters: %s", filters)
@@ -2322,7 +2321,7 @@ def _choose_and_download_cover(
     directory: str,
     keep_hires_cover: bool = True,
     downsize: Tuple[int, int] = (999999, 999999),
-) -> str:
+) -> Optional[str]:
     # choose optimal cover size and download it
     hashcode: str = hashlib.md5(
@@ -2346,12 +2345,16 @@
     ), f"Invalid cover size. Must be in {cover_urls.keys()}"

     embed_cover_url = cover_urls[preferred_size]
     logger.debug("Chosen cover url: %s", embed_cover_url)

     if not os.path.exists(temp_cover_path):
         # Sometimes a size isn't available. When this is the case, find
         # the first `not None` url.
         if embed_cover_url is None:
-            embed_cover_url = next(filter(None, cover_urls.values()))
+            urls = tuple(filter(None, cover_urls.values()))
+            if len(urls) == 0:
+                return None
+            embed_cover_url = urls[0]
         logger.debug("Downloading cover from url %s", embed_cover_url)

streamrip/utils.py

@@ -306,7 +306,7 @@ def get_container(quality: int, source: str) -> str:
     return "MP3"


-def get_cover_urls(resp: dict, source: str) -> dict:
+def get_cover_urls(resp: dict, source: str) -> Optional[dict]:
     """Parse a response dict containing cover info according to the source.

     :param resp:
@@ -318,7 +318,7 @@ def get_cover_urls(resp: dict, source: str) -> dict:
     if source == "qobuz":
         cover_urls = resp["image"]
-        cover_urls["original"] = cover_urls["large"].replace("600", "org")
+        cover_urls["original"] = "org".join(cover_urls["large"].rsplit('600', 1))
         return cover_urls

     if source == "tidal":