From 7012cf7c0d44259b8a5a4757712b071a41a9b66c Mon Sep 17 00:00:00 2001 From: fernandog Date: Wed, 6 Dec 2017 14:25:00 -0200 Subject: [PATCH 01/60] Bump USER_AGENT version 0.1.21 (dev) --- medusa/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/common.py b/medusa/common.py index 1f4bde847a..e0a4cf3d93 100644 --- a/medusa/common.py +++ b/medusa/common.py @@ -46,7 +46,7 @@ # To enable, set SPOOF_USER_AGENT = True SPOOF_USER_AGENT = False INSTANCE_ID = str(uuid.uuid1()) -VERSION = '0.1.20' +VERSION = '0.1.21' USER_AGENT = u'Medusa/{version} ({system}; {release}; {instance})'.format( version=VERSION, system=platform.system(), release=platform.release(), instance=INSTANCE_ID) From 45c755c3fd38ac35b3ad9e22d3883bacb188d95a Mon Sep 17 00:00:00 2001 From: adaur Date: Thu, 7 Dec 2017 02:20:00 -0500 Subject: [PATCH 02/60] Add missing mappings, fixes #3427 --- medusa/providers/torrent/html/yggtorrent.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/medusa/providers/torrent/html/yggtorrent.py b/medusa/providers/torrent/html/yggtorrent.py index c3dd8a0206..9cfd466db6 100644 --- a/medusa/providers/torrent/html/yggtorrent.py +++ b/medusa/providers/torrent/html/yggtorrent.py @@ -45,6 +45,8 @@ def __init__(self): # Miscellaneous Options self.translation = { + 'minute': 'minute', + 'minutes': 'minutes', 'heure': 'hour', 'heures': 'hours', 'jour': 'day', From 23aed14d396ce13ca8fa38f356ce54520689276b Mon Sep 17 00:00:00 2001 From: adaur Date: Thu, 7 Dec 2017 02:28:16 -0500 Subject: [PATCH 03/60] Add seconds also And years could be either "an" or "ans" but we cannot know since the tracker is not old enough --- medusa/providers/torrent/html/yggtorrent.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/medusa/providers/torrent/html/yggtorrent.py b/medusa/providers/torrent/html/yggtorrent.py index 9cfd466db6..e3ba052b4e 100644 --- a/medusa/providers/torrent/html/yggtorrent.py +++ b/medusa/providers/torrent/html/yggtorrent.py @@ -45,6 +45,8 @@ def __init__(self): # Miscellaneous Options self.translation = { + 'seconde': 'second', + 'secondes': 'seconds', 'minute': 'minute', 'minutes': 'minutes', 'heure': 'hour', @@ -53,6 +55,7 @@ def __init__(self): 'jours': 'days', 'mois': 'month', 'an': 'year', + 'ans': 'years', 'années': 'years' } From 72089e6c07f8ed0e0aadd1082edf50647d3cd6e7 Mon Sep 17 00:00:00 2001 From: adaur Date: Thu, 7 Dec 2017 02:36:45 -0500 Subject: [PATCH 04/60] =?UTF-8?q?Year=20could=20be=20either=20"an"=20or=20?= =?UTF-8?q?"ann=C3=A9e"?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit But we can't know yet - the website is too young --- medusa/providers/torrent/html/yggtorrent.py | 1 + 1 file changed, 1 insertion(+) diff --git a/medusa/providers/torrent/html/yggtorrent.py b/medusa/providers/torrent/html/yggtorrent.py index e3ba052b4e..452bb239c4 100644 --- a/medusa/providers/torrent/html/yggtorrent.py +++ b/medusa/providers/torrent/html/yggtorrent.py @@ -56,6 +56,7 @@ def __init__(self): 'mois': 'month', 'an': 'year', 'ans': 'years', + 'année': 'year', 'années': 'years' } From d2d3c32cb7faf544f733f593231dc342dea8454c Mon Sep 17 00:00:00 2001 From: bobbysteel Date: Thu, 7 Dec 2017 08:04:13 +0000 Subject: [PATCH 05/60] Alternate docker repo. 
(#3425) --- readme.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/readme.md b/readme.md index 08a5971032..822544b946 100644 --- a/readme.md +++ b/readme.md @@ -89,11 +89,12 @@ MediaInfo is optional, but highly recommended since it increases the number of s #### Docker images -The [linuxserver.io](https://www.linuxserver.io) team have kindly produced docker images for X86, armhf and aarch64 platforms. - +The [linuxserver.io](https://www.linuxserver.io) team have kindly produced docker images for X86, armhf and aarch64 platforms. This is built from an older intermediary Dockerfile. * X86 - [Dockerhub](https://hub.docker.com/r/linuxserver/medusa/), [Github](https://github.com/linuxserver/docker-medusa) * armhf - [Dockerhub](https://hub.docker.com/r/lsioarmhf/medusa/), [Github](https://github.com/linuxserver/docker-medusa-armhf) * aarch64 - [Dockerhub](https://hub.docker.com/r/lsioarmhf/medusa-aarch64/), [Github](https://github.com/linuxserver/docker-medusa-arm64) +Now that the Dockerfile is native as of Dec 6 2017, there's also a direct build available here [Dockerhub](https://hub.docker.com/r/bobbysteel/medusa/), built directly from this repo on a daily basis. + From a7d90e2028ea72e87d9ebc9e6e5936601393a63b Mon Sep 17 00:00:00 2001 From: Fernando Date: Thu, 7 Dec 2017 14:21:44 -0200 Subject: [PATCH 06/60] Rename rtejd logo (#3432) --- .../images/network/rtejr.png | Bin 1 file changed, 0 insertions(+), 0 deletions(-) rename "static/images/network/rt\303\251jr.png" => static/images/network/rtejr.png (100%) diff --git "a/static/images/network/rt\303\251jr.png" b/static/images/network/rtejr.png similarity index 100% rename from "static/images/network/rt\303\251jr.png" rename to static/images/network/rtejr.png From 187b1e68bd76b751a56d2e4d894078a10ac33643 Mon Sep 17 00:00:00 2001 From: adaur Date: Thu, 7 Dec 2017 12:48:55 -0500 Subject: [PATCH 07/60] Remove continue preventing a snatch If pubdate is not translated --- medusa/providers/torrent/html/yggtorrent.py | 1 - 1 file changed, 1 deletion(-) diff --git a/medusa/providers/torrent/html/yggtorrent.py b/medusa/providers/torrent/html/yggtorrent.py index 452bb239c4..11c89b606f 100644 --- a/medusa/providers/torrent/html/yggtorrent.py +++ b/medusa/providers/torrent/html/yggtorrent.py @@ -147,7 +147,6 @@ def parse(self, data, mode): translated = self.translation.get(pubdate_match.group(2)) if not translated: log.exception('No translation mapping available for value: {0}', pubdate_match.group(2)) - continue else: pubdate_raw = '{0} {1}'.format(pubdate_match.group(1), translated) pubdate = self.parse_pubdate(pubdate_raw, human_time=True) From cb5be6e06108a2c9e7cac3c35a3797d0e37d5373 Mon Sep 17 00:00:00 2001 From: Fernando Date: Thu, 7 Dec 2017 16:16:37 -0200 Subject: [PATCH 08/60] Add network information while adding new show (#3430) * Add network information while adding new show * review --- medusa/server/web/home/add_shows.py | 3 ++- static/js/add-shows/new-show.js | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/medusa/server/web/home/add_shows.py b/medusa/server/web/home/add_shows.py index 6612792203..806a169a29 100644 --- a/medusa/server/web/home/add_shows.py +++ b/medusa/server/web/home/add_shows.py @@ -100,7 +100,8 @@ def searchIndexersForShowName(search_term, lang=None, indexer=None): for i, shows in iteritems(results): final_results.extend({(indexerApi(i).name, i, indexerApi(i).config['show_url'], int(show['id']), - show['seriesname'].encode('utf-8'), show['firstaired']) for show in shows}) + 
show['seriesname'].encode('utf-8'), show['firstaired'] or 'N/A', + show['network'].encode('utf-8') or 'N/A') for show in shows}) lang_id = indexerApi().config['langabbv_to_id'][lang] return json.dumps({'results': final_results, 'langid': lang_id}) diff --git a/static/js/add-shows/new-show.js b/static/js/add-shows/new-show.js index a73bddc908..e3907ce4cd 100644 --- a/static/js/add-shows/new-show.js +++ b/static/js/add-shows/new-show.js @@ -119,9 +119,9 @@ MEDUSA.addShows.newShow = function() { var startDate = new Date(obj[5]); var today = new Date(); if (startDate > today) { - resultStr += ' (will debut on ' + obj[5] + ')'; + resultStr += ' (will debut on ' + obj[5] + ' on ' + obj[6] + ')'; } else { - resultStr += ' (started on ' + obj[5] + ')'; + resultStr += ' (started on ' + obj[5] + ' on ' + obj[6] + ')'; } } From 11678453735b098485146268580c538930485c97 Mon Sep 17 00:00:00 2001 From: fernandog Date: Fri, 8 Dec 2017 08:06:04 -0200 Subject: [PATCH 09/60] Fix TMDB not returning `networks` in show search --- medusa/server/web/home/add_shows.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/server/web/home/add_shows.py b/medusa/server/web/home/add_shows.py index 806a169a29..51761e3b0f 100644 --- a/medusa/server/web/home/add_shows.py +++ b/medusa/server/web/home/add_shows.py @@ -101,7 +101,7 @@ def searchIndexersForShowName(search_term, lang=None, indexer=None): for i, shows in iteritems(results): final_results.extend({(indexerApi(i).name, i, indexerApi(i).config['show_url'], int(show['id']), show['seriesname'].encode('utf-8'), show['firstaired'] or 'N/A', - show['network'].encode('utf-8') or 'N/A') for show in shows}) + show.get('network', '').encode('utf-8') or 'N/A') for show in shows}) lang_id = indexerApi().config['langabbv_to_id'][lang] return json.dumps({'results': final_results, 'langid': lang_id}) From 6b503835d0b03788db164dc2681c0bacf770b2dd Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 9 Dec 2017 09:47:03 -0500 Subject: [PATCH 10/60] Fix translations for ascending order by period length --- medusa/providers/torrent/html/yggtorrent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/providers/torrent/html/yggtorrent.py b/medusa/providers/torrent/html/yggtorrent.py index 11c89b606f..3cd1aa3b7a 100644 --- a/medusa/providers/torrent/html/yggtorrent.py +++ b/medusa/providers/torrent/html/yggtorrent.py @@ -55,8 +55,8 @@ def __init__(self): 'jours': 'days', 'mois': 'month', 'an': 'year', - 'ans': 'years', 'année': 'year', + 'ans': 'years', 'années': 'years' } From d545c8895c3d6b38e243d53fcad01e5f0e59a2fb Mon Sep 17 00:00:00 2001 From: bobbysteel Date: Sat, 9 Dec 2017 16:57:48 +0000 Subject: [PATCH 11/60] Fix bad link to changes.MD --- readme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/readme.md b/readme.md index 822544b946..cdcb5a1a4f 100644 --- a/readme.md +++ b/readme.md @@ -79,7 +79,7 @@ A full list can be found here: [Link](https://github.com/pymedusa/Medusa/wiki/Me | Edge| last 2 versions| last 2 versions| last 2 versions #### News and Changelog -[news.md and CHANGES.md have moved to a separate repo, click here](https://cdn.pymedusa.com) +[news.md and CHANGES.md have moved to a separate repo, click here](https://github.com/pymedusa/medusa.github.io/tree/master/news) #### External dependencies This product uses [MediaInfo](http://mediaarea.net/MediaInfo) library, Copyright (c) 2002-2016 [MediaArea.net SARL](mailto:Info@MediaArea.net) From abd49268dbc5fb797f0dc526b801471c9f5b6dde Mon Sep 17 00:00:00 2001 
From: Labrys Date: Sat, 9 Dec 2017 17:21:24 -0500 Subject: [PATCH 12/60] Fix style adapter repr fallback --- medusa/logger/adapters/style.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/medusa/logger/adapters/style.py b/medusa/logger/adapters/style.py index 6cd36839e3..3c91c1c68c 100644 --- a/medusa/logger/adapters/style.py +++ b/medusa/logger/adapters/style.py @@ -42,8 +42,9 @@ def __str__(self): return self.msg except Exception: log.error( - 'BraceMessage string formatting failed. Using representation instead.\n{1}'.format( - ''.join(traceback.format_stack()) + 'BraceMessage string formatting failed. ' + 'Using representation instead.\n{0}'.format( + ''.join(traceback.format_stack()), ) ) return repr(self) From 34bd8b6987153c30e6bc7d065683c570c0c3140f Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 9 Dec 2017 22:56:54 -0500 Subject: [PATCH 13/60] Fix Issue report format --- medusa/issue_submitter.py | 45 +++++++++++++++++++++++---------------- 1 file changed, 27 insertions(+), 18 deletions(-) diff --git a/medusa/issue_submitter.py b/medusa/issue_submitter.py index 9b9dcba955..fec1894f13 100644 --- a/medusa/issue_submitter.py +++ b/medusa/issue_submitter.py @@ -18,6 +18,21 @@ log = BraceAdapter(logging.getLogger(__name__)) log.logger.addHandler(logging.NullHandler()) +ISSUE_REPORT = """ +### INFO +**Python Version**: `{python_version}` +**Operating System**: `{os}` +**Locale**: `{locale}` +**Branch**: [{branch}](../tree/{branch}) +**Database**: `{db_major_version}.{db_minor_version}` +**Commit**: {org}/{repo}@{commit} +**Link to Log**: {log_url} +### ERROR +{log_line} +--- +_STAFF NOTIFIED_: @{org}/support @{org}/moderators +""" + class IssueSubmitter(object): """GitHub issue submitter.""" @@ -62,24 +77,18 @@ def create_issue_data(logline, log_url): commit = app.CUR_COMMIT_HASH base_url = '../blob/{commit}'.format(commit=commit) if commit else None - return """ - ### INFO - **Python Version**: `{python_version} - **Operating System**: `{os}` - **Locale**: `{locale}` - **Branch**: [{branch}](../tree/{branch}) - **Database**: `{db_major_version}.{db_minor_version}` - **Commit**: {org}/{repo}@{commit} - **Link to Log**: {log_url} - ### ERROR - {log_line} - --- - _STAFF NOTIFIED_: @{org}/support @{org}/moderators - """.format( - python_version=sys.version[:120].replace('\n', ''), os=platform.platform(), locale=locale_name, - branch=app.BRANCH, org=app.GIT_ORG, repo=app.GIT_REPO, commit=commit, - db_major_version=cur_branch_major_db_version, db_minor_version=cur_branch_minor_db_version, - log_url=log_url or '**No Log available**', log_line=logline.format_to_html(base_url=base_url), + return ISSUE_REPORT.format( + python_version=sys.version[:120].replace('\n', ''), + os=platform.platform(), + locale=locale_name, + branch=app.BRANCH, + org=app.GIT_ORG, + repo=app.GIT_REPO, + commit=commit, + db_major_version=cur_branch_major_db_version, + db_minor_version=cur_branch_minor_db_version, + log_url=log_url or '**No Log available**', + log_line=logline.format_to_html(base_url=base_url), ) @classmethod From 99ac95a2523d9424c633ab37391df587428adf4c Mon Sep 17 00:00:00 2001 From: Dario Date: Sun, 10 Dec 2017 18:56:45 +0100 Subject: [PATCH 14/60] Fix AlphaRatio provider login by using https URL --- medusa/providers/torrent/html/alpharatio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/providers/torrent/html/alpharatio.py b/medusa/providers/torrent/html/alpharatio.py index b2994459be..ccc824265a 100644 --- a/medusa/providers/torrent/html/alpharatio.py 
+++ b/medusa/providers/torrent/html/alpharatio.py @@ -36,7 +36,7 @@ def __init__(self): self.password = None # URLs - self.url = 'http://alpharatio.cc' + self.url = 'https://alpharatio.cc' self.urls = { 'login': urljoin(self.url, 'login.php'), 'search': urljoin(self.url, 'torrents.php'), From 4e1dad99ac36b3f8bc8ed3cae4c3d6c250dbc38e Mon Sep 17 00:00:00 2001 From: Alexis Tyler Date: Mon, 11 Dec 2017 09:00:36 +1030 Subject: [PATCH 15/60] Add real patch to series (#3275) * add real patch to series * add missing import * fix flake warning * Added series.save_to_db() to save changes to the db. --- medusa/server/api/v2/series.py | 52 ++++++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/medusa/server/api/v2/series.py b/medusa/server/api/v2/series.py index 851ed265aa..26e3b71e5e 100644 --- a/medusa/server/api/v2/series.py +++ b/medusa/server/api/v2/series.py @@ -1,10 +1,19 @@ # coding=utf-8 """Request handler for series and episodes.""" - -from medusa.server.api.v2.base import BaseRequestHandler +import logging + +from medusa.server.api.v2.base import ( + BaseRequestHandler, + BooleanField, + StringField, + iter_nested_items, + set_nested_value +) from medusa.tv.series import Series, SeriesIdentifier from tornado.escape import json_decode +log = logging.getLogger(__name__) + class SeriesHandler(BaseRequestHandler): """Series request handler.""" @@ -81,7 +90,7 @@ def post(self, series_slug=None, path_param=None): def patch(self, series_slug, path_param=None): """Patch series.""" if not series_slug: - return self._method_not_allowed('Patching multiple series are not allowed') + return self._method_not_allowed('Patching multiple series is not allowed') identifier = SeriesIdentifier.from_slug(series_slug) if not identifier: @@ -96,18 +105,31 @@ def patch(self, series_slug, path_param=None): if indexer_id is not None and indexer_id != identifier.id: return self._bad_request('Conflicting series identifier') - done = {} - for key, value in data.items(): - if key == 'pause': - if value is True: - series.pause() - elif value is False: - series.unpause() - else: - return self._bad_request('Invalid request body: pause') - done[key] = value - - return self._ok(done) + accepted = {} + ignored = {} + patches = { + 'config.dvdOrder': BooleanField(series, 'dvd_order'), + 'config.flattenFolders': BooleanField(series, 'flatten_folders'), + 'config.scene': BooleanField(series, 'scene'), + 'config.paused': BooleanField(series, 'paused'), + 'config.location': StringField(series, '_location'), + 'config.airByDate': BooleanField(series, 'air_by_date'), + 'config.subtitlesEnabled': BooleanField(series, 'subtitles') + } + for key, value in iter_nested_items(data): + patch_field = patches.get(key) + if patch_field and patch_field.patch(series, value): + set_nested_value(accepted, key, value) + else: + set_nested_value(ignored, key, value) + + # Save patched attributes in db. + series.save_to_db() + + if ignored: + log.warning('Series patch ignored %r', ignored) + + self._ok(data=accepted) def delete(self, series_slug, path_param=None): """Delete the series.""" From 088c6eaada435b76127f583c65279413b345ec8c Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Mon, 11 Dec 2017 08:10:17 -0500 Subject: [PATCH 16/60] Feature/cleanpp (#3447) * Add iterable support to `PostProcessor._delete` This makes `_delete` more robust since `files` can now be any iterable. If the `files` argument is text, it gets special attention to avoid iterating through each character. 
* Make `PostProcessor._run_extra_scripts` more DRY * Refactor cur_file -> filename * Flake8 * Flake8 import order --- medusa/post_processor.py | 98 +++++++++++++++++++--------------------- 1 file changed, 47 insertions(+), 51 deletions(-) diff --git a/medusa/post_processor.py b/medusa/post_processor.py index 3646ec5eaf..cf92f62a03 100644 --- a/medusa/post_processor.py +++ b/medusa/post_processor.py @@ -47,6 +47,7 @@ ShowDirectoryNotFoundException, ) from medusa.helpers import is_subtitle, verify_freespace +from medusa.helpers.utils import generate from medusa.name_parser.parser import ( InvalidNameException, InvalidShowException, @@ -316,38 +317,36 @@ def _delete(self, files, associated_files=False): :param files: path(s) to file(s) that should be deleted :param associated_files: True to delete all files which differ only by extension, False to leave them """ - if not files: - return - - # Check if files is a list, if not, make it one - if not isinstance(files, list): - file_list = [files] - else: - file_list = files + gen_files = generate(files or []) + files = list(gen_files) # also delete associated files, works only for 1 file - if associated_files and len(file_list) == 1: - file_list += self.list_associated_files(file_list[0], subfolders=True) + if associated_files and len(files) == 1: + files += self.list_associated_files(files[0], subfolders=True) - for cur_file in file_list: - if os.path.isfile(cur_file): - self.log(u'Deleting file: {0}'.format(cur_file), logger.DEBUG) + for filename in files: + if os.path.isfile(filename): + self.log(u'Deleting file: {0}'.format(filename), logger.DEBUG) # check first the read-only attribute - file_attribute = os.stat(cur_file)[0] + file_attribute = os.stat(filename)[0] if not file_attribute & stat.S_IWRITE: # File is read-only, so make it writeable - self.log(u'Read only mode on file {0}. Will try to make it writeable'.format - (cur_file), logger.DEBUG) + self.log(u'Read only mode on file {0}. ' + u'Will try to make it writeable'.format(filename), + logger.DEBUG) try: - os.chmod(cur_file, stat.S_IWRITE) + os.chmod(filename, stat.S_IWRITE) except OSError as error: - self.log(u'Cannot change permissions of {filename}. Error: {msg}'.format - (filename=cur_file, msg=error), logger.WARNING) + self.log( + u'Cannot change permissions for {path}. 
' + u'Error: {msg}'.format(path=filename, msg=error), + logger.WARNING + ) - os.remove(cur_file) + os.remove(filename) # do the library update for synoindex - notifiers.synoindex_notifier.deleteFile(cur_file) + notifiers.synoindex_notifier.deleteFile(filename) @staticmethod def rename_associated_file(new_path, new_basename, filepath): @@ -907,53 +906,50 @@ def _run_extra_scripts(self, ep_obj): if not app.EXTRA_SCRIPTS: return - file_path = self.file_path - if isinstance(file_path, text_type): - try: - file_path = file_path.encode(app.SYS_ENCODING) - except UnicodeEncodeError: - # ignore it - pass + def _attempt_to_encode(item, _encoding): + if isinstance(item, text_type): + try: + item = item.encode(_encoding) + except UnicodeEncodeError: + pass # ignore it + finally: + return item - ep_location = ep_obj.location - if isinstance(ep_location, text_type): - try: - ep_location = ep_location.encode(app.SYS_ENCODING) - except UnicodeEncodeError: - # ignore it - pass + encoding = app.SYS_ENCODING + + file_path = _attempt_to_encode(self.file_path, encoding) + ep_location = _attempt_to_encode(ep_obj.location, encoding) + indexer_id = str(ep_obj.series.indexerid) + season = str(ep_obj.season) + episode = str(ep_obj.episode) + airdate = str(ep_obj.airdate) for cur_script_name in app.EXTRA_SCRIPTS: - if isinstance(cur_script_name, text_type): - try: - cur_script_name = cur_script_name.encode(app.SYS_ENCODING) - except UnicodeEncodeError: - # ignore it - pass + cur_script_name = _attempt_to_encode(cur_script_name, encoding) # generate a safe command line string to execute the script and provide all the parameters script_cmd = [piece for piece in re.split(r'(\'.*?\'|".*?"| )', cur_script_name) if piece.strip()] script_cmd[0] = os.path.abspath(script_cmd[0]) self.log(u'Absolute path to script: {0}'.format(script_cmd[0]), logger.DEBUG) - script_cmd += [ - ep_location, file_path, str(ep_obj.series.indexerid), - str(ep_obj.season), str(ep_obj.episode), str(ep_obj.airdate) - ] + script_cmd += [ep_location, file_path, indexer_id, season, episode, airdate] # use subprocess to run the command and capture output self.log(u'Executing command: {0}'.format(script_cmd)) try: - p = subprocess.Popen( - script_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, cwd=app.PROG_DIR + process = subprocess.Popen( + script_cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + cwd=app.PROG_DIR ) - out, _ = p.communicate() + out, _ = process.communicate() self.log(u'Script result: {0}'.format(out), logger.DEBUG) - except Exception as e: - self.log(u'Unable to run extra_script: {0!r}'.format(e)) + except Exception as error: + self.log(u'Unable to run extra_script: {0!r}'.format(error)) def flag_kodi_clean_library(self): """Set flag to clean Kodi's library if Kodi is enabled.""" From 0d61bb40c96ee76cc16ca98a9ee68bec4ac85432 Mon Sep 17 00:00:00 2001 From: Labrys Date: Wed, 13 Dec 2017 20:27:20 -0500 Subject: [PATCH 17/60] Add `asf` and `wma` to media extensions --- medusa/helper/common.py | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/medusa/helper/common.py b/medusa/helper/common.py index 33fc516d99..d6de44e425 100644 --- a/medusa/helper/common.py +++ b/medusa/helper/common.py @@ -99,8 +99,32 @@ 599: 'Network connect timeout error', } media_extensions = [ - '3gp', 'avi', 'divx', 'dvr-ms', 'f4v', 'flv', 'img', 'iso', 'm2ts', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', - 'ogm', 'ogv', 'rmvb', 'tp', 'ts', 'vob', 'webm', 
'wmv', 'wtv', + '3gp', + 'asf', + 'avi', + 'divx', + 'dvr-ms', + 'f4v', + 'flv', + 'img', + 'iso', + 'm2ts', + 'm4v', + 'mkv', + 'mov', + 'mp4', + 'mpeg', + 'mpg', + 'ogm', + 'ogv', + 'rmvb', + 'tp', + 'ts', + 'vob', + 'webm', + 'wma', + 'wmv', + 'wtv', ] subtitle_extensions = ['ass', 'idx', 'srt', 'ssa', 'sub', 'mpl', 'smi'] timeFormat = '%A %I:%M %p' From 669feeb3fd28e3112e93accb396e368618766e7c Mon Sep 17 00:00:00 2001 From: "A. Manual Goldstein" <34269555+amanualgoldstein@users.noreply.github.com> Date: Wed, 13 Dec 2017 22:20:10 -0800 Subject: [PATCH 18/60] Adds support for self-hosted Slack instances. (#3426) Removes the webhook construction using 'SLACK_WEBHOOK_URL' and requires that the plugin-defined URI is the absolute URL to the incoming webhook. Any Slack-compatible incoming webhook will now support announcements. --- medusa/notifiers/slack.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/medusa/notifiers/slack.py b/medusa/notifiers/slack.py index 09c4bc5cee..c3d6e80672 100644 --- a/medusa/notifiers/slack.py +++ b/medusa/notifiers/slack.py @@ -17,8 +17,6 @@ class Notifier(object): """Slack notifier class.""" - SLACK_WEBHOOK_URL = 'https://hooks.slack.com/services/' - def notify_snatch(self, ep_name, is_proper): """ Send a notification to a Slack channel when an episode is snatched. @@ -84,18 +82,17 @@ def test_notify(self, slack_webhook): def _send_slack(self, message=None, webhook=None): """Send the http request using the Slack webhook.""" - app.SLACK_WEBHOOK = webhook or app.SLACK_WEBHOOK - slack_webhook = self.SLACK_WEBHOOK_URL + app.SLACK_WEBHOOK.replace(self.SLACK_WEBHOOK_URL, '') + webhook = webhook or app.SLACK_WEBHOOK log.info('Sending slack message: {message}', {'message': message}) - log.info('Sending slack message to url: {url}', {'url': slack_webhook}) + log.info('Sending slack message to url: {url}', {'url': webhook}) if isinstance(message, six.text_type): message = message.encode('utf-8') headers = {b'Content-Type': b'application/json'} try: - r = requests.post(slack_webhook, data=json.dumps(dict(text=message, username='MedusaBot')), headers=headers) + r = requests.post(webhook, data=json.dumps(dict(text=message, username='MedusaBot')), headers=headers) r.raise_for_status() except Exception: log.exception('Error Sending Slack message') From 05beec6d9967f2d7dc5c4da02fca115d273e26c0 Mon Sep 17 00:00:00 2001 From: neoatomic Date: Thu, 14 Dec 2017 15:57:26 +0100 Subject: [PATCH 19/60] Added paramount network logo (#3466) --- static/images/network/paramount network.png | Bin 0 -> 1838 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 static/images/network/paramount network.png diff --git a/static/images/network/paramount network.png b/static/images/network/paramount network.png new file mode 100644 index 0000000000000000000000000000000000000000..f82dbfff0bf3f76703a4544577c1536b114d94e1 GIT binary patch literal 1838 zcmV+}2hsS6P)Px#1ZP1_K>z@;j|==^1poj532;bRa{vGi!vFvd!vV){sAK>D02y>eSaefwW^{L9 za%BK;VQFr3E^cLXAT%y8E-^PVZl7ZS00y2(L_t(oN9~t+P*rsl$IBwkEX($^y==AJ zsb*Sc%2uwlwrS1P3Qv@a(Qv@a(V-c9P%68T|`zLmIyu8=vk!^0A zb#(2tbS5366PU3^w%9!MP(aoWx3sTLM7;NR(3h4GraqZL*;RV71fA8_5RFj?EHLp7 z&aT#zC)}hf(|UC4I{Vb}M$h!hYlnk2ftU996xO!AYU-;c2Y3JBG-re3$45i@2L^sT z6Q%WZvYF?1)ZrS2O0!uQ(1h8R3fnCSXg;u$IWZy zoOgMo_p16{-|r_?xY{uGq%u0Dk4xZB|E$xI1?pRB>FbjbGuFuSYFh0Ei9K)Z)zaB( z?3ID7hqt;>R|sdWm6NnP4IIUK+z?_Ea%d1B(h+%zX~TL}(M2B}30ZVFxUau| ztt?@MO>96`6;pN9g*b=A(x`&k;<`3oSyJDIyuDk=#xQV5Iv-P1Slb#?REOWqojouw 
zsc%Q&ufS|oU;nqKF5;^^q0}{{JgKzd6;od(TWLc(4aTtMMGN=&*(a3dRJS-Mm-Q(7 z*gq#D@|Au4298Ph8ak_8RYQXSfjk>k_|w_w`!+hieK6ouL_Xs(B&WKgNBP_N*vGcJ z-?-XtuXpCf0tL3}+q*e0wg{u5js)jG206qY-|mk7&u61II3#r|RoF?dXo@YWTe#1+ zTS@KtV-{r+6_!`yl3d0cyLwggcY1iFRm2z9&)wvL-|$&}KDME~hbTl9)_P}DO8t~V zg8(4W)CQ%(mGjd^o=R1JcwUW2{cEPaT}sspCfzQX@f6-%1E(bUm1$H0-@ zr)cehVQx*!naF}S_WKVE43I|pK3~!L&{ns$Zso7%Vkp1P9@Y1!Bgtf7Ru!f&h}kVJ zDcHepl;sH}3yi&?O(95~ltP<;77hnxGjqs0XZSFS2nNB+tZeehy!_??e}YHqd74;S zZyBCPMd3a1qkqUd-yj4yt-O(H?&wjm#i7^K*>lHw#}zhl&7HmEhXgJ%3pNYNscr3k zXp3uITQ_z-Fbl@=izePwT7jZ<)b&n*y1A3&Ajj`@@d$z=^oEyH&m$0Ku+Kzgn(&* zL9TR;duqiugEc%Vh6wy_69;KgY8YK?9+p+rOqq_XtN$L5*OEvvM1URf&Y?gfk920= zUhfQY$2?Ihi47t(gaoMRiCdv2Zn+A5bhLh`KJ$p%kx!*M<6?-w*fvy;uV7~}SOo1Q z7GXQx)94dwdfsLiB+u^lViM@vC$hiA4+#Ok8e{5@#DoYCCmf>D)dbiw%dBJRGqnx` z)D=Zvrj#|%R4fgj;U)URnW&W);wYGN(M7n9ge#v6{Qa0a$Ok}xdB~&|w-dpH2;c{s zHKwpuO+eAog=f;q>ylC~?Rcx`!ljlG+uc$bphCje9y^=$Mj41+>jVS_? ci Date: Thu, 14 Dec 2017 15:57:36 +0100 Subject: [PATCH 20/60] Fix download link for YGG provider (#3461) * Fix download link for YGG provider * Fix daily search --- medusa/providers/torrent/html/yggtorrent.py | 30 +++++++++++++++------ 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/medusa/providers/torrent/html/yggtorrent.py b/medusa/providers/torrent/html/yggtorrent.py index 3cd1aa3b7a..176ac04d9a 100644 --- a/medusa/providers/torrent/html/yggtorrent.py +++ b/medusa/providers/torrent/html/yggtorrent.py @@ -25,6 +25,8 @@ class YggtorrentProvider(TorrentProvider): """Yggtorrent Torrent provider.""" + torrent_id = re.compile(r'\/(\d+)-') + def __init__(self): """Initialize the class.""" super(YggtorrentProvider, self).__init__('Yggtorrent') @@ -34,10 +36,12 @@ def __init__(self): self.password = None # URLs - self.url = 'https://ww1.yggtorrent.com/' + self.url = 'https://ww1.yggtorrent.com' self.urls = { 'login': urljoin(self.url, 'user/login'), 'search': urljoin(self.url, 'engine/search'), + 'daily': urljoin(self.url, 'torrents/today'), + 'download': urljoin(self.url, 'engine/download_torrent?id={0}') } # Proper Strings @@ -94,8 +98,13 @@ def search(self, search_strings, age=0, ep_obj=None, **kwargs): log.debug('Search string: {search}', {'search': search_string}) - search_params['q'] = re.sub(r'[()]', '', search_string) - response = self.session.get(self.urls['search'], params=search_params) + search_params['q'] = re.sub(r'[()]', '', search_string) + url = self.urls['search'] + else: + search_params['per_page'] = 50 + url = self.urls['daily'] + + response = self.session.get(url, params=search_params) if not response or not response.text: log.debug('No data returned from provider') continue @@ -130,12 +139,16 @@ def parse(self, data, mode): if len(cells) < 5: continue try: - title = cells[0].find('a', class_='torrent-name').get_text(strip=True) - download_url = cells[0].find_all('a')[2]['href'] + info = cells[0].find('a', class_='torrent-name') + title = info.get_text(strip=True) + download_url = info.get('href') if not (title and download_url): continue - seeders = try_int(cells[4].get_text(strip=True), 1) + torrent_id = YggtorrentProvider.torrent_id.search(download_url) + download_url = self.urls['download'].format(torrent_id.group(1)) + + seeders = try_int(cells[4].get_text(strip=True), 0) leechers = try_int(cells[5].get_text(strip=True), 0) torrent_size = cells[3].get_text() @@ -151,7 +164,8 @@ def parse(self, data, mode): pubdate_raw = '{0} {1}'.format(pubdate_match.group(1), translated) pubdate = self.parse_pubdate(pubdate_raw, human_time=True) else: - 
log.warning('Could not translate publishing date with value: {0}', cells[2].get_text(strip=True)) + log.warning('Could not translate publishing date with value: {0}', + cells[2].get_text(strip=True)) # Filter unseeded torrent if seeders < min(self.minseed, 1): @@ -197,7 +211,7 @@ def login(self): log.warning('Invalid username or password. Check your settings') return False - if 'Mon compte' not in response.text: + if 'Mes torrents' not in response.text: log.warning('Unable to login to provider') return False From aed573b757681693e378cdc425f0a89d310f9f5b Mon Sep 17 00:00:00 2001 From: supergonkas Date: Fri, 15 Dec 2017 15:54:36 +0000 Subject: [PATCH 21/60] Add search abilities to BinSearch (#2657) * Add search abbilities to BinSearch * Update binsearch.py * Code clean up, comments, regex fixup and add search for both "server" modes * Update binsearch.py * Update binsearch.py * Simplify regex * Update binsearch.py * Simplify title regex * Simplify size regex * Fix download url * Refactor torrent_table -> table and torrent_rows -> rows * Add labels from headers * Remove one level of nesting * Fix and simplify rows * Fix no-result detection * Fix no-result detection * Make attributes a dictionary of label: column * Refactor torrent_size -> size_field * Refactor attributes -> col * Fix double-quotes * Fix missing process_column_headers function * Fix "IndexError: list index out of range" * Update binsearch.py * Added binsearch snatching result using the post. * Fix daily search * Patched download_file to also support the method POST. * Cleaned up binsearch.py. Unfortunately couldn't test binsearch as the service is down. * Fixed import. * Added sending binseach nzb data to sabnzbd using post multipart/form. * Check if the nzb downloaded from binsearch is correct, before sending it to sab. * Added docstrings for the methods. * Added download_nzb_for_post to nzb_provider.py, with a NotImplement exception. * Sanitize nzb name before sending it to nzb client. * Truncate post log for multipart/form-data. * Fix regex. * Proper Strings * Use the post url provided by the search. * Fix the get size for binsearch results. * Move the title cleanup to a staticmethod. * Add regex for removing piece of text before the title. * Fixed cutting of strings at the end. As rstrip strips on charactor not on the string! Added tests for a number of binsearch title cleanups. * Flakes. * Moved the download_nzb_for_post download logic to snatch_selection in core.py * reused the SearchResult attributes extra_info and result_type. A result type of 'nzbdata' will make sab and nzbget send the nzbdata instead of the url. * Refactored attributes to snakecase. * Flake2. * Remove lines. * Add additional test cases * Ups * Removed unicode chars. * Unused import. * Additional newline crap. * Flake on the test module. * Catch regex search group exception. Fixed the last two unittests. * Allign continious line. * Removed the overwrite. 
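As the bullets above describe, BinSearch now serves its NZBs through a POST request, so the snatch code fetches the raw NZB body up front and hands it to the download client as 'nzbdata'. A minimal sketch of that consumer side, assuming a SearchResult whose provider is the BinSearch provider from this patch (the real wiring is in the medusa/search/core.py hunk further down):

    # Sketch only: how a snatch path can consume the POST-based BinSearch flow.
    def fetch_binsearch_nzb(result):
        provider = result.provider
        # Providers exposing download_nzb_for_post() deliver the NZB XML
        # directly, so the client is fed the data instead of a URL.
        if hasattr(provider, 'download_nzb_for_post'):
            result.result_type = 'nzbdata'
            nzb_data = provider.download_nzb_for_post(result)
            if not nzb_data:
                return None  # no (or an invalid) NZB came back
            result.extra_info.append(nzb_data)
            return nzb_data
        return None
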
--- medusa/classes.py | 16 +- medusa/clients/nzb/nzbget.py | 6 +- medusa/clients/nzb/sab.py | 60 ++++- medusa/helpers/__init__.py | 10 +- medusa/providers/nzb/binsearch.py | 315 ++++++++++++++++++------ medusa/search/core.py | 18 +- medusa/search/queue.py | 2 +- tests/providers/test_binsearch_title.py | 101 ++++++++ 8 files changed, 426 insertions(+), 102 deletions(-) create mode 100644 tests/providers/test_binsearch_title.py diff --git a/medusa/classes.py b/medusa/classes.py index 7a97830627..a29c5551a2 100644 --- a/medusa/classes.py +++ b/medusa/classes.py @@ -51,7 +51,7 @@ def __init__(self, episodes=None, provider=None): self.url = u'' # used by some providers to store extra info associated with the result - self.extraInfo = [] + self.extra_info = [] # quality of the release self.quality = Quality.UNKNOWN @@ -93,7 +93,7 @@ def __init__(self, episodes=None, provider=None): self.content = None # Result type like: nzb, nzbdata, torrent - self.resultType = u'' + self.result_type = u'' # Store the parse result, as it might be useful for other information later on. self.parsed_result = None @@ -157,7 +157,7 @@ def __str__(self): my_string = u'{0} @ {1}\n'.format(self.provider.name, self.url) my_string += u'Extra Info:\n' - for extra in self.extraInfo: + for extra in self.extra_info: my_string += u' {0}\n'.format(extra) my_string += u'Episodes:\n' @@ -172,7 +172,7 @@ def __str__(self): return my_string def file_name(self): - return u'{0}.{1}'.format(self.episodes[0].pretty_name(), self.resultType) + return u'{0}.{1}'.format(self.episodes[0].pretty_name(), self.result_type) def add_result_to_cache(self, cache): """Cache the item if needed.""" @@ -200,15 +200,15 @@ class NZBSearchResult(SearchResult): def __init__(self, episodes, provider=None): super(NZBSearchResult, self).__init__(episodes, provider=provider) - self.resultType = u'nzb' + self.result_type = u'nzb' class NZBDataSearchResult(SearchResult): - """NZB result where the actual NZB XML data is stored in the extraInfo.""" + """NZB result where the actual NZB XML data is stored in the extra_info.""" def __init__(self, episodes, provider=None): super(NZBDataSearchResult, self).__init__(episodes, provider=provider) - self.resultType = u'nzbdata' + self.result_type = u'nzbdata' class TorrentSearchResult(SearchResult): @@ -216,7 +216,7 @@ class TorrentSearchResult(SearchResult): def __init__(self, episodes, provider=None): super(TorrentSearchResult, self).__init__(episodes, provider=provider) - self.resultType = u'torrent' + self.result_type = u'torrent' class AllShowsListUI(object): # pylint: disable=too-few-public-methods diff --git a/medusa/clients/nzb/nzbget.py b/medusa/clients/nzb/nzbget.py index 64317a0406..f573f89151 100644 --- a/medusa/clients/nzb/nzbget.py +++ b/medusa/clients/nzb/nzbget.py @@ -118,8 +118,8 @@ def sendNZB(nzb, proper=False): dupescore += 10 nzbcontent64 = None - if nzb.resultType == 'nzbdata': - data = nzb.extraInfo[0] + if nzb.result_type == 'nzbdata': + data = nzb.extra_info[0] nzbcontent64 = standard_b64encode(data) log.info('Sending NZB to NZBget') @@ -141,7 +141,7 @@ def sendNZB(nzb, proper=False): nzbcontent64 ) else: - if nzb.resultType == 'nzb': + if nzb.result_type == 'nzb': if not nzb.provider.login(): return False diff --git a/medusa/clients/nzb/sab.py b/medusa/clients/nzb/sab.py index 24ae06383e..b794b7b204 100644 --- a/medusa/clients/nzb/sab.py +++ b/medusa/clients/nzb/sab.py @@ -13,6 +13,7 @@ import logging from medusa import app +from medusa.helper.common import sanitize_filename from 
medusa.logger.adapters.style import BraceAdapter from medusa.session.core import MedusaSafeSession @@ -26,9 +27,10 @@ def send_nzb(nzb): """ - Sends an NZB to SABnzbd via the API. + Dispatch method for sending an nzb to sabnzbd using it's api. - :param nzb: The NZBSearchResult object to send to SAB + :param nzb: nzb SearchResult object + :return: result of the communication with sabnzbd (True/False) """ session.params.update({ 'output': 'json', @@ -49,14 +51,28 @@ def send_nzb(nzb): # set up a dict with the URL params in it params = { 'cat': category, - 'mode': 'addurl', - 'name': nzb.url, } if nzb.priority: params['priority'] = 2 if app.SAB_FORCED else 1 + if nzb.result_type == 'nzbdata' and nzb.extra_info: + return send_nzb_post(params, nzb) + else: + return send_nzb_get(params, nzb) + + +def send_nzb_get(params, nzb): + """ + Sends an NZB to SABnzbd via the API using a get request. + + :param nzb: The NZBSearchResult object to send to SAB + :return: result of the communication with sabnzbd (True/False) + """ + log.info('Sending NZB to SABnzbd') + + params.update({'name': nzb.url, 'mode': 'addurl'}) url = urljoin(app.SAB_HOST, 'api') response = session.get(url, params=params, verify=False) @@ -72,6 +88,42 @@ def send_nzb(nzb): return result +def send_nzb_post(params, nzb): + """ + Sends an NZB to SABnzbd via the API. + + :param params: Prepared post parameters. + :param nzb: The NZBSearchResult object to send to SAB + :return: result of the communication with sabnzbd (True/False) + """ + + log.info('Sending NZB to SABnzbd using the post multipart/form data.') + url = urljoin(app.SAB_HOST, 'api') + params['mode'] = 'addfile' + files = { + 'name': nzb.extra_info[0] + } + + data = session.params + data.update(params) + data['nzbname'] = sanitize_filename(nzb.name) + + # Empty session.params, because else these are added to the url. + session.params = {} + + response = session.post(url, data=data, files=files, verify=False) + + try: + data = response.json() + except ValueError: + log.info('Error connecting to sab, no data returned') + else: + log.debug('Result text from SAB: {0}', data) + result, text = _check_sab_response(data) + del text + return result + + def _check_sab_response(jdata): """ Check response from SAB diff --git a/medusa/helpers/__init__.py b/medusa/helpers/__init__.py index 391abbdb1b..63a86f0451 100644 --- a/medusa/helpers/__init__.py +++ b/medusa/helpers/__init__.py @@ -1283,11 +1283,13 @@ def get_url(url, post_data=None, params=None, headers=None, timeout=30, session= return getattr(resp, response_type, None) -def download_file(url, filename, session, headers=None, **kwargs): +def download_file(url, filename, session, method='GET', data=None, headers=None, **kwargs): """Download a file specified. :param url: Source URL :param filename: Target file on filesystem + :param method: Specity the http method. 
Currently only GET and POST supported + :param data: sessions post data :param session: request session to use :param headers: override existing headers in request session :return: True on success, False on failure @@ -1296,9 +1298,9 @@ def download_file(url, filename, session, headers=None, **kwargs): hooks, cookies, verify, proxies = request_defaults(**kwargs) with session as s: - resp = s.get(url, allow_redirects=True, stream=True, - verify=verify, headers=headers, cookies=cookies, - hooks=hooks, proxies=proxies) + resp = s.request(method, url, data=data, allow_redirects=True, stream=True, + verify=verify, headers=headers, cookies=cookies, + hooks=hooks, proxies=proxies) if not resp: log.debug( diff --git a/medusa/providers/nzb/binsearch.py b/medusa/providers/nzb/binsearch.py index 258d9294c8..4ecc587dca 100644 --- a/medusa/providers/nzb/binsearch.py +++ b/medusa/providers/nzb/binsearch.py @@ -6,120 +6,279 @@ import logging import re -from time import time - +from os.path import join from medusa import tv +from medusa.bs4_parser import BS4Parser +from medusa.helper.common import convert_size, sanitize_filename +from medusa.helpers import download_file +from medusa.logger.adapters.style import BraceAdapter from medusa.providers.nzb.nzb_provider import NZBProvider - from requests.compat import urljoin -log = logging.getLogger(__name__) -log.addHandler(logging.NullHandler()) +log = BraceAdapter(logging.getLogger(__name__)) +log.logger.addHandler(logging.NullHandler()) class BinSearchProvider(NZBProvider): """BinSearch Newznab provider.""" + size_regex = re.compile(r'size: (\d+\.\d+\xa0\w{2}), parts', re.I) + title_regex = re.compile(r'\"([^\"]+)"', re.I) + title_reqex_clean = re.compile(r'^[ \d_]+ (.+)') + title_regex_rss = re.compile(r'- \"([^\"]+)"', re.I) + nzb_check_segment = re.compile(r' tag of the RSS feed - :return: A tuple containing two strings representing title and URL respectively + :param result: Nzb SearchResult object. + :return: The content of the nzb file if successful else None. """ - title = item.get('description') - if title: - if self.descTitleStart.match(title): - title = self.descTitleStart.sub('', title) - title = self.descTitleEnd.sub('', title) - title = title.replace('+', '.') - else: - # just use the entire title, looks hard/impossible to parse - title = item.get('title') - if title: - for titleCleaner in self.titleCleaners: - title = titleCleaner.sub('', title) - - url = item.get('link') - if url: - url = url.replace('&', '&') - - return title, url - - def update_cache(self): - """Updade provider cache.""" - # check if we should update - if not self.should_update(): - return - - # clear cache - self._clear_cache() - - # set updated - self.updated = time() - - cl = [] - for group in ['alt.binaries.hdtv', 'alt.binaries.hdtv.x264', 'alt.binaries.tv', 'alt.binaries.tvseries']: - search_params = {'max': 50, 'g': group} - data = self.get_rss_feed(self.provider.urls['rss'], search_params)['entries'] - if not data: - log.debug('No data returned from provider') - continue - - for item in data: - ci = self._parse_item(item) - if ci: - cl.append(ci) - - if cl: - cache_db_con = self._get_db() - cache_db_con.mass_action(cl) - - def _check_auth(self, data): - return data if data['feed'] and data['feed']['title'] != 'Invalid Link' else None + if not self.login(): + return False + + # For now to separate the url and the post data, where splitting it with a pipe. 
+ url, data = result.url.split('|') + + data = { + data.split('=')[1]: 'on', + 'action': 'nzb' + } + + log.info('Downloading {result} from {provider} at {url} and data {data}', + {'result': result.name, 'provider': self.name, 'url': result.url, 'data': data}) + + verify = False if self.public else None + + result = self.session.post(url, data=data, headers=self.session.headers, + verify=verify, hooks={}, allow_redirects=True).content + + # Validate that the result has the content of a valid nzb. + if not BinSearchProvider.nzb_check_segment.search(result): + log.info('Result returned from BinSearch was not a valid nzb') + return None + + return result + + def _get_size(self, item): + """ + Get result size. + + Overwrite this, as the default _get_size() from nzb_provider isn't working for us. + :param item: + :return: size in bytes or -1 + """ + return item.get('size', -1) provider = BinSearchProvider() diff --git a/medusa/search/core.py b/medusa/search/core.py index b6638797ed..4604c7e440 100644 --- a/medusa/search/core.py +++ b/medusa/search/core.py @@ -77,7 +77,7 @@ def _download_result(result): # save the data to disk try: with open(file_name, u'w') as fileOut: - fileOut.write(result.extraInfo[0]) + fileOut.write(result.extra_info[0]) helpers.chmod_as_parent(file_name) @@ -120,8 +120,18 @@ def snatch_episode(result): if result.url.startswith(u'magnet:') or result.url.endswith(u'.torrent'): result.resultType = u'torrent' + # Binsearch.info requires you to download the nzb through a post. + if hasattr(result.provider, 'download_nzb_for_post'): + result.result_type = 'nzbdata' + nzb_data = result.provider.download_nzb_for_post(result) + result.extra_info.append(nzb_data) + + if not nzb_data: + log.warning('Error trying to get the nzb data from provider binsearch, no data returned') + return False + # NZBs can be sent straight to SAB or saved to disk - if result.resultType in (u'nzb', u'nzbdata'): + if result.result_type in (u'nzb', u'nzbdata'): if app.NZB_METHOD == u'blackhole': result_downloaded = _download_result(result) elif app.NZB_METHOD == u'sabnzbd': @@ -133,7 +143,7 @@ def snatch_episode(result): result_downloaded = False # Torrents can be sent to clients or saved to disk - elif result.resultType == u'torrent': + elif result.result_type == u'torrent': # torrents are saved to disk when blackhole mode if app.TORRENT_METHOD == u'blackhole': result_downloaded = _download_result(result) @@ -149,7 +159,7 @@ def snatch_episode(result): log.warning(u'Torrent file content is empty: {0}', result.name) result_downloaded = False else: - log.error(u'Unknown result type, unable to download it: {0!r}', result.resultType) + log.error(u'Unknown result type, unable to download it: {0!r}', result.result_type) result_downloaded = False if not result_downloaded: diff --git a/medusa/search/queue.py b/medusa/search/queue.py index 44873ebf06..3704ad8944 100644 --- a/medusa/search/queue.py +++ b/medusa/search/queue.py @@ -494,7 +494,7 @@ def run(self): except Exception: self.success = False - log.debug(traceback.format_exc()) + log.exception('Manual snatch failed!. 
For result: {name}', {'name': result.name}) ui.notifications.message('Error while snatching selected result', 'Unable to snatch the result for {name}'.format(name=result.name)) diff --git a/tests/providers/test_binsearch_title.py b/tests/providers/test_binsearch_title.py new file mode 100644 index 0000000000..1c6d8778eb --- /dev/null +++ b/tests/providers/test_binsearch_title.py @@ -0,0 +1,101 @@ +# coding=utf-8 +"""Title parse test code for Binsearch Provider.""" + +from medusa.providers.nzb.binsearch import BinSearchProvider +import pytest + + +@pytest.mark.parametrize('p', [ + { # p0: None + 'title': '[ 160929_02 Arrow.S05E02.1080p.AMZN.WEBRip.DD5.1.x264-NTb ] - [1/1] - "160929_02 Arrow.S05E02.The.' + 'Recruits.1080p.AMZN.WEBRip.DD5.1.x264-NTb.nfo" yEnc (1/1)', + 'mode': 'episode', + 'expected': 'Arrow.S05E02.The.Recruits.1080p.AMZN.WEBRip.DD5.1.x264-NTb' + }, + { # p0: None + 'title': 'AMS Arrow.S05E02.The.Recruits.1080p.AMZN.WEBRip.DD5.1.x264-NTb [01/55] - "Arrow.S05E02.The.Recruits.' + '1080p.AMZN.WEBRip.DD5.1.x264-NTb.par2" yEnc (1/1)', + 'mode': 'episode', + 'expected': 'Arrow.S05E02.The.Recruits.1080p.AMZN.WEBRip.DD5.1.x264-NTb' + }, + { # p0: None + 'title': 'Arrow S05E02 [1 of 15] "Arrow.S05E02.FRENCH.WEBRip.XviD.avi.part.par2" yEnc (1/1)collection size: ' + '358.31 MB, parts available: 1041 / 1041- 7 rar files- 1 par2 file', + 'mode': 'episode', + 'expected': 'Arrow.S05E02.FRENCH.WEBRip.XviD.avi' + }, + { # p0: None + 'title': '(????) [013/275] - "Arrow.S05E02.HDTV.HebSubs.XviD-AFG.par2" yEnc (1/1)collection size: 506.17 MB, ' + 'parts available: 1344 / 1344- 9 par2 files- 3 rar files', + 'mode': 'episode', + 'expected': 'Arrow.S05E02.HDTV.HebSubs.XviD-AFG' + }, + { # p0: None + 'title': '[382187]-[FULL]-[#[email protected]]-[ Architects.Of.F1.S01E02.Gordon.Murray.1080p.HDTV.x264-GRiP ]' + '-[01/23] - "architects.of.f1.s01e02.gordon.murray.1080p.hdtv.x264-grip.nfo" yEnc (1/1)collection size: ' + '2.39 GB, parts available: 3247 / 3247- 8 par2 files- 12 rar files- 1 srr file- 1 sfv file- 1 nfo fileview NFO', + 'mode': 'episode', + 'expected': 'architects.of.f1.s01e02.gordon.murray.1080p.hdtv.x264-grip' + }, + { # p0: None + 'title': '(1/1) - Description - "The.Grand.Tour.S01E01.720p.HEVC.X265-M!B[S1n].nzb" - 418.22 kB - yEnc (1/2)' + 'collection size: 855.66 KB, parts available: 5 / 5- 2 nzb files- 1 nfo fileview NFO', + 'mode': 'episode', + 'expected': 'The.Grand.Tour.S01E01.720p.HEVC.X265-M!B[S1n]' + }, + { # p0: None + 'title': 'grandtour - [00/20] - "The.Grand.Tour.S01E01.WEBRip.X264-DEFLATE[ettv].nzb" yEnc (1/1)collection size: ' + '1.12 GB, parts available: 3037 / 3037- 10 par2 files- 10 rar files- 1 nzb file', + 'mode': 'episode', + 'expected': 'The.Grand.Tour.S01E01.WEBRip.X264-DEFLATE[ettv]' + }, + { # p0: None + 'title': '[ TrollHD ] - [ 000/343 ] - "The Grand Tour S01E01 2160p Amazon WEBRip DD+ 5.1 x264-TrollUHD.nzb" ' + 'yEnc (1/10)collection size: 34.68 GB, parts available: 56365 / 56365- unidentified files (note to ' + 'poster: put quotes around the filename in the subject)- 72 par2 files- 1 nzb file', + 'mode': 'episode', + 'expected': 'The Grand Tour S01E01 2160p Amazon WEBRip DD+ 5.1 x264-TrollUHD' + }, + { # p0: None + 'title': '[00/65] - "The.Grand.Tour.S01E01.German.WebHD-720p.x264.nzb" yEnc (1/3)collection size: 3.04 GB, parts ' + 'available: 8245 / 8245- 9 par2 files- 55 rar files- 1 nfo file- 1 nzb fileview NFO', + 'mode': 'episode', + 'expected': 'The.Grand.Tour.S01E01.German.WebHD-720p.x264' + }, + { # p0: None + 'title': '[ TOWN ]-[ www.town.ag ]-[ 
partner of www.ssl-news.info ]-[ OPEN ] [01/16] - "The.Passing.Bells.' + 'S01E01.720p.HDTV.x264-TASTETV.par2" - 555,41 MB yEnc (1/1)collection size: 576.49 MB, parts available:' + ' 1522 / 1522- 4 par2 files- 11 rar files- 1 nfo fileview NFO', + 'mode': 'episode', + 'expected': 'The.Passing.Bells.S01E01.720p.HDTV.x264-TASTETV' + }, + { # p0: None + 'title': '"Sense8 S01 MULTi 1080p WEBRip DD5 1 H 264-IMPERIUM.zip" yEnc (1/1)', + 'mode': 'episode', + 'expected': 'Sense8 S01 MULTi 1080p WEBRip DD5 1 H 264-IMPERIUM' + }, + { # p0: None + 'title': 'pFyLzeqxIhLQS5qdjCuHXBYOgqRb5A - [887/998] - "Selfie S01E01.part1.rar" yEnc (1/82)collection size:' + ' 88.54 MB, parts available: 234 / 234- 3 rar files', + 'mode': 'episode', + 'expected': 'Selfie S01E01.part1' + }, + { # p0: None + 'title': '[PRiVATE] Murdered.For.Her.Selfies.S01E01.WEB.h264-ROFL [newzNZB] [1/7] - "murdered.for.her.' + 'selfies.s01e01.web.h264-rofl.nfo" yEnc (1/1)collection size: 90.87 MB, parts available: 126 / ' + '126- 1 sfv file- 5 rar files- 1 nfo fileview NFO', + 'mode': 'episode', + 'expected': 'murdered.for.her.selfies.s01e01.web.h264-rofl' + } +]) +def test_parse_binsearch_title(p): + # Given + title = p['title'] + mode = p['mode'] + expected = p['expected'] + + # When + actual = BinSearchProvider.clean_title(title, mode) + + # Then + assert expected == actual From 2906ef0ec867bf27f2791eeaac58aebdd9c035be Mon Sep 17 00:00:00 2001 From: p0ps Date: Fri, 15 Dec 2017 17:59:29 +0100 Subject: [PATCH 22/60] Feature/delay downloads (#3360) * Add app fail over constants. This will enable a user to delay a download for nzb, torrents or both. For ex. Delaying a download for torrents, will not snatch a torrent result, but will snatch a nzb result, when it hits one. * Added date_added column to cache provider tables. I need to add this table, as the others are used, and are always overwritten. * Needed to remove the INSERT OR REPLACE. Because the date_added may never be overwritten. * Removed duplicate items based on the url, as we can't use the INSERT OR REPLACE anymore. This has the advantage that we're eliminating allot of (duplicated) search results early on. * Fixed the INSERT and the UPDATE. * Implemented the provider delay for the daily_search. * currently no UI available. Config needs to be done in config.ini. * Only include provider cache tables, when they have the new column added. * Added more verbose logging. * Added date added to snatchSelection.mako. * Added more info to delayed download logging. * Renamed fail_over_enabled and fail_over_hours attributes to enable_search_delay and search_delay. * Add search_delay and enable_search_delay to config UI. * Typo. * Separated the function from the daily_search function. Reused it for the backlog search. * Fixed the calculation of the hours remaining for the provider delay. * Removed one parameter as that's also available within the SearchResult. * Fixed Flake. * changed config description from minutes to hours. * Mention the PROPER search exempt from delay. * Flake. * Add comma to create. 
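The delay rule itself is small: a release is held back until `search_delay` minutes have passed since it was first seen in the provider's cache (the new `date_added` column). A minimal sketch of that check, assuming a provider object carrying the new attributes and the cached first-seen Unix timestamp:

    import time

    # Sketch of the decision made by delay_search(): search_delay is stored in
    # minutes, first_seen is the date_added Unix timestamp from the cache row.
    def is_delayed(provider, first_seen):
        if not (provider.enable_search_delay and provider.search_delay):
            return False
        return first_seen + provider.search_delay * 60 > int(time.time())
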
--- medusa/__main__.py | 3 + medusa/providers/generic_provider.py | 14 ++++- medusa/search/backlog.py | 3 +- medusa/search/core.py | 60 ++++++++++++++++++- medusa/search/manual.py | 4 +- medusa/server/web/config/providers.py | 29 +++++++++ medusa/tv/cache.py | 54 ++++++++++++----- views/config_providers.mako | 84 +++++++++++++++++++++++++++ views/snatchSelection.mako | 3 + 9 files changed, 233 insertions(+), 21 deletions(-) diff --git a/medusa/__main__.py b/medusa/__main__.py index e71bc31889..467723f6e0 100755 --- a/medusa/__main__.py +++ b/medusa/__main__.py @@ -984,6 +984,8 @@ def initialize(self, console_logging=True): load_provider_setting(app.CFG, provider, 'bool', 'enable_daily', 1) load_provider_setting(app.CFG, provider, 'bool', 'enable_backlog', provider.supports_backlog) load_provider_setting(app.CFG, provider, 'bool', 'enable_manualsearch', 1) + load_provider_setting(app.CFG, provider, 'bool', 'enable_search_delay', 0) + load_provider_setting(app.CFG, provider, 'int', 'search_delay', 480) if provider.provider_type == GenericProvider.TORRENT: load_provider_setting(app.CFG, provider, 'string', 'custom_url', '', censor_log='low') @@ -1525,6 +1527,7 @@ def save_config(): 'name', 'url', 'api_key', 'username', 'search_mode', 'search_fallback', 'enable_daily', 'enable_backlog', 'enable_manualsearch', + 'enable_search_delay', 'search_delay', ], 'encrypted': [ 'password', diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index cbc046c8a8..1104e42608 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -112,9 +112,9 @@ def __init__(self, name): self.max_recent_items = 5 self.stop_at = 3 - # Police attributes - self.enable_api_hit_cooldown = False - self.enable_daily_request_reserve = False + # Delay downloads + self.enable_search_delay = False + self.search_delay = 480 # minutes def download_result(self, result): """Download result from provider.""" @@ -229,6 +229,14 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, return results if items_list: + # Remove duplicate items + items_list_without_dups = [] + for item in items_list: + if item['link'] not in [_['link'] for _ in items_list_without_dups]: + items_list_without_dups.append(item) + + items_list = items_list_without_dups + # categorize the items into lists by quality items = defaultdict(list) for item in items_list: diff --git a/medusa/search/backlog.py b/medusa/search/backlog.py index e4514ea9aa..f03d1b5eb0 100644 --- a/medusa/search/backlog.py +++ b/medusa/search/backlog.py @@ -106,7 +106,8 @@ def search_backlog(self, which_shows=None): segments = self._get_segments(cur_show, from_date) for season, segment in iteritems(segments): - self.currentSearchInfo = {'title': cur_show.name + ' Season ' + str(season)} + self.currentSearchInfo = {'title': '{series_name} Season {season}'.format(series_name=cur_show.name, + season=season)} backlog_queue_item = BacklogQueueItem(cur_show, segment) app.search_queue_scheduler.action.add_item(backlog_queue_item) # @UndefinedVariable diff --git a/medusa/search/core.py b/medusa/search/core.py index 4604c7e440..bedc6163df 100644 --- a/medusa/search/core.py +++ b/medusa/search/core.py @@ -6,6 +6,7 @@ import logging import os import threading +import time from medusa import ( app, @@ -45,6 +46,7 @@ from medusa.providers.generic_provider import GenericProvider from medusa.show import naming + log = BraceAdapter(logging.getLogger(__name__)) log.logger.addHandler(logging.NullHandler()) @@ 
-477,6 +479,10 @@ def search_for_needed_episodes(force=False): if cur_ep in found_results and best_result.quality <= found_results[cur_ep].quality: continue + # Skip the result if search delay is enabled for the provider. + if delay_search(best_result): + continue + found_results[cur_ep] = best_result threading.currentThread().name = original_thread_name @@ -484,6 +490,56 @@ def search_for_needed_episodes(force=False): return found_results.values() +def delay_search(best_result): + """Delay the search by ignoring the best result, when search delay is enabled for this provider. + + If the providers attribute enable_search_delay is enabled for this provider and it's younger then then it's + search_delay time (minutes) skipp it. For this we need to check if the result has already been + stored in the provider cache db, and if it's still younger then the providers attribute search_delay. + :param best_result: SearchResult object. + :return: True if we want to skipp this result. + """ + cur_provider = best_result.provider + if cur_provider.enable_search_delay and cur_provider.search_delay: # In minutes + cur_ep = best_result.episodes[0] + log.debug('DELAY: Provider {provider} delay enabled, with an expiration of {delay} hours', + {'provider': cur_provider.name, 'delay': round(cur_provider.search_delay / 60, 1)}) + from medusa.search.manual import get_provider_cache_results + results = get_provider_cache_results( + cur_ep.series.indexer, show_all_results=False, perform_search=False, show=cur_ep.series.indexerid, + season=cur_ep.season, episode=cur_ep.episode, manual_search_type='episode' + ) + + if results.get('found_items'): + results['found_items'].sort(key=lambda d: int(d['date_added'])) + first_result = results['found_items'][0] + if first_result['date_added'] + cur_provider.search_delay * 60 > int(time.time()): + # The provider's delay cooldown time hasn't expired yet. We're holding back the snatch. + log.debug( + u'DELAY: Holding back best result {best_result} over {first_result} for provider {provider}. The provider is waiting' + u' {search_delay_minutes} hours, before accepting the release. Still {hours_left} to go.', { + 'best_result': best_result.name, + 'first_result': first_result['name'], + 'provider': cur_provider.name, + 'search_delay_minutes': round(cur_provider.search_delay / 60, 1), + 'hours_left': round((cur_provider.search_delay - (time.time() - first_result['date_added']) / 60) / 60, 1) + } + ) + return True + else: + log.debug(u'DELAY: Provider {provider}, found a result in cache, and the delay has expired. ' + u'Time of first result: {first_result}', + {'provider': cur_provider.name, + 'first_result': datetime.datetime.fromtimestamp(first_result['date_added'])}) + else: + # This should never happen. + log.debug( + u'DELAY: Provider {provider}, searched cache but could not get any results for: {series} {season_ep}', + {'provider': cur_provider.name, 'series': best_result.show.name, + 'season_ep': episode_num(cur_ep.season, cur_ep.episode)}) + return False + + def search_providers(show, episodes, forced_search=False, down_cur_quality=False, manual_search=False, manual_search_type=u'episode'): """ @@ -782,7 +838,9 @@ def search_providers(show, episodes, forced_search=False, down_cur_quality=False else: found = True if not found: - final_results += [best_result] + # Skip the result if search delay is enabled for the provider. 
+ if not delay_search(best_result): + final_results += [best_result] # Remove provider from thread name before return results threading.currentThread().name = original_thread_name diff --git a/medusa/search/manual.py b/medusa/search/manual.py index 9a265d7732..58e02c70cf 100644 --- a/medusa/search/manual.py +++ b/medusa/search/manual.py @@ -207,7 +207,7 @@ def get_provider_cache_results(indexer, show_all_results=None, perform_search=No # TODO: the implicit sqlite rowid is used, should be replaced with an explicit PK column # If table doesn't exist, start a search to create table and new columns seeders, leechers and size - required_columns = ['seeders', 'leechers', 'size', 'proper_tags'] + required_columns = ['seeders', 'leechers', 'size', 'proper_tags', 'date_added'] if table_exists and all(required_column in columns for required_column in required_columns): # The default sql, that's executed for each providers cache table common_sql = ( @@ -215,7 +215,7 @@ def get_provider_cache_results(indexer, show_all_results=None, perform_search=No b" ? AS 'provider', ? AS 'provider_id', ? 'provider_minseed'," b" ? 'provider_minleech', name, season, episodes, indexerid," b" url, time, proper_tags, quality, release_group, version," - b" seeders, leechers, size, time, pubdate " + b" seeders, leechers, size, time, pubdate, date_added " b"FROM '{provider_id}' " b"WHERE indexerid = ? AND quality > 0 ".format( provider_id=cur_provider.get_id() diff --git a/medusa/server/web/config/providers.py b/medusa/server/web/config/providers.py index 99478d5fb3..d09753c3cd 100644 --- a/medusa/server/web/config/providers.py +++ b/medusa/server/web/config/providers.py @@ -496,6 +496,20 @@ def saveProviders(self, newznab_string='', torrentrss_string='', provider_order= except (AttributeError, KeyError): cur_torrent_provider.subtitle = 0 # these exceptions are actually catching unselected checkboxes + if hasattr(cur_torrent_provider, 'enable_search_delay'): + try: + cur_torrent_provider.enable_search_delay = config.checkbox_to_value( + kwargs['{id}_enable_search_delay'.format(id=cur_torrent_provider.get_id())]) + except (AttributeError, KeyError): + cur_torrent_provider.enable_search_delay = 0 # these exceptions are actually catching unselected checkboxes + + if hasattr(cur_torrent_provider, 'search_delay'): + try: + search_delay = float(str(kwargs['{id}_search_delay'.format(id=cur_torrent_provider.get_id())]).strip()) + cur_torrent_provider.search_delay = (int(search_delay * 60), 30)[search_delay < 0.5] + except (AttributeError, KeyError, ValueError): + cur_torrent_provider.search_delay = 480 # these exceptions are actually catching unselected checkboxes + if cur_torrent_provider.enable_cookies: try: cur_torrent_provider.cookies = str(kwargs['{id}_cookies'.format(id=cur_torrent_provider.get_id())]).strip() @@ -552,6 +566,21 @@ def saveProviders(self, newznab_string='', torrentrss_string='', provider_order= except (AttributeError, KeyError): cur_nzb_provider.enable_backlog = 0 # these exceptions are actually catching unselected checkboxes + if hasattr(cur_nzb_provider, 'enable_search_delay'): + try: + cur_nzb_provider.enable_search_delay = config.checkbox_to_value( + kwargs['{id}_enable_search_delay'.format(id=cur_nzb_provider.get_id())]) + except (AttributeError, KeyError): + cur_nzb_provider.enable_search_delay = 0 # these exceptions are actually catching unselected checkboxes + + if hasattr(cur_nzb_provider, 'search_delay'): + try: + search_delay = float( + 
str(kwargs['{id}_search_delay'.format(id=cur_nzb_provider.get_id())]).strip()) + cur_nzb_provider.search_delay = (int(search_delay * 60), 30)[search_delay < 0.5] + except (AttributeError, KeyError, ValueError): + cur_nzb_provider.search_delay = 480 # these exceptions are actually catching unselected checkboxes + # app.NEWZNAB_DATA = '!!!'.join([x.config_string() for x in app.newznabProviderList]) app.PROVIDER_ORDER = provider_list diff --git a/medusa/tv/cache.py b/medusa/tv/cache.py index a8620d97f5..5b2e0c3933 100644 --- a/medusa/tv/cache.py +++ b/medusa/tv/cache.py @@ -51,7 +51,8 @@ def __init__(self, provider_id): b' url TEXT,' b' time NUMERIC,' b' quality NUMERIC,' - b' release_group TEXT)'.format(name=provider_id)) + b' release_group TEXT,' + b' date_added NUMERIC)'.format(name=provider_id)) else: sql_results = self.select( b'SELECT url, COUNT(url) AS count ' @@ -86,6 +87,7 @@ def __init__(self, provider_id): ('size', 'NUMERIC', -1), ('pubdate', 'NUMERIC', None), ('proper_tags', 'TEXT', None), + ('date_added', 'NUMERIC', 0), ) for column, data_type, default in table: # add columns to table if missing @@ -405,19 +407,34 @@ def add_cache_entry(self, name, url, seeders, leechers, size, pubdate, parsed_re # Store proper_tags as proper1|proper2|proper3 proper_tags = '|'.join(parse_result.proper_tags) - log.debug('Added RSS item: {0} to cache: {1}', name, self.provider_id) - return [ - b'INSERT OR REPLACE INTO [{name}] ' - b' (name, season, episodes, indexerid, url, ' - b' time, quality, release_group, version, ' - b' seeders, leechers, size, pubdate, proper_tags) ' - b'VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)'.format( - name=self.provider_id - ), - [name, season, episode_text, parse_result.show.indexerid, url, - cur_timestamp, quality, release_group, version, - seeders, leechers, size, pubdate, proper_tags] - ] + if not self.item_in_cache(url): + log.debug('Added RSS item: {0} to cache: {1}', name, self.provider_id) + return [ + b'INSERT INTO [{name}] ' + b' (name, season, episodes, indexerid, url, ' + b' time, quality, release_group, version, ' + b' seeders, leechers, size, pubdate, proper_tags, date_added) ' + b'VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)'.format( + name=self.provider_id + ), + [name, season, episode_text, parse_result.show.indexerid, url, + cur_timestamp, quality, release_group, version, + seeders, leechers, size, pubdate, proper_tags, cur_timestamp] + ] + else: + log.debug('Updating RSS item: {0} to cache: {1}', name, self.provider_id) + return [ + b'UPDATE [{name}] ' + b'SET name=?, season=?, episodes=?, indexerid=?, ' + b' time=?, quality=?, release_group=?, version=?, ' + b' seeders=?, leechers=?, size=?, pubdate=?, proper_tags=? 
' + b'WHERE url=?'.format( + name=self.provider_id + ), + [name, season, episode_text, parse_result.show.indexerid, + cur_timestamp, quality, release_group, version, + seeders, leechers, size, pubdate, proper_tags, url] + ] def search_cache(self, episode, forced_search=False, down_cur_quality=False): @@ -426,6 +443,15 @@ def search_cache(self, episode, forced_search=False, down_cur_quality) return needed_eps[episode] if episode in needed_eps else [] + def item_in_cache(self, url): + """Check if the url is already available for the specific provider.""" + cache_db_con = self._get_db() + return cache_db_con.select( + b'SELECT COUNT(url) ' + b'FROM [{provider}] ' + b'WHERE url=?'.format(provider=self.provider_id), [url] + )[0][0] + def find_needed_episodes(self, episode, forced_search=False, down_cur_quality=False): """Find needed episodes.""" diff --git a/views/config_providers.mako b/views/config_providers.mako index 97da8bd0e3..8d52c10e8d 100644 --- a/views/config_providers.mako +++ b/views/config_providers.mako @@ -213,6 +213,34 @@ $('#config-components').tabs(); % endif + % if hasattr(cur_newznab_provider, 'enable_search_delay'): +
+ +
+ % endif + % if hasattr(cur_newznab_provider, 'search_delay'): +
+ + +
+ % endif % endfor % for cur_nzb_provider in [cur_provider for cur_provider in sorted_provider_list() if cur_provider.provider_type == GenericProvider.NZB and cur_provider not in app.newznabProviderList]: @@ -304,6 +332,34 @@ $('#config-components').tabs(); % endif + % if hasattr(cur_nzb_provider, 'enable_search_delay'): +
+ +
+ % endif + % if hasattr(cur_nzb_provider, 'search_delay'): +
+ + +
+ % endif % endfor % for cur_torrent_provider in [cur_provider for cur_provider in sorted_provider_list() if cur_provider.provider_type == GenericProvider.TORRENT]: @@ -611,6 +667,34 @@ $('#config-components').tabs(); % endif + % if hasattr(cur_torrent_provider, 'enable_search_delay'): +
+ +
+ % endif + % if hasattr(cur_torrent_provider, 'search_delay'): +
+ + +
+ % endif % endfor diff --git a/views/snatchSelection.mako b/views/snatchSelection.mako index cf44b34e4e..eb908d075b 100644 --- a/views/snatchSelection.mako +++ b/views/snatchSelection.mako @@ -1,5 +1,6 @@ <%inherit file="/layouts/main.mako"/> <%! + from datetime import datetime from medusa import app from medusa.helpers import anon_url %> @@ -105,6 +106,7 @@ Type Updated Published + Added Snatch @@ -139,6 +141,7 @@ ${hItem["pubdate"]} + ${datetime.fromtimestamp(hItem["date_added"])} search % endfor From ef84ecb6d18af9ae0bf61a1f93e88e29c31e8329 Mon Sep 17 00:00:00 2001 From: adaur Date: Fri, 15 Dec 2017 12:25:07 -0500 Subject: [PATCH 23/60] Clarify Jackett's configuration (#3472) * Clarify Jackett's configuration * Add note to UI --- readme.md | 2 +- views/config_providers.mako | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/readme.md b/readme.md index cdcb5a1a4f..8af2ea5c89 100644 --- a/readme.md +++ b/readme.md @@ -61,7 +61,7 @@ We HIGHLY recommend starting out with no database files at all to make this a fr #### Supported providers -A full list can be found here: [Link](https://github.com/pymedusa/Medusa/wiki/Medusa-Search-Providers) +A full list can be found [here](https://github.com/pymedusa/Medusa/wiki/Medusa-Search-Providers). Jackett is supported, however it must be configured [as follows](https://github.com/pymedusa/Medusa/wiki/Using-Jackett-with-Medusa). #### Special Thanks to: ![image](https://rarbg.com/favicon.ico)[RARBG](https://rarbg.to) diff --git a/views/config_providers.mako b/views/config_providers.mako index 8d52c10e8d..5742ecfd2a 100644 --- a/views/config_providers.mako +++ b/views/config_providers.mako @@ -786,6 +786,9 @@ $('#config-components').tabs(); +
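The search-delay feature above stores the delay in minutes internally, while saveProviders() accepts hours from the UI and converts them with a 30-minute floor via the tuple-indexing expression `(int(search_delay * 60), 30)[search_delay < 0.5]`. A minimal sketch of both pieces of arithmetic in plain Python, with made-up values rather than Medusa's provider objects:

    import time

    DEFAULT_SEARCH_DELAY = 480  # minutes (8 hours), the default used by the patch

    def to_minutes(hours):
        # hours-to-minutes conversion with a 30-minute floor; equivalent to
        # the tuple-indexing idiom (int(hours * 60), 30)[hours < 0.5]
        return 30 if hours < 0.5 else int(hours * 60)

    def delay_expired(date_added, search_delay_minutes, now=None):
        # date_added: cache timestamp (seconds) of the first result seen for the episode;
        # the snatch is held back until the cached result is older than the delay
        if now is None:
            now = time.time()
        return date_added + search_delay_minutes * 60 <= now

In other words, delay_search() only lets a result through once it has been sitting in the provider's cache (tracked via the new date_added column) for at least search_delay minutes.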
From 96374c9174cdceef514f736dd6715715c541f5c5 Mon Sep 17 00:00:00 2001 From: Labrys Date: Fri, 15 Dec 2017 18:58:14 -0500 Subject: [PATCH 24/60] Fix duplicate item detection --- medusa/providers/generic_provider.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index 1104e42608..68dfb45dcc 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -229,13 +229,11 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, return results if items_list: - # Remove duplicate items - items_list_without_dups = [] - for item in items_list: - if item['link'] not in [_['link'] for _ in items_list_without_dups]: - items_list_without_dups.append(item) - - items_list = items_list_without_dups + # Remove duplicate items using 'link' as primary key + items_list = { + item['link']:item + for item in items_list + }.values() # categorize the items into lists by quality items = defaultdict(list) From 64140353c35d6733bd4b24133752770e1c265c49 Mon Sep 17 00:00:00 2001 From: Labrys Date: Fri, 15 Dec 2017 20:16:41 -0500 Subject: [PATCH 25/60] Refactor items -> categorized_items --- medusa/providers/generic_provider.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index 68dfb45dcc..2d14b5ea4e 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -236,15 +236,15 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, }.values() # categorize the items into lists by quality - items = defaultdict(list) + categorized_items = defaultdict(list) for item in items_list: - items[self.get_quality(item, anime=show.is_anime)].append(item) + categorized_items[self.get_quality(item, anime=show.is_anime)].append(item) # temporarily remove the list of items with unknown quality - unknown_items = items.pop(Quality.UNKNOWN, []) + unknown_items = categorized_items.pop(Quality.UNKNOWN, []) # make a generator to sort the remaining items by descending quality - items_list = (items[quality] for quality in sorted(items, reverse=True)) + items_list = (categorized_items[quality] for quality in sorted(categorized_items, reverse=True)) # unpack all of the quality lists into a single sorted list items_list = list(chain(*items_list)) From a314911b004e6044fa7e5fc86e504e2bcda6734c Mon Sep 17 00:00:00 2001 From: Labrys Date: Fri, 15 Dec 2017 20:18:27 -0500 Subject: [PATCH 26/60] Simplify statement --- medusa/providers/generic_provider.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index 2d14b5ea4e..e93d8a64c5 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -238,7 +238,8 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, # categorize the items into lists by quality categorized_items = defaultdict(list) for item in items_list: - categorized_items[self.get_quality(item, anime=show.is_anime)].append(item) + quality = self.get_quality(item, anime=show.is_anime) + categorized_items[quality].append(item) # temporarily remove the list of items with unknown quality unknown_items = categorized_items.pop(Quality.UNKNOWN, []) From f2d648ce80368d3ff8e016407724e939f404a1e0 Mon Sep 17 00:00:00 2001 From: Labrys Date: Fri, 15 Dec 2017 20:19:42 -0500 
Subject: [PATCH 27/60] Simplify statements --- medusa/providers/generic_provider.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index e93d8a64c5..d372e4e1d0 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -244,8 +244,11 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, # temporarily remove the list of items with unknown quality unknown_items = categorized_items.pop(Quality.UNKNOWN, []) + # sort qualities in descending order + sorted_qualities = sorted(categorized_items, reverse=True) + # make a generator to sort the remaining items by descending quality - items_list = (categorized_items[quality] for quality in sorted(categorized_items, reverse=True)) + items_list = (categorized_items[quality] for quality in sorted_qualities) # unpack all of the quality lists into a single sorted list items_list = list(chain(*items_list)) From 70de4bf2cbd0cf21b1f215d140f518a822f174eb Mon Sep 17 00:00:00 2001 From: Labrys Date: Fri, 15 Dec 2017 20:21:24 -0500 Subject: [PATCH 28/60] Use `chain.from_iterable` instead of unpacking iterable --- medusa/providers/generic_provider.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index d372e4e1d0..816855c3d5 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -251,7 +251,7 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, items_list = (categorized_items[quality] for quality in sorted_qualities) # unpack all of the quality lists into a single sorted list - items_list = list(chain(*items_list)) + items_list = list(chain.from_iterable(items_list)) # extend the list with the unknown qualities, now sorted at the bottom of the list items_list.extend(unknown_items) From 1a662515caa5117764a35712be053c7b21616de7 Mon Sep 17 00:00:00 2001 From: Labrys Date: Fri, 15 Dec 2017 20:24:41 -0500 Subject: [PATCH 29/60] Move Quality.UNKNOWN to the end of the sorted keys instead of a pop and append. 
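For context on the next diff: the commit keeps the descending quality sort but explicitly moves Quality.UNKNOWN to the end of the key list, so unknown-quality items are still considered last. A hypothetical single-sort formulation of the same ordering (just a sketch, not what the commit does) would be:

    # sort known qualities descending, but always push UNKNOWN to the end
    sorted_qualities = sorted(
        categorized_items,
        key=lambda quality: (quality == Quality.UNKNOWN, -quality),
    )

The try/except/else form used in the commit keeps the plain reverse=True sort and stays closest to the previous pop-and-append behaviour.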
--- medusa/providers/generic_provider.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index 816855c3d5..ef3a88910b 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -241,21 +241,23 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, quality = self.get_quality(item, anime=show.is_anime) categorized_items[quality].append(item) - # temporarily remove the list of items with unknown quality - unknown_items = categorized_items.pop(Quality.UNKNOWN, []) - # sort qualities in descending order sorted_qualities = sorted(categorized_items, reverse=True) + # move Quality.UNKNOWN to the end of the list + try: + sorted_qualities.remove(Quality.UNKNOWN) + except ValueError: + pass + else: + sorted_qualities.append(Quality.UNKNOWN) + # make a generator to sort the remaining items by descending quality items_list = (categorized_items[quality] for quality in sorted_qualities) # unpack all of the quality lists into a single sorted list items_list = list(chain.from_iterable(items_list)) - # extend the list with the unknown qualities, now sorted at the bottom of the list - items_list.extend(unknown_items) - cl = [] # Move through each item and parse it into a quality From 7cf97d5db78d1f29a9cc9b822314bc75f79f8578 Mon Sep 17 00:00:00 2001 From: Labrys Date: Fri, 15 Dec 2017 20:26:33 -0500 Subject: [PATCH 30/60] Refactor items_list -> unique_items --- medusa/providers/generic_provider.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index ef3a88910b..acdf343672 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -230,14 +230,14 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, if items_list: # Remove duplicate items using 'link' as primary key - items_list = { + unique_items = { item['link']:item for item in items_list }.values() # categorize the items into lists by quality categorized_items = defaultdict(list) - for item in items_list: + for item in unique_items: quality = self.get_quality(item, anime=show.is_anime) categorized_items[quality].append(item) From dfa141f17295a64f3153e4be1d0e835228f1cc47 Mon Sep 17 00:00:00 2001 From: Labrys Date: Fri, 15 Dec 2017 20:33:09 -0500 Subject: [PATCH 31/60] Refactor items_list -> sorted_items --- medusa/providers/generic_provider.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index acdf343672..c5229828c3 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -252,11 +252,14 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, else: sorted_qualities.append(Quality.UNKNOWN) - # make a generator to sort the remaining items by descending quality - items_list = (categorized_items[quality] for quality in sorted_qualities) + # chain items sorted by quality + sorted_items = chain.from_iterable( + categorized_items[quality] + for quality in sorted_qualities + ) # unpack all of the quality lists into a single sorted list - items_list = list(chain.from_iterable(items_list)) + items_list = list(sorted_items) cl = [] From ec2b0c9493cfed4a0271d661bc9cca42731f598a Mon Sep 17 00:00:00 2001 From: Labrys Date: Fri, 15 Dec 2017 20:33:52 -0500 Subject: [PATCH 
32/60] Remove unnecessary if statement --- medusa/providers/generic_provider.py | 59 ++++++++++++++-------------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index c5229828c3..0573f7d0a8 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -228,38 +228,37 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, if len(results) == len(episodes): return results - if items_list: - # Remove duplicate items using 'link' as primary key - unique_items = { - item['link']:item - for item in items_list - }.values() - - # categorize the items into lists by quality - categorized_items = defaultdict(list) - for item in unique_items: - quality = self.get_quality(item, anime=show.is_anime) - categorized_items[quality].append(item) - - # sort qualities in descending order - sorted_qualities = sorted(categorized_items, reverse=True) - - # move Quality.UNKNOWN to the end of the list - try: - sorted_qualities.remove(Quality.UNKNOWN) - except ValueError: - pass - else: - sorted_qualities.append(Quality.UNKNOWN) + # Remove duplicate items using 'link' as primary key + unique_items = { + item['link']:item + for item in items_list + }.values() + + # categorize the items into lists by quality + categorized_items = defaultdict(list) + for item in unique_items: + quality = self.get_quality(item, anime=show.is_anime) + categorized_items[quality].append(item) + + # sort qualities in descending order + sorted_qualities = sorted(categorized_items, reverse=True) + + # move Quality.UNKNOWN to the end of the list + try: + sorted_qualities.remove(Quality.UNKNOWN) + except ValueError: + pass + else: + sorted_qualities.append(Quality.UNKNOWN) - # chain items sorted by quality - sorted_items = chain.from_iterable( - categorized_items[quality] - for quality in sorted_qualities - ) + # chain items sorted by quality + sorted_items = chain.from_iterable( + categorized_items[quality] + for quality in sorted_qualities + ) - # unpack all of the quality lists into a single sorted list - items_list = list(sorted_items) + # unpack all of the quality lists into a single sorted list + items_list = list(sorted_items) cl = [] From 4cb6bab888f23766ce83d43aa6b264954c011a01 Mon Sep 17 00:00:00 2001 From: Labrys Date: Fri, 15 Dec 2017 20:40:49 -0500 Subject: [PATCH 33/60] Add logging --- medusa/providers/generic_provider.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index 0573f7d0a8..50efa2b106 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -233,6 +233,7 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, item['link']:item for item in items_list }.values() + log.debug('Found {0} unique items', len(unique_items)) # categorize the items into lists by quality categorized_items = defaultdict(list) @@ -242,14 +243,16 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, # sort qualities in descending order sorted_qualities = sorted(categorized_items, reverse=True) + log.debug('Found qualities: {0}', sorted_qualities) # move Quality.UNKNOWN to the end of the list try: sorted_qualities.remove(Quality.UNKNOWN) except ValueError: - pass + log.debug('No unknown qualities in results') else: sorted_qualities.append(Quality.UNKNOWN) + log.debug('Unknown qualities moved to end of 
results') # chain items sorted by quality sorted_items = chain.from_iterable( From b8a07fb13d15c9ae0d74a9efffac9e758d0d2cc8 Mon Sep 17 00:00:00 2001 From: P0psicles Date: Sat, 16 Dec 2017 14:39:02 +0100 Subject: [PATCH 34/60] created remove_duplicate_urls method to prevent duplicate url's being mass_inserted into the db. Reused the function for daily search. --- medusa/providers/generic_provider.py | 16 +++++++++------- medusa/tv/cache.py | 1 + 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index 1104e42608..b714102c01 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -185,7 +185,14 @@ def find_propers(self, proper_candidates): search_result.date = datetime.today() search_result.show = show_obj - return results + return self.remove_duplicate_urls(results) + + def remove_duplicate_urls(self, items): + items_list_without_dups = [] + for item in items: + if item['link'] not in [_['link'] for _ in items_list_without_dups]: + items_list_without_dups.append(item) + return items def find_search_results(self, show, episodes, search_mode, forced_search=False, download_current_quality=False, manual_search=False, manual_search_type='episode'): @@ -230,12 +237,7 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, if items_list: # Remove duplicate items - items_list_without_dups = [] - for item in items_list: - if item['link'] not in [_['link'] for _ in items_list_without_dups]: - items_list_without_dups.append(item) - - items_list = items_list_without_dups + items_list = self.remove_duplicate_urls(items_list) # categorize the items into lists by quality items = defaultdict(list) diff --git a/medusa/tv/cache.py b/medusa/tv/cache.py index 5b2e0c3933..e55ab41f6c 100644 --- a/medusa/tv/cache.py +++ b/medusa/tv/cache.py @@ -196,6 +196,7 @@ def update_cache(self): try: data = self._get_rss_data() + data['entries'] = self.provider.remove_duplicate_urls(data['entries']) if self._check_auth(data): # clear cache self._clear_cache() From 5fbe98000d7699b59f8142b6f010e63b05823bce Mon Sep 17 00:00:00 2001 From: P0psicles Date: Sat, 16 Dec 2017 14:46:15 +0100 Subject: [PATCH 35/60] Reverted the remove duplicate items for propers, as it didn't work. And propers aren't added to the cache anyway. 
--- medusa/providers/generic_provider.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index b714102c01..03c9dd2b68 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -185,7 +185,7 @@ def find_propers(self, proper_candidates): search_result.date = datetime.today() search_result.show = show_obj - return self.remove_duplicate_urls(results) + return results def remove_duplicate_urls(self, items): items_list_without_dups = [] From 87a4849a7e255d458209037cd7452f706d801d6a Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 16 Dec 2017 09:41:25 -0500 Subject: [PATCH 36/60] Retain order --- medusa/providers/generic_provider.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index 50efa2b106..e9f4f4aeac 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -7,7 +7,7 @@ import logging import re from base64 import b16encode, b32decode -from collections import defaultdict +from collections import defaultdict, OrderedDict from datetime import datetime, timedelta from itertools import chain from os.path import join @@ -229,10 +229,10 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, return results # Remove duplicate items using 'link' as primary key - unique_items = { - item['link']:item + unique_items = OrderedDict( + (item['link'], item) for item in items_list - }.values() + ).values() log.debug('Found {0} unique items', len(unique_items)) # categorize the items into lists by quality From 5e4c9fc16186edf5925a6e26232a7f04c15b9f82 Mon Sep 17 00:00:00 2001 From: adaur Date: Sat, 16 Dec 2017 15:54:56 +0100 Subject: [PATCH 37/60] Update yggtorrent.py (#3477) --- medusa/providers/torrent/html/yggtorrent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/providers/torrent/html/yggtorrent.py b/medusa/providers/torrent/html/yggtorrent.py index 176ac04d9a..14d731ad29 100644 --- a/medusa/providers/torrent/html/yggtorrent.py +++ b/medusa/providers/torrent/html/yggtorrent.py @@ -36,7 +36,7 @@ def __init__(self): self.password = None # URLs - self.url = 'https://ww1.yggtorrent.com' + self.url = 'https://yggtorrent.com' self.urls = { 'login': urljoin(self.url, 'user/login'), 'search': urljoin(self.url, 'engine/search'), From 1c2a3ec27db6b8d57dba4a3cfe46e6ba79cbedd1 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 16 Dec 2017 10:02:52 -0500 Subject: [PATCH 38/60] Merge branch 'feature/generic' into feature/fix-duplicate-urls-daily-search # Conflicts: # medusa/providers/generic_provider.py --- medusa/providers/generic_provider.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index 5a6afb0ad3..e71963187b 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -188,7 +188,7 @@ def find_propers(self, proper_candidates): return results def remove_duplicate_urls(self, items): - OrderedDict( + return OrderedDict( (item['link'], item) for item in items ).values() @@ -234,7 +234,9 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, if len(results) == len(episodes): return results - unique_items = self.remove_duplicate_urls(items_list) + if items_list: + # Remove duplicate items + items_list = 
self.remove_duplicate_urls(items_list) log.debug('Found {0} unique items', len(unique_items)) # categorize the items into lists by quality From c688648eb7c027b82fdc50a942b5b4e4f4a2b090 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 16 Dec 2017 10:03:53 -0500 Subject: [PATCH 39/60] Fix remove duplicates to make it more generic. --- medusa/providers/generic_provider.py | 13 ++++++++++--- medusa/tv/cache.py | 2 +- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index e71963187b..5c59bb2508 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -187,9 +187,16 @@ def find_propers(self, proper_candidates): return results - def remove_duplicate_urls(self, items): + def remove_duplicate_mappings(self, items, pk='link'): + """ + Remove duplicate items from an iterable of mappings. + + :param items: An iterable of mappings + :param pk: Primary key for removing duplicates + :return: An iterable of unique mappings + """ return OrderedDict( - (item['link'], item) + (item[pk], item) for item in items ).values() @@ -236,7 +243,7 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, if items_list: # Remove duplicate items - items_list = self.remove_duplicate_urls(items_list) + items_list = self.remove_duplicate_mappings(items_list) log.debug('Found {0} unique items', len(unique_items)) # categorize the items into lists by quality diff --git a/medusa/tv/cache.py b/medusa/tv/cache.py index e55ab41f6c..539415056f 100644 --- a/medusa/tv/cache.py +++ b/medusa/tv/cache.py @@ -196,7 +196,7 @@ def update_cache(self): try: data = self._get_rss_data() - data['entries'] = self.provider.remove_duplicate_urls(data['entries']) + data['entries'] = self.provider.remove_duplicate_mappings(data['entries']) if self._check_auth(data): # clear cache self._clear_cache() From ea64b7be893cbe61d3004501e90155887fe35156 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 16 Dec 2017 10:04:46 -0500 Subject: [PATCH 40/60] Convert to staticmethod --- medusa/providers/generic_provider.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index 5c59bb2508..14f82139b2 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -187,7 +187,8 @@ def find_propers(self, proper_candidates): return results - def remove_duplicate_mappings(self, items, pk='link'): + @staticmethod + def remove_duplicate_mappings(items, pk='link'): """ Remove duplicate items from an iterable of mappings. 
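After these refactors the de-duplication helper is a static method that builds an OrderedDict keyed on a chosen primary key (pk defaults to 'link'). A small usage sketch with made-up search items:

    from collections import OrderedDict

    items = [
        {'link': 'magnet:?xt=urn:btih:aaa', 'title': 'Show.S01E01.720p'},
        {'link': 'magnet:?xt=urn:btih:bbb', 'title': 'Show.S01E01.1080p'},
        {'link': 'magnet:?xt=urn:btih:aaa', 'title': 'Show.S01E01.720p'},  # duplicate link
    ]

    # the same expression the helper uses internally
    unique_items = list(OrderedDict((item['link'], item) for item in items).values())
    assert len(unique_items) == 2

Key order follows the first appearance of each link, while for duplicate keys the later mapping quietly replaces the earlier one; for identical duplicates, as here, the distinction does not matter.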
From bec57427a364c14b23e63705505d354f8f949a4d Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 16 Dec 2017 10:08:58 -0500 Subject: [PATCH 41/60] Flake8 import order --- medusa/providers/generic_provider.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index e9f4f4aeac..b1114bed21 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -7,7 +7,7 @@ import logging import re from base64 import b16encode, b32decode -from collections import defaultdict, OrderedDict +from collections import OrderedDict, defaultdict from datetime import datetime, timedelta from itertools import chain from os.path import join From 7ee6748dbd7475a6e4286a9bd394860fde65260b Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 16 Dec 2017 10:56:17 -0500 Subject: [PATCH 42/60] Fix merge issue --- medusa/providers/generic_provider.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index 0ebd2bc3b0..c3b9eac7fe 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -242,9 +242,8 @@ def find_search_results(self, show, episodes, search_mode, forced_search=False, if len(results) == len(episodes): return results - if items_list: - # Remove duplicate items - items_list = self.remove_duplicate_mappings(items_list) + # Remove duplicate items + unique_items = self.remove_duplicate_mappings(items_list) log.debug('Found {0} unique items', len(unique_items)) # categorize the items into lists by quality From 9dc07257a0678a2f308ddbcd9ca0e17ec808f305 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 16 Dec 2017 21:49:49 -0500 Subject: [PATCH 43/60] Fix API root directories --- medusa/server/api/v1/core.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/medusa/server/api/v1/core.py b/medusa/server/api/v1/core.py index 45413c772a..e99bbac444 100644 --- a/medusa/server/api/v1/core.py +++ b/medusa/server/api/v1/core.py @@ -2060,11 +2060,12 @@ def run(self): return _responds(RESULT_FAILURE, msg='An existing indexerid already exists in database') if not self.location: - if app.ROOT_DIRS != '': - root_dirs = app.ROOT_DIRS - root_dirs.pop(0) + if app.ROOT_DIRS: + log.debug(u'Root directories: {0}', app.ROOT_DIRS) + root_dirs = app.ROOT_DIRS[1:] default_index = int(app.ROOT_DIRS[0]) self.location = root_dirs[default_index] + log.debug(u'Default location: {0}', self.location) else: return _responds(RESULT_FAILURE, msg='Root directory is not set, please provide a location') From c1a563e56b06a68ca1cb5f9142adba34348c3c5a Mon Sep 17 00:00:00 2001 From: p0ps Date: Sun, 17 Dec 2017 15:12:18 +0100 Subject: [PATCH 44/60] Fix pick the highest rated image. (#3486) * Flattening of the image structure had a bug. 
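To illustrate the bug fixed in the diff below: per image type the indexer data is nested roughly as {resolution: {image_id: image}}, and the old flattening only took the first image of each resolution group before sorting by rating. The fix flattens every group first, which means the winner can legitimately come from a smaller resolution group. A simplified illustration with made-up data, not the indexer's real payload:

    images_for_type = {
        '680x1000': {
            '1': {'rating': 7.4, '_bannerpath': 'http://example.org/poster-1.jpg'},
            '2': {'rating': 9.1, '_bannerpath': 'http://example.org/poster-2.jpg'},
        },
        '1000x1500': {
            '3': {'rating': 8.0, '_bannerpath': 'http://example.org/poster-3.jpg'},
        },
    }

    # flatten all resolution groups, then keep the single highest-rated image
    flattened = {
        image_id: image
        for group in images_for_type.values()
        for image_id, image in group.items()
    }
    highest_rated = max(flattened.values(), key=lambda image: image['rating'])
    # -> poster-2.jpg (rating 9.1), even though it is not in the largest resolution group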
--- medusa/indexers/indexer_base.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/medusa/indexers/indexer_base.py b/medusa/indexers/indexer_base.py index 08b053ad62..22c77d2be4 100644 --- a/medusa/indexers/indexer_base.py +++ b/medusa/indexers/indexer_base.py @@ -210,8 +210,11 @@ def _save_images(self, sid, images): # For each image type, where going to save one image based on the highest rating if not len(images[image_type]): continue - merged_image_list = [y[1] for y in [next(iteritems(v)) for _, v in iteritems(images[image_type])]] - highest_rated = sorted(merged_image_list, key=lambda k: float(k['rating']), reverse=True)[0] + # This will flatten all the images for all of the resolutions. Meaning it could pick a higher + # rated image that has a lower resolution. + merged_image_list = {image_id: image for image_id, image_value in iteritems(images[image_type]) + for image_id, image in iteritems(image_value)} + highest_rated = sorted(merged_image_list.values(), key=lambda k: k['rating'], reverse=True)[0] self._set_show_data(sid, image_type, highest_rated['_bannerpath']) def __getitem__(self, key): From 4fda5f3fc90419c6adc47e259496624d8e602b5b Mon Sep 17 00:00:00 2001 From: Fernando Date: Mon, 18 Dec 2017 10:50:00 -0200 Subject: [PATCH 45/60] Log more info in PP (#3492) * Log more info * Review --- medusa/post_processor.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/medusa/post_processor.py b/medusa/post_processor.py index cf92f62a03..8bcf3763e9 100644 --- a/medusa/post_processor.py +++ b/medusa/post_processor.py @@ -849,6 +849,8 @@ def _is_priority(self, old_ep_quality, new_ep_quality): level = logger.DEBUG self.log(u'Snatch in history: {0}'.format(self.in_history), level) self.log(u'Manually snatched: {0}'.format(self.manually_searched), level) + self.log(u'Info hash: {0}'.format(self.info_hash), level) + self.log(u'NZB: {0}'.format(bool(self.nzb_name)), level) self.log(u'Current quality: {0}'.format(common.Quality.qualityStrings[old_ep_quality]), level) self.log(u'New quality: {0}'.format(common.Quality.qualityStrings[new_ep_quality]), level) self.log(u'Proper: {0}'.format(self.is_proper), level) From ba27a761b37cf5e08732b9f9bb15d55920a805b5 Mon Sep 17 00:00:00 2001 From: p0ps Date: Mon, 18 Dec 2017 16:17:42 +0100 Subject: [PATCH 46/60] Feature/change order series anime (#3260) * Choose the order of series vs anime for the home page. For users who'd like to have the anime shows on top. * Moved the title into the container, making it align with the posters. * Added the bars to control the order of the lists. * Implemented jquery sortable. * Remove commented code. * Save the order of series/anime after sorting it in the UI. * Store the order in the config.ini. * Improve the sort function. Sensitivity is not allot higher. Important when trying to move large objects. * fixed lint. * Fixed the patch method, used wrong attribute name. * py: Added showListOrder also to the api's route get. * js: Changed info log to error. * js: fixed lint warnings. * Flakes. * Import order. * Fix the test_config test. 
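The drag-and-drop ordering is persisted through the v2 config endpoint, as the JavaScript in the diff below shows (api.patch('config/main', {layout: {show: {showListOrder: [...]}}})). For reference, a rough Python equivalent of that request; the host, port and the x-api-key header are assumptions about a default installation, so adjust them to yours:

    import requests

    response = requests.patch(
        'http://localhost:8081/api/v2/config/main',
        json={'layout': {'show': {'showListOrder': ['Anime', 'Series']}}},
        headers={'x-api-key': 'your-medusa-api-key'},  # placeholder key
    )
    response.raise_for_status()

The accepted list entries mirror app.SHOW_LIST_ORDER, which defaults to ['Anime', 'Series'].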
--- medusa/__main__.py | 2 + medusa/app.py | 2 +- medusa/server/api/v2/base.py | 9 + medusa/server/api/v2/config.py | 3 + medusa/server/web/home/handler.py | 11 +- static/css/style.css | 20 ++- static/js/home/index.js | 48 ++++++ tests/apiv2/test_config.py | 1 + views/partials/home/poster.mako | 269 +++++++++++++++--------------- 9 files changed, 228 insertions(+), 137 deletions(-) diff --git a/medusa/__main__.py b/medusa/__main__.py index 467723f6e0..66d040fa24 100755 --- a/medusa/__main__.py +++ b/medusa/__main__.py @@ -923,6 +923,7 @@ def initialize(self, console_logging=True): app.BACKLOG_PERIOD = check_setting_str(app.CFG, 'GUI', 'backlog_period', 'all') app.BACKLOG_STATUS = check_setting_str(app.CFG, 'GUI', 'backlog_status', 'all') app.LAYOUT_WIDE = check_setting_bool(app.CFG, 'GUI', 'layout_wide', 0) + app.SHOW_LIST_ORDER = check_setting_list(app.CFG, 'GUI', 'show_list_order', app.SHOW_LIST_ORDER) app.FALLBACK_PLEX_ENABLE = check_setting_int(app.CFG, 'General', 'fallback_plex_enable', 1) app.FALLBACK_PLEX_NOTIFICATIONS = check_setting_int(app.CFG, 'General', 'fallback_plex_notifications', 1) @@ -1825,6 +1826,7 @@ def save_config(): new_config['GUI']['poster_sortby'] = app.POSTER_SORTBY new_config['GUI']['poster_sortdir'] = app.POSTER_SORTDIR new_config['GUI']['layout_wide'] = app.LAYOUT_WIDE + new_config['GUI']['show_list_order'] = app.SHOW_LIST_ORDER new_config['Subtitles'] = {} new_config['Subtitles']['use_subtitles'] = int(app.USE_SUBTITLES) diff --git a/medusa/app.py b/medusa/app.py index b36f94162b..c7bc37c4a7 100644 --- a/medusa/app.py +++ b/medusa/app.py @@ -181,7 +181,7 @@ DISPLAY_ALL_SEASONS = True DEFAULT_PAGE = 'home' SEEDERS_LEECHERS_IN_NOTIFY = True - +SHOW_LIST_ORDER = ['Anime', 'Series'] USE_LISTVIEW = False METADATA_KODI = [] diff --git a/medusa/server/api/v2/base.py b/medusa/server/api/v2/base.py index 56c4eba4f7..8ca8f16a40 100644 --- a/medusa/server/api/v2/base.py +++ b/medusa/server/api/v2/base.py @@ -419,6 +419,15 @@ def __init__(self, target_type, attr, validator=None, converter=None, default_va default_value=default_value, post_processor=post_processor) +class ListField(PatchField): + """Patch list fields.""" + + def __init__(self, target_type, attr, validator=None, converter=None, default_value=None, post_processor=None): + """Constructor.""" + super(ListField, self).__init__(target_type, attr, list, validator=validator, converter=converter, + default_value=default_value, post_processor=post_processor) + + class BooleanField(PatchField): """Patch boolean fields.""" diff --git a/medusa/server/api/v2/config.py b/medusa/server/api/v2/config.py index e750a3091f..ac7b054c29 100644 --- a/medusa/server/api/v2/config.py +++ b/medusa/server/api/v2/config.py @@ -14,6 +14,7 @@ BooleanField, EnumField, IntegerField, + ListField, StringField, iter_nested_items, set_nested_value, @@ -60,6 +61,7 @@ class ConfigHandler(BaseRequestHandler): default_value='poster'), 'layout.show.allSeasons': BooleanField(app, 'DISPLAY_ALL_SEASONS'), 'layout.show.specials': BooleanField(app, 'DISPLAY_SHOW_SPECIALS'), + 'layout.show.showListOrder': ListField(app, 'SHOW_LIST_ORDER'), 'theme.name': StringField(app, 'THEME_NAME'), 'backlogOverview.period': StringField(app, 'BACKLOG_PERIOD'), 'backlogOverview.status': StringField(app, 'BACKLOG_STATUS'), @@ -170,6 +172,7 @@ def get(self, identifier, path_param=None): config_data['layout']['show'] = NonEmptyDict() config_data['layout']['show']['allSeasons'] = bool(app.DISPLAY_ALL_SEASONS) config_data['layout']['show']['specials'] = 
bool(app.DISPLAY_SHOW_SPECIALS) + config_data['layout']['show']['showListOrder'] = app.SHOW_LIST_ORDER config_data['selectedRootIndex'] = int(app.SELECTED_ROOT) if app.SELECTED_ROOT is not None else -1 # All paths config_data['backlogOverview'] = NonEmptyDict() config_data['backlogOverview']['period'] = app.BACKLOG_PERIOD diff --git a/medusa/server/web/home/handler.py b/medusa/server/web/home/handler.py index f38c5ce50a..3cc9919199 100644 --- a/medusa/server/web/home/handler.py +++ b/medusa/server/web/home/handler.py @@ -145,7 +145,7 @@ def index(self): shows_dir = None app.SELECTED_ROOT = -1 - shows = [] + series = [] if app.ANIME_SPLIT_HOME: anime = [] for show in app.showList: @@ -154,14 +154,15 @@ def index(self): if show.is_anime: anime.append(show) else: - shows.append(show) - show_lists = [['Shows', shows], ['Anime', anime]] + series.append(show) + + show_lists = [[order, {'Series': series, 'Anime': anime}[order]] for order in app.SHOW_LIST_ORDER] else: for show in app.showList: if shows_dir and not show._location.startswith(shows_dir): continue - shows.append(show) - show_lists = [['Shows', shows]] + series.append(show) + show_lists = [['Series', series]] stats = self.show_statistics() return t.render(title='Home', header='Show List', topmenu='home', show_lists=show_lists, show_stat=stats[0], max_download_count=stats[1], controller='home', action='index') diff --git a/static/css/style.css b/static/css/style.css index e1b35b085b..a057f11fa2 100644 --- a/static/css/style.css +++ b/static/css/style.css @@ -551,9 +551,26 @@ div.xem { .show-grid { display: none; + margin-bottom: 60px!important; } -#container, +.nav-show-list { + position: relative; + margin-top: 4px; + background-color: transparent; + border: none; + float: left; +} + +.nav-show-list .icon-bar { + display: block; + width: 22px; + height: 3px; + margin-top: 4px; + background-color: rgb(51, 51, 51); +} + +#container-series, #container-anime { margin: 0 auto; } @@ -780,6 +797,7 @@ div.xem { .posterview { margin: 0 auto; + position: relative; } td.tvShow a { diff --git a/static/js/home/index.js b/static/js/home/index.js index c05993fce6..cb9d1f2581 100644 --- a/static/js/home/index.js +++ b/static/js/home/index.js @@ -365,4 +365,52 @@ MEDUSA.home.index = function() { $('#showRoot').hide(); } } + + $('#poster-container').sortable({ + appendTo: document.body, + axis: 'y', + items: '> .show-grid', + scroll: false, + tolerance: 'pointer', + helper: 'clone', + handle: 'button.move-show-list', + cancel: '', + sort: function(event, ui) { + const draggedItem = $(ui.item); + const margin = 1.5; + + if (ui.position.top !== ui.originalPosition.top) { + if (ui.position.top > ui.originalPosition.top * margin) { + // Move to bottom + setTimeout(function() { + $(draggedItem).appendTo('#poster-container'); + return false; + }, 400); + } + if (ui.position.top < ui.originalPosition.top / margin) { + // Move to top + setTimeout(function() { + $(draggedItem).prependTo('#poster-container'); + return false; + }, 400); + } + } + }, + update: function(event) { + const showListOrder = $(event.target.children).map(function(index, el) { + return $(el).data('list'); + }); + api.patch('config/main', { + layout: { + show: { + showListOrder: showListOrder.toArray() + } + } + }).then(function(response) { + log.info(response); + }).catch(function(err) { + log.error(err); + }); + } + }); }; diff --git a/tests/apiv2/test_config.py b/tests/apiv2/test_config.py index aecc2da669..f5f2351565 100644 --- a/tests/apiv2/test_config.py +++ 
b/tests/apiv2/test_config.py @@ -114,6 +114,7 @@ def config(monkeypatch, app_config): config_data['layout']['show'] = NonEmptyDict() config_data['layout']['show']['allSeasons'] = bool(app.DISPLAY_ALL_SEASONS) config_data['layout']['show']['specials'] = bool(app.DISPLAY_SHOW_SPECIALS) + config_data['layout']['show']['showListOrder'] = app.SHOW_LIST_ORDER config_data['selectedRootIndex'] = int(app.SELECTED_ROOT) if app.SELECTED_ROOT is not None else -1 # All paths config_data['backlogOverview'] = NonEmptyDict() config_data['backlogOverview']['period'] = app.BACKLOG_PERIOD diff --git a/views/partials/home/poster.mako b/views/partials/home/poster.mako index 098e3b6353..d205de6df7 100644 --- a/views/partials/home/poster.mako +++ b/views/partials/home/poster.mako @@ -9,142 +9,151 @@ %> <%namespace file="/inc_defs.mako" import="renderQualityPill"/>
+ +
% for cur_show_list in show_lists: <% cur_list_type = cur_show_list[0] %> <% my_show_list = list(cur_show_list[1]) %> - % if cur_list_type == "Anime": -

Anime List

-
- % endif -
-
- % for cur_loading_show in app.show_queue_scheduler.action.loadingShowList: - % if cur_loading_show.show is None: -
- -
-
Loading... (${cur_loading_show.show_name})
-
-
- % endif - % endfor - <% my_show_list.sort(lambda x, y: cmp(x.name, y.name)) %> - % for cur_show in my_show_list: - <% - cur_airs_next = '' - cur_snatched = 0 - cur_downloaded = 0 - cur_total = 0 - download_stat_tip = '' - display_status = cur_show.status - if None is not display_status: - if re.search(r'(?i)(?:new|returning)\s*series', cur_show.status): - display_status = 'Continuing' - elif re.search(r'(?i)(?:nded)', cur_show.status): - display_status = 'Ended' - if cur_show.indexerid in show_stat: - cur_airs_next = show_stat[cur_show.indexerid]['ep_airs_next'] - cur_snatched = show_stat[cur_show.indexerid]['ep_snatched'] - if not cur_snatched: - cur_snatched = 0 - cur_downloaded = show_stat[cur_show.indexerid]['ep_downloaded'] - if not cur_downloaded: - cur_downloaded = 0 - cur_total = show_stat[cur_show.indexerid]['ep_total'] - if not cur_total: - cur_total = 0 - download_stat = str(cur_downloaded) - download_stat_tip = "Downloaded: " + str(cur_downloaded) - if cur_snatched: - download_stat = download_stat + "+" + str(cur_snatched) - download_stat_tip = download_stat_tip + " " + "Snatched: " + str(cur_snatched) - download_stat = download_stat + " / " + str(cur_total) - download_stat_tip = download_stat_tip + " " + "Total: " + str(cur_total) - nom = cur_downloaded - if cur_total: - den = cur_total - else: - den = 1 - download_stat_tip = "Unaired" - progressbar_percent = nom * 100 / den - data_date = '6000000000.0' - if cur_airs_next: - data_date = calendar.timegm(sbdatetime.sbdatetime.convert_to_setting(network_timezones.parse_date_time(cur_airs_next, cur_show.airs, cur_show.network)).timetuple()) - elif None is not display_status: - if 'nded' not in display_status and 1 == int(cur_show.paused): - data_date = '5000000500.0' - elif 'ontinu' in display_status: - data_date = '5000000000.0' - elif 'nded' in display_status: - data_date = '5000000100.0' - %> -
-
-
- -
-
- -
-
- From bc342c669138505e06223b5bdf5b13687d1069ce Mon Sep 17 00:00:00 2001 From: p0ps Date: Mon, 18 Dec 2017 16:55:09 +0100 Subject: [PATCH 47/60] Added pubdate parsing for iptorrents. (#3494) * Added pubdate parsing for iptorrents. * Needed to enhance the parse_pubdate method a little. The parse method does not support passing decimals like 1.0 months, 4.2 years. For these granularities I'm casting them to int. * Added tests. * Use round to properly round the decimals. * Added one test for testing rounding up. --- medusa/providers/generic_provider.py | 11 ++++++-- medusa/providers/torrent/html/iptorrents.py | 5 +++- tests/providers/test_generic_provider.py | 28 +++++++++++++++++++++ 3 files changed, 41 insertions(+), 3 deletions(-) diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index c3b9eac7fe..a0196bb06a 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -587,8 +587,15 @@ def parse_pubdate(pubdate, human_time=False, timezone=None, **kwargs): if pubdate.lower() in now_alias: seconds = 0 else: - match = re.search(r'(?P