==> youtube-dl/bin/youtube-dl <==
#!/usr/bin/env python

import youtube_dl

if __name__ == '__main__':
    youtube_dl.main()

==> youtube-dl/devscripts/make_supportedsites.py <==
#!/usr/bin/env python
from __future__ import unicode_literals

import io
import optparse
import os
import sys

# Import youtube_dl
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.insert(0, ROOT_DIR)
import youtube_dl


def main():
    parser = optparse.OptionParser(usage='%prog OUTFILE.md')
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.error('Expected an output filename')

    outfile, = args

    def gen_ies_md(ies):
        for ie in ies:
            ie_md = '**{0}**'.format(ie.IE_NAME)
            ie_desc = getattr(ie, 'IE_DESC', None)
            if ie_desc is False:
                continue
            if ie_desc is not None:
                ie_md += ': {0}'.format(ie.IE_DESC)
            if not ie.working():
                ie_md += ' (Currently broken)'
            yield ie_md

    ies = sorted(youtube_dl.gen_extractors(), key=lambda i: i.IE_NAME.lower())
    out = '# Supported sites\n' + ''.join(
        ' - ' + md + '\n'
        for md in gen_ies_md(ies))

    with io.open(outfile, 'w', encoding='utf-8') as outf:
        outf.write(out)


if __name__ == '__main__':
    main()

==> youtube-dl/devscripts/lazy_load_template.py <==
# coding: utf-8
from __future__ import unicode_literals

import re


class LazyLoadExtractor(object):
    _module = None

    @classmethod
    def ie_key(cls):
        return cls.__name__[:-2]

    def __new__(cls, *args, **kwargs):
        mod = __import__(cls._module, fromlist=(cls.__name__,))
        real_cls = getattr(mod, cls.__name__)
        instance = real_cls.__new__(real_cls)
        instance.__init__(*args, **kwargs)
        return instance

==> youtube-dl/devscripts/gh-pages/sign-versions.py <==
#!/usr/bin/env python3
from __future__ import unicode_literals, with_statement

import rsa
import json
from binascii import hexlify

try:
    input = raw_input
except NameError:
    pass

versions_info = json.load(open('update/versions.json'))
if 'signature' in versions_info:
    del versions_info['signature']

print('Enter the PKCS1 private key, followed by a blank line:')
privkey = b''
while True:
    try:
        line = input()
    except EOFError:
        break
    if line == '':
        break
    privkey += line.encode('ascii') + b'\n'
privkey = rsa.PrivateKey.load_pkcs1(privkey)

signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
print('signature: ' + signature)

versions_info['signature'] = signature
with open('update/versions.json', 'w') as versionsf:
    json.dump(versions_info, versionsf, indent=4, sort_keys=True)

==> youtube-dl/devscripts/gh-pages/update-sites.py <==
#!/usr/bin/env python3
from __future__ import unicode_literals

import sys
import os
import textwrap

# We must be able to import youtube_dl
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

import youtube_dl


def main():
    with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
        template = tmplf.read()

    ie_htmls = []
    for ie in youtube_dl.list_extractors(age_limit=None):
        ie_html = '<b>{}</b>'.format(ie.IE_NAME)
        ie_desc = getattr(ie, 'IE_DESC', None)
        if ie_desc is False:
            continue
        elif ie_desc is not None:
            ie_html += ': {}'.format(ie.IE_DESC)
        if not ie.working():
            ie_html += ' (Currently broken)'
        ie_htmls.append('<li>{}</li>'.format(ie_html))

    template = template.replace('@SITES@', textwrap.indent('\n'.join(ie_htmls), '\t'))

    with open('supportedsites.html', 'w', encoding='utf-8') as sitesf:
        sitesf.write(template)


if __name__ == '__main__':
    main()

==> youtube-dl/devscripts/gh-pages/update-copyright.py <==
#!/usr/bin/env python
# coding: utf-8

from __future__ import with_statement, unicode_literals

import datetime
import glob
import io  # For Python 2 compatibility
import os
import re

year = str(datetime.datetime.now().year)
for fn in glob.glob('*.html*'):
    with io.open(fn, encoding='utf-8') as f:
        content = f.read()
    newc = re.sub(r'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', 'Copyright © 2006-' + year, content)
    if content != newc:
        tmpFn = fn + '.part'
        with io.open(tmpFn, 'wt', encoding='utf-8') as outf:
            outf.write(newc)
        os.rename(tmpFn, fn)

==> youtube-dl/devscripts/gh-pages/generate-download.py <==
#!/usr/bin/env python3
from __future__ import unicode_literals

import hashlib
import urllib.request
import json

versions_info = json.load(open('update/versions.json'))
version = versions_info['latest']
URL = versions_info['versions'][version]['bin'][0]

data = urllib.request.urlopen(URL).read()

# Read template page
with open('download.html.in', 'r', encoding='utf-8') as tmplf:
    template = tmplf.read()

sha256sum = hashlib.sha256(data).hexdigest()
template = template.replace('@PROGRAM_VERSION@', version)
template = template.replace('@PROGRAM_URL@', URL)
template = template.replace('@PROGRAM_SHA256SUM@', sha256sum)
template = template.replace('@EXE_URL@', versions_info['versions'][version]['exe'][0])
template = template.replace('@EXE_SHA256SUM@', versions_info['versions'][version]['exe'][1])
template = template.replace('@TAR_URL@', versions_info['versions'][version]['tar'][0])
template = template.replace('@TAR_SHA256SUM@', versions_info['versions'][version]['tar'][1])
with open('download.html', 'w', encoding='utf-8') as dlf:
    dlf.write(template)

==> youtube-dl/devscripts/gh-pages/add-version.py <==
#!/usr/bin/env python3
from __future__ import unicode_literals

import json
import sys
import hashlib
import os.path

if len(sys.argv) <= 1:
    print('Specify the version number as parameter')
    sys.exit()
version = sys.argv[1]

with open('update/LATEST_VERSION', 'w') as f:
    f.write(version)

versions_info = json.load(open('update/versions.json'))
if 'signature' in versions_info:
    del versions_info['signature']

new_version = {}

filenames = {
    'bin': 'youtube-dl',
    'exe': 'youtube-dl.exe',
    'tar': 'youtube-dl-%s.tar.gz' % version}
build_dir = os.path.join('..', '..', 'build', version)
for key, filename in filenames.items():
    url = 'https://yt-dl.org/downloads/%s/%s' % (version, filename)
    fn = os.path.join(build_dir, filename)
    with open(fn, 'rb') as f:
        data = f.read()
    if not data:
        raise ValueError('File %s is empty!' % fn)
    sha256sum = hashlib.sha256(data).hexdigest()
    new_version[key] = (url, sha256sum)

versions_info['versions'][version] = new_version
versions_info['latest'] = version

with open('update/versions.json', 'w') as jsonf:
    json.dump(versions_info, jsonf, indent=4, sort_keys=True)

==> youtube-dl/devscripts/gh-pages/update-feed.py <==
#!/usr/bin/env python3
from __future__ import unicode_literals

import datetime
import io
import json
import textwrap


atom_template = textwrap.dedent("""\
    <?xml version="1.0" encoding="utf-8"?>
    <feed xmlns="http://www.w3.org/2005/Atom">
        <link rel="self" href="http://rg3.github.io/youtube-dl/update/releases.atom" />
        <title>youtube-dl releases</title>
        <id>https://yt-dl.org/feed/youtube-dl-updates-feed</id>
        <updated>@TIMESTAMP@</updated>
        @ENTRIES@
    </feed>""")

entry_template = textwrap.dedent("""
    <entry>
        <id>https://yt-dl.org/feed/youtube-dl-updates-feed/youtube-dl-@VERSION@</id>
        <title>New version @VERSION@</title>
        <link href="http://rg3.github.io/youtube-dl" />
        <content type="xhtml">
            <div xmlns="http://www.w3.org/1999/xhtml">
                Downloads available at
                <a href="https://yt-dl.org/downloads/@VERSION@/">
                    https://yt-dl.org/downloads/@VERSION@/
                </a>
            </div>
        </content>
        <author>
            <name>The youtube-dl maintainers</name>
        </author>
        <updated>@TIMESTAMP@</updated>
    </entry>""")

now = datetime.datetime.now()
now_iso = now.isoformat() + 'Z'

atom_template = atom_template.replace('@TIMESTAMP@', now_iso)

versions_info = json.load(open('update/versions.json'))
versions = list(versions_info['versions'].keys())
versions.sort()

entries = []
for v in versions:
    fields = v.split('.')
    year, month, day = map(int, fields[:3])
    faked = 0
    patchlevel = 0
    while True:
        try:
            datetime.date(year, month, day)
        except ValueError:
            day -= 1
            faked += 1
            assert day > 0
            continue
        break
    if len(fields) >= 4:
        try:
            patchlevel = int(fields[3])
        except ValueError:
            patchlevel = 1

    timestamp = '%04d-%02d-%02dT00:%02d:%02dZ' % (year, month, day, faked, patchlevel)

    entry = entry_template.replace('@TIMESTAMP@', timestamp)
    entry = entry.replace('@VERSION@', v)
    entries.append(entry)

entries_str = textwrap.indent(''.join(entries), '\t')
atom_template = atom_template.replace('@ENTRIES@', entries_str)

with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
    atom_file.write(atom_template)

==> youtube-dl/devscripts/release.sh <==
#!/bin/bash

# IMPORTANT: the following assumptions are made
# * the GH repo is on the origin remote
# * the gh-pages branch is named so locally
# * the git config user.signingkey is properly set

# You will need
# pip install coverage nose rsa wheel

# TODO
# release notes
# make hash on local files

set -e

skip_tests=true
gpg_sign_commits=""
buildserver='localhost:8142'

while true
do
case "$1" in
    --run-tests)
        skip_tests=false
        shift
    ;;
    --gpg-sign-commits|-S)
        gpg_sign_commits="-S"
        shift
    ;;
    --buildserver)
        buildserver="$2"
        shift 2
    ;;
    --*)
        echo "ERROR: unknown option $1"
        exit 1
    ;;
    *)
        break
    ;;
esac
done

if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.06"; exit 1; fi
version="$1"
major_version=$(echo "$version" | sed -n 's#^\([0-9]*\.[0-9]*\.[0-9]*\).*#\1#p')
if test "$major_version" '!=' "$(date '+%Y.%m.%d')"; then
    echo "$version does not start with today's date!"
    exit 1
fi

if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi
if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
useless_files=$(find youtube_dl -type f -not -name '*.py')
if [ ! -z "$useless_files" ]; then echo "ERROR: Non-.py files in youtube_dl: $useless_files"; exit 1; fi
if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi
if ! type pandoc >/dev/null 2>/dev/null; then echo 'ERROR: pandoc is missing'; exit 1; fi
if ! python3 -c 'import rsa' 2>/dev/null; then echo 'ERROR: python3-rsa is missing'; exit 1; fi
if ! python3 -c 'import wheel' 2>/dev/null; then echo 'ERROR: wheel is missing'; exit 1; fi

read -p "Is ChangeLog up to date? (y/n) " -n 1
if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi

/bin/echo -e "\n### First of all, testing..."
make clean
if $skip_tests ; then
    echo 'SKIPPING TESTS'
else
    nosetests --verbose --with-coverage --cover-package=youtube_dl --cover-html test --stop || exit 1
fi

/bin/echo -e "\n### Changing version in version.py..."
sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py

/bin/echo -e "\n### Changing version in ChangeLog..."
sed -i "s/<unreleased>/$version/" ChangeLog

/bin/echo -e "\n### Committing documentation, templates and youtube_dl/version.py..."
make README.md CONTRIBUTING.md .github/ISSUE_TEMPLATE.md supportedsites
git add README.md CONTRIBUTING.md .github/ISSUE_TEMPLATE.md docs/supportedsites.md youtube_dl/version.py ChangeLog
git commit $gpg_sign_commits -m "release $version"

/bin/echo -e "\n### Now tagging, signing and pushing..."
git tag -s -m "Release $version" "$version"
git show "$version"
read -p "Is it good, can I push? (y/n) " -n 1
if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
echo
MASTER=$(git rev-parse --abbrev-ref HEAD)
git push origin $MASTER:master
git push origin "$version"

/bin/echo -e "\n### OK, now it is time to build the binaries..."
REV=$(git rev-parse HEAD)
make youtube-dl youtube-dl.tar.gz
read -p "VM running? (y/n) " -n 1
wget "http://$buildserver/build/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe
mkdir -p "build/$version"
mv youtube-dl youtube-dl.exe "build/$version"
mv youtube-dl.tar.gz "build/$version/youtube-dl-$version.tar.gz"
RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz"
(cd build/$version/ && md5sum $RELEASE_FILES > MD5SUMS)
(cd build/$version/ && sha1sum $RELEASE_FILES > SHA1SUMS)
(cd build/$version/ && sha256sum $RELEASE_FILES > SHA2-256SUMS)
(cd build/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)

/bin/echo -e "\n### Signing and uploading the new binaries to GitHub..."
for f in $RELEASE_FILES; do gpg --passphrase-repeat 5 --detach-sig "build/$version/$f"; done

ROOT=$(pwd)
python devscripts/create-github-release.py ChangeLog $version "$ROOT/build/$version"

ssh ytdl@yt-dl.org "sh html/update_latest.sh $version"

/bin/echo -e "\n### Now switching to gh-pages..."
git clone --branch gh-pages --single-branch . build/gh-pages
(
    set -e
    ORIGIN_URL=$(git config --get remote.origin.url)
    cd build/gh-pages
    "$ROOT/devscripts/gh-pages/add-version.py" $version
    "$ROOT/devscripts/gh-pages/update-feed.py"
    "$ROOT/devscripts/gh-pages/sign-versions.py" < "$ROOT/updates_key.pem"
    "$ROOT/devscripts/gh-pages/generate-download.py"
    "$ROOT/devscripts/gh-pages/update-copyright.py"
    "$ROOT/devscripts/gh-pages/update-sites.py"
    git add *.html *.html.in update
    git commit $gpg_sign_commits -m "release $version"
    git push "$ROOT" gh-pages
    git push "$ORIGIN_URL" gh-pages
)
rm -rf build

make pypi-files
echo "Uploading to PyPi ..."
python setup.py sdist bdist_wheel upload
make clean

/bin/echo -e "\n### DONE!"
==> youtube-dl/devscripts/run_tests.sh <==
#!/bin/bash

# Keep this list in sync with the `offlinetest` target in Makefile
DOWNLOAD_TESTS="age_restriction|download|iqiyi_sdk_interpreter|socks|subtitles|write_annotations|youtube_lists|youtube_signature"

test_set=""
multiprocess_args=""

case "$YTDL_TEST_SET" in
    core)
        test_set="-I test_($DOWNLOAD_TESTS)\.py"
    ;;
    download)
        test_set="-I test_(?!$DOWNLOAD_TESTS).+\.py"
        multiprocess_args="--processes=4 --process-timeout=540"
    ;;
    *)
        break
    ;;
esac

nosetests test --verbose $test_set $multiprocess_args

==> youtube-dl/devscripts/create-github-release.py <==
#!/usr/bin/env python
from __future__ import unicode_literals

import base64
import io
import json
import mimetypes
import netrc
import optparse
import os
import re
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.compat import (
    compat_basestring,
    compat_input,
    compat_getpass,
    compat_print,
    compat_urllib_request,
)
from youtube_dl.utils import (
    make_HTTPS_handler,
    sanitized_Request,
)


class GitHubReleaser(object):
    _API_URL = 'https://api.github.com/repos/rg3/youtube-dl/releases'
    _UPLOADS_URL = 'https://uploads.github.com/repos/rg3/youtube-dl/releases/%s/assets?name=%s'
    _NETRC_MACHINE = 'github.com'

    def __init__(self, debuglevel=0):
        self._init_github_account()
        https_handler = make_HTTPS_handler({}, debuglevel=debuglevel)
        self._opener = compat_urllib_request.build_opener(https_handler)

    def _init_github_account(self):
        try:
            info = netrc.netrc().authenticators(self._NETRC_MACHINE)
            if info is not None:
                self._username = info[0]
                self._password = info[2]
                compat_print('Using GitHub credentials found in .netrc...')
                return
            else:
                compat_print('No GitHub credentials found in .netrc')
        except (IOError, netrc.NetrcParseError):
            compat_print('Unable to parse .netrc')
        self._username = compat_input(
            'Type your GitHub username or email address and press [Return]: ')
        self._password = compat_getpass(
            'Type your GitHub password and press [Return]: ')

    def _call(self, req):
        if isinstance(req, compat_basestring):
            req = sanitized_Request(req)
        # Authorizing manually since GitHub does not response with 401 with
        # WWW-Authenticate header set (see
        # https://developer.github.com/v3/#basic-authentication)
        b64 = base64.b64encode(
            ('%s:%s' % (self._username, self._password)).encode('utf-8')).decode('ascii')
        req.add_header('Authorization', 'Basic %s' % b64)
        response = self._opener.open(req).read().decode('utf-8')
        return json.loads(response)

    def list_releases(self):
        return self._call(self._API_URL)

    def create_release(self, tag_name, name=None, body='', draft=False, prerelease=False):
        data = {
            'tag_name': tag_name,
            'target_commitish': 'master',
            'name': name,
            'body': body,
            'draft': draft,
            'prerelease': prerelease,
        }
        req = sanitized_Request(self._API_URL, json.dumps(data).encode('utf-8'))
        return self._call(req)

    def create_asset(self, release_id, asset):
        asset_name = os.path.basename(asset)
        url = self._UPLOADS_URL % (release_id, asset_name)
        # Our files are small enough to be loaded directly into memory.
        data = open(asset, 'rb').read()
        req = sanitized_Request(url, data)
        mime_type, _ = mimetypes.guess_type(asset_name)
        req.add_header('Content-Type', mime_type or 'application/octet-stream')
        return self._call(req)


def main():
    parser = optparse.OptionParser(usage='%prog CHANGELOG VERSION BUILDPATH')
    options, args = parser.parse_args()
    if len(args) != 3:
        parser.error('Expected a version and a build directory')

    changelog_file, version, build_path = args

    with io.open(changelog_file, encoding='utf-8') as inf:
        changelog = inf.read()

    mobj = re.search(r'(?s)version %s\n{2}(.+?)\n{3}' % version, changelog)
    body = mobj.group(1) if mobj else ''

    releaser = GitHubReleaser()

    new_release = releaser.create_release(
        version, name='youtube-dl %s' % version, body=body)
    release_id = new_release['id']

    for asset in os.listdir(build_path):
        compat_print('Uploading %s...' % asset)
        releaser.create_asset(release_id, os.path.join(build_path, asset))


if __name__ == '__main__':
    main()

==> youtube-dl/devscripts/make_issue_template.py <==
#!/usr/bin/env python
from __future__ import unicode_literals

import io
import optparse


def main():
    parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
    options, args = parser.parse_args()
    if len(args) != 2:
        parser.error('Expected an input and an output filename')

    infile, outfile = args

    with io.open(infile, encoding='utf-8') as inf:
        issue_template_tmpl = inf.read()

    # Get the version from youtube_dl/version.py without importing the package
    exec(compile(open('youtube_dl/version.py').read(),
                 'youtube_dl/version.py', 'exec'))

    out = issue_template_tmpl % {'version': locals()['__version__']}

    with io.open(outfile, 'w', encoding='utf-8') as outf:
        outf.write(out)


if __name__ == '__main__':
    main()

==> youtube-dl/devscripts/posix-locale.sh <==
# source this file in your shell to get a POSIX locale (which will break many
# programs, but that's kind of the point)

export LC_ALL=POSIX
export LANG=POSIX
export LANGUAGE=POSIX

==> youtube-dl/devscripts/show-downloads-statistics.py <==
#!/usr/bin/env python
from __future__ import unicode_literals

import itertools
import json
import os
import re
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.compat import (
    compat_print,
    compat_urllib_request,
)
from youtube_dl.utils import format_bytes


def format_size(bytes):
    return '%s (%d bytes)' % (format_bytes(bytes), bytes)


total_bytes = 0

for page in itertools.count(1):
    releases = json.loads(compat_urllib_request.urlopen(
        'https://api.github.com/repos/rg3/youtube-dl/releases?page=%s' % page
    ).read().decode('utf-8'))

    if not releases:
        break

    for release in releases:
        compat_print(release['name'])
        for asset in release['assets']:
            asset_name = asset['name']
            total_bytes += asset['download_count'] * asset['size']
            if all(not re.match(p, asset_name) for p in (
                    r'^youtube-dl$',
                    r'^youtube-dl-\d{4}\.\d{2}\.\d{2}(?:\.\d+)?\.tar\.gz$',
                    r'^youtube-dl\.exe$')):
                continue
            compat_print(
                ' %s size: %s downloads: %d'
                % (asset_name, format_size(asset['size']), asset['download_count']))

compat_print('total downloads traffic: %s' % format_size(total_bytes))

==> youtube-dl/devscripts/bash-completion.py <==
#!/usr/bin/env python
from __future__ import unicode_literals

import os
from os.path import dirname as dirn
import sys

sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl

BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"


def build_completion(opt_parser):
    opts_flag = []
    for group in opt_parser.option_groups:
        for option in group.option_list:
            # for every long flag
            opts_flag.append(option.get_opt_string())
    with open(BASH_COMPLETION_TEMPLATE) as f:
        template = f.read()
    with open(BASH_COMPLETION_FILE, "w") as f:
        # just using the special char
        filled_template = template.replace("{{flags}}", " ".join(opts_flag))
        f.write(filled_template)


parser = youtube_dl.parseOpts()[0]
build_completion(parser)

==> youtube-dl/devscripts/prepare_manpage.py <==
from __future__ import unicode_literals

import io
import optparse
import os.path
import re

ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
README_FILE = os.path.join(ROOT_DIR, 'README.md')

PREFIX = r'''%YOUTUBE-DL(1)

# NAME

youtube\-dl \- download videos from youtube.com or other video platforms

# SYNOPSIS

**youtube-dl** \[OPTIONS\] URL [URL...]

'''


def main():
    parser = optparse.OptionParser(usage='%prog OUTFILE.md')
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.error('Expected an output filename')

    outfile, = args

    with io.open(README_FILE, encoding='utf-8') as f:
        readme = f.read()

    readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme)
    readme = re.sub(r'\s+youtube-dl \[OPTIONS\] URL \[URL\.\.\.\]', '', readme)
    readme = PREFIX + readme

    readme = filter_options(readme)

    with io.open(outfile, 'w', encoding='utf-8') as outf:
        outf.write(readme)


def filter_options(readme):
    ret = ''
    in_options = False
    for line in readme.split('\n'):
        if line.startswith('# '):
            if line[2:].startswith('OPTIONS'):
                in_options = True
            else:
                in_options = False

        if in_options:
            if line.lstrip().startswith('-'):
                split = re.split(r'\s{2,}', line.lstrip())
                # Description string may start with `-` as well. If there is
                # only one piece then it's a description bit, not an option.
                if len(split) > 1:
                    option, description = split
                    split_option = option.split(' ')

                    if not split_option[-1].startswith('-'):  # metavar
                        option = ' '.join(split_option[:-1] + ['*%s*' % split_option[-1]])

                    # Pandoc's definition_lists. See http://pandoc.org/README.html
                    # for more information.
                    ret += '\n%s\n:   %s\n' % (option, description)
                    continue
            ret += line.lstrip() + '\n'
        else:
            ret += line + '\n'

    return ret


if __name__ == '__main__':
    main()

==> youtube-dl/devscripts/install_jython.sh <==
#!/bin/bash

wget http://central.maven.org/maven2/org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar
java -jar jython-installer-2.7.1.jar -s -d "$HOME/jython"
$HOME/jython/bin/jython -m pip install nose

==> youtube-dl/devscripts/fish-completion.py <==
#!/usr/bin/env python
from __future__ import unicode_literals

import optparse
import os
from os.path import dirname as dirn
import sys

sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
from youtube_dl.utils import shell_quote

FISH_COMPLETION_FILE = 'youtube-dl.fish'
FISH_COMPLETION_TEMPLATE = 'devscripts/fish-completion.in'

EXTRA_ARGS = {
    'recode-video': ['--arguments', 'mp4 flv ogg webm mkv', '--exclusive'],

    # Options that need a file parameter
    'download-archive': ['--require-parameter'],
    'cookies': ['--require-parameter'],
    'load-info': ['--require-parameter'],
    'batch-file': ['--require-parameter'],
}


def build_completion(opt_parser):
    commands = []

    for group in opt_parser.option_groups:
        for option in group.option_list:
            long_option = option.get_opt_string().strip('-')
            complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option]
            if option._short_opts:
                complete_cmd += ['--short-option', option._short_opts[0].strip('-')]
            if option.help != optparse.SUPPRESS_HELP:
                complete_cmd += ['--description', option.help]
            complete_cmd.extend(EXTRA_ARGS.get(long_option, []))
            commands.append(shell_quote(complete_cmd))

    with open(FISH_COMPLETION_TEMPLATE) as f:
        template = f.read()

    filled_template = template.replace('{{commands}}', '\n'.join(commands))
    with open(FISH_COMPLETION_FILE, 'w') as f:
        f.write(filled_template)


parser = youtube_dl.parseOpts()[0]
build_completion(parser)

==> youtube-dl/devscripts/make_contributing.py <==
#!/usr/bin/env python
from __future__ import unicode_literals

import io
import optparse
import re


def main():
    parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
    options, args = parser.parse_args()
    if len(args) != 2:
        parser.error('Expected an input and an output filename')

    infile, outfile = args

    with io.open(infile, encoding='utf-8') as inf:
        readme = inf.read()

    bug_text = re.search(
        r'(?s)#\s*BUGS\s*[^\n]*\s*(.*?)#\s*COPYRIGHT', readme).group(1)
    dev_text = re.search(
        r'(?s)(#\s*DEVELOPER INSTRUCTIONS.*?)#\s*EMBEDDING YOUTUBE-DL', readme).group(1)

    out = bug_text + dev_text

    with io.open(outfile, 'w', encoding='utf-8') as outf:
        outf.write(out)


if __name__ == '__main__':
    main()

==> youtube-dl/devscripts/wine-py2exe.sh <==
#!/bin/bash

# Run with as parameter a setup.py that works in the current directory
# e.g. no os.chdir()
# It will run twice, the first time will crash

set -e

SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )"

if [ ! -d wine-py2exe ]; then

    sudo apt-get install wine1.3 axel bsdiff

    mkdir wine-py2exe

    cd wine-py2exe
    export WINEPREFIX=`pwd`

    axel -a "http://www.python.org/ftp/python/2.7/python-2.7.msi"
    axel -a "http://downloads.sourceforge.net/project/py2exe/py2exe/0.6.9/py2exe-0.6.9.win32-py2.7.exe"
    #axel -a "http://winetricks.org/winetricks"

    # http://appdb.winehq.org/objectManager.php?sClass=version&iId=21957
    echo "Follow python setup on screen"
    wine msiexec /i python-2.7.msi

    echo "Follow py2exe setup on screen"
    wine py2exe-0.6.9.win32-py2.7.exe

    #echo "Follow Microsoft Visual C++ 2008 Redistributable Package setup on screen"
    #bash winetricks vcrun2008

    rm py2exe-0.6.9.win32-py2.7.exe
    rm python-2.7.msi
    #rm winetricks

    # http://bugs.winehq.org/show_bug.cgi?id=3591
    mv drive_c/Python27/Lib/site-packages/py2exe/run.exe drive_c/Python27/Lib/site-packages/py2exe/run.exe.backup
    bspatch drive_c/Python27/Lib/site-packages/py2exe/run.exe.backup drive_c/Python27/Lib/site-packages/py2exe/run.exe "$SCRIPT_DIR/SizeOfImage.patch"
    mv drive_c/Python27/Lib/site-packages/py2exe/run_w.exe drive_c/Python27/Lib/site-packages/py2exe/run_w.exe.backup
    bspatch drive_c/Python27/Lib/site-packages/py2exe/run_w.exe.backup drive_c/Python27/Lib/site-packages/py2exe/run_w.exe "$SCRIPT_DIR/SizeOfImage_w.patch"

    cd -
else
    export WINEPREFIX="$( cd wine-py2exe && pwd )"
fi

wine "C:\\Python27\\python.exe" "$1" py2exe > "py2exe.log" 2>&1 || true
echo '# Copying python27.dll' >> "py2exe.log"
cp "$WINEPREFIX/drive_c/windows/system32/python27.dll" build/bdist.win32/winexe/bundle-2.7/
wine "C:\\Python27\\python.exe" "$1" py2exe >> "py2exe.log" 2>&1

==> youtube-dl/devscripts/check-porn.py <==
#!/usr/bin/env python
from __future__ import unicode_literals

"""
This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check
if we are not 'age_limit' tagging some porn site

A second approach implemented relies on a list of porn domains, to activate it
pass the list filename as the only argument
"""

# Allow direct execution
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import gettestcases
from youtube_dl.utils import compat_urllib_parse_urlparse
from youtube_dl.utils import compat_urllib_request

if len(sys.argv) > 1:
    METHOD = 'LIST'
    LIST = open(sys.argv[1]).read().decode('utf8').strip()
else:
    METHOD = 'EURISTIC'

for test in gettestcases():
    if METHOD == 'EURISTIC':
        try:
            webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
        except Exception:
            print('\nFail: {0}'.format(test['name']))
            continue

        webpage = webpage.decode('utf8', 'replace')

        RESULT = 'porn' in webpage.lower()

    elif METHOD == 'LIST':
        domain = compat_urllib_parse_urlparse(test['url']).netloc
        if not domain:
            print('\nFail: {0}'.format(test['name']))
            continue
        domain = '.'.join(domain.split('.')[-2:])

        RESULT = ('.' + domain + '\n' in LIST or '\n' + domain + '\n' in LIST)

    if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict'] or
                   test['info_dict']['age_limit'] != 18):
        print('\nPotential missing age_limit check: {0}'.format(test['name']))
    elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict'] and
                         test['info_dict']['age_limit'] == 18):
        print('\nPotential false negative: {0}'.format(test['name']))
    else:
        sys.stdout.write('.')
    sys.stdout.flush()

print()

==> youtube-dl/devscripts/bash-completion.in <==
__youtube_dl()
{
    local cur prev opts fileopts diropts keywords
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    prev="${COMP_WORDS[COMP_CWORD-1]}"
    opts="{{flags}}"
    keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
    fileopts="-a|--batch-file|--download-archive|--cookies|--load-info"
    diropts="--cache-dir"

    if [[ ${prev} =~ ${fileopts} ]]; then
        COMPREPLY=( $(compgen -f -- ${cur}) )
        return 0
    elif [[ ${prev} =~ ${diropts} ]]; then
        COMPREPLY=( $(compgen -d -- ${cur}) )
        return 0
    fi

    if [[ ${cur} =~ : ]]; then
        COMPREPLY=( $(compgen -W "${keywords}" -- ${cur}) )
        return 0
    elif [[ ${cur} == * ]] ; then
        COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
        return 0
    fi
}

complete -F __youtube_dl youtube-dl

==> youtube-dl/devscripts/SizeOfImage.patch <==
(binary bsdiff patch; contents not representable as text)

==> youtube-dl/devscripts/fish-completion.in <==
{{commands}}

complete --command youtube-dl --arguments ":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"

==> youtube-dl/devscripts/make_readme.py <==
from __future__ import unicode_literals

import io
import sys
import re

README_FILE = 'README.md'
helptext = sys.stdin.read()

if isinstance(helptext, bytes):
    helptext = helptext.decode('utf-8')

with io.open(README_FILE, encoding='utf-8') as f:
    oldreadme = f.read()

header = oldreadme[:oldreadme.index('# OPTIONS')]
footer = oldreadme[oldreadme.index('# CONFIGURATION'):]

options = helptext[helptext.index('  General Options:') + 19:]
options = re.sub(r'(?m)^  (\w.+)$', r'## \1', options)
options = '# OPTIONS\n' + options + '\n'

with io.open(README_FILE, 'w', encoding='utf-8') as f:
    f.write(header)
    f.write(options)
    f.write(footer)

==> youtube-dl/devscripts/buildserver.py <==
#!/usr/bin/python3

import argparse
import ctypes
import functools
import shutil
import subprocess
import sys
import tempfile
import threading
import traceback
import os.path

sys.path.insert(0, os.path.dirname(os.path.dirname((os.path.abspath(__file__)))))
from youtube_dl.compat import (
    compat_input,
    compat_http_server,
    compat_str,
    compat_urlparse,
)

# These are not used outside of buildserver.py thus not in compat.py

try:
    import winreg as compat_winreg
except ImportError:  # Python 2
    import _winreg as compat_winreg

try:
    import socketserver as compat_socketserver
except ImportError:  # Python 2
    import SocketServer as compat_socketserver


class BuildHTTPServer(compat_socketserver.ThreadingMixIn, compat_http_server.HTTPServer):
    allow_reuse_address = True


advapi32 = ctypes.windll.advapi32

SC_MANAGER_ALL_ACCESS = 0xf003f
SC_MANAGER_CREATE_SERVICE = 0x02
SERVICE_WIN32_OWN_PROCESS = 0x10
SERVICE_AUTO_START = 0x2
SERVICE_ERROR_NORMAL = 0x1
DELETE = 0x00010000
SERVICE_STATUS_START_PENDING = 0x00000002
SERVICE_STATUS_RUNNING = 0x00000004
SERVICE_ACCEPT_STOP = 0x1

SVCNAME = 'youtubedl_builder'

LPTSTR = ctypes.c_wchar_p
START_CALLBACK = ctypes.WINFUNCTYPE(None, ctypes.c_int, ctypes.POINTER(LPTSTR))


class SERVICE_TABLE_ENTRY(ctypes.Structure):
    _fields_ = [
        ('lpServiceName', LPTSTR),
        ('lpServiceProc', START_CALLBACK)
    ]


HandlerEx = ctypes.WINFUNCTYPE(
    ctypes.c_int,     # return
    ctypes.c_int,     # dwControl
    ctypes.c_int,     # dwEventType
    ctypes.c_void_p,  # lpEventData,
    ctypes.c_void_p,  # lpContext,
)


def _ctypes_array(c_type, py_array):
    ar = (c_type * len(py_array))()
    ar[:] = py_array
    return ar


def win_OpenSCManager():
    res = advapi32.OpenSCManagerW(None, None, SC_MANAGER_ALL_ACCESS)
    if not res:
        raise Exception('Opening service manager failed - '
                        'are you running this as administrator?')
    return res


def win_install_service(service_name, cmdline):
    manager = win_OpenSCManager()
    try:
        h = advapi32.CreateServiceW(
            manager, service_name, None,
            SC_MANAGER_CREATE_SERVICE, SERVICE_WIN32_OWN_PROCESS,
            SERVICE_AUTO_START, SERVICE_ERROR_NORMAL,
            cmdline, None, None, None, None, None)
        if not h:
            raise OSError('Service creation failed: %s' % ctypes.FormatError())

        advapi32.CloseServiceHandle(h)
    finally:
        advapi32.CloseServiceHandle(manager)


def win_uninstall_service(service_name):
    manager = win_OpenSCManager()
    try:
        h = advapi32.OpenServiceW(manager, service_name, DELETE)
        if not h:
            raise OSError('Could not find service %s: %s' % (
                service_name, ctypes.FormatError()))

        try:
            if not advapi32.DeleteService(h):
                raise OSError('Deletion failed: %s' % ctypes.FormatError())
        finally:
            advapi32.CloseServiceHandle(h)
    finally:
        advapi32.CloseServiceHandle(manager)


def win_service_report_event(service_name, msg, is_error=True):
    with open('C:/sshkeys/log', 'a', encoding='utf-8') as f:
        f.write(msg + '\n')

    event_log = advapi32.RegisterEventSourceW(None, service_name)
    if not event_log:
        raise OSError('Could not report event: %s' % ctypes.FormatError())

    try:
        type_id = 0x0001 if is_error else 0x0004
        event_id = 0xc0000000 if is_error else 0x40000000
        lines = _ctypes_array(LPTSTR, [msg])

        if not advapi32.ReportEventW(
                event_log, type_id, 0, event_id, None,
                len(lines), 0, lines, None):
            raise OSError('Event reporting failed: %s' % ctypes.FormatError())
    finally:
        advapi32.DeregisterEventSource(event_log)


def win_service_handler(stop_event, *args):
    try:
        raise ValueError('Handler called with args ' + repr(args))
        TODO
    except Exception as e:
        tb = traceback.format_exc()
        msg = str(e) + '\n' + tb
        win_service_report_event(service_name, msg, is_error=True)
        raise


def win_service_set_status(handle, status_code):
    svcStatus = SERVICE_STATUS()
    svcStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS
    svcStatus.dwCurrentState = status_code
    svcStatus.dwControlsAccepted = SERVICE_ACCEPT_STOP

    svcStatus.dwServiceSpecificExitCode = 0

    if not advapi32.SetServiceStatus(handle, ctypes.byref(svcStatus)):
        raise OSError('SetServiceStatus failed: %r' % ctypes.FormatError())


def win_service_main(service_name, real_main, argc, argv_raw):
    try:
        # args = [argv_raw[i].value for i in range(argc)]
        stop_event = threading.Event()
        handler = HandlerEx(functools.partial(stop_event, win_service_handler))
        h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None)
        if not h:
            raise OSError('Handler registration failed: %s' %
                          ctypes.FormatError())

        TODO
    except Exception as e:
        tb = traceback.format_exc()
        msg = str(e) + '\n' + tb
        win_service_report_event(service_name, msg, is_error=True)
        raise


def win_service_start(service_name, real_main):
    try:
        cb = START_CALLBACK(
            functools.partial(win_service_main, service_name, real_main))
        dispatch_table = _ctypes_array(SERVICE_TABLE_ENTRY, [
            SERVICE_TABLE_ENTRY(
                service_name,
                cb
            ),
            SERVICE_TABLE_ENTRY(None, ctypes.cast(None, START_CALLBACK))
        ])

        if not advapi32.StartServiceCtrlDispatcherW(dispatch_table):
            raise OSError('ctypes start failed: %s' % ctypes.FormatError())
    except Exception as e:
        tb = traceback.format_exc()
        msg = str(e) + '\n' + tb
        win_service_report_event(service_name, msg, is_error=True)
        raise


def main(args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--install',
                        action='store_const', dest='action', const='install',
                        help='Launch at Windows startup')
    parser.add_argument('-u', '--uninstall',
                        action='store_const', dest='action', const='uninstall',
                        help='Remove Windows service')
    parser.add_argument('-s', '--service',
                        action='store_const', dest='action', const='service',
                        help='Run as a Windows service')
    parser.add_argument('-b', '--bind', metavar='<host:port>',
                        action='store', default='0.0.0.0:8142',
                        help='Bind to host:port (default %default)')
    options = parser.parse_args(args=args)

    if options.action == 'install':
        fn = os.path.abspath(__file__).replace('v:', '\\\\vboxsrv\\vbox')
        cmdline = '%s %s -s -b %s' % (sys.executable, fn, options.bind)
        win_install_service(SVCNAME, cmdline)
        return

    if options.action == 'uninstall':
        win_uninstall_service(SVCNAME)
        return

    if options.action == 'service':
        win_service_start(SVCNAME, main)
        return

    host, port_str = options.bind.split(':')
    port = int(port_str)

    print('Listening on %s:%d' % (host, port))
    srv = BuildHTTPServer((host, port), BuildHTTPRequestHandler)
    thr = threading.Thread(target=srv.serve_forever)
    thr.start()
    compat_input('Press ENTER to shut down')
    srv.shutdown()
    thr.join()


def rmtree(path):
    for name in os.listdir(path):
        fname = os.path.join(path, name)
        if os.path.isdir(fname):
            rmtree(fname)
        else:
            os.chmod(fname, 0o666)
            os.remove(fname)
    os.rmdir(path)


class BuildError(Exception):
    def __init__(self, output, code=500):
        self.output = output
        self.code = code

    def __str__(self):
        return self.output


class HTTPError(BuildError):
    pass


class PythonBuilder(object):
    def __init__(self, **kwargs):
        python_version = kwargs.pop('python', '3.4')
        python_path = None
        for node in ('Wow6432Node\\', ''):
            try:
                key = compat_winreg.OpenKey(
                    compat_winreg.HKEY_LOCAL_MACHINE,
                    r'SOFTWARE\%sPython\PythonCore\%s\InstallPath' % (node, python_version))
                try:
                    python_path, _ = compat_winreg.QueryValueEx(key, '')
                finally:
                    compat_winreg.CloseKey(key)
                break
            except Exception:
                pass

        if not python_path:
            raise BuildError('No such Python version: %s' % python_version)

        self.pythonPath = python_path

        super(PythonBuilder, self).__init__(**kwargs)


class GITInfoBuilder(object):
    def __init__(self, **kwargs):
        try:
            self.user, self.repoName = kwargs['path'][:2]
            self.rev = kwargs.pop('rev')
        except ValueError:
            raise BuildError('Invalid path')
        except KeyError as e:
            raise BuildError('Missing mandatory parameter "%s"' % e.args[0])

        path = os.path.join(os.environ['APPDATA'], 'Build archive', self.repoName, self.user)
        if not os.path.exists(path):
            os.makedirs(path)
        self.basePath = tempfile.mkdtemp(dir=path)
        self.buildPath = os.path.join(self.basePath, 'build')

        super(GITInfoBuilder, self).__init__(**kwargs)


class GITBuilder(GITInfoBuilder):
    def build(self):
        try:
            subprocess.check_output(['git', 'clone', 'git://github.com/%s/%s.git' % (self.user, self.repoName), self.buildPath])
            subprocess.check_output(['git', 'checkout', self.rev], cwd=self.buildPath)
        except subprocess.CalledProcessError as e:
            raise BuildError(e.output)

        super(GITBuilder, self).build()


class YoutubeDLBuilder(object):
    authorizedUsers = ['fraca7', 'phihag', 'rg3', 'FiloSottile']

    def __init__(self, **kwargs):
        if self.repoName != 'youtube-dl':
            raise BuildError('Invalid repository "%s"' % self.repoName)
        if self.user not in self.authorizedUsers:
            raise HTTPError('Unauthorized user "%s"' % self.user, 401)

        super(YoutubeDLBuilder, self).__init__(**kwargs)

    def build(self):
        try:
            proc = subprocess.Popen([os.path.join(self.pythonPath, 'python.exe'), 'setup.py', 'py2exe'], stdin=subprocess.PIPE, cwd=self.buildPath)
            proc.wait()
            #subprocess.check_output([os.path.join(self.pythonPath, 'python.exe'), 'setup.py', 'py2exe'],
            #                        cwd=self.buildPath)
        except subprocess.CalledProcessError as e:
            raise BuildError(e.output)

        super(YoutubeDLBuilder, self).build()


class DownloadBuilder(object):
    def __init__(self, **kwargs):
        self.handler = kwargs.pop('handler')
        self.srcPath = os.path.join(self.buildPath, *tuple(kwargs['path'][2:]))
        self.srcPath = os.path.abspath(os.path.normpath(self.srcPath))
        if not self.srcPath.startswith(self.buildPath):
            raise HTTPError(self.srcPath, 401)

        super(DownloadBuilder, self).__init__(**kwargs)

    def build(self):
        if not os.path.exists(self.srcPath):
            raise HTTPError('No such file', 404)
        if os.path.isdir(self.srcPath):
            raise HTTPError('Is a directory: %s' % self.srcPath, 401)

        self.handler.send_response(200)
        self.handler.send_header('Content-Type', 'application/octet-stream')
        self.handler.send_header('Content-Disposition', 'attachment; filename=%s' % os.path.split(self.srcPath)[-1])
        self.handler.send_header('Content-Length', str(os.stat(self.srcPath).st_size))
        self.handler.end_headers()

        with open(self.srcPath, 'rb') as src:
            shutil.copyfileobj(src, self.handler.wfile)

        super(DownloadBuilder, self).build()


class CleanupTempDir(object):
    def build(self):
        try:
            rmtree(self.basePath)
        except Exception as e:
            print('WARNING deleting "%s": %s' % (self.basePath, e))

        super(CleanupTempDir, self).build()


class Null(object):
    def __init__(self, **kwargs):
        pass

    def start(self):
        pass

    def close(self):
        pass

    def build(self):
        pass


class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, CleanupTempDir, Null):
    pass


class BuildHTTPRequestHandler(compat_http_server.BaseHTTPRequestHandler):
    actionDict = {'build': Builder, 'download': Builder}  # They're the same, no more caching.
    def do_GET(self):
        path = compat_urlparse.urlparse(self.path)
        paramDict = dict([(key, value[0]) for key, value in compat_urlparse.parse_qs(path.query).items()])
        action, _, path = path.path.strip('/').partition('/')
        if path:
            path = path.split('/')
            if action in self.actionDict:
                try:
                    builder = self.actionDict[action](path=path, handler=self, **paramDict)
                    builder.start()
                    try:
                        builder.build()
                    finally:
                        builder.close()
                except BuildError as e:
                    self.send_response(e.code)
                    msg = compat_str(e).encode('UTF-8')
                    self.send_header('Content-Type', 'text/plain; charset=UTF-8')
                    self.send_header('Content-Length', len(msg))
                    self.end_headers()
                    self.wfile.write(msg)
            else:
                self.send_response(500, 'Unknown build method "%s"' % action)
        else:
            self.send_response(500, 'Malformed URL')


if __name__ == '__main__':
    main()

==> youtube-dl/devscripts/make_lazy_extractors.py <==
from __future__ import unicode_literals, print_function

from inspect import getsource
import io
import os
from os.path import dirname as dirn
import sys

print('WARNING: Lazy loading extractors is an experimental feature that may not always work', file=sys.stderr)

sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))

lazy_extractors_filename = sys.argv[1]
if os.path.exists(lazy_extractors_filename):
    os.remove(lazy_extractors_filename)

from youtube_dl.extractor import _ALL_CLASSES
from youtube_dl.extractor.common import InfoExtractor, SearchInfoExtractor

with open('devscripts/lazy_load_template.py', 'rt') as f:
    module_template = f.read()

module_contents = [
    module_template + '\n' + getsource(InfoExtractor.suitable) + '\n',
    'class LazyLoadSearchExtractor(LazyLoadExtractor):\n    pass\n']

ie_template = '''
class {name}({bases}):
    _VALID_URL = {valid_url!r}
    _module = '{module}'
'''

make_valid_template = '''
    @classmethod
    def _make_valid_url(cls):
        return {valid_url!r}
'''


def get_base_name(base):
    if base is InfoExtractor:
        return 'LazyLoadExtractor'
    elif base is SearchInfoExtractor:
        return 'LazyLoadSearchExtractor'
    else:
        return base.__name__


def build_lazy_ie(ie, name):
    valid_url = getattr(ie, '_VALID_URL', None)
    s = ie_template.format(
        name=name,
        bases=', '.join(map(get_base_name, ie.__bases__)),
        valid_url=valid_url,
        module=ie.__module__)
    if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
        s += '\n' + getsource(ie.suitable)
    if hasattr(ie, '_make_valid_url'):
        # search extractors
        s += make_valid_template.format(valid_url=ie._make_valid_url())
    return s


# find the correct sorting and add the required base classes so that subclasses
# can be correctly created
classes = _ALL_CLASSES[:-1]
ordered_cls = []
while classes:
    for c in classes[:]:
        bases = set(c.__bases__) - set((object, InfoExtractor, SearchInfoExtractor))
        stop = False
        for b in bases:
            if b not in classes and b not in ordered_cls:
                if b.__name__ == 'GenericIE':
                    exit()
                classes.insert(0, b)
                stop = True
        if stop:
            break
        if all(b in ordered_cls for b in bases):
            ordered_cls.append(c)
            classes.remove(c)
            break
ordered_cls.append(_ALL_CLASSES[-1])

names = []
for ie in ordered_cls:
    name = ie.__name__
    src = build_lazy_ie(ie, name)
    module_contents.append(src)
    if ie in _ALL_CLASSES:
        names.append(name)

module_contents.append(
    '_ALL_CLASSES = [{0}]'.format(', '.join(names)))

module_src = '\n'.join(module_contents) + '\n'

with io.open(lazy_extractors_filename, 'wt', encoding='utf-8') as f:
    f.write(module_src)

==> youtube-dl/devscripts/zsh-completion.py <==
#!/usr/bin/env python
from __future__ import unicode_literals

import os
from os.path import dirname as dirn
import sys

sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl

ZSH_COMPLETION_FILE = "youtube-dl.zsh"
ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"


def build_completion(opt_parser):
    opts = [opt for group in opt_parser.option_groups
            for opt in group.option_list]
    opts_file = [opt for opt in opts if opt.metavar == "FILE"]
    opts_dir = [opt for opt in opts if opt.metavar == "DIR"]

    fileopts = []
    for opt in opts_file:
        if opt._short_opts:
            fileopts.extend(opt._short_opts)
        if opt._long_opts:
            fileopts.extend(opt._long_opts)

    diropts = []
    for opt in opts_dir:
        if opt._short_opts:
            diropts.extend(opt._short_opts)
        if opt._long_opts:
            diropts.extend(opt._long_opts)

    flags = [opt.get_opt_string() for opt in opts]

    with open(ZSH_COMPLETION_TEMPLATE) as f:
        template = f.read()

    template = template.replace("{{fileopts}}", "|".join(fileopts))
    template = template.replace("{{diropts}}", "|".join(diropts))
    template = template.replace("{{flags}}", " ".join(flags))

    with open(ZSH_COMPLETION_FILE, "w") as f:
        f.write(template)


parser = youtube_dl.parseOpts()[0]
build_completion(parser)

==> youtube-dl/devscripts/SizeOfImage_w.patch <==
(binary bsdiff patch; contents not representable as text)

==> youtube-dl/devscripts/zsh-completion.in <==
#compdef youtube-dl

__youtube_dl() {
    local curcontext="$curcontext" fileopts diropts cur prev
    typeset -A opt_args
    fileopts="{{fileopts}}"
    diropts="{{diropts}}"
    cur=$words[CURRENT]
    case $cur in
        :)
            _arguments '*: :(::ytfavorites ::ytrecommended ::ytsubscriptions ::ytwatchlater ::ythistory)'
        ;;
        *)
            prev=$words[CURRENT-1]
            if [[ ${prev} =~ ${fileopts} ]]; then
                _path_files
            elif [[ ${prev} =~ ${diropts} ]]; then
                _path_files -/
            elif [[ ${prev} == "--recode-video" ]]; then
                _arguments '*: :(mp4 flv ogg webm mkv)'
            else
                _arguments '*: :({{flags}})'
            fi
        ;;
    esac
}

__youtube_dl

==> youtube-dl/devscripts/generate_aes_testdata.py <==
from __future__ import unicode_literals

import codecs
import subprocess

import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.utils import intlist_to_bytes
from youtube_dl.aes import aes_encrypt, key_expansion

secret_msg = b'Secret message goes here'


def hex_str(int_list):
    return codecs.encode(intlist_to_bytes(int_list), 'hex')


def openssl_encode(algo, key, iv):
    cmd = ['openssl', 'enc', '-e', '-' + algo, '-K', hex_str(key), '-iv', hex_str(iv)]
    prog = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _ = prog.communicate(secret_msg)
    return out


iv = key = [0x20, 0x15] + 14 * [0]

r = openssl_encode('aes-128-cbc', key, iv)
print('aes_cbc_decrypt')
print(repr(r))

password = key
new_key = aes_encrypt(password, key_expansion(password))
r = openssl_encode('aes-128-ctr', new_key, iv)
print('aes_decrypt_text 16')
print(repr(r))

password = key + 16 * [0]
new_key = aes_encrypt(password, key_expansion(password)) * (32 // 16)
r = openssl_encode('aes-256-ctr', new_key, iv)
print('aes_decrypt_text 32')
print(repr(r))

==> youtube-dl/test/test_download.py <==
#!/usr/bin/env python

from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import (
    assertGreaterEqual,
    expect_warnings,
    get_params,
    gettestcases,
    expect_info_dict,
    try_rm,
    report_warning,
)

import hashlib
import io
import json
import socket

import youtube_dl.YoutubeDL
from youtube_dl.compat import (
    compat_http_client,
    compat_urllib_error,
    compat_HTTPError,
)
from youtube_dl.utils import (
    DownloadError,
    ExtractorError,
    format_bytes,
    UnavailableVideoError,
)
from youtube_dl.extractor import get_info_extractor

RETRIES = 3


class YoutubeDL(youtube_dl.YoutubeDL):
    def __init__(self, *args, **kwargs):
        self.to_stderr = self.to_screen
        self.processed_info_dicts = []
        super(YoutubeDL, self).__init__(*args, **kwargs)

    def report_warning(self, message):
        # Don't accept warnings during tests
        raise ExtractorError(message)

    def process_info(self, info_dict):
        self.processed_info_dicts.append(info_dict)
        return super(YoutubeDL, self).process_info(info_dict)


def _file_md5(fn):
    with open(fn, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()


defs = gettestcases()


class TestDownload(unittest.TestCase):
    # Parallel testing in nosetests. See
    # http://nose.readthedocs.org/en/latest/doc_tests/test_multiprocess/multiprocess.html
    _multiprocess_shared_ = True

    maxDiff = None

    def __str__(self):
        """Identify each test with the `add_ie` attribute, if available."""

        def strclass(cls):
            """From 2.7's unittest; 2.6 had _strclass so we can't import it."""
            return '%s.%s' % (cls.__module__, cls.__name__)

        add_ie = getattr(self, self._testMethodName).add_ie
        return '%s (%s)%s:' % (self._testMethodName,
                               strclass(self.__class__),
                               ' [%s]' % add_ie if add_ie else '')

    def setUp(self):
        self.defs = defs

# Dynamically generate tests


def generator(test_case, tname):

    def test_template(self):
        ie = youtube_dl.extractor.get_info_extractor(test_case['name'])()
        other_ies = [get_info_extractor(ie_key)() for ie_key in test_case.get('add_ie', [])]
        is_playlist = any(k.startswith('playlist') for k in test_case)
        test_cases = test_case.get(
            'playlist', [] if is_playlist else [test_case])

        def print_skipping(reason):
            print('Skipping %s: %s' % (test_case['name'], reason))
        if not ie.working():
            print_skipping('IE marked as not _WORKING')
            return

        for tc in test_cases:
            info_dict = tc.get('info_dict', {})
            if not (info_dict.get('id') and info_dict.get('ext')):
                raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?')

        if 'skip' in test_case:
            print_skipping(test_case['skip'])
            return
        for other_ie in other_ies:
            if not other_ie.working():
                print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
                return

        params = get_params(test_case.get('params', {}))
        params['outtmpl'] = tname + '_' + params['outtmpl']
        if is_playlist and 'playlist' not in test_case:
            params.setdefault('extract_flat', 'in_playlist')
            params.setdefault('skip_download', True)

        ydl = YoutubeDL(params, auto_init=False)
        ydl.add_default_info_extractors()
        finished_hook_called = set()

        def _hook(status):
            if status['status'] == 'finished':
                finished_hook_called.add(status['filename'])
        ydl.add_progress_hook(_hook)
        expect_warnings(ydl, test_case.get('expected_warnings', []))

        def get_tc_filename(tc):
            return ydl.prepare_filename(tc.get('info_dict', {}))

        res_dict = None

        def try_rm_tcs_files(tcs=None):
            if tcs is None:
                tcs = test_cases
            for tc in tcs:
                tc_filename = get_tc_filename(tc)
                try_rm(tc_filename)
                try_rm(tc_filename + '.part')
                try_rm(os.path.splitext(tc_filename)[0] + '.info.json')
        try_rm_tcs_files()
        try:
            try_num = 1
            while True:
                try:
                    # We're not using .download here since that is just a shim
                    # for outside error handling, and returns the exit code
                    # instead of the result dict.
                    res_dict = ydl.extract_info(
                        test_case['url'],
                        force_generic_extractor=params.get('force_generic_extractor', False))
                except (DownloadError, ExtractorError) as err:
                    # Check if the exception is not a network related one
                    if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
                        raise

                    if try_num == RETRIES:
                        report_warning('%s failed due to network errors, skipping...' % tname)
                        return

                    print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num))

                    try_num += 1
                else:
                    break

            if is_playlist:
                self.assertTrue(res_dict['_type'] in ['playlist', 'multi_video'])
                self.assertTrue('entries' in res_dict)
                expect_info_dict(self, res_dict, test_case.get('info_dict', {}))

            if 'playlist_mincount' in test_case:
                assertGreaterEqual(
                    self,
                    len(res_dict['entries']),
                    test_case['playlist_mincount'],
                    'Expected at least %d in playlist %s, but got only %d' % (
                        test_case['playlist_mincount'], test_case['url'],
                        len(res_dict['entries'])))
            if 'playlist_count' in test_case:
                self.assertEqual(
                    len(res_dict['entries']),
                    test_case['playlist_count'],
                    'Expected %d entries in playlist %s, but got %d.' % (
                        test_case['playlist_count'],
                        test_case['url'],
                        len(res_dict['entries']),
                    ))
% ( test_case['playlist_count'], test_case['url'], len(res_dict['entries']), )) if 'playlist_duration_sum' in test_case: got_duration = sum(e['duration'] for e in res_dict['entries']) self.assertEqual( test_case['playlist_duration_sum'], got_duration) # Generalize both playlists and single videos to unified format for # simplicity if 'entries' not in res_dict: res_dict['entries'] = [res_dict] for tc_num, tc in enumerate(test_cases): tc_res_dict = res_dict['entries'][tc_num] # First, check test cases' data against extracted data alone expect_info_dict(self, tc_res_dict, tc.get('info_dict', {})) # Now, check downloaded file consistency tc_filename = get_tc_filename(tc) if not test_case.get('params', {}).get('skip_download', False): self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename) self.assertTrue(tc_filename in finished_hook_called) expected_minsize = tc.get('file_minsize', 10000) if expected_minsize is not None: if params.get('test'): expected_minsize = max(expected_minsize, 10000) got_fsize = os.path.getsize(tc_filename) assertGreaterEqual( self, got_fsize, expected_minsize, 'Expected %s to be at least %s, but it\'s only %s ' % (tc_filename, format_bytes(expected_minsize), format_bytes(got_fsize))) if 'md5' in tc: md5_for_file = _file_md5(tc_filename) self.assertEqual(tc['md5'], md5_for_file) # Finally, check test cases' data again but this time against # extracted data from info JSON file written during processing info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json' self.assertTrue( os.path.exists(info_json_fn), 'Missing info file %s' % info_json_fn) with io.open(info_json_fn, encoding='utf-8') as infof: info_dict = json.load(infof) expect_info_dict(self, info_dict, tc.get('info_dict', {})) finally: try_rm_tcs_files() if is_playlist and res_dict is not None and res_dict.get('entries'): # Remove all other files that may have been extracted if the # extractor returns full results even with extract_flat res_tcs = [{'info_dict': e} for e in res_dict['entries']] try_rm_tcs_files(res_tcs) return test_template # And add them to TestDownload for n, test_case in enumerate(defs): tname = 'test_' + str(test_case['name']) i = 1 while hasattr(TestDownload, tname): tname = 'test_%s_%d' % (test_case['name'], i) i += 1 test_method = generator(test_case, tname) test_method.__name__ = str(tname) ie_list = test_case.get('add_ie') test_method.add_ie = ie_list and ','.join(ie_list) setattr(TestDownload, test_method.__name__, test_method) del test_method if __name__ == '__main__': unittest.main() youtube-dl/test/test_InfoExtractor.py0000644000000000000000000011434313250601453017031 0ustar rootroot#!/usr/bin/env python from __future__ import unicode_literals # Allow direct execution import io import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from test.helper import FakeYDL, expect_dict, expect_value from youtube_dl.compat import compat_etree_fromstring from youtube_dl.extractor.common import InfoExtractor from youtube_dl.extractor import YoutubeIE, get_info_extractor from youtube_dl.utils import encode_data_uri, strip_jsonp, ExtractorError, RegexNotFoundError class TestIE(InfoExtractor): pass class TestInfoExtractor(unittest.TestCase): def setUp(self): self.ie = TestIE(FakeYDL()) def test_ie_key(self): self.assertEqual(get_info_extractor(YoutubeIE.ie_key()), YoutubeIE) def test_html_search_regex(self): html = '
<p id="foo">Watch this <a href="http://www.youtube.com/watch?v=BaW_jenozKc">video</a></p>'
        search = lambda re, *args: self.ie._html_search_regex(re, html, *args)
        self.assertEqual(search(r'<p id="foo">(.+?)</p>
    ', 'foo'), 'Watch this video') def test_opengraph(self): ie = self.ie html = ''' ''' self.assertEqual(ie._og_search_title(html), 'Foo') self.assertEqual(ie._og_search_description(html), 'Some video\'s description ') self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2') self.assertEqual(ie._og_search_video_url(html, default=None), None) self.assertEqual(ie._og_search_property('foobar', html), 'Foo') self.assertEqual(ie._og_search_property('test1', html), 'foo > < bar') self.assertEqual(ie._og_search_property('test2', html), 'foo >//< bar') self.assertEqual(ie._og_search_property(('test0', 'test1'), html), 'foo > < bar') self.assertRaises(RegexNotFoundError, ie._og_search_property, 'test0', html, None, fatal=True) self.assertRaises(RegexNotFoundError, ie._og_search_property, ('test0', 'test00'), html, None, fatal=True) def test_html_search_meta(self): ie = self.ie html = ''' ''' self.assertEqual(ie._html_search_meta('a', html), '1') self.assertEqual(ie._html_search_meta('b', html), '2') self.assertEqual(ie._html_search_meta('c', html), '3') self.assertEqual(ie._html_search_meta('d', html), '4') self.assertEqual(ie._html_search_meta('e', html), '5') self.assertEqual(ie._html_search_meta('f', html), '6') self.assertEqual(ie._html_search_meta(('a', 'b', 'c'), html), '1') self.assertEqual(ie._html_search_meta(('c', 'b', 'a'), html), '3') self.assertEqual(ie._html_search_meta(('z', 'x', 'c'), html), '3') self.assertRaises(RegexNotFoundError, ie._html_search_meta, 'z', html, None, fatal=True) self.assertRaises(RegexNotFoundError, ie._html_search_meta, ('z', 'x'), html, None, fatal=True) def test_download_json(self): uri = encode_data_uri(b'{"foo": "blah"}', 'application/json') self.assertEqual(self.ie._download_json(uri, None), {'foo': 'blah'}) uri = encode_data_uri(b'callback({"foo": "blah"})', 'application/javascript') self.assertEqual(self.ie._download_json(uri, None, transform_source=strip_jsonp), {'foo': 'blah'}) uri = encode_data_uri(b'{"foo": invalid}', 'application/json') self.assertRaises(ExtractorError, self.ie._download_json, uri, None) self.assertEqual(self.ie._download_json(uri, None, fatal=False), None) def test_extract_jwplayer_data_realworld(self): # from http://www.suffolk.edu/sjc/ expect_dict( self, self.ie._extract_jwplayer_data(r''' ''', None, require_title=False), { 'id': 'XEgvuql4', 'formats': [{ 'url': 'rtmp://192.138.214.154/live/sjclive', 'ext': 'flv' }] }) # from https://www.pornoxo.com/videos/7564/striptease-from-sexy-secretary/ expect_dict( self, self.ie._extract_jwplayer_data(r''' ''', 'dummy', require_title=False), { 'thumbnail': 'https://t03.vipstreamservice.com/thumbs/pxo-full/2009-12/14/a4b2157147afe5efa93ce1978e0265289c193874e02597.flv-full-13.jpg', 'formats': [{ 'url': 'https://cdn.pornoxo.com/key=MF+oEbaxqTKb50P-w9G3nA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/4b2157147afe5efa93ce1978e0265289c193874e02597.flv', 'ext': 'flv' }] }) # from http://www.indiedb.com/games/king-machine/videos expect_dict( self, self.ie._extract_jwplayer_data(r''' ''', 'dummy'), { 'title': 'king machine trailer 1', 'thumbnail': 'http://media.indiedb.com/cache/images/games/1/50/49678/thumb_620x2000/king-machine-trailer.mp4.jpg', 'formats': [{ 'url': 'http://cdn.dbolical.com/cache/videos/games/1/50/49678/encode_mp4/king-machine-trailer.mp4', 'height': 360, 'ext': 'mp4' }, { 'url': 'http://cdn.dbolical.com/cache/videos/games/1/50/49678/encode720p_mp4/king-machine-trailer.mp4', 'height': 720, 'ext': 
'mp4' }] }) def test_parse_m3u8_formats(self): _TEST_CASES = [ ( # https://github.com/rg3/youtube-dl/issues/11507 # http://pluzz.francetv.fr/videos/le_ministere.html 'pluzz_francetv_11507', 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais', [{ 'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_0_av.m3u8?null=0', 'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais', 'ext': 'mp4', 'format_id': '180', 'protocol': 'm3u8', 'acodec': 'mp4a.40.2', 'vcodec': 'avc1.66.30', 'tbr': 180, 'width': 256, 'height': 144, }, { 'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_1_av.m3u8?null=0', 'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais', 'ext': 'mp4', 'format_id': '303', 'protocol': 'm3u8', 'acodec': 'mp4a.40.2', 'vcodec': 'avc1.66.30', 'tbr': 303, 'width': 320, 'height': 180, }, { 'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_2_av.m3u8?null=0', 'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais', 'ext': 'mp4', 'format_id': '575', 'protocol': 'm3u8', 'acodec': 'mp4a.40.2', 'vcodec': 'avc1.66.30', 'tbr': 575, 'width': 512, 'height': 288, }, { 'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_3_av.m3u8?null=0', 'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais', 'ext': 'mp4', 'format_id': '831', 'protocol': 'm3u8', 'acodec': 'mp4a.40.2', 'vcodec': 'avc1.77.30', 'tbr': 831, 'width': 704, 'height': 396, }, { 'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_4_av.m3u8?null=0', 'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais', 'ext': 'mp4', 'protocol': 
'm3u8', 'format_id': '1467', 'acodec': 'mp4a.40.2', 'vcodec': 'avc1.77.30', 'tbr': 1467, 'width': 1024, 'height': 576, }] ), ( # https://github.com/rg3/youtube-dl/issues/11995 # http://teamcoco.com/video/clueless-gamer-super-bowl-for-honor 'teamcoco_11995', 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8', [{ 'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-audio-160k_v4.m3u8', 'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8', 'ext': 'mp4', 'format_id': 'audio-0-Default', 'protocol': 'm3u8', 'vcodec': 'none', }, { 'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-audio-64k_v4.m3u8', 'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8', 'ext': 'mp4', 'format_id': 'audio-1-Default', 'protocol': 'm3u8', 'vcodec': 'none', }, { 'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-audio-64k_v4.m3u8', 'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8', 'ext': 'mp4', 'format_id': '71', 'protocol': 'm3u8', 'acodec': 'mp4a.40.5', 'vcodec': 'none', 'tbr': 71, }, { 'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-400k_v4.m3u8', 'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8', 'ext': 'mp4', 'format_id': '413', 'protocol': 'm3u8', 'acodec': 'none', 'vcodec': 'avc1.42001e', 'tbr': 413, 'width': 400, 'height': 224, }, { 'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-400k_v4.m3u8', 'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8', 'ext': 'mp4', 'format_id': '522', 'protocol': 'm3u8', 'acodec': 'none', 'vcodec': 'avc1.42001e', 'tbr': 522, 'width': 400, 'height': 224, }, { 'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-1m_v4.m3u8', 'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8', 'ext': 'mp4', 'format_id': '1205', 'protocol': 'm3u8', 'acodec': 'none', 'vcodec': 'avc1.4d001e', 'tbr': 1205, 'width': 640, 'height': 360, }, { 'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-2m_v4.m3u8', 'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8', 'ext': 'mp4', 'format_id': '2374', 'protocol': 'm3u8', 'acodec': 'none', 'vcodec': 'avc1.4d001f', 'tbr': 2374, 'width': 1024, 'height': 576, }] ), ( # https://github.com/rg3/youtube-dl/issues/12211 # http://video.toggle.sg/en/series/whoopie-s-world/ep3/478601 'toggle_mobile_12211', 'http://cdnapi.kaltura.com/p/2082311/sp/208231100/playManifest/protocol/http/entryId/0_89q6e8ku/format/applehttp/tags/mobile_sd/f/a.m3u8', [{ 'url': 'http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/2/pv/1/flavorId/0_sa2ntrdg/name/a.mp4/index.m3u8', 'manifest_url': 'http://cdnapi.kaltura.com/p/2082311/sp/208231100/playManifest/protocol/http/entryId/0_89q6e8ku/format/applehttp/tags/mobile_sd/f/a.m3u8', 'ext': 'mp4', 'format_id': 'audio-English', 'protocol': 'm3u8', 'language': 'eng', 'vcodec': 'none', }, { 'url': 'http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/2/pv/1/flavorId/0_r7y0nitg/name/a.mp4/index.m3u8', 'manifest_url': 
'http://cdnapi.kaltura.com/p/2082311/sp/208231100/playManifest/protocol/http/entryId/0_89q6e8ku/format/applehttp/tags/mobile_sd/f/a.m3u8', 'ext': 'mp4', 'format_id': 'audio-Undefined', 'protocol': 'm3u8', 'language': 'und', 'vcodec': 'none', }, { 'url': 'http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/2/pv/1/flavorId/0_qlk9hlzr/name/a.mp4/index.m3u8', 'manifest_url': 'http://cdnapi.kaltura.com/p/2082311/sp/208231100/playManifest/protocol/http/entryId/0_89q6e8ku/format/applehttp/tags/mobile_sd/f/a.m3u8', 'ext': 'mp4', 'format_id': '155', 'protocol': 'm3u8', 'tbr': 155.648, 'width': 320, 'height': 180, }, { 'url': 'http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/2/pv/1/flavorId/0_oefackmi/name/a.mp4/index.m3u8', 'manifest_url': 'http://cdnapi.kaltura.com/p/2082311/sp/208231100/playManifest/protocol/http/entryId/0_89q6e8ku/format/applehttp/tags/mobile_sd/f/a.m3u8', 'ext': 'mp4', 'format_id': '502', 'protocol': 'm3u8', 'tbr': 502.784, 'width': 480, 'height': 270, }, { 'url': 'http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/12/pv/1/flavorId/0_vyg9pj7k/name/a.mp4/index.m3u8', 'manifest_url': 'http://cdnapi.kaltura.com/p/2082311/sp/208231100/playManifest/protocol/http/entryId/0_89q6e8ku/format/applehttp/tags/mobile_sd/f/a.m3u8', 'ext': 'mp4', 'format_id': '827', 'protocol': 'm3u8', 'tbr': 827.392, 'width': 640, 'height': 360, }, { 'url': 'http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/12/pv/1/flavorId/0_50n4psvx/name/a.mp4/index.m3u8', 'manifest_url': 'http://cdnapi.kaltura.com/p/2082311/sp/208231100/playManifest/protocol/http/entryId/0_89q6e8ku/format/applehttp/tags/mobile_sd/f/a.m3u8', 'ext': 'mp4', 'format_id': '1396', 'protocol': 'm3u8', 'tbr': 1396.736, 'width': 854, 'height': 480, }] ), ( # http://www.twitch.tv/riotgames/v/6528877 'twitch_vod', 'https://usher.ttvnw.net/vod/6528877?allow_source=true&allow_audio_only=true&allow_spectre=true&player=twitchweb&nauth=%7B%22user_id%22%3Anull%2C%22vod_id%22%3A6528877%2C%22expires%22%3A1492887874%2C%22chansub%22%3A%7B%22restricted_bitrates%22%3A%5B%5D%7D%2C%22privileged%22%3Afalse%2C%22https_required%22%3Afalse%7D&nauthsig=3e29296a6824a0f48f9e731383f77a614fc79bee', [{ 'url': 'https://vod.edgecast.hls.ttvnw.net/e5da31ab49_riotgames_15001215120_261543898/audio_only/index-muted-HM49I092CC.m3u8', 'manifest_url': 'https://usher.ttvnw.net/vod/6528877?allow_source=true&allow_audio_only=true&allow_spectre=true&player=twitchweb&nauth=%7B%22user_id%22%3Anull%2C%22vod_id%22%3A6528877%2C%22expires%22%3A1492887874%2C%22chansub%22%3A%7B%22restricted_bitrates%22%3A%5B%5D%7D%2C%22privileged%22%3Afalse%2C%22https_required%22%3Afalse%7D&nauthsig=3e29296a6824a0f48f9e731383f77a614fc79bee', 'ext': 'mp4', 'format_id': 'Audio Only', 'protocol': 'm3u8', 'acodec': 'mp4a.40.2', 'vcodec': 'none', 'tbr': 182.725, }, { 'url': 'https://vod.edgecast.hls.ttvnw.net/e5da31ab49_riotgames_15001215120_261543898/mobile/index-muted-HM49I092CC.m3u8', 'manifest_url': 'https://usher.ttvnw.net/vod/6528877?allow_source=true&allow_audio_only=true&allow_spectre=true&player=twitchweb&nauth=%7B%22user_id%22%3Anull%2C%22vod_id%22%3A6528877%2C%22expires%22%3A1492887874%2C%22chansub%22%3A%7B%22restricted_bitrates%22%3A%5B%5D%7D%2C%22privileged%22%3Afalse%2C%22https_required%22%3Afalse%7D&nauthsig=3e29296a6824a0f48f9e731383f77a614fc79bee', 'ext': 'mp4', 'format_id': 'Mobile', 'protocol': 'm3u8', 'acodec': 'mp4a.40.2', 'vcodec': 'avc1.42C00D', 'tbr': 280.474, 'width': 400, 
'height': 226, }, { 'url': 'https://vod.edgecast.hls.ttvnw.net/e5da31ab49_riotgames_15001215120_261543898/low/index-muted-HM49I092CC.m3u8', 'manifest_url': 'https://usher.ttvnw.net/vod/6528877?allow_source=true&allow_audio_only=true&allow_spectre=true&player=twitchweb&nauth=%7B%22user_id%22%3Anull%2C%22vod_id%22%3A6528877%2C%22expires%22%3A1492887874%2C%22chansub%22%3A%7B%22restricted_bitrates%22%3A%5B%5D%7D%2C%22privileged%22%3Afalse%2C%22https_required%22%3Afalse%7D&nauthsig=3e29296a6824a0f48f9e731383f77a614fc79bee', 'ext': 'mp4', 'format_id': 'Low', 'protocol': 'm3u8', 'acodec': 'mp4a.40.2', 'vcodec': 'avc1.42C01E', 'tbr': 628.347, 'width': 640, 'height': 360, }, { 'url': 'https://vod.edgecast.hls.ttvnw.net/e5da31ab49_riotgames_15001215120_261543898/medium/index-muted-HM49I092CC.m3u8', 'manifest_url': 'https://usher.ttvnw.net/vod/6528877?allow_source=true&allow_audio_only=true&allow_spectre=true&player=twitchweb&nauth=%7B%22user_id%22%3Anull%2C%22vod_id%22%3A6528877%2C%22expires%22%3A1492887874%2C%22chansub%22%3A%7B%22restricted_bitrates%22%3A%5B%5D%7D%2C%22privileged%22%3Afalse%2C%22https_required%22%3Afalse%7D&nauthsig=3e29296a6824a0f48f9e731383f77a614fc79bee', 'ext': 'mp4', 'format_id': 'Medium', 'protocol': 'm3u8', 'acodec': 'mp4a.40.2', 'vcodec': 'avc1.42C01E', 'tbr': 893.387, 'width': 852, 'height': 480, }, { 'url': 'https://vod.edgecast.hls.ttvnw.net/e5da31ab49_riotgames_15001215120_261543898/high/index-muted-HM49I092CC.m3u8', 'manifest_url': 'https://usher.ttvnw.net/vod/6528877?allow_source=true&allow_audio_only=true&allow_spectre=true&player=twitchweb&nauth=%7B%22user_id%22%3Anull%2C%22vod_id%22%3A6528877%2C%22expires%22%3A1492887874%2C%22chansub%22%3A%7B%22restricted_bitrates%22%3A%5B%5D%7D%2C%22privileged%22%3Afalse%2C%22https_required%22%3Afalse%7D&nauthsig=3e29296a6824a0f48f9e731383f77a614fc79bee', 'ext': 'mp4', 'format_id': 'High', 'protocol': 'm3u8', 'acodec': 'mp4a.40.2', 'vcodec': 'avc1.42C01F', 'tbr': 1603.789, 'width': 1280, 'height': 720, }, { 'url': 'https://vod.edgecast.hls.ttvnw.net/e5da31ab49_riotgames_15001215120_261543898/chunked/index-muted-HM49I092CC.m3u8', 'manifest_url': 'https://usher.ttvnw.net/vod/6528877?allow_source=true&allow_audio_only=true&allow_spectre=true&player=twitchweb&nauth=%7B%22user_id%22%3Anull%2C%22vod_id%22%3A6528877%2C%22expires%22%3A1492887874%2C%22chansub%22%3A%7B%22restricted_bitrates%22%3A%5B%5D%7D%2C%22privileged%22%3Afalse%2C%22https_required%22%3Afalse%7D&nauthsig=3e29296a6824a0f48f9e731383f77a614fc79bee', 'ext': 'mp4', 'format_id': 'Source', 'protocol': 'm3u8', 'acodec': 'mp4a.40.2', 'vcodec': 'avc1.100.31', 'tbr': 3214.134, 'width': 1280, 'height': 720, }] ), ( # http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015 # EXT-X-STREAM-INF tag with NAME attribute that is not defined # in HLS specification 'vidio', 'https://www.vidio.com/videos/165683/playlist.m3u8', [{ 'url': 'https://cdn1-a.production.vidio.static6.com/uploads/165683/dj_ambred-4383-b300.mp4.m3u8', 'manifest_url': 'https://www.vidio.com/videos/165683/playlist.m3u8', 'ext': 'mp4', 'format_id': '270p 3G', 'protocol': 'm3u8', 'tbr': 300, 'width': 480, 'height': 270, }, { 'url': 'https://cdn1-a.production.vidio.static6.com/uploads/165683/dj_ambred-4383-b600.mp4.m3u8', 'manifest_url': 'https://www.vidio.com/videos/165683/playlist.m3u8', 'ext': 'mp4', 'format_id': '360p SD', 'protocol': 'm3u8', 'tbr': 600, 'width': 640, 'height': 360, }, { 'url': 'https://cdn1-a.production.vidio.static6.com/uploads/165683/dj_ambred-4383-b1200.mp4.m3u8', 'manifest_url': 
'https://www.vidio.com/videos/165683/playlist.m3u8', 'ext': 'mp4', 'format_id': '720p HD', 'protocol': 'm3u8', 'tbr': 1200, 'width': 1280, 'height': 720, }] ) ] for m3u8_file, m3u8_url, expected_formats in _TEST_CASES: with io.open('./test/testdata/m3u8/%s.m3u8' % m3u8_file, mode='r', encoding='utf-8') as f: formats = self.ie._parse_m3u8_formats( f.read(), m3u8_url, ext='mp4') self.ie._sort_formats(formats) expect_value(self, formats, expected_formats, None) def test_parse_mpd_formats(self): _TEST_CASES = [ ( # https://github.com/rg3/youtube-dl/issues/13919 # Also tests duplicate representation ids, see # https://github.com/rg3/youtube-dl/issues/15111 'float_duration', 'http://unknown/manifest.mpd', [{ 'manifest_url': 'http://unknown/manifest.mpd', 'ext': 'm4a', 'format_id': '318597', 'format_note': 'DASH audio', 'protocol': 'http_dash_segments', 'acodec': 'mp4a.40.2', 'vcodec': 'none', 'tbr': 61.587, }, { 'manifest_url': 'http://unknown/manifest.mpd', 'ext': 'mp4', 'format_id': '318597', 'format_note': 'DASH video', 'protocol': 'http_dash_segments', 'acodec': 'none', 'vcodec': 'avc1.42001f', 'tbr': 318.597, 'width': 340, 'height': 192, }, { 'manifest_url': 'http://unknown/manifest.mpd', 'ext': 'mp4', 'format_id': '638590', 'format_note': 'DASH video', 'protocol': 'http_dash_segments', 'acodec': 'none', 'vcodec': 'avc1.42001f', 'tbr': 638.59, 'width': 512, 'height': 288, }, { 'manifest_url': 'http://unknown/manifest.mpd', 'ext': 'mp4', 'format_id': '1022565', 'format_note': 'DASH video', 'protocol': 'http_dash_segments', 'acodec': 'none', 'vcodec': 'avc1.4d001f', 'tbr': 1022.565, 'width': 688, 'height': 384, }, { 'manifest_url': 'http://unknown/manifest.mpd', 'ext': 'mp4', 'format_id': '2046506', 'format_note': 'DASH video', 'protocol': 'http_dash_segments', 'acodec': 'none', 'vcodec': 'avc1.4d001f', 'tbr': 2046.506, 'width': 1024, 'height': 576, }, { 'manifest_url': 'http://unknown/manifest.mpd', 'ext': 'mp4', 'format_id': '3998017', 'format_note': 'DASH video', 'protocol': 'http_dash_segments', 'acodec': 'none', 'vcodec': 'avc1.640029', 'tbr': 3998.017, 'width': 1280, 'height': 720, }, { 'manifest_url': 'http://unknown/manifest.mpd', 'ext': 'mp4', 'format_id': '5997485', 'format_note': 'DASH video', 'protocol': 'http_dash_segments', 'acodec': 'none', 'vcodec': 'avc1.640032', 'tbr': 5997.485, 'width': 1920, 'height': 1080, }] ), ( # https://github.com/rg3/youtube-dl/pull/14844 'urls_only', 'http://unknown/manifest.mpd', [{ 'manifest_url': 'http://unknown/manifest.mpd', 'ext': 'mp4', 'format_id': 'h264_aac_144p_m4s', 'format_note': 'DASH video', 'protocol': 'http_dash_segments', 'acodec': 'mp4a.40.2', 'vcodec': 'avc3.42c01e', 'tbr': 200, 'width': 256, 'height': 144, }, { 'manifest_url': 'http://unknown/manifest.mpd', 'ext': 'mp4', 'format_id': 'h264_aac_240p_m4s', 'format_note': 'DASH video', 'protocol': 'http_dash_segments', 'acodec': 'mp4a.40.2', 'vcodec': 'avc3.42c01e', 'tbr': 400, 'width': 424, 'height': 240, }, { 'manifest_url': 'http://unknown/manifest.mpd', 'ext': 'mp4', 'format_id': 'h264_aac_360p_m4s', 'format_note': 'DASH video', 'protocol': 'http_dash_segments', 'acodec': 'mp4a.40.2', 'vcodec': 'avc3.42c01e', 'tbr': 800, 'width': 640, 'height': 360, }, { 'manifest_url': 'http://unknown/manifest.mpd', 'ext': 'mp4', 'format_id': 'h264_aac_480p_m4s', 'format_note': 'DASH video', 'protocol': 'http_dash_segments', 'acodec': 'mp4a.40.2', 'vcodec': 'avc3.42c01e', 'tbr': 1200, 'width': 856, 'height': 480, }, { 'manifest_url': 'http://unknown/manifest.mpd', 'ext': 'mp4', 'format_id': 
'h264_aac_576p_m4s', 'format_note': 'DASH video', 'protocol': 'http_dash_segments', 'acodec': 'mp4a.40.2', 'vcodec': 'avc3.42c01e', 'tbr': 1600, 'width': 1024, 'height': 576, }, { 'manifest_url': 'http://unknown/manifest.mpd', 'ext': 'mp4', 'format_id': 'h264_aac_720p_m4s', 'format_note': 'DASH video', 'protocol': 'http_dash_segments', 'acodec': 'mp4a.40.2', 'vcodec': 'avc3.42c01e', 'tbr': 2400, 'width': 1280, 'height': 720, }, { 'manifest_url': 'http://unknown/manifest.mpd', 'ext': 'mp4', 'format_id': 'h264_aac_1080p_m4s', 'format_note': 'DASH video', 'protocol': 'http_dash_segments', 'acodec': 'mp4a.40.2', 'vcodec': 'avc3.42c01e', 'tbr': 4400, 'width': 1920, 'height': 1080, }] ) ] for mpd_file, mpd_url, expected_formats in _TEST_CASES: with io.open('./test/testdata/mpd/%s.mpd' % mpd_file, mode='r', encoding='utf-8') as f: formats = self.ie._parse_mpd_formats( compat_etree_fromstring(f.read().encode('utf-8')), mpd_url=mpd_url) self.ie._sort_formats(formats) expect_value(self, formats, expected_formats, None) def test_parse_f4m_formats(self): _TEST_CASES = [ ( # https://github.com/rg3/youtube-dl/issues/14660 'custom_base_url', 'http://api.new.livestream.com/accounts/6115179/events/6764928/videos/144884262.f4m', [{ 'manifest_url': 'http://api.new.livestream.com/accounts/6115179/events/6764928/videos/144884262.f4m', 'ext': 'flv', 'format_id': '2148', 'protocol': 'f4m', 'tbr': 2148, 'width': 1280, 'height': 720, }] ), ] for f4m_file, f4m_url, expected_formats in _TEST_CASES: with io.open('./test/testdata/f4m/%s.f4m' % f4m_file, mode='r', encoding='utf-8') as f: formats = self.ie._parse_f4m_formats( compat_etree_fromstring(f.read().encode('utf-8')), f4m_url, None) self.ie._sort_formats(formats) expect_value(self, formats, expected_formats, None) if __name__ == '__main__': unittest.main() youtube-dl/test/test_youtube_lists.py0000644000000000000000000000474413250601453017157 0ustar rootroot#!/usr/bin/env python from __future__ import unicode_literals # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from test.helper import FakeYDL from youtube_dl.extractor import ( YoutubePlaylistIE, YoutubeIE, ) class TestYoutubeLists(unittest.TestCase): def assertIsPlaylist(self, info): """Make sure the info has '_type' set to 'playlist'""" self.assertEqual(info['_type'], 'playlist') def test_youtube_playlist_noplaylist(self): dl = FakeYDL() dl.params['noplaylist'] = True ie = YoutubePlaylistIE(dl) result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re') self.assertEqual(result['_type'], 'url') self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg') def test_youtube_course(self): dl = FakeYDL() ie = YoutubePlaylistIE(dl) # TODO find a > 100 (paginating?) 
videos course result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') entries = list(result['entries']) self.assertEqual(YoutubeIE().extract_id(entries[0]['url']), 'j9WZyLZCBzs') self.assertEqual(len(entries), 25) self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0') def test_youtube_mix(self): dl = FakeYDL() ie = YoutubePlaylistIE(dl) result = ie.extract('https://www.youtube.com/watch?v=W01L70IGBgE&index=2&list=RDOQpdSVF_k_w') entries = result['entries'] self.assertTrue(len(entries) >= 50) original_video = entries[0] self.assertEqual(original_video['id'], 'OQpdSVF_k_w') def test_youtube_toptracks(self): print('Skipping: The playlist page gives error 500') return dl = FakeYDL() ie = YoutubePlaylistIE(dl) result = ie.extract('https://www.youtube.com/playlist?list=MCUS') entries = result['entries'] self.assertEqual(len(entries), 100) def test_youtube_flat_playlist_titles(self): dl = FakeYDL() dl.params['extract_flat'] = True ie = YoutubePlaylistIE(dl) result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re') self.assertIsPlaylist(result) for entry in result['entries']: self.assertTrue(entry.get('title')) if __name__ == '__main__': unittest.main() youtube-dl/test/versions.json0000644000000000000000000000311013250601453015361 0ustar rootroot{ "latest": "2013.01.06", "signature": "72158cdba391628569ffdbea259afbcf279bbe3d8aeb7492690735dc1cfa6afa754f55c61196f3871d429599ab22f2667f1fec98865527b32632e7f4b3675a7ef0f0fbe084d359256ae4bba68f0d33854e531a70754712f244be71d4b92e664302aa99653ee4df19800d955b6c4149cd2b3f24288d6e4b40b16126e01f4c8ce6", "versions": { "2013.01.02": { "bin": [ "http://youtube-dl.org/downloads/2013.01.02/youtube-dl", "f5b502f8aaa77675c4884938b1e4871ebca2611813a0c0e74f60c0fbd6dcca6b" ], "exe": [ "http://youtube-dl.org/downloads/2013.01.02/youtube-dl.exe", "75fa89d2ce297d102ff27675aa9d92545bbc91013f52ec52868c069f4f9f0422" ], "tar": [ "http://youtube-dl.org/downloads/2013.01.02/youtube-dl-2013.01.02.tar.gz", "6a66d022ac8e1c13da284036288a133ec8dba003b7bd3a5179d0c0daca8c8196" ] }, "2013.01.06": { "bin": [ "http://youtube-dl.org/downloads/2013.01.06/youtube-dl", "64b6ed8865735c6302e836d4d832577321b4519aa02640dc508580c1ee824049" ], "exe": [ "http://youtube-dl.org/downloads/2013.01.06/youtube-dl.exe", "58609baf91e4389d36e3ba586e21dab882daaaee537e4448b1265392ae86ff84" ], "tar": [ "http://youtube-dl.org/downloads/2013.01.06/youtube-dl-2013.01.06.tar.gz", "fe77ab20a95d980ed17a659aa67e371fdd4d656d19c4c7950e7b720b0c2f1a86" ] } } }youtube-dl/test/test_utils.py0000644000000000000000000016730713250601465015415 0ustar rootroot#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # Various small unit tests import io import json import xml.etree.ElementTree from youtube_dl.utils import ( age_restricted, args_to_str, encode_base_n, clean_html, date_from_str, DateRange, detect_exe_version, determine_ext, dict_get, encode_compat_str, encodeFilename, escape_rfc3986, escape_url, extract_attributes, ExtractorError, find_xpath_attr, fix_xml_ampersands, get_element_by_class, get_element_by_attribute, get_elements_by_class, get_elements_by_attribute, InAdvancePagedList, intlist_to_bytes, is_html, js_to_json, limit_length, mimetype2ext, month_by_name, multipart_encode, ohdave_rsa_encrypt, OnDemandPagedList, orderedSet, parse_age_limit, parse_duration, 
parse_filesize, parse_count, parse_iso8601, parse_resolution, pkcs1pad, read_batch_urls, sanitize_filename, sanitize_path, sanitize_url, expand_path, prepend_extension, replace_extension, remove_start, remove_end, remove_quotes, shell_quote, smuggle_url, str_to_int, strip_jsonp, timeconvert, unescapeHTML, unified_strdate, unified_timestamp, unsmuggle_url, uppercase_escape, lowercase_escape, url_basename, base_url, urljoin, urlencode_postdata, urshift, update_url_query, version_tuple, xpath_with_ns, xpath_element, xpath_text, xpath_attr, render_table, match_str, parse_dfxp_time_expr, dfxp2srt, cli_option, cli_valueless_option, cli_bool_option, parse_codecs, ) from youtube_dl.compat import ( compat_chr, compat_etree_fromstring, compat_getenv, compat_os_name, compat_setenv, compat_urlparse, compat_parse_qs, ) class TestUtil(unittest.TestCase): def test_timeconvert(self): self.assertTrue(timeconvert('') is None) self.assertTrue(timeconvert('bougrg') is None) def test_sanitize_filename(self): self.assertEqual(sanitize_filename('abc'), 'abc') self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e') self.assertEqual(sanitize_filename('123'), '123') self.assertEqual('abc_de', sanitize_filename('abc/de')) self.assertFalse('/' in sanitize_filename('abc/de///')) self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de')) self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|')) self.assertEqual('yes no', sanitize_filename('yes? no')) self.assertEqual('this - that', sanitize_filename('this: that')) self.assertEqual(sanitize_filename('AT&T'), 'AT&T') aumlaut = 'ä' self.assertEqual(sanitize_filename(aumlaut), aumlaut) tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430' self.assertEqual(sanitize_filename(tests), tests) self.assertEqual( sanitize_filename('New World record at 0:12:34'), 'New World record at 0_12_34') self.assertEqual(sanitize_filename('--gasdgf'), '_-gasdgf') self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf') self.assertEqual(sanitize_filename('.gasdgf'), 'gasdgf') self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf') forbidden = '"\0\\/' for fc in forbidden: for fbc in forbidden: self.assertTrue(fbc not in sanitize_filename(fc)) def test_sanitize_filename_restricted(self): self.assertEqual(sanitize_filename('abc', restricted=True), 'abc') self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e') self.assertEqual(sanitize_filename('123', restricted=True), '123') self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True)) self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True)) self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True)) self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True)) self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True)) self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True)) tests = 'aäb\u4e2d\u56fd\u7684c' self.assertEqual(sanitize_filename(tests, restricted=True), 'aab_c') self.assertTrue(sanitize_filename('\xf6', restricted=True) != '') # No empty filename forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#' for fc in forbidden: for fbc in forbidden: self.assertTrue(fbc not in sanitize_filename(fc, restricted=True)) # Handle a common case more neatly self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song') self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech') # .. 
but make sure the file name is never empty self.assertTrue(sanitize_filename('-', restricted=True) != '') self.assertTrue(sanitize_filename(':', restricted=True) != '') self.assertEqual(sanitize_filename( 'ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', restricted=True), 'AAAAAAAECEEEEIIIIDNOOOOOOOOEUUUUUYPssaaaaaaaeceeeeiiiionooooooooeuuuuuypy') def test_sanitize_ids(self): self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw') self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw') self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI') def test_sanitize_path(self): if sys.platform != 'win32': return self.assertEqual(sanitize_path('abc'), 'abc') self.assertEqual(sanitize_path('abc/def'), 'abc\\def') self.assertEqual(sanitize_path('abc\\def'), 'abc\\def') self.assertEqual(sanitize_path('abc|def'), 'abc#def') self.assertEqual(sanitize_path('<>:"|?*'), '#######') self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def') self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def') self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc') self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc') self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc') self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc') self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f') self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc') self.assertEqual( sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'), 'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s') self.assertEqual( sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'), 'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! 
(1987 Yamaha 700)-20141116.mp4.part') self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#') self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def') self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#') self.assertEqual(sanitize_path('../abc'), '..\\abc') self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc') self.assertEqual(sanitize_path('./abc'), 'abc') self.assertEqual(sanitize_path('./../abc'), '..\\abc') def test_sanitize_url(self): self.assertEqual(sanitize_url('//foo.bar'), 'http://foo.bar') self.assertEqual(sanitize_url('httpss://foo.bar'), 'https://foo.bar') self.assertEqual(sanitize_url('rmtps://foo.bar'), 'rtmps://foo.bar') self.assertEqual(sanitize_url('https://foo.bar'), 'https://foo.bar') def test_expand_path(self): def env(var): return '%{0}%'.format(var) if sys.platform == 'win32' else '${0}'.format(var) compat_setenv('YOUTUBE_DL_EXPATH_PATH', 'expanded') self.assertEqual(expand_path(env('YOUTUBE_DL_EXPATH_PATH')), 'expanded') self.assertEqual(expand_path(env('HOME')), compat_getenv('HOME')) self.assertEqual(expand_path('~'), compat_getenv('HOME')) self.assertEqual( expand_path('~/%s' % env('YOUTUBE_DL_EXPATH_PATH')), '%s/expanded' % compat_getenv('HOME')) def test_prepend_extension(self): self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext') self.assertEqual(prepend_extension('abc.ext', 'temp', 'ext'), 'abc.temp.ext') self.assertEqual(prepend_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp') self.assertEqual(prepend_extension('abc', 'temp'), 'abc.temp') self.assertEqual(prepend_extension('.abc', 'temp'), '.abc.temp') self.assertEqual(prepend_extension('.abc.ext', 'temp'), '.abc.temp.ext') def test_replace_extension(self): self.assertEqual(replace_extension('abc.ext', 'temp'), 'abc.temp') self.assertEqual(replace_extension('abc.ext', 'temp', 'ext'), 'abc.temp') self.assertEqual(replace_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp') self.assertEqual(replace_extension('abc', 'temp'), 'abc.temp') self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp') self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp') def test_remove_start(self): self.assertEqual(remove_start(None, 'A - '), None) self.assertEqual(remove_start('A - B', 'A - '), 'B') self.assertEqual(remove_start('B - A', 'A - '), 'B - A') def test_remove_end(self): self.assertEqual(remove_end(None, ' - B'), None) self.assertEqual(remove_end('A - B', ' - B'), 'A') self.assertEqual(remove_end('B - A', ' - B'), 'B - A') def test_remove_quotes(self): self.assertEqual(remove_quotes(None), None) self.assertEqual(remove_quotes('"'), '"') self.assertEqual(remove_quotes("'"), "'") self.assertEqual(remove_quotes(';'), ';') self.assertEqual(remove_quotes('";'), '";') self.assertEqual(remove_quotes('""'), '') self.assertEqual(remove_quotes('";"'), ';') def test_ordered_set(self): self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7]) self.assertEqual(orderedSet([]), []) self.assertEqual(orderedSet([1]), [1]) # keep the list ordered self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1]) def test_unescape_html(self): self.assertEqual(unescapeHTML('%20;'), '%20;') self.assertEqual(unescapeHTML('/'), '/') self.assertEqual(unescapeHTML('/'), '/') self.assertEqual(unescapeHTML('é'), 'é') self.assertEqual(unescapeHTML('�'), '�') self.assertEqual(unescapeHTML('&a"'), '&a"') # HTML5 entities self.assertEqual(unescapeHTML('.''), '.\'') def test_date_from_str(self): 
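        # date_from_str resolves relative date specs against the current time;
        # the assertions below pin the week/month/year units to fixed day
        # counts (7, 30 and 365 days respectively).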
self.assertEqual(date_from_str('yesterday'), date_from_str('now-1day')) self.assertEqual(date_from_str('now+7day'), date_from_str('now+1week')) self.assertEqual(date_from_str('now+14day'), date_from_str('now+2week')) self.assertEqual(date_from_str('now+365day'), date_from_str('now+1year')) self.assertEqual(date_from_str('now+30day'), date_from_str('now+1month')) def test_daterange(self): _20century = DateRange("19000101", "20000101") self.assertFalse("17890714" in _20century) _ac = DateRange("00010101") self.assertTrue("19690721" in _ac) _firstmilenium = DateRange(end="10000101") self.assertTrue("07110427" in _firstmilenium) def test_unified_dates(self): self.assertEqual(unified_strdate('December 21, 2010'), '20101221') self.assertEqual(unified_strdate('8/7/2009'), '20090708') self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214') self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011') self.assertEqual(unified_strdate('1968 12 10'), '19681210') self.assertEqual(unified_strdate('1968-12-10'), '19681210') self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128') self.assertEqual( unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False), '20141126') self.assertEqual( unified_strdate('2/2/2015 6:47:40 PM', day_first=False), '20150202') self.assertEqual(unified_strdate('Feb 14th 2016 5:45PM'), '20160214') self.assertEqual(unified_strdate('25-09-2014'), '20140925') self.assertEqual(unified_strdate('27.02.2016 17:30'), '20160227') self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None) self.assertEqual(unified_strdate('Feb 7, 2016 at 6:35 pm'), '20160207') self.assertEqual(unified_strdate('July 15th, 2013'), '20130715') self.assertEqual(unified_strdate('September 1st, 2013'), '20130901') self.assertEqual(unified_strdate('Sep 2nd, 2013'), '20130902') def test_unified_timestamps(self): self.assertEqual(unified_timestamp('December 21, 2010'), 1292889600) self.assertEqual(unified_timestamp('8/7/2009'), 1247011200) self.assertEqual(unified_timestamp('Dec 14, 2012'), 1355443200) self.assertEqual(unified_timestamp('2012/10/11 01:56:38 +0000'), 1349920598) self.assertEqual(unified_timestamp('1968 12 10'), -33436800) self.assertEqual(unified_timestamp('1968-12-10'), -33436800) self.assertEqual(unified_timestamp('28/01/2014 21:00:00 +0100'), 1390939200) self.assertEqual( unified_timestamp('11/26/2014 11:30:00 AM PST', day_first=False), 1417001400) self.assertEqual( unified_timestamp('2/2/2015 6:47:40 PM', day_first=False), 1422902860) self.assertEqual(unified_timestamp('Feb 14th 2016 5:45PM'), 1455471900) self.assertEqual(unified_timestamp('25-09-2014'), 1411603200) self.assertEqual(unified_timestamp('27.02.2016 17:30'), 1456594200) self.assertEqual(unified_timestamp('UNKNOWN DATE FORMAT'), None) self.assertEqual(unified_timestamp('May 16, 2016 11:15 PM'), 1463440500) self.assertEqual(unified_timestamp('Feb 7, 2016 at 6:35 pm'), 1454870100) self.assertEqual(unified_timestamp('2017-03-30T17:52:41Q'), 1490896361) self.assertEqual(unified_timestamp('Sep 11, 2013 | 5:49 AM'), 1378878540) self.assertEqual(unified_timestamp('December 15, 2017 at 7:49 am'), 1513324140) self.assertEqual(unified_timestamp('2018-03-14T08:32:43.1493874+00:00'), 1521016363) def test_determine_ext(self): self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4') self.assertEqual(determine_ext('http://example.com/foo/bar/?download', None), None) self.assertEqual(determine_ext('http://example.com/foo/bar.nonext/?download', None), None) 
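        # As the surrounding assertions show, determine_ext disregards a
        # trailing '?query' part and returns the caller-supplied default
        # (None here) when the path carries no usable extension.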
        self.assertEqual(determine_ext('http://example.com/foo/bar/mp4?download', None), None)
        self.assertEqual(determine_ext('http://example.com/foo/bar.m3u8//?download'), 'm3u8')

    def test_find_xpath_attr(self):
        testxml = '''<root>
            <node/>
            <node x="a"/>
            <node x="a" y="c"/>
            <node x="b" y="d"/>
            <node x=""/>
        </root>'''
        doc = compat_etree_fromstring(testxml)
        self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
        self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])

    def test_xpath_with_ns(self):
        testxml = '''<root xmlns:media="http://example.com/">
            <media:song>
                <media:author>The Author</media:author>
                <url>http://server.com/download.mp3</url>
            </media:song>
        </root>'''
        doc = compat_etree_fromstring(testxml)
        find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
        self.assertTrue(find('media:song') is not None)
        self.assertEqual(find('media:song/media:author').text, 'The Author')
        self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')

    def test_xpath_element(self):
        doc = xml.etree.ElementTree.Element('root')
        div = xml.etree.ElementTree.SubElement(doc, 'div')
        p = xml.etree.ElementTree.SubElement(div, 'p')
        p.text = 'Foo'
        self.assertEqual(xpath_element(doc, 'div/p'), p)
        self.assertEqual(xpath_element(doc, ['div/p']), p)
        self.assertEqual(xpath_element(doc, ['div/bar', 'div/p']), p)
        self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
        self.assertEqual(xpath_element(doc, ['div/bar'], default='default'), 'default')
        self.assertTrue(xpath_element(doc, 'div/bar') is None)
        self.assertTrue(xpath_element(doc, ['div/bar']) is None)
        self.assertTrue(xpath_element(doc, ['div/bar', 'div/baz']) is None)
        self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
        self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar'], fatal=True)
        self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar', 'div/baz'], fatal=True)

    def test_xpath_text(self):
        testxml = '''<root>
            <div>
                <p>Foo</p>
            </div>
        </root>
    ''' doc = compat_etree_fromstring(testxml) self.assertEqual(xpath_text(doc, 'div/p'), 'Foo') self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default') self.assertTrue(xpath_text(doc, 'div/bar') is None) self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True) def test_xpath_attr(self): testxml = '''
<root>
            <div>
                <p x="a">Foo</p>
            </div>
        </root>
    ''' doc = compat_etree_fromstring(testxml) self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a') self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None) self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None) self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default') self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default') self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True) self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True) def test_smuggle_url(self): data = {"ö": "ö", "abc": [3]} url = 'https://foo.bar/baz?x=y#a' smug_url = smuggle_url(url, data) unsmug_url, unsmug_data = unsmuggle_url(smug_url) self.assertEqual(url, unsmug_url) self.assertEqual(data, unsmug_data) res_url, res_data = unsmuggle_url(url) self.assertEqual(res_url, url) self.assertEqual(res_data, None) smug_url = smuggle_url(url, {'a': 'b'}) smug_smug_url = smuggle_url(smug_url, {'c': 'd'}) res_url, res_data = unsmuggle_url(smug_smug_url) self.assertEqual(res_url, url) self.assertEqual(res_data, {'a': 'b', 'c': 'd'}) def test_shell_quote(self): args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')] self.assertEqual( shell_quote(args), """ffmpeg -i 'ñ€ß'"'"'.mp4'""" if compat_os_name != 'nt' else '''ffmpeg -i "ñ€ß'.mp4"''') def test_str_to_int(self): self.assertEqual(str_to_int('123,456'), 123456) self.assertEqual(str_to_int('123.456'), 123456) def test_url_basename(self): self.assertEqual(url_basename('http://foo.de/'), '') self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz') self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz') self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz') self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz') self.assertEqual( url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'), 'trailer.mp4') def test_base_url(self): self.assertEqual(base_url('http://foo.de/'), 'http://foo.de/') self.assertEqual(base_url('http://foo.de/bar'), 'http://foo.de/') self.assertEqual(base_url('http://foo.de/bar/'), 'http://foo.de/bar/') self.assertEqual(base_url('http://foo.de/bar/baz'), 'http://foo.de/bar/') self.assertEqual(base_url('http://foo.de/bar/baz?x=z/x/c'), 'http://foo.de/bar/') def test_urljoin(self): self.assertEqual(urljoin('http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin(b'http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin(b'http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('//foo.de/', '/a/b/c.txt'), '//foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de/', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de/', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de/', '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt') self.assertEqual(urljoin(None, 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin(None, '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt') self.assertEqual(urljoin('', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin(['foobar'], 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de/', None), None) 
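        # urljoin is deliberately forgiving: a None, empty or non-string path
        # part yields None instead of raising, as the assertions on either
        # side of this note verify.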
self.assertEqual(urljoin('http://foo.de/', ''), None) self.assertEqual(urljoin('http://foo.de/', ['foobar']), None) self.assertEqual(urljoin('http://foo.de/a/b/c.txt', '.././../d.txt'), 'http://foo.de/d.txt') def test_parse_age_limit(self): self.assertEqual(parse_age_limit(None), None) self.assertEqual(parse_age_limit(False), None) self.assertEqual(parse_age_limit('invalid'), None) self.assertEqual(parse_age_limit(0), 0) self.assertEqual(parse_age_limit(18), 18) self.assertEqual(parse_age_limit(21), 21) self.assertEqual(parse_age_limit(22), None) self.assertEqual(parse_age_limit('18'), 18) self.assertEqual(parse_age_limit('18+'), 18) self.assertEqual(parse_age_limit('PG-13'), 13) self.assertEqual(parse_age_limit('TV-14'), 14) self.assertEqual(parse_age_limit('TV-MA'), 17) def test_parse_duration(self): self.assertEqual(parse_duration(None), None) self.assertEqual(parse_duration(False), None) self.assertEqual(parse_duration('invalid'), None) self.assertEqual(parse_duration('1'), 1) self.assertEqual(parse_duration('1337:12'), 80232) self.assertEqual(parse_duration('9:12:43'), 33163) self.assertEqual(parse_duration('12:00'), 720) self.assertEqual(parse_duration('00:01:01'), 61) self.assertEqual(parse_duration('x:y'), None) self.assertEqual(parse_duration('3h11m53s'), 11513) self.assertEqual(parse_duration('3h 11m 53s'), 11513) self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513) self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513) self.assertEqual(parse_duration('62m45s'), 3765) self.assertEqual(parse_duration('6m59s'), 419) self.assertEqual(parse_duration('49s'), 49) self.assertEqual(parse_duration('0h0m0s'), 0) self.assertEqual(parse_duration('0m0s'), 0) self.assertEqual(parse_duration('0s'), 0) self.assertEqual(parse_duration('01:02:03.05'), 3723.05) self.assertEqual(parse_duration('T30M38S'), 1838) self.assertEqual(parse_duration('5 s'), 5) self.assertEqual(parse_duration('3 min'), 180) self.assertEqual(parse_duration('2.5 hours'), 9000) self.assertEqual(parse_duration('02:03:04'), 7384) self.assertEqual(parse_duration('01:02:03:04'), 93784) self.assertEqual(parse_duration('1 hour 3 minutes'), 3780) self.assertEqual(parse_duration('87 Min.'), 5220) self.assertEqual(parse_duration('PT1H0.040S'), 3600.04) self.assertEqual(parse_duration('PT00H03M30SZ'), 210) self.assertEqual(parse_duration('P0Y0M0DT0H4M20.880S'), 260.88) def test_fix_xml_ampersands(self): self.assertEqual( fix_xml_ampersands('"&x=y&z=a'), '"&x=y&z=a') self.assertEqual( fix_xml_ampersands('"&x=y&wrong;&z=a'), '"&x=y&wrong;&z=a') self.assertEqual( fix_xml_ampersands('&'><"'), '&'><"') self.assertEqual( fix_xml_ampersands('Ӓ᪼'), 'Ӓ᪼') self.assertEqual(fix_xml_ampersands('&#&#'), '&#&#') def test_paged_list(self): def testPL(size, pagesize, sliceargs, expected): def get_page(pagenum): firstid = pagenum * pagesize upto = min(size, pagenum * pagesize + pagesize) for i in range(firstid, upto): yield i pl = OnDemandPagedList(get_page, pagesize) got = pl.getslice(*sliceargs) self.assertEqual(got, expected) iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize) got = iapl.getslice(*sliceargs) self.assertEqual(got, expected) testPL(5, 2, (), [0, 1, 2, 3, 4]) testPL(5, 2, (1,), [1, 2, 3, 4]) testPL(5, 2, (2,), [2, 3, 4]) testPL(5, 2, (4,), [4]) testPL(5, 2, (0, 3), [0, 1, 2]) testPL(5, 2, (1, 4), [1, 2, 3]) testPL(5, 2, (2, 99), [2, 3, 4]) testPL(5, 2, (20, 99), []) def test_read_batch_urls(self): f = io.StringIO('''\xef\xbb\xbf foo bar\r baz # More after this line\r ; or after this 
bam''') self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam']) def test_urlencode_postdata(self): data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'}) self.assertTrue(isinstance(data, bytes)) def test_update_url_query(self): def query_dict(url): return compat_parse_qs(compat_urlparse.urlparse(url).query) self.assertEqual(query_dict(update_url_query( 'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})), query_dict('http://example.com/path?quality=HD&format=mp4')) self.assertEqual(query_dict(update_url_query( 'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})), query_dict('http://example.com/path?system=LINUX&system=WINDOWS')) self.assertEqual(query_dict(update_url_query( 'http://example.com/path', {'fields': 'id,formats,subtitles'})), query_dict('http://example.com/path?fields=id,formats,subtitles')) self.assertEqual(query_dict(update_url_query( 'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})), query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails')) self.assertEqual(query_dict(update_url_query( 'http://example.com/path?manifest=f4m', {'manifest': []})), query_dict('http://example.com/path')) self.assertEqual(query_dict(update_url_query( 'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})), query_dict('http://example.com/path?system=LINUX')) self.assertEqual(query_dict(update_url_query( 'http://example.com/path', {'fields': b'id,formats,subtitles'})), query_dict('http://example.com/path?fields=id,formats,subtitles')) self.assertEqual(query_dict(update_url_query( 'http://example.com/path', {'width': 1080, 'height': 720})), query_dict('http://example.com/path?width=1080&height=720')) self.assertEqual(query_dict(update_url_query( 'http://example.com/path', {'bitrate': 5020.43})), query_dict('http://example.com/path?bitrate=5020.43')) self.assertEqual(query_dict(update_url_query( 'http://example.com/path', {'test': '第二行тест'})), query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82')) def test_multipart_encode(self): self.assertEqual( multipart_encode({b'field': b'value'}, boundary='AAAAAA')[0], b'--AAAAAA\r\nContent-Disposition: form-data; name="field"\r\n\r\nvalue\r\n--AAAAAA--\r\n') self.assertEqual( multipart_encode({'欄位'.encode('utf-8'): '值'.encode('utf-8')}, boundary='AAAAAA')[0], b'--AAAAAA\r\nContent-Disposition: form-data; name="\xe6\xac\x84\xe4\xbd\x8d"\r\n\r\n\xe5\x80\xbc\r\n--AAAAAA--\r\n') self.assertRaises( ValueError, multipart_encode, {b'field': b'value'}, boundary='value') def test_dict_get(self): FALSE_VALUES = { 'none': None, 'false': False, 'zero': 0, 'empty_string': '', 'empty_list': [], } d = FALSE_VALUES.copy() d['a'] = 42 self.assertEqual(dict_get(d, 'a'), 42) self.assertEqual(dict_get(d, 'b'), None) self.assertEqual(dict_get(d, 'b', 42), 42) self.assertEqual(dict_get(d, ('a', )), 42) self.assertEqual(dict_get(d, ('b', 'a', )), 42) self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42) self.assertEqual(dict_get(d, ('b', 'c', )), None) self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42) for key, false_value in FALSE_VALUES.items(): self.assertEqual(dict_get(d, ('b', 'c', key, )), None) self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value) def test_encode_compat_str(self): self.assertEqual(encode_compat_str(b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'utf-8'), 'тест') self.assertEqual(encode_compat_str('тест', 'utf-8'), 'тест') def test_parse_iso8601(self): 
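        # parse_iso8601 converts an ISO 8601 string to a POSIX timestamp; the
        # cases below exercise numeric offsets, 'Z', fractional seconds and a
        # malformed separator, which must map to None.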
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266) self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266) self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266) self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266) self.assertEqual(parse_iso8601('2015-09-29T08:27:31.727'), 1443515251) self.assertEqual(parse_iso8601('2015-09-29T08-27-31.727'), None) def test_strip_jsonp(self): stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);') d = json.loads(stripped) self.assertEqual(d, [{"id": "532cb", "x": 3}]) stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc') d = json.loads(stripped) self.assertEqual(d, {'STATUS': 'OK'}) stripped = strip_jsonp('ps.embedHandler({"status": "success"});') d = json.loads(stripped) self.assertEqual(d, {'status': 'success'}) stripped = strip_jsonp('window.cb && window.cb({"status": "success"});') d = json.loads(stripped) self.assertEqual(d, {'status': 'success'}) stripped = strip_jsonp('window.cb && cb({"status": "success"});') d = json.loads(stripped) self.assertEqual(d, {'status': 'success'}) def test_uppercase_escape(self): self.assertEqual(uppercase_escape('aä'), 'aä') self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐') def test_lowercase_escape(self): self.assertEqual(lowercase_escape('aä'), 'aä') self.assertEqual(lowercase_escape('\\u0026'), '&') def test_limit_length(self): self.assertEqual(limit_length(None, 12), None) self.assertEqual(limit_length('foo', 12), 'foo') self.assertTrue( limit_length('foo bar baz asd', 12).startswith('foo bar')) self.assertTrue('...' in limit_length('foo bar baz asd', 12)) def test_mimetype2ext(self): self.assertEqual(mimetype2ext(None), None) self.assertEqual(mimetype2ext('video/x-flv'), 'flv') self.assertEqual(mimetype2ext('application/x-mpegURL'), 'm3u8') self.assertEqual(mimetype2ext('text/vtt'), 'vtt') self.assertEqual(mimetype2ext('text/vtt;charset=utf-8'), 'vtt') self.assertEqual(mimetype2ext('text/html; charset=utf-8'), 'html') def test_month_by_name(self): self.assertEqual(month_by_name(None), None) self.assertEqual(month_by_name('December', 'en'), 12) self.assertEqual(month_by_name('décembre', 'fr'), 12) self.assertEqual(month_by_name('December'), 12) self.assertEqual(month_by_name('décembre'), None) self.assertEqual(month_by_name('Unknown', 'unknown'), None) def test_parse_codecs(self): self.assertEqual(parse_codecs(''), {}) self.assertEqual(parse_codecs('avc1.77.30, mp4a.40.2'), { 'vcodec': 'avc1.77.30', 'acodec': 'mp4a.40.2', }) self.assertEqual(parse_codecs('mp4a.40.2'), { 'vcodec': 'none', 'acodec': 'mp4a.40.2', }) self.assertEqual(parse_codecs('mp4a.40.5,avc1.42001e'), { 'vcodec': 'avc1.42001e', 'acodec': 'mp4a.40.5', }) self.assertEqual(parse_codecs('avc3.640028'), { 'vcodec': 'avc3.640028', 'acodec': 'none', }) self.assertEqual(parse_codecs(', h264,,newcodec,aac'), { 'vcodec': 'h264', 'acodec': 'aac', }) def test_escape_rfc3986(self): reserved = "!*'();:@&=+$,/?#[]" unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~' self.assertEqual(escape_rfc3986(reserved), reserved) self.assertEqual(escape_rfc3986(unreserved), unreserved) self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82') self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82') self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar') self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar') def test_escape_url(self): self.assertEqual( 
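            # escape_url percent-encodes raw non-ASCII path/query/fragment
            # characters and punycodes non-ASCII hostnames, while leaving
            # plain-ASCII URLs untouched, as the cases below show: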
    def test_escape_url(self):
        self.assertEqual(
            escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
            'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
        )
        self.assertEqual(
            escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
            'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
        )
        self.assertEqual(
            escape_url('http://тест.рф/фрагмент'),
            'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
        )
        self.assertEqual(
            escape_url('http://тест.рф/абв?абв=абв#абв'),
            'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
        )
        self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')

    def test_js_to_json_realworld(self):
        inp = '''{
            'clip':{'provider':'pseudo'}
        }'''
        self.assertEqual(js_to_json(inp), '''{
            "clip":{"provider":"pseudo"}
        }''')
        json.loads(js_to_json(inp))

        inp = '''{
            'playlist':[{'controls':{'all':null}}]
        }'''
        self.assertEqual(js_to_json(inp), '''{
            "playlist":[{"controls":{"all":null}}]
        }''')

        inp = '''"The CW\\'s \\'Crazy Ex-Girlfriend\\'"'''
        self.assertEqual(js_to_json(inp), '''"The CW's 'Crazy Ex-Girlfriend'"''')

        inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
        json_code = js_to_json(inp)
        self.assertEqual(json.loads(json_code), json.loads(inp))

        inp = '''{
            0:{src:'skipped', type: 'application/dash+xml'},
            1:{src:'skipped', type: 'application/vnd.apple.mpegURL'},
        }'''
        self.assertEqual(js_to_json(inp), '''{
            "0":{"src":"skipped", "type": "application/dash+xml"},
            "1":{"src":"skipped", "type": "application/vnd.apple.mpegURL"}
        }''')

        inp = '''{"foo":101}'''
        self.assertEqual(js_to_json(inp), '''{"foo":101}''')

        inp = '''{"duration": "00:01:07"}'''
        self.assertEqual(js_to_json(inp), '''{"duration": "00:01:07"}''')

        inp = '''{segments: [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}'''
        self.assertEqual(js_to_json(inp), '''{"segments": [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}''')

    def test_js_to_json_edgecases(self):
        on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
        self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})

        on = js_to_json('{"abc": true}')
        self.assertEqual(json.loads(on), {'abc': True})

        # Ignore JavaScript code as well
        on = js_to_json('''{
            "x": 1,
            y: "a",
            z: some.code
        }''')
        d = json.loads(on)
        self.assertEqual(d['x'], 1)
        self.assertEqual(d['y'], 'a')

        on = js_to_json('["abc", "def",]')
        self.assertEqual(json.loads(on), ['abc', 'def'])

        on = js_to_json('[/*comment\n*/"abc"/*comment\n*/,/*comment\n*/"def",/*comment\n*/]')
        self.assertEqual(json.loads(on), ['abc', 'def'])

        on = js_to_json('[//comment\n"abc" //comment\n,//comment\n"def",//comment\n]')
        self.assertEqual(json.loads(on), ['abc', 'def'])

        on = js_to_json('{"abc": "def",}')
        self.assertEqual(json.loads(on), {'abc': 'def'})

        on = js_to_json('{/*comment\n*/"abc"/*comment\n*/:/*comment\n*/"def"/*comment\n*/,/*comment\n*/}')
        self.assertEqual(json.loads(on), {'abc': 'def'})

        on = js_to_json('{ 0: /* " \n */ ",]" , }')
        self.assertEqual(json.loads(on), {'0': ',]'})

        on = js_to_json('{ /*comment\n*/0/*comment\n*/: /* " \n */ ",]" , }')
        self.assertEqual(json.loads(on), {'0': ',]'})

        on = js_to_json('{ 0: // comment\n1 }')
        self.assertEqual(json.loads(on), {'0': 1})

        on = js_to_json(r'["<p>x<\/p>"]')
        self.assertEqual(json.loads(on), ['<p>x</p>'])

        on = js_to_json(r'["\xaa"]')
        self.assertEqual(json.loads(on), ['\u00aa'])

        on = js_to_json("['a\\\nb']")
        self.assertEqual(json.loads(on), ['ab'])

        on = js_to_json("/*comment\n*/[/*comment\n*/'a\\\nb'/*comment\n*/]/*comment\n*/")
        self.assertEqual(json.loads(on), ['ab'])

        on = js_to_json('{0xff:0xff}')
        self.assertEqual(json.loads(on), {'255': 255})

        on = js_to_json('{/*comment\n*/0xff/*comment\n*/:/*comment\n*/0xff/*comment\n*/}')
        self.assertEqual(json.loads(on), {'255': 255})

        on = js_to_json('{077:077}')
        self.assertEqual(json.loads(on), {'63': 63})

        on = js_to_json('{/*comment\n*/077/*comment\n*/:/*comment\n*/077/*comment\n*/}')
        self.assertEqual(json.loads(on), {'63': 63})

        on = js_to_json('{42:42}')
        self.assertEqual(json.loads(on), {'42': 42})

        on = js_to_json('{/*comment\n*/42/*comment\n*/:/*comment\n*/42/*comment\n*/}')
        self.assertEqual(json.loads(on), {'42': 42})

        on = js_to_json('{42:4.2e1}')
        self.assertEqual(json.loads(on), {'42': 42.0})

    def test_js_to_json_malformed(self):
        self.assertEqual(js_to_json('42a1'), '42"a1"')
        self.assertEqual(js_to_json('42a-1'), '42"a"-1')

    def test_extract_attributes(self):
        self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
        self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
        self.assertEqual(extract_attributes('<e x=y>'), {'x': 'y'})
        self.assertEqual(extract_attributes('<e x="a \'b\' c">'), {'x': "a 'b' c"})
        self.assertEqual(extract_attributes('<e x=\'a "b" c\'>'), {'x': 'a "b" c'})
        self.assertEqual(extract_attributes('<e x="&#121;">'), {'x': 'y'})
        self.assertEqual(extract_attributes('<e x="&#x79;">'), {'x': 'y'})
        self.assertEqual(extract_attributes('<e x="&amp;">'), {'x': '&'})  # XML
        self.assertEqual(extract_attributes('<e x="&quot;">'), {'x': '"'})
        self.assertEqual(extract_attributes('<e x="&pound;">'), {'x': '£'})  # HTML 3.2
        self.assertEqual(extract_attributes('<e x="&lambda;">'), {'x': 'λ'})  # HTML 4.0
        self.assertEqual(extract_attributes('<e x="&foo">'), {'x': '&foo'})
        self.assertEqual(extract_attributes('<e x="\'">'), {'x': "'"})
        self.assertEqual(extract_attributes('<e x=\'"\'>'), {'x': '"'})
        self.assertEqual(extract_attributes('<e x >'), {'x': None})
        self.assertEqual(extract_attributes('<e x=y a>'), {'x': 'y', 'a': None})
        self.assertEqual(extract_attributes('<e x= y>'), {'x': 'y'})
        self.assertEqual(extract_attributes('<e x=1 y=2 x=3>'), {'y': '2', 'x': '3'})
        self.assertEqual(extract_attributes('<e \nx=\ny\n>'), {'x': 'y'})
        self.assertEqual(extract_attributes('<e \nx=\n"y"\n>'), {'x': 'y'})
        self.assertEqual(extract_attributes("<e \nx=\n'y'\n>"), {'x': 'y'})
        self.assertEqual(extract_attributes('<e \nx="\ny\n">'), {'x': '\ny\n'})
        self.assertEqual(extract_attributes('<e CAPS=x>'), {'caps': 'x'})  # Names lowercased
        self.assertEqual(extract_attributes('<e x=1 X=2>'), {'x': '2'})
        self.assertEqual(extract_attributes('<e X=1 x=2>'), {'x': '2'})
        self.assertEqual(extract_attributes('<e _:funny-name1=1>'), {'_:funny-name1': '1'})
        self.assertEqual(extract_attributes('<e x="Fáilte 世界 \U0001f600">'), {'x': 'Fáilte 世界 \U0001f600'})
        self.assertEqual(extract_attributes('<e x="décompose\u0301">'), {'x': 'décompose\u0301'})
        # "Narrow" Python builds don't support unicode code points outside BMP.
        try:
            compat_chr(0x10000)
            supports_outside_bmp = True
        except ValueError:
            supports_outside_bmp = False
        if supports_outside_bmp:
            self.assertEqual(extract_attributes('<e x="Smile &#128512;!">'), {'x': 'Smile \U0001f600!'})
        # Malformed HTML should not break attributes extraction on older Python
        self.assertEqual(extract_attributes(r'<mal"formed/>'), {})
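
    # clean_html strips tags from an HTML fragment, turning <br> into a
    # newline and collapsing other whitespace runs into single spaces.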
    def test_clean_html(self):
        self.assertEqual(clean_html('a:\nb'), 'a: b')
        self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
        self.assertEqual(clean_html('a<br>\xa0b'), 'a\nb')

    def test_intlist_to_bytes(self):
        self.assertEqual(
            intlist_to_bytes([0, 1, 127, 128, 255]),
            b'\x00\x01\x7f\x80\xff')

    def test_args_to_str(self):
        self.assertEqual(
            args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
            'foo ba/r -baz \'2 be\' \'\'' if compat_os_name != 'nt' else 'foo ba/r -baz "2 be" ""'
        )

    def test_parse_filesize(self):
        self.assertEqual(parse_filesize(None), None)
        self.assertEqual(parse_filesize(''), None)
        self.assertEqual(parse_filesize('91 B'), 91)
        self.assertEqual(parse_filesize('foobar'), None)
        self.assertEqual(parse_filesize('2 MiB'), 2097152)
        self.assertEqual(parse_filesize('5 GB'), 5000000000)
        self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
        self.assertEqual(parse_filesize('1.2tb'), 1200000000000)
        self.assertEqual(parse_filesize('1,24 KB'), 1240)
        self.assertEqual(parse_filesize('1,24 kb'), 1240)
        self.assertEqual(parse_filesize('8.5 megabytes'), 8500000)

    def test_parse_count(self):
        self.assertEqual(parse_count(None), None)
        self.assertEqual(parse_count(''), None)
        self.assertEqual(parse_count('0'), 0)
        self.assertEqual(parse_count('1000'), 1000)
        self.assertEqual(parse_count('1.000'), 1000)
        self.assertEqual(parse_count('1.1k'), 1100)
        self.assertEqual(parse_count('1.1kk'), 1100000)
        self.assertEqual(parse_count('1.1kk '), 1100000)
        self.assertEqual(parse_count('1.1kk views'), 1100000)

    def test_parse_resolution(self):
        self.assertEqual(parse_resolution(None), {})
        self.assertEqual(parse_resolution(''), {})
        self.assertEqual(parse_resolution('1920x1080'), {'width': 1920, 'height': 1080})
        self.assertEqual(parse_resolution('1920×1080'), {'width': 1920, 'height': 1080})
        self.assertEqual(parse_resolution('1920 x 1080'), {'width': 1920, 'height': 1080})
        self.assertEqual(parse_resolution('720p'), {'height': 720})
        self.assertEqual(parse_resolution('4k'), {'height': 2160})
        self.assertEqual(parse_resolution('8K'), {'height': 4320})

    def test_version_tuple(self):
        self.assertEqual(version_tuple('1'), (1,))
        self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
        self.assertEqual(version_tuple('10.1-6'), (10, 1, 6))  # avconv style
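
    # detect_exe_version picks the version token out of a program's banner
    # output (such as the first line printed by `ffmpeg -version`), even when
    # unrelated diagnostic lines precede it.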
    def test_detect_exe_version(self):
        self.assertEqual(detect_exe_version('''ffmpeg version 1.2.1
built on May 27 2013 08:37:26 with gcc 4.7 (Debian 4.7.3-4)
configuration: --prefix=/usr --extra-'''), '1.2.1')
        self.assertEqual(detect_exe_version('''ffmpeg version N-63176-g1fb4685
built on May 15 2014 22:09:06 with gcc 4.8.2 (GCC)'''), 'N-63176-g1fb4685')
        self.assertEqual(detect_exe_version('''X server found. dri2 connection failed!
Trying to open render node...
Success at /dev/dri/renderD128.
ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')

    def test_age_restricted(self):
        self.assertFalse(age_restricted(None, 10))  # unrestricted content
        self.assertFalse(age_restricted(1, None))  # unrestricted policy
        self.assertFalse(age_restricted(8, 10))
        self.assertTrue(age_restricted(18, 14))
        self.assertFalse(age_restricted(18, 18))

    def test_is_html(self):
        self.assertFalse(is_html(b'\x49\x44\x43<html'))
        self.assertTrue(is_html(b'<!DOCTYPE foo>\xaaa'))
        self.assertTrue(is_html(  # UTF-8 with BOM
            b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
        self.assertTrue(is_html(  # UTF-16-LE
            b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
        ))
        self.assertTrue(is_html(  # UTF-16-BE
            b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
        ))
        self.assertTrue(is_html(  # UTF-32-BE
            b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
        self.assertTrue(is_html(  # UTF-32-LE
            b'\xFF\xFE\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4\x00\x00\x00'))

    def test_render_table(self):
        self.assertEqual(
            render_table(
                ['a', 'bcd'],
                [[123, 4], [9999, 51]]),
            'a    bcd\n'
            '123  4\n'
            '9999 51')

    def test_match_str(self):
        self.assertRaises(ValueError, match_str, 'xy>foobar', {})
        self.assertFalse(match_str('xy', {'x': 1200}))
        self.assertTrue(match_str('!xy', {'x': 1200}))
        self.assertTrue(match_str('x', {'x': 1200}))
        self.assertFalse(match_str('!x', {'x': 1200}))
        self.assertTrue(match_str('x', {'x': 0}))
        self.assertFalse(match_str('x>0', {'x': 0}))
        self.assertFalse(match_str('x>0', {}))
        self.assertTrue(match_str('x>?0', {}))
        self.assertTrue(match_str('x>1K', {'x': 1200}))
        self.assertFalse(match_str('x>2K', {'x': 1200}))
        self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
        self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
        self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
        self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
        self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
        self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 90, 'description': 'foo'}))
        self.assertTrue(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'description': 'foo'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'dislike_count': 10}))
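    # dfxp2srt converts DFXP/TTML subtitle documents to SRT; paragraphs with
    # invalid timestamps are omitted from the output.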

    def test_dfxp2srt(self):
        dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
            <tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
            <body>
                <div xml:lang="en">
                    <p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
                    <p begin="1" end="2">第二行<br/>♪♪</p>
                    <p begin="2" dur="1"><span>Third<br/>Line</span></p>
                    <p begin="3" end="-1">Lines with invalid timestamps are ignored</p>
                    <p begin="-1" end="-1">Ignore, two</p>
                    <p begin="3" dur="-1">Ignored, three</p>
                </div>
            </body>
            </tt>'''.encode('utf-8')
        srt_data = '''1
00:00:00,000 --> 00:00:01,000
The following line contains Chinese characters and special symbols

2
00:00:01,000 --> 00:00:02,000
第二行
♪♪

3
00:00:02,000 --> 00:00:03,000
Third
Line

'''
        self.assertEqual(dfxp2srt(dfxp_data), srt_data)
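        # The conversion must also work when the document omits the default
        # TTML namespace declaration on its root element.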

        dfxp_data_no_default_namespace = '''<?xml version="1.0" encoding="UTF-8"?>
            <tt xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
            <body>
                <div xml:lang="en">
                    <p begin="0" end="1">The first line</p>
                </div>
            </body>
            </tt>'''.encode('utf-8')
        srt_data = '''1
00:00:00,000 --> 00:00:01,000
The first line

'''
        self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)

        dfxp_data_with_style = '''<?xml version="1.0" encoding="utf-8"?>