diff --git a/README.md b/README.md
index 85a9725..0f8293a 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
### [Alfred Workflow](https://www.alfredapp.com/workflows/) to generate random values for different data types 🎲️
-![all](/img/screenshots/all.png)
+![all](./img/screenshots/all.png)
-![imei](/img/screenshots/imei.png)
+![imei](./img/screenshots/imei.png)
diff --git a/info.plist b/info.plist
index dae7589..1338358 100644
--- a/info.plist
+++ b/info.plist
@@ -79,7 +79,7 @@
runningsubtext
...
script
- python ./main.py $@
+ python3 ./main.py $@
scriptargtype
1
scriptfile
@@ -121,7 +121,7 @@
version
- v1.1
+ 1.2
webaddress
https://github.com/fedecalendino/alfred-randomer
diff --git a/workflow/__init__.py b/workflow/__init__.py
index 5736ad9..f93fb60 100644
--- a/workflow/__init__.py
+++ b/workflow/__init__.py
@@ -12,14 +12,10 @@
import os
-# Workflow objects
-from .workflow import Workflow, manager
-from .workflow3 import Variables, Workflow3
-
-# Exceptions
-from .workflow import PasswordNotFound, KeychainError
-
+# Filter matching rules
# Icons
+# Exceptions
+# Workflow objects
from .workflow import (
ICON_ACCOUNT,
ICON_BURN,
@@ -44,10 +40,6 @@
ICON_USER,
ICON_WARNING,
ICON_WEB,
-)
-
-# Filter matching rules
-from .workflow import (
MATCH_ALL,
MATCH_ALLCHARS,
MATCH_ATOM,
@@ -57,8 +49,12 @@
MATCH_INITIALS_STARTSWITH,
MATCH_STARTSWITH,
MATCH_SUBSTRING,
+ KeychainError,
+ PasswordNotFound,
+ Workflow,
+ manager,
)
-
+from .workflow3 import Variables, Workflow3
__title__ = "Alfred-Workflow"
__version__ = open(os.path.join(os.path.dirname(__file__), "version")).read()
diff --git a/workflow/background.py b/workflow/background.py
index 1b6a744..bed4c15 100644
--- a/workflow/background.py
+++ b/workflow/background.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2014 deanishe@deanishe.net
@@ -17,13 +16,12 @@
and examples.
"""
-from __future__ import print_function, unicode_literals
-import signal
-import sys
import os
-import subprocess
import pickle
+import signal
+import subprocess
+import sys
from workflow import Workflow
@@ -97,7 +95,10 @@ def _job_pid(name):
return
with open(pidfile, "rb") as fp:
- pid = int(fp.read())
+ raw = fp.read()
+ # PID was written with int.to_bytes(4, sys.byteorder) in
+ # _fork_and_exit_parent, so decode it symmetrically.
+ pid = int.from_bytes(raw, sys.byteorder)
if _process_exists(pid):
return pid
@@ -143,7 +144,7 @@ def _fork_and_exit_parent(errmsg, wait=False, write=False):
if write: # write PID of child process to `pidfile`
tmp = pidfile + ".tmp"
with open(tmp, "wb") as fp:
- fp.write(str(pid))
+ fp.write(pid.to_bytes(4, sys.byteorder))
os.rename(tmp, pidfile)
if wait: # wait for child process to exit
os.waitpid(pid, 0)
@@ -164,9 +165,9 @@ def _fork_and_exit_parent(errmsg, wait=False, write=False):
# Now I am a daemon!
# Redirect standard file descriptors.
- si = open(stdin, "r", 0)
- so = open(stdout, "a+", 0)
- se = open(stderr, "a+", 0)
+ si = open(stdin, "r", 1)
+ so = open(stdout, "a+", 1)
+ se = open(stderr, "a+", 1)
if hasattr(sys.stdin, "fileno"):
os.dup2(si.fileno(), sys.stdin.fileno())
if hasattr(sys.stdout, "fileno"):
@@ -232,9 +233,9 @@ def run_in_background(name, args, **kwargs):
_log().debug("[%s] command cached: %s", name, argcache)
# Call this script
- cmd = ["/usr/bin/python", __file__, name]
+ cmd = [sys.executable, "-m", "workflow.background", name]
_log().debug("[%s] passing job to background runner: %r", name, cmd)
- retcode = subprocess.call(cmd)
+ retcode = subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=":".join(sys.path)))
if retcode: # pragma: no cover
_log().error("[%s] background runner failed with %d", name, retcode)
diff --git a/workflow/notify.py b/workflow/notify.py
index fe7dfa9..fa582f6 100644
--- a/workflow/notify.py
+++ b/workflow/notify.py
@@ -23,7 +23,6 @@
icon and then calls the application to post notifications.
"""
-from __future__ import print_function, unicode_literals
import os
import plistlib
@@ -33,9 +32,9 @@
import tarfile
import tempfile
import uuid
+from typing import List
-import workflow
-
+from . import workflow
_wf = None
_log = None
@@ -134,7 +133,7 @@ def install_notifier():
# until I figure out a better way of excluding this module
# from coverage in py2.6.
if sys.version_info >= (2, 7): # pragma: no cover
- from AppKit import NSWorkspace, NSImage
+ from AppKit import NSImage, NSWorkspace
ws = NSWorkspace.sharedWorkspace()
img = NSImage.alloc().init()
@@ -210,6 +209,10 @@ def notify(title="", text="", sound=None):
return False
+def usr_bin_env(*args: str) -> List[str]:
+ return ["/usr/bin/env", f'PATH={os.environ["PATH"]}'] + list(args)
+
+
def convert_image(inpath, outpath, size):
"""Convert an image file using ``sips``.
@@ -221,10 +224,12 @@ def convert_image(inpath, outpath, size):
Raises:
RuntimeError: Raised if ``sips`` exits with non-zero status.
"""
- cmd = [b"sips", b"-z", str(size), str(size), inpath, b"--out", outpath]
+ cmd = ["sips", "-z", str(size), str(size), inpath, "--out", outpath]
# log().debug(cmd)
with open(os.devnull, "w") as pipe:
- retcode = subprocess.call(cmd, stdout=pipe, stderr=subprocess.STDOUT)
+ retcode = subprocess.call(
+ cmd, stdout=pipe, stderr=subprocess.STDOUT
+ )
if retcode != 0:
raise RuntimeError("sips exited with %d" % retcode)
@@ -270,7 +275,7 @@ def png_to_icns(png_path, icns_path):
continue
convert_image(png_path, outpath, size)
- cmd = [b"iconutil", b"-c", b"icns", b"-o", icns_path, iconset]
+ cmd = ["iconutil", "-c", "icns", "-o", icns_path, iconset]
retcode = subprocess.call(cmd)
if retcode != 0:
@@ -290,7 +295,6 @@ def png_to_icns(png_path, icns_path):
# This won't work on 2.6, as `argparse` isn't available
# by default.
import argparse
-
from unicodedata import normalize
def ustr(s):
diff --git a/workflow/update.py b/workflow/update.py
index 3cb0e5f..643681b 100644
--- a/workflow/update.py
+++ b/workflow/update.py
@@ -21,18 +21,21 @@
"""
-from __future__ import print_function, unicode_literals
-from collections import defaultdict
-from functools import total_ordering
import json
import os
-import tempfile
import re
import subprocess
+import tempfile
+from collections import defaultdict
+from functools import total_ordering
+from itertools import zip_longest
+
+import requests
-import workflow
-import web
+from workflow.util import atomic_writer
+
+from . import workflow
# __all__ = []
@@ -120,7 +123,7 @@ def from_releases(cls, js):
dls.append(Download(url, filename, version, release["prerelease"]))
valid = True
- for ext, n in dupes.items():
+ for ext, n in list(dupes.items()):
if n > 1:
wf().logger.debug(
'ignored release "%s": multiple assets ' 'with extension "%s"',
@@ -147,7 +150,7 @@ def __init__(self, url, filename, version, prerelease=False):
pre-release. Defaults to False.
"""
- if isinstance(version, basestring):
+ if isinstance(version, str):
version = Version(version)
self.url = url
@@ -175,14 +178,14 @@ def dict(self):
def __str__(self):
"""Format `Download` for printing."""
- u = (
- "Download(url={dl.url!r}, "
+ return (
+ "Download("
+ "url={dl.url!r}, "
"filename={dl.filename!r}, "
"version={dl.version!r}, "
- "prerelease={dl.prerelease!r})".format(dl=self)
- )
-
- return u.encode("utf-8")
+ "prerelease={dl.prerelease!r}"
+ ")"
+ ).format(dl=self)
def __repr__(self):
"""Code-like representation of `Download`."""
@@ -254,6 +257,7 @@ def __init__(self, vstr):
self._parse(vstr)
def _parse(self, vstr):
+ vstr = str(vstr)
if vstr.startswith("v"):
m = self.match_version(vstr[1:])
else:
@@ -310,9 +314,20 @@ def __lt__(self, other):
return True
if other.suffix and not self.suffix:
return False
- return self._parse_dotted_string(self.suffix) < self._parse_dotted_string(
- other.suffix
- )
+
+ self_suffix = self._parse_dotted_string(self.suffix)
+ other_suffix = self._parse_dotted_string(other.suffix)
+
+ for s, o in zip_longest(self_suffix, other_suffix):
+ if s is None: # shorter value wins
+ return True
+ elif o is None: # longer value loses
+ return False
+ elif type(s) != type(o): # mixed types: coerce both to str
+ s, o = str(s), str(o)
+ if s == o: # equal components: compare the next pair
+ continue
+ return s < o # first differing component decides the order
# t > o
return False
@@ -374,10 +389,11 @@ def retrieve_download(dl):
path = os.path.join(tempfile.gettempdir(), dl.filename)
wf().logger.debug("downloading update from " "%r to %r ...", dl.url, path)
- r = web.get(dl.url)
+ r = requests.get(dl.url)
r.raise_for_status()
- r.save_to_path(path)
+ with atomic_writer(path, "wb") as file_obj:
+ file_obj.write(r.content)
return path
@@ -413,7 +429,7 @@ def get_downloads(repo):
def _fetch():
wf().logger.info("retrieving releases for %r ...", repo)
- r = web.get(url)
+ r = requests.get(url)
r.raise_for_status()
return r.content
@@ -470,11 +486,7 @@ def check_update(repo, current_version, prereleases=False, alfred_version=None):
"""
key = "__workflow_latest_version"
# data stored when no update is available
- no_update = {
- "available": False,
- "download": None,
- "version": None,
- }
+ no_update = {"available": False, "download": None, "version": None}
current = Version(current_version)
dls = get_downloads(repo)
@@ -496,12 +508,7 @@ def check_update(repo, current_version, prereleases=False, alfred_version=None):
if dl.version > current:
wf().cache_data(
- key,
- {
- "version": str(dl.version),
- "download": dl.dict,
- "available": True,
- },
+ key, {"version": str(dl.version), "download": dl.dict, "available": True}
)
return True
@@ -517,11 +524,7 @@ def install_update():
"""
key = "__workflow_latest_version"
# data stored when no update is available
- no_update = {
- "available": False,
- "download": None,
- "version": None,
- }
+ no_update = {"available": False, "download": None, "version": None}
status = wf().cached_data(key, max_age=0)
if not status or not status.get("available"):
diff --git a/workflow/util.py b/workflow/util.py
index b606bda..998456b 100644
--- a/workflow/util.py
+++ b/workflow/util.py
@@ -10,11 +10,8 @@
"""A selection of helper functions useful for building workflows."""
-from __future__ import print_function, absolute_import
import atexit
-from collections import namedtuple
-from contextlib import contextmanager
import errno
import fcntl
import functools
@@ -23,8 +20,10 @@
import signal
import subprocess
import sys
-from threading import Event
import time
+from collections import namedtuple
+from contextlib import contextmanager
+from threading import Event
# JXA scripts to call Alfred's API via the Scripting Bridge
# {app} is automatically replaced with "Alfred 3" or
@@ -88,9 +87,9 @@ def jxa_app_name():
"""
if os.getenv("alfred_version", "").startswith("3"):
# Alfred 3
- return u"Alfred 3"
+ return "Alfred 3"
# Alfred 4+
- return u"com.runningwithcrayons.Alfred"
+ return "com.runningwithcrayons.Alfred"
def unicodify(s, encoding="utf-8", norm=None):
@@ -110,8 +109,8 @@ def unicodify(s, encoding="utf-8", norm=None):
unicode: Decoded, optionally normalised, Unicode string.
"""
- if not isinstance(s, unicode):
- s = unicode(s, encoding)
+ if not isinstance(s, str):
+ s = str(s, encoding)
if norm:
from unicodedata import normalize
@@ -139,7 +138,7 @@ def utf8ify(s):
if isinstance(s, str):
return s
- if isinstance(s, unicode):
+ if isinstance(s, str):
return s.encode("utf-8")
return str(s)
@@ -163,7 +162,7 @@ def applescriptify(s):
unicode: Escaped string.
"""
- return s.replace(u'"', u'" & quote & "')
+ return s.replace('"', '" & quote & "')
def run_command(cmd, **kwargs):
@@ -182,8 +181,8 @@ def run_command(cmd, **kwargs):
str: Output returned by :func:`~subprocess.check_output`.
"""
- cmd = [utf8ify(s) for s in cmd]
- return subprocess.check_output(cmd, **kwargs)
+ cmd = [str(s) for s in cmd]
+ return subprocess.check_output(cmd, **kwargs).decode()
def run_applescript(script, *args, **kwargs):
@@ -301,11 +300,7 @@ def set_config(name, value, bundleid=None, exportable=False):
"""
bundleid = bundleid or os.getenv("alfred_workflow_bundleid")
appname = jxa_app_name()
- opts = {
- "toValue": value,
- "inWorkflow": bundleid,
- "exportable": exportable,
- }
+ opts = {"toValue": value, "inWorkflow": bundleid, "exportable": exportable}
script = JXA_SET_CONFIG.format(
app=json.dumps(appname),
@@ -353,7 +348,7 @@ def search_in_alfred(query=None):
query (unicode, optional): Search query.
"""
- query = query or u""
+ query = query or ""
appname = jxa_app_name()
script = JXA_SEARCH.format(app=json.dumps(appname), arg=json.dumps(query))
run_applescript(script, lang="JavaScript")
@@ -443,7 +438,7 @@ def appinfo(name):
if not bid: # pragma: no cover
return None
- return AppInfo(unicodify(name), unicodify(path), unicodify(bid))
+ return AppInfo(name, path, bid)
@contextmanager
@@ -470,7 +465,7 @@ def atomic_writer(fpath, mode):
finally:
try:
os.remove(temppath)
- except (OSError, IOError):
+ except OSError:
pass
@@ -484,7 +479,7 @@ class LockFile(object):
>>> path = '/path/to/file'
>>> with LockFile(path):
- >>> with open(path, 'wb') as fp:
+ >>> with open(path, 'w') as fp:
>>> fp.write(data)
Args:
@@ -577,10 +572,10 @@ def release(self):
self._lockfile = None
try:
os.unlink(self.lockfile)
- except (IOError, OSError): # pragma: no cover
+ except OSError: # pragma: no cover
pass
- return True
+ return True # noqa: B012
def __enter__(self):
"""Acquire lock."""
diff --git a/workflow/web.py b/workflow/web.py
deleted file mode 100644
index 0fa022a..0000000
--- a/workflow/web.py
+++ /dev/null
@@ -1,809 +0,0 @@
-# encoding: utf-8
-#
-# Copyright (c) 2014 Dean Jackson
-#
-# MIT Licence. See http://opensource.org/licenses/MIT
-#
-# Created on 2014-02-15
-#
-
-"""Lightweight HTTP library with a requests-like interface."""
-
-from __future__ import absolute_import, print_function
-
-import codecs
-import json
-import mimetypes
-import os
-import random
-import re
-import socket
-import string
-import unicodedata
-import urllib
-import urllib2
-import urlparse
-import zlib
-
-__version__ = open(os.path.join(os.path.dirname(__file__), "version")).read()
-
-USER_AGENT = (
- u"Alfred-Workflow/" + __version__ + " (+http://www.deanishe.net/alfred-workflow)"
-)
-
-# Valid characters for multipart form data boundaries
-BOUNDARY_CHARS = string.digits + string.ascii_letters
-
-# HTTP response codes
-RESPONSES = {
- 100: "Continue",
- 101: "Switching Protocols",
- 200: "OK",
- 201: "Created",
- 202: "Accepted",
- 203: "Non-Authoritative Information",
- 204: "No Content",
- 205: "Reset Content",
- 206: "Partial Content",
- 300: "Multiple Choices",
- 301: "Moved Permanently",
- 302: "Found",
- 303: "See Other",
- 304: "Not Modified",
- 305: "Use Proxy",
- 307: "Temporary Redirect",
- 400: "Bad Request",
- 401: "Unauthorized",
- 402: "Payment Required",
- 403: "Forbidden",
- 404: "Not Found",
- 405: "Method Not Allowed",
- 406: "Not Acceptable",
- 407: "Proxy Authentication Required",
- 408: "Request Timeout",
- 409: "Conflict",
- 410: "Gone",
- 411: "Length Required",
- 412: "Precondition Failed",
- 413: "Request Entity Too Large",
- 414: "Request-URI Too Long",
- 415: "Unsupported Media Type",
- 416: "Requested Range Not Satisfiable",
- 417: "Expectation Failed",
- 500: "Internal Server Error",
- 501: "Not Implemented",
- 502: "Bad Gateway",
- 503: "Service Unavailable",
- 504: "Gateway Timeout",
- 505: "HTTP Version Not Supported",
-}
-
-
-def str_dict(dic):
- """Convert keys and values in ``dic`` into UTF-8-encoded :class:`str`.
-
- :param dic: Mapping of Unicode strings
- :type dic: dict
- :returns: Dictionary containing only UTF-8 strings
- :rtype: dict
-
- """
- if isinstance(dic, CaseInsensitiveDictionary):
- dic2 = CaseInsensitiveDictionary()
- else:
- dic2 = {}
- for k, v in dic.items():
- if isinstance(k, unicode):
- k = k.encode("utf-8")
- if isinstance(v, unicode):
- v = v.encode("utf-8")
- dic2[k] = v
- return dic2
-
-
-class NoRedirectHandler(urllib2.HTTPRedirectHandler):
- """Prevent redirections."""
-
- def redirect_request(self, *args):
- """Ignore redirect."""
- return None
-
-
-# Adapted from https://gist.github.com/babakness/3901174
-class CaseInsensitiveDictionary(dict):
- """Dictionary with caseless key search.
-
- Enables case insensitive searching while preserving case sensitivity
- when keys are listed, ie, via keys() or items() methods.
-
- Works by storing a lowercase version of the key as the new key and
- stores the original key-value pair as the key's value
- (values become dictionaries).
-
- """
-
- def __init__(self, initval=None):
- """Create new case-insensitive dictionary."""
- if isinstance(initval, dict):
- for key, value in initval.iteritems():
- self.__setitem__(key, value)
-
- elif isinstance(initval, list):
- for (key, value) in initval:
- self.__setitem__(key, value)
-
- def __contains__(self, key):
- return dict.__contains__(self, key.lower())
-
- def __getitem__(self, key):
- return dict.__getitem__(self, key.lower())["val"]
-
- def __setitem__(self, key, value):
- return dict.__setitem__(self, key.lower(), {"key": key, "val": value})
-
- def get(self, key, default=None):
- """Return value for case-insensitive key or default."""
- try:
- v = dict.__getitem__(self, key.lower())
- except KeyError:
- return default
- else:
- return v["val"]
-
- def update(self, other):
- """Update values from other ``dict``."""
- for k, v in other.items():
- self[k] = v
-
- def items(self):
- """Return ``(key, value)`` pairs."""
- return [(v["key"], v["val"]) for v in dict.itervalues(self)]
-
- def keys(self):
- """Return original keys."""
- return [v["key"] for v in dict.itervalues(self)]
-
- def values(self):
- """Return all values."""
- return [v["val"] for v in dict.itervalues(self)]
-
- def iteritems(self):
- """Iterate over ``(key, value)`` pairs."""
- for v in dict.itervalues(self):
- yield v["key"], v["val"]
-
- def iterkeys(self):
- """Iterate over original keys."""
- for v in dict.itervalues(self):
- yield v["key"]
-
- def itervalues(self):
- """Interate over values."""
- for v in dict.itervalues(self):
- yield v["val"]
-
-
-class Request(urllib2.Request):
- """Subclass of :class:`urllib2.Request` that supports custom methods."""
-
- def __init__(self, *args, **kwargs):
- """Create a new :class:`Request`."""
- self._method = kwargs.pop("method", None)
- urllib2.Request.__init__(self, *args, **kwargs)
-
- def get_method(self):
- return self._method.upper()
-
-
-class Response(object):
- """
- Returned by :func:`request` / :func:`get` / :func:`post` functions.
-
- Simplified version of the ``Response`` object in the ``requests`` library.
-
- >>> r = request('http://www.google.com')
- >>> r.status_code
- 200
- >>> r.encoding
- ISO-8859-1
- >>> r.content # bytes
- ...
- >>> r.text # unicode, decoded according to charset in HTTP header/meta tag
- u' ...'
- >>> r.json() # content parsed as JSON
-
- """
-
- def __init__(self, request, stream=False):
- """Call `request` with :mod:`urllib2` and process results.
-
- :param request: :class:`Request` instance
- :param stream: Whether to stream response or retrieve it all at once
- :type stream: bool
-
- """
- self.request = request
- self._stream = stream
- self.url = None
- self.raw = None
- self._encoding = None
- self.error = None
- self.status_code = None
- self.reason = None
- self.headers = CaseInsensitiveDictionary()
- self._content = None
- self._content_loaded = False
- self._gzipped = False
-
- # Execute query
- try:
- self.raw = urllib2.urlopen(request)
- except urllib2.HTTPError as err:
- self.error = err
- try:
- self.url = err.geturl()
- # sometimes (e.g. when authentication fails)
- # urllib can't get a URL from an HTTPError
- # This behaviour changes across Python versions,
- # so no test cover (it isn't important).
- except AttributeError: # pragma: no cover
- pass
- self.status_code = err.code
- else:
- self.status_code = self.raw.getcode()
- self.url = self.raw.geturl()
- self.reason = RESPONSES.get(self.status_code)
-
- # Parse additional info if request succeeded
- if not self.error:
- headers = self.raw.info()
- self.transfer_encoding = headers.getencoding()
- self.mimetype = headers.gettype()
- for key in headers.keys():
- self.headers[key.lower()] = headers.get(key)
-
- # Is content gzipped?
- # Transfer-Encoding appears to not be used in the wild
- # (contrary to the HTTP standard), but no harm in testing
- # for it
- if "gzip" in headers.get("content-encoding", "") or "gzip" in headers.get(
- "transfer-encoding", ""
- ):
- self._gzipped = True
-
- @property
- def stream(self):
- """Whether response is streamed.
-
- Returns:
- bool: `True` if response is streamed.
-
- """
- return self._stream
-
- @stream.setter
- def stream(self, value):
- if self._content_loaded:
- raise RuntimeError("`content` has already been read from " "this Response.")
-
- self._stream = value
-
- def json(self):
- """Decode response contents as JSON.
-
- :returns: object decoded from JSON
- :rtype: list, dict or unicode
-
- """
- return json.loads(self.content, self.encoding or "utf-8")
-
- @property
- def encoding(self):
- """Text encoding of document or ``None``.
-
- :returns: Text encoding if found.
- :rtype: str or ``None``
-
- """
- if not self._encoding:
- self._encoding = self._get_encoding()
-
- return self._encoding
-
- @property
- def content(self):
- """Raw content of response (i.e. bytes).
-
- :returns: Body of HTTP response
- :rtype: str
-
- """
- if not self._content:
-
- # Decompress gzipped content
- if self._gzipped:
- decoder = zlib.decompressobj(16 + zlib.MAX_WBITS)
- self._content = decoder.decompress(self.raw.read())
-
- else:
- self._content = self.raw.read()
-
- self._content_loaded = True
-
- return self._content
-
- @property
- def text(self):
- """Unicode-decoded content of response body.
-
- If no encoding can be determined from HTTP headers or the content
- itself, the encoded response body will be returned instead.
-
- :returns: Body of HTTP response
- :rtype: unicode or str
-
- """
- if self.encoding:
- return unicodedata.normalize("NFC", unicode(self.content, self.encoding))
- return self.content
-
- def iter_content(self, chunk_size=4096, decode_unicode=False):
- """Iterate over response data.
-
- .. versionadded:: 1.6
-
- :param chunk_size: Number of bytes to read into memory
- :type chunk_size: int
- :param decode_unicode: Decode to Unicode using detected encoding
- :type decode_unicode: bool
- :returns: iterator
-
- """
- if not self.stream:
- raise RuntimeError(
- "You cannot call `iter_content` on a "
- "Response unless you passed `stream=True`"
- " to `get()`/`post()`/`request()`."
- )
-
- if self._content_loaded:
- raise RuntimeError("`content` has already been read from this Response.")
-
- def decode_stream(iterator, r):
- dec = codecs.getincrementaldecoder(r.encoding)(errors="replace")
-
- for chunk in iterator:
- data = dec.decode(chunk)
- if data:
- yield data
-
- data = dec.decode(b"", final=True)
- if data: # pragma: no cover
- yield data
-
- def generate():
- if self._gzipped:
- decoder = zlib.decompressobj(16 + zlib.MAX_WBITS)
-
- while True:
- chunk = self.raw.read(chunk_size)
- if not chunk:
- break
-
- if self._gzipped:
- chunk = decoder.decompress(chunk)
-
- yield chunk
-
- chunks = generate()
-
- if decode_unicode and self.encoding:
- chunks = decode_stream(chunks, self)
-
- return chunks
-
- def save_to_path(self, filepath):
- """Save retrieved data to file at ``filepath``.
-
- .. versionadded: 1.9.6
-
- :param filepath: Path to save retrieved data.
-
- """
- filepath = os.path.abspath(filepath)
- dirname = os.path.dirname(filepath)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
-
- self.stream = True
-
- with open(filepath, "wb") as fileobj:
- for data in self.iter_content():
- fileobj.write(data)
-
- def raise_for_status(self):
- """Raise stored error if one occurred.
-
- error will be instance of :class:`urllib2.HTTPError`
- """
- if self.error is not None:
- raise self.error
- return
-
- def _get_encoding(self):
- """Get encoding from HTTP headers or content.
-
- :returns: encoding or `None`
- :rtype: unicode or ``None``
-
- """
- headers = self.raw.info()
- encoding = None
-
- if headers.getparam("charset"):
- encoding = headers.getparam("charset")
-
- # HTTP Content-Type header
- for param in headers.getplist():
- if param.startswith("charset="):
- encoding = param[8:]
- break
-
- if not self.stream: # Try sniffing response content
- # Encoding declared in document should override HTTP headers
- if self.mimetype == "text/html": # sniff HTML headers
- m = re.search(r"""""", self.content)
- if m:
- encoding = m.group(1)
-
- elif (
- self.mimetype.startswith("application/")
- or self.mimetype.startswith("text/")
- ) and "xml" in self.mimetype:
- m = re.search(
- r"""]*\?>""", self.content
- )
- if m:
- encoding = m.group(1)
-
- # Format defaults
- if self.mimetype == "application/json" and not encoding:
- # The default encoding for JSON
- encoding = "utf-8"
-
- elif self.mimetype == "application/xml" and not encoding:
- # The default for 'application/xml'
- encoding = "utf-8"
-
- if encoding:
- encoding = encoding.lower()
-
- return encoding
-
-
-def request(
- method,
- url,
- params=None,
- data=None,
- headers=None,
- cookies=None,
- files=None,
- auth=None,
- timeout=60,
- allow_redirects=False,
- stream=False,
-):
- """Initiate an HTTP(S) request. Returns :class:`Response` object.
-
- :param method: 'GET' or 'POST'
- :type method: unicode
- :param url: URL to open
- :type url: unicode
- :param params: mapping of URL parameters
- :type params: dict
- :param data: mapping of form data ``{'field_name': 'value'}`` or
- :class:`str`
- :type data: dict or str
- :param headers: HTTP headers
- :type headers: dict
- :param cookies: cookies to send to server
- :type cookies: dict
- :param files: files to upload (see below).
- :type files: dict
- :param auth: username, password
- :type auth: tuple
- :param timeout: connection timeout limit in seconds
- :type timeout: int
- :param allow_redirects: follow redirections
- :type allow_redirects: bool
- :param stream: Stream content instead of fetching it all at once.
- :type stream: bool
- :returns: Response object
- :rtype: :class:`Response`
-
-
- The ``files`` argument is a dictionary::
-
- {'fieldname' : { 'filename': 'blah.txt',
- 'content': '',
- 'mimetype': 'text/plain'}
- }
-
- * ``fieldname`` is the name of the field in the HTML form.
- * ``mimetype`` is optional. If not provided, :mod:`mimetypes` will
- be used to guess the mimetype, or ``application/octet-stream``
- will be used.
-
- """
- # TODO: cookies
- socket.setdefaulttimeout(timeout)
-
- # Default handlers
- openers = [urllib2.ProxyHandler(urllib2.getproxies())]
-
- if not allow_redirects:
- openers.append(NoRedirectHandler())
-
- if auth is not None: # Add authorisation handler
- username, password = auth
- password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
- password_manager.add_password(None, url, username, password)
- auth_manager = urllib2.HTTPBasicAuthHandler(password_manager)
- openers.append(auth_manager)
-
- # Install our custom chain of openers
- opener = urllib2.build_opener(*openers)
- urllib2.install_opener(opener)
-
- if not headers:
- headers = CaseInsensitiveDictionary()
- else:
- headers = CaseInsensitiveDictionary(headers)
-
- if "user-agent" not in headers:
- headers["user-agent"] = USER_AGENT
-
- # Accept gzip-encoded content
- encodings = [s.strip() for s in headers.get("accept-encoding", "").split(",")]
- if "gzip" not in encodings:
- encodings.append("gzip")
-
- headers["accept-encoding"] = ", ".join(encodings)
-
- if files:
- if not data:
- data = {}
- new_headers, data = encode_multipart_formdata(data, files)
- headers.update(new_headers)
- elif data and isinstance(data, dict):
- data = urllib.urlencode(str_dict(data))
-
- # Make sure everything is encoded text
- headers = str_dict(headers)
-
- if isinstance(url, unicode):
- url = url.encode("utf-8")
-
- if params: # GET args (POST args are handled in encode_multipart_formdata)
-
- scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
-
- if query: # Combine query string and `params`
- url_params = urlparse.parse_qs(query)
- # `params` take precedence over URL query string
- url_params.update(params)
- params = url_params
-
- query = urllib.urlencode(str_dict(params), doseq=True)
- url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
-
- req = Request(url, data, headers, method=method)
- return Response(req, stream)
-
-
-def get(
- url,
- params=None,
- headers=None,
- cookies=None,
- auth=None,
- timeout=60,
- allow_redirects=True,
- stream=False,
-):
- """Initiate a GET request. Arguments as for :func:`request`.
-
- :returns: :class:`Response` instance
-
- """
- return request(
- "GET",
- url,
- params,
- headers=headers,
- cookies=cookies,
- auth=auth,
- timeout=timeout,
- allow_redirects=allow_redirects,
- stream=stream,
- )
-
-
-def delete(
- url,
- params=None,
- data=None,
- headers=None,
- cookies=None,
- auth=None,
- timeout=60,
- allow_redirects=True,
- stream=False,
-):
- """Initiate a DELETE request. Arguments as for :func:`request`.
-
- :returns: :class:`Response` instance
-
- """
- return request(
- "DELETE",
- url,
- params,
- data,
- headers=headers,
- cookies=cookies,
- auth=auth,
- timeout=timeout,
- allow_redirects=allow_redirects,
- stream=stream,
- )
-
-
-def post(
- url,
- params=None,
- data=None,
- headers=None,
- cookies=None,
- files=None,
- auth=None,
- timeout=60,
- allow_redirects=False,
- stream=False,
-):
- """Initiate a POST request. Arguments as for :func:`request`.
-
- :returns: :class:`Response` instance
-
- """
- return request(
- "POST",
- url,
- params,
- data,
- headers,
- cookies,
- files,
- auth,
- timeout,
- allow_redirects,
- stream,
- )
-
-
-def put(
- url,
- params=None,
- data=None,
- headers=None,
- cookies=None,
- files=None,
- auth=None,
- timeout=60,
- allow_redirects=False,
- stream=False,
-):
- """Initiate a PUT request. Arguments as for :func:`request`.
-
- :returns: :class:`Response` instance
-
- """
- return request(
- "PUT",
- url,
- params,
- data,
- headers,
- cookies,
- files,
- auth,
- timeout,
- allow_redirects,
- stream,
- )
-
-
-def encode_multipart_formdata(fields, files):
- """Encode form data (``fields``) and ``files`` for POST request.
-
- :param fields: mapping of ``{name : value}`` pairs for normal form fields.
- :type fields: dict
- :param files: dictionary of fieldnames/files elements for file data.
- See below for details.
- :type files: dict of :class:`dict`
- :returns: ``(headers, body)`` ``headers`` is a
- :class:`dict` of HTTP headers
- :rtype: 2-tuple ``(dict, str)``
-
- The ``files`` argument is a dictionary::
-
- {'fieldname' : { 'filename': 'blah.txt',
- 'content': '',
- 'mimetype': 'text/plain'}
- }
-
- - ``fieldname`` is the name of the field in the HTML form.
- - ``mimetype`` is optional. If not provided, :mod:`mimetypes` will
- be used to guess the mimetype, or ``application/octet-stream``
- will be used.
-
- """
-
- def get_content_type(filename):
- """Return or guess mimetype of ``filename``.
-
- :param filename: filename of file
- :type filename: unicode/str
- :returns: mime-type, e.g. ``text/html``
- :rtype: str
-
- """
- return mimetypes.guess_type(filename)[0] or "application/octet-stream"
-
- boundary = "-----" + "".join(random.choice(BOUNDARY_CHARS) for i in range(30))
- CRLF = "\r\n"
- output = []
-
- # Normal form fields
- for (name, value) in fields.items():
- if isinstance(name, unicode):
- name = name.encode("utf-8")
- if isinstance(value, unicode):
- value = value.encode("utf-8")
- output.append("--" + boundary)
- output.append('Content-Disposition: form-data; name="%s"' % name)
- output.append("")
- output.append(value)
-
- # Files to upload
- for name, d in files.items():
- filename = d[u"filename"]
- content = d[u"content"]
- if u"mimetype" in d:
- mimetype = d[u"mimetype"]
- else:
- mimetype = get_content_type(filename)
- if isinstance(name, unicode):
- name = name.encode("utf-8")
- if isinstance(filename, unicode):
- filename = filename.encode("utf-8")
- if isinstance(mimetype, unicode):
- mimetype = mimetype.encode("utf-8")
- output.append("--" + boundary)
- output.append(
- "Content-Disposition: form-data; "
- 'name="%s"; filename="%s"' % (name, filename)
- )
- output.append("Content-Type: %s" % mimetype)
- output.append("")
- output.append(content)
-
- output.append("--" + boundary + "--")
- output.append("")
- body = CRLF.join(output)
- headers = {
- "Content-Type": "multipart/form-data; boundary=%s" % boundary,
- "Content-Length": str(len(body)),
- }
- return (headers, body)
diff --git a/workflow/workflow.py b/workflow/workflow.py
index a00baeb..22a8da0 100644
--- a/workflow/workflow.py
+++ b/workflow/workflow.py
@@ -19,11 +19,8 @@
"""
-from __future__ import print_function, unicode_literals
import binascii
-import cPickle
-from copy import deepcopy
import json
import logging
import logging.handlers
@@ -37,6 +34,9 @@
import sys
import time
import unicodedata
+from contextlib import contextmanager
+from copy import deepcopy
+from typing import Optional
try:
import xml.etree.cElementTree as ET
@@ -44,12 +44,10 @@
import xml.etree.ElementTree as ET
# imported to maintain API
-from util import AcquisitionError # noqa: F401
-from util import (
- atomic_writer,
- LockFile,
- uninterruptible,
-)
+from workflow.util import AcquisitionError # noqa: F401
+from workflow.util import LockFile, atomic_writer, uninterruptible
+
+assert sys.version_info[0] == 3
#: Sentinel for properties that haven't been set yet (that might
#: correctly have the value ``None``)
@@ -542,8 +540,8 @@ def register(self, name, serializer):
"""
# Basic validation
- getattr(serializer, "load")
- getattr(serializer, "dump")
+ serializer.load
+ serializer.dump
self._serializers[name] = serializer
@@ -583,86 +581,72 @@ def serializers(self):
return sorted(self._serializers.keys())
-class JSONSerializer(object):
- """Wrapper around :mod:`json`. Sets ``indent`` and ``encoding``.
-
- .. versionadded:: 1.8
-
- Use this serializer if you need readable data files. JSON doesn't
- support Python objects as well as ``cPickle``/``pickle``, so be
- careful which data you try to serialize as JSON.
-
- """
+class BaseSerializer:
+ is_binary: Optional[bool] = None
@classmethod
- def load(cls, file_obj):
- """Load serialized object from open JSON file.
-
- .. versionadded:: 1.8
-
- :param file_obj: file handle
- :type file_obj: ``file`` object
- :returns: object loaded from JSON file
- :rtype: object
-
- """
- return json.load(file_obj)
+ def binary_mode(cls):
+ return "b" if cls.is_binary else ""
@classmethod
- def dump(cls, obj, file_obj):
- """Serialize object ``obj`` to open JSON file.
-
- .. versionadded:: 1.8
+ def _opener(cls, opener, path, mode="r"):
+ with opener(path, mode + cls.binary_mode()) as fp:
+ yield fp
- :param obj: Python object to serialize
- :type obj: JSON-serializable data structure
- :param file_obj: file handle
- :type file_obj: ``file`` object
+ @classmethod
+ @contextmanager
+ def atomic_writer(cls, path, mode):
+ yield from cls._opener(atomic_writer, path, mode)
- """
- return json.dump(obj, file_obj, indent=2, encoding="utf-8")
+ @classmethod
+ @contextmanager
+ def open(cls, path, mode):
+ yield from cls._opener(open, path, mode)
-class CPickleSerializer(object):
- """Wrapper around :mod:`cPickle`. Sets ``protocol``.
+class JSONSerializer(BaseSerializer):
+ """Wrapper around :mod:`json`. Sets ``indent`` and ``encoding``.
.. versionadded:: 1.8
- This is the default serializer and the best combination of speed and
- flexibility.
+ Use this serializer if you need readable data files. JSON doesn't
+ support Python objects as well as ``pickle``, so be
+ careful which data you try to serialize as JSON.
"""
+ is_binary = False
+
@classmethod
def load(cls, file_obj):
- """Load serialized object from open pickle file.
+ """Load serialized object from open JSON file.
.. versionadded:: 1.8
:param file_obj: file handle
:type file_obj: ``file`` object
- :returns: object loaded from pickle file
+ :returns: object loaded from JSON file
:rtype: object
"""
- return cPickle.load(file_obj)
+ return json.load(file_obj)
@classmethod
def dump(cls, obj, file_obj):
- """Serialize object ``obj`` to open pickle file.
+ """Serialize object ``obj`` to open JSON file.
.. versionadded:: 1.8
:param obj: Python object to serialize
- :type obj: Python object
+ :type obj: JSON-serializable data structure
:param file_obj: file handle
:type file_obj: ``file`` object
"""
- return cPickle.dump(obj, file_obj, protocol=-1)
+ return json.dump(obj, file_obj, indent=2)
-class PickleSerializer(object):
+class PickleSerializer(BaseSerializer):
"""Wrapper around :mod:`pickle`. Sets ``protocol``.
.. versionadded:: 1.8
@@ -671,6 +655,8 @@ class PickleSerializer(object):
"""
+ is_binary = True
+
@classmethod
def load(cls, file_obj):
"""Load serialized object from open pickle file.
@@ -702,7 +688,6 @@ def dump(cls, obj, file_obj):
# Set up default manager and register built-in serializers
manager = SerializerManager()
-manager.register("cpickle", CPickleSerializer)
manager.register("pickle", PickleSerializer)
manager.register("json", JSONSerializer)
@@ -838,7 +823,7 @@ def __init__(self, filepath, defaults=None):
if os.path.exists(self._filepath):
self._load()
elif defaults:
- for key, val in defaults.items():
+ for key, val in list(defaults.items()):
self[key] = val
self.save() # save default settings
@@ -846,7 +831,7 @@ def _load(self):
"""Load cached settings from JSON file `self._filepath`."""
data = {}
with LockFile(self._filepath, 0.5):
- with open(self._filepath, "rb") as fp:
+ with open(self._filepath, "r") as fp:
data.update(json.load(fp))
self._original = deepcopy(data)
@@ -870,8 +855,8 @@ def save(self):
data.update(self)
with LockFile(self._filepath, 0.5):
- with atomic_writer(self._filepath, "wb") as fp:
- json.dump(data, fp, sort_keys=True, indent=2, encoding="utf-8")
+ with atomic_writer(self._filepath, "w") as fp:
+ json.dump(data, fp, sort_keys=True, indent=2)
# dict methods
def __setitem__(self, key, value):
@@ -958,6 +943,9 @@ def __init__(
help_url=None,
):
"""Create new :class:`Workflow` object."""
+
+ seralizer = "pickle"
+
self._default_settings = default_settings or {}
self._update_settings = update_settings or {}
self._input_encoding = input_encoding
@@ -970,8 +958,8 @@ def __init__(
self._bundleid = None
self._debugging = None
self._name = None
- self._cache_serializer = "cpickle"
- self._data_serializer = "cpickle"
+ self._cache_serializer = seralizer
+ self._data_serializer = seralizer
self._info = None
self._info_loaded = False
self._logger = None
@@ -1013,7 +1001,7 @@ def __init__(
@property
def alfred_version(self):
"""Alfred version as :class:`~workflow.update.Version` object."""
- from update import Version
+ from .update import Version
return Version(self.alfred_env.get("version"))
@@ -1090,7 +1078,10 @@ def alfred_env(self):
if value:
if key in ("debug", "version_build", "theme_subtext"):
- value = int(value)
+ if value.isdigit():
+ value = int(value)
+ else:
+ value = False
else:
value = self.decode(value)
@@ -1119,7 +1110,7 @@ def bundleid(self):
if self.alfred_env.get("workflow_bundleid"):
self._bundleid = self.alfred_env.get("workflow_bundleid")
else:
- self._bundleid = unicode(self.info["bundleid"], "utf-8")
+ self._bundleid = self.info["bundleid"]
return self._bundleid
@@ -1131,7 +1122,9 @@ def debugging(self):
:rtype: ``bool``
"""
- return self.alfred_env.get("debug") == 1
+ return bool(
+ self.alfred_env.get("debug") == 1 or os.environ.get("PYTEST_RUNNING")
+ )
@property
def name(self):
@@ -1182,7 +1175,7 @@ def version(self):
filepath = self.workflowfile("version")
if os.path.exists(filepath):
- with open(filepath, "rb") as fileobj:
+ with open(filepath, "r") as fileobj:
version = fileobj.read()
# info.plist
@@ -1190,7 +1183,7 @@ def version(self):
version = self.info.get("version")
if version:
- from update import Version
+ from .update import Version
version = Version(version)
@@ -1321,7 +1314,7 @@ def workflowdir(self):
# the library is in. CWD will be the workflow root if
# a workflow is being run in Alfred
candidates = [
- os.path.abspath(os.getcwdu()),
+ os.path.abspath(os.getcwd()),
os.path.dirname(os.path.abspath(os.path.dirname(__file__))),
]
@@ -1594,7 +1587,7 @@ def stored_data(self, name):
self.logger.debug("no data stored for `%s`", name)
return None
- with open(metadata_path, "rb") as file_obj:
+ with open(metadata_path, "r") as file_obj:
serializer_name = file_obj.read().strip()
serializer = manager.serializer(serializer_name)
@@ -1680,14 +1673,17 @@ def delete_paths(paths):
delete_paths((metadata_path, data_path))
return
+ if isinstance(data, str):
+ data = bytearray(data)
+
# Ensure write is not interrupted by SIGTERM
@uninterruptible
def _store():
# Save file extension
- with atomic_writer(metadata_path, "wb") as file_obj:
+ with atomic_writer(metadata_path, "w") as file_obj:
file_obj.write(serializer_name)
- with atomic_writer(data_path, "wb") as file_obj:
+ with serializer.atomic_writer(data_path, "w") as file_obj:
serializer.dump(data, file_obj)
_store()
@@ -1750,7 +1746,7 @@ def cache_data(self, name, data):
self.logger.debug("deleted cache file: %s", cache_path)
return
- with atomic_writer(cache_path, "wb") as file_obj:
+ with serializer.atomic_writer(cache_path, "w") as file_obj:
serializer.dump(data, file_obj)
self.logger.debug("cached data: %s", cache_path)
@@ -2121,7 +2117,7 @@ def run(self, func, text_errors=False):
if not sys.stdout.isatty(): # Show error in Alfred
if text_errors:
- print(unicode(err).encode("utf-8"), end="")
+ print(str(err).encode("utf-8"), end="")
else:
self._items = []
if self._name:
@@ -2131,7 +2127,7 @@ def run(self, func, text_errors=False):
else: # pragma: no cover
name = os.path.dirname(__file__)
self.add_item(
- "Error in workflow '%s'" % name, unicode(err), icon=ICON_ERROR
+ "Error in workflow '%s'" % name, str(err), icon=ICON_ERROR
)
self.send_feedback()
return 1
@@ -2242,7 +2238,7 @@ def send_feedback(self):
for item in self._items:
root.append(item.elem)
sys.stdout.write('\n')
- sys.stdout.write(ET.tostring(root).encode("utf-8"))
+ sys.stdout.write(ET.tostring(root, encoding="unicode"))
sys.stdout.flush()
####################################################################
@@ -2280,7 +2276,7 @@ def last_version_run(self):
version = self.settings.get("__workflow_last_version")
if version:
- from update import Version
+ from .update import Version
version = Version(version)
@@ -2308,8 +2304,8 @@ def set_last_version(self, version=None):
version = self.version
- if isinstance(version, basestring):
- from update import Version
+ if isinstance(version, str):
+ from .update import Version
version = Version(version)
@@ -2382,18 +2378,16 @@ def check_update(self, force=False):
# Check for new version if it's time
if force or not self.cached_data_fresh(key, frequency * 86400):
-
repo = self._update_settings["github_slug"]
# version = self._update_settings['version']
version = str(self.version)
- from background import run_in_background
+ from .background import run_in_background
# update.py is adjacent to this file
- update_script = os.path.join(os.path.dirname(__file__), b"update.py")
-
- cmd = ["/usr/bin/python", update_script, "check", repo, version]
+ update_script = os.path.join(os.path.dirname(__file__), "update.py")
+ cmd = [sys.executable, update_script, "check", repo, version]
if self.prereleases:
cmd.append("--prereleases")
@@ -2416,7 +2410,7 @@ def start_update(self):
installed, else ``False``
"""
- import update
+ from . import update
repo = self._update_settings["github_slug"]
# version = self._update_settings['version']
@@ -2425,12 +2419,12 @@ def start_update(self):
if not update.check_update(repo, version, self.prereleases):
return False
- from background import run_in_background
+ from .background import run_in_background
# update.py is adjacent to this file
- update_script = os.path.join(os.path.dirname(__file__), b"update.py")
+ update_script = os.path.join(os.path.dirname(__file__), "update.py")
- cmd = ["/usr/bin/python", update_script, "install", repo, version]
+ cmd = [sys.executable, update_script, "install", repo, version]
if self.prereleases:
cmd.append("--prereleases")
@@ -2518,7 +2512,7 @@ def get_password(self, account, service=None):
h = groups.get("hex")
password = groups.get("pw")
if h:
- password = unicode(binascii.unhexlify(h), "utf-8")
+ password = str(binascii.unhexlify(h), "utf-8")
self.logger.debug("got password : %s:%s", service, account)
@@ -2548,7 +2542,7 @@ def delete_password(self, account, service=None):
# Methods for workflow:* magic args
####################################################################
- def _register_default_magic(self):
+ def _register_default_magic(self): # noqa: C901
"""Register the built-in magic arguments."""
# TODO: refactor & simplify
# Wrap callback and message with callable
@@ -2766,8 +2760,8 @@ def decode(self, text, encoding=None, normalization=None):
"""
encoding = encoding or self._input_encoding
normalization = normalization or self._normalizsation
- if not isinstance(text, unicode):
- text = unicode(text, encoding)
+ if not isinstance(text, str):
+ text = str(text, encoding)
return unicodedata.normalize(normalization, text)
def fold_to_ascii(self, text):
@@ -2786,7 +2780,7 @@ def fold_to_ascii(self, text):
if isascii(text):
return text
text = "".join([ASCII_REPLACEMENTS.get(c, c) for c in text])
- return unicode(unicodedata.normalize("NFKD", text).encode("ascii", "ignore"))
+ return unicodedata.normalize("NFKD", text)
def dumbify_punctuation(self, text):
"""Convert non-ASCII punctuation to closest ASCII equivalent.
@@ -2833,7 +2827,8 @@ def _delete_directory_contents(self, dirpath, filter_func):
def _load_info_plist(self):
"""Load workflow info from ``info.plist``."""
# info.plist should be in the directory above this one
- self._info = plistlib.readPlist(self.workflowfile("info.plist"))
+ with open(self.workflowfile("info.plist"), "rb") as file_obj:
+ self._info = plistlib.load(file_obj)
self._info_loaded = True
def _create(self, dirpath):
diff --git a/workflow/workflow3.py b/workflow/workflow3.py
index c554a63..3a06e33 100644
--- a/workflow/workflow3.py
+++ b/workflow/workflow3.py
@@ -23,7 +23,6 @@
"""
-from __future__ import print_function, unicode_literals, absolute_import
import json
import os
@@ -76,7 +75,7 @@ def obj(self):
o = {}
if self:
d2 = {}
- for k, v in self.items():
+ for k, v in list(self.items()):
d2[k] = v
o["variables"] = d2
@@ -88,7 +87,7 @@ def obj(self):
return {"alfredworkflow": o}
- def __unicode__(self):
+ def __str__(self):
"""Convert to ``alfredworkflow`` JSON object.
Returns:
@@ -98,20 +97,11 @@ def __unicode__(self):
if not self and not self.config:
if not self.arg:
return ""
- if isinstance(self.arg, unicode):
+ if isinstance(self.arg, str):
return self.arg
return json.dumps(self.obj)
- def __str__(self):
- """Convert to ``alfredworkflow`` JSON object.
-
- Returns:
- str: UTF-8 encoded ``alfredworkflow`` JSON object
-
- """
- return unicode(self).encode("utf-8")
-
class Modifier(object):
"""Modify :class:`Item3` arg/icon/variables when modifier key is pressed.
@@ -372,11 +362,7 @@ def obj(self):
"""
# Required values
- o = {
- "title": self.title,
- "subtitle": self.subtitle,
- "valid": self.valid,
- }
+ o = {"title": self.title, "subtitle": self.subtitle, "valid": self.valid}
# Optional values
if self.arg is not None:
@@ -460,7 +446,7 @@ def _modifiers(self):
"""
if self.modifiers:
mods = {}
- for k, mod in self.modifiers.items():
+ for k, mod in list(self.modifiers.items()):
mods[k] = mod.obj
return mods