repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
KxSystems/pyq | setup.py | get_q_home | python | def get_q_home(env):
q_home = env.get('QHOME')
if q_home:
return q_home
for v in ['VIRTUAL_ENV', 'HOME']:
prefix = env.get(v)
if prefix:
q_home = os.path.join(prefix, 'q')
if os.path.isdir(q_home):
return q_home
if WINDOWS:
q_home = os.path.join(env['SystemDrive'], r'\q')
if os.path.isdir(q_home):
return q_home
raise RuntimeError('No suitable QHOME.') | Derive q home from the environment | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/setup.py#L185-L200 | null | """PyQ - Python for kdb+
|Documentation Status| |PyPI Version|
PyQ_ brings the `Python programming language`_ to the `kdb+ database`_. It
allows developers to seamlessly integrate Python and q codes in one
application. This is achieved by bringing the Python and q interpreters in
the same process so that codes written in either of the languages operate on
the same data. In PyQ, Python and q objects live in the same memory space
and share the same data.
.. |Documentation Status|
image:: https://readthedocs.org/projects/pyq/badge/?version=latest
:target: http://pyq.readthedocs.io/en/latest/?badge=latest
.. |PyPI Version| image:: https://img.shields.io/pypi/v/pyq.svg
:target: https://pypi.python.org/pypi/pyq
.. _PyQ: https://code.kx.com/q/interfaces/pyq/
.. _`Python programming language`: https://www.python.org/about
.. _`kdb+ database`: https://kx.com
"""
import os
import platform
import subprocess
import sys
from distutils.command.build import build
from distutils.command.build_ext import build_ext
from distutils.command.config import config
from distutils.command.install import install
from distutils.command.install_data import install_data
from distutils.command.install_scripts import install_scripts
import sysconfig
WINDOWS = platform.system() == 'Windows'
if WINDOWS:
from setuptools import Command, Distribution, Extension, setup
else:
from distutils.core import Command, Distribution, Extension, setup
VERSION = '4.2.1'
IS_RELEASE = True
VERSION_FILE = 'src/pyq/version.py'
VERSION_PY = """\
# generated by setup.py
version = '{}'
"""
CFLAGS = ['/WX', '/wd4090'] if WINDOWS else ['-fno-strict-aliasing']
if sys.version_info >= (3, ) and not WINDOWS:
CFLAGS.append('-Werror')
LDFLAGS = []
if (sys.maxsize + 1).bit_length() == 32 and platform.machine() == 'x86_64':
# Building 32-bit pyq on a 64-bit host
config_vars = sysconfig.get_config_vars()
CFLAGS.append('-m32')
LDFLAGS.append('-m32')
def split_replace(string, a, b, sep):
x = string.split(sep)
for i, part in enumerate(x):
if part == a:
x[i] = b
return sep.join(x)
for k, v in config_vars.items():
if isinstance(v, str):
config_vars[k] = split_replace(v, 'x86_64', 'i386', '-')
TEST_REQUIREMENTS = [
'pytest>=2.6.4,!=3.2.0,!=3.3.0',
'pytest-pyq',
'pytest-cov>=2.4',
'coverage>=4.2'
] + (['pathlib2>=2.0'] if sys.version_info[0] < 3 else [])
IPYTHON_REQUIREMENTS = ['ipython']
Executable = Extension
METADATA = dict(
name='pyq',
packages=['pyq', 'pyq.tests', ],
package_dir={'': 'src'},
qlib_scripts=['python.q', 'p.k', 'pyq-operators.q', 'pyq-print.q', ],
ext_modules=[
Extension('pyq._k', sources=['src/pyq/_k.c', ],
extra_compile_args=CFLAGS,
extra_link_args=LDFLAGS),
],
qext_modules=[
Extension('pyq', sources=['src/pyq/pyq.c', ],
extra_compile_args=CFLAGS,
extra_link_args=LDFLAGS),
],
executables=[] if WINDOWS else [
Executable('pyq', sources=['src/pyq.c'],
extra_compile_args=CFLAGS,
extra_link_args=LDFLAGS),
],
scripts=['src/scripts/pyq-runtests',
'src/scripts/pyq-coverage',
'src/scripts/ipyq',
'src/scripts/pq',
'src/scripts/qp',
],
data_files=[
('q', ['src/pyq/p.k',
'src/pyq/pyq-operators.q',
'src/pyq/python.q',
]
),
],
url='https://github.com/KxSystems/pyq',
maintainer='PyQ Authors',
maintainer_email='pyq@enlnt.com',
license='Apache License',
platforms=['Linux', 'MacOS X', 'Windows'],
classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows :: Windows 10',
'Programming Language :: C',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Database',
'Topic :: Software Development :: Libraries' +
' :: Python Modules'],
)
def add_data_file(data_files, target, source):
"""Add an entry to data_files"""
for t, f in data_files:
if t == target:
break
else:
data_files.append((target, []))
f = data_files[-1][1]
if source not in f:
f.append(source)
def get_version():
write_version_file = True
if IS_RELEASE:
version = VERSION
elif os.path.exists('.git'):
try:
out = subprocess.check_output(['git', 'describe'])
_, commits, revision = decode(out).strip().split('-')
version = '{}.dev{}+{}'.format(VERSION, commits, revision[1:])
except (OSError, ValueError):
version = VERSION + '.dev0+unknown'
else:
try:
f = open(VERSION_FILE)
except OSError:
version = VERSION + '.dev0+unknown'
else:
with f:
g = {}
exec(f.read(), g)
version = g['version']
write_version_file = False
if write_version_file:
with open(VERSION_FILE, 'w') as f:
f.write(VERSION_PY.format(version))
return version
def get_q_os_letter(sysname, machine):
if sysname == 'Linux':
return 'l'
if sysname == 'SunOS':
return 'v' if machine == 'i86pc' else 's'
if sysname == 'Darwin':
return 'm'
if sysname == 'Windows':
return 'w'
raise RuntimeError('"Unknown platform: %s %s.' % (sysname, machine))
def get_q_arch(q_home):
bits = (sys.maxsize + 1).bit_length()
sysname = platform.system()
machine = platform.machine()
os_letter = get_q_os_letter(sysname, machine)
if bits == 64:
# In case we're on 64-bit platform, but 64-bit kdb+ is not available
# we will fallback to the 32-bit version.
x64dir = os.path.join(q_home, '%s64' % os_letter)
if not os.path.isdir(x64dir):
bits = 32
return '%s%d' % (os_letter, bits)
def get_q_version(q_home):
"""Return version of q installed at q_home"""
with open(os.path.join(q_home, 'q.k')) as f:
for line in f:
if line.startswith('k:'):
return line[2:5]
return '2.2'
decode = (lambda x: x) if str is bytes else lambda x: x.decode()
def get_python_dll(executable):
sysname = platform.system()
if sysname.startswith(('Linux', 'SunOS')):
output = subprocess.check_output(['ldd', executable])
for line in output.splitlines():
if b'libpython' in line:
return decode(line.split()[2])
# This is for systems which have statically linked Python
# (i.e Ubuntu), but provide dynamic libraries in a separate
# package.
libpython = 'libpython{}.{}'.format(*sys.version_info[:2]).encode()
try:
output = subprocess.check_output(['ldconfig', '-p'])
except subprocess.CalledProcessError:
output = subprocess.check_output(['/sbin/ldconfig', '-p'])
for line in output.splitlines():
if libpython in line:
return decode(line.split()[-1])
elif sysname == 'Darwin':
output = subprocess.check_output(['otool', '-L', executable])
for line in output.splitlines()[1:]:
if b'Python' in line:
python_dll = decode(line.split()[0])
return python_dll.replace('@executable_path',
os.path.dirname(executable))
elif sysname == 'Windows':
return 'python{}{}.dll'.format(*sys.version_info[:2])
# This is known to work for Anaconda
ldlibrary = sysconfig.get_config_var('LDLIBRARY')
libdir = sysconfig.get_config_var('LIBDIR')
if ldlibrary and libdir:
libfile = os.path.join(libdir, ldlibrary)
if os.path.exists(libfile):
return libfile
raise RuntimeError('no python dll')
SETUP_CFG = """\
[config]
q_home = {q_home}
q_version = {q_version}
q_arch = {q_arch}
python_dll = {python_dll}
"""
class Config(config):
user_options = [
('q-home=', None, 'q home directory'),
('q-version=', None, 'q version'),
('q-arch=', None, 'q architecture, e.g. l64'),
('python-dll=', None, 'path to the python dynamic library'),
('dest=', None, "path to the config file (default: setup.cfg)"),
('write', None, 'write the config file')
]
q_home = None
q_arch = None
q_version = None
python_dll = None
dest = None
write = None
extra_link_args = []
def initialize_options(self):
config.initialize_options(self)
def finalize_options(self):
if self.q_home is None:
self.q_home = get_q_home(os.environ)
if self.q_arch is None:
self.q_arch = get_q_arch(self.q_home)
if self.q_version is None:
self.q_version = get_q_version(self.q_home)
if self.python_dll is None:
self.python_dll = get_python_dll(sys.executable)
if self.dest is None:
self.dest = 'setup.cfg'
if WINDOWS:
self.extra_link_args = [r'src\pyq\kx\%s\q.lib' % self.q_arch]
def run(self):
setup_cfg = SETUP_CFG.format(**vars(self))
self.announce(setup_cfg.rstrip(), 2)
if self.write:
with open(self.dest, 'w') as f:
f.write(setup_cfg)
self.announce('^^^ Written to %s.' % self.dest, 2)
else:
self.announce('^^^ Use --write options'
' to write this to %s.' % self.dest, 2)
PYQ_CONFIG = """\
\\d .pyq
python_dll:"{python_dll}\\000"
pyq_executable:"{pyq_executable}"
"""
class BuildQLib(Command):
description = "build q/k scripts"
user_options = [
('build-lib=', 'd', "build directory"),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
]
q_home = None
build_base = None
build_lib = None
python_dll = None
pyq_executable = None
def initialize_options(self):
pass
def finalize_options(self):
self.set_undefined_options('config',
('q_home', 'q_home'),
('python_dll', 'python_dll'))
self.set_undefined_options('build',
('build_base', 'build_base'))
self.build_lib = os.path.join(self.build_base, 'qlib')
cmd = self.get_finalized_command('install_exe')
pyq_path = os.path.join(cmd.install_dir, 'pyq')
self.pyq_executable = pyq_path.replace('\\', '\\\\')
def run(self):
self.mkpath(self.build_lib)
for script in self.distribution.qlib_scripts:
outfile = os.path.join(self.build_lib, script)
script_file = os.path.join('src', 'pyq', script)
self.write_pyq_config()
self.copy_file(script_file, outfile, preserve_mode=0)
def write_pyq_config(self):
pyq_config_file = os.path.join(self.build_lib, 'pyq-config.q')
with open(pyq_config_file, 'w') as f:
f.write(PYQ_CONFIG.format(**vars(self)))
add_data_file(self.distribution.data_files, 'q', pyq_config_file)
class BuildQExt(Command):
description = "build q extension modules"
user_options = [
('build-lib=', 'd', "build directory"),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
]
q_home = None
q_arch = None
q_version = None
build_base = None
build_temp = None
build_lib = None
compiler = None
define = None
debug = None
force = None
plat_name = None
extensions = None
def initialize_options(self):
pass
def finalize_options(self):
self.set_undefined_options('config',
('q_home', 'q_home'),
('q_arch', 'q_arch'),
('q_version', 'q_version'))
self.set_undefined_options('build',
('build_base', 'build_base'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'),
('plat_name', 'plat_name'))
if self.build_lib is None:
self.build_lib = os.path.join(self.build_base,
'qext.' + self.plat_name)
if self.build_temp is None:
self.build_temp = os.path.join(self.build_base,
'temp.' + self.plat_name)
if self.extensions is None:
self.extensions = self.distribution.qext_modules
if self.define is None:
split_version = self.q_version.split('.')
self.define = [('KXVER', split_version[0]),
('KXVER2', split_version[1]), ]
def run(self):
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
include_dirs = ['src/pyq/kx', ]
conf = self.get_finalized_command("config")
for ext in self.extensions:
sources = ext.sources
ext_path = os.path.join(self.build_lib,
ext.name + ('.dll' if WINDOWS else '.so'))
compiler = new_compiler(compiler=self.compiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
customize_compiler(compiler)
define = self.define[:]
if sys.version_info >= (3,):
py3k = '{:d}{:d}'.format(*sys.version_info[:2])
define.append(('PY3K', py3k))
if WINDOWS:
compiler.initialize()
compiler.compile_options.remove('/MD')
extra_args = ext.extra_compile_args or []
objects = compiler.compile(sources,
output_dir=self.build_temp,
macros=define,
extra_postargs=extra_args,
include_dirs=include_dirs)
extra_args = conf.extra_link_args[:] + ext.extra_link_args
if WINDOWS:
extra_args.extend([r'/DEF:src\pyq\%s.def' % ext.name])
compiler.link_shared_object(objects, ext_path,
extra_postargs=extra_args)
add_data_file(self.distribution.data_files,
os.path.join('q', self.q_arch), ext_path)
class BuildPyExt(build_ext):
q_arch = None
def finalize_options(self):
build_ext.finalize_options(self)
self.set_undefined_options('build_qext',
('define', 'define'))
self.set_undefined_options('config',
('q_arch', 'q_arch'))
conf = self.get_finalized_command("config")
if conf.extra_link_args:
for ext in self.extensions:
ext.extra_link_args = [a.format(**vars(ext))
for a in conf.extra_link_args]
if WINDOWS:
def build_extensions(self):
self.compiler.initialize()
self.compiler.compile_options.remove('/MD')
build_ext.build_extensions(self)
class BuildExe(Command):
description = "build executables"
user_options = []
q_home = None
q_arch = None
q_version = None
build_temp = None
build_exe = None
build_base = None
compiler = None
debug = None
define = None
plat_name = None
def initialize_options(self):
pass
def finalize_options(self):
self.set_undefined_options('config',
('q_home', 'q_home'),
('q_arch', 'q_arch'),
('q_version', 'q_version'))
self.set_undefined_options('build',
('build_base', 'build_base'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'),
('plat_name', 'plat_name'))
if self.build_exe is None:
self.build_exe = os.path.join(self.build_base,
'exe.{}-{}'.format(self.plat_name,
sys.version[:3]))
if self.define is None:
self.define = [
('KXVER', self.q_version[0]),
('QARCH', self.q_arch),
]
def run(self):
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
for exe in self.distribution.executables:
compiler = new_compiler(
compiler=self.compiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
customize_compiler(compiler)
extra_args = exe.extra_compile_args or []
objects = compiler.compile(exe.sources,
macros=self.define,
extra_postargs=extra_args,
output_dir=self.build_temp)
compiler.link_executable(objects,
extra_preargs=LDFLAGS,
output_progname=exe.name,
output_dir=self.build_exe)
class InstallQLib(install_data):
description = "install q/k scripts"
build_dir = None
skip_build = None
outfiles = None
def finalize_options(self):
self.set_undefined_options('config', ('q_home', 'install_dir'))
self.set_undefined_options('build_qlib', ('build_lib', 'build_dir'))
self.set_undefined_options('install', ('skip_build', 'skip_build'))
def run(self):
if not self.skip_build:
self.run_command('build_qlib')
self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
class InstallQExt(install_data):
description = "install q/k scripts"
q_home = None
q_arch = None
build_dir = None
skip_build = None
install_dir = None
outfiles = None
def finalize_options(self):
self.set_undefined_options('config',
('q_home', 'q_home'),
('q_arch', 'q_arch'))
self.set_undefined_options('build_qext', ('build_lib', 'build_dir'))
self.set_undefined_options('install', ('skip_build', 'skip_build'))
self.install_dir = os.path.join(self.q_home, self.q_arch)
def run(self):
if not self.skip_build:
self.run_command('build_qext')
self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
class InstallExe(install_scripts):
description = "install executables"
outfiles = None
def finalize_options(self):
self.set_undefined_options('build_exe', ('build_exe', 'build_dir'))
self.set_undefined_options('install',
('install_scripts', 'install_dir'),
('force', 'force'),
('skip_build', 'skip_build'),
)
def run(self):
if not self.skip_build:
self.run_command('build_exe')
from stat import ST_MODE
if not self.get_inputs():
return
self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
if os.name == 'posix':
# Set the executable bits (owner, group, and world) on
# all the executables we just installed.
for file in self.get_outputs():
if self.dry_run:
self.announce("changing mode of %s" % file, 2)
else:
mode = ((os.stat(file)[ST_MODE]) | 0o555) & 0o7777
self.announce("changing mode of %s to %o" %
(file, mode), 2)
os.chmod(file, mode)
def get_inputs(self):
return self.distribution.executables or []
class PyqDistribution(Distribution):
qlib_scripts = None
qext_modules = None
executables = None
build.sub_commands.extend([
('build_qlib', None),
('build_qext', None),
('build_exe', None),
])
install.sub_commands.extend([
('install_qlib', None),
('install_qext', None),
('install_exe', None),
])
def run_setup(metadata):
summary, details = __doc__.split('\n\n', 1)
rst_description = '\n'.join([summary, '=' * len(summary), '\n' + details])
keywords = metadata.copy()
keywords.update(
version=get_version(),
description=summary,
long_description=rst_description,
distclass=PyqDistribution,
cmdclass={
'config': Config,
'build_qlib': BuildQLib,
'build_qext': BuildQExt,
'build_ext': BuildPyExt,
'build_exe': BuildExe,
'install_qlib': InstallQLib,
'install_qext': InstallQExt,
'install_exe': InstallExe,
},
)
if 'setuptools' in sys.modules:
keywords['extras_require'] = {
'test': TEST_REQUIREMENTS,
'ipython': IPYTHON_REQUIREMENTS,
'all': TEST_REQUIREMENTS + IPYTHON_REQUIREMENTS + [
'py', 'numpy', 'prompt-toolkit', 'pygments-q'],
}
if (sys.version_info >= (3,) and not WINDOWS and
'CONDA_PREFIX' not in os.environ and
os.path.exists('embedPy/p.q')):
try:
import numpy
except ImportError:
pass
else:
add_embedpy_components(keywords)
setup(**keywords)
def add_embedpy_components(keywords):
keywords['qlib_scripts'].append('../../embedPy/p.q')
keywords['qext_modules'].append(
Extension('p', sources=['embedPy/py.c', ]),
)
add_data_file(keywords['data_files'], 'q', 'embedPy/p.q')
if __name__ == '__main__':
run_setup(METADATA)
|
KxSystems/pyq | setup.py | get_q_version | python | def get_q_version(q_home):
with open(os.path.join(q_home, 'q.k')) as f:
for line in f:
if line.startswith('k:'):
return line[2:5]
return '2.2' | Return version of q installed at q_home | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/setup.py#L230-L236 | null | """PyQ - Python for kdb+
|Documentation Status| |PyPI Version|
PyQ_ brings the `Python programming language`_ to the `kdb+ database`_. It
allows developers to seamlessly integrate Python and q codes in one
application. This is achieved by bringing the Python and q interpreters in
the same process so that codes written in either of the languages operate on
the same data. In PyQ, Python and q objects live in the same memory space
and share the same data.
.. |Documentation Status|
image:: https://readthedocs.org/projects/pyq/badge/?version=latest
:target: http://pyq.readthedocs.io/en/latest/?badge=latest
.. |PyPI Version| image:: https://img.shields.io/pypi/v/pyq.svg
:target: https://pypi.python.org/pypi/pyq
.. _PyQ: https://code.kx.com/q/interfaces/pyq/
.. _`Python programming language`: https://www.python.org/about
.. _`kdb+ database`: https://kx.com
"""
import os
import platform
import subprocess
import sys
from distutils.command.build import build
from distutils.command.build_ext import build_ext
from distutils.command.config import config
from distutils.command.install import install
from distutils.command.install_data import install_data
from distutils.command.install_scripts import install_scripts
import sysconfig
WINDOWS = platform.system() == 'Windows'
if WINDOWS:
from setuptools import Command, Distribution, Extension, setup
else:
from distutils.core import Command, Distribution, Extension, setup
VERSION = '4.2.1'
IS_RELEASE = True
VERSION_FILE = 'src/pyq/version.py'
VERSION_PY = """\
# generated by setup.py
version = '{}'
"""
CFLAGS = ['/WX', '/wd4090'] if WINDOWS else ['-fno-strict-aliasing']
if sys.version_info >= (3, ) and not WINDOWS:
CFLAGS.append('-Werror')
LDFLAGS = []
if (sys.maxsize + 1).bit_length() == 32 and platform.machine() == 'x86_64':
# Building 32-bit pyq on a 64-bit host
config_vars = sysconfig.get_config_vars()
CFLAGS.append('-m32')
LDFLAGS.append('-m32')
def split_replace(string, a, b, sep):
x = string.split(sep)
for i, part in enumerate(x):
if part == a:
x[i] = b
return sep.join(x)
for k, v in config_vars.items():
if isinstance(v, str):
config_vars[k] = split_replace(v, 'x86_64', 'i386', '-')
TEST_REQUIREMENTS = [
'pytest>=2.6.4,!=3.2.0,!=3.3.0',
'pytest-pyq',
'pytest-cov>=2.4',
'coverage>=4.2'
] + (['pathlib2>=2.0'] if sys.version_info[0] < 3 else [])
IPYTHON_REQUIREMENTS = ['ipython']
Executable = Extension
METADATA = dict(
name='pyq',
packages=['pyq', 'pyq.tests', ],
package_dir={'': 'src'},
qlib_scripts=['python.q', 'p.k', 'pyq-operators.q', 'pyq-print.q', ],
ext_modules=[
Extension('pyq._k', sources=['src/pyq/_k.c', ],
extra_compile_args=CFLAGS,
extra_link_args=LDFLAGS),
],
qext_modules=[
Extension('pyq', sources=['src/pyq/pyq.c', ],
extra_compile_args=CFLAGS,
extra_link_args=LDFLAGS),
],
executables=[] if WINDOWS else [
Executable('pyq', sources=['src/pyq.c'],
extra_compile_args=CFLAGS,
extra_link_args=LDFLAGS),
],
scripts=['src/scripts/pyq-runtests',
'src/scripts/pyq-coverage',
'src/scripts/ipyq',
'src/scripts/pq',
'src/scripts/qp',
],
data_files=[
('q', ['src/pyq/p.k',
'src/pyq/pyq-operators.q',
'src/pyq/python.q',
]
),
],
url='https://github.com/KxSystems/pyq',
maintainer='PyQ Authors',
maintainer_email='pyq@enlnt.com',
license='Apache License',
platforms=['Linux', 'MacOS X', 'Windows'],
classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows :: Windows 10',
'Programming Language :: C',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Database',
'Topic :: Software Development :: Libraries' +
' :: Python Modules'],
)
def add_data_file(data_files, target, source):
"""Add an entry to data_files"""
for t, f in data_files:
if t == target:
break
else:
data_files.append((target, []))
f = data_files[-1][1]
if source not in f:
f.append(source)
def get_version():
write_version_file = True
if IS_RELEASE:
version = VERSION
elif os.path.exists('.git'):
try:
out = subprocess.check_output(['git', 'describe'])
_, commits, revision = decode(out).strip().split('-')
version = '{}.dev{}+{}'.format(VERSION, commits, revision[1:])
except (OSError, ValueError):
version = VERSION + '.dev0+unknown'
else:
try:
f = open(VERSION_FILE)
except OSError:
version = VERSION + '.dev0+unknown'
else:
with f:
g = {}
exec(f.read(), g)
version = g['version']
write_version_file = False
if write_version_file:
with open(VERSION_FILE, 'w') as f:
f.write(VERSION_PY.format(version))
return version
def get_q_home(env):
"""Derive q home from the environment"""
q_home = env.get('QHOME')
if q_home:
return q_home
for v in ['VIRTUAL_ENV', 'HOME']:
prefix = env.get(v)
if prefix:
q_home = os.path.join(prefix, 'q')
if os.path.isdir(q_home):
return q_home
if WINDOWS:
q_home = os.path.join(env['SystemDrive'], r'\q')
if os.path.isdir(q_home):
return q_home
raise RuntimeError('No suitable QHOME.')
def get_q_os_letter(sysname, machine):
if sysname == 'Linux':
return 'l'
if sysname == 'SunOS':
return 'v' if machine == 'i86pc' else 's'
if sysname == 'Darwin':
return 'm'
if sysname == 'Windows':
return 'w'
raise RuntimeError('"Unknown platform: %s %s.' % (sysname, machine))
def get_q_arch(q_home):
bits = (sys.maxsize + 1).bit_length()
sysname = platform.system()
machine = platform.machine()
os_letter = get_q_os_letter(sysname, machine)
if bits == 64:
# In case we're on 64-bit platform, but 64-bit kdb+ is not available
# we will fallback to the 32-bit version.
x64dir = os.path.join(q_home, '%s64' % os_letter)
if not os.path.isdir(x64dir):
bits = 32
return '%s%d' % (os_letter, bits)
decode = (lambda x: x) if str is bytes else lambda x: x.decode()
def get_python_dll(executable):
sysname = platform.system()
if sysname.startswith(('Linux', 'SunOS')):
output = subprocess.check_output(['ldd', executable])
for line in output.splitlines():
if b'libpython' in line:
return decode(line.split()[2])
# This is for systems which have statically linked Python
# (i.e Ubuntu), but provide dynamic libraries in a separate
# package.
libpython = 'libpython{}.{}'.format(*sys.version_info[:2]).encode()
try:
output = subprocess.check_output(['ldconfig', '-p'])
except subprocess.CalledProcessError:
output = subprocess.check_output(['/sbin/ldconfig', '-p'])
for line in output.splitlines():
if libpython in line:
return decode(line.split()[-1])
elif sysname == 'Darwin':
output = subprocess.check_output(['otool', '-L', executable])
for line in output.splitlines()[1:]:
if b'Python' in line:
python_dll = decode(line.split()[0])
return python_dll.replace('@executable_path',
os.path.dirname(executable))
elif sysname == 'Windows':
return 'python{}{}.dll'.format(*sys.version_info[:2])
# This is known to work for Anaconda
ldlibrary = sysconfig.get_config_var('LDLIBRARY')
libdir = sysconfig.get_config_var('LIBDIR')
if ldlibrary and libdir:
libfile = os.path.join(libdir, ldlibrary)
if os.path.exists(libfile):
return libfile
raise RuntimeError('no python dll')
SETUP_CFG = """\
[config]
q_home = {q_home}
q_version = {q_version}
q_arch = {q_arch}
python_dll = {python_dll}
"""
class Config(config):
user_options = [
('q-home=', None, 'q home directory'),
('q-version=', None, 'q version'),
('q-arch=', None, 'q architecture, e.g. l64'),
('python-dll=', None, 'path to the python dynamic library'),
('dest=', None, "path to the config file (default: setup.cfg)"),
('write', None, 'write the config file')
]
q_home = None
q_arch = None
q_version = None
python_dll = None
dest = None
write = None
extra_link_args = []
def initialize_options(self):
config.initialize_options(self)
def finalize_options(self):
if self.q_home is None:
self.q_home = get_q_home(os.environ)
if self.q_arch is None:
self.q_arch = get_q_arch(self.q_home)
if self.q_version is None:
self.q_version = get_q_version(self.q_home)
if self.python_dll is None:
self.python_dll = get_python_dll(sys.executable)
if self.dest is None:
self.dest = 'setup.cfg'
if WINDOWS:
self.extra_link_args = [r'src\pyq\kx\%s\q.lib' % self.q_arch]
def run(self):
setup_cfg = SETUP_CFG.format(**vars(self))
self.announce(setup_cfg.rstrip(), 2)
if self.write:
with open(self.dest, 'w') as f:
f.write(setup_cfg)
self.announce('^^^ Written to %s.' % self.dest, 2)
else:
self.announce('^^^ Use --write options'
' to write this to %s.' % self.dest, 2)
PYQ_CONFIG = """\
\\d .pyq
python_dll:"{python_dll}\\000"
pyq_executable:"{pyq_executable}"
"""
class BuildQLib(Command):
description = "build q/k scripts"
user_options = [
('build-lib=', 'd', "build directory"),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
]
q_home = None
build_base = None
build_lib = None
python_dll = None
pyq_executable = None
def initialize_options(self):
pass
def finalize_options(self):
self.set_undefined_options('config',
('q_home', 'q_home'),
('python_dll', 'python_dll'))
self.set_undefined_options('build',
('build_base', 'build_base'))
self.build_lib = os.path.join(self.build_base, 'qlib')
cmd = self.get_finalized_command('install_exe')
pyq_path = os.path.join(cmd.install_dir, 'pyq')
self.pyq_executable = pyq_path.replace('\\', '\\\\')
def run(self):
self.mkpath(self.build_lib)
for script in self.distribution.qlib_scripts:
outfile = os.path.join(self.build_lib, script)
script_file = os.path.join('src', 'pyq', script)
self.write_pyq_config()
self.copy_file(script_file, outfile, preserve_mode=0)
def write_pyq_config(self):
pyq_config_file = os.path.join(self.build_lib, 'pyq-config.q')
with open(pyq_config_file, 'w') as f:
f.write(PYQ_CONFIG.format(**vars(self)))
add_data_file(self.distribution.data_files, 'q', pyq_config_file)
class BuildQExt(Command):
description = "build q extension modules"
user_options = [
('build-lib=', 'd', "build directory"),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
]
q_home = None
q_arch = None
q_version = None
build_base = None
build_temp = None
build_lib = None
compiler = None
define = None
debug = None
force = None
plat_name = None
extensions = None
def initialize_options(self):
pass
def finalize_options(self):
self.set_undefined_options('config',
('q_home', 'q_home'),
('q_arch', 'q_arch'),
('q_version', 'q_version'))
self.set_undefined_options('build',
('build_base', 'build_base'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'),
('plat_name', 'plat_name'))
if self.build_lib is None:
self.build_lib = os.path.join(self.build_base,
'qext.' + self.plat_name)
if self.build_temp is None:
self.build_temp = os.path.join(self.build_base,
'temp.' + self.plat_name)
if self.extensions is None:
self.extensions = self.distribution.qext_modules
if self.define is None:
split_version = self.q_version.split('.')
self.define = [('KXVER', split_version[0]),
('KXVER2', split_version[1]), ]
def run(self):
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
include_dirs = ['src/pyq/kx', ]
conf = self.get_finalized_command("config")
for ext in self.extensions:
sources = ext.sources
ext_path = os.path.join(self.build_lib,
ext.name + ('.dll' if WINDOWS else '.so'))
compiler = new_compiler(compiler=self.compiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
customize_compiler(compiler)
define = self.define[:]
if sys.version_info >= (3,):
py3k = '{:d}{:d}'.format(*sys.version_info[:2])
define.append(('PY3K', py3k))
if WINDOWS:
compiler.initialize()
compiler.compile_options.remove('/MD')
extra_args = ext.extra_compile_args or []
objects = compiler.compile(sources,
output_dir=self.build_temp,
macros=define,
extra_postargs=extra_args,
include_dirs=include_dirs)
extra_args = conf.extra_link_args[:] + ext.extra_link_args
if WINDOWS:
extra_args.extend([r'/DEF:src\pyq\%s.def' % ext.name])
compiler.link_shared_object(objects, ext_path,
extra_postargs=extra_args)
add_data_file(self.distribution.data_files,
os.path.join('q', self.q_arch), ext_path)
class BuildPyExt(build_ext):
q_arch = None
def finalize_options(self):
build_ext.finalize_options(self)
self.set_undefined_options('build_qext',
('define', 'define'))
self.set_undefined_options('config',
('q_arch', 'q_arch'))
conf = self.get_finalized_command("config")
if conf.extra_link_args:
for ext in self.extensions:
ext.extra_link_args = [a.format(**vars(ext))
for a in conf.extra_link_args]
if WINDOWS:
def build_extensions(self):
self.compiler.initialize()
self.compiler.compile_options.remove('/MD')
build_ext.build_extensions(self)
class BuildExe(Command):
    """Build standalone executables (the pyq launcher binaries).

    Resolves its options from the ``config`` and ``build`` commands, then
    compiles and links each entry of ``distribution.executables`` into
    ``build_exe``.
    """

    description = "build executables"
    user_options = []

    # Options; resolved in finalize_options from 'config' and 'build'.
    q_home = None
    q_arch = None
    q_version = None
    build_temp = None
    build_exe = None
    build_base = None
    compiler = None
    debug = None
    define = None
    plat_name = None

    def initialize_options(self):
        pass

    def finalize_options(self):
        self.set_undefined_options('config',
                                   ('q_home', 'q_home'),
                                   ('q_arch', 'q_arch'),
                                   ('q_version', 'q_version'))
        self.set_undefined_options('build',
                                   ('build_base', 'build_base'),
                                   ('compiler', 'compiler'),
                                   ('debug', 'debug'),
                                   ('force', 'force'),
                                   ('plat_name', 'plat_name'))
        if self.build_exe is None:
            # BUGFIX: sys.version[:3] yields '3.1' on Python 3.10+.
            # Build the version tag from version_info instead; output is
            # identical ('3.7', '3.8', ...) for minor versions < 10.
            py_version = '%d.%d' % sys.version_info[:2]
            self.build_exe = os.path.join(
                self.build_base,
                'exe.{}-{}'.format(self.plat_name, py_version))
        if self.define is None:
            self.define = [
                ('KXVER', self.q_version[0]),
                ('QARCH', self.q_arch),
            ]

    def run(self):
        from distutils.ccompiler import new_compiler
        from distutils.sysconfig import customize_compiler
        for exe in self.distribution.executables:
            # Fresh compiler per executable, mirroring BuildQExt.run.
            compiler = new_compiler(
                compiler=self.compiler,
                verbose=self.verbose,
                dry_run=self.dry_run,
                force=self.force)
            customize_compiler(compiler)
            extra_args = exe.extra_compile_args or []
            objects = compiler.compile(exe.sources,
                                       macros=self.define,
                                       extra_postargs=extra_args,
                                       output_dir=self.build_temp)
            # LDFLAGS is a module-level constant defined elsewhere in
            # this file.
            compiler.link_executable(objects,
                                     extra_preargs=LDFLAGS,
                                     output_progname=exe.name,
                                     output_dir=self.build_exe)
class InstallQLib(install_data):
    """Install the q/k scripts produced by build_qlib into QHOME."""

    description = "install q/k scripts"

    build_dir = None    # source tree: build_qlib's build_lib
    skip_build = None   # mirrors the install command's --skip-build
    outfiles = None     # files written by run(), for get_outputs()

    def finalize_options(self):
        # Install directly into q's home directory (QHOME).
        self.set_undefined_options('config', ('q_home', 'install_dir'))
        self.set_undefined_options('build_qlib', ('build_lib', 'build_dir'))
        self.set_undefined_options('install', ('skip_build', 'skip_build'))

    def run(self):
        if not self.skip_build:
            self.run_command('build_qlib')
        self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
class InstallQExt(install_data):
    """Install q extension modules into QHOME/<arch>."""

    description = "install q/k scripts"

    q_home = None       # QHOME, from the config command
    q_arch = None       # q architecture subdirectory, e.g. 'l64'
    build_dir = None    # source tree: build_qext's build_lib
    skip_build = None
    install_dir = None  # computed: q_home/q_arch
    outfiles = None

    def finalize_options(self):
        self.set_undefined_options('config',
                                   ('q_home', 'q_home'),
                                   ('q_arch', 'q_arch'))
        self.set_undefined_options('build_qext', ('build_lib', 'build_dir'))
        self.set_undefined_options('install', ('skip_build', 'skip_build'))
        # Shared objects live in the architecture-specific subdirectory.
        self.install_dir = os.path.join(self.q_home, self.q_arch)

    def run(self):
        if not self.skip_build:
            self.run_command('build_qext')
        self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
class InstallExe(install_scripts):
    """Install the executables built by build_exe into the scripts dir."""

    description = "install executables"

    outfiles = None  # files written by run(), for get_outputs()

    def finalize_options(self):
        self.set_undefined_options('build_exe', ('build_exe', 'build_dir'))
        self.set_undefined_options('install',
                                   ('install_scripts', 'install_dir'),
                                   ('force', 'force'),
                                   ('skip_build', 'skip_build'),
                                   )

    def run(self):
        if not self.skip_build:
            self.run_command('build_exe')
        from stat import ST_MODE
        # Nothing to do when the distribution declares no executables.
        if not self.get_inputs():
            return
        self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
        if os.name == 'posix':
            # Set the executable bits (owner, group, and world) on
            # all the executables we just installed.
            for file in self.get_outputs():
                if self.dry_run:
                    self.announce("changing mode of %s" % file, 2)
                else:
                    mode = ((os.stat(file)[ST_MODE]) | 0o555) & 0o7777
                    self.announce("changing mode of %s to %o" %
                                  (file, mode), 2)
                    os.chmod(file, mode)

    def get_inputs(self):
        return self.distribution.executables or []
class PyqDistribution(Distribution):
    """Distribution subclass that accepts pyq-specific setup() keywords."""

    qlib_scripts = None  # q/k scripts handled by build_qlib/install_qlib
    qext_modules = None  # q Extension modules handled by build_qext
    executables = None   # standalone executables handled by build_exe
# Hook pyq's extra build/install steps into the standard distutils
# command chains so plain `build` and `install` run them automatically.
build.sub_commands.extend([
    ('build_qlib', None),
    ('build_qext', None),
    ('build_exe', None),
])
install.sub_commands.extend([
    ('install_qlib', None),
    ('install_qext', None),
    ('install_exe', None),
])
def run_setup(metadata):
    """Assemble the setup() keyword arguments and run setup().

    ``metadata`` is the base keyword dict (see the module-level METADATA);
    this function adds the version, descriptions derived from the module
    docstring, the custom distribution/command classes, setuptools extras
    when available, and (conditionally) the embedPy components.
    """
    # First docstring paragraph is the summary; the rest becomes the
    # reST long description with an underlined title.
    summary, details = __doc__.split('\n\n', 1)
    rst_description = '\n'.join([summary, '=' * len(summary), '\n' + details])
    keywords = metadata.copy()
    keywords.update(
        version=get_version(),
        description=summary,
        long_description=rst_description,
        distclass=PyqDistribution,
        cmdclass={
            'config': Config,
            'build_qlib': BuildQLib,
            'build_qext': BuildQExt,
            'build_ext': BuildPyExt,
            'build_exe': BuildExe,
            'install_qlib': InstallQLib,
            'install_qext': InstallQExt,
            'install_exe': InstallExe,
        },
    )
    # extras_require is a setuptools-only feature.
    if 'setuptools' in sys.modules:
        keywords['extras_require'] = {
            'test': TEST_REQUIREMENTS,
            'ipython': IPYTHON_REQUIREMENTS,
            'all': TEST_REQUIREMENTS + IPYTHON_REQUIREMENTS + [
                'py', 'numpy', 'prompt-toolkit', 'pygments-q'],
        }
    # embedPy is only built on Python 3, off Windows, outside conda,
    # when the embedPy sources are checked out and numpy is importable.
    if (sys.version_info >= (3,) and not WINDOWS and
            'CONDA_PREFIX' not in os.environ and
            os.path.exists('embedPy/p.q')):
        try:
            import numpy
        except ImportError:
            pass
        else:
            add_embedpy_components(keywords)
    setup(**keywords)
def add_embedpy_components(keywords):
    """Register embedPy's p.q script, extension module and data file."""
    script = '../../embedPy/p.q'
    keywords['qlib_scripts'].append(script)
    p_ext = Extension('p', sources=['embedPy/py.c', ])
    keywords['qext_modules'].append(p_ext)
    add_data_file(keywords['data_files'], 'q', 'embedPy/p.q')
# Script entry point: build/install pyq with the module-level metadata.
if __name__ == '__main__':
    run_setup(METADATA)
|
KxSystems/pyq | src/pyq/cmd.py | Cmd.precmd | python | def precmd(self, line):
if line.startswith('help'):
if not q("`help in key`.q"):
try:
q("\\l help.q")
except kerr:
return '-1"no help available - install help.q"'
if line == 'help':
line += "`"
return line | Support for help | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/cmd.py#L35-L45 | null | class Cmd(_cmd.Cmd, object):
"""q REPL"""
_prompt = 'q{ns})'
@property
def prompt(self):
    """Interactive prompt string with the current namespace interpolated.

    Built from the ``_prompt`` template; color/namespace come from the
    module helpers ``_prompt_color``/``_prompt_namespace``/``_colorize``
    (defined elsewhere in this file).
    """
    code = _prompt_color()
    prompt = self._prompt.format(ns=_prompt_namespace())
    return _colorize(code, prompt)
def onecmd(self, line):
    """Interpret the line.

    Returns True to stop the command loop ('\\' or EOF), False otherwise.
    Any other line is evaluated as q code; errors are printed in q's
    'error style and non-null results are pretty-printed.
    """
    if line == '\\':
        # A lone backslash exits the q REPL.
        return True
    elif line == 'EOF':
        print('\r', end='')
        return True
    else:
        try:
            v = q(line)
        except kerr as e:
            # Mimic q's error display: a leading single quote.
            print("'%s" % e.args[0])
        else:
            # q('::') is the generic null; suppress it like the q console.
            if v != q('::'):
                v.show()
        return False
if ptk:
cmdloop = ptk.cmdloop
|
KxSystems/pyq | src/pyq/cmd.py | Cmd.onecmd | python | def onecmd(self, line):
if line == '\\':
return True
elif line == 'EOF':
print('\r', end='')
return True
else:
try:
v = q(line)
except kerr as e:
print("'%s" % e.args[0])
else:
if v != q('::'):
v.show()
return False | Interpret the line | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/cmd.py#L47-L62 | null | class Cmd(_cmd.Cmd, object):
"""q REPL"""
_prompt = 'q{ns})'
@property
def prompt(self):
code = _prompt_color()
prompt = self._prompt.format(ns=_prompt_namespace())
return _colorize(code, prompt)
def precmd(self, line):
    """Support for help"""
    if not line.startswith('help'):
        return line
    # Lazily load help.q the first time help is requested.
    if not q("`help in key`.q"):
        try:
            q("\\l help.q")
        except kerr:
            return '-1"no help available - install help.q"'
    # Bare `help` becomes `help``, which lists available topics.
    return line + "`" if line == 'help' else line
if ptk:
cmdloop = ptk.cmdloop
|
KxSystems/pyq | src/pyq/_pt_run.py | console_size | python | def console_size(fd=1):
try:
import fcntl
import termios
import struct
except ImportError:
size = os.getenv('LINES', 25), os.getenv('COLUMNS', 80)
else:
size = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
b'1234'))
return size | Return console size as a (LINES, COLUMNS) tuple | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/_pt_run.py#L18-L29 | null | """REPL based on prompt-toolkit"""
from __future__ import print_function
import sys
import os
try:
import ptpython.entry_points.run_ptpython as ptp
except ImportError:
if hasattr(sys, '_called_from_test'):
raise
print('Cannot import ptpython. Try',
' pip install ptpython', sep='\n')
raise SystemExit(1)
from pyq import q, kerr
def run(q_prompt=False):
    """Run a prompt-toolkit based REPL.

    Sizes q's console to the terminal, loads the script named by the
    first command-line argument (if any), optionally drops into the q
    prompt first, then starts ptpython.
    """
    lines, columns = console_size()
    # Tell q the console geometry (\c LINES COLUMNS).
    q(r'\c %d %d' % (lines, columns))
    if len(sys.argv) > 1:
        try:
            q(r'\l %s' % sys.argv[1])
        except kerr as e:
            print(e)
            raise SystemExit(1)
        else:
            # Consume the argument so ptpython does not see it.
            del sys.argv[1]
    if q_prompt:
        q()
    ptp.run()
|
KxSystems/pyq | src/pyq/_pt_run.py | run | python | def run(q_prompt=False):
lines, columns = console_size()
q(r'\c %d %d' % (lines, columns))
if len(sys.argv) > 1:
try:
q(r'\l %s' % sys.argv[1])
except kerr as e:
print(e)
raise SystemExit(1)
else:
del sys.argv[1]
if q_prompt:
q()
ptp.run() | Run a prompt-toolkit based REPL | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/_pt_run.py#L32-L46 | [
"def console_size(fd=1):\n \"\"\"Return console size as a (LINES, COLUMNS) tuple\"\"\"\n try:\n import fcntl\n import termios\n import struct\n except ImportError:\n size = os.getenv('LINES', 25), os.getenv('COLUMNS', 80)\n else:\n size = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,\n b'1234'))\n return size\n"
] | """REPL based on prompt-toolkit"""
from __future__ import print_function
import sys
import os
try:
import ptpython.entry_points.run_ptpython as ptp
except ImportError:
if hasattr(sys, '_called_from_test'):
raise
print('Cannot import ptpython. Try',
' pip install ptpython', sep='\n')
raise SystemExit(1)
from pyq import q, kerr
def console_size(fd=1):
    """Return console size as a (LINES, COLUMNS) tuple of ints.

    Queries the terminal attached to *fd* via TIOCGWINSZ; when the
    required modules are unavailable (e.g. on Windows), falls back to
    the LINES/COLUMNS environment variables.
    """
    try:
        import fcntl
        import termios
        import struct
    except ImportError:
        # BUGFIX: os.getenv returns strings, which broke callers that
        # format the result with %d — coerce to int.
        size = (int(os.getenv('LINES', 25)),
                int(os.getenv('COLUMNS', 80)))
    else:
        size = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
                                               b'1234'))
    return size
|
KxSystems/pyq | src/pyq/_n.py | get_unit | python | def get_unit(a):
typestr = a.dtype.str
i = typestr.find('[')
if i == -1:
raise TypeError("Expected a datetime64 array, not %s", a.dtype)
return typestr[i + 1: -1] | Extract the time unit from array's dtype | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/_n.py#L50-L56 | null | """A helper module for interfacing with numpy
Numpy has four date units
Code Meaning Time span (relative) Time span (absolute)
Y year +/- 9.2e18 years [9.2e18 BC, 9.2e18 AD]
M month +/- 7.6e17 years [7.6e17 BC, 7.6e17 AD]
W week +/- 1.7e17 years [1.7e17 BC, 1.7e17 AD]
D day +/- 2.5e16 years [2.5e16 BC, 2.5e16 AD]
And nine time units:
Code Meaning Time span (relative) Time span (absolute)
h hour +/- 1.0e15 years [1.0e15 BC, 1.0e15 AD]
m minute +/- 1.7e13 years [1.7e13 BC, 1.7e13 AD]
s second +/- 2.9e12 years [ 2.9e9 BC, 2.9e9 AD]
ms millisecond +/- 2.9e9 years [ 2.9e6 BC, 2.9e6 AD]
us microsecond +/- 2.9e6 years [290301 BC, 294241 AD]
ns nanosecond +/- 292 years [ 1678 AD, 2262 AD]
ps picosecond +/- 106 days [ 1969 AD, 1970 AD]
fs femtosecond +/- 2.6 hours [ 1969 AD, 1970 AD]
as attosecond +/- 9.2 seconds [ 1969 AD, 1970 AD]
kdb+ has four datetime-like types
num char q-type c-type
12 "p" timestamp int64_t
13 "m" month int32_t
14 "d" date int32_t
15 "z" datetime double
And four timedelta-like types
16 "n" timespan int64_t
17 "u" minute int32_t
18 "v" second int32_t
19 "t" time int32_t
"""
from __future__ import absolute_import
from datetime import date
import numpy
K_DATE_SHIFT = date(2000, 1, 1).toordinal() - date(1970, 1, 1).toordinal()
K_STAMP_SHIFT = K_DATE_SHIFT * 24 * 60 * 60 * 10 ** 9
_SCALE = {
'W': ('floor_divide', 7 * 24 * 60 * 60 * 10 ** 9),
'D': ('floor_divide', 24 * 60 * 60 * 10 ** 9),
'h': ('floor_divide', 60 * 60 * 10 ** 9),
'm': ('floor_divide', 60 * 10 ** 9),
's': ('floor_divide', 10 ** 9),
'ms': ('floor_divide', 10 ** 6),
'us': ('floor_divide', 10 ** 3),
'ns': (None, None),
'ps': ('multiply', 10 ** 3),
'fs': ('multiply', 10 ** 6),
'as': ('multiply', 10 ** 9),
}
_UNIT = {
'D': ('date', K_DATE_SHIFT, None, None),
'Y': ('year', -1970, None, None),
'W': ('date', K_DATE_SHIFT, 'floor_divide', 7),
'M': ('month', 30 * 12, None, None),
'h': ('timestamp', K_STAMP_SHIFT, 'floor_divide', 60 * 60 * 10 ** 9),
'm': ('timestamp', K_STAMP_SHIFT, 'floor_divide', 60 * 10 ** 9),
's': ('timestamp', K_STAMP_SHIFT, 'floor_divide', 10 ** 9),
'ns': ('timestamp', K_STAMP_SHIFT, None, None),
'ps': ('timestamp', K_STAMP_SHIFT, 'multiply', 1000),
}
_DTYPES = [
"O", # 0
"?", # 1 - boolean
"16B", # 2 - guid
None, # 3 - unused
"B", # 4 - byte
"h", # 5 - short
"i", # 6 - int
"q", # 7 - long
"f", # 8 - real
"d", # 9 - float
"S1", # 10 - char
"O", # 11 - symbol
"M8[ns]", # 12 - timestamp
"M8[M]", # 13 - month
"M8[D]", # 14 - date
None, # 15 - datetime (unsupported)
"m8[ns]", # 16 - timespan
"m8[m]", # 17 - minute
"m8[s]", # 18 - second
"m8[ms]", # 19 - time
"O", # 20 - `sym$
]
def dtypeof(x):
    """Return the dtype corresponding to a given q object"""
    code = abs(x._t)
    # Enumerations and other types beyond the table fall back to object.
    return _DTYPES[code] if code < 20 else 'O'
def k2a(a, x):
    """Rescale data from a K object x to array a.

    Copies x's payload into the preallocated numpy array *a* and
    rescales/shifts it so that q's epoch (2000-01-01) and time units
    line up with numpy's datetime64/timedelta64 conventions.  The
    shift/scale parameters come from the module tables _UNIT and _SCALE,
    keyed by a's datetime unit.
    """
    func, scale = None, 1
    t = abs(x._t)
    # timestamp (12), month (13), date (14) or datetime (15)
    if 12 <= t <= 15:
        unit = get_unit(a)
        attr, shift, func, scale = _UNIT[unit]
        a[:] = getattr(x, attr).data
        a += shift  # move from the q epoch (2000) to the Unix epoch (1970)
    # timespan (16), minute (17), second (18) or time (19)
    elif 16 <= t <= 19:
        unit = get_unit(a)
        func, scale = _SCALE[unit]
        a[:] = x.timespan.data  # durations normalized through nanoseconds
    else:
        a[:] = list(x)

    if func is not None:
        # func is a numpy ufunc name ('floor_divide' or 'multiply');
        # rescale in-place through an integer view of the buffer.
        func = getattr(numpy, func)
        a[:] = func(a.view(dtype='i8'), scale)

    if a.dtype.char in 'mM':
        # Propagate q nulls as NaT in datetime64/timedelta64 output.
        # NOTE(review): n is a K object, so n.any is q's `any` applied
        # to the null mask (no call needed) — confirm against the K class.
        n = x.null
        if n.any:
            a[n] = None
def array(self, dtype=None):
    """An implementation of __array__().

    Converts the K object to a numpy array: vectors of symbol (11)
    through enumerated types (<77) via the dtype table and k2a; tables
    (98) to record arrays column by column; anything else through a
    plain Python list.  For the typed branches the caller-supplied
    dtype is ignored in favor of the natural q-to-numpy mapping.
    """
    t = self._t
    # symbol (11) through last enum (76): use the natural dtype.
    if 11 <= t < 77:
        dtype = dtypeof(self)
        a = numpy.empty(len(self), dtype)
        k2a(a, self)
        return a
    # table (98): build a structured (record) array.
    if t == 98:
        if dtype is None:
            dtype = list(zip(self.cols, (dtypeof(c) for c in self.flip.value)))
        dtype = numpy.dtype(dtype)
        a = numpy.empty(int(self.count), dtype)
        for c in dtype.fields:
            k2a(a[c], self[c])
        return a
    # Fallback: generic conversion element by element.
    return numpy.array(list(self), dtype)
|
KxSystems/pyq | src/pyq/_n.py | k2a | python | def k2a(a, x):
func, scale = None, 1
t = abs(x._t)
# timestamp (12), month (13), date (14) or datetime (15)
if 12 <= t <= 15:
unit = get_unit(a)
attr, shift, func, scale = _UNIT[unit]
a[:] = getattr(x, attr).data
a += shift
# timespan (16), minute (17), second (18) or time (19)
elif 16 <= t <= 19:
unit = get_unit(a)
func, scale = _SCALE[unit]
a[:] = x.timespan.data
else:
a[:] = list(x)
if func is not None:
func = getattr(numpy, func)
a[:] = func(a.view(dtype='i8'), scale)
if a.dtype.char in 'mM':
n = x.null
if n.any:
a[n] = None | Rescale data from a K object x to array a. | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/_n.py#L118-L145 | [
"def get_unit(a):\n \"\"\"Extract the time unit from array's dtype\"\"\"\n typestr = a.dtype.str\n i = typestr.find('[')\n if i == -1:\n raise TypeError(\"Expected a datetime64 array, not %s\", a.dtype)\n return typestr[i + 1: -1]\n"
] | """A helper module for interfacing with numpy
Numpy has four date units
Code Meaning Time span (relative) Time span (absolute)
Y year +/- 9.2e18 years [9.2e18 BC, 9.2e18 AD]
M month +/- 7.6e17 years [7.6e17 BC, 7.6e17 AD]
W week +/- 1.7e17 years [1.7e17 BC, 1.7e17 AD]
D day +/- 2.5e16 years [2.5e16 BC, 2.5e16 AD]
And nine time units:
Code Meaning Time span (relative) Time span (absolute)
h hour +/- 1.0e15 years [1.0e15 BC, 1.0e15 AD]
m minute +/- 1.7e13 years [1.7e13 BC, 1.7e13 AD]
s second +/- 2.9e12 years [ 2.9e9 BC, 2.9e9 AD]
ms millisecond +/- 2.9e9 years [ 2.9e6 BC, 2.9e6 AD]
us microsecond +/- 2.9e6 years [290301 BC, 294241 AD]
ns nanosecond +/- 292 years [ 1678 AD, 2262 AD]
ps picosecond +/- 106 days [ 1969 AD, 1970 AD]
fs femtosecond +/- 2.6 hours [ 1969 AD, 1970 AD]
as attosecond +/- 9.2 seconds [ 1969 AD, 1970 AD]
kdb+ has four datetime-like types
num char q-type c-type
12 "p" timestamp int64_t
13 "m" month int32_t
14 "d" date int32_t
15 "z" datetime double
And four timedelta-like types
16 "n" timespan int64_t
17 "u" minute int32_t
18 "v" second int32_t
19 "t" time int32_t
"""
from __future__ import absolute_import
from datetime import date
import numpy
K_DATE_SHIFT = date(2000, 1, 1).toordinal() - date(1970, 1, 1).toordinal()
K_STAMP_SHIFT = K_DATE_SHIFT * 24 * 60 * 60 * 10 ** 9
def get_unit(a):
    """Extract the time unit from array's dtype.

    For a datetime64/timedelta64 array, returns the unit code between
    the brackets of the dtype string, e.g. 'ns' for '<M8[ns]'.

    Raises TypeError when *a* is not a datetime64/timedelta64 array.
    """
    typestr = a.dtype.str
    i = typestr.find('[')
    if i == -1:
        # BUGFIX: the message must be %-formatted; the original passed
        # (fmt, dtype) as two TypeError args, leaving %s unexpanded.
        raise TypeError("Expected a datetime64 array, not %s" % a.dtype)
    return typestr[i + 1: -1]
_SCALE = {
'W': ('floor_divide', 7 * 24 * 60 * 60 * 10 ** 9),
'D': ('floor_divide', 24 * 60 * 60 * 10 ** 9),
'h': ('floor_divide', 60 * 60 * 10 ** 9),
'm': ('floor_divide', 60 * 10 ** 9),
's': ('floor_divide', 10 ** 9),
'ms': ('floor_divide', 10 ** 6),
'us': ('floor_divide', 10 ** 3),
'ns': (None, None),
'ps': ('multiply', 10 ** 3),
'fs': ('multiply', 10 ** 6),
'as': ('multiply', 10 ** 9),
}
_UNIT = {
'D': ('date', K_DATE_SHIFT, None, None),
'Y': ('year', -1970, None, None),
'W': ('date', K_DATE_SHIFT, 'floor_divide', 7),
'M': ('month', 30 * 12, None, None),
'h': ('timestamp', K_STAMP_SHIFT, 'floor_divide', 60 * 60 * 10 ** 9),
'm': ('timestamp', K_STAMP_SHIFT, 'floor_divide', 60 * 10 ** 9),
's': ('timestamp', K_STAMP_SHIFT, 'floor_divide', 10 ** 9),
'ns': ('timestamp', K_STAMP_SHIFT, None, None),
'ps': ('timestamp', K_STAMP_SHIFT, 'multiply', 1000),
}
_DTYPES = [
"O", # 0
"?", # 1 - boolean
"16B", # 2 - guid
None, # 3 - unused
"B", # 4 - byte
"h", # 5 - short
"i", # 6 - int
"q", # 7 - long
"f", # 8 - real
"d", # 9 - float
"S1", # 10 - char
"O", # 11 - symbol
"M8[ns]", # 12 - timestamp
"M8[M]", # 13 - month
"M8[D]", # 14 - date
None, # 15 - datetime (unsupported)
"m8[ns]", # 16 - timespan
"m8[m]", # 17 - minute
"m8[s]", # 18 - second
"m8[ms]", # 19 - time
"O", # 20 - `sym$
]
def dtypeof(x):
"""Return the dtype corresponding to a given q object"""
t = abs(x._t)
if t < 20:
return _DTYPES[t]
return 'O'
def array(self, dtype=None):
"""An implementation of __array__()"""
t = self._t
# timestamp (12) through last enum (76)
if 11 <= t < 77:
dtype = dtypeof(self)
a = numpy.empty(len(self), dtype)
k2a(a, self)
return a
# table (98)
if t == 98:
if dtype is None:
dtype = list(zip(self.cols, (dtypeof(c) for c in self.flip.value)))
dtype = numpy.dtype(dtype)
a = numpy.empty(int(self.count), dtype)
for c in dtype.fields:
k2a(a[c], self[c])
return a
return numpy.array(list(self), dtype)
|
KxSystems/pyq | src/pyq/_n.py | array | python | def array(self, dtype=None):
t = self._t
# timestamp (12) through last enum (76)
if 11 <= t < 77:
dtype = dtypeof(self)
a = numpy.empty(len(self), dtype)
k2a(a, self)
return a
# table (98)
if t == 98:
if dtype is None:
dtype = list(zip(self.cols, (dtypeof(c) for c in self.flip.value)))
dtype = numpy.dtype(dtype)
a = numpy.empty(int(self.count), dtype)
for c in dtype.fields:
k2a(a[c], self[c])
return a
return numpy.array(list(self), dtype) | An implementation of __array__() | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/_n.py#L148-L166 | [
"def dtypeof(x):\n \"\"\"Return the dtype corresponding to a given q object\"\"\"\n t = abs(x._t)\n if t < 20:\n return _DTYPES[t]\n return 'O'\n",
"def k2a(a, x):\n \"\"\"Rescale data from a K object x to array a.\n\n \"\"\"\n func, scale = None, 1\n t = abs(x._t)\n # timestamp (12), month (13), date (14) or datetime (15)\n if 12 <= t <= 15:\n unit = get_unit(a)\n attr, shift, func, scale = _UNIT[unit]\n a[:] = getattr(x, attr).data\n a += shift\n # timespan (16), minute (17), second (18) or time (19)\n elif 16 <= t <= 19:\n unit = get_unit(a)\n func, scale = _SCALE[unit]\n a[:] = x.timespan.data\n else:\n a[:] = list(x)\n\n if func is not None:\n func = getattr(numpy, func)\n a[:] = func(a.view(dtype='i8'), scale)\n\n if a.dtype.char in 'mM':\n n = x.null\n if n.any:\n a[n] = None\n"
] | """A helper module for interfacing with numpy
Numpy has four date units
Code Meaning Time span (relative) Time span (absolute)
Y year +/- 9.2e18 years [9.2e18 BC, 9.2e18 AD]
M month +/- 7.6e17 years [7.6e17 BC, 7.6e17 AD]
W week +/- 1.7e17 years [1.7e17 BC, 1.7e17 AD]
D day +/- 2.5e16 years [2.5e16 BC, 2.5e16 AD]
And nine time units:
Code Meaning Time span (relative) Time span (absolute)
h hour +/- 1.0e15 years [1.0e15 BC, 1.0e15 AD]
m minute +/- 1.7e13 years [1.7e13 BC, 1.7e13 AD]
s second +/- 2.9e12 years [ 2.9e9 BC, 2.9e9 AD]
ms millisecond +/- 2.9e9 years [ 2.9e6 BC, 2.9e6 AD]
us microsecond +/- 2.9e6 years [290301 BC, 294241 AD]
ns nanosecond +/- 292 years [ 1678 AD, 2262 AD]
ps picosecond +/- 106 days [ 1969 AD, 1970 AD]
fs femtosecond +/- 2.6 hours [ 1969 AD, 1970 AD]
as attosecond +/- 9.2 seconds [ 1969 AD, 1970 AD]
kdb+ has four datetime-like types
num char q-type c-type
12 "p" timestamp int64_t
13 "m" month int32_t
14 "d" date int32_t
15 "z" datetime double
And four timedelta-like types
16 "n" timespan int64_t
17 "u" minute int32_t
18 "v" second int32_t
19 "t" time int32_t
"""
from __future__ import absolute_import
from datetime import date
import numpy
K_DATE_SHIFT = date(2000, 1, 1).toordinal() - date(1970, 1, 1).toordinal()
K_STAMP_SHIFT = K_DATE_SHIFT * 24 * 60 * 60 * 10 ** 9
def get_unit(a):
"""Extract the time unit from array's dtype"""
typestr = a.dtype.str
i = typestr.find('[')
if i == -1:
raise TypeError("Expected a datetime64 array, not %s", a.dtype)
return typestr[i + 1: -1]
_SCALE = {
'W': ('floor_divide', 7 * 24 * 60 * 60 * 10 ** 9),
'D': ('floor_divide', 24 * 60 * 60 * 10 ** 9),
'h': ('floor_divide', 60 * 60 * 10 ** 9),
'm': ('floor_divide', 60 * 10 ** 9),
's': ('floor_divide', 10 ** 9),
'ms': ('floor_divide', 10 ** 6),
'us': ('floor_divide', 10 ** 3),
'ns': (None, None),
'ps': ('multiply', 10 ** 3),
'fs': ('multiply', 10 ** 6),
'as': ('multiply', 10 ** 9),
}
_UNIT = {
'D': ('date', K_DATE_SHIFT, None, None),
'Y': ('year', -1970, None, None),
'W': ('date', K_DATE_SHIFT, 'floor_divide', 7),
'M': ('month', 30 * 12, None, None),
'h': ('timestamp', K_STAMP_SHIFT, 'floor_divide', 60 * 60 * 10 ** 9),
'm': ('timestamp', K_STAMP_SHIFT, 'floor_divide', 60 * 10 ** 9),
's': ('timestamp', K_STAMP_SHIFT, 'floor_divide', 10 ** 9),
'ns': ('timestamp', K_STAMP_SHIFT, None, None),
'ps': ('timestamp', K_STAMP_SHIFT, 'multiply', 1000),
}
_DTYPES = [
"O", # 0
"?", # 1 - boolean
"16B", # 2 - guid
None, # 3 - unused
"B", # 4 - byte
"h", # 5 - short
"i", # 6 - int
"q", # 7 - long
"f", # 8 - real
"d", # 9 - float
"S1", # 10 - char
"O", # 11 - symbol
"M8[ns]", # 12 - timestamp
"M8[M]", # 13 - month
"M8[D]", # 14 - date
None, # 15 - datetime (unsupported)
"m8[ns]", # 16 - timespan
"m8[m]", # 17 - minute
"m8[s]", # 18 - second
"m8[ms]", # 19 - time
"O", # 20 - `sym$
]
def dtypeof(x):
"""Return the dtype corresponding to a given q object"""
t = abs(x._t)
if t < 20:
return _DTYPES[t]
return 'O'
def k2a(a, x):
"""Rescale data from a K object x to array a.
"""
func, scale = None, 1
t = abs(x._t)
# timestamp (12), month (13), date (14) or datetime (15)
if 12 <= t <= 15:
unit = get_unit(a)
attr, shift, func, scale = _UNIT[unit]
a[:] = getattr(x, attr).data
a += shift
# timespan (16), minute (17), second (18) or time (19)
elif 16 <= t <= 19:
unit = get_unit(a)
func, scale = _SCALE[unit]
a[:] = x.timespan.data
else:
a[:] = list(x)
if func is not None:
func = getattr(numpy, func)
a[:] = func(a.view(dtype='i8'), scale)
if a.dtype.char in 'mM':
n = x.null
if n.any:
a[n] = None
|
KxSystems/pyq | src/pyq/__init__.py | versions | python | def versions():
stream = sys.stdout if _PY3K else sys.stderr
print('PyQ', __version__, file=stream)
if _np is not None:
print('NumPy', _np.__version__, file=stream)
print('KDB+ %s (%s) %s' % tuple(q('.z.K,.z.k,.z.o')), file=stream)
print('Python', sys.version, file=stream) | Report versions | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/__init__.py#L855-L862 | null | """pyq - python for kdb+"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from datetime import datetime, date, time
from collections import Mapping as _Mapping
import sys
import os
try:
import numpy as _np
except ImportError:
_np = None
try:
from ._k import K as _K, error as kerr, Q_VERSION, Q_DATE, Q_OS
except ImportError:
import ctypes
import platform
if not hasattr(ctypes.CDLL(None), 'b9'):
message = ("Importing the pyq package from "
"standalone python is not supported. ")
if platform.system() == 'Windows':
message += "Run path\\to\\q.exe python.q."
else:
message += "Use pyq executable."
raise ImportError(message)
raise
try:
from .version import version as __version__
except ImportError:
__version__ = 'unknown'
__metaclass__ = type
# Convenience constant to select code branches according to Q_VERSION
_KX3 = Q_VERSION >= 3
_PY3K = sys.hexversion >= 0x3000000
if _PY3K:
import builtins as __builtin__
else:
import __builtin__
import warnings
warnings.warn("Python 2 support will be discontinued soon",
DeprecationWarning)
# List of q builtin functions that are not defined in .q.
# NB: This is similar to .Q.res, but excludes non-function constructs
# such as "do", "if", "select" etc. We also exclude the "exit" function
# because it is rarely safe to use it to exit from pyq.
_Q_RES = ['abs', 'acos', 'asin', 'atan', 'avg', 'bin', 'cor', 'cos',
'cov', 'dev', 'div', 'ema', 'enlist', 'exp', 'getenv', 'in',
'insert', 'last', 'like', 'log', 'max', 'min', 'prd', 'reval',
'scov', 'sdev', 'setenv', 'sin', 'sqrt', 'ss', 'sum', 'svar',
'tan', 'var', 'wavg', 'within', 'wsum', 'xexp']
if _KX3 and Q_DATE >= date(2012, 7, 26):
# binr was introduced in kdb+3.0 2012.07.26
_Q_RES.append('binr')
if Q_VERSION >= 3.6:
_Q_RES.append('hopen')
class K(_K):
"""proxies for kdb+ objects
>>> q('2005.01.01 2005.12.04')
k('2005.01.01 2005.12.04')
Iteration over simple lists produces python objects
>>> list(q("`a`b`c`d"))
['a', 'b', 'c', 'd']
Iteration over q tables produces q dictionaries
>>> list(q("([]a:`x`y`z;b:1 2 3)"))
[k('`a`b!(`x;1)'), k('`a`b!(`y;2)'), k('`a`b!(`z;3)')]
Iteration over a q dictionary iterates over its key
>>> list(q('`a`b!1 2'))
['a', 'b']
as a consequence, iteration over a keyed table is the same as
iteration over its key table
>>> list(q("([a:`x`y`z]b:1 2 3)"))
[k('(,`a)!,`x'), k('(,`a)!,`y'), k('(,`a)!,`z')]
Callbacks into python
>>> def f(x, y):
... return x + y
>>> q('{[f]f(1;2)}', f)
k('3')
Buffer protocol
The following session illustrates how buffer protocol implemented by
K objects can be used to write data from Python streams directly yo kdb+.
Create a list of chars in kdb+
>>> x = kp('xxxxxx')
Open a pair of file descriptors
>>> r, w = os.pipe()
Write 6 bytes to the write end
>>> os.write(w, b'abcdef')
6
Read from the read-end into x
>>> f = os.fdopen(r, mode='rb')
>>> f.readinto(x)
6
Now x contains the bytes that were sent through the pipe
>>> x
k('"abcdef"')
Close the descriptors and the stream
>>> os.close(w); f.close()
Low level interface
The K type provides a set of low level functions that are similar
to the C API provided by the `k.h header <http://kx.com/q/c/c/k.h>`_.
The C API functions that return K objects in C are implemented as
class methods that return instances of K type.
Atoms
>>> K._kb(True), K._kg(5), K._kh(42), K._ki(-3), K._kj(2**40)
(k('1b'), k('0x05'), k('42h'), k('-3i'), k('1099511627776'))
>>> K._ke(3.5), K._kf(1.0), K._kc(b'x'), K._ks('xyz')
(k('3.5e'), k('1f'), k('"x"'), k('`xyz'))
>>> K._kd(0), K._kz(0.0), K._kt(0)
(k('2000.01.01'), k('2000.01.01T00:00:00.000'), k('00:00:00.000'))
Tables and dictionaries
>>> x = K._xD(k('`a`b`c'), k('1 2 3')); x, K._xT(x)
(k('`a`b`c!1 2 3'), k('+`a`b`c!1 2 3'))
Keyed table
>>> t = K._xD(K._xT(K._xD(k(",`a"), k(",1 2 3"))),
... K._xT(K._xD(k(",`b"), k(",10 20 30"))))
>>> K._ktd(t)
k('+`a`b!(1 2 3;10 20 30)')
"""
# Lighten the K objects by preventing the automatic creation of
# __dict__ and __weakref__ for each instance.
__slots__ = ()
# Helper methods for use in C implementation of __new__
def _set_mask(self, mask):
return q("{?[y;((),x)0N;x]}", self, mask)
@classmethod
def _from_record_array(cls, x):
fields = [f for f, t in x.dtype.descr]
k = q('!', list(fields), [K(x[f]) for f in fields])
if x.ndim:
k = k.flip
return k
@classmethod
def _from_sequence(cls, x, elm=None):
r = cls._ktn(0, 0)
g = iter(x)
try:
i = next(g)
except StopIteration:
return r
en = _K_k(0, 'enlist')
r._ja(en)
if elm is None:
elm = cls
for i in itertools.chain([i], g):
i = elm(i)
# Symbols and lists require special treatment
if i._t in (-11, 11, 0):
i = i.enlist
r._ja(i)
return r.eval
@classmethod
def _convert(cls, x):
for t in type(x).mro():
c = converters.get(t)
if c is not None:
return c(x)
return cls._from_sequence(x)
def __reduce_ex__(self, proto):
x = self._b9(1, self)
b = memoryview(x).tobytes()
return (d9, (b,))
def __getitem__(self, x):
"""
>>> k("10 20 30 40 50")[k("1 3")]
k('20 40')
>>> k("`a`b`c!1 2 3")['b']
2
"""
try:
return _K.__getitem__(self, x)
except (TypeError, NotImplementedError):
pass
try:
start, stop, step = x.indices(len(self))
except AttributeError:
i = K(x)
if self._t == 99 and i._t < 0:
return self.value[self._k(0, "?", self.key, i)]
else:
return self._k(0, "@", self, i)
if step == 1:
return self._k(0, "sublist", self._J([start, stop - start]), self)
# NB: .indices() cannot return step=0.
i = start + step * q.til(max(0, (stop - start) // step))
return self._k(0, "{$[99=type x;(key[x]y)!value[x]y;x y]}", self, i)
def __getattr__(self, a):
"""table columns can be accessed via dot notation
>>> q("([]a:1 2 3; b:10 20 30)").a
k('1 2 3')
"""
t = self._t
if t == 98:
return self._k(0, '{x`%s}' % a, self)
if t == 99:
if self._k(0, "{11h~type key x}", self):
if a == 'items':
# NB: Workaround for a bug in OrderedDict in Python 3.5.
# See http://bugs.python.org/issue27576 for details.
raise AttributeError
return self._k(0, '{x`%s}' % a, self)
return self._k(0, '{(0!x)`%s}' % a, self)
if 12 <= abs(t) < 20:
try:
return self._k(0, "`%s$" % a, self)
except kerr:
pass
raise AttributeError(a)
_fields = b" g@ ghijefgsjiifjiii"
if _PY3K:
_fields = [bytes([_x]) for _x in _fields]
def __int__(self):
"""converts K scalars to python int
>>> [int(q(x)) for x in '1b 2h 3 4e `5 6.0 2000.01.08'.split()]
[1, 2, 3, 4, 5, 6, 7]
"""
t = self._t
if t >= 0:
raise TypeError("cannot convert non-scalar to int")
return int(self.inspect(self._fields[-t]))
__long__ = __int__
def __float__(self):
"""converts K scalars to python float
>>> [float(q(x)) for x in '1b 2h 3 4e `5 6.0 2000.01.08'.split()]
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
"""
t = self._t
if t >= 0:
raise TypeError("cannot convert non-scalar to float")
return float(self.inspect(self._fields[-t]))
def __index__(self):
t = self._t
if -5 >= t >= -7:
return int(self)
raise TypeError("Only scalar short/int/long K objects "
"can be converted to an index")
# Strictly speaking, this is only needed for Python 3.x, but
# there is no harm if this is defined and not used in Python 2.x.
def __bytes__(self):
t = self._t
if -5 >= t >= -7:
return bytes(int(self))
if 0 < abs(t) < 11:
if abs(t) == 2:
# A work-around while .data is not implemented for guid type
from uuid import UUID
x = q('(),', self) # ensure that x is a list
return b''.join(UUID(int=i).bytes for i in x)
return bytes(self.data)
raise BufferError("k object of type %d" % t)
def __eq__(self, other):
"""
>>> K(1) == K(1)
True
>>> K(1) == None
False
"""
try:
other = K(other)
except TypeError:
return False
return bool(k('~')(self, other))
def __ne__(self, other):
"""
>>> K(1) != K(2)
True
"""
return bool(k('~~')(self, other))
def __contains__(self, item):
"""membership test
>>> 1 in q('1 2 3')
True
>>> 'abc' not in q('(1;2.0;`abc)')
False
"""
if self._t:
x = q('in', item, self)
else:
x = q('{sum x~/:y}', item, self)
return bool(x)
def keys(self):
"""returns q('key', self)
Among other uses, enables interoperability between q and
python dicts.
>>> from collections import OrderedDict
>>> OrderedDict(q('`a`b!1 2'))
OrderedDict([('a', 1), ('b', 2)])
>>> d = {}; d.update(q('`a`b!1 2'))
>>> list(sorted(d.items()))
[('a', 1), ('b', 2)]
"""
return self._k(0, 'key', self)
def show(self, start=0, geometry=None, output=None):
"""pretty-print data to the console
(similar to q.show, but uses python stdout by default)
>>> x = q('([k:`x`y`z]a:1 2 3;b:10 20 30)')
>>> x.show() # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
x| 1 10
y| 2 20
z| 3 30
the first optional argument, 'start' specifies the first row to be
printed (negative means from the end)
>>> x.show(2) # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
z| 3 30
>>> x.show(-2) # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
y| 2 20
z| 3 30
the geometry is the height and width of the console
>>> x.show(geometry=[4, 6])
k| a..
-| -..
x| 1..
..
"""
if output is None:
output = sys.stdout
if geometry is None:
geometry = q.value(kp("\\c"))
else:
geometry = self._I(geometry)
if start < 0:
start += q.count(self)
# Make sure nil is not passed to a q function
if self._id() != nil._id():
r = self._show(geometry, start)
else:
r = '::\n'
if isinstance(output, type):
return output(r)
try:
output.write(r)
except TypeError:
output.write(str(r))
# See issue #665
def decode(self, encoding='utf-8', errors='strict'):
return bytes(self).decode(encoding, errors)
def _seu(self, what, columns, by, where, kwds):
args = [self]
anames = ['self']
if kwds:
extra = sorted(kwds.keys())
args.extend(kwds[name] for name in extra)
anames.extend(extra)
if not isinstance(columns, str):
columns = ','.join(str(x) for x in columns)
query = "{[%s]%s %s " % (';'.join(anames), what, columns)
if by:
if not isinstance(by, str):
by = ','.join(str(x) for x in by)
query += " by " + by
query += " from self"
if where:
if not isinstance(where, str):
where = ','.join(str(x) for x in where)
query += " where " + where
query += '}'
return q(query, *args)
def select(self, columns=(), by=(), where=(), **kwds):
"""select from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.select('a', where='b > 20').show()
a
-
3
"""
return self._seu('select', columns, by, where, kwds)
def exec_(self, columns=(), by=(), where=(), **kwds):
"""exec from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.exec_('a', where='b > 10').show()
2 3
"""
return self._seu('exec', columns, by, where, kwds)
def update(self, columns=(), by=(), where=(), **kwds):
"""update from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.update('a*2',
... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE
a b
----
1 10
2 20
6 30
"""
return self._seu('update', columns, by, where, kwds)
@property
def ss(self):
if self._t == 10:
return q.ss(self)
return q('`ss$', self)
if _np is not None:
@property
def _mask(self):
x = self._get_null()
if x is None:
return _np.ma.nomask
else:
return _np.asarray(x)
from ._n import array as __array__
__array_priority__ = 20
__doc__ += """
K objects can be used in Python arithmetic expressions
>>> x, y, z = map(K, (1, 2, 3))
>>> print(x + y, x * y,
... z/y, x|y, x&y, abs(-z)) #doctest: +NORMALIZE_WHITESPACE
3 2 1.5 2 1 3
Mixing K objects with python numbers is allowed
>>> 1/q('1 2 4')
k('1 0.5 0.25')
>>> q.til(5)**2
k('0 1 4 9 16f')
"""
def __format__(self, fmt):
if fmt:
return format(self._pys(), fmt)
return str(self)
def __sizeof__(self):
return object.__sizeof__(self) + int(self._sizeof())
def __fspath__(self):
"""Return the file system path representation of the object."""
if self._t != -11: # symbol
raise TypeError
sym = str(self)
if not sym.startswith(':'):
raise TypeError
return sym[1:]
def __complex__(self):
"""Called to implement the built-in function complex()."""
if self._t != 99 or self.key != ['re', 'im']:
return complex(float(self))
return complex(float(self.re), float(self.im))
@classmethod
def dict(cls, *args, **kwds):
"""Construct a q dictionary
K.dict() -> new empty q dictionary (q('()!()')
K.dict(mapping) -> new dictionary initialized from a mapping object's
(key, value) pairs
K.dict(iterable) -> new dictionary initialized from an iterable
yielding (key, value) pairs
K.dict(**kwargs) -> new dictionary initialized with the name=value
pairs in the keyword argument list.
For example: K.dict(one=1, two=2)
"""
if args:
if len(args) > 1:
raise TypeError("Too many positional arguments")
x = args[0]
keys = []
vals = []
try:
x_keys = x.keys
except AttributeError:
for k, v in x:
keys.append(k)
vals.append(v)
else:
keys = x_keys()
vals = [x[k] for k in keys]
return q('!', keys, vals)
else:
if kwds:
keys = []
vals = []
for k, v in kwds.items():
keys.append(k)
vals.append(v)
return q('!', keys, vals)
else:
return q('()!()')
@classmethod
def table(cls, *args, **kwds):
if args or kwds:
return cls.dict(*args, **kwds).flip
else:
raise TypeError("A table must have at least one column")
if _PY3K:
setattr(K, 'exec', K.exec_)
def _q_builtins():
from keyword import iskeyword
# Allow _q_builtins() to be called before q is defined
def q(x):
return K._k(0, x)
kver = q('.z.K').inspect(b'f')
names = _Q_RES + list(q("1_key[.q]except`each`over`scan"))
if kver < 3.4:
# ema is present since kdb+3.4
q(r'.q.ema:{first[y]("f"$1-x)\x*y}')
if kver < 3.3:
# restricted eval was added in 3.3
q(r'.q.reval:eval')
if kver < 3.0:
for new in ['scov', 'svar', 'sdev']:
names.remove(new)
pairs = []
for x in names:
attr = x
if iskeyword(x):
attr += '_'
pairs.append((attr, q(x)))
return pairs
def _genmethods(cls, obj):
q(r'\l pyq-operators.q')
cls._show = q('{` sv .Q.S[y;z;x]}')
cls._sizeof = q('.pyq.sizeof')
for spec, verb in [
('add', '+'), ('sub', '-'), ('rsub', '{y-x}'),
('mul', '*'), ('pow', 'xexp'), ('rpow', '{y xexp x}'),
('xor', '^'), ('rxor', '{y^x}'),
('truediv', '%'), ('rtruediv', '{y%x}'),
('floordiv', 'div'), ('rfloordiv', '{y div x}'),
('and', '&'), ('or', '|'),
('mod', 'mod'), ('rmod', '{y mod x}'), ('invert', 'not'),
('pos', '{@[flip;x;x]}'), ('neg', '-:'), ('abs', 'abs'),
# x @ y - composition if y is a function, x[y] otherwise.
('matmul', "{$[100>type y;x y;'[x;y]]}"),
('rmatmul', "{$[100>type x;y x;'[y;x]]}"),
]:
setattr(cls, '__%s__' % spec, q(verb))
for spec in 'add mul and or'.split():
setattr(cls, '__r%s__' % spec, getattr(cls, '__%s__' % spec))
for x, f in _q_builtins():
if not hasattr(cls, x):
setattr(cls, x, f)
obj.__dict__[x] = f
def cmp_op(op):
def dunder(self, other):
other = K(other)
if self._t < 0 and other._t < 0:
return bool(q(op, self, other))
else:
raise NotImplementedError
return dunder
for spec, verb in [('gt', '>'), ('lt', '<'), ('ge', '>='), ('le', '<=')]:
setattr(cls, '__%s__' % spec, cmp_op(verb))
# Shift operators
for x in ['', 'r']:
for y in 'lr':
op = x + y + 'shift'
setattr(cls, '__%s__' % op, q('.pyq.' + op))
def d9(x):
"""like K._d9, but takes python bytes"""
return K._d9(K._kp(x))
def k(m, *args):
return K._k(0, 'k)' + m, *map(K, args))
class _Q(object):
"""a portal to kdb+"""
def __init__(self):
object.__setattr__(self, '_cmd', None)
object.__setattr__(self, '_q_names',
[name for name, _ in _q_builtins()])
def __call__(self, m=None, *args):
"""Execute q code."""
try:
return K._k(0, m, *map(K, args))
except TypeError:
if m is not None:
raise
if self._cmd is None:
from .cmd import Cmd
object.__setattr__(self, '_cmd', Cmd())
self._cmd.cmdloop()
def __getattr__(self, attr, _k=K._k):
try:
return _k(0, attr.rstrip('_'))
except kerr:
pass
raise AttributeError(attr)
def __setattr__(self, attr, value):
self("@[`.;;:;]", attr, value)
def __delattr__(self, attr):
k = K._k
k(0, "delete %s from `." % attr)
def __dir__(self):
return self._q_names + list(self.key('.'))
q = _Q()
nil = q('(value +[;0])1')
show = K.show
def _ni(x):
r = q('()')
append = q(',')
for i in x:
r = append(r, K(i))
return r
_X = {K: K._K, str: K._S, int: (K._J if _KX3 else K._I),
float: K._F, date: K._D, time: _ni, datetime: _ni, bool: K._B}
def _listtok(x):
if x:
for i in x:
if i is not None:
break
c = _X.get(type(i))
if c is not None:
try:
return c(x)
except (TypeError, ValueError):
pass
return K._from_sequence(x)
return K._ktn(0, 0)
_X[list] = lambda x: K([K(i) for i in x])
def _tupletok(x):
try:
fields = x._fields
except AttributeError:
return K._from_sequence(x)
else:
return K._xD(K(fields), K._from_sequence(x))
kp = K._kp
converters = {
list: _listtok,
tuple: _tupletok,
type(lambda: 0): K._func,
type(sum): K._func,
dict: lambda x: K._xD(K(x.keys()), K(x.values())),
complex: lambda z: K._xD(K._S(['re', 'im']), K._F([z.real, z.imag])),
bytearray: K._from_memoryview,
memoryview: K._from_memoryview,
}
if _PY3K:
converters[bytes] = K._kp
else:
converters[unicode] = K._ks
_X[unicode] = K._S
_X[long] = (K._J if _KX3 else K._I)
try:
converters[buffer] = K._kp
except NameError:
buffer = str
###############################################################################
# Lazy addition of converters
###############################################################################
lazy_converters = {
'uuid': [('UUID', lambda u: K._kguid(u.int))],
'py._path.local': [
('LocalPath',
(lambda p: K(':' + p.strpath)) if os.sep == '/' else
lambda p: K(':' + p.strpath.replace(os.sep, '/')))
],
'pathlib': [('PurePath', lambda p: K(':' + p.as_posix()))],
}
lazy_converters['pathlib2'] = lazy_converters['pathlib']
if _PY3K:
lazy_converters['array'] = [('array', K._from_memoryview)]
# If module is already loaded, register converters for its classes
# right away.
def _pre_register_converters():
for name, pairs in lazy_converters.items():
mod = sys.modules.get(name)
if mod is not None:
for cname, conv in pairs:
converters[getattr(mod, cname)] = conv
_pre_register_converters()
del _pre_register_converters
# Replace builtin import to add lazy registration logic
_imp = __builtin__.__import__
def __import__(name, globals={}, locals={}, fromlist=[], level=[-1, 0][_PY3K],
_imp=_imp, _c=converters, _lc=lazy_converters):
m = _imp(name, globals, locals, fromlist, level)
pairs = _lc.get(name)
if pairs is not None:
_c.update((getattr(m, cname), conv) for cname, conv in pairs)
return m
__builtin__.__import__ = __import__
###############################################################################
_genmethods(K, q)
del _genmethods, _imp
###############################################################################
# Casts and constructors
###############################################################################
def _gendescriptors(cls, string_types=(type(b''), type(u''))):
cls._Z = NotImplemented
if Q_VERSION < 3:
cls._UU = cls._kguid = NotImplemented
types = [
# code, char, name, vector, scalar
(1, 'b', 'boolean', cls._B, cls._kb),
(2, 'g', 'guid', cls._UU, cls._kguid),
(4, 'x', 'byte', cls._G, cls._kg),
(5, 'h', 'short', cls._H, cls._kh),
(6, 'i', 'int', cls._I, cls._ki),
(7, 'j', 'long', cls._J, cls._kj),
(8, 'e', 'real', cls._E, cls._ke),
(9, 'f', 'float', cls._F, cls._kf),
(11, 's', 'symbol', cls._S, cls._ks),
(12, 'p', 'timestamp', cls._P, cls._kpz),
(13, 'm', 'month', cls._M, cls._km),
(14, 'd', 'date', cls._D, cls._kd),
(15, 'z', 'datetime', cls._Z, cls._kz),
(16, 'n', 'timespan', cls._N, cls._knz),
(17, 'u', 'minute', cls._U, cls._ku),
(18, 'v', 'second', cls._V, cls._kv),
(19, 't', 'time', cls._T, cls._kt),
]
class Desc:
def __init__(self, code, char, name, vector, scalar):
self.code = cls._kh(code)
self.char = char
self.name = name
self.vector = vector
self.scalar = scalar
def make_constructor(self):
def constructor(x):
# If x is already K - check the type and either
# pass it through or cast to the type needed.
if isinstance(x, K):
if x.type.abs == self.code:
return x
else:
return cls._k(0, '$', self.code, x)
if isinstance(x, _Mapping):
return cls._xD(cls(x.keys()), constructor(x.values()))
try:
return self.vector(x)
except TypeError:
pass
try:
return self.scalar(x)
except TypeError:
return cls._from_sequence(x, constructor)
constructor.__name__ = 'K.' + self.name
if self.code > 4 and int(self.code) != 11:
constructor.inf = q('0W' + self.char)
constructor.na = q('0N' + self.char)
elif self.name == 'guid':
constructor.na = q('0Ng')
return constructor
def __get__(self, instance, owner):
if instance is None:
return self.make_constructor()
# Make sure dict keys and table columns have priority over casts
name = self.name
if instance._t == 98 and name in instance.cols:
return instance[name]
if instance._t == 99: # keyed table or dict
key = instance.key
if key._t == 11 and name in key:
return instance[name]
if key._t == 98 and name in instance.cols:
return instance.exec_(name)
return cls._k(0, '$', self.code, instance)
for code, char, name, vector, scalar in types:
setattr(cls, name, Desc(code, char, name, vector, scalar))
# Special case: string
def make_strings(x):
if isinstance(x, string_types):
return cls._kp(x)
if isinstance(x, _Mapping):
return cls._xD(cls(x.keys()), make_strings(x.values()))
return cls._from_sequence(x, make_strings)
class StringDesc:
def __get__(self, instance, owner):
if instance is None:
return make_strings
# NB: As a reserved word, "string" cannot be a column name but
# can be a key in a dictionary
if instance._t == 99:
key = instance.key
if key._t == 11 and 'string' in key:
return instance['string']
return cls._k(0, 'string', instance)
cls.string = StringDesc()
# Special case: char (like string, but may return a scalar char.)
def make_chars(x):
if isinstance(x, str):
x = x.encode('utf8')
if isinstance(x, bytes):
if len(x) == 1:
return cls._kc(x)
else:
return cls._kp(x)
if isinstance(x, _Mapping):
return cls._xD(cls(x.keys()), make_chars(x.values()))
if not x:
return cls._kp('')
return cls._from_sequence(x, make_chars)
make_chars.inf = q('0Wc')
make_chars.na = q('0Nc')
class CharDesc:
def __get__(self, instance, owner):
if instance is None:
return make_chars
# Make sure dict keys and table columns have priority over casts
name = 'char'
if instance._t == 98 and name in instance.cols:
return instance[name]
if instance._t == 99: # keyed table or dict
key = instance.key
if key._t == 11 and name in key:
return instance[name]
if key._t == 98 and name in instance.cols:
return instance.exec_(name)
return cls._k(0, '`char$', instance)
cls.char = CharDesc()
_gendescriptors(K)
del _gendescriptors
def _genadverbs(cls):
adverbs = [
'each', # '
'over', # /
'scan', # \
'prior', # ':
'sv', # /: - each-right
'vs', # \: - each-left
]
for i, a in enumerate(adverbs):
x = cls._ktj(103, i)
setattr(cls, a, x)
_genadverbs(K)
del _genadverbs
# Traceback support
_K_k = K._k
_K_call = K.__call__
def _set_excepthook(origexcepthook):
def excepthook(exctype, value, traceback):
origexcepthook(exctype, value, traceback)
a = value.args
if exctype is kerr and len(a) > 1:
sbt = _K_k(0, '.Q.sbt', a[1])
print("kdb+ backtrace:\n%s" % sbt,
file=sys.stderr, end='')
sys.excepthook = excepthook
_val = q.value
_enl = q.enlist
def _trp_k(cls_, h, m, *args):
if h != 0:
return cls_._k(h, m, args)
f = cls_._trp(_val, K._knk(1, kp(m)))
if args:
return cls_._trp(f, K(args))
else:
return f
def _trp_call(*args, **kwds):
f = args[0]
args = args[1:]
if f.type < 100:
return _K_call(f, *args, **kwds)
if kwds:
args = f._callargs(*args, **kwds)
if not args:
args = [None]
args = K._knk(len(args), *map(K, args))
return f._trp(args)
if 'PYQ_BACKTRACE' in os.environ and q('.z.K >= 3.5'):
_set_excepthook(sys.excepthook)
K._k = classmethod(_trp_k)
K.__call__ = _trp_call
|
KxSystems/pyq | src/pyq/__init__.py | K.show | python | def show(self, start=0, geometry=None, output=None):
if output is None:
output = sys.stdout
if geometry is None:
geometry = q.value(kp("\\c"))
else:
geometry = self._I(geometry)
if start < 0:
start += q.count(self)
# Make sure nil is not passed to a q function
if self._id() != nil._id():
r = self._show(geometry, start)
else:
r = '::\n'
if isinstance(output, type):
return output(r)
try:
output.write(r)
except TypeError:
output.write(str(r)) | pretty-print data to the console
(similar to q.show, but uses python stdout by default)
>>> x = q('([k:`x`y`z]a:1 2 3;b:10 20 30)')
>>> x.show() # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
x| 1 10
y| 2 20
z| 3 30
the first optional argument, 'start' specifies the first row to be
printed (negative means from the end)
>>> x.show(2) # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
z| 3 30
>>> x.show(-2) # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
y| 2 20
z| 3 30
the geometry is the height and width of the console
>>> x.show(geometry=[4, 6])
k| a..
-| -..
x| 1..
.. | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/__init__.py#L377-L435 | null | class K(_K):
"""proxies for kdb+ objects
>>> q('2005.01.01 2005.12.04')
k('2005.01.01 2005.12.04')
Iteration over simple lists produces python objects
>>> list(q("`a`b`c`d"))
['a', 'b', 'c', 'd']
Iteration over q tables produces q dictionaries
>>> list(q("([]a:`x`y`z;b:1 2 3)"))
[k('`a`b!(`x;1)'), k('`a`b!(`y;2)'), k('`a`b!(`z;3)')]
Iteration over a q dictionary iterates over its key
>>> list(q('`a`b!1 2'))
['a', 'b']
as a consequence, iteration over a keyed table is the same as
iteration over its key table
>>> list(q("([a:`x`y`z]b:1 2 3)"))
[k('(,`a)!,`x'), k('(,`a)!,`y'), k('(,`a)!,`z')]
Callbacks into python
>>> def f(x, y):
... return x + y
>>> q('{[f]f(1;2)}', f)
k('3')
Buffer protocol
The following session illustrates how buffer protocol implemented by
K objects can be used to write data from Python streams directly yo kdb+.
Create a list of chars in kdb+
>>> x = kp('xxxxxx')
Open a pair of file descriptors
>>> r, w = os.pipe()
Write 6 bytes to the write end
>>> os.write(w, b'abcdef')
6
Read from the read-end into x
>>> f = os.fdopen(r, mode='rb')
>>> f.readinto(x)
6
Now x contains the bytes that were sent through the pipe
>>> x
k('"abcdef"')
Close the descriptors and the stream
>>> os.close(w); f.close()
Low level interface
The K type provides a set of low level functions that are similar
to the C API provided by the `k.h header <http://kx.com/q/c/c/k.h>`_.
The C API functions that return K objects in C are implemented as
class methods that return instances of K type.
Atoms
>>> K._kb(True), K._kg(5), K._kh(42), K._ki(-3), K._kj(2**40)
(k('1b'), k('0x05'), k('42h'), k('-3i'), k('1099511627776'))
>>> K._ke(3.5), K._kf(1.0), K._kc(b'x'), K._ks('xyz')
(k('3.5e'), k('1f'), k('"x"'), k('`xyz'))
>>> K._kd(0), K._kz(0.0), K._kt(0)
(k('2000.01.01'), k('2000.01.01T00:00:00.000'), k('00:00:00.000'))
Tables and dictionaries
>>> x = K._xD(k('`a`b`c'), k('1 2 3')); x, K._xT(x)
(k('`a`b`c!1 2 3'), k('+`a`b`c!1 2 3'))
Keyed table
>>> t = K._xD(K._xT(K._xD(k(",`a"), k(",1 2 3"))),
... K._xT(K._xD(k(",`b"), k(",10 20 30"))))
>>> K._ktd(t)
k('+`a`b!(1 2 3;10 20 30)')
"""
# Lighten the K objects by preventing the automatic creation of
# __dict__ and __weakref__ for each instance.
__slots__ = ()
# Helper methods for use in C implementation of __new__
def _set_mask(self, mask):
return q("{?[y;((),x)0N;x]}", self, mask)
@classmethod
def _from_record_array(cls, x):
fields = [f for f, t in x.dtype.descr]
k = q('!', list(fields), [K(x[f]) for f in fields])
if x.ndim:
k = k.flip
return k
@classmethod
def _from_sequence(cls, x, elm=None):
r = cls._ktn(0, 0)
g = iter(x)
try:
i = next(g)
except StopIteration:
return r
en = _K_k(0, 'enlist')
r._ja(en)
if elm is None:
elm = cls
for i in itertools.chain([i], g):
i = elm(i)
# Symbols and lists require special treatment
if i._t in (-11, 11, 0):
i = i.enlist
r._ja(i)
return r.eval
@classmethod
def _convert(cls, x):
for t in type(x).mro():
c = converters.get(t)
if c is not None:
return c(x)
return cls._from_sequence(x)
def __reduce_ex__(self, proto):
x = self._b9(1, self)
b = memoryview(x).tobytes()
return (d9, (b,))
def __getitem__(self, x):
"""
>>> k("10 20 30 40 50")[k("1 3")]
k('20 40')
>>> k("`a`b`c!1 2 3")['b']
2
"""
try:
return _K.__getitem__(self, x)
except (TypeError, NotImplementedError):
pass
try:
start, stop, step = x.indices(len(self))
except AttributeError:
i = K(x)
if self._t == 99 and i._t < 0:
return self.value[self._k(0, "?", self.key, i)]
else:
return self._k(0, "@", self, i)
if step == 1:
return self._k(0, "sublist", self._J([start, stop - start]), self)
# NB: .indices() cannot return step=0.
i = start + step * q.til(max(0, (stop - start) // step))
return self._k(0, "{$[99=type x;(key[x]y)!value[x]y;x y]}", self, i)
def __getattr__(self, a):
"""table columns can be accessed via dot notation
>>> q("([]a:1 2 3; b:10 20 30)").a
k('1 2 3')
"""
t = self._t
if t == 98:
return self._k(0, '{x`%s}' % a, self)
if t == 99:
if self._k(0, "{11h~type key x}", self):
if a == 'items':
# NB: Workaround for a bug in OrderedDict in Python 3.5.
# See http://bugs.python.org/issue27576 for details.
raise AttributeError
return self._k(0, '{x`%s}' % a, self)
return self._k(0, '{(0!x)`%s}' % a, self)
if 12 <= abs(t) < 20:
try:
return self._k(0, "`%s$" % a, self)
except kerr:
pass
raise AttributeError(a)
_fields = b" g@ ghijefgsjiifjiii"
if _PY3K:
_fields = [bytes([_x]) for _x in _fields]
def __int__(self):
"""converts K scalars to python int
>>> [int(q(x)) for x in '1b 2h 3 4e `5 6.0 2000.01.08'.split()]
[1, 2, 3, 4, 5, 6, 7]
"""
t = self._t
if t >= 0:
raise TypeError("cannot convert non-scalar to int")
return int(self.inspect(self._fields[-t]))
__long__ = __int__
def __float__(self):
"""converts K scalars to python float
>>> [float(q(x)) for x in '1b 2h 3 4e `5 6.0 2000.01.08'.split()]
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
"""
t = self._t
if t >= 0:
raise TypeError("cannot convert non-scalar to float")
return float(self.inspect(self._fields[-t]))
def __index__(self):
t = self._t
if -5 >= t >= -7:
return int(self)
raise TypeError("Only scalar short/int/long K objects "
"can be converted to an index")
# Strictly speaking, this is only needed for Python 3.x, but
# there is no harm if this is defined and not used in Python 2.x.
def __bytes__(self):
t = self._t
if -5 >= t >= -7:
return bytes(int(self))
if 0 < abs(t) < 11:
if abs(t) == 2:
# A work-around while .data is not implemented for guid type
from uuid import UUID
x = q('(),', self) # ensure that x is a list
return b''.join(UUID(int=i).bytes for i in x)
return bytes(self.data)
raise BufferError("k object of type %d" % t)
def __eq__(self, other):
"""
>>> K(1) == K(1)
True
>>> K(1) == None
False
"""
try:
other = K(other)
except TypeError:
return False
return bool(k('~')(self, other))
def __ne__(self, other):
"""
>>> K(1) != K(2)
True
"""
return bool(k('~~')(self, other))
def __contains__(self, item):
"""membership test
>>> 1 in q('1 2 3')
True
>>> 'abc' not in q('(1;2.0;`abc)')
False
"""
if self._t:
x = q('in', item, self)
else:
x = q('{sum x~/:y}', item, self)
return bool(x)
def keys(self):
"""returns q('key', self)
Among other uses, enables interoperability between q and
python dicts.
>>> from collections import OrderedDict
>>> OrderedDict(q('`a`b!1 2'))
OrderedDict([('a', 1), ('b', 2)])
>>> d = {}; d.update(q('`a`b!1 2'))
>>> list(sorted(d.items()))
[('a', 1), ('b', 2)]
"""
return self._k(0, 'key', self)
# See issue #665
def decode(self, encoding='utf-8', errors='strict'):
return bytes(self).decode(encoding, errors)
def _seu(self, what, columns, by, where, kwds):
args = [self]
anames = ['self']
if kwds:
extra = sorted(kwds.keys())
args.extend(kwds[name] for name in extra)
anames.extend(extra)
if not isinstance(columns, str):
columns = ','.join(str(x) for x in columns)
query = "{[%s]%s %s " % (';'.join(anames), what, columns)
if by:
if not isinstance(by, str):
by = ','.join(str(x) for x in by)
query += " by " + by
query += " from self"
if where:
if not isinstance(where, str):
where = ','.join(str(x) for x in where)
query += " where " + where
query += '}'
return q(query, *args)
def select(self, columns=(), by=(), where=(), **kwds):
"""select from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.select('a', where='b > 20').show()
a
-
3
"""
return self._seu('select', columns, by, where, kwds)
def exec_(self, columns=(), by=(), where=(), **kwds):
"""exec from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.exec_('a', where='b > 10').show()
2 3
"""
return self._seu('exec', columns, by, where, kwds)
def update(self, columns=(), by=(), where=(), **kwds):
"""update from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.update('a*2',
... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE
a b
----
1 10
2 20
6 30
"""
return self._seu('update', columns, by, where, kwds)
@property
def ss(self):
if self._t == 10:
return q.ss(self)
return q('`ss$', self)
if _np is not None:
@property
def _mask(self):
x = self._get_null()
if x is None:
return _np.ma.nomask
else:
return _np.asarray(x)
from ._n import array as __array__
__array_priority__ = 20
__doc__ += """
K objects can be used in Python arithmetic expressions
>>> x, y, z = map(K, (1, 2, 3))
>>> print(x + y, x * y,
... z/y, x|y, x&y, abs(-z)) #doctest: +NORMALIZE_WHITESPACE
3 2 1.5 2 1 3
Mixing K objects with python numbers is allowed
>>> 1/q('1 2 4')
k('1 0.5 0.25')
>>> q.til(5)**2
k('0 1 4 9 16f')
"""
def __format__(self, fmt):
if fmt:
return format(self._pys(), fmt)
return str(self)
def __sizeof__(self):
return object.__sizeof__(self) + int(self._sizeof())
def __fspath__(self):
"""Return the file system path representation of the object."""
if self._t != -11: # symbol
raise TypeError
sym = str(self)
if not sym.startswith(':'):
raise TypeError
return sym[1:]
def __complex__(self):
"""Called to implement the built-in function complex()."""
if self._t != 99 or self.key != ['re', 'im']:
return complex(float(self))
return complex(float(self.re), float(self.im))
@classmethod
def dict(cls, *args, **kwds):
"""Construct a q dictionary
K.dict() -> new empty q dictionary (q('()!()')
K.dict(mapping) -> new dictionary initialized from a mapping object's
(key, value) pairs
K.dict(iterable) -> new dictionary initialized from an iterable
yielding (key, value) pairs
K.dict(**kwargs) -> new dictionary initialized with the name=value
pairs in the keyword argument list.
For example: K.dict(one=1, two=2)
"""
if args:
if len(args) > 1:
raise TypeError("Too many positional arguments")
x = args[0]
keys = []
vals = []
try:
x_keys = x.keys
except AttributeError:
for k, v in x:
keys.append(k)
vals.append(v)
else:
keys = x_keys()
vals = [x[k] for k in keys]
return q('!', keys, vals)
else:
if kwds:
keys = []
vals = []
for k, v in kwds.items():
keys.append(k)
vals.append(v)
return q('!', keys, vals)
else:
return q('()!()')
@classmethod
def table(cls, *args, **kwds):
if args or kwds:
return cls.dict(*args, **kwds).flip
else:
raise TypeError("A table must have at least one column")
|
KxSystems/pyq | src/pyq/__init__.py | K.select | python | def select(self, columns=(), by=(), where=(), **kwds):
return self._seu('select', columns, by, where, kwds) | select from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.select('a', where='b > 20').show()
a
-
3 | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/__init__.py#L465-L474 | null | class K(_K):
"""proxies for kdb+ objects
>>> q('2005.01.01 2005.12.04')
k('2005.01.01 2005.12.04')
Iteration over simple lists produces python objects
>>> list(q("`a`b`c`d"))
['a', 'b', 'c', 'd']
Iteration over q tables produces q dictionaries
>>> list(q("([]a:`x`y`z;b:1 2 3)"))
[k('`a`b!(`x;1)'), k('`a`b!(`y;2)'), k('`a`b!(`z;3)')]
Iteration over a q dictionary iterates over its key
>>> list(q('`a`b!1 2'))
['a', 'b']
as a consequence, iteration over a keyed table is the same as
iteration over its key table
>>> list(q("([a:`x`y`z]b:1 2 3)"))
[k('(,`a)!,`x'), k('(,`a)!,`y'), k('(,`a)!,`z')]
Callbacks into python
>>> def f(x, y):
... return x + y
>>> q('{[f]f(1;2)}', f)
k('3')
Buffer protocol
The following session illustrates how buffer protocol implemented by
K objects can be used to write data from Python streams directly yo kdb+.
Create a list of chars in kdb+
>>> x = kp('xxxxxx')
Open a pair of file descriptors
>>> r, w = os.pipe()
Write 6 bytes to the write end
>>> os.write(w, b'abcdef')
6
Read from the read-end into x
>>> f = os.fdopen(r, mode='rb')
>>> f.readinto(x)
6
Now x contains the bytes that were sent through the pipe
>>> x
k('"abcdef"')
Close the descriptors and the stream
>>> os.close(w); f.close()
Low level interface
The K type provides a set of low level functions that are similar
to the C API provided by the `k.h header <http://kx.com/q/c/c/k.h>`_.
The C API functions that return K objects in C are implemented as
class methods that return instances of K type.
Atoms
>>> K._kb(True), K._kg(5), K._kh(42), K._ki(-3), K._kj(2**40)
(k('1b'), k('0x05'), k('42h'), k('-3i'), k('1099511627776'))
>>> K._ke(3.5), K._kf(1.0), K._kc(b'x'), K._ks('xyz')
(k('3.5e'), k('1f'), k('"x"'), k('`xyz'))
>>> K._kd(0), K._kz(0.0), K._kt(0)
(k('2000.01.01'), k('2000.01.01T00:00:00.000'), k('00:00:00.000'))
Tables and dictionaries
>>> x = K._xD(k('`a`b`c'), k('1 2 3')); x, K._xT(x)
(k('`a`b`c!1 2 3'), k('+`a`b`c!1 2 3'))
Keyed table
>>> t = K._xD(K._xT(K._xD(k(",`a"), k(",1 2 3"))),
... K._xT(K._xD(k(",`b"), k(",10 20 30"))))
>>> K._ktd(t)
k('+`a`b!(1 2 3;10 20 30)')
"""
# Lighten the K objects by preventing the automatic creation of
# __dict__ and __weakref__ for each instance.
__slots__ = ()
# Helper methods for use in C implementation of __new__
def _set_mask(self, mask):
return q("{?[y;((),x)0N;x]}", self, mask)
@classmethod
def _from_record_array(cls, x):
fields = [f for f, t in x.dtype.descr]
k = q('!', list(fields), [K(x[f]) for f in fields])
if x.ndim:
k = k.flip
return k
@classmethod
def _from_sequence(cls, x, elm=None):
r = cls._ktn(0, 0)
g = iter(x)
try:
i = next(g)
except StopIteration:
return r
en = _K_k(0, 'enlist')
r._ja(en)
if elm is None:
elm = cls
for i in itertools.chain([i], g):
i = elm(i)
# Symbols and lists require special treatment
if i._t in (-11, 11, 0):
i = i.enlist
r._ja(i)
return r.eval
@classmethod
def _convert(cls, x):
for t in type(x).mro():
c = converters.get(t)
if c is not None:
return c(x)
return cls._from_sequence(x)
def __reduce_ex__(self, proto):
x = self._b9(1, self)
b = memoryview(x).tobytes()
return (d9, (b,))
def __getitem__(self, x):
"""
>>> k("10 20 30 40 50")[k("1 3")]
k('20 40')
>>> k("`a`b`c!1 2 3")['b']
2
"""
try:
return _K.__getitem__(self, x)
except (TypeError, NotImplementedError):
pass
try:
start, stop, step = x.indices(len(self))
except AttributeError:
i = K(x)
if self._t == 99 and i._t < 0:
return self.value[self._k(0, "?", self.key, i)]
else:
return self._k(0, "@", self, i)
if step == 1:
return self._k(0, "sublist", self._J([start, stop - start]), self)
# NB: .indices() cannot return step=0.
i = start + step * q.til(max(0, (stop - start) // step))
return self._k(0, "{$[99=type x;(key[x]y)!value[x]y;x y]}", self, i)
def __getattr__(self, a):
"""table columns can be accessed via dot notation
>>> q("([]a:1 2 3; b:10 20 30)").a
k('1 2 3')
"""
t = self._t
if t == 98:
return self._k(0, '{x`%s}' % a, self)
if t == 99:
if self._k(0, "{11h~type key x}", self):
if a == 'items':
# NB: Workaround for a bug in OrderedDict in Python 3.5.
# See http://bugs.python.org/issue27576 for details.
raise AttributeError
return self._k(0, '{x`%s}' % a, self)
return self._k(0, '{(0!x)`%s}' % a, self)
if 12 <= abs(t) < 20:
try:
return self._k(0, "`%s$" % a, self)
except kerr:
pass
raise AttributeError(a)
_fields = b" g@ ghijefgsjiifjiii"
if _PY3K:
_fields = [bytes([_x]) for _x in _fields]
def __int__(self):
"""converts K scalars to python int
>>> [int(q(x)) for x in '1b 2h 3 4e `5 6.0 2000.01.08'.split()]
[1, 2, 3, 4, 5, 6, 7]
"""
t = self._t
if t >= 0:
raise TypeError("cannot convert non-scalar to int")
return int(self.inspect(self._fields[-t]))
__long__ = __int__
def __float__(self):
"""converts K scalars to python float
>>> [float(q(x)) for x in '1b 2h 3 4e `5 6.0 2000.01.08'.split()]
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
"""
t = self._t
if t >= 0:
raise TypeError("cannot convert non-scalar to float")
return float(self.inspect(self._fields[-t]))
def __index__(self):
t = self._t
if -5 >= t >= -7:
return int(self)
raise TypeError("Only scalar short/int/long K objects "
"can be converted to an index")
# Strictly speaking, this is only needed for Python 3.x, but
# there is no harm if this is defined and not used in Python 2.x.
def __bytes__(self):
t = self._t
if -5 >= t >= -7:
return bytes(int(self))
if 0 < abs(t) < 11:
if abs(t) == 2:
# A work-around while .data is not implemented for guid type
from uuid import UUID
x = q('(),', self) # ensure that x is a list
return b''.join(UUID(int=i).bytes for i in x)
return bytes(self.data)
raise BufferError("k object of type %d" % t)
def __eq__(self, other):
"""
>>> K(1) == K(1)
True
>>> K(1) == None
False
"""
try:
other = K(other)
except TypeError:
return False
return bool(k('~')(self, other))
def __ne__(self, other):
"""
>>> K(1) != K(2)
True
"""
return bool(k('~~')(self, other))
def __contains__(self, item):
"""membership test
>>> 1 in q('1 2 3')
True
>>> 'abc' not in q('(1;2.0;`abc)')
False
"""
if self._t:
x = q('in', item, self)
else:
x = q('{sum x~/:y}', item, self)
return bool(x)
def keys(self):
"""returns q('key', self)
Among other uses, enables interoperability between q and
python dicts.
>>> from collections import OrderedDict
>>> OrderedDict(q('`a`b!1 2'))
OrderedDict([('a', 1), ('b', 2)])
>>> d = {}; d.update(q('`a`b!1 2'))
>>> list(sorted(d.items()))
[('a', 1), ('b', 2)]
"""
return self._k(0, 'key', self)
def show(self, start=0, geometry=None, output=None):
"""pretty-print data to the console
(similar to q.show, but uses python stdout by default)
>>> x = q('([k:`x`y`z]a:1 2 3;b:10 20 30)')
>>> x.show() # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
x| 1 10
y| 2 20
z| 3 30
the first optional argument, 'start' specifies the first row to be
printed (negative means from the end)
>>> x.show(2) # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
z| 3 30
>>> x.show(-2) # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
y| 2 20
z| 3 30
the geometry is the height and width of the console
>>> x.show(geometry=[4, 6])
k| a..
-| -..
x| 1..
..
"""
if output is None:
output = sys.stdout
if geometry is None:
geometry = q.value(kp("\\c"))
else:
geometry = self._I(geometry)
if start < 0:
start += q.count(self)
# Make sure nil is not passed to a q function
if self._id() != nil._id():
r = self._show(geometry, start)
else:
r = '::\n'
if isinstance(output, type):
return output(r)
try:
output.write(r)
except TypeError:
output.write(str(r))
# See issue #665
def decode(self, encoding='utf-8', errors='strict'):
return bytes(self).decode(encoding, errors)
def _seu(self, what, columns, by, where, kwds):
args = [self]
anames = ['self']
if kwds:
extra = sorted(kwds.keys())
args.extend(kwds[name] for name in extra)
anames.extend(extra)
if not isinstance(columns, str):
columns = ','.join(str(x) for x in columns)
query = "{[%s]%s %s " % (';'.join(anames), what, columns)
if by:
if not isinstance(by, str):
by = ','.join(str(x) for x in by)
query += " by " + by
query += " from self"
if where:
if not isinstance(where, str):
where = ','.join(str(x) for x in where)
query += " where " + where
query += '}'
return q(query, *args)
def exec_(self, columns=(), by=(), where=(), **kwds):
"""exec from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.exec_('a', where='b > 10').show()
2 3
"""
return self._seu('exec', columns, by, where, kwds)
def update(self, columns=(), by=(), where=(), **kwds):
"""update from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.update('a*2',
... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE
a b
----
1 10
2 20
6 30
"""
return self._seu('update', columns, by, where, kwds)
@property
def ss(self):
if self._t == 10:
return q.ss(self)
return q('`ss$', self)
if _np is not None:
@property
def _mask(self):
x = self._get_null()
if x is None:
return _np.ma.nomask
else:
return _np.asarray(x)
from ._n import array as __array__
__array_priority__ = 20
__doc__ += """
K objects can be used in Python arithmetic expressions
>>> x, y, z = map(K, (1, 2, 3))
>>> print(x + y, x * y,
... z/y, x|y, x&y, abs(-z)) #doctest: +NORMALIZE_WHITESPACE
3 2 1.5 2 1 3
Mixing K objects with python numbers is allowed
>>> 1/q('1 2 4')
k('1 0.5 0.25')
>>> q.til(5)**2
k('0 1 4 9 16f')
"""
def __format__(self, fmt):
if fmt:
return format(self._pys(), fmt)
return str(self)
def __sizeof__(self):
return object.__sizeof__(self) + int(self._sizeof())
def __fspath__(self):
"""Return the file system path representation of the object."""
if self._t != -11: # symbol
raise TypeError
sym = str(self)
if not sym.startswith(':'):
raise TypeError
return sym[1:]
def __complex__(self):
"""Called to implement the built-in function complex()."""
if self._t != 99 or self.key != ['re', 'im']:
return complex(float(self))
return complex(float(self.re), float(self.im))
@classmethod
def dict(cls, *args, **kwds):
"""Construct a q dictionary
K.dict() -> new empty q dictionary (q('()!()')
K.dict(mapping) -> new dictionary initialized from a mapping object's
(key, value) pairs
K.dict(iterable) -> new dictionary initialized from an iterable
yielding (key, value) pairs
K.dict(**kwargs) -> new dictionary initialized with the name=value
pairs in the keyword argument list.
For example: K.dict(one=1, two=2)
"""
if args:
if len(args) > 1:
raise TypeError("Too many positional arguments")
x = args[0]
keys = []
vals = []
try:
x_keys = x.keys
except AttributeError:
for k, v in x:
keys.append(k)
vals.append(v)
else:
keys = x_keys()
vals = [x[k] for k in keys]
return q('!', keys, vals)
else:
if kwds:
keys = []
vals = []
for k, v in kwds.items():
keys.append(k)
vals.append(v)
return q('!', keys, vals)
else:
return q('()!()')
@classmethod
def table(cls, *args, **kwds):
if args or kwds:
return cls.dict(*args, **kwds).flip
else:
raise TypeError("A table must have at least one column")
|
KxSystems/pyq | src/pyq/__init__.py | K.exec_ | python | def exec_(self, columns=(), by=(), where=(), **kwds):
return self._seu('exec', columns, by, where, kwds) | exec from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.exec_('a', where='b > 10').show()
2 3 | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/__init__.py#L476-L483 | null | class K(_K):
"""proxies for kdb+ objects
>>> q('2005.01.01 2005.12.04')
k('2005.01.01 2005.12.04')
Iteration over simple lists produces python objects
>>> list(q("`a`b`c`d"))
['a', 'b', 'c', 'd']
Iteration over q tables produces q dictionaries
>>> list(q("([]a:`x`y`z;b:1 2 3)"))
[k('`a`b!(`x;1)'), k('`a`b!(`y;2)'), k('`a`b!(`z;3)')]
Iteration over a q dictionary iterates over its key
>>> list(q('`a`b!1 2'))
['a', 'b']
as a consequence, iteration over a keyed table is the same as
iteration over its key table
>>> list(q("([a:`x`y`z]b:1 2 3)"))
[k('(,`a)!,`x'), k('(,`a)!,`y'), k('(,`a)!,`z')]
Callbacks into python
>>> def f(x, y):
... return x + y
>>> q('{[f]f(1;2)}', f)
k('3')
Buffer protocol
The following session illustrates how buffer protocol implemented by
K objects can be used to write data from Python streams directly yo kdb+.
Create a list of chars in kdb+
>>> x = kp('xxxxxx')
Open a pair of file descriptors
>>> r, w = os.pipe()
Write 6 bytes to the write end
>>> os.write(w, b'abcdef')
6
Read from the read-end into x
>>> f = os.fdopen(r, mode='rb')
>>> f.readinto(x)
6
Now x contains the bytes that were sent through the pipe
>>> x
k('"abcdef"')
Close the descriptors and the stream
>>> os.close(w); f.close()
Low level interface
The K type provides a set of low level functions that are similar
to the C API provided by the `k.h header <http://kx.com/q/c/c/k.h>`_.
The C API functions that return K objects in C are implemented as
class methods that return instances of K type.
Atoms
>>> K._kb(True), K._kg(5), K._kh(42), K._ki(-3), K._kj(2**40)
(k('1b'), k('0x05'), k('42h'), k('-3i'), k('1099511627776'))
>>> K._ke(3.5), K._kf(1.0), K._kc(b'x'), K._ks('xyz')
(k('3.5e'), k('1f'), k('"x"'), k('`xyz'))
>>> K._kd(0), K._kz(0.0), K._kt(0)
(k('2000.01.01'), k('2000.01.01T00:00:00.000'), k('00:00:00.000'))
Tables and dictionaries
>>> x = K._xD(k('`a`b`c'), k('1 2 3')); x, K._xT(x)
(k('`a`b`c!1 2 3'), k('+`a`b`c!1 2 3'))
Keyed table
>>> t = K._xD(K._xT(K._xD(k(",`a"), k(",1 2 3"))),
... K._xT(K._xD(k(",`b"), k(",10 20 30"))))
>>> K._ktd(t)
k('+`a`b!(1 2 3;10 20 30)')
"""
# Lighten the K objects by preventing the automatic creation of
# __dict__ and __weakref__ for each instance.
__slots__ = ()
# Helper methods for use in C implementation of __new__
def _set_mask(self, mask):
return q("{?[y;((),x)0N;x]}", self, mask)
@classmethod
def _from_record_array(cls, x):
fields = [f for f, t in x.dtype.descr]
k = q('!', list(fields), [K(x[f]) for f in fields])
if x.ndim:
k = k.flip
return k
@classmethod
def _from_sequence(cls, x, elm=None):
r = cls._ktn(0, 0)
g = iter(x)
try:
i = next(g)
except StopIteration:
return r
en = _K_k(0, 'enlist')
r._ja(en)
if elm is None:
elm = cls
for i in itertools.chain([i], g):
i = elm(i)
# Symbols and lists require special treatment
if i._t in (-11, 11, 0):
i = i.enlist
r._ja(i)
return r.eval
@classmethod
def _convert(cls, x):
for t in type(x).mro():
c = converters.get(t)
if c is not None:
return c(x)
return cls._from_sequence(x)
def __reduce_ex__(self, proto):
x = self._b9(1, self)
b = memoryview(x).tobytes()
return (d9, (b,))
def __getitem__(self, x):
"""
>>> k("10 20 30 40 50")[k("1 3")]
k('20 40')
>>> k("`a`b`c!1 2 3")['b']
2
"""
try:
return _K.__getitem__(self, x)
except (TypeError, NotImplementedError):
pass
try:
start, stop, step = x.indices(len(self))
except AttributeError:
i = K(x)
if self._t == 99 and i._t < 0:
return self.value[self._k(0, "?", self.key, i)]
else:
return self._k(0, "@", self, i)
if step == 1:
return self._k(0, "sublist", self._J([start, stop - start]), self)
# NB: .indices() cannot return step=0.
i = start + step * q.til(max(0, (stop - start) // step))
return self._k(0, "{$[99=type x;(key[x]y)!value[x]y;x y]}", self, i)
def __getattr__(self, a):
"""table columns can be accessed via dot notation
>>> q("([]a:1 2 3; b:10 20 30)").a
k('1 2 3')
"""
t = self._t
if t == 98:
return self._k(0, '{x`%s}' % a, self)
if t == 99:
if self._k(0, "{11h~type key x}", self):
if a == 'items':
# NB: Workaround for a bug in OrderedDict in Python 3.5.
# See http://bugs.python.org/issue27576 for details.
raise AttributeError
return self._k(0, '{x`%s}' % a, self)
return self._k(0, '{(0!x)`%s}' % a, self)
if 12 <= abs(t) < 20:
try:
return self._k(0, "`%s$" % a, self)
except kerr:
pass
raise AttributeError(a)
_fields = b" g@ ghijefgsjiifjiii"
if _PY3K:
_fields = [bytes([_x]) for _x in _fields]
def __int__(self):
"""converts K scalars to python int
>>> [int(q(x)) for x in '1b 2h 3 4e `5 6.0 2000.01.08'.split()]
[1, 2, 3, 4, 5, 6, 7]
"""
t = self._t
if t >= 0:
raise TypeError("cannot convert non-scalar to int")
return int(self.inspect(self._fields[-t]))
__long__ = __int__
def __float__(self):
"""converts K scalars to python float
>>> [float(q(x)) for x in '1b 2h 3 4e `5 6.0 2000.01.08'.split()]
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
"""
t = self._t
if t >= 0:
raise TypeError("cannot convert non-scalar to float")
return float(self.inspect(self._fields[-t]))
def __index__(self):
t = self._t
if -5 >= t >= -7:
return int(self)
raise TypeError("Only scalar short/int/long K objects "
"can be converted to an index")
# Strictly speaking, this is only needed for Python 3.x, but
# there is no harm if this is defined and not used in Python 2.x.
def __bytes__(self):
t = self._t
if -5 >= t >= -7:
return bytes(int(self))
if 0 < abs(t) < 11:
if abs(t) == 2:
# A work-around while .data is not implemented for guid type
from uuid import UUID
x = q('(),', self) # ensure that x is a list
return b''.join(UUID(int=i).bytes for i in x)
return bytes(self.data)
raise BufferError("k object of type %d" % t)
def __eq__(self, other):
"""
>>> K(1) == K(1)
True
>>> K(1) == None
False
"""
try:
other = K(other)
except TypeError:
return False
return bool(k('~')(self, other))
def __ne__(self, other):
"""
>>> K(1) != K(2)
True
"""
return bool(k('~~')(self, other))
def __contains__(self, item):
"""membership test
>>> 1 in q('1 2 3')
True
>>> 'abc' not in q('(1;2.0;`abc)')
False
"""
if self._t:
x = q('in', item, self)
else:
x = q('{sum x~/:y}', item, self)
return bool(x)
def keys(self):
"""returns q('key', self)
Among other uses, enables interoperability between q and
python dicts.
>>> from collections import OrderedDict
>>> OrderedDict(q('`a`b!1 2'))
OrderedDict([('a', 1), ('b', 2)])
>>> d = {}; d.update(q('`a`b!1 2'))
>>> list(sorted(d.items()))
[('a', 1), ('b', 2)]
"""
return self._k(0, 'key', self)
def show(self, start=0, geometry=None, output=None):
"""pretty-print data to the console
(similar to q.show, but uses python stdout by default)
>>> x = q('([k:`x`y`z]a:1 2 3;b:10 20 30)')
>>> x.show() # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
x| 1 10
y| 2 20
z| 3 30
the first optional argument, 'start' specifies the first row to be
printed (negative means from the end)
>>> x.show(2) # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
z| 3 30
>>> x.show(-2) # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
y| 2 20
z| 3 30
the geometry is the height and width of the console
>>> x.show(geometry=[4, 6])
k| a..
-| -..
x| 1..
..
"""
if output is None:
output = sys.stdout
if geometry is None:
geometry = q.value(kp("\\c"))
else:
geometry = self._I(geometry)
if start < 0:
start += q.count(self)
# Make sure nil is not passed to a q function
if self._id() != nil._id():
r = self._show(geometry, start)
else:
r = '::\n'
if isinstance(output, type):
return output(r)
try:
output.write(r)
except TypeError:
output.write(str(r))
# See issue #665
def decode(self, encoding='utf-8', errors='strict'):
return bytes(self).decode(encoding, errors)
def _seu(self, what, columns, by, where, kwds):
args = [self]
anames = ['self']
if kwds:
extra = sorted(kwds.keys())
args.extend(kwds[name] for name in extra)
anames.extend(extra)
if not isinstance(columns, str):
columns = ','.join(str(x) for x in columns)
query = "{[%s]%s %s " % (';'.join(anames), what, columns)
if by:
if not isinstance(by, str):
by = ','.join(str(x) for x in by)
query += " by " + by
query += " from self"
if where:
if not isinstance(where, str):
where = ','.join(str(x) for x in where)
query += " where " + where
query += '}'
return q(query, *args)
def select(self, columns=(), by=(), where=(), **kwds):
"""select from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.select('a', where='b > 20').show()
a
-
3
"""
return self._seu('select', columns, by, where, kwds)
def update(self, columns=(), by=(), where=(), **kwds):
"""update from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.update('a*2',
... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE
a b
----
1 10
2 20
6 30
"""
return self._seu('update', columns, by, where, kwds)
@property
def ss(self):
if self._t == 10:
return q.ss(self)
return q('`ss$', self)
if _np is not None:
@property
def _mask(self):
x = self._get_null()
if x is None:
return _np.ma.nomask
else:
return _np.asarray(x)
from ._n import array as __array__
__array_priority__ = 20
__doc__ += """
K objects can be used in Python arithmetic expressions
>>> x, y, z = map(K, (1, 2, 3))
>>> print(x + y, x * y,
... z/y, x|y, x&y, abs(-z)) #doctest: +NORMALIZE_WHITESPACE
3 2 1.5 2 1 3
Mixing K objects with python numbers is allowed
>>> 1/q('1 2 4')
k('1 0.5 0.25')
>>> q.til(5)**2
k('0 1 4 9 16f')
"""
def __format__(self, fmt):
if fmt:
return format(self._pys(), fmt)
return str(self)
def __sizeof__(self):
return object.__sizeof__(self) + int(self._sizeof())
def __fspath__(self):
"""Return the file system path representation of the object."""
if self._t != -11: # symbol
raise TypeError
sym = str(self)
if not sym.startswith(':'):
raise TypeError
return sym[1:]
def __complex__(self):
"""Called to implement the built-in function complex()."""
if self._t != 99 or self.key != ['re', 'im']:
return complex(float(self))
return complex(float(self.re), float(self.im))
@classmethod
def dict(cls, *args, **kwds):
"""Construct a q dictionary
K.dict() -> new empty q dictionary (q('()!()')
K.dict(mapping) -> new dictionary initialized from a mapping object's
(key, value) pairs
K.dict(iterable) -> new dictionary initialized from an iterable
yielding (key, value) pairs
K.dict(**kwargs) -> new dictionary initialized with the name=value
pairs in the keyword argument list.
For example: K.dict(one=1, two=2)
"""
if args:
if len(args) > 1:
raise TypeError("Too many positional arguments")
x = args[0]
keys = []
vals = []
try:
x_keys = x.keys
except AttributeError:
for k, v in x:
keys.append(k)
vals.append(v)
else:
keys = x_keys()
vals = [x[k] for k in keys]
return q('!', keys, vals)
else:
if kwds:
keys = []
vals = []
for k, v in kwds.items():
keys.append(k)
vals.append(v)
return q('!', keys, vals)
else:
return q('()!()')
@classmethod
def table(cls, *args, **kwds):
if args or kwds:
return cls.dict(*args, **kwds).flip
else:
raise TypeError("A table must have at least one column")
|
KxSystems/pyq | src/pyq/__init__.py | K.update | python | def update(self, columns=(), by=(), where=(), **kwds):
return self._seu('update', columns, by, where, kwds) | update from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.update('a*2',
... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE
a b
----
1 10
2 20
6 30 | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/__init__.py#L485-L497 | null | class K(_K):
"""proxies for kdb+ objects
>>> q('2005.01.01 2005.12.04')
k('2005.01.01 2005.12.04')
Iteration over simple lists produces python objects
>>> list(q("`a`b`c`d"))
['a', 'b', 'c', 'd']
Iteration over q tables produces q dictionaries
>>> list(q("([]a:`x`y`z;b:1 2 3)"))
[k('`a`b!(`x;1)'), k('`a`b!(`y;2)'), k('`a`b!(`z;3)')]
Iteration over a q dictionary iterates over its key
>>> list(q('`a`b!1 2'))
['a', 'b']
as a consequence, iteration over a keyed table is the same as
iteration over its key table
>>> list(q("([a:`x`y`z]b:1 2 3)"))
[k('(,`a)!,`x'), k('(,`a)!,`y'), k('(,`a)!,`z')]
Callbacks into python
>>> def f(x, y):
... return x + y
>>> q('{[f]f(1;2)}', f)
k('3')
Buffer protocol
The following session illustrates how buffer protocol implemented by
K objects can be used to write data from Python streams directly yo kdb+.
Create a list of chars in kdb+
>>> x = kp('xxxxxx')
Open a pair of file descriptors
>>> r, w = os.pipe()
Write 6 bytes to the write end
>>> os.write(w, b'abcdef')
6
Read from the read-end into x
>>> f = os.fdopen(r, mode='rb')
>>> f.readinto(x)
6
Now x contains the bytes that were sent through the pipe
>>> x
k('"abcdef"')
Close the descriptors and the stream
>>> os.close(w); f.close()
Low level interface
The K type provides a set of low level functions that are similar
to the C API provided by the `k.h header <http://kx.com/q/c/c/k.h>`_.
The C API functions that return K objects in C are implemented as
class methods that return instances of K type.
Atoms
>>> K._kb(True), K._kg(5), K._kh(42), K._ki(-3), K._kj(2**40)
(k('1b'), k('0x05'), k('42h'), k('-3i'), k('1099511627776'))
>>> K._ke(3.5), K._kf(1.0), K._kc(b'x'), K._ks('xyz')
(k('3.5e'), k('1f'), k('"x"'), k('`xyz'))
>>> K._kd(0), K._kz(0.0), K._kt(0)
(k('2000.01.01'), k('2000.01.01T00:00:00.000'), k('00:00:00.000'))
Tables and dictionaries
>>> x = K._xD(k('`a`b`c'), k('1 2 3')); x, K._xT(x)
(k('`a`b`c!1 2 3'), k('+`a`b`c!1 2 3'))
Keyed table
>>> t = K._xD(K._xT(K._xD(k(",`a"), k(",1 2 3"))),
... K._xT(K._xD(k(",`b"), k(",10 20 30"))))
>>> K._ktd(t)
k('+`a`b!(1 2 3;10 20 30)')
"""
# Lighten the K objects by preventing the automatic creation of
# __dict__ and __weakref__ for each instance.
__slots__ = ()
# Helper methods for use in C implementation of __new__
def _set_mask(self, mask):
return q("{?[y;((),x)0N;x]}", self, mask)
@classmethod
def _from_record_array(cls, x):
fields = [f for f, t in x.dtype.descr]
k = q('!', list(fields), [K(x[f]) for f in fields])
if x.ndim:
k = k.flip
return k
@classmethod
def _from_sequence(cls, x, elm=None):
r = cls._ktn(0, 0)
g = iter(x)
try:
i = next(g)
except StopIteration:
return r
en = _K_k(0, 'enlist')
r._ja(en)
if elm is None:
elm = cls
for i in itertools.chain([i], g):
i = elm(i)
# Symbols and lists require special treatment
if i._t in (-11, 11, 0):
i = i.enlist
r._ja(i)
return r.eval
@classmethod
def _convert(cls, x):
for t in type(x).mro():
c = converters.get(t)
if c is not None:
return c(x)
return cls._from_sequence(x)
def __reduce_ex__(self, proto):
x = self._b9(1, self)
b = memoryview(x).tobytes()
return (d9, (b,))
def __getitem__(self, x):
"""
>>> k("10 20 30 40 50")[k("1 3")]
k('20 40')
>>> k("`a`b`c!1 2 3")['b']
2
"""
try:
return _K.__getitem__(self, x)
except (TypeError, NotImplementedError):
pass
try:
start, stop, step = x.indices(len(self))
except AttributeError:
i = K(x)
if self._t == 99 and i._t < 0:
return self.value[self._k(0, "?", self.key, i)]
else:
return self._k(0, "@", self, i)
if step == 1:
return self._k(0, "sublist", self._J([start, stop - start]), self)
# NB: .indices() cannot return step=0.
i = start + step * q.til(max(0, (stop - start) // step))
return self._k(0, "{$[99=type x;(key[x]y)!value[x]y;x y]}", self, i)
def __getattr__(self, a):
"""table columns can be accessed via dot notation
>>> q("([]a:1 2 3; b:10 20 30)").a
k('1 2 3')
"""
t = self._t
if t == 98:
return self._k(0, '{x`%s}' % a, self)
if t == 99:
if self._k(0, "{11h~type key x}", self):
if a == 'items':
# NB: Workaround for a bug in OrderedDict in Python 3.5.
# See http://bugs.python.org/issue27576 for details.
raise AttributeError
return self._k(0, '{x`%s}' % a, self)
return self._k(0, '{(0!x)`%s}' % a, self)
if 12 <= abs(t) < 20:
try:
return self._k(0, "`%s$" % a, self)
except kerr:
pass
raise AttributeError(a)
_fields = b" g@ ghijefgsjiifjiii"
if _PY3K:
_fields = [bytes([_x]) for _x in _fields]
def __int__(self):
"""converts K scalars to python int
>>> [int(q(x)) for x in '1b 2h 3 4e `5 6.0 2000.01.08'.split()]
[1, 2, 3, 4, 5, 6, 7]
"""
t = self._t
if t >= 0:
raise TypeError("cannot convert non-scalar to int")
return int(self.inspect(self._fields[-t]))
__long__ = __int__
def __float__(self):
"""converts K scalars to python float
>>> [float(q(x)) for x in '1b 2h 3 4e `5 6.0 2000.01.08'.split()]
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
"""
t = self._t
if t >= 0:
raise TypeError("cannot convert non-scalar to float")
return float(self.inspect(self._fields[-t]))
def __index__(self):
t = self._t
if -5 >= t >= -7:
return int(self)
raise TypeError("Only scalar short/int/long K objects "
"can be converted to an index")
# Strictly speaking, this is only needed for Python 3.x, but
# there is no harm if this is defined and not used in Python 2.x.
def __bytes__(self):
t = self._t
if -5 >= t >= -7:
return bytes(int(self))
if 0 < abs(t) < 11:
if abs(t) == 2:
# A work-around while .data is not implemented for guid type
from uuid import UUID
x = q('(),', self) # ensure that x is a list
return b''.join(UUID(int=i).bytes for i in x)
return bytes(self.data)
raise BufferError("k object of type %d" % t)
def __eq__(self, other):
"""
>>> K(1) == K(1)
True
>>> K(1) == None
False
"""
try:
other = K(other)
except TypeError:
return False
return bool(k('~')(self, other))
def __ne__(self, other):
"""
>>> K(1) != K(2)
True
"""
return bool(k('~~')(self, other))
def __contains__(self, item):
"""membership test
>>> 1 in q('1 2 3')
True
>>> 'abc' not in q('(1;2.0;`abc)')
False
"""
if self._t:
x = q('in', item, self)
else:
x = q('{sum x~/:y}', item, self)
return bool(x)
def keys(self):
"""returns q('key', self)
Among other uses, enables interoperability between q and
python dicts.
>>> from collections import OrderedDict
>>> OrderedDict(q('`a`b!1 2'))
OrderedDict([('a', 1), ('b', 2)])
>>> d = {}; d.update(q('`a`b!1 2'))
>>> list(sorted(d.items()))
[('a', 1), ('b', 2)]
"""
return self._k(0, 'key', self)
def show(self, start=0, geometry=None, output=None):
"""pretty-print data to the console
(similar to q.show, but uses python stdout by default)
>>> x = q('([k:`x`y`z]a:1 2 3;b:10 20 30)')
>>> x.show() # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
x| 1 10
y| 2 20
z| 3 30
the first optional argument, 'start' specifies the first row to be
printed (negative means from the end)
>>> x.show(2) # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
z| 3 30
>>> x.show(-2) # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
y| 2 20
z| 3 30
the geometry is the height and width of the console
>>> x.show(geometry=[4, 6])
k| a..
-| -..
x| 1..
..
"""
if output is None:
output = sys.stdout
if geometry is None:
geometry = q.value(kp("\\c"))
else:
geometry = self._I(geometry)
if start < 0:
start += q.count(self)
# Make sure nil is not passed to a q function
if self._id() != nil._id():
r = self._show(geometry, start)
else:
r = '::\n'
if isinstance(output, type):
return output(r)
try:
output.write(r)
except TypeError:
output.write(str(r))
# See issue #665
def decode(self, encoding='utf-8', errors='strict'):
return bytes(self).decode(encoding, errors)
def _seu(self, what, columns, by, where, kwds):
args = [self]
anames = ['self']
if kwds:
extra = sorted(kwds.keys())
args.extend(kwds[name] for name in extra)
anames.extend(extra)
if not isinstance(columns, str):
columns = ','.join(str(x) for x in columns)
query = "{[%s]%s %s " % (';'.join(anames), what, columns)
if by:
if not isinstance(by, str):
by = ','.join(str(x) for x in by)
query += " by " + by
query += " from self"
if where:
if not isinstance(where, str):
where = ','.join(str(x) for x in where)
query += " where " + where
query += '}'
return q(query, *args)
def select(self, columns=(), by=(), where=(), **kwds):
"""select from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.select('a', where='b > 20').show()
a
-
3
"""
return self._seu('select', columns, by, where, kwds)
def exec_(self, columns=(), by=(), where=(), **kwds):
"""exec from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.exec_('a', where='b > 10').show()
2 3
"""
return self._seu('exec', columns, by, where, kwds)
@property
def ss(self):
if self._t == 10:
return q.ss(self)
return q('`ss$', self)
if _np is not None:
@property
def _mask(self):
x = self._get_null()
if x is None:
return _np.ma.nomask
else:
return _np.asarray(x)
from ._n import array as __array__
__array_priority__ = 20
__doc__ += """
K objects can be used in Python arithmetic expressions
>>> x, y, z = map(K, (1, 2, 3))
>>> print(x + y, x * y,
... z/y, x|y, x&y, abs(-z)) #doctest: +NORMALIZE_WHITESPACE
3 2 1.5 2 1 3
Mixing K objects with python numbers is allowed
>>> 1/q('1 2 4')
k('1 0.5 0.25')
>>> q.til(5)**2
k('0 1 4 9 16f')
"""
def __format__(self, fmt):
if fmt:
return format(self._pys(), fmt)
return str(self)
def __sizeof__(self):
return object.__sizeof__(self) + int(self._sizeof())
def __fspath__(self):
"""Return the file system path representation of the object."""
if self._t != -11: # symbol
raise TypeError
sym = str(self)
if not sym.startswith(':'):
raise TypeError
return sym[1:]
def __complex__(self):
"""Called to implement the built-in function complex()."""
if self._t != 99 or self.key != ['re', 'im']:
return complex(float(self))
return complex(float(self.re), float(self.im))
@classmethod
def dict(cls, *args, **kwds):
"""Construct a q dictionary
K.dict() -> new empty q dictionary (q('()!()')
K.dict(mapping) -> new dictionary initialized from a mapping object's
(key, value) pairs
K.dict(iterable) -> new dictionary initialized from an iterable
yielding (key, value) pairs
K.dict(**kwargs) -> new dictionary initialized with the name=value
pairs in the keyword argument list.
For example: K.dict(one=1, two=2)
"""
if args:
if len(args) > 1:
raise TypeError("Too many positional arguments")
x = args[0]
keys = []
vals = []
try:
x_keys = x.keys
except AttributeError:
for k, v in x:
keys.append(k)
vals.append(v)
else:
keys = x_keys()
vals = [x[k] for k in keys]
return q('!', keys, vals)
else:
if kwds:
keys = []
vals = []
for k, v in kwds.items():
keys.append(k)
vals.append(v)
return q('!', keys, vals)
else:
return q('()!()')
@classmethod
def table(cls, *args, **kwds):
if args or kwds:
return cls.dict(*args, **kwds).flip
else:
raise TypeError("A table must have at least one column")
|
KxSystems/pyq | src/pyq/__init__.py | K.dict | python | def dict(cls, *args, **kwds):
if args:
if len(args) > 1:
raise TypeError("Too many positional arguments")
x = args[0]
keys = []
vals = []
try:
x_keys = x.keys
except AttributeError:
for k, v in x:
keys.append(k)
vals.append(v)
else:
keys = x_keys()
vals = [x[k] for k in keys]
return q('!', keys, vals)
else:
if kwds:
keys = []
vals = []
for k, v in kwds.items():
keys.append(k)
vals.append(v)
return q('!', keys, vals)
else:
return q('()!()') | Construct a q dictionary
K.dict() -> new empty q dictionary (q('()!()')
K.dict(mapping) -> new dictionary initialized from a mapping object's
(key, value) pairs
K.dict(iterable) -> new dictionary initialized from an iterable
yielding (key, value) pairs
K.dict(**kwargs) -> new dictionary initialized with the name=value
pairs in the keyword argument list.
For example: K.dict(one=1, two=2) | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/__init__.py#L558-L595 | null | class K(_K):
"""proxies for kdb+ objects
>>> q('2005.01.01 2005.12.04')
k('2005.01.01 2005.12.04')
Iteration over simple lists produces python objects
>>> list(q("`a`b`c`d"))
['a', 'b', 'c', 'd']
Iteration over q tables produces q dictionaries
>>> list(q("([]a:`x`y`z;b:1 2 3)"))
[k('`a`b!(`x;1)'), k('`a`b!(`y;2)'), k('`a`b!(`z;3)')]
Iteration over a q dictionary iterates over its key
>>> list(q('`a`b!1 2'))
['a', 'b']
as a consequence, iteration over a keyed table is the same as
iteration over its key table
>>> list(q("([a:`x`y`z]b:1 2 3)"))
[k('(,`a)!,`x'), k('(,`a)!,`y'), k('(,`a)!,`z')]
Callbacks into python
>>> def f(x, y):
... return x + y
>>> q('{[f]f(1;2)}', f)
k('3')
Buffer protocol
The following session illustrates how buffer protocol implemented by
K objects can be used to write data from Python streams directly yo kdb+.
Create a list of chars in kdb+
>>> x = kp('xxxxxx')
Open a pair of file descriptors
>>> r, w = os.pipe()
Write 6 bytes to the write end
>>> os.write(w, b'abcdef')
6
Read from the read-end into x
>>> f = os.fdopen(r, mode='rb')
>>> f.readinto(x)
6
Now x contains the bytes that were sent through the pipe
>>> x
k('"abcdef"')
Close the descriptors and the stream
>>> os.close(w); f.close()
Low level interface
The K type provides a set of low level functions that are similar
to the C API provided by the `k.h header <http://kx.com/q/c/c/k.h>`_.
The C API functions that return K objects in C are implemented as
class methods that return instances of K type.
Atoms
>>> K._kb(True), K._kg(5), K._kh(42), K._ki(-3), K._kj(2**40)
(k('1b'), k('0x05'), k('42h'), k('-3i'), k('1099511627776'))
>>> K._ke(3.5), K._kf(1.0), K._kc(b'x'), K._ks('xyz')
(k('3.5e'), k('1f'), k('"x"'), k('`xyz'))
>>> K._kd(0), K._kz(0.0), K._kt(0)
(k('2000.01.01'), k('2000.01.01T00:00:00.000'), k('00:00:00.000'))
Tables and dictionaries
>>> x = K._xD(k('`a`b`c'), k('1 2 3')); x, K._xT(x)
(k('`a`b`c!1 2 3'), k('+`a`b`c!1 2 3'))
Keyed table
>>> t = K._xD(K._xT(K._xD(k(",`a"), k(",1 2 3"))),
... K._xT(K._xD(k(",`b"), k(",10 20 30"))))
>>> K._ktd(t)
k('+`a`b!(1 2 3;10 20 30)')
"""
# Lighten the K objects by preventing the automatic creation of
# __dict__ and __weakref__ for each instance.
__slots__ = ()
# Helper methods for use in C implementation of __new__
def _set_mask(self, mask):
return q("{?[y;((),x)0N;x]}", self, mask)
@classmethod
def _from_record_array(cls, x):
fields = [f for f, t in x.dtype.descr]
k = q('!', list(fields), [K(x[f]) for f in fields])
if x.ndim:
k = k.flip
return k
@classmethod
def _from_sequence(cls, x, elm=None):
r = cls._ktn(0, 0)
g = iter(x)
try:
i = next(g)
except StopIteration:
return r
en = _K_k(0, 'enlist')
r._ja(en)
if elm is None:
elm = cls
for i in itertools.chain([i], g):
i = elm(i)
# Symbols and lists require special treatment
if i._t in (-11, 11, 0):
i = i.enlist
r._ja(i)
return r.eval
@classmethod
def _convert(cls, x):
for t in type(x).mro():
c = converters.get(t)
if c is not None:
return c(x)
return cls._from_sequence(x)
def __reduce_ex__(self, proto):
x = self._b9(1, self)
b = memoryview(x).tobytes()
return (d9, (b,))
def __getitem__(self, x):
"""
>>> k("10 20 30 40 50")[k("1 3")]
k('20 40')
>>> k("`a`b`c!1 2 3")['b']
2
"""
try:
return _K.__getitem__(self, x)
except (TypeError, NotImplementedError):
pass
try:
start, stop, step = x.indices(len(self))
except AttributeError:
i = K(x)
if self._t == 99 and i._t < 0:
return self.value[self._k(0, "?", self.key, i)]
else:
return self._k(0, "@", self, i)
if step == 1:
return self._k(0, "sublist", self._J([start, stop - start]), self)
# NB: .indices() cannot return step=0.
i = start + step * q.til(max(0, (stop - start) // step))
return self._k(0, "{$[99=type x;(key[x]y)!value[x]y;x y]}", self, i)
def __getattr__(self, a):
"""table columns can be accessed via dot notation
>>> q("([]a:1 2 3; b:10 20 30)").a
k('1 2 3')
"""
t = self._t
if t == 98:
return self._k(0, '{x`%s}' % a, self)
if t == 99:
if self._k(0, "{11h~type key x}", self):
if a == 'items':
# NB: Workaround for a bug in OrderedDict in Python 3.5.
# See http://bugs.python.org/issue27576 for details.
raise AttributeError
return self._k(0, '{x`%s}' % a, self)
return self._k(0, '{(0!x)`%s}' % a, self)
if 12 <= abs(t) < 20:
try:
return self._k(0, "`%s$" % a, self)
except kerr:
pass
raise AttributeError(a)
_fields = b" g@ ghijefgsjiifjiii"
if _PY3K:
_fields = [bytes([_x]) for _x in _fields]
def __int__(self):
"""converts K scalars to python int
>>> [int(q(x)) for x in '1b 2h 3 4e `5 6.0 2000.01.08'.split()]
[1, 2, 3, 4, 5, 6, 7]
"""
t = self._t
if t >= 0:
raise TypeError("cannot convert non-scalar to int")
return int(self.inspect(self._fields[-t]))
__long__ = __int__
def __float__(self):
"""converts K scalars to python float
>>> [float(q(x)) for x in '1b 2h 3 4e `5 6.0 2000.01.08'.split()]
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
"""
t = self._t
if t >= 0:
raise TypeError("cannot convert non-scalar to float")
return float(self.inspect(self._fields[-t]))
def __index__(self):
t = self._t
if -5 >= t >= -7:
return int(self)
raise TypeError("Only scalar short/int/long K objects "
"can be converted to an index")
# Strictly speaking, this is only needed for Python 3.x, but
# there is no harm if this is defined and not used in Python 2.x.
def __bytes__(self):
t = self._t
if -5 >= t >= -7:
return bytes(int(self))
if 0 < abs(t) < 11:
if abs(t) == 2:
# A work-around while .data is not implemented for guid type
from uuid import UUID
x = q('(),', self) # ensure that x is a list
return b''.join(UUID(int=i).bytes for i in x)
return bytes(self.data)
raise BufferError("k object of type %d" % t)
def __eq__(self, other):
"""
>>> K(1) == K(1)
True
>>> K(1) == None
False
"""
try:
other = K(other)
except TypeError:
return False
return bool(k('~')(self, other))
def __ne__(self, other):
"""
>>> K(1) != K(2)
True
"""
return bool(k('~~')(self, other))
def __contains__(self, item):
"""membership test
>>> 1 in q('1 2 3')
True
>>> 'abc' not in q('(1;2.0;`abc)')
False
"""
if self._t:
x = q('in', item, self)
else:
x = q('{sum x~/:y}', item, self)
return bool(x)
def keys(self):
"""returns q('key', self)
Among other uses, enables interoperability between q and
python dicts.
>>> from collections import OrderedDict
>>> OrderedDict(q('`a`b!1 2'))
OrderedDict([('a', 1), ('b', 2)])
>>> d = {}; d.update(q('`a`b!1 2'))
>>> list(sorted(d.items()))
[('a', 1), ('b', 2)]
"""
return self._k(0, 'key', self)
def show(self, start=0, geometry=None, output=None):
"""pretty-print data to the console
(similar to q.show, but uses python stdout by default)
>>> x = q('([k:`x`y`z]a:1 2 3;b:10 20 30)')
>>> x.show() # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
x| 1 10
y| 2 20
z| 3 30
the first optional argument, 'start' specifies the first row to be
printed (negative means from the end)
>>> x.show(2) # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
z| 3 30
>>> x.show(-2) # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
y| 2 20
z| 3 30
the geometry is the height and width of the console
>>> x.show(geometry=[4, 6])
k| a..
-| -..
x| 1..
..
"""
if output is None:
output = sys.stdout
if geometry is None:
geometry = q.value(kp("\\c"))
else:
geometry = self._I(geometry)
if start < 0:
start += q.count(self)
# Make sure nil is not passed to a q function
if self._id() != nil._id():
r = self._show(geometry, start)
else:
r = '::\n'
if isinstance(output, type):
return output(r)
try:
output.write(r)
except TypeError:
output.write(str(r))
# See issue #665
def decode(self, encoding='utf-8', errors='strict'):
return bytes(self).decode(encoding, errors)
def _seu(self, what, columns, by, where, kwds):
args = [self]
anames = ['self']
if kwds:
extra = sorted(kwds.keys())
args.extend(kwds[name] for name in extra)
anames.extend(extra)
if not isinstance(columns, str):
columns = ','.join(str(x) for x in columns)
query = "{[%s]%s %s " % (';'.join(anames), what, columns)
if by:
if not isinstance(by, str):
by = ','.join(str(x) for x in by)
query += " by " + by
query += " from self"
if where:
if not isinstance(where, str):
where = ','.join(str(x) for x in where)
query += " where " + where
query += '}'
return q(query, *args)
def select(self, columns=(), by=(), where=(), **kwds):
"""select from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.select('a', where='b > 20').show()
a
-
3
"""
return self._seu('select', columns, by, where, kwds)
def exec_(self, columns=(), by=(), where=(), **kwds):
"""exec from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.exec_('a', where='b > 10').show()
2 3
"""
return self._seu('exec', columns, by, where, kwds)
def update(self, columns=(), by=(), where=(), **kwds):
"""update from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.update('a*2',
... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE
a b
----
1 10
2 20
6 30
"""
return self._seu('update', columns, by, where, kwds)
@property
def ss(self):
if self._t == 10:
return q.ss(self)
return q('`ss$', self)
if _np is not None:
@property
def _mask(self):
x = self._get_null()
if x is None:
return _np.ma.nomask
else:
return _np.asarray(x)
from ._n import array as __array__
__array_priority__ = 20
__doc__ += """
K objects can be used in Python arithmetic expressions
>>> x, y, z = map(K, (1, 2, 3))
>>> print(x + y, x * y,
... z/y, x|y, x&y, abs(-z)) #doctest: +NORMALIZE_WHITESPACE
3 2 1.5 2 1 3
Mixing K objects with python numbers is allowed
>>> 1/q('1 2 4')
k('1 0.5 0.25')
>>> q.til(5)**2
k('0 1 4 9 16f')
"""
def __format__(self, fmt):
if fmt:
return format(self._pys(), fmt)
return str(self)
def __sizeof__(self):
return object.__sizeof__(self) + int(self._sizeof())
def __fspath__(self):
"""Return the file system path representation of the object."""
if self._t != -11: # symbol
raise TypeError
sym = str(self)
if not sym.startswith(':'):
raise TypeError
return sym[1:]
def __complex__(self):
"""Called to implement the built-in function complex()."""
if self._t != 99 or self.key != ['re', 'im']:
return complex(float(self))
return complex(float(self.re), float(self.im))
@classmethod
@classmethod
def table(cls, *args, **kwds):
if args or kwds:
return cls.dict(*args, **kwds).flip
else:
raise TypeError("A table must have at least one column")
|
KxSystems/pyq | src/pyq/magic.py | logical_lines | python | def logical_lines(lines):
if isinstance(lines, string_types):
lines = StringIO(lines)
buf = []
for line in lines:
if buf and not line.startswith(' '):
chunk = ''.join(buf).strip()
if chunk:
yield chunk
buf[:] = []
buf.append(line)
chunk = ''.join(buf).strip()
if chunk:
yield chunk | Merge lines into chunks according to q rules | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/magic.py#L23-L38 | null | """IPython magic"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
from tempfile import mkstemp
import pyq
from getopt import getopt
import sys
StringIO = io.StringIO # LGTM.com does not like double import
STD_STREAM = [sys.stdin, sys.stdout, sys.stderr]
try:
string_types = (str, unicode)
except NameError:
string_types = (str,)
Q_NONE = pyq.q('::')
def _forward_outputs(outs):
for fd in (1, 2):
if fd in outs:
os.lseek(fd, 0, os.SEEK_SET)
with io.open(fd, closefd=False) as f:
STD_STREAM[fd].writelines(f)
os.ftruncate(fd, 0)
def q(line, cell=None, _ns=None):
"""Run q code.
Options:
-l (dir|script) - pre-load database or script
-h host:port - execute on the given host
-o var - send output to a variable named var.
-i var1,..,varN - input variables
-1/-2 - redirect stdout/stderr
"""
if cell is None:
return pyq.q(line)
if _ns is None:
_ns = vars(sys.modules['__main__'])
input = output = None
preload = []
outs = {}
try:
h = pyq.q('0i')
if line:
for opt, value in getopt(line.split(), "h:l:o:i:12")[0]:
if opt == '-l':
preload.append(value)
elif opt == '-h':
h = pyq.K(str(':' + value))
elif opt == '-o':
output = str(value) # (see #673)
elif opt == '-i':
input = str(value).split(',')
elif opt in ('-1', '-2'):
outs[int(opt[1])] = None
if outs:
if int(h) != 0:
raise ValueError("Cannot redirect remote std stream")
for fd in outs:
tmpfd, tmpfile = mkstemp()
try:
pyq.q(r'\%d %s' % (fd, tmpfile))
finally:
os.unlink(tmpfile)
os.close(tmpfd)
r = None
for script in preload:
h(pyq.kp(r"\l " + script))
if input is not None:
for chunk in logical_lines(cell):
func = "{[%s]%s}" % (';'.join(input), chunk)
args = tuple(_ns[i] for i in input)
if r != Q_NONE:
r.show()
r = h((pyq.kp(func),) + args)
if outs:
_forward_outputs(outs)
else:
for chunk in logical_lines(cell):
if r != Q_NONE:
r.show()
r = h(pyq.kp(chunk))
if outs:
_forward_outputs(outs)
except pyq.kerr as e:
print("'%s" % e)
else:
if output is not None:
if output.startswith('q.'):
pyq.q('@[`.;;:;]', output[2:], r)
else:
_ns[output] = r
else:
if r != Q_NONE:
return r
def _q_formatter(x, p, _):
x_show = x.show(output=str)
p.text(x_show.strip())
def load_ipython_extension(ipython):
"""Register %q and %%q magics and pretty display for K objects"""
ipython.register_magic_function(q, 'line_cell')
fmr = ipython.display_formatter.formatters['text/plain']
fmr.for_type(pyq.K, _q_formatter)
def unload_ipython_extension(ipython):
pass
|
KxSystems/pyq | src/pyq/magic.py | q | python | def q(line, cell=None, _ns=None):
if cell is None:
return pyq.q(line)
if _ns is None:
_ns = vars(sys.modules['__main__'])
input = output = None
preload = []
outs = {}
try:
h = pyq.q('0i')
if line:
for opt, value in getopt(line.split(), "h:l:o:i:12")[0]:
if opt == '-l':
preload.append(value)
elif opt == '-h':
h = pyq.K(str(':' + value))
elif opt == '-o':
output = str(value) # (see #673)
elif opt == '-i':
input = str(value).split(',')
elif opt in ('-1', '-2'):
outs[int(opt[1])] = None
if outs:
if int(h) != 0:
raise ValueError("Cannot redirect remote std stream")
for fd in outs:
tmpfd, tmpfile = mkstemp()
try:
pyq.q(r'\%d %s' % (fd, tmpfile))
finally:
os.unlink(tmpfile)
os.close(tmpfd)
r = None
for script in preload:
h(pyq.kp(r"\l " + script))
if input is not None:
for chunk in logical_lines(cell):
func = "{[%s]%s}" % (';'.join(input), chunk)
args = tuple(_ns[i] for i in input)
if r != Q_NONE:
r.show()
r = h((pyq.kp(func),) + args)
if outs:
_forward_outputs(outs)
else:
for chunk in logical_lines(cell):
if r != Q_NONE:
r.show()
r = h(pyq.kp(chunk))
if outs:
_forward_outputs(outs)
except pyq.kerr as e:
print("'%s" % e)
else:
if output is not None:
if output.startswith('q.'):
pyq.q('@[`.;;:;]', output[2:], r)
else:
_ns[output] = r
else:
if r != Q_NONE:
return r | Run q code.
Options:
-l (dir|script) - pre-load database or script
-h host:port - execute on the given host
-o var - send output to a variable named var.
-i var1,..,varN - input variables
-1/-2 - redirect stdout/stderr | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/magic.py#L50-L121 | [
"def logical_lines(lines):\n \"\"\"Merge lines into chunks according to q rules\"\"\"\n if isinstance(lines, string_types):\n lines = StringIO(lines)\n buf = []\n for line in lines:\n if buf and not line.startswith(' '):\n chunk = ''.join(buf).strip()\n if chunk:\n yield chunk\n buf[:] = []\n\n buf.append(line)\n chunk = ''.join(buf).strip()\n if chunk:\n yield chunk\n",
"def _forward_outputs(outs):\n for fd in (1, 2):\n if fd in outs:\n os.lseek(fd, 0, os.SEEK_SET)\n with io.open(fd, closefd=False) as f:\n STD_STREAM[fd].writelines(f)\n os.ftruncate(fd, 0)\n"
] | """IPython magic"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
from tempfile import mkstemp
import pyq
from getopt import getopt
import sys
StringIO = io.StringIO # LGTM.com does not like double import
STD_STREAM = [sys.stdin, sys.stdout, sys.stderr]
try:
string_types = (str, unicode)
except NameError:
string_types = (str,)
Q_NONE = pyq.q('::')
def logical_lines(lines):
"""Merge lines into chunks according to q rules"""
if isinstance(lines, string_types):
lines = StringIO(lines)
buf = []
for line in lines:
if buf and not line.startswith(' '):
chunk = ''.join(buf).strip()
if chunk:
yield chunk
buf[:] = []
buf.append(line)
chunk = ''.join(buf).strip()
if chunk:
yield chunk
def _forward_outputs(outs):
for fd in (1, 2):
if fd in outs:
os.lseek(fd, 0, os.SEEK_SET)
with io.open(fd, closefd=False) as f:
STD_STREAM[fd].writelines(f)
os.ftruncate(fd, 0)
def _q_formatter(x, p, _):
x_show = x.show(output=str)
p.text(x_show.strip())
def load_ipython_extension(ipython):
"""Register %q and %%q magics and pretty display for K objects"""
ipython.register_magic_function(q, 'line_cell')
fmr = ipython.display_formatter.formatters['text/plain']
fmr.for_type(pyq.K, _q_formatter)
def unload_ipython_extension(ipython):
pass
|
KxSystems/pyq | src/pyq/magic.py | load_ipython_extension | python | def load_ipython_extension(ipython):
ipython.register_magic_function(q, 'line_cell')
fmr = ipython.display_formatter.formatters['text/plain']
fmr.for_type(pyq.K, _q_formatter) | Register %q and %%q magics and pretty display for K objects | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/magic.py#L129-L133 | null | """IPython magic"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
from tempfile import mkstemp
import pyq
from getopt import getopt
import sys
StringIO = io.StringIO # LGTM.com does not like double import
STD_STREAM = [sys.stdin, sys.stdout, sys.stderr]
try:
string_types = (str, unicode)
except NameError:
string_types = (str,)
Q_NONE = pyq.q('::')
def logical_lines(lines):
"""Merge lines into chunks according to q rules"""
if isinstance(lines, string_types):
lines = StringIO(lines)
buf = []
for line in lines:
if buf and not line.startswith(' '):
chunk = ''.join(buf).strip()
if chunk:
yield chunk
buf[:] = []
buf.append(line)
chunk = ''.join(buf).strip()
if chunk:
yield chunk
def _forward_outputs(outs):
for fd in (1, 2):
if fd in outs:
os.lseek(fd, 0, os.SEEK_SET)
with io.open(fd, closefd=False) as f:
STD_STREAM[fd].writelines(f)
os.ftruncate(fd, 0)
def q(line, cell=None, _ns=None):
"""Run q code.
Options:
-l (dir|script) - pre-load database or script
-h host:port - execute on the given host
-o var - send output to a variable named var.
-i var1,..,varN - input variables
-1/-2 - redirect stdout/stderr
"""
if cell is None:
return pyq.q(line)
if _ns is None:
_ns = vars(sys.modules['__main__'])
input = output = None
preload = []
outs = {}
try:
h = pyq.q('0i')
if line:
for opt, value in getopt(line.split(), "h:l:o:i:12")[0]:
if opt == '-l':
preload.append(value)
elif opt == '-h':
h = pyq.K(str(':' + value))
elif opt == '-o':
output = str(value) # (see #673)
elif opt == '-i':
input = str(value).split(',')
elif opt in ('-1', '-2'):
outs[int(opt[1])] = None
if outs:
if int(h) != 0:
raise ValueError("Cannot redirect remote std stream")
for fd in outs:
tmpfd, tmpfile = mkstemp()
try:
pyq.q(r'\%d %s' % (fd, tmpfile))
finally:
os.unlink(tmpfile)
os.close(tmpfd)
r = None
for script in preload:
h(pyq.kp(r"\l " + script))
if input is not None:
for chunk in logical_lines(cell):
func = "{[%s]%s}" % (';'.join(input), chunk)
args = tuple(_ns[i] for i in input)
if r != Q_NONE:
r.show()
r = h((pyq.kp(func),) + args)
if outs:
_forward_outputs(outs)
else:
for chunk in logical_lines(cell):
if r != Q_NONE:
r.show()
r = h(pyq.kp(chunk))
if outs:
_forward_outputs(outs)
except pyq.kerr as e:
print("'%s" % e)
else:
if output is not None:
if output.startswith('q.'):
pyq.q('@[`.;;:;]', output[2:], r)
else:
_ns[output] = r
else:
if r != Q_NONE:
return r
def _q_formatter(x, p, _):
x_show = x.show(output=str)
p.text(x_show.strip())
def unload_ipython_extension(ipython):
pass
|
KxSystems/pyq | src/pyq/ptk.py | get_prompt_tokens | python | def get_prompt_tokens(_):
namespace = q(r'\d')
if namespace == '.':
namespace = ''
return [(Token.Generic.Prompt, 'q%s)' % namespace)] | Return a list of tokens for the prompt | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/ptk.py#L48-L53 | null | """Prompt toolkit
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import re
from prompt_toolkit.completion import Completion
from prompt_toolkit.contrib.completers import PathCompleter
from prompt_toolkit.contrib.completers.base import Completer
from prompt_toolkit.document import Document
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit import prompt
from prompt_toolkit.styles import style_from_pygments
from pygments.token import Token
from pygments.styles.monokai import MonokaiStyle as BasicStyle
from pygments.lexers import get_lexer_by_name
try:
q_lexer = get_lexer_by_name('q')
except ValueError:
lexer = None
else:
from prompt_toolkit.layout.lexers import PygmentsLexer
lexer = PygmentsLexer(type(q_lexer))
from . import q
KDB_INFO = "KDB+ %s %s" % tuple(q('.z.K,.z.k'))
style_dict = {
# Prompt.
Token.Generic.Prompt: '#884444',
# Toolbar.
Token.Toolbar: '#ffffff bg:#333333',
}
def get_bottom_toolbar_tokens(cli):
"""Return a list of tokens for the bottom toolbar"""
mem = q('.Q.w', '') // 1024 # memory info in KiB
return [(Token.Toolbar, "{0} {1.used}/{1.mphy} KiB".format(KDB_INFO, mem))]
history = InMemoryHistory()
HSYM_RE = re.compile(r'.*`:([\w/.]*)$')
class QCompleter(Completer):
"""Completer for the q language"""
def __init__(self):
namespace = q(r'\d')
res = q('.Q.res')
dot_q = q('1_key .q')
self.path_completer = PathCompleter()
self.words_info = [(list(res), 'k'),
(list(dot_q), 'q'),
(list(q.key(namespace)), str(namespace))]
def get_completions(self, document, complete_event):
"""Yield completions"""
# Detect a file handle
m = HSYM_RE.match(document.text_before_cursor)
if m:
text = m.group(1)
doc = Document(text, len(text))
for c in self.path_completer.get_completions(doc, complete_event):
yield c
else:
# Get word/text before cursor.
word_before_cursor = document.get_word_before_cursor(False)
for words, meta in self.words_info:
for a in words:
if a.startswith(word_before_cursor):
yield Completion(a, -len(word_before_cursor),
display_meta=meta)
def cmdloop(self, intro=None):
"""A Cmd.cmdloop implementation"""
style = style_from_pygments(BasicStyle, style_dict)
self.preloop()
stop = None
while not stop:
line = prompt(get_prompt_tokens=get_prompt_tokens, lexer=lexer,
get_bottom_toolbar_tokens=get_bottom_toolbar_tokens,
history=history, style=style, true_color=True,
on_exit='return-none', on_abort='return-none',
completer=QCompleter())
if line is None or line.strip() == r'\\':
raise SystemExit
else:
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
|
KxSystems/pyq | src/pyq/ptk.py | cmdloop | python | def cmdloop(self, intro=None):
style = style_from_pygments(BasicStyle, style_dict)
self.preloop()
stop = None
while not stop:
line = prompt(get_prompt_tokens=get_prompt_tokens, lexer=lexer,
get_bottom_toolbar_tokens=get_bottom_toolbar_tokens,
history=history, style=style, true_color=True,
on_exit='return-none', on_abort='return-none',
completer=QCompleter())
if line is None or line.strip() == r'\\':
raise SystemExit
else:
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop() | A Cmd.cmdloop implementation | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/ptk.py#L90-L107 | null | """Prompt toolkit
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import re
from prompt_toolkit.completion import Completion
from prompt_toolkit.contrib.completers import PathCompleter
from prompt_toolkit.contrib.completers.base import Completer
from prompt_toolkit.document import Document
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit import prompt
from prompt_toolkit.styles import style_from_pygments
from pygments.token import Token
from pygments.styles.monokai import MonokaiStyle as BasicStyle
from pygments.lexers import get_lexer_by_name
try:
q_lexer = get_lexer_by_name('q')
except ValueError:
lexer = None
else:
from prompt_toolkit.layout.lexers import PygmentsLexer
lexer = PygmentsLexer(type(q_lexer))
from . import q
KDB_INFO = "KDB+ %s %s" % tuple(q('.z.K,.z.k'))
style_dict = {
# Prompt.
Token.Generic.Prompt: '#884444',
# Toolbar.
Token.Toolbar: '#ffffff bg:#333333',
}
def get_bottom_toolbar_tokens(cli):
"""Return a list of tokens for the bottom toolbar"""
mem = q('.Q.w', '') // 1024 # memory info in KiB
return [(Token.Toolbar, "{0} {1.used}/{1.mphy} KiB".format(KDB_INFO, mem))]
history = InMemoryHistory()
def get_prompt_tokens(_):
"""Return a list of tokens for the prompt"""
namespace = q(r'\d')
if namespace == '.':
namespace = ''
return [(Token.Generic.Prompt, 'q%s)' % namespace)]
HSYM_RE = re.compile(r'.*`:([\w/.]*)$')
class QCompleter(Completer):
"""Completer for the q language"""
def __init__(self):
namespace = q(r'\d')
res = q('.Q.res')
dot_q = q('1_key .q')
self.path_completer = PathCompleter()
self.words_info = [(list(res), 'k'),
(list(dot_q), 'q'),
(list(q.key(namespace)), str(namespace))]
def get_completions(self, document, complete_event):
"""Yield completions"""
# Detect a file handle
m = HSYM_RE.match(document.text_before_cursor)
if m:
text = m.group(1)
doc = Document(text, len(text))
for c in self.path_completer.get_completions(doc, complete_event):
yield c
else:
# Get word/text before cursor.
word_before_cursor = document.get_word_before_cursor(False)
for words, meta in self.words_info:
for a in words:
if a.startswith(word_before_cursor):
yield Completion(a, -len(word_before_cursor),
display_meta=meta)
|
KxSystems/pyq | src/pyq/ptk.py | QCompleter.get_completions | python | def get_completions(self, document, complete_event):
# Detect a file handle
m = HSYM_RE.match(document.text_before_cursor)
if m:
text = m.group(1)
doc = Document(text, len(text))
for c in self.path_completer.get_completions(doc, complete_event):
yield c
else:
# Get word/text before cursor.
word_before_cursor = document.get_word_before_cursor(False)
for words, meta in self.words_info:
for a in words:
if a.startswith(word_before_cursor):
yield Completion(a, -len(word_before_cursor),
display_meta=meta) | Yield completions | train | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/ptk.py#L71-L87 | null | class QCompleter(Completer):
"""Completer for the q language"""
def __init__(self):
namespace = q(r'\d')
res = q('.Q.res')
dot_q = q('1_key .q')
self.path_completer = PathCompleter()
self.words_info = [(list(res), 'k'),
(list(dot_q), 'q'),
(list(q.key(namespace)), str(namespace))]
|
msiemens/PyGitUp | PyGitUp/git_wrapper.py | GitWrapper._run | python | def _run(self, name, *args, **kwargs):
""" Run a git command specified by name and args/kwargs. """
stdout = six.b('')
cmd = getattr(self.git, name)
# Ask cmd(...) to return a (status, stdout, stderr) tuple
kwargs['with_extended_output'] = True
# Execute command
try:
(_, stdout, _) = cmd(*args, **kwargs)
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip() | Run a git command specified by name and args/kwargs. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/git_wrapper.py#L85-L107 | null | class GitWrapper(object):
"""
    A wrapper for repo.git providing better stdout handling + better exceptions.
It is preferred to repo.git because it doesn't print to stdout
in real time. In addition, this wrapper provides better error
handling (it provides stdout messages inside the exception, too).
"""
def __init__(self, repo):
if repo:
#: :type: git.Repo
self.repo = repo
#: :type: git.Git
self.git = self.repo.git
else:
#: :type: git.Git
self.git = Git()
def __del__(self):
# Is the following true?
# GitPython runs persistent git processes in the working directory.
# Therefore, when we use 'git up' in something like a test environment,
# this might cause troubles because of the open file handlers (like
# trying to remove the directory right after the test has finished).
# 'clear_cache' kills the processes...
if platform.system() == 'Windows': # pragma: no cover
pass
# ... or rather "should kill", because but somehow it recently
# started to not kill cat_file_header out of the blue (I even
# tried running old code, but the once working code failed).
# Thus, we kill it manually here.
if self.git.cat_file_header is not None:
subprocess.call(("TASKKILL /F /T /PID {0} 2>nul 1>nul".format(
str(self.git.cat_file_header.proc.pid)
)), shell=True)
if self.git.cat_file_all is not None:
subprocess.call(("TASKKILL /F /T /PID {0} 2>nul 1>nul".format(
str(self.git.cat_file_all.proc.pid)
)), shell=True)
self.git.clear_cache()
def _run(self, name, *args, **kwargs):
""" Run a git command specified by name and args/kwargs. """
stdout = six.b('')
cmd = getattr(self.git, name)
# Ask cmd(...) to return a (status, stdout, stderr) tuple
kwargs['with_extended_output'] = True
# Execute command
try:
(_, stdout, _) = cmd(*args, **kwargs)
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def __getattr__(self, name):
return lambda *args, **kwargs: self._run(name, *args, **kwargs)
###########################################################################
# Overwrite some methods and add new ones
###########################################################################
@contextmanager
def stasher(self):
"""
A stashing contextmanager.
"""
# nonlocal for python2
stashed = [False]
clean = [False]
def stash():
if clean[0] or not self.repo.is_dirty(submodules=False):
clean[0] = True
return
if stashed[0]:
return
if self.change_count > 1:
message = 'stashing {0} changes'
else:
message = 'stashing {0} change'
print(colored(
message.format(self.change_count),
'magenta'
))
try:
self._run('stash')
except GitError as e:
raise StashError(stderr=e.stderr, stdout=e.stdout)
stashed[0] = True
yield stash
if stashed[0]:
print(colored('unstashing', 'magenta'))
try:
self._run('stash', 'pop')
except GitError as e:
raise UnstashError(stderr=e.stderr, stdout=e.stdout)
def checkout(self, branch_name):
""" Checkout a branch by name. """
try:
find(
self.repo.branches, lambda b: b.name == branch_name
).checkout()
except OrigCheckoutError as e:
raise CheckoutError(branch_name, details=e)
def rebase(self, target_branch):
""" Rebase to target branch. """
current_branch = self.repo.active_branch
arguments = (
([self.config('git-up.rebase.arguments')] or []) +
[target_branch.name]
)
try:
self._run('rebase', *arguments)
except GitError as e:
raise RebaseError(current_branch.name, target_branch.name,
**e.__dict__)
def fetch(self, *args, **kwargs):
""" Fetch remote commits. """
# Unlike the other git commands, we want to output `git fetch`'s
# output in real time. Therefore we use a different implementation
# from `GitWrapper._run` which buffers all output.
# In theory this may deadlock if `git fetch` prints more than 8 KB
# to stderr which is here assumed to not happen in day-to-day use.
stdout = six.b('')
# Execute command
cmd = self.git.fetch(as_process=True, *args, **kwargs)
# Capture output
while True:
output = cmd.stdout.read(1)
sys.stdout.write(output.decode('utf-8'))
sys.stdout.flush()
stdout += output
# Check for EOF
if output == six.b(""):
break
# Wait for the process to quit
try:
cmd.wait()
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def push(self, *args, **kwargs):
''' Push commits to remote '''
stdout = six.b('')
# Execute command
cmd = self.git.push(as_process=True, *args, **kwargs)
# Capture output
while True:
output = cmd.stdout.read(1)
sys.stdout.write(output.decode('utf-8'))
sys.stdout.flush()
stdout += output
# Check for EOF
if output == six.b(""):
break
# Wait for the process to quit
try:
cmd.wait()
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def config(self, key):
""" Return `git config key` output or None. """
try:
return self.git.config(key)
except GitCommandError:
return None
@property
def change_count(self):
""" The number of changes in the working directory. """
status = self.git.status(porcelain=True, untracked_files='no').strip()
if not status:
return 0
else:
return len(status.split('\n'))
@property
def version(self):
"""
Return git's version as a list of numbers.
The original repo.git.version_info has problems with tome types of
git version strings.
"""
return re.search(r'\d+(\.\d+)+', self.git.version()).group(0)
def is_version_min(self, required_version):
""" Does git's version match the requirements? """
return self.version.split('.') >= required_version.split('.')
|
msiemens/PyGitUp | PyGitUp/git_wrapper.py | GitWrapper.stasher | python | def stasher(self):
"""
A stashing contextmanager.
"""
# nonlocal for python2
stashed = [False]
clean = [False]
def stash():
if clean[0] or not self.repo.is_dirty(submodules=False):
clean[0] = True
return
if stashed[0]:
return
if self.change_count > 1:
message = 'stashing {0} changes'
else:
message = 'stashing {0} change'
print(colored(
message.format(self.change_count),
'magenta'
))
try:
self._run('stash')
except GitError as e:
raise StashError(stderr=e.stderr, stdout=e.stdout)
stashed[0] = True
yield stash
if stashed[0]:
print(colored('unstashing', 'magenta'))
try:
self._run('stash', 'pop')
except GitError as e:
raise UnstashError(stderr=e.stderr, stdout=e.stdout) | A stashing contextmanager. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/git_wrapper.py#L117-L154 | [
"def _run(self, name, *args, **kwargs):\n\n \"\"\" Run a git command specified by name and args/kwargs. \"\"\"\n\n stdout = six.b('')\n cmd = getattr(self.git, name)\n\n # Ask cmd(...) to return a (status, stdout, stderr) tuple\n kwargs['with_extended_output'] = True\n\n # Execute command\n try:\n (_, stdout, _) = cmd(*args, **kwargs)\n except GitCommandError as error:\n # Add more meta-information to errors\n message = \"'{0}' returned exit status {1}\".format(\n ' '.join(str(c) for c in error.command),\n error.status\n )\n\n raise GitError(message, stderr=error.stderr, stdout=stdout)\n\n return stdout.strip()\n"
] | class GitWrapper(object):
"""
    A wrapper for repo.git providing better stdout handling + better exceptions.
It is preferred to repo.git because it doesn't print to stdout
in real time. In addition, this wrapper provides better error
handling (it provides stdout messages inside the exception, too).
"""
def __init__(self, repo):
if repo:
#: :type: git.Repo
self.repo = repo
#: :type: git.Git
self.git = self.repo.git
else:
#: :type: git.Git
self.git = Git()
def __del__(self):
# Is the following true?
# GitPython runs persistent git processes in the working directory.
# Therefore, when we use 'git up' in something like a test environment,
# this might cause troubles because of the open file handlers (like
# trying to remove the directory right after the test has finished).
# 'clear_cache' kills the processes...
if platform.system() == 'Windows': # pragma: no cover
pass
# ... or rather "should kill", because but somehow it recently
# started to not kill cat_file_header out of the blue (I even
# tried running old code, but the once working code failed).
# Thus, we kill it manually here.
if self.git.cat_file_header is not None:
subprocess.call(("TASKKILL /F /T /PID {0} 2>nul 1>nul".format(
str(self.git.cat_file_header.proc.pid)
)), shell=True)
if self.git.cat_file_all is not None:
subprocess.call(("TASKKILL /F /T /PID {0} 2>nul 1>nul".format(
str(self.git.cat_file_all.proc.pid)
)), shell=True)
self.git.clear_cache()
def _run(self, name, *args, **kwargs):
""" Run a git command specified by name and args/kwargs. """
stdout = six.b('')
cmd = getattr(self.git, name)
# Ask cmd(...) to return a (status, stdout, stderr) tuple
kwargs['with_extended_output'] = True
# Execute command
try:
(_, stdout, _) = cmd(*args, **kwargs)
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def __getattr__(self, name):
return lambda *args, **kwargs: self._run(name, *args, **kwargs)
###########################################################################
# Overwrite some methods and add new ones
###########################################################################
@contextmanager
def stasher(self):
"""
A stashing contextmanager.
"""
# nonlocal for python2
stashed = [False]
clean = [False]
def stash():
if clean[0] or not self.repo.is_dirty(submodules=False):
clean[0] = True
return
if stashed[0]:
return
if self.change_count > 1:
message = 'stashing {0} changes'
else:
message = 'stashing {0} change'
print(colored(
message.format(self.change_count),
'magenta'
))
try:
self._run('stash')
except GitError as e:
raise StashError(stderr=e.stderr, stdout=e.stdout)
stashed[0] = True
yield stash
if stashed[0]:
print(colored('unstashing', 'magenta'))
try:
self._run('stash', 'pop')
except GitError as e:
raise UnstashError(stderr=e.stderr, stdout=e.stdout)
def checkout(self, branch_name):
""" Checkout a branch by name. """
try:
find(
self.repo.branches, lambda b: b.name == branch_name
).checkout()
except OrigCheckoutError as e:
raise CheckoutError(branch_name, details=e)
def rebase(self, target_branch):
""" Rebase to target branch. """
current_branch = self.repo.active_branch
arguments = (
([self.config('git-up.rebase.arguments')] or []) +
[target_branch.name]
)
try:
self._run('rebase', *arguments)
except GitError as e:
raise RebaseError(current_branch.name, target_branch.name,
**e.__dict__)
def fetch(self, *args, **kwargs):
""" Fetch remote commits. """
# Unlike the other git commands, we want to output `git fetch`'s
# output in real time. Therefore we use a different implementation
# from `GitWrapper._run` which buffers all output.
# In theory this may deadlock if `git fetch` prints more than 8 KB
# to stderr which is here assumed to not happen in day-to-day use.
stdout = six.b('')
# Execute command
cmd = self.git.fetch(as_process=True, *args, **kwargs)
# Capture output
while True:
output = cmd.stdout.read(1)
sys.stdout.write(output.decode('utf-8'))
sys.stdout.flush()
stdout += output
# Check for EOF
if output == six.b(""):
break
# Wait for the process to quit
try:
cmd.wait()
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def push(self, *args, **kwargs):
''' Push commits to remote '''
stdout = six.b('')
# Execute command
cmd = self.git.push(as_process=True, *args, **kwargs)
# Capture output
while True:
output = cmd.stdout.read(1)
sys.stdout.write(output.decode('utf-8'))
sys.stdout.flush()
stdout += output
# Check for EOF
if output == six.b(""):
break
# Wait for the process to quit
try:
cmd.wait()
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def config(self, key):
""" Return `git config key` output or None. """
try:
return self.git.config(key)
except GitCommandError:
return None
@property
def change_count(self):
""" The number of changes in the working directory. """
status = self.git.status(porcelain=True, untracked_files='no').strip()
if not status:
return 0
else:
return len(status.split('\n'))
@property
def version(self):
"""
Return git's version as a list of numbers.
The original repo.git.version_info has problems with tome types of
git version strings.
"""
return re.search(r'\d+(\.\d+)+', self.git.version()).group(0)
def is_version_min(self, required_version):
""" Does git's version match the requirements? """
return self.version.split('.') >= required_version.split('.')
|
msiemens/PyGitUp | PyGitUp/git_wrapper.py | GitWrapper.checkout | python | def checkout(self, branch_name):
""" Checkout a branch by name. """
try:
find(
self.repo.branches, lambda b: b.name == branch_name
).checkout()
except OrigCheckoutError as e:
raise CheckoutError(branch_name, details=e) | Checkout a branch by name. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/git_wrapper.py#L156-L163 | [
"def find(seq, test):\n \"\"\" Return first item in sequence where test(item) == True \"\"\"\n for item in seq:\n if test(item):\n return item\n"
] | class GitWrapper(object):
"""
A wrapper for repo.git providing better stdout handling + better exeptions.
It is preferred to repo.git because it doesn't print to stdout
in real time. In addition, this wrapper provides better error
handling (it provides stdout messages inside the exception, too).
"""
def __init__(self, repo):
if repo:
#: :type: git.Repo
self.repo = repo
#: :type: git.Git
self.git = self.repo.git
else:
#: :type: git.Git
self.git = Git()
    def __del__(self):
        """
        Clean up GitPython's cached git child processes on destruction.
        """
        # Is the following true?
        # GitPython runs persistent git processes in the working directory.
        # Therefore, when we use 'git up' in something like a test environment,
        # this might cause troubles because of the open file handlers (like
        # trying to remove the directory right after the test has finished).
        # 'clear_cache' kills the processes...
        if platform.system() == 'Windows':  # pragma: no cover
            pass
            # ... or rather "should kill", because somehow it recently
            # started to not kill cat_file_header out of the blue (I even
            # tried running old code, but the once working code failed).
            # Thus, we kill it manually here (Windows only).
            if self.git.cat_file_header is not None:
                subprocess.call(("TASKKILL /F /T /PID {0} 2>nul 1>nul".format(
                    str(self.git.cat_file_header.proc.pid)
                )), shell=True)
            if self.git.cat_file_all is not None:
                subprocess.call(("TASKKILL /F /T /PID {0} 2>nul 1>nul".format(
                    str(self.git.cat_file_all.proc.pid)
                )), shell=True)
        self.git.clear_cache()
def _run(self, name, *args, **kwargs):
""" Run a git command specified by name and args/kwargs. """
stdout = six.b('')
cmd = getattr(self.git, name)
# Ask cmd(...) to return a (status, stdout, stderr) tuple
kwargs['with_extended_output'] = True
# Execute command
try:
(_, stdout, _) = cmd(*args, **kwargs)
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
    def __getattr__(self, name):
        """
        Fall back to running *name* as a git command via :meth:`_run`,
        so e.g. ``wrapper.status()`` executes ``git status``.
        """
        return lambda *args, **kwargs: self._run(name, *args, **kwargs)
###########################################################################
# Overwrite some methods and add new ones
###########################################################################
    @contextmanager
    def stasher(self):
        """
        A stashing contextmanager.

        Yields a ``stash()`` callable.  Calling it stashes the working tree
        (no-op when the tree is clean or a stash was already made); on
        context exit any stash created here is popped again.  Failures are
        wrapped in StashError / UnstashError.
        """
        # nonlocal for python2: one-element lists let the inner stash()
        # closure mutate this state.
        stashed = [False]
        clean = [False]
        def stash():
            if clean[0] or not self.repo.is_dirty(submodules=False):
                clean[0] = True
                return
            if stashed[0]:
                return
            if self.change_count > 1:
                message = 'stashing {0} changes'
            else:
                message = 'stashing {0} change'
            print(colored(
                message.format(self.change_count),
                'magenta'
            ))
            try:
                self._run('stash')
            except GitError as e:
                raise StashError(stderr=e.stderr, stdout=e.stdout)
            stashed[0] = True
        yield stash
        # Undo our own stash (if any) when the with-block finishes normally.
        if stashed[0]:
            print(colored('unstashing', 'magenta'))
            try:
                self._run('stash', 'pop')
            except GitError as e:
                raise UnstashError(stderr=e.stderr, stdout=e.stdout)
def checkout(self, branch_name):
""" Checkout a branch by name. """
try:
find(
self.repo.branches, lambda b: b.name == branch_name
).checkout()
except OrigCheckoutError as e:
raise CheckoutError(branch_name, details=e)
def rebase(self, target_branch):
""" Rebase to target branch. """
current_branch = self.repo.active_branch
arguments = (
([self.config('git-up.rebase.arguments')] or []) +
[target_branch.name]
)
try:
self._run('rebase', *arguments)
except GitError as e:
raise RebaseError(current_branch.name, target_branch.name,
**e.__dict__)
    def fetch(self, *args, **kwargs):
        """
        Fetch remote commits.

        Output is streamed to sys.stdout in real time; the accumulated
        (stripped) output is returned.  Raises GitError when the fetch
        fails, with stderr and the captured stdout attached.
        """
        # Unlike the other git commands, we want to output `git fetch`'s
        # output in real time. Therefore we use a different implementation
        # from `GitWrapper._run` which buffers all output.
        # In theory this may deadlock if `git fetch` prints more than 8 KB
        # to stderr which is here assumed to not happen in day-to-day use.
        stdout = six.b('')
        # Execute command
        cmd = self.git.fetch(as_process=True, *args, **kwargs)
        # Capture output one byte at a time so progress shows immediately.
        while True:
            output = cmd.stdout.read(1)
            sys.stdout.write(output.decode('utf-8'))
            sys.stdout.flush()
            stdout += output
            # Check for EOF
            if output == six.b(""):
                break
        # Wait for the process to quit
        try:
            cmd.wait()
        except GitCommandError as error:
            # Add more meta-information to errors
            message = "'{0}' returned exit status {1}".format(
                ' '.join(str(c) for c in error.command),
                error.status
            )
            raise GitError(message, stderr=error.stderr, stdout=stdout)
        return stdout.strip()
    def push(self, *args, **kwargs):
        """
        Push commits to the remote.

        Output is streamed to sys.stdout in real time; the accumulated
        (stripped) output is returned.  Raises GitError when the push
        fails, with stderr and the captured stdout attached.
        """
        stdout = six.b('')
        # Execute command
        cmd = self.git.push(as_process=True, *args, **kwargs)
        # Capture output one byte at a time so progress shows immediately.
        # NOTE(review): same streaming approach as fetch(), including its
        # potential stderr-buffer deadlock caveat -- confirm acceptable.
        while True:
            output = cmd.stdout.read(1)
            sys.stdout.write(output.decode('utf-8'))
            sys.stdout.flush()
            stdout += output
            # Check for EOF
            if output == six.b(""):
                break
        # Wait for the process to quit
        try:
            cmd.wait()
        except GitCommandError as error:
            # Add more meta-information to errors
            message = "'{0}' returned exit status {1}".format(
                ' '.join(str(c) for c in error.command),
                error.status
            )
            raise GitError(message, stderr=error.stderr, stdout=stdout)
        return stdout.strip()
def config(self, key):
""" Return `git config key` output or None. """
try:
return self.git.config(key)
except GitCommandError:
return None
@property
def change_count(self):
""" The number of changes in the working directory. """
status = self.git.status(porcelain=True, untracked_files='no').strip()
if not status:
return 0
else:
return len(status.split('\n'))
@property
def version(self):
"""
Return git's version as a list of numbers.
The original repo.git.version_info has problems with tome types of
git version strings.
"""
return re.search(r'\d+(\.\d+)+', self.git.version()).group(0)
def is_version_min(self, required_version):
""" Does git's version match the requirements? """
return self.version.split('.') >= required_version.split('.')
|
msiemens/PyGitUp | PyGitUp/git_wrapper.py | GitWrapper.rebase | python | def rebase(self, target_branch):
""" Rebase to target branch. """
current_branch = self.repo.active_branch
arguments = (
([self.config('git-up.rebase.arguments')] or []) +
[target_branch.name]
)
try:
self._run('rebase', *arguments)
except GitError as e:
raise RebaseError(current_branch.name, target_branch.name,
**e.__dict__) | Rebase to target branch. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/git_wrapper.py#L165-L177 | [
"def _run(self, name, *args, **kwargs):\n\n \"\"\" Run a git command specified by name and args/kwargs. \"\"\"\n\n stdout = six.b('')\n cmd = getattr(self.git, name)\n\n # Ask cmd(...) to return a (status, stdout, stderr) tuple\n kwargs['with_extended_output'] = True\n\n # Execute command\n try:\n (_, stdout, _) = cmd(*args, **kwargs)\n except GitCommandError as error:\n # Add more meta-information to errors\n message = \"'{0}' returned exit status {1}\".format(\n ' '.join(str(c) for c in error.command),\n error.status\n )\n\n raise GitError(message, stderr=error.stderr, stdout=stdout)\n\n return stdout.strip()\n",
"def config(self, key):\n \"\"\" Return `git config key` output or None. \"\"\"\n try:\n return self.git.config(key)\n except GitCommandError:\n return None\n"
] | class GitWrapper(object):
"""
A wrapper for repo.git providing better stdout handling + better exeptions.
It is preferred to repo.git because it doesn't print to stdout
in real time. In addition, this wrapper provides better error
handling (it provides stdout messages inside the exception, too).
"""
def __init__(self, repo):
if repo:
#: :type: git.Repo
self.repo = repo
#: :type: git.Git
self.git = self.repo.git
else:
#: :type: git.Git
self.git = Git()
def __del__(self):
# Is the following true?
# GitPython runs persistent git processes in the working directory.
# Therefore, when we use 'git up' in something like a test environment,
# this might cause troubles because of the open file handlers (like
# trying to remove the directory right after the test has finished).
# 'clear_cache' kills the processes...
if platform.system() == 'Windows': # pragma: no cover
pass
# ... or rather "should kill", because but somehow it recently
# started to not kill cat_file_header out of the blue (I even
# tried running old code, but the once working code failed).
# Thus, we kill it manually here.
if self.git.cat_file_header is not None:
subprocess.call(("TASKKILL /F /T /PID {0} 2>nul 1>nul".format(
str(self.git.cat_file_header.proc.pid)
)), shell=True)
if self.git.cat_file_all is not None:
subprocess.call(("TASKKILL /F /T /PID {0} 2>nul 1>nul".format(
str(self.git.cat_file_all.proc.pid)
)), shell=True)
self.git.clear_cache()
def _run(self, name, *args, **kwargs):
""" Run a git command specified by name and args/kwargs. """
stdout = six.b('')
cmd = getattr(self.git, name)
# Ask cmd(...) to return a (status, stdout, stderr) tuple
kwargs['with_extended_output'] = True
# Execute command
try:
(_, stdout, _) = cmd(*args, **kwargs)
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def __getattr__(self, name):
return lambda *args, **kwargs: self._run(name, *args, **kwargs)
###########################################################################
# Overwrite some methods and add new ones
###########################################################################
@contextmanager
def stasher(self):
"""
A stashing contextmanager.
"""
# nonlocal for python2
stashed = [False]
clean = [False]
def stash():
if clean[0] or not self.repo.is_dirty(submodules=False):
clean[0] = True
return
if stashed[0]:
return
if self.change_count > 1:
message = 'stashing {0} changes'
else:
message = 'stashing {0} change'
print(colored(
message.format(self.change_count),
'magenta'
))
try:
self._run('stash')
except GitError as e:
raise StashError(stderr=e.stderr, stdout=e.stdout)
stashed[0] = True
yield stash
if stashed[0]:
print(colored('unstashing', 'magenta'))
try:
self._run('stash', 'pop')
except GitError as e:
raise UnstashError(stderr=e.stderr, stdout=e.stdout)
def checkout(self, branch_name):
""" Checkout a branch by name. """
try:
find(
self.repo.branches, lambda b: b.name == branch_name
).checkout()
except OrigCheckoutError as e:
raise CheckoutError(branch_name, details=e)
def rebase(self, target_branch):
""" Rebase to target branch. """
current_branch = self.repo.active_branch
arguments = (
([self.config('git-up.rebase.arguments')] or []) +
[target_branch.name]
)
try:
self._run('rebase', *arguments)
except GitError as e:
raise RebaseError(current_branch.name, target_branch.name,
**e.__dict__)
def fetch(self, *args, **kwargs):
""" Fetch remote commits. """
# Unlike the other git commands, we want to output `git fetch`'s
# output in real time. Therefore we use a different implementation
# from `GitWrapper._run` which buffers all output.
# In theory this may deadlock if `git fetch` prints more than 8 KB
# to stderr which is here assumed to not happen in day-to-day use.
stdout = six.b('')
# Execute command
cmd = self.git.fetch(as_process=True, *args, **kwargs)
# Capture output
while True:
output = cmd.stdout.read(1)
sys.stdout.write(output.decode('utf-8'))
sys.stdout.flush()
stdout += output
# Check for EOF
if output == six.b(""):
break
# Wait for the process to quit
try:
cmd.wait()
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def push(self, *args, **kwargs):
''' Push commits to remote '''
stdout = six.b('')
# Execute command
cmd = self.git.push(as_process=True, *args, **kwargs)
# Capture output
while True:
output = cmd.stdout.read(1)
sys.stdout.write(output.decode('utf-8'))
sys.stdout.flush()
stdout += output
# Check for EOF
if output == six.b(""):
break
# Wait for the process to quit
try:
cmd.wait()
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def config(self, key):
""" Return `git config key` output or None. """
try:
return self.git.config(key)
except GitCommandError:
return None
@property
def change_count(self):
""" The number of changes in the working directory. """
status = self.git.status(porcelain=True, untracked_files='no').strip()
if not status:
return 0
else:
return len(status.split('\n'))
@property
def version(self):
"""
Return git's version as a list of numbers.
The original repo.git.version_info has problems with tome types of
git version strings.
"""
return re.search(r'\d+(\.\d+)+', self.git.version()).group(0)
def is_version_min(self, required_version):
""" Does git's version match the requirements? """
return self.version.split('.') >= required_version.split('.')
|
msiemens/PyGitUp | PyGitUp/git_wrapper.py | GitWrapper.push | python | def push(self, *args, **kwargs):
''' Push commits to remote '''
stdout = six.b('')
# Execute command
cmd = self.git.push(as_process=True, *args, **kwargs)
# Capture output
while True:
output = cmd.stdout.read(1)
sys.stdout.write(output.decode('utf-8'))
sys.stdout.flush()
stdout += output
# Check for EOF
if output == six.b(""):
break
# Wait for the process to quit
try:
cmd.wait()
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip() | Push commits to remote | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/git_wrapper.py#L220-L252 | null | class GitWrapper(object):
"""
A wrapper for repo.git providing better stdout handling + better exeptions.
It is preferred to repo.git because it doesn't print to stdout
in real time. In addition, this wrapper provides better error
handling (it provides stdout messages inside the exception, too).
"""
def __init__(self, repo):
if repo:
#: :type: git.Repo
self.repo = repo
#: :type: git.Git
self.git = self.repo.git
else:
#: :type: git.Git
self.git = Git()
def __del__(self):
# Is the following true?
# GitPython runs persistent git processes in the working directory.
# Therefore, when we use 'git up' in something like a test environment,
# this might cause troubles because of the open file handlers (like
# trying to remove the directory right after the test has finished).
# 'clear_cache' kills the processes...
if platform.system() == 'Windows': # pragma: no cover
pass
# ... or rather "should kill", because but somehow it recently
# started to not kill cat_file_header out of the blue (I even
# tried running old code, but the once working code failed).
# Thus, we kill it manually here.
if self.git.cat_file_header is not None:
subprocess.call(("TASKKILL /F /T /PID {0} 2>nul 1>nul".format(
str(self.git.cat_file_header.proc.pid)
)), shell=True)
if self.git.cat_file_all is not None:
subprocess.call(("TASKKILL /F /T /PID {0} 2>nul 1>nul".format(
str(self.git.cat_file_all.proc.pid)
)), shell=True)
self.git.clear_cache()
def _run(self, name, *args, **kwargs):
""" Run a git command specified by name and args/kwargs. """
stdout = six.b('')
cmd = getattr(self.git, name)
# Ask cmd(...) to return a (status, stdout, stderr) tuple
kwargs['with_extended_output'] = True
# Execute command
try:
(_, stdout, _) = cmd(*args, **kwargs)
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def __getattr__(self, name):
return lambda *args, **kwargs: self._run(name, *args, **kwargs)
###########################################################################
# Overwrite some methods and add new ones
###########################################################################
@contextmanager
def stasher(self):
"""
A stashing contextmanager.
"""
# nonlocal for python2
stashed = [False]
clean = [False]
def stash():
if clean[0] or not self.repo.is_dirty(submodules=False):
clean[0] = True
return
if stashed[0]:
return
if self.change_count > 1:
message = 'stashing {0} changes'
else:
message = 'stashing {0} change'
print(colored(
message.format(self.change_count),
'magenta'
))
try:
self._run('stash')
except GitError as e:
raise StashError(stderr=e.stderr, stdout=e.stdout)
stashed[0] = True
yield stash
if stashed[0]:
print(colored('unstashing', 'magenta'))
try:
self._run('stash', 'pop')
except GitError as e:
raise UnstashError(stderr=e.stderr, stdout=e.stdout)
def checkout(self, branch_name):
""" Checkout a branch by name. """
try:
find(
self.repo.branches, lambda b: b.name == branch_name
).checkout()
except OrigCheckoutError as e:
raise CheckoutError(branch_name, details=e)
def rebase(self, target_branch):
""" Rebase to target branch. """
current_branch = self.repo.active_branch
arguments = (
([self.config('git-up.rebase.arguments')] or []) +
[target_branch.name]
)
try:
self._run('rebase', *arguments)
except GitError as e:
raise RebaseError(current_branch.name, target_branch.name,
**e.__dict__)
def fetch(self, *args, **kwargs):
""" Fetch remote commits. """
# Unlike the other git commands, we want to output `git fetch`'s
# output in real time. Therefore we use a different implementation
# from `GitWrapper._run` which buffers all output.
# In theory this may deadlock if `git fetch` prints more than 8 KB
# to stderr which is here assumed to not happen in day-to-day use.
stdout = six.b('')
# Execute command
cmd = self.git.fetch(as_process=True, *args, **kwargs)
# Capture output
while True:
output = cmd.stdout.read(1)
sys.stdout.write(output.decode('utf-8'))
sys.stdout.flush()
stdout += output
# Check for EOF
if output == six.b(""):
break
# Wait for the process to quit
try:
cmd.wait()
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def push(self, *args, **kwargs):
''' Push commits to remote '''
stdout = six.b('')
# Execute command
cmd = self.git.push(as_process=True, *args, **kwargs)
# Capture output
while True:
output = cmd.stdout.read(1)
sys.stdout.write(output.decode('utf-8'))
sys.stdout.flush()
stdout += output
# Check for EOF
if output == six.b(""):
break
# Wait for the process to quit
try:
cmd.wait()
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def config(self, key):
""" Return `git config key` output or None. """
try:
return self.git.config(key)
except GitCommandError:
return None
@property
def change_count(self):
""" The number of changes in the working directory. """
status = self.git.status(porcelain=True, untracked_files='no').strip()
if not status:
return 0
else:
return len(status.split('\n'))
@property
def version(self):
"""
Return git's version as a list of numbers.
The original repo.git.version_info has problems with tome types of
git version strings.
"""
return re.search(r'\d+(\.\d+)+', self.git.version()).group(0)
def is_version_min(self, required_version):
""" Does git's version match the requirements? """
return self.version.split('.') >= required_version.split('.')
|
msiemens/PyGitUp | PyGitUp/git_wrapper.py | GitWrapper.change_count | python | def change_count(self):
""" The number of changes in the working directory. """
status = self.git.status(porcelain=True, untracked_files='no').strip()
if not status:
return 0
else:
return len(status.split('\n')) | The number of changes in the working directory. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/git_wrapper.py#L262-L268 | null | class GitWrapper(object):
"""
A wrapper for repo.git providing better stdout handling + better exeptions.
It is preferred to repo.git because it doesn't print to stdout
in real time. In addition, this wrapper provides better error
handling (it provides stdout messages inside the exception, too).
"""
def __init__(self, repo):
if repo:
#: :type: git.Repo
self.repo = repo
#: :type: git.Git
self.git = self.repo.git
else:
#: :type: git.Git
self.git = Git()
def __del__(self):
# Is the following true?
# GitPython runs persistent git processes in the working directory.
# Therefore, when we use 'git up' in something like a test environment,
# this might cause troubles because of the open file handlers (like
# trying to remove the directory right after the test has finished).
# 'clear_cache' kills the processes...
if platform.system() == 'Windows': # pragma: no cover
pass
# ... or rather "should kill", because but somehow it recently
# started to not kill cat_file_header out of the blue (I even
# tried running old code, but the once working code failed).
# Thus, we kill it manually here.
if self.git.cat_file_header is not None:
subprocess.call(("TASKKILL /F /T /PID {0} 2>nul 1>nul".format(
str(self.git.cat_file_header.proc.pid)
)), shell=True)
if self.git.cat_file_all is not None:
subprocess.call(("TASKKILL /F /T /PID {0} 2>nul 1>nul".format(
str(self.git.cat_file_all.proc.pid)
)), shell=True)
self.git.clear_cache()
def _run(self, name, *args, **kwargs):
""" Run a git command specified by name and args/kwargs. """
stdout = six.b('')
cmd = getattr(self.git, name)
# Ask cmd(...) to return a (status, stdout, stderr) tuple
kwargs['with_extended_output'] = True
# Execute command
try:
(_, stdout, _) = cmd(*args, **kwargs)
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def __getattr__(self, name):
return lambda *args, **kwargs: self._run(name, *args, **kwargs)
###########################################################################
# Overwrite some methods and add new ones
###########################################################################
@contextmanager
def stasher(self):
"""
A stashing contextmanager.
"""
# nonlocal for python2
stashed = [False]
clean = [False]
def stash():
if clean[0] or not self.repo.is_dirty(submodules=False):
clean[0] = True
return
if stashed[0]:
return
if self.change_count > 1:
message = 'stashing {0} changes'
else:
message = 'stashing {0} change'
print(colored(
message.format(self.change_count),
'magenta'
))
try:
self._run('stash')
except GitError as e:
raise StashError(stderr=e.stderr, stdout=e.stdout)
stashed[0] = True
yield stash
if stashed[0]:
print(colored('unstashing', 'magenta'))
try:
self._run('stash', 'pop')
except GitError as e:
raise UnstashError(stderr=e.stderr, stdout=e.stdout)
def checkout(self, branch_name):
""" Checkout a branch by name. """
try:
find(
self.repo.branches, lambda b: b.name == branch_name
).checkout()
except OrigCheckoutError as e:
raise CheckoutError(branch_name, details=e)
def rebase(self, target_branch):
""" Rebase to target branch. """
current_branch = self.repo.active_branch
arguments = (
([self.config('git-up.rebase.arguments')] or []) +
[target_branch.name]
)
try:
self._run('rebase', *arguments)
except GitError as e:
raise RebaseError(current_branch.name, target_branch.name,
**e.__dict__)
def fetch(self, *args, **kwargs):
""" Fetch remote commits. """
# Unlike the other git commands, we want to output `git fetch`'s
# output in real time. Therefore we use a different implementation
# from `GitWrapper._run` which buffers all output.
# In theory this may deadlock if `git fetch` prints more than 8 KB
# to stderr which is here assumed to not happen in day-to-day use.
stdout = six.b('')
# Execute command
cmd = self.git.fetch(as_process=True, *args, **kwargs)
# Capture output
while True:
output = cmd.stdout.read(1)
sys.stdout.write(output.decode('utf-8'))
sys.stdout.flush()
stdout += output
# Check for EOF
if output == six.b(""):
break
# Wait for the process to quit
try:
cmd.wait()
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def push(self, *args, **kwargs):
''' Push commits to remote '''
stdout = six.b('')
# Execute command
cmd = self.git.push(as_process=True, *args, **kwargs)
# Capture output
while True:
output = cmd.stdout.read(1)
sys.stdout.write(output.decode('utf-8'))
sys.stdout.flush()
stdout += output
# Check for EOF
if output == six.b(""):
break
# Wait for the process to quit
try:
cmd.wait()
except GitCommandError as error:
# Add more meta-information to errors
message = "'{0}' returned exit status {1}".format(
' '.join(str(c) for c in error.command),
error.status
)
raise GitError(message, stderr=error.stderr, stdout=stdout)
return stdout.strip()
def config(self, key):
""" Return `git config key` output or None. """
try:
return self.git.config(key)
except GitCommandError:
return None
@property
def change_count(self):
""" The number of changes in the working directory. """
status = self.git.status(porcelain=True, untracked_files='no').strip()
if not status:
return 0
else:
return len(status.split('\n'))
@property
def version(self):
"""
Return git's version as a list of numbers.
The original repo.git.version_info has problems with tome types of
git version strings.
"""
return re.search(r'\d+(\.\d+)+', self.git.version()).group(0)
def is_version_min(self, required_version):
""" Does git's version match the requirements? """
return self.version.split('.') >= required_version.split('.')
|
msiemens/PyGitUp | PyGitUp/utils.py | uniq | python | def uniq(seq):
""" Return a copy of seq without duplicates. """
seen = set()
return [x for x in seq if str(x) not in seen and not seen.add(str(x))] | Return a copy of seq without duplicates. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/utils.py#L24-L27 | null | # coding=utf-8
"""
Some simple, generic usefull methods.
"""
import os
import subprocess
import sys
try:
from subprocess import DEVNULL # py3k
except ImportError:
import os
DEVNULL = open(os.devnull, 'wb')
def find(seq, test):
    """Return the first item of *seq* for which ``test(item)`` is truthy,
    or None when no item matches."""
    for candidate in seq:
        if test(candidate):
            return candidate
    return None
def uniq(seq):
    """Return a copy of *seq* without duplicates, preserving first-occurrence
    order.  Items are considered duplicates when their str() forms match."""
    seen = set()
    result = []
    for item in seq:
        key = str(item)
        if key not in seen:
            seen.add(key)
            result.append(item)
    return result
def execute(cmd, cwd=None):
""" Execute a command and return it's output. """
try:
lines = subprocess \
.check_output(cmd, cwd=cwd, stderr=DEVNULL) \
.splitlines()
except subprocess.CalledProcessError:
return None
else:
if lines:
return decode(lines[0].strip())
else:
return None
def decode(s):
"""
Decode a string using the system encoding if needed (ie byte strings)
"""
if isinstance(s, bytes):
return s.decode(sys.getdefaultencoding())
else:
return s
|
msiemens/PyGitUp | PyGitUp/utils.py | execute | python | def execute(cmd, cwd=None):
""" Execute a command and return it's output. """
try:
lines = subprocess \
.check_output(cmd, cwd=cwd, stderr=DEVNULL) \
.splitlines()
except subprocess.CalledProcessError:
return None
else:
if lines:
return decode(lines[0].strip())
else:
return None | Execute a command and return it's output. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/utils.py#L30-L42 | [
"def decode(s):\n \"\"\"\n Decode a string using the system encoding if needed (ie byte strings)\n \"\"\"\n if isinstance(s, bytes):\n return s.decode(sys.getdefaultencoding())\n else:\n return s\n"
] | # coding=utf-8
"""
Some simple, generic usefull methods.
"""
import os
import subprocess
import sys
try:
from subprocess import DEVNULL # py3k
except ImportError:
import os
DEVNULL = open(os.devnull, 'wb')
def find(seq, test):
""" Return first item in sequence where test(item) == True """
for item in seq:
if test(item):
return item
def uniq(seq):
""" Return a copy of seq without duplicates. """
seen = set()
return [x for x in seq if str(x) not in seen and not seen.add(str(x))]
def execute(cmd, cwd=None):
""" Execute a command and return it's output. """
try:
lines = subprocess \
.check_output(cmd, cwd=cwd, stderr=DEVNULL) \
.splitlines()
except subprocess.CalledProcessError:
return None
else:
if lines:
return decode(lines[0].strip())
else:
return None
def decode(s):
"""
Decode a string using the system encoding if needed (ie byte strings)
"""
if isinstance(s, bytes):
return s.decode(sys.getdefaultencoding())
else:
return s
|
msiemens/PyGitUp | PyGitUp/utils.py | decode | python | def decode(s):
"""
Decode a string using the system encoding if needed (ie byte strings)
"""
if isinstance(s, bytes):
return s.decode(sys.getdefaultencoding())
else:
return s | Decode a string using the system encoding if needed (ie byte strings) | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/utils.py#L45-L52 | null | # coding=utf-8
"""
Some simple, generic usefull methods.
"""
import os
import subprocess
import sys
try:
from subprocess import DEVNULL # py3k
except ImportError:
import os
DEVNULL = open(os.devnull, 'wb')
def find(seq, test):
""" Return first item in sequence where test(item) == True """
for item in seq:
if test(item):
return item
def uniq(seq):
""" Return a copy of seq without duplicates. """
seen = set()
return [x for x in seq if str(x) not in seen and not seen.add(str(x))]
def execute(cmd, cwd=None):
""" Execute a command and return it's output. """
try:
lines = subprocess \
.check_output(cmd, cwd=cwd, stderr=DEVNULL) \
.splitlines()
except subprocess.CalledProcessError:
return None
else:
if lines:
return decode(lines[0].strip())
else:
return None
def decode(s):
"""
Decode a string using the system encoding if needed (ie byte strings)
"""
if isinstance(s, bytes):
return s.decode(sys.getdefaultencoding())
else:
return s
|
msiemens/PyGitUp | release.py | current_version | python | def current_version():
# Monkeypatch setuptools.setup so we get the verison number
import setuptools
version = [None]
def monkey_setup(**settings):
version[0] = settings['version']
old_setup = setuptools.setup
setuptools.setup = monkey_setup
import setup # setup.py
reload(setup)
setuptools.setup = old_setup
return version[0] | Get the current version number from setup.py | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/release.py#L22-L41 | null | import os
import sys
from contextlib import contextmanager
from datetime import datetime
import git
# VARIABBLES DEFINITIONS
root_dir = os.path.dirname(__file__)
repo = git.Repo(root_dir)
# ASSERTIONS
# assert not repo.is_dirty(), 'Repo {} is dirty!'.format(root_dir)
# METHODS
def get_branch(name):
return [b for b in repo.branches if b.name == name][0]
def update_changelog(changes, new_version):
readme_path = os.path.join(root_dir, 'README.rst')
to_append = 'v' + new_version
to_append += ' (*' + datetime.now().strftime('%Y-%m-%d') + '*)' + '\n'
to_append += '~' * (len(to_append) - 1) + '\n'
to_append += '\n'
to_append += changes.strip() + '\n'
new_file = []
changelog_found = False
with open(readme_path) as f:
for l in f:
line = l.strip()
if line == 'Changelog':
print 'Found changelog!'
changelog_found = True
new_file.append(line)
continue
elif changelog_found and line == '---------':
print 'Found seperator!'
new_file.append(line)
new_file.append('')
new_file.extend(to_append.splitlines())
else:
new_file.append(line)
with open(readme_path, 'w') as f:
f.writelines(new_file)
def show_diff():
print repo.git.diff()
def merge(new_version):
# git checkout master
get_branch('master').checkout()
# git merge --no-ff dev
repo.git.merge('def', no_ff=True)
# git tag vX.X
repo.git.tag('v' + new_version)
def push():
repo.git.push()
repo.git.push(tags=True)
def upload_package():
_argv = sys.argv
sys.argv = ['__main__', 'sdist', 'upload']
import setup
reload(setup)
sys.argv = _argv
@contextmanager
def returning_to_dev():
yield
repo.git.checkout('dev')
|
msiemens/PyGitUp | PyGitUp/gitup.py | run | python | def run(version, quiet, no_fetch, push, **kwargs): # pragma: no cover
"""
A nicer `git pull`.
"""
if version:
if NO_DISTRIBUTE:
print(colored('Please install \'git-up\' via pip in order to '
'get version information.', 'yellow'))
else:
GitUp(sparse=True).version_info()
return
if quiet:
sys.stdout = StringIO()
try:
gitup = GitUp()
if push is not None:
gitup.settings['push.auto'] = push
# if arguments['--no-fetch'] or arguments['--no-f']:
if no_fetch:
gitup.should_fetch = False
except GitError:
sys.exit(1) # Error in constructor
else:
gitup.run() | A nicer `git pull`. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/gitup.py#L628-L656 | [
"def run(self):\n \"\"\" Run all the git-up stuff. \"\"\"\n try:\n if self.should_fetch:\n self.fetch()\n\n self.rebase_all_branches()\n\n if self.with_bundler():\n self.check_bundler()\n\n if self.settings['push.auto']:\n self.push()\n\n except GitError as error:\n self.print_error(error)\n\n # Used for test cases\n if self.testing:\n raise\n else: # pragma: no cover\n sys.exit(1)\n",
"def version_info(self):\n \"\"\" Tell, what version we're running at and if it's up to date. \"\"\"\n\n # Retrive and show local version info\n package = pkg.get_distribution('git-up')\n local_version_str = package.version\n local_version = package.parsed_version\n\n print('GitUp version is: ' + colored('v' + local_version_str, 'green'))\n\n if not self.settings['updates.check']:\n return\n\n # Check for updates\n print('Checking for updates...', end='')\n\n try:\n # Get version information from the PyPI JSON API\n reader = codecs.getreader('utf-8')\n details = json.load(reader(urlopen(PYPI_URL)))\n online_version = details['info']['version']\n except (HTTPError, URLError, ValueError):\n recent = True # To not disturb the user with HTTP/parsing errors\n else:\n recent = local_version >= pkg.parse_version(online_version)\n\n if not recent:\n # noinspection PyUnboundLocalVariable\n print(\n '\\rRecent version is: '\n + colored('v' + online_version, color='yellow', attrs=['bold'])\n )\n print('Run \\'pip install -U git-up\\' to get the update.')\n else:\n # Clear the update line\n sys.stdout.write('\\r' + ' ' * 80 + '\\n')\n"
] | # coding=utf-8
from __future__ import print_function
from git import Git
from git import GitCommandNotFound
__all__ = ['GitUp']
###############################################################################
# IMPORTS and LIBRARIES SETUP
###############################################################################
# Python libs
import codecs
import errno
import sys
import os
import re
import json
import subprocess
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
import six
from six.moves import cStringIO as StringIO
from six.moves.urllib.error import HTTPError, URLError
from six.moves.urllib.request import urlopen
# 3rd party libs
try:
# noinspection PyUnresolvedReferences
import pkg_resources as pkg
except ImportError: # pragma: no cover
NO_DISTRIBUTE = True
else: # pragma: no cover
NO_DISTRIBUTE = False
import click
import colorama
from git import Repo, GitCmdObjectDB
from termcolor import colored
# PyGitUp libs
from PyGitUp.utils import execute, uniq, find
from PyGitUp.git_wrapper import GitWrapper, GitError
ON_WINDOWS = sys.platform == 'win32'
###############################################################################
# Setup of 3rd party libs
###############################################################################
colorama.init(autoreset=True, convert=ON_WINDOWS)
###############################################################################
# Setup constants
###############################################################################
PYPI_URL = 'https://pypi.python.org/pypi/git-up/json'
###############################################################################
# GitUp
###############################################################################
def get_git_dir():
toplevel_dir = execute(['git', 'rev-parse', '--show-toplevel'])
if toplevel_dir is not None \
and os.path.isfile(os.path.join(toplevel_dir, '.git')):
# Not a normal git repo. Check if it's a submodule, then use
# toplevel_dir. Otherwise it's a worktree, thus use common_dir.
# NOTE: git worktree support only comes with git v2.5.0 or
# later, on earler versions toplevel_dir is the best we can do.
cmd = ['git', 'rev-parse', '--is-inside-work-tree']
inside_worktree = execute(cmd, cwd=os.path.join(toplevel_dir, '..'))
if inside_worktree == 'true' or Git().version_info[:3] < (2, 5, 0):
return toplevel_dir
else:
return execute(['git', 'rev-parse', '--git-common-dir'])
return toplevel_dir
class GitUp(object):
""" Conainter class for GitUp methods """
default_settings = {
'bundler.check': False,
'bundler.autoinstall': False,
'bundler.local': False,
'bundler.rbenv': False,
'fetch.prune': True,
'fetch.all': False,
'rebase.show-hashes': False,
'rebase.arguments': None,
'rebase.auto': True,
'rebase.log-hook': None,
'updates.check': True,
'push.auto': False,
'push.tags': False,
'push.all': False,
}
def __init__(self, testing=False, sparse=False):
# Sparse init: config only
if sparse:
self.git = GitWrapper(None)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
return
# Testing: redirect stderr to stdout
self.testing = testing
if self.testing:
self.stderr = sys.stdout # Quiet testing
else: # pragma: no cover
self.stderr = sys.stderr
self.states = []
self.should_fetch = True
self.pushed = False
# Check, if we're in a git repo
try:
repo_dir = get_git_dir()
except (EnvironmentError, OSError, GitCommandNotFound) as e:
if isinstance(e, GitCommandNotFound) or e.errno == errno.ENOENT:
exc = GitError("The git executable could not be found")
raise exc
else:
raise
else:
if repo_dir is None:
exc = GitError("We don't seem to be in a git repository.")
raise exc
self.repo = Repo(repo_dir, odbt=GitCmdObjectDB)
# Check for branch tracking informatino
if not any(b.tracking_branch() for b in self.repo.branches):
exc = GitError("Can\'t update your repo because it doesn\'t has "
"any branches with tracking information.")
self.print_error(exc)
raise exc
self.git = GitWrapper(self.repo)
# target_map: map local branch names to remote tracking branches
#: :type: dict[str, git.refs.remote.RemoteReference]
self.target_map = dict()
for branch in self.repo.branches:
target = branch.tracking_branch()
if target:
if target.name.startswith('./'):
# Tracking branch is in local repo
target.is_local = True
else:
target.is_local = False
self.target_map[branch.name] = target
# branches: all local branches with tracking information
#: :type: list[git.refs.head.Head]
self.branches = [b for b in self.repo.branches if b.tracking_branch()]
self.branches.sort(key=lambda br: br.name)
# remotes: all remotes that are associated with local branches
#: :type: list[git.refs.remote.RemoteReference]
self.remotes = uniq(
# name = '<remote>/<branch>' -> '<remote>'
[r.name.split('/', 2)[0]
for r in list(self.target_map.values())]
)
# change_count: Number of unstaged changes
self.change_count = len(
self.git.status(porcelain=True, untracked_files='no').split('\n')
)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
def run(self):
""" Run all the git-up stuff. """
try:
if self.should_fetch:
self.fetch()
self.rebase_all_branches()
if self.with_bundler():
self.check_bundler()
if self.settings['push.auto']:
self.push()
except GitError as error:
self.print_error(error)
# Used for test cases
if self.testing:
raise
else: # pragma: no cover
sys.exit(1)
def rebase_all_branches(self):
""" Rebase all branches, if possible. """
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
original_branch = self.repo.active_branch
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
self.log(branch, target)
if fast_fastforward:
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout()
def fetch(self):
"""
Fetch the recent refs from the remotes.
Unless git-up.fetch.all is set to true, all remotes with
locally existent branches will be fetched.
"""
fetch_kwargs = {'multiple': True}
fetch_args = []
if self.is_prune():
fetch_kwargs['prune'] = True
if self.settings['fetch.all']:
fetch_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git fetch --multiple` will fail
return
fetch_args.append(self.remotes)
try:
self.git.fetch(*fetch_args, **fetch_kwargs)
except GitError as error:
error.message = "`git fetch` failed"
raise error
def push(self):
"""
Push the changes back to the remote(s) after fetching
"""
print('pushing...')
push_kwargs = {}
push_args = []
if self.settings['push.tags']:
push_kwargs['push'] = True
if self.settings['push.all']:
push_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git push` will fail
return
push_args.append(self.remotes)
try:
self.git.push(*push_args, **push_kwargs)
self.pushed = True
except GitError as error:
error.message = "`git push` failed"
raise error
def log(self, branch, remote):
""" Call a log-command, if set by git-up.fetch.all. """
log_hook = self.settings['rebase.log-hook']
if log_hook:
if ON_WINDOWS: # pragma: no cover
# Running a string in CMD from Python is not that easy on
# Windows. Running 'cmd /C log_hook' produces problems when
# using multiple statements or things like 'echo'. Therefore,
# we write the string to a bat file and execute it.
# In addition, we replace occurences of $1 with %1 and so forth
# in case the user is used to Bash or sh.
# If there are occurences of %something, we'll replace it with
# %%something. This is the case when running something like
# 'git log --pretty=format:"%Cred%h..."'.
# Also, we replace a semicolon with a newline, because if you
# start with 'echo' on Windows, it will simply echo the
# semicolon and the commands behind instead of echoing and then
# running other commands
# Prepare log_hook
log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
log_hook = re.sub(r'; ?', r'\n', log_hook)
# Write log_hook to an temporary file and get it's path
with NamedTemporaryFile(
prefix='PyGitUp.', suffix='.bat', delete=False
) as bat_file:
# Don't echo all commands
bat_file.file.write(b'@echo off\n')
# Run log_hook
bat_file.file.write(log_hook.encode('utf-8'))
# Run bat_file
state = subprocess.call(
[bat_file.name, branch.name, remote.name]
)
# Clean up file
os.remove(bat_file.name)
else: # pragma: no cover
# Run log_hook via 'shell -c'
state = subprocess.call(
[log_hook, 'git-up', branch.name, remote.name],
shell=True
)
if self.testing:
assert state == 0, 'log_hook returned != 0'
def version_info(self):
""" Tell, what version we're running at and if it's up to date. """
# Retrive and show local version info
package = pkg.get_distribution('git-up')
local_version_str = package.version
local_version = package.parsed_version
print('GitUp version is: ' + colored('v' + local_version_str, 'green'))
if not self.settings['updates.check']:
return
# Check for updates
print('Checking for updates...', end='')
try:
# Get version information from the PyPI JSON API
reader = codecs.getreader('utf-8')
details = json.load(reader(urlopen(PYPI_URL)))
online_version = details['info']['version']
except (HTTPError, URLError, ValueError):
recent = True # To not disturb the user with HTTP/parsing errors
else:
recent = local_version >= pkg.parse_version(online_version)
if not recent:
# noinspection PyUnboundLocalVariable
print(
'\rRecent version is: '
+ colored('v' + online_version, color='yellow', attrs=['bold'])
)
print('Run \'pip install -U git-up\' to get the update.')
else:
# Clear the update line
sys.stdout.write('\r' + ' ' * 80 + '\n')
###########################################################################
# Helpers
###########################################################################
def load_config(self):
"""
Load the configuration from git config.
"""
for key in self.settings:
value = self.config(key)
# Parse true/false
if value == '' or value is None:
continue # Not set by user, go on
if value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
elif value:
pass # A user-defined string, store the value later
self.settings[key] = value
def config(self, key):
""" Get a git-up-specific config value. """
return self.git.config('git-up.{0}'.format(key))
def is_prune(self):
"""
Return True, if `git fetch --prune` is allowed.
Because of possible incompatibilities, this requires special
treatment.
"""
required_version = "1.6.6"
config_value = self.settings['fetch.prune']
if self.git.is_version_min(required_version):
return config_value is not False
else: # pragma: no cover
if config_value == 'true':
print(colored(
"Warning: fetch.prune is set to 'true' but your git"
"version doesn't seem to support it ({0} < {1})."
"Defaulting to 'false'.".format(self.git.version,
required_version),
'yellow'
))
###########################################################################
# Gemfile Checking
###########################################################################
def with_bundler(self):
"""
Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool
"""
def gemfile_exists():
"""
Check, if a Gemfile exists in the current repo.
"""
return os.path.exists('Gemfile')
if 'GIT_UP_BUNDLER_CHECK' in os.environ:
print(colored(
'''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.
You can now tell git-up to check (or not check) for missing
gems on a per-project basis using git's config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
project's directory:
git config git-up.bundler.check true
Replace 'true' with 'false' to disable checking.''', 'yellow'))
if self.settings['bundler.check']:
return gemfile_exists()
if ('GIT_UP_BUNDLER_CHECK' in os.environ
and os.environ['GIT_UP_BUNDLER_CHECK'] == 'true'):
return gemfile_exists()
return False
def check_bundler(self):
"""
Run the bundler check.
"""
def get_config(name):
return name if self.config('bundler.' + name) else ''
from pkg_resources import Requirement, resource_filename
relative_path = os.path.join('PyGitUp', 'check-bundler.rb')
bundler_script = resource_filename(Requirement.parse('git-up'),
relative_path)
assert os.path.exists(bundler_script), 'check-bundler.rb doesn\'t ' \
'exist!'
return_value = subprocess.call(
['ruby', bundler_script, get_config('autoinstall'),
get_config('local'), get_config('rbenv')]
)
if self.testing:
assert return_value == 0, 'Errors while executing check-bundler.rb'
def print_error(self, error):
"""
Print more information about an error.
:type error: GitError
"""
print(colored(error.message, 'red'), file=self.stderr)
if error.stdout or error.stderr:
print(file=self.stderr)
print("Here's what git said:", file=self.stderr)
print(file=self.stderr)
if error.stdout:
print(error.stdout, file=self.stderr)
if error.stderr:
print(error.stderr, file=self.stderr)
if error.details:
print(file=self.stderr)
print("Here's what we know:", file=self.stderr)
print(str(error.details), file=self.stderr)
print(file=self.stderr)
###############################################################################
EPILOG = '''
For configuration options, please see
https://github.com/msiemens/PyGitUp#readme.
\b
Python port of https://github.com/aanand/git-up/
Project Author: Markus Siemens <markus@m-siemens.de>
Project URL: https://github.com/msiemens/PyGitUp
\b
'''
@click.command(epilog=EPILOG)
@click.option('-V', '--version', is_flag=True,
help='Show version (and if there is a newer version).')
@click.option('-q', '--quiet', is_flag=True,
help='Be quiet, only print error messages.')
@click.option('--no-fetch', '--no-f', is_flag=True,
help='Don\'t try to fetch from origin.')
@click.option('-p', '--push/--no-push', default=None,
help='Push the changes after pulling successfully.')
@click.help_option('-h', '--help')
def run(version, quiet, no_fetch, push, **kwargs): # pragma: no cover
"""
A nicer `git pull`.
"""
if version:
if NO_DISTRIBUTE:
print(colored('Please install \'git-up\' via pip in order to '
'get version information.', 'yellow'))
else:
GitUp(sparse=True).version_info()
return
if quiet:
sys.stdout = StringIO()
try:
gitup = GitUp()
if push is not None:
gitup.settings['push.auto'] = push
# if arguments['--no-fetch'] or arguments['--no-f']:
if no_fetch:
gitup.should_fetch = False
except GitError:
sys.exit(1) # Error in constructor
else:
gitup.run()
if __name__ == '__main__': # pragma: no cover
run(help_option_names=['-h'])
|
msiemens/PyGitUp | PyGitUp/gitup.py | GitUp.run | python | def run(self):
""" Run all the git-up stuff. """
try:
if self.should_fetch:
self.fetch()
self.rebase_all_branches()
if self.with_bundler():
self.check_bundler()
if self.settings['push.auto']:
self.push()
except GitError as error:
self.print_error(error)
# Used for test cases
if self.testing:
raise
else: # pragma: no cover
sys.exit(1) | Run all the git-up stuff. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/gitup.py#L193-L214 | [
"def rebase_all_branches(self):\n \"\"\" Rebase all branches, if possible. \"\"\"\n col_width = max(len(b.name) for b in self.branches) + 1\n if self.repo.head.is_detached:\n raise GitError(\"You're not currently on a branch. I'm exiting\"\n \" in case you're in the middle of something.\")\n original_branch = self.repo.active_branch\n\n with self.git.stasher() as stasher:\n for branch in self.branches:\n target = self.target_map[branch.name]\n\n # Print branch name\n if branch.name == original_branch.name:\n attrs = ['bold']\n else:\n attrs = []\n print(colored(branch.name.ljust(col_width), attrs=attrs),\n end=' ')\n\n # Check, if target branch exists\n try:\n if target.name.startswith('./'):\n # Check, if local branch exists\n self.git.rev_parse(target.name[2:])\n else:\n # Check, if remote branch exists\n _ = target.commit\n\n except (ValueError, GitError):\n # Remote branch doesn't exist!\n print(colored('error: remote branch doesn\\'t exist', 'red'))\n self.states.append('remote branch doesn\\'t exist')\n\n continue\n\n # Get tracking branch\n if target.is_local:\n target = find(self.repo.branches,\n lambda b: b.name == target.name[2:])\n\n # Check status and act appropriately\n if target.commit.hexsha == branch.commit.hexsha:\n print(colored('up to date', 'green'))\n self.states.append('up to date')\n\n continue # Do not do anything\n\n base = self.git.merge_base(branch.name, target.name)\n\n if base == target.commit.hexsha:\n print(colored('ahead of upstream', 'cyan'))\n self.states.append('ahead')\n\n continue # Do not do anything\n\n fast_fastforward = False\n if base == branch.commit.hexsha:\n print(colored('fast-forwarding...', 'yellow'), end='')\n self.states.append('fast-forwarding')\n # Don't fast fast-forward the currently checked-out branch\n fast_fastforward = (branch.name !=\n self.repo.active_branch.name)\n\n elif not self.settings['rebase.auto']:\n print(colored('diverged', 'red'))\n self.states.append('diverged')\n\n continue # Do not do 
anything\n else:\n print(colored('rebasing', 'yellow'), end='')\n self.states.append('rebasing')\n\n if self.settings['rebase.show-hashes']:\n print(' {}..{}'.format(base[0:7],\n target.commit.hexsha[0:7]))\n else:\n print()\n\n self.log(branch, target)\n if fast_fastforward:\n branch.commit = target.commit\n else:\n stasher()\n self.git.checkout(branch.name)\n self.git.rebase(target)\n\n if (self.repo.head.is_detached # Only on Travis CI,\n # we get a detached head after doing our rebase *confused*.\n # Running self.repo.active_branch would fail.\n or not self.repo.active_branch.name == original_branch.name):\n print(colored('returning to {0}'.format(original_branch.name),\n 'magenta'))\n original_branch.checkout()\n",
"def fetch(self):\n \"\"\"\n Fetch the recent refs from the remotes.\n\n Unless git-up.fetch.all is set to true, all remotes with\n locally existent branches will be fetched.\n \"\"\"\n fetch_kwargs = {'multiple': True}\n fetch_args = []\n\n if self.is_prune():\n fetch_kwargs['prune'] = True\n\n if self.settings['fetch.all']:\n fetch_kwargs['all'] = True\n else:\n if '.' in self.remotes:\n self.remotes.remove('.')\n\n if not self.remotes:\n # Only local target branches,\n # `git fetch --multiple` will fail\n return\n\n fetch_args.append(self.remotes)\n\n try:\n self.git.fetch(*fetch_args, **fetch_kwargs)\n except GitError as error:\n error.message = \"`git fetch` failed\"\n raise error\n",
" def with_bundler(self):\n \"\"\"\n Check, if bundler check is requested.\n\n Check, if the user wants us to check for new gems and return True in\n this case.\n :rtype : bool\n \"\"\"\n\n def gemfile_exists():\n \"\"\"\n Check, if a Gemfile exists in the current repo.\n \"\"\"\n return os.path.exists('Gemfile')\n\n if 'GIT_UP_BUNDLER_CHECK' in os.environ:\n print(colored(\n '''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.\nYou can now tell git-up to check (or not check) for missing\ngems on a per-project basis using git's config system. To\nset it globally, run this command anywhere:\n\ngit config --global git-up.bundler.check true\n\nTo set it within a project, run this command inside that\nproject's directory:\n\ngit config git-up.bundler.check true\n\nReplace 'true' with 'false' to disable checking.''', 'yellow'))\n\n if self.settings['bundler.check']:\n return gemfile_exists()\n\n if ('GIT_UP_BUNDLER_CHECK' in os.environ\n and os.environ['GIT_UP_BUNDLER_CHECK'] == 'true'):\n return gemfile_exists()\n\n return False\n",
"def print_error(self, error):\n \"\"\"\n Print more information about an error.\n\n :type error: GitError\n \"\"\"\n print(colored(error.message, 'red'), file=self.stderr)\n\n if error.stdout or error.stderr:\n print(file=self.stderr)\n print(\"Here's what git said:\", file=self.stderr)\n print(file=self.stderr)\n\n if error.stdout:\n print(error.stdout, file=self.stderr)\n if error.stderr:\n print(error.stderr, file=self.stderr)\n\n if error.details:\n print(file=self.stderr)\n print(\"Here's what we know:\", file=self.stderr)\n print(str(error.details), file=self.stderr)\n print(file=self.stderr)\n"
] | class GitUp(object):
""" Conainter class for GitUp methods """
default_settings = {
'bundler.check': False,
'bundler.autoinstall': False,
'bundler.local': False,
'bundler.rbenv': False,
'fetch.prune': True,
'fetch.all': False,
'rebase.show-hashes': False,
'rebase.arguments': None,
'rebase.auto': True,
'rebase.log-hook': None,
'updates.check': True,
'push.auto': False,
'push.tags': False,
'push.all': False,
}
def __init__(self, testing=False, sparse=False):
# Sparse init: config only
if sparse:
self.git = GitWrapper(None)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
return
# Testing: redirect stderr to stdout
self.testing = testing
if self.testing:
self.stderr = sys.stdout # Quiet testing
else: # pragma: no cover
self.stderr = sys.stderr
self.states = []
self.should_fetch = True
self.pushed = False
# Check, if we're in a git repo
try:
repo_dir = get_git_dir()
except (EnvironmentError, OSError, GitCommandNotFound) as e:
if isinstance(e, GitCommandNotFound) or e.errno == errno.ENOENT:
exc = GitError("The git executable could not be found")
raise exc
else:
raise
else:
if repo_dir is None:
exc = GitError("We don't seem to be in a git repository.")
raise exc
self.repo = Repo(repo_dir, odbt=GitCmdObjectDB)
# Check for branch tracking informatino
if not any(b.tracking_branch() for b in self.repo.branches):
exc = GitError("Can\'t update your repo because it doesn\'t has "
"any branches with tracking information.")
self.print_error(exc)
raise exc
self.git = GitWrapper(self.repo)
# target_map: map local branch names to remote tracking branches
#: :type: dict[str, git.refs.remote.RemoteReference]
self.target_map = dict()
for branch in self.repo.branches:
target = branch.tracking_branch()
if target:
if target.name.startswith('./'):
# Tracking branch is in local repo
target.is_local = True
else:
target.is_local = False
self.target_map[branch.name] = target
# branches: all local branches with tracking information
#: :type: list[git.refs.head.Head]
self.branches = [b for b in self.repo.branches if b.tracking_branch()]
self.branches.sort(key=lambda br: br.name)
# remotes: all remotes that are associated with local branches
#: :type: list[git.refs.remote.RemoteReference]
self.remotes = uniq(
# name = '<remote>/<branch>' -> '<remote>'
[r.name.split('/', 2)[0]
for r in list(self.target_map.values())]
)
# change_count: Number of unstaged changes
self.change_count = len(
self.git.status(porcelain=True, untracked_files='no').split('\n')
)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
def run(self):
""" Run all the git-up stuff. """
try:
if self.should_fetch:
self.fetch()
self.rebase_all_branches()
if self.with_bundler():
self.check_bundler()
if self.settings['push.auto']:
self.push()
except GitError as error:
self.print_error(error)
# Used for test cases
if self.testing:
raise
else: # pragma: no cover
sys.exit(1)
def rebase_all_branches(self):
""" Rebase all branches, if possible. """
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
original_branch = self.repo.active_branch
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
self.log(branch, target)
if fast_fastforward:
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout()
def fetch(self):
"""
Fetch the recent refs from the remotes.
Unless git-up.fetch.all is set to true, all remotes with
locally existent branches will be fetched.
"""
fetch_kwargs = {'multiple': True}
fetch_args = []
if self.is_prune():
fetch_kwargs['prune'] = True
if self.settings['fetch.all']:
fetch_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git fetch --multiple` will fail
return
fetch_args.append(self.remotes)
try:
self.git.fetch(*fetch_args, **fetch_kwargs)
except GitError as error:
error.message = "`git fetch` failed"
raise error
def push(self):
"""
Push the changes back to the remote(s) after fetching
"""
print('pushing...')
push_kwargs = {}
push_args = []
if self.settings['push.tags']:
push_kwargs['push'] = True
if self.settings['push.all']:
push_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git push` will fail
return
push_args.append(self.remotes)
try:
self.git.push(*push_args, **push_kwargs)
self.pushed = True
except GitError as error:
error.message = "`git push` failed"
raise error
def log(self, branch, remote):
""" Call a log-command, if set by git-up.fetch.all. """
log_hook = self.settings['rebase.log-hook']
if log_hook:
if ON_WINDOWS: # pragma: no cover
# Running a string in CMD from Python is not that easy on
# Windows. Running 'cmd /C log_hook' produces problems when
# using multiple statements or things like 'echo'. Therefore,
# we write the string to a bat file and execute it.
# In addition, we replace occurences of $1 with %1 and so forth
# in case the user is used to Bash or sh.
# If there are occurences of %something, we'll replace it with
# %%something. This is the case when running something like
# 'git log --pretty=format:"%Cred%h..."'.
# Also, we replace a semicolon with a newline, because if you
# start with 'echo' on Windows, it will simply echo the
# semicolon and the commands behind instead of echoing and then
# running other commands
# Prepare log_hook
log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
log_hook = re.sub(r'; ?', r'\n', log_hook)
# Write log_hook to an temporary file and get it's path
with NamedTemporaryFile(
prefix='PyGitUp.', suffix='.bat', delete=False
) as bat_file:
# Don't echo all commands
bat_file.file.write(b'@echo off\n')
# Run log_hook
bat_file.file.write(log_hook.encode('utf-8'))
# Run bat_file
state = subprocess.call(
[bat_file.name, branch.name, remote.name]
)
# Clean up file
os.remove(bat_file.name)
else: # pragma: no cover
# Run log_hook via 'shell -c'
state = subprocess.call(
[log_hook, 'git-up', branch.name, remote.name],
shell=True
)
if self.testing:
assert state == 0, 'log_hook returned != 0'
def version_info(self):
""" Tell, what version we're running at and if it's up to date. """
# Retrive and show local version info
package = pkg.get_distribution('git-up')
local_version_str = package.version
local_version = package.parsed_version
print('GitUp version is: ' + colored('v' + local_version_str, 'green'))
if not self.settings['updates.check']:
return
# Check for updates
print('Checking for updates...', end='')
try:
# Get version information from the PyPI JSON API
reader = codecs.getreader('utf-8')
details = json.load(reader(urlopen(PYPI_URL)))
online_version = details['info']['version']
except (HTTPError, URLError, ValueError):
recent = True # To not disturb the user with HTTP/parsing errors
else:
recent = local_version >= pkg.parse_version(online_version)
if not recent:
# noinspection PyUnboundLocalVariable
print(
'\rRecent version is: '
+ colored('v' + online_version, color='yellow', attrs=['bold'])
)
print('Run \'pip install -U git-up\' to get the update.')
else:
# Clear the update line
sys.stdout.write('\r' + ' ' * 80 + '\n')
###########################################################################
# Helpers
###########################################################################
def load_config(self):
"""
Load the configuration from git config.
"""
for key in self.settings:
value = self.config(key)
# Parse true/false
if value == '' or value is None:
continue # Not set by user, go on
if value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
elif value:
pass # A user-defined string, store the value later
self.settings[key] = value
def config(self, key):
""" Get a git-up-specific config value. """
return self.git.config('git-up.{0}'.format(key))
def is_prune(self):
"""
Return True, if `git fetch --prune` is allowed.
Because of possible incompatibilities, this requires special
treatment.
"""
required_version = "1.6.6"
config_value = self.settings['fetch.prune']
if self.git.is_version_min(required_version):
return config_value is not False
else: # pragma: no cover
if config_value == 'true':
print(colored(
"Warning: fetch.prune is set to 'true' but your git"
"version doesn't seem to support it ({0} < {1})."
"Defaulting to 'false'.".format(self.git.version,
required_version),
'yellow'
))
###########################################################################
# Gemfile Checking
###########################################################################
def with_bundler(self):
"""
Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool
"""
def gemfile_exists():
"""
Check, if a Gemfile exists in the current repo.
"""
return os.path.exists('Gemfile')
if 'GIT_UP_BUNDLER_CHECK' in os.environ:
print(colored(
'''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.
You can now tell git-up to check (or not check) for missing
gems on a per-project basis using git's config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
project's directory:
git config git-up.bundler.check true
Replace 'true' with 'false' to disable checking.''', 'yellow'))
if self.settings['bundler.check']:
return gemfile_exists()
if ('GIT_UP_BUNDLER_CHECK' in os.environ
and os.environ['GIT_UP_BUNDLER_CHECK'] == 'true'):
return gemfile_exists()
return False
def check_bundler(self):
"""
Run the bundler check.
"""
def get_config(name):
return name if self.config('bundler.' + name) else ''
from pkg_resources import Requirement, resource_filename
relative_path = os.path.join('PyGitUp', 'check-bundler.rb')
bundler_script = resource_filename(Requirement.parse('git-up'),
relative_path)
assert os.path.exists(bundler_script), 'check-bundler.rb doesn\'t ' \
'exist!'
return_value = subprocess.call(
['ruby', bundler_script, get_config('autoinstall'),
get_config('local'), get_config('rbenv')]
)
if self.testing:
assert return_value == 0, 'Errors while executing check-bundler.rb'
def print_error(self, error):
"""
Print more information about an error.
:type error: GitError
"""
print(colored(error.message, 'red'), file=self.stderr)
if error.stdout or error.stderr:
print(file=self.stderr)
print("Here's what git said:", file=self.stderr)
print(file=self.stderr)
if error.stdout:
print(error.stdout, file=self.stderr)
if error.stderr:
print(error.stderr, file=self.stderr)
if error.details:
print(file=self.stderr)
print("Here's what we know:", file=self.stderr)
print(str(error.details), file=self.stderr)
print(file=self.stderr)
|
msiemens/PyGitUp | PyGitUp/gitup.py | GitUp.rebase_all_branches | python | def rebase_all_branches(self):
""" Rebase all branches, if possible. """
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
original_branch = self.repo.active_branch
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
self.log(branch, target)
if fast_fastforward:
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout() | Rebase all branches, if possible. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/gitup.py#L216-L309 | [
"def log(self, branch, remote):\n \"\"\" Call a log-command, if set by git-up.fetch.all. \"\"\"\n log_hook = self.settings['rebase.log-hook']\n\n if log_hook:\n if ON_WINDOWS: # pragma: no cover\n # Running a string in CMD from Python is not that easy on\n # Windows. Running 'cmd /C log_hook' produces problems when\n # using multiple statements or things like 'echo'. Therefore,\n # we write the string to a bat file and execute it.\n\n # In addition, we replace occurences of $1 with %1 and so forth\n # in case the user is used to Bash or sh.\n # If there are occurences of %something, we'll replace it with\n # %%something. This is the case when running something like\n # 'git log --pretty=format:\"%Cred%h...\"'.\n # Also, we replace a semicolon with a newline, because if you\n # start with 'echo' on Windows, it will simply echo the\n # semicolon and the commands behind instead of echoing and then\n # running other commands\n\n # Prepare log_hook\n log_hook = re.sub(r'\\$(\\d+)', r'%\\1', log_hook)\n log_hook = re.sub(r'%(?!\\d)', '%%', log_hook)\n log_hook = re.sub(r'; ?', r'\\n', log_hook)\n\n # Write log_hook to an temporary file and get it's path\n with NamedTemporaryFile(\n prefix='PyGitUp.', suffix='.bat', delete=False\n ) as bat_file:\n # Don't echo all commands\n bat_file.file.write(b'@echo off\\n')\n # Run log_hook\n bat_file.file.write(log_hook.encode('utf-8'))\n\n # Run bat_file\n state = subprocess.call(\n [bat_file.name, branch.name, remote.name]\n )\n\n # Clean up file\n os.remove(bat_file.name)\n else: # pragma: no cover\n # Run log_hook via 'shell -c'\n state = subprocess.call(\n [log_hook, 'git-up', branch.name, remote.name],\n shell=True\n )\n\n if self.testing:\n assert state == 0, 'log_hook returned != 0'\n"
] | class GitUp(object):
""" Conainter class for GitUp methods """
default_settings = {
'bundler.check': False,
'bundler.autoinstall': False,
'bundler.local': False,
'bundler.rbenv': False,
'fetch.prune': True,
'fetch.all': False,
'rebase.show-hashes': False,
'rebase.arguments': None,
'rebase.auto': True,
'rebase.log-hook': None,
'updates.check': True,
'push.auto': False,
'push.tags': False,
'push.all': False,
}
def __init__(self, testing=False, sparse=False):
# Sparse init: config only
if sparse:
self.git = GitWrapper(None)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
return
# Testing: redirect stderr to stdout
self.testing = testing
if self.testing:
self.stderr = sys.stdout # Quiet testing
else: # pragma: no cover
self.stderr = sys.stderr
self.states = []
self.should_fetch = True
self.pushed = False
# Check, if we're in a git repo
try:
repo_dir = get_git_dir()
except (EnvironmentError, OSError, GitCommandNotFound) as e:
if isinstance(e, GitCommandNotFound) or e.errno == errno.ENOENT:
exc = GitError("The git executable could not be found")
raise exc
else:
raise
else:
if repo_dir is None:
exc = GitError("We don't seem to be in a git repository.")
raise exc
self.repo = Repo(repo_dir, odbt=GitCmdObjectDB)
# Check for branch tracking informatino
if not any(b.tracking_branch() for b in self.repo.branches):
exc = GitError("Can\'t update your repo because it doesn\'t has "
"any branches with tracking information.")
self.print_error(exc)
raise exc
self.git = GitWrapper(self.repo)
# target_map: map local branch names to remote tracking branches
#: :type: dict[str, git.refs.remote.RemoteReference]
self.target_map = dict()
for branch in self.repo.branches:
target = branch.tracking_branch()
if target:
if target.name.startswith('./'):
# Tracking branch is in local repo
target.is_local = True
else:
target.is_local = False
self.target_map[branch.name] = target
# branches: all local branches with tracking information
#: :type: list[git.refs.head.Head]
self.branches = [b for b in self.repo.branches if b.tracking_branch()]
self.branches.sort(key=lambda br: br.name)
# remotes: all remotes that are associated with local branches
#: :type: list[git.refs.remote.RemoteReference]
self.remotes = uniq(
# name = '<remote>/<branch>' -> '<remote>'
[r.name.split('/', 2)[0]
for r in list(self.target_map.values())]
)
# change_count: Number of unstaged changes
self.change_count = len(
self.git.status(porcelain=True, untracked_files='no').split('\n')
)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
def run(self):
""" Run all the git-up stuff. """
try:
if self.should_fetch:
self.fetch()
self.rebase_all_branches()
if self.with_bundler():
self.check_bundler()
if self.settings['push.auto']:
self.push()
except GitError as error:
self.print_error(error)
# Used for test cases
if self.testing:
raise
else: # pragma: no cover
sys.exit(1)
def rebase_all_branches(self):
""" Rebase all branches, if possible. """
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
original_branch = self.repo.active_branch
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
self.log(branch, target)
if fast_fastforward:
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout()
def fetch(self):
"""
Fetch the recent refs from the remotes.
Unless git-up.fetch.all is set to true, all remotes with
locally existent branches will be fetched.
"""
fetch_kwargs = {'multiple': True}
fetch_args = []
if self.is_prune():
fetch_kwargs['prune'] = True
if self.settings['fetch.all']:
fetch_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git fetch --multiple` will fail
return
fetch_args.append(self.remotes)
try:
self.git.fetch(*fetch_args, **fetch_kwargs)
except GitError as error:
error.message = "`git fetch` failed"
raise error
def push(self):
"""
Push the changes back to the remote(s) after fetching
"""
print('pushing...')
push_kwargs = {}
push_args = []
if self.settings['push.tags']:
push_kwargs['push'] = True
if self.settings['push.all']:
push_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git push` will fail
return
push_args.append(self.remotes)
try:
self.git.push(*push_args, **push_kwargs)
self.pushed = True
except GitError as error:
error.message = "`git push` failed"
raise error
def log(self, branch, remote):
""" Call a log-command, if set by git-up.fetch.all. """
log_hook = self.settings['rebase.log-hook']
if log_hook:
if ON_WINDOWS: # pragma: no cover
# Running a string in CMD from Python is not that easy on
# Windows. Running 'cmd /C log_hook' produces problems when
# using multiple statements or things like 'echo'. Therefore,
# we write the string to a bat file and execute it.
# In addition, we replace occurences of $1 with %1 and so forth
# in case the user is used to Bash or sh.
# If there are occurences of %something, we'll replace it with
# %%something. This is the case when running something like
# 'git log --pretty=format:"%Cred%h..."'.
# Also, we replace a semicolon with a newline, because if you
# start with 'echo' on Windows, it will simply echo the
# semicolon and the commands behind instead of echoing and then
# running other commands
# Prepare log_hook
log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
log_hook = re.sub(r'; ?', r'\n', log_hook)
# Write log_hook to an temporary file and get it's path
with NamedTemporaryFile(
prefix='PyGitUp.', suffix='.bat', delete=False
) as bat_file:
# Don't echo all commands
bat_file.file.write(b'@echo off\n')
# Run log_hook
bat_file.file.write(log_hook.encode('utf-8'))
# Run bat_file
state = subprocess.call(
[bat_file.name, branch.name, remote.name]
)
# Clean up file
os.remove(bat_file.name)
else: # pragma: no cover
# Run log_hook via 'shell -c'
state = subprocess.call(
[log_hook, 'git-up', branch.name, remote.name],
shell=True
)
if self.testing:
assert state == 0, 'log_hook returned != 0'
def version_info(self):
""" Tell, what version we're running at and if it's up to date. """
# Retrive and show local version info
package = pkg.get_distribution('git-up')
local_version_str = package.version
local_version = package.parsed_version
print('GitUp version is: ' + colored('v' + local_version_str, 'green'))
if not self.settings['updates.check']:
return
# Check for updates
print('Checking for updates...', end='')
try:
# Get version information from the PyPI JSON API
reader = codecs.getreader('utf-8')
details = json.load(reader(urlopen(PYPI_URL)))
online_version = details['info']['version']
except (HTTPError, URLError, ValueError):
recent = True # To not disturb the user with HTTP/parsing errors
else:
recent = local_version >= pkg.parse_version(online_version)
if not recent:
# noinspection PyUnboundLocalVariable
print(
'\rRecent version is: '
+ colored('v' + online_version, color='yellow', attrs=['bold'])
)
print('Run \'pip install -U git-up\' to get the update.')
else:
# Clear the update line
sys.stdout.write('\r' + ' ' * 80 + '\n')
###########################################################################
# Helpers
###########################################################################
def load_config(self):
"""
Load the configuration from git config.
"""
for key in self.settings:
value = self.config(key)
# Parse true/false
if value == '' or value is None:
continue # Not set by user, go on
if value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
elif value:
pass # A user-defined string, store the value later
self.settings[key] = value
def config(self, key):
""" Get a git-up-specific config value. """
return self.git.config('git-up.{0}'.format(key))
def is_prune(self):
"""
Return True, if `git fetch --prune` is allowed.
Because of possible incompatibilities, this requires special
treatment.
"""
required_version = "1.6.6"
config_value = self.settings['fetch.prune']
if self.git.is_version_min(required_version):
return config_value is not False
else: # pragma: no cover
if config_value == 'true':
print(colored(
"Warning: fetch.prune is set to 'true' but your git"
"version doesn't seem to support it ({0} < {1})."
"Defaulting to 'false'.".format(self.git.version,
required_version),
'yellow'
))
###########################################################################
# Gemfile Checking
###########################################################################
def with_bundler(self):
"""
Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool
"""
def gemfile_exists():
"""
Check, if a Gemfile exists in the current repo.
"""
return os.path.exists('Gemfile')
if 'GIT_UP_BUNDLER_CHECK' in os.environ:
print(colored(
'''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.
You can now tell git-up to check (or not check) for missing
gems on a per-project basis using git's config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
project's directory:
git config git-up.bundler.check true
Replace 'true' with 'false' to disable checking.''', 'yellow'))
if self.settings['bundler.check']:
return gemfile_exists()
if ('GIT_UP_BUNDLER_CHECK' in os.environ
and os.environ['GIT_UP_BUNDLER_CHECK'] == 'true'):
return gemfile_exists()
return False
def check_bundler(self):
"""
Run the bundler check.
"""
def get_config(name):
return name if self.config('bundler.' + name) else ''
from pkg_resources import Requirement, resource_filename
relative_path = os.path.join('PyGitUp', 'check-bundler.rb')
bundler_script = resource_filename(Requirement.parse('git-up'),
relative_path)
assert os.path.exists(bundler_script), 'check-bundler.rb doesn\'t ' \
'exist!'
return_value = subprocess.call(
['ruby', bundler_script, get_config('autoinstall'),
get_config('local'), get_config('rbenv')]
)
if self.testing:
assert return_value == 0, 'Errors while executing check-bundler.rb'
def print_error(self, error):
"""
Print more information about an error.
:type error: GitError
"""
print(colored(error.message, 'red'), file=self.stderr)
if error.stdout or error.stderr:
print(file=self.stderr)
print("Here's what git said:", file=self.stderr)
print(file=self.stderr)
if error.stdout:
print(error.stdout, file=self.stderr)
if error.stderr:
print(error.stderr, file=self.stderr)
if error.details:
print(file=self.stderr)
print("Here's what we know:", file=self.stderr)
print(str(error.details), file=self.stderr)
print(file=self.stderr)
|
msiemens/PyGitUp | PyGitUp/gitup.py | GitUp.fetch | python | def fetch(self):
"""
Fetch the recent refs from the remotes.
Unless git-up.fetch.all is set to true, all remotes with
locally existent branches will be fetched.
"""
fetch_kwargs = {'multiple': True}
fetch_args = []
if self.is_prune():
fetch_kwargs['prune'] = True
if self.settings['fetch.all']:
fetch_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git fetch --multiple` will fail
return
fetch_args.append(self.remotes)
try:
self.git.fetch(*fetch_args, **fetch_kwargs)
except GitError as error:
error.message = "`git fetch` failed"
raise error | Fetch the recent refs from the remotes.
Unless git-up.fetch.all is set to true, all remotes with
locally existent branches will be fetched. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/gitup.py#L311-L341 | [
"def is_prune(self):\n \"\"\"\n Return True, if `git fetch --prune` is allowed.\n\n Because of possible incompatibilities, this requires special\n treatment.\n \"\"\"\n required_version = \"1.6.6\"\n config_value = self.settings['fetch.prune']\n\n if self.git.is_version_min(required_version):\n return config_value is not False\n else: # pragma: no cover\n if config_value == 'true':\n print(colored(\n \"Warning: fetch.prune is set to 'true' but your git\"\n \"version doesn't seem to support it ({0} < {1}).\"\n \"Defaulting to 'false'.\".format(self.git.version,\n required_version),\n 'yellow'\n ))\n"
] | class GitUp(object):
""" Conainter class for GitUp methods """
default_settings = {
'bundler.check': False,
'bundler.autoinstall': False,
'bundler.local': False,
'bundler.rbenv': False,
'fetch.prune': True,
'fetch.all': False,
'rebase.show-hashes': False,
'rebase.arguments': None,
'rebase.auto': True,
'rebase.log-hook': None,
'updates.check': True,
'push.auto': False,
'push.tags': False,
'push.all': False,
}
def __init__(self, testing=False, sparse=False):
# Sparse init: config only
if sparse:
self.git = GitWrapper(None)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
return
# Testing: redirect stderr to stdout
self.testing = testing
if self.testing:
self.stderr = sys.stdout # Quiet testing
else: # pragma: no cover
self.stderr = sys.stderr
self.states = []
self.should_fetch = True
self.pushed = False
# Check, if we're in a git repo
try:
repo_dir = get_git_dir()
except (EnvironmentError, OSError, GitCommandNotFound) as e:
if isinstance(e, GitCommandNotFound) or e.errno == errno.ENOENT:
exc = GitError("The git executable could not be found")
raise exc
else:
raise
else:
if repo_dir is None:
exc = GitError("We don't seem to be in a git repository.")
raise exc
self.repo = Repo(repo_dir, odbt=GitCmdObjectDB)
# Check for branch tracking informatino
if not any(b.tracking_branch() for b in self.repo.branches):
exc = GitError("Can\'t update your repo because it doesn\'t has "
"any branches with tracking information.")
self.print_error(exc)
raise exc
self.git = GitWrapper(self.repo)
# target_map: map local branch names to remote tracking branches
#: :type: dict[str, git.refs.remote.RemoteReference]
self.target_map = dict()
for branch in self.repo.branches:
target = branch.tracking_branch()
if target:
if target.name.startswith('./'):
# Tracking branch is in local repo
target.is_local = True
else:
target.is_local = False
self.target_map[branch.name] = target
# branches: all local branches with tracking information
#: :type: list[git.refs.head.Head]
self.branches = [b for b in self.repo.branches if b.tracking_branch()]
self.branches.sort(key=lambda br: br.name)
# remotes: all remotes that are associated with local branches
#: :type: list[git.refs.remote.RemoteReference]
self.remotes = uniq(
# name = '<remote>/<branch>' -> '<remote>'
[r.name.split('/', 2)[0]
for r in list(self.target_map.values())]
)
# change_count: Number of unstaged changes
self.change_count = len(
self.git.status(porcelain=True, untracked_files='no').split('\n')
)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
def run(self):
""" Run all the git-up stuff. """
try:
if self.should_fetch:
self.fetch()
self.rebase_all_branches()
if self.with_bundler():
self.check_bundler()
if self.settings['push.auto']:
self.push()
except GitError as error:
self.print_error(error)
# Used for test cases
if self.testing:
raise
else: # pragma: no cover
sys.exit(1)
def rebase_all_branches(self):
""" Rebase all branches, if possible. """
# Width of the branch-name column in the per-branch status report.
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
# Remember where we started so we can return there at the end.
original_branch = self.repo.active_branch
# NOTE(review): stasher() is called before any checkout/rebase below --
# presumably it stashes uncommitted changes once; see GitWrapper.stasher.
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name (current branch rendered bold)
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# './' prefix means the tracking branch lives in this repo.
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch (resolve './name' to the actual local Head)
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
# Common ancestor of the branch and its upstream.
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
# Upstream is an ancestor: we only have unpushed commits.
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
# Branch is an ancestor of upstream: pure fast-forward.
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
# Histories diverged and auto-rebase is disabled: report only.
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
# Show the abbreviated commit range being applied.
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
# Run the user's log hook for this branch/upstream pair.
self.log(branch, target)
if fast_fastforward:
# Move the branch ref directly without touching the worktree.
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout()
def fetch(self):
    """Fetch recent refs from the remotes.

    When git-up.fetch.all is enabled, every remote is fetched;
    otherwise only the remotes that have locally existent branches
    are fetched.
    """
    kwargs = {'multiple': True}
    args = []
    if self.is_prune():
        kwargs['prune'] = True

    if self.settings['fetch.all']:
        kwargs['all'] = True
    else:
        # '.' stands for the local repository itself -- nothing to fetch.
        if '.' in self.remotes:
            self.remotes.remove('.')
        if not self.remotes:
            # Only local target branches; `git fetch --multiple` would fail.
            return
        args.append(self.remotes)

    try:
        self.git.fetch(*args, **kwargs)
    except GitError as error:
        error.message = "`git fetch` failed"
        raise error
def push(self):
    """Push local changes back to the remote(s) after fetching/rebasing.

    Honors these settings:
    - push.tags: also push tags (`git push --tags`)
    - push.all:  push to all remotes (`git push --all`)

    Sets ``self.pushed`` to True on success; raises GitError on failure.
    """
    print('pushing...')
    push_kwargs = {}
    push_args = []

    if self.settings['push.tags']:
        # BUGFIX: was `push_kwargs['push'] = True`, which would translate
        # to the nonexistent `git push --push`; `--tags` is what's intended.
        push_kwargs['tags'] = True

    if self.settings['push.all']:
        push_kwargs['all'] = True
    else:
        # '.' stands for the local repository itself -- nothing to push to.
        if '.' in self.remotes:
            self.remotes.remove('.')
        if not self.remotes:
            # Only local target branches; `git push` would fail.
            return
        push_args.append(self.remotes)

    try:
        self.git.push(*push_args, **push_kwargs)
        self.pushed = True
    except GitError as error:
        error.message = "`git push` failed"
        raise error
def log(self, branch, remote):
""" Run the user's log hook (git-up.rebase.log-hook), if configured.

Invokes the hook with the local branch name and the remote (upstream)
branch name as arguments. On Windows the hook string is rewritten
into a temporary .bat file first; elsewhere it runs via the shell.
"""
log_hook = self.settings['rebase.log-hook']
if log_hook:
if ON_WINDOWS: # pragma: no cover
# Running a string in CMD from Python is not that easy on
# Windows. Running 'cmd /C log_hook' produces problems when
# using multiple statements or things like 'echo'. Therefore,
# we write the string to a bat file and execute it.
# In addition, we replace occurrences of $1 with %1 and so forth
# in case the user is used to Bash or sh.
# If there are occurrences of %something, we'll replace it with
# %%something. This is the case when running something like
# 'git log --pretty=format:"%Cred%h..."'.
# Also, we replace a semicolon with a newline, because if you
# start with 'echo' on Windows, it will simply echo the
# semicolon and the commands behind instead of echoing and then
# running other commands
# Prepare log_hook
log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
log_hook = re.sub(r'; ?', r'\n', log_hook)
# Write log_hook to a temporary file and get its path
with NamedTemporaryFile(
prefix='PyGitUp.', suffix='.bat', delete=False
) as bat_file:
# Don't echo all commands
bat_file.file.write(b'@echo off\n')
# Run log_hook
bat_file.file.write(log_hook.encode('utf-8'))
# Run bat_file
state = subprocess.call(
[bat_file.name, branch.name, remote.name]
)
# Clean up file
os.remove(bat_file.name)
else: # pragma: no cover
# Run log_hook via 'shell -c'
state = subprocess.call(
[log_hook, 'git-up', branch.name, remote.name],
shell=True
)
if self.testing:
assert state == 0, 'log_hook returned != 0'
def version_info(self):
    """Print the local GitUp version and, if enabled, check PyPI for updates."""
    # Retrieve and show the locally installed version.
    package = pkg.get_distribution('git-up')
    local_version_str = package.version
    local_version = package.parsed_version
    print('GitUp version is: ' + colored('v' + local_version_str, 'green'))

    if not self.settings['updates.check']:
        return

    print('Checking for updates...', end='')
    try:
        # Version information comes from the PyPI JSON API.
        reader = codecs.getreader('utf-8')
        details = json.load(reader(urlopen(PYPI_URL)))
        online_version = details['info']['version']
    except (HTTPError, URLError, ValueError):
        # Don't disturb the user with HTTP/parsing errors.
        recent = True
    else:
        recent = local_version >= pkg.parse_version(online_version)

    if recent:
        # Overwrite the 'Checking for updates...' line with blanks.
        sys.stdout.write('\r' + ' ' * 80 + '\n')
    else:
        # noinspection PyUnboundLocalVariable
        print(
            '\rRecent version is: '
            + colored('v' + online_version, color='yellow', attrs=['bold'])
        )
        print('Run \'pip install -U git-up\' to get the update.')
###########################################################################
# Helpers
###########################################################################
def load_config(self):
    """Overlay user-set git config values onto the default settings.

    Values 'true'/'false' (case-insensitive) become booleans; an empty
    or missing value keeps the built-in default; any other string is
    stored verbatim.
    """
    for key in self.settings:
        raw = self.config(key)
        if raw is None or raw == '':
            continue  # Not set by the user -- keep the default.
        lowered = raw.lower()
        if lowered == 'true':
            self.settings[key] = True
        elif lowered == 'false':
            self.settings[key] = False
        else:
            self.settings[key] = raw
def config(self, key):
    """Return the git-up-specific config value stored under *key*."""
    return self.git.config('git-up.' + key)
def is_prune(self):
    """Return True if `git fetch --prune` may be used.

    Pruning needs git >= 1.6.6; on older versions a warning is printed
    (when the user explicitly asked for pruning) and None is returned.
    """
    required = "1.6.6"
    wanted = self.settings['fetch.prune']

    if self.git.is_version_min(required):
        # Default-on behavior: anything but an explicit False enables it.
        return wanted is not False

    # pragma: no cover -- old git versions only
    if wanted == 'true':
        print(colored(
            "Warning: fetch.prune is set to 'true' but your git"
            "version doesn't seem to support it ({0} < {1})."
            "Defaulting to 'false'.".format(self.git.version,
                                            required),
            'yellow'
        ))
###########################################################################
# Gemfile Checking
###########################################################################
def with_bundler(self):
    """Return True if a bundler (Gemfile) check should be performed.

    Enabled via the git-up.bundler.check setting, or via the deprecated
    GIT_UP_BUNDLER_CHECK environment variable; in either case a Gemfile
    must exist in the current directory.

    :rtype : bool
    """
    def gemfile_exists():
        # A Gemfile in the working directory marks a bundler project.
        return os.path.exists('Gemfile')

    if 'GIT_UP_BUNDLER_CHECK' in os.environ:
        # Warn about the deprecated environment-variable mechanism.
        print(colored(
            '''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.
You can now tell git-up to check (or not check) for missing
gems on a per-project basis using git's config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
project's directory:
git config git-up.bundler.check true
Replace 'true' with 'false' to disable checking.''', 'yellow'))

    if self.settings['bundler.check']:
        return gemfile_exists()
    if os.environ.get('GIT_UP_BUNDLER_CHECK') == 'true':
        return gemfile_exists()
    return False
def check_bundler(self):
    """Run the external check-bundler.rb script via ruby."""
    def get_config(name):
        # Pass the option name through when enabled, '' otherwise.
        return name if self.config('bundler.' + name) else ''

    from pkg_resources import Requirement, resource_filename
    script = resource_filename(Requirement.parse('git-up'),
                               os.path.join('PyGitUp', 'check-bundler.rb'))
    assert os.path.exists(script), 'check-bundler.rb doesn\'t exist!'

    status = subprocess.call(
        ['ruby', script, get_config('autoinstall'),
         get_config('local'), get_config('rbenv')]
    )
    if self.testing:
        assert status == 0, 'Errors while executing check-bundler.rb'
def print_error(self, error):
    """Report a GitError, including any captured git stdout/stderr.

    :type error: GitError
    """
    out = self.stderr
    print(colored(error.message, 'red'), file=out)

    if error.stdout or error.stderr:
        print(file=out)
        print("Here's what git said:", file=out)
        print(file=out)
        if error.stdout:
            print(error.stdout, file=out)
        if error.stderr:
            print(error.stderr, file=out)

    if error.details:
        print(file=out)
        print("Here's what we know:", file=out)
        print(str(error.details), file=out)
        print(file=out)
|
msiemens/PyGitUp | PyGitUp/gitup.py | GitUp.push | python | def push(self):
"""
Push the changes back to the remote(s) after fetching
"""
print('pushing...')
push_kwargs = {}
push_args = []
if self.settings['push.tags']:
push_kwargs['push'] = True
if self.settings['push.all']:
push_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git push` will fail
return
push_args.append(self.remotes)
try:
self.git.push(*push_args, **push_kwargs)
self.pushed = True
except GitError as error:
error.message = "`git push` failed"
raise error | Push the changes back to the remote(s) after fetching | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/gitup.py#L343-L372 | null | class GitUp(object):
""" Conainter class for GitUp methods """
default_settings = {
'bundler.check': False,
'bundler.autoinstall': False,
'bundler.local': False,
'bundler.rbenv': False,
'fetch.prune': True,
'fetch.all': False,
'rebase.show-hashes': False,
'rebase.arguments': None,
'rebase.auto': True,
'rebase.log-hook': None,
'updates.check': True,
'push.auto': False,
'push.tags': False,
'push.all': False,
}
def __init__(self, testing=False, sparse=False):
# Sparse init: config only
if sparse:
self.git = GitWrapper(None)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
return
# Testing: redirect stderr to stdout
self.testing = testing
if self.testing:
self.stderr = sys.stdout # Quiet testing
else: # pragma: no cover
self.stderr = sys.stderr
self.states = []
self.should_fetch = True
self.pushed = False
# Check, if we're in a git repo
try:
repo_dir = get_git_dir()
except (EnvironmentError, OSError, GitCommandNotFound) as e:
if isinstance(e, GitCommandNotFound) or e.errno == errno.ENOENT:
exc = GitError("The git executable could not be found")
raise exc
else:
raise
else:
if repo_dir is None:
exc = GitError("We don't seem to be in a git repository.")
raise exc
self.repo = Repo(repo_dir, odbt=GitCmdObjectDB)
# Check for branch tracking information
if not any(b.tracking_branch() for b in self.repo.branches):
exc = GitError("Can\'t update your repo because it doesn\'t has "
"any branches with tracking information.")
self.print_error(exc)
raise exc
self.git = GitWrapper(self.repo)
# target_map: map local branch names to remote tracking branches
#: :type: dict[str, git.refs.remote.RemoteReference]
self.target_map = dict()
for branch in self.repo.branches:
target = branch.tracking_branch()
if target:
if target.name.startswith('./'):
# Tracking branch is in local repo
target.is_local = True
else:
target.is_local = False
self.target_map[branch.name] = target
# branches: all local branches with tracking information
#: :type: list[git.refs.head.Head]
self.branches = [b for b in self.repo.branches if b.tracking_branch()]
self.branches.sort(key=lambda br: br.name)
# remotes: all remotes that are associated with local branches
#: :type: list[git.refs.remote.RemoteReference]
self.remotes = uniq(
# name = '<remote>/<branch>' -> '<remote>'
[r.name.split('/', 2)[0]
for r in list(self.target_map.values())]
)
# change_count: Number of unstaged changes
self.change_count = len(
self.git.status(porcelain=True, untracked_files='no').split('\n')
)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
def run(self):
""" Run all the git-up stuff. """
try:
if self.should_fetch:
self.fetch()
self.rebase_all_branches()
if self.with_bundler():
self.check_bundler()
if self.settings['push.auto']:
self.push()
except GitError as error:
self.print_error(error)
# Used for test cases
if self.testing:
raise
else: # pragma: no cover
sys.exit(1)
def rebase_all_branches(self):
""" Rebase all branches, if possible. """
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
original_branch = self.repo.active_branch
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
self.log(branch, target)
if fast_fastforward:
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout()
def fetch(self):
"""
Fetch the recent refs from the remotes.
Unless git-up.fetch.all is set to true, all remotes with
locally existent branches will be fetched.
"""
fetch_kwargs = {'multiple': True}
fetch_args = []
if self.is_prune():
fetch_kwargs['prune'] = True
if self.settings['fetch.all']:
fetch_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git fetch --multiple` will fail
return
fetch_args.append(self.remotes)
try:
self.git.fetch(*fetch_args, **fetch_kwargs)
except GitError as error:
error.message = "`git fetch` failed"
raise error
def push(self):
    """Push local changes back to the remote(s) after fetching/rebasing.

    Honors these settings:
    - push.tags: also push tags (`git push --tags`)
    - push.all:  push to all remotes (`git push --all`)

    Sets ``self.pushed`` to True on success; raises GitError on failure.
    """
    print('pushing...')
    push_kwargs = {}
    push_args = []

    if self.settings['push.tags']:
        # BUGFIX: was `push_kwargs['push'] = True`, which would translate
        # to the nonexistent `git push --push`; `--tags` is what's intended.
        push_kwargs['tags'] = True

    if self.settings['push.all']:
        push_kwargs['all'] = True
    else:
        # '.' stands for the local repository itself -- nothing to push to.
        if '.' in self.remotes:
            self.remotes.remove('.')
        if not self.remotes:
            # Only local target branches; `git push` would fail.
            return
        push_args.append(self.remotes)

    try:
        self.git.push(*push_args, **push_kwargs)
        self.pushed = True
    except GitError as error:
        error.message = "`git push` failed"
        raise error
def log(self, branch, remote):
""" Call a log-command, if set by git-up.fetch.all. """
log_hook = self.settings['rebase.log-hook']
if log_hook:
if ON_WINDOWS: # pragma: no cover
# Running a string in CMD from Python is not that easy on
# Windows. Running 'cmd /C log_hook' produces problems when
# using multiple statements or things like 'echo'. Therefore,
# we write the string to a bat file and execute it.
# In addition, we replace occurences of $1 with %1 and so forth
# in case the user is used to Bash or sh.
# If there are occurences of %something, we'll replace it with
# %%something. This is the case when running something like
# 'git log --pretty=format:"%Cred%h..."'.
# Also, we replace a semicolon with a newline, because if you
# start with 'echo' on Windows, it will simply echo the
# semicolon and the commands behind instead of echoing and then
# running other commands
# Prepare log_hook
log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
log_hook = re.sub(r'; ?', r'\n', log_hook)
# Write log_hook to an temporary file and get it's path
with NamedTemporaryFile(
prefix='PyGitUp.', suffix='.bat', delete=False
) as bat_file:
# Don't echo all commands
bat_file.file.write(b'@echo off\n')
# Run log_hook
bat_file.file.write(log_hook.encode('utf-8'))
# Run bat_file
state = subprocess.call(
[bat_file.name, branch.name, remote.name]
)
# Clean up file
os.remove(bat_file.name)
else: # pragma: no cover
# Run log_hook via 'shell -c'
state = subprocess.call(
[log_hook, 'git-up', branch.name, remote.name],
shell=True
)
if self.testing:
assert state == 0, 'log_hook returned != 0'
def version_info(self):
""" Tell, what version we're running at and if it's up to date. """
# Retrive and show local version info
package = pkg.get_distribution('git-up')
local_version_str = package.version
local_version = package.parsed_version
print('GitUp version is: ' + colored('v' + local_version_str, 'green'))
if not self.settings['updates.check']:
return
# Check for updates
print('Checking for updates...', end='')
try:
# Get version information from the PyPI JSON API
reader = codecs.getreader('utf-8')
details = json.load(reader(urlopen(PYPI_URL)))
online_version = details['info']['version']
except (HTTPError, URLError, ValueError):
recent = True # To not disturb the user with HTTP/parsing errors
else:
recent = local_version >= pkg.parse_version(online_version)
if not recent:
# noinspection PyUnboundLocalVariable
print(
'\rRecent version is: '
+ colored('v' + online_version, color='yellow', attrs=['bold'])
)
print('Run \'pip install -U git-up\' to get the update.')
else:
# Clear the update line
sys.stdout.write('\r' + ' ' * 80 + '\n')
###########################################################################
# Helpers
###########################################################################
def load_config(self):
"""
Load the configuration from git config.
"""
for key in self.settings:
value = self.config(key)
# Parse true/false
if value == '' or value is None:
continue # Not set by user, go on
if value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
elif value:
pass # A user-defined string, store the value later
self.settings[key] = value
def config(self, key):
""" Get a git-up-specific config value. """
return self.git.config('git-up.{0}'.format(key))
def is_prune(self):
"""
Return True, if `git fetch --prune` is allowed.
Because of possible incompatibilities, this requires special
treatment.
"""
required_version = "1.6.6"
config_value = self.settings['fetch.prune']
if self.git.is_version_min(required_version):
return config_value is not False
else: # pragma: no cover
if config_value == 'true':
print(colored(
"Warning: fetch.prune is set to 'true' but your git"
"version doesn't seem to support it ({0} < {1})."
"Defaulting to 'false'.".format(self.git.version,
required_version),
'yellow'
))
###########################################################################
# Gemfile Checking
###########################################################################
def with_bundler(self):
"""
Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool
"""
def gemfile_exists():
"""
Check, if a Gemfile exists in the current repo.
"""
return os.path.exists('Gemfile')
if 'GIT_UP_BUNDLER_CHECK' in os.environ:
print(colored(
'''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.
You can now tell git-up to check (or not check) for missing
gems on a per-project basis using git's config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
project's directory:
git config git-up.bundler.check true
Replace 'true' with 'false' to disable checking.''', 'yellow'))
if self.settings['bundler.check']:
return gemfile_exists()
if ('GIT_UP_BUNDLER_CHECK' in os.environ
and os.environ['GIT_UP_BUNDLER_CHECK'] == 'true'):
return gemfile_exists()
return False
def check_bundler(self):
"""
Run the bundler check.
"""
def get_config(name):
return name if self.config('bundler.' + name) else ''
from pkg_resources import Requirement, resource_filename
relative_path = os.path.join('PyGitUp', 'check-bundler.rb')
bundler_script = resource_filename(Requirement.parse('git-up'),
relative_path)
assert os.path.exists(bundler_script), 'check-bundler.rb doesn\'t ' \
'exist!'
return_value = subprocess.call(
['ruby', bundler_script, get_config('autoinstall'),
get_config('local'), get_config('rbenv')]
)
if self.testing:
assert return_value == 0, 'Errors while executing check-bundler.rb'
def print_error(self, error):
"""
Print more information about an error.
:type error: GitError
"""
print(colored(error.message, 'red'), file=self.stderr)
if error.stdout or error.stderr:
print(file=self.stderr)
print("Here's what git said:", file=self.stderr)
print(file=self.stderr)
if error.stdout:
print(error.stdout, file=self.stderr)
if error.stderr:
print(error.stderr, file=self.stderr)
if error.details:
print(file=self.stderr)
print("Here's what we know:", file=self.stderr)
print(str(error.details), file=self.stderr)
print(file=self.stderr)
|
msiemens/PyGitUp | PyGitUp/gitup.py | GitUp.log | python | def log(self, branch, remote):
""" Call a log-command, if set by git-up.fetch.all. """
log_hook = self.settings['rebase.log-hook']
if log_hook:
if ON_WINDOWS: # pragma: no cover
# Running a string in CMD from Python is not that easy on
# Windows. Running 'cmd /C log_hook' produces problems when
# using multiple statements or things like 'echo'. Therefore,
# we write the string to a bat file and execute it.
# In addition, we replace occurences of $1 with %1 and so forth
# in case the user is used to Bash or sh.
# If there are occurences of %something, we'll replace it with
# %%something. This is the case when running something like
# 'git log --pretty=format:"%Cred%h..."'.
# Also, we replace a semicolon with a newline, because if you
# start with 'echo' on Windows, it will simply echo the
# semicolon and the commands behind instead of echoing and then
# running other commands
# Prepare log_hook
log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
log_hook = re.sub(r'; ?', r'\n', log_hook)
# Write log_hook to an temporary file and get it's path
with NamedTemporaryFile(
prefix='PyGitUp.', suffix='.bat', delete=False
) as bat_file:
# Don't echo all commands
bat_file.file.write(b'@echo off\n')
# Run log_hook
bat_file.file.write(log_hook.encode('utf-8'))
# Run bat_file
state = subprocess.call(
[bat_file.name, branch.name, remote.name]
)
# Clean up file
os.remove(bat_file.name)
else: # pragma: no cover
# Run log_hook via 'shell -c'
state = subprocess.call(
[log_hook, 'git-up', branch.name, remote.name],
shell=True
)
if self.testing:
assert state == 0, 'log_hook returned != 0' | Call a log-command, if set by git-up.fetch.all. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/gitup.py#L374-L424 | null | class GitUp(object):
""" Conainter class for GitUp methods """
default_settings = {
'bundler.check': False,
'bundler.autoinstall': False,
'bundler.local': False,
'bundler.rbenv': False,
'fetch.prune': True,
'fetch.all': False,
'rebase.show-hashes': False,
'rebase.arguments': None,
'rebase.auto': True,
'rebase.log-hook': None,
'updates.check': True,
'push.auto': False,
'push.tags': False,
'push.all': False,
}
def __init__(self, testing=False, sparse=False):
# Sparse init: config only
if sparse:
self.git = GitWrapper(None)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
return
# Testing: redirect stderr to stdout
self.testing = testing
if self.testing:
self.stderr = sys.stdout # Quiet testing
else: # pragma: no cover
self.stderr = sys.stderr
self.states = []
self.should_fetch = True
self.pushed = False
# Check, if we're in a git repo
try:
repo_dir = get_git_dir()
except (EnvironmentError, OSError, GitCommandNotFound) as e:
if isinstance(e, GitCommandNotFound) or e.errno == errno.ENOENT:
exc = GitError("The git executable could not be found")
raise exc
else:
raise
else:
if repo_dir is None:
exc = GitError("We don't seem to be in a git repository.")
raise exc
self.repo = Repo(repo_dir, odbt=GitCmdObjectDB)
# Check for branch tracking information
if not any(b.tracking_branch() for b in self.repo.branches):
exc = GitError("Can\'t update your repo because it doesn\'t has "
"any branches with tracking information.")
self.print_error(exc)
raise exc
self.git = GitWrapper(self.repo)
# target_map: map local branch names to remote tracking branches
#: :type: dict[str, git.refs.remote.RemoteReference]
self.target_map = dict()
for branch in self.repo.branches:
target = branch.tracking_branch()
if target:
if target.name.startswith('./'):
# Tracking branch is in local repo
target.is_local = True
else:
target.is_local = False
self.target_map[branch.name] = target
# branches: all local branches with tracking information
#: :type: list[git.refs.head.Head]
self.branches = [b for b in self.repo.branches if b.tracking_branch()]
self.branches.sort(key=lambda br: br.name)
# remotes: all remotes that are associated with local branches
#: :type: list[git.refs.remote.RemoteReference]
self.remotes = uniq(
# name = '<remote>/<branch>' -> '<remote>'
[r.name.split('/', 2)[0]
for r in list(self.target_map.values())]
)
# change_count: Number of unstaged changes
self.change_count = len(
self.git.status(porcelain=True, untracked_files='no').split('\n')
)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
def run(self):
""" Run all the git-up stuff. """
try:
if self.should_fetch:
self.fetch()
self.rebase_all_branches()
if self.with_bundler():
self.check_bundler()
if self.settings['push.auto']:
self.push()
except GitError as error:
self.print_error(error)
# Used for test cases
if self.testing:
raise
else: # pragma: no cover
sys.exit(1)
def rebase_all_branches(self):
""" Rebase all branches, if possible. """
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
original_branch = self.repo.active_branch
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
self.log(branch, target)
if fast_fastforward:
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout()
def fetch(self):
"""
Fetch the recent refs from the remotes.
Unless git-up.fetch.all is set to true, all remotes with
locally existent branches will be fetched.
"""
fetch_kwargs = {'multiple': True}
fetch_args = []
if self.is_prune():
fetch_kwargs['prune'] = True
if self.settings['fetch.all']:
fetch_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git fetch --multiple` will fail
return
fetch_args.append(self.remotes)
try:
self.git.fetch(*fetch_args, **fetch_kwargs)
except GitError as error:
error.message = "`git fetch` failed"
raise error
def push(self):
    """Push local changes back to the remote(s) after fetching/rebasing.

    Honors these settings:
    - push.tags: also push tags (`git push --tags`)
    - push.all:  push to all remotes (`git push --all`)

    Sets ``self.pushed`` to True on success; raises GitError on failure.
    """
    print('pushing...')
    push_kwargs = {}
    push_args = []

    if self.settings['push.tags']:
        # BUGFIX: was `push_kwargs['push'] = True`, which would translate
        # to the nonexistent `git push --push`; `--tags` is what's intended.
        push_kwargs['tags'] = True

    if self.settings['push.all']:
        push_kwargs['all'] = True
    else:
        # '.' stands for the local repository itself -- nothing to push to.
        if '.' in self.remotes:
            self.remotes.remove('.')
        if not self.remotes:
            # Only local target branches; `git push` would fail.
            return
        push_args.append(self.remotes)

    try:
        self.git.push(*push_args, **push_kwargs)
        self.pushed = True
    except GitError as error:
        error.message = "`git push` failed"
        raise error
def log(self, branch, remote):
    """Run the user's ``git-up.rebase.log-hook`` command, if configured.

    The hook receives the local branch name and its remote tracking
    branch name as arguments ``$1`` and ``$2``.

    :param branch: local branch being updated
    :param remote: the remote tracking branch it is rebased onto
    """
    log_hook = self.settings['rebase.log-hook']
    if log_hook:
        if ON_WINDOWS:  # pragma: no cover
            # Running a string in CMD from Python is not that easy on
            # Windows. Running 'cmd /C log_hook' produces problems when
            # using multiple statements or things like 'echo'. Therefore,
            # we write the string to a bat file and execute it.

            # In addition, we replace occurrences of $1 with %1 and so
            # forth in case the user is used to Bash or sh.
            # If there are occurrences of %something, we'll replace it
            # with %%something. This is the case when running something
            # like 'git log --pretty=format:"%Cred%h..."'.
            # Also, we replace a semicolon with a newline, because if you
            # start with 'echo' on Windows, it will simply echo the
            # semicolon and the commands behind instead of echoing and
            # then running other commands.

            # Prepare log_hook: $N -> %N, bare % -> %%, ';' -> newline
            log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
            log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
            log_hook = re.sub(r'; ?', r'\n', log_hook)

            # Write log_hook to a temporary .bat file; delete=False so it
            # survives the 'with' (it must be closed before execution).
            with NamedTemporaryFile(
                    prefix='PyGitUp.', suffix='.bat', delete=False
            ) as bat_file:
                # Don't echo all commands
                bat_file.file.write(b'@echo off\n')
                # Run log_hook
                bat_file.file.write(log_hook.encode('utf-8'))

            # Run bat_file (outside the 'with', so the file is closed)
            state = subprocess.call(
                [bat_file.name, branch.name, remote.name]
            )

            # Clean up file
            os.remove(bat_file.name)
        else:  # pragma: no cover
            # Run log_hook via the shell, passing branch names as $1/$2
            state = subprocess.call(
                [log_hook, 'git-up', branch.name, remote.name],
                shell=True
            )

        if self.testing:
            assert state == 0, 'log_hook returned != 0'
def version_info(self):
    """Print the installed GitUp version and, unless disabled via
    ``git-up.updates.check``, compare it with the newest PyPI release.
    """
    # Retrieve and show local version info
    dist = pkg.get_distribution('git-up')
    print('GitUp version is: ' + colored('v' + dist.version, 'green'))

    if not self.settings['updates.check']:
        return

    # Check for updates
    print('Checking for updates...', end='')
    try:
        # Query the PyPI JSON API for the latest release number.
        reader = codecs.getreader('utf-8')
        pypi_data = json.load(reader(urlopen(PYPI_URL)))
        online_version = pypi_data['info']['version']
    except (HTTPError, URLError, ValueError):
        # Network or parsing trouble: pretend we're current rather
        # than disturbing the user with the error.
        up_to_date = True
    else:
        up_to_date = dist.parsed_version >= pkg.parse_version(online_version)

    if up_to_date:
        # Overwrite the progress line with blanks.
        sys.stdout.write('\r' + ' ' * 80 + '\n')
    else:
        # noinspection PyUnboundLocalVariable
        print(
            '\rRecent version is: '
            + colored('v' + online_version, color='yellow', attrs=['bold'])
        )
        print('Run \'pip install -U git-up\' to get the update.')
###########################################################################
# Helpers
###########################################################################
def load_config(self):
"""
Load the configuration from git config.
"""
for key in self.settings:
value = self.config(key)
# Parse true/false
if value == '' or value is None:
continue # Not set by user, go on
if value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
elif value:
pass # A user-defined string, store the value later
self.settings[key] = value
def config(self, key):
""" Get a git-up-specific config value. """
return self.git.config('git-up.{0}'.format(key))
def is_prune(self):
"""
Return True, if `git fetch --prune` is allowed.
Because of possible incompatibilities, this requires special
treatment.
"""
required_version = "1.6.6"
config_value = self.settings['fetch.prune']
if self.git.is_version_min(required_version):
return config_value is not False
else: # pragma: no cover
if config_value == 'true':
print(colored(
"Warning: fetch.prune is set to 'true' but your git"
"version doesn't seem to support it ({0} < {1})."
"Defaulting to 'false'.".format(self.git.version,
required_version),
'yellow'
))
###########################################################################
# Gemfile Checking
###########################################################################
def with_bundler(self):
"""
Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool
"""
def gemfile_exists():
"""
Check, if a Gemfile exists in the current repo.
"""
return os.path.exists('Gemfile')
if 'GIT_UP_BUNDLER_CHECK' in os.environ:
print(colored(
'''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.
You can now tell git-up to check (or not check) for missing
gems on a per-project basis using git's config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
project's directory:
git config git-up.bundler.check true
Replace 'true' with 'false' to disable checking.''', 'yellow'))
if self.settings['bundler.check']:
return gemfile_exists()
if ('GIT_UP_BUNDLER_CHECK' in os.environ
and os.environ['GIT_UP_BUNDLER_CHECK'] == 'true'):
return gemfile_exists()
return False
def check_bundler(self):
    """Run check-bundler.rb to look for missing gems via bundler."""
    def flag(name):
        # Pass the option name through when enabled, '' otherwise.
        return name if self.config('bundler.' + name) else ''

    from pkg_resources import Requirement, resource_filename

    # Locate the bundled ruby helper inside the installed package.
    script = resource_filename(Requirement.parse('git-up'),
                               os.path.join('PyGitUp', 'check-bundler.rb'))
    assert os.path.exists(script), 'check-bundler.rb doesn\'t ' \
                                   'exist!'

    exit_code = subprocess.call(
        ['ruby', script, flag('autoinstall'), flag('local'), flag('rbenv')]
    )
    if self.testing:
        assert exit_code == 0, 'Errors while executing check-bundler.rb'
def print_error(self, error):
    """Write a detailed report about *error* to ``self.stderr``.

    :type error: GitError
    """
    out = self.stderr
    print(colored(error.message, 'red'), file=out)

    if error.stdout or error.stderr:
        # Relay whatever git itself printed.
        print(file=out)
        print("Here's what git said:", file=out)
        print(file=out)
        if error.stdout:
            print(error.stdout, file=out)
        if error.stderr:
            print(error.stderr, file=out)

    if error.details:
        # Extra context attached by the caller.
        print(file=out)
        print("Here's what we know:", file=out)
        print(str(error.details), file=out)
        print(file=out)
|
msiemens/PyGitUp | PyGitUp/gitup.py | GitUp.version_info | python | def version_info(self):
""" Tell, what version we're running at and if it's up to date. """
# Retrive and show local version info
package = pkg.get_distribution('git-up')
local_version_str = package.version
local_version = package.parsed_version
print('GitUp version is: ' + colored('v' + local_version_str, 'green'))
if not self.settings['updates.check']:
return
# Check for updates
print('Checking for updates...', end='')
try:
# Get version information from the PyPI JSON API
reader = codecs.getreader('utf-8')
details = json.load(reader(urlopen(PYPI_URL)))
online_version = details['info']['version']
except (HTTPError, URLError, ValueError):
recent = True # To not disturb the user with HTTP/parsing errors
else:
recent = local_version >= pkg.parse_version(online_version)
if not recent:
# noinspection PyUnboundLocalVariable
print(
'\rRecent version is: '
+ colored('v' + online_version, color='yellow', attrs=['bold'])
)
print('Run \'pip install -U git-up\' to get the update.')
else:
# Clear the update line
sys.stdout.write('\r' + ' ' * 80 + '\n') | Tell, what version we're running at and if it's up to date. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/gitup.py#L426-L461 | null | class GitUp(object):
""" Conainter class for GitUp methods """
default_settings = {
'bundler.check': False,
'bundler.autoinstall': False,
'bundler.local': False,
'bundler.rbenv': False,
'fetch.prune': True,
'fetch.all': False,
'rebase.show-hashes': False,
'rebase.arguments': None,
'rebase.auto': True,
'rebase.log-hook': None,
'updates.check': True,
'push.auto': False,
'push.tags': False,
'push.all': False,
}
def __init__(self, testing=False, sparse=False):
# Sparse init: config only
if sparse:
self.git = GitWrapper(None)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
return
# Testing: redirect stderr to stdout
self.testing = testing
if self.testing:
self.stderr = sys.stdout # Quiet testing
else: # pragma: no cover
self.stderr = sys.stderr
self.states = []
self.should_fetch = True
self.pushed = False
# Check, if we're in a git repo
try:
repo_dir = get_git_dir()
except (EnvironmentError, OSError, GitCommandNotFound) as e:
if isinstance(e, GitCommandNotFound) or e.errno == errno.ENOENT:
exc = GitError("The git executable could not be found")
raise exc
else:
raise
else:
if repo_dir is None:
exc = GitError("We don't seem to be in a git repository.")
raise exc
self.repo = Repo(repo_dir, odbt=GitCmdObjectDB)
# Check for branch tracking informatino
if not any(b.tracking_branch() for b in self.repo.branches):
exc = GitError("Can\'t update your repo because it doesn\'t has "
"any branches with tracking information.")
self.print_error(exc)
raise exc
self.git = GitWrapper(self.repo)
# target_map: map local branch names to remote tracking branches
#: :type: dict[str, git.refs.remote.RemoteReference]
self.target_map = dict()
for branch in self.repo.branches:
target = branch.tracking_branch()
if target:
if target.name.startswith('./'):
# Tracking branch is in local repo
target.is_local = True
else:
target.is_local = False
self.target_map[branch.name] = target
# branches: all local branches with tracking information
#: :type: list[git.refs.head.Head]
self.branches = [b for b in self.repo.branches if b.tracking_branch()]
self.branches.sort(key=lambda br: br.name)
# remotes: all remotes that are associated with local branches
#: :type: list[git.refs.remote.RemoteReference]
self.remotes = uniq(
# name = '<remote>/<branch>' -> '<remote>'
[r.name.split('/', 2)[0]
for r in list(self.target_map.values())]
)
# change_count: Number of unstaged changes
self.change_count = len(
self.git.status(porcelain=True, untracked_files='no').split('\n')
)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
def run(self):
""" Run all the git-up stuff. """
try:
if self.should_fetch:
self.fetch()
self.rebase_all_branches()
if self.with_bundler():
self.check_bundler()
if self.settings['push.auto']:
self.push()
except GitError as error:
self.print_error(error)
# Used for test cases
if self.testing:
raise
else: # pragma: no cover
sys.exit(1)
def rebase_all_branches(self):
""" Rebase all branches, if possible. """
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
original_branch = self.repo.active_branch
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
self.log(branch, target)
if fast_fastforward:
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout()
def fetch(self):
"""
Fetch the recent refs from the remotes.
Unless git-up.fetch.all is set to true, all remotes with
locally existent branches will be fetched.
"""
fetch_kwargs = {'multiple': True}
fetch_args = []
if self.is_prune():
fetch_kwargs['prune'] = True
if self.settings['fetch.all']:
fetch_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git fetch --multiple` will fail
return
fetch_args.append(self.remotes)
try:
self.git.fetch(*fetch_args, **fetch_kwargs)
except GitError as error:
error.message = "`git fetch` failed"
raise error
def push(self):
"""
Push the changes back to the remote(s) after fetching
"""
print('pushing...')
push_kwargs = {}
push_args = []
if self.settings['push.tags']:
push_kwargs['push'] = True
if self.settings['push.all']:
push_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git push` will fail
return
push_args.append(self.remotes)
try:
self.git.push(*push_args, **push_kwargs)
self.pushed = True
except GitError as error:
error.message = "`git push` failed"
raise error
def log(self, branch, remote):
""" Call a log-command, if set by git-up.fetch.all. """
log_hook = self.settings['rebase.log-hook']
if log_hook:
if ON_WINDOWS: # pragma: no cover
# Running a string in CMD from Python is not that easy on
# Windows. Running 'cmd /C log_hook' produces problems when
# using multiple statements or things like 'echo'. Therefore,
# we write the string to a bat file and execute it.
# In addition, we replace occurences of $1 with %1 and so forth
# in case the user is used to Bash or sh.
# If there are occurences of %something, we'll replace it with
# %%something. This is the case when running something like
# 'git log --pretty=format:"%Cred%h..."'.
# Also, we replace a semicolon with a newline, because if you
# start with 'echo' on Windows, it will simply echo the
# semicolon and the commands behind instead of echoing and then
# running other commands
# Prepare log_hook
log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
log_hook = re.sub(r'; ?', r'\n', log_hook)
# Write log_hook to an temporary file and get it's path
with NamedTemporaryFile(
prefix='PyGitUp.', suffix='.bat', delete=False
) as bat_file:
# Don't echo all commands
bat_file.file.write(b'@echo off\n')
# Run log_hook
bat_file.file.write(log_hook.encode('utf-8'))
# Run bat_file
state = subprocess.call(
[bat_file.name, branch.name, remote.name]
)
# Clean up file
os.remove(bat_file.name)
else: # pragma: no cover
# Run log_hook via 'shell -c'
state = subprocess.call(
[log_hook, 'git-up', branch.name, remote.name],
shell=True
)
if self.testing:
assert state == 0, 'log_hook returned != 0'
def version_info(self):
""" Tell, what version we're running at and if it's up to date. """
# Retrive and show local version info
package = pkg.get_distribution('git-up')
local_version_str = package.version
local_version = package.parsed_version
print('GitUp version is: ' + colored('v' + local_version_str, 'green'))
if not self.settings['updates.check']:
return
# Check for updates
print('Checking for updates...', end='')
try:
# Get version information from the PyPI JSON API
reader = codecs.getreader('utf-8')
details = json.load(reader(urlopen(PYPI_URL)))
online_version = details['info']['version']
except (HTTPError, URLError, ValueError):
recent = True # To not disturb the user with HTTP/parsing errors
else:
recent = local_version >= pkg.parse_version(online_version)
if not recent:
# noinspection PyUnboundLocalVariable
print(
'\rRecent version is: '
+ colored('v' + online_version, color='yellow', attrs=['bold'])
)
print('Run \'pip install -U git-up\' to get the update.')
else:
# Clear the update line
sys.stdout.write('\r' + ' ' * 80 + '\n')
###########################################################################
# Helpers
###########################################################################
def load_config(self):
"""
Load the configuration from git config.
"""
for key in self.settings:
value = self.config(key)
# Parse true/false
if value == '' or value is None:
continue # Not set by user, go on
if value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
elif value:
pass # A user-defined string, store the value later
self.settings[key] = value
def config(self, key):
""" Get a git-up-specific config value. """
return self.git.config('git-up.{0}'.format(key))
def is_prune(self):
"""
Return True, if `git fetch --prune` is allowed.
Because of possible incompatibilities, this requires special
treatment.
"""
required_version = "1.6.6"
config_value = self.settings['fetch.prune']
if self.git.is_version_min(required_version):
return config_value is not False
else: # pragma: no cover
if config_value == 'true':
print(colored(
"Warning: fetch.prune is set to 'true' but your git"
"version doesn't seem to support it ({0} < {1})."
"Defaulting to 'false'.".format(self.git.version,
required_version),
'yellow'
))
###########################################################################
# Gemfile Checking
###########################################################################
def with_bundler(self):
"""
Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool
"""
def gemfile_exists():
"""
Check, if a Gemfile exists in the current repo.
"""
return os.path.exists('Gemfile')
if 'GIT_UP_BUNDLER_CHECK' in os.environ:
print(colored(
'''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.
You can now tell git-up to check (or not check) for missing
gems on a per-project basis using git's config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
project's directory:
git config git-up.bundler.check true
Replace 'true' with 'false' to disable checking.''', 'yellow'))
if self.settings['bundler.check']:
return gemfile_exists()
if ('GIT_UP_BUNDLER_CHECK' in os.environ
and os.environ['GIT_UP_BUNDLER_CHECK'] == 'true'):
return gemfile_exists()
return False
def check_bundler(self):
"""
Run the bundler check.
"""
def get_config(name):
return name if self.config('bundler.' + name) else ''
from pkg_resources import Requirement, resource_filename
relative_path = os.path.join('PyGitUp', 'check-bundler.rb')
bundler_script = resource_filename(Requirement.parse('git-up'),
relative_path)
assert os.path.exists(bundler_script), 'check-bundler.rb doesn\'t ' \
'exist!'
return_value = subprocess.call(
['ruby', bundler_script, get_config('autoinstall'),
get_config('local'), get_config('rbenv')]
)
if self.testing:
assert return_value == 0, 'Errors while executing check-bundler.rb'
def print_error(self, error):
"""
Print more information about an error.
:type error: GitError
"""
print(colored(error.message, 'red'), file=self.stderr)
if error.stdout or error.stderr:
print(file=self.stderr)
print("Here's what git said:", file=self.stderr)
print(file=self.stderr)
if error.stdout:
print(error.stdout, file=self.stderr)
if error.stderr:
print(error.stderr, file=self.stderr)
if error.details:
print(file=self.stderr)
print("Here's what we know:", file=self.stderr)
print(str(error.details), file=self.stderr)
print(file=self.stderr)
|
msiemens/PyGitUp | PyGitUp/gitup.py | GitUp.load_config | python | def load_config(self):
"""
Load the configuration from git config.
"""
for key in self.settings:
value = self.config(key)
# Parse true/false
if value == '' or value is None:
continue # Not set by user, go on
if value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
elif value:
pass # A user-defined string, store the value later
self.settings[key] = value | Load the configuration from git config. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/gitup.py#L467-L483 | [
"def config(self, key):\n \"\"\" Get a git-up-specific config value. \"\"\"\n return self.git.config('git-up.{0}'.format(key))\n"
] | class GitUp(object):
""" Conainter class for GitUp methods """
default_settings = {
'bundler.check': False,
'bundler.autoinstall': False,
'bundler.local': False,
'bundler.rbenv': False,
'fetch.prune': True,
'fetch.all': False,
'rebase.show-hashes': False,
'rebase.arguments': None,
'rebase.auto': True,
'rebase.log-hook': None,
'updates.check': True,
'push.auto': False,
'push.tags': False,
'push.all': False,
}
def __init__(self, testing=False, sparse=False):
# Sparse init: config only
if sparse:
self.git = GitWrapper(None)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
return
# Testing: redirect stderr to stdout
self.testing = testing
if self.testing:
self.stderr = sys.stdout # Quiet testing
else: # pragma: no cover
self.stderr = sys.stderr
self.states = []
self.should_fetch = True
self.pushed = False
# Check, if we're in a git repo
try:
repo_dir = get_git_dir()
except (EnvironmentError, OSError, GitCommandNotFound) as e:
if isinstance(e, GitCommandNotFound) or e.errno == errno.ENOENT:
exc = GitError("The git executable could not be found")
raise exc
else:
raise
else:
if repo_dir is None:
exc = GitError("We don't seem to be in a git repository.")
raise exc
self.repo = Repo(repo_dir, odbt=GitCmdObjectDB)
# Check for branch tracking informatino
if not any(b.tracking_branch() for b in self.repo.branches):
exc = GitError("Can\'t update your repo because it doesn\'t has "
"any branches with tracking information.")
self.print_error(exc)
raise exc
self.git = GitWrapper(self.repo)
# target_map: map local branch names to remote tracking branches
#: :type: dict[str, git.refs.remote.RemoteReference]
self.target_map = dict()
for branch in self.repo.branches:
target = branch.tracking_branch()
if target:
if target.name.startswith('./'):
# Tracking branch is in local repo
target.is_local = True
else:
target.is_local = False
self.target_map[branch.name] = target
# branches: all local branches with tracking information
#: :type: list[git.refs.head.Head]
self.branches = [b for b in self.repo.branches if b.tracking_branch()]
self.branches.sort(key=lambda br: br.name)
# remotes: all remotes that are associated with local branches
#: :type: list[git.refs.remote.RemoteReference]
self.remotes = uniq(
# name = '<remote>/<branch>' -> '<remote>'
[r.name.split('/', 2)[0]
for r in list(self.target_map.values())]
)
# change_count: Number of unstaged changes
self.change_count = len(
self.git.status(porcelain=True, untracked_files='no').split('\n')
)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
def run(self):
""" Run all the git-up stuff. """
try:
if self.should_fetch:
self.fetch()
self.rebase_all_branches()
if self.with_bundler():
self.check_bundler()
if self.settings['push.auto']:
self.push()
except GitError as error:
self.print_error(error)
# Used for test cases
if self.testing:
raise
else: # pragma: no cover
sys.exit(1)
def rebase_all_branches(self):
""" Rebase all branches, if possible. """
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
original_branch = self.repo.active_branch
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
self.log(branch, target)
if fast_fastforward:
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout()
def fetch(self):
"""
Fetch the recent refs from the remotes.
Unless git-up.fetch.all is set to true, all remotes with
locally existent branches will be fetched.
"""
fetch_kwargs = {'multiple': True}
fetch_args = []
if self.is_prune():
fetch_kwargs['prune'] = True
if self.settings['fetch.all']:
fetch_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git fetch --multiple` will fail
return
fetch_args.append(self.remotes)
try:
self.git.fetch(*fetch_args, **fetch_kwargs)
except GitError as error:
error.message = "`git fetch` failed"
raise error
def push(self):
"""
Push the changes back to the remote(s) after fetching
"""
print('pushing...')
push_kwargs = {}
push_args = []
if self.settings['push.tags']:
push_kwargs['push'] = True
if self.settings['push.all']:
push_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git push` will fail
return
push_args.append(self.remotes)
try:
self.git.push(*push_args, **push_kwargs)
self.pushed = True
except GitError as error:
error.message = "`git push` failed"
raise error
def log(self, branch, remote):
""" Call a log-command, if set by git-up.fetch.all. """
log_hook = self.settings['rebase.log-hook']
if log_hook:
if ON_WINDOWS: # pragma: no cover
# Running a string in CMD from Python is not that easy on
# Windows. Running 'cmd /C log_hook' produces problems when
# using multiple statements or things like 'echo'. Therefore,
# we write the string to a bat file and execute it.
# In addition, we replace occurences of $1 with %1 and so forth
# in case the user is used to Bash or sh.
# If there are occurences of %something, we'll replace it with
# %%something. This is the case when running something like
# 'git log --pretty=format:"%Cred%h..."'.
# Also, we replace a semicolon with a newline, because if you
# start with 'echo' on Windows, it will simply echo the
# semicolon and the commands behind instead of echoing and then
# running other commands
# Prepare log_hook
log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
log_hook = re.sub(r'; ?', r'\n', log_hook)
# Write log_hook to an temporary file and get it's path
with NamedTemporaryFile(
prefix='PyGitUp.', suffix='.bat', delete=False
) as bat_file:
# Don't echo all commands
bat_file.file.write(b'@echo off\n')
# Run log_hook
bat_file.file.write(log_hook.encode('utf-8'))
# Run bat_file
state = subprocess.call(
[bat_file.name, branch.name, remote.name]
)
# Clean up file
os.remove(bat_file.name)
else: # pragma: no cover
# Run log_hook via 'shell -c'
state = subprocess.call(
[log_hook, 'git-up', branch.name, remote.name],
shell=True
)
if self.testing:
assert state == 0, 'log_hook returned != 0'
def version_info(self):
""" Tell, what version we're running at and if it's up to date. """
# Retrive and show local version info
package = pkg.get_distribution('git-up')
local_version_str = package.version
local_version = package.parsed_version
print('GitUp version is: ' + colored('v' + local_version_str, 'green'))
if not self.settings['updates.check']:
return
# Check for updates
print('Checking for updates...', end='')
try:
# Get version information from the PyPI JSON API
reader = codecs.getreader('utf-8')
details = json.load(reader(urlopen(PYPI_URL)))
online_version = details['info']['version']
except (HTTPError, URLError, ValueError):
recent = True # To not disturb the user with HTTP/parsing errors
else:
recent = local_version >= pkg.parse_version(online_version)
if not recent:
# noinspection PyUnboundLocalVariable
print(
'\rRecent version is: '
+ colored('v' + online_version, color='yellow', attrs=['bold'])
)
print('Run \'pip install -U git-up\' to get the update.')
else:
# Clear the update line
sys.stdout.write('\r' + ' ' * 80 + '\n')
###########################################################################
# Helpers
###########################################################################
def load_config(self):
"""
Load the configuration from git config.
"""
for key in self.settings:
value = self.config(key)
# Parse true/false
if value == '' or value is None:
continue # Not set by user, go on
if value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
elif value:
pass # A user-defined string, store the value later
self.settings[key] = value
def config(self, key):
""" Get a git-up-specific config value. """
return self.git.config('git-up.{0}'.format(key))
def is_prune(self):
"""
Return True, if `git fetch --prune` is allowed.
Because of possible incompatibilities, this requires special
treatment.
"""
required_version = "1.6.6"
config_value = self.settings['fetch.prune']
if self.git.is_version_min(required_version):
return config_value is not False
else: # pragma: no cover
if config_value == 'true':
print(colored(
"Warning: fetch.prune is set to 'true' but your git"
"version doesn't seem to support it ({0} < {1})."
"Defaulting to 'false'.".format(self.git.version,
required_version),
'yellow'
))
###########################################################################
# Gemfile Checking
###########################################################################
def with_bundler(self):
    """
    Check, if bundler check is requested.

    Check, if the user wants us to check for new gems and return True in
    this case.

    :rtype : bool
    """
    def gemfile_exists():
        """
        Check, if a Gemfile exists in the current repo.
        """
        return os.path.exists('Gemfile')

    if 'GIT_UP_BUNDLER_CHECK' in os.environ:
        # The env var still works but is deprecated; tell the user once.
        print(colored(
            '''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.
You can now tell git-up to check (or not check) for missing
gems on a per-project basis using git's config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
project's directory:
git config git-up.bundler.check true
Replace 'true' with 'false' to disable checking.''', 'yellow'))

    enabled = (
        self.settings['bundler.check']
        or os.environ.get('GIT_UP_BUNDLER_CHECK') == 'true'
    )
    if enabled:
        return gemfile_exists()
    return False
def check_bundler(self):
    """
    Run the bundler check (check-bundler.rb) to look for missing gems.
    """
    def get_config(name):
        # Forward the option name when it is enabled, '' otherwise.
        return name if self.config('bundler.' + name) else ''

    from pkg_resources import Requirement, resource_filename
    script_path = resource_filename(
        Requirement.parse('git-up'),
        os.path.join('PyGitUp', 'check-bundler.rb')
    )
    assert os.path.exists(script_path), 'check-bundler.rb doesn\'t ' \
                                        'exist!'

    exit_code = subprocess.call(
        ['ruby', script_path, get_config('autoinstall'),
         get_config('local'), get_config('rbenv')]
    )
    if self.testing:
        assert exit_code == 0, 'Errors while executing check-bundler.rb'
def print_error(self, error):
    """
    Print details about *error* to ``self.stderr``.

    :type error: GitError
    """
    out = self.stderr
    print(colored(error.message, 'red'), file=out)

    if error.stdout or error.stderr:
        # Show git's own output, separated by blank lines.
        print(file=out)
        print("Here's what git said:", file=out)
        print(file=out)
        if error.stdout:
            print(error.stdout, file=out)
        if error.stderr:
            print(error.stderr, file=out)

    if error.details:
        print(file=out)
        print("Here's what we know:", file=out)
        print(str(error.details), file=out)
        print(file=out)
|
msiemens/PyGitUp | PyGitUp/gitup.py | GitUp.is_prune | python | def is_prune(self):
"""
Return True, if `git fetch --prune` is allowed.
Because of possible incompatibilities, this requires special
treatment.
"""
required_version = "1.6.6"
config_value = self.settings['fetch.prune']
if self.git.is_version_min(required_version):
return config_value is not False
else: # pragma: no cover
if config_value == 'true':
print(colored(
"Warning: fetch.prune is set to 'true' but your git"
"version doesn't seem to support it ({0} < {1})."
"Defaulting to 'false'.".format(self.git.version,
required_version),
'yellow'
)) | Return True, if `git fetch --prune` is allowed.
Because of possible incompatibilities, this requires special
treatment. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/gitup.py#L489-L509 | null | class GitUp(object):
""" Conainter class for GitUp methods """
default_settings = {
'bundler.check': False,
'bundler.autoinstall': False,
'bundler.local': False,
'bundler.rbenv': False,
'fetch.prune': True,
'fetch.all': False,
'rebase.show-hashes': False,
'rebase.arguments': None,
'rebase.auto': True,
'rebase.log-hook': None,
'updates.check': True,
'push.auto': False,
'push.tags': False,
'push.all': False,
}
def __init__(self, testing=False, sparse=False):
# Sparse init: config only
if sparse:
self.git = GitWrapper(None)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
return
# Testing: redirect stderr to stdout
self.testing = testing
if self.testing:
self.stderr = sys.stdout # Quiet testing
else: # pragma: no cover
self.stderr = sys.stderr
self.states = []
self.should_fetch = True
self.pushed = False
# Check, if we're in a git repo
try:
repo_dir = get_git_dir()
except (EnvironmentError, OSError, GitCommandNotFound) as e:
if isinstance(e, GitCommandNotFound) or e.errno == errno.ENOENT:
exc = GitError("The git executable could not be found")
raise exc
else:
raise
else:
if repo_dir is None:
exc = GitError("We don't seem to be in a git repository.")
raise exc
self.repo = Repo(repo_dir, odbt=GitCmdObjectDB)
# Check for branch tracking informatino
if not any(b.tracking_branch() for b in self.repo.branches):
exc = GitError("Can\'t update your repo because it doesn\'t has "
"any branches with tracking information.")
self.print_error(exc)
raise exc
self.git = GitWrapper(self.repo)
# target_map: map local branch names to remote tracking branches
#: :type: dict[str, git.refs.remote.RemoteReference]
self.target_map = dict()
for branch in self.repo.branches:
target = branch.tracking_branch()
if target:
if target.name.startswith('./'):
# Tracking branch is in local repo
target.is_local = True
else:
target.is_local = False
self.target_map[branch.name] = target
# branches: all local branches with tracking information
#: :type: list[git.refs.head.Head]
self.branches = [b for b in self.repo.branches if b.tracking_branch()]
self.branches.sort(key=lambda br: br.name)
# remotes: all remotes that are associated with local branches
#: :type: list[git.refs.remote.RemoteReference]
self.remotes = uniq(
# name = '<remote>/<branch>' -> '<remote>'
[r.name.split('/', 2)[0]
for r in list(self.target_map.values())]
)
# change_count: Number of unstaged changes
self.change_count = len(
self.git.status(porcelain=True, untracked_files='no').split('\n')
)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
def run(self):
""" Run all the git-up stuff. """
try:
if self.should_fetch:
self.fetch()
self.rebase_all_branches()
if self.with_bundler():
self.check_bundler()
if self.settings['push.auto']:
self.push()
except GitError as error:
self.print_error(error)
# Used for test cases
if self.testing:
raise
else: # pragma: no cover
sys.exit(1)
def rebase_all_branches(self):
""" Rebase all branches, if possible. """
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
original_branch = self.repo.active_branch
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
self.log(branch, target)
if fast_fastforward:
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout()
def fetch(self):
"""
Fetch the recent refs from the remotes.
Unless git-up.fetch.all is set to true, all remotes with
locally existent branches will be fetched.
"""
fetch_kwargs = {'multiple': True}
fetch_args = []
if self.is_prune():
fetch_kwargs['prune'] = True
if self.settings['fetch.all']:
fetch_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git fetch --multiple` will fail
return
fetch_args.append(self.remotes)
try:
self.git.fetch(*fetch_args, **fetch_kwargs)
except GitError as error:
error.message = "`git fetch` failed"
raise error
def push(self):
"""
Push the changes back to the remote(s) after fetching
"""
print('pushing...')
push_kwargs = {}
push_args = []
if self.settings['push.tags']:
push_kwargs['push'] = True
if self.settings['push.all']:
push_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git push` will fail
return
push_args.append(self.remotes)
try:
self.git.push(*push_args, **push_kwargs)
self.pushed = True
except GitError as error:
error.message = "`git push` failed"
raise error
def log(self, branch, remote):
""" Call a log-command, if set by git-up.fetch.all. """
log_hook = self.settings['rebase.log-hook']
if log_hook:
if ON_WINDOWS: # pragma: no cover
# Running a string in CMD from Python is not that easy on
# Windows. Running 'cmd /C log_hook' produces problems when
# using multiple statements or things like 'echo'. Therefore,
# we write the string to a bat file and execute it.
# In addition, we replace occurences of $1 with %1 and so forth
# in case the user is used to Bash or sh.
# If there are occurences of %something, we'll replace it with
# %%something. This is the case when running something like
# 'git log --pretty=format:"%Cred%h..."'.
# Also, we replace a semicolon with a newline, because if you
# start with 'echo' on Windows, it will simply echo the
# semicolon and the commands behind instead of echoing and then
# running other commands
# Prepare log_hook
log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
log_hook = re.sub(r'; ?', r'\n', log_hook)
# Write log_hook to an temporary file and get it's path
with NamedTemporaryFile(
prefix='PyGitUp.', suffix='.bat', delete=False
) as bat_file:
# Don't echo all commands
bat_file.file.write(b'@echo off\n')
# Run log_hook
bat_file.file.write(log_hook.encode('utf-8'))
# Run bat_file
state = subprocess.call(
[bat_file.name, branch.name, remote.name]
)
# Clean up file
os.remove(bat_file.name)
else: # pragma: no cover
# Run log_hook via 'shell -c'
state = subprocess.call(
[log_hook, 'git-up', branch.name, remote.name],
shell=True
)
if self.testing:
assert state == 0, 'log_hook returned != 0'
def version_info(self):
""" Tell, what version we're running at and if it's up to date. """
# Retrive and show local version info
package = pkg.get_distribution('git-up')
local_version_str = package.version
local_version = package.parsed_version
print('GitUp version is: ' + colored('v' + local_version_str, 'green'))
if not self.settings['updates.check']:
return
# Check for updates
print('Checking for updates...', end='')
try:
# Get version information from the PyPI JSON API
reader = codecs.getreader('utf-8')
details = json.load(reader(urlopen(PYPI_URL)))
online_version = details['info']['version']
except (HTTPError, URLError, ValueError):
recent = True # To not disturb the user with HTTP/parsing errors
else:
recent = local_version >= pkg.parse_version(online_version)
if not recent:
# noinspection PyUnboundLocalVariable
print(
'\rRecent version is: '
+ colored('v' + online_version, color='yellow', attrs=['bold'])
)
print('Run \'pip install -U git-up\' to get the update.')
else:
# Clear the update line
sys.stdout.write('\r' + ' ' * 80 + '\n')
###########################################################################
# Helpers
###########################################################################
def load_config(self):
"""
Load the configuration from git config.
"""
for key in self.settings:
value = self.config(key)
# Parse true/false
if value == '' or value is None:
continue # Not set by user, go on
if value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
elif value:
pass # A user-defined string, store the value later
self.settings[key] = value
def config(self, key):
""" Get a git-up-specific config value. """
return self.git.config('git-up.{0}'.format(key))
def is_prune(self):
"""
Return True, if `git fetch --prune` is allowed.
Because of possible incompatibilities, this requires special
treatment.
"""
required_version = "1.6.6"
config_value = self.settings['fetch.prune']
if self.git.is_version_min(required_version):
return config_value is not False
else: # pragma: no cover
if config_value == 'true':
print(colored(
"Warning: fetch.prune is set to 'true' but your git"
"version doesn't seem to support it ({0} < {1})."
"Defaulting to 'false'.".format(self.git.version,
required_version),
'yellow'
))
###########################################################################
# Gemfile Checking
###########################################################################
def with_bundler(self):
"""
Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool
"""
def gemfile_exists():
"""
Check, if a Gemfile exists in the current repo.
"""
return os.path.exists('Gemfile')
if 'GIT_UP_BUNDLER_CHECK' in os.environ:
print(colored(
'''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.
You can now tell git-up to check (or not check) for missing
gems on a per-project basis using git's config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
project's directory:
git config git-up.bundler.check true
Replace 'true' with 'false' to disable checking.''', 'yellow'))
if self.settings['bundler.check']:
return gemfile_exists()
if ('GIT_UP_BUNDLER_CHECK' in os.environ
and os.environ['GIT_UP_BUNDLER_CHECK'] == 'true'):
return gemfile_exists()
return False
def check_bundler(self):
"""
Run the bundler check.
"""
def get_config(name):
return name if self.config('bundler.' + name) else ''
from pkg_resources import Requirement, resource_filename
relative_path = os.path.join('PyGitUp', 'check-bundler.rb')
bundler_script = resource_filename(Requirement.parse('git-up'),
relative_path)
assert os.path.exists(bundler_script), 'check-bundler.rb doesn\'t ' \
'exist!'
return_value = subprocess.call(
['ruby', bundler_script, get_config('autoinstall'),
get_config('local'), get_config('rbenv')]
)
if self.testing:
assert return_value == 0, 'Errors while executing check-bundler.rb'
def print_error(self, error):
"""
Print more information about an error.
:type error: GitError
"""
print(colored(error.message, 'red'), file=self.stderr)
if error.stdout or error.stderr:
print(file=self.stderr)
print("Here's what git said:", file=self.stderr)
print(file=self.stderr)
if error.stdout:
print(error.stdout, file=self.stderr)
if error.stderr:
print(error.stderr, file=self.stderr)
if error.details:
print(file=self.stderr)
print("Here's what we know:", file=self.stderr)
print(str(error.details), file=self.stderr)
print(file=self.stderr)
|
msiemens/PyGitUp | PyGitUp/gitup.py | GitUp.with_bundler | python | def with_bundler(self):
"""
Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool
"""
def gemfile_exists():
"""
Check, if a Gemfile exists in the current repo.
"""
return os.path.exists('Gemfile')
if 'GIT_UP_BUNDLER_CHECK' in os.environ:
print(colored(
'''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.
You can now tell git-up to check (or not check) for missing
gems on a per-project basis using git's config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
project's directory:
git config git-up.bundler.check true
Replace 'true' with 'false' to disable checking.''', 'yellow'))
if self.settings['bundler.check']:
return gemfile_exists()
if ('GIT_UP_BUNDLER_CHECK' in os.environ
and os.environ['GIT_UP_BUNDLER_CHECK'] == 'true'):
return gemfile_exists()
return False | Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/gitup.py#L515-L553 | [
"def gemfile_exists():\n \"\"\"\n Check, if a Gemfile exists in the current repo.\n \"\"\"\n return os.path.exists('Gemfile')\n"
] | class GitUp(object):
""" Conainter class for GitUp methods """
default_settings = {
'bundler.check': False,
'bundler.autoinstall': False,
'bundler.local': False,
'bundler.rbenv': False,
'fetch.prune': True,
'fetch.all': False,
'rebase.show-hashes': False,
'rebase.arguments': None,
'rebase.auto': True,
'rebase.log-hook': None,
'updates.check': True,
'push.auto': False,
'push.tags': False,
'push.all': False,
}
def __init__(self, testing=False, sparse=False):
# Sparse init: config only
if sparse:
self.git = GitWrapper(None)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
return
# Testing: redirect stderr to stdout
self.testing = testing
if self.testing:
self.stderr = sys.stdout # Quiet testing
else: # pragma: no cover
self.stderr = sys.stderr
self.states = []
self.should_fetch = True
self.pushed = False
# Check, if we're in a git repo
try:
repo_dir = get_git_dir()
except (EnvironmentError, OSError, GitCommandNotFound) as e:
if isinstance(e, GitCommandNotFound) or e.errno == errno.ENOENT:
exc = GitError("The git executable could not be found")
raise exc
else:
raise
else:
if repo_dir is None:
exc = GitError("We don't seem to be in a git repository.")
raise exc
self.repo = Repo(repo_dir, odbt=GitCmdObjectDB)
# Check for branch tracking informatino
if not any(b.tracking_branch() for b in self.repo.branches):
exc = GitError("Can\'t update your repo because it doesn\'t has "
"any branches with tracking information.")
self.print_error(exc)
raise exc
self.git = GitWrapper(self.repo)
# target_map: map local branch names to remote tracking branches
#: :type: dict[str, git.refs.remote.RemoteReference]
self.target_map = dict()
for branch in self.repo.branches:
target = branch.tracking_branch()
if target:
if target.name.startswith('./'):
# Tracking branch is in local repo
target.is_local = True
else:
target.is_local = False
self.target_map[branch.name] = target
# branches: all local branches with tracking information
#: :type: list[git.refs.head.Head]
self.branches = [b for b in self.repo.branches if b.tracking_branch()]
self.branches.sort(key=lambda br: br.name)
# remotes: all remotes that are associated with local branches
#: :type: list[git.refs.remote.RemoteReference]
self.remotes = uniq(
# name = '<remote>/<branch>' -> '<remote>'
[r.name.split('/', 2)[0]
for r in list(self.target_map.values())]
)
# change_count: Number of unstaged changes
self.change_count = len(
self.git.status(porcelain=True, untracked_files='no').split('\n')
)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
def run(self):
""" Run all the git-up stuff. """
try:
if self.should_fetch:
self.fetch()
self.rebase_all_branches()
if self.with_bundler():
self.check_bundler()
if self.settings['push.auto']:
self.push()
except GitError as error:
self.print_error(error)
# Used for test cases
if self.testing:
raise
else: # pragma: no cover
sys.exit(1)
def rebase_all_branches(self):
""" Rebase all branches, if possible. """
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
original_branch = self.repo.active_branch
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
self.log(branch, target)
if fast_fastforward:
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout()
def fetch(self):
"""
Fetch the recent refs from the remotes.
Unless git-up.fetch.all is set to true, all remotes with
locally existent branches will be fetched.
"""
fetch_kwargs = {'multiple': True}
fetch_args = []
if self.is_prune():
fetch_kwargs['prune'] = True
if self.settings['fetch.all']:
fetch_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git fetch --multiple` will fail
return
fetch_args.append(self.remotes)
try:
self.git.fetch(*fetch_args, **fetch_kwargs)
except GitError as error:
error.message = "`git fetch` failed"
raise error
def push(self):
"""
Push the changes back to the remote(s) after fetching
"""
print('pushing...')
push_kwargs = {}
push_args = []
if self.settings['push.tags']:
push_kwargs['push'] = True
if self.settings['push.all']:
push_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git push` will fail
return
push_args.append(self.remotes)
try:
self.git.push(*push_args, **push_kwargs)
self.pushed = True
except GitError as error:
error.message = "`git push` failed"
raise error
def log(self, branch, remote):
""" Call a log-command, if set by git-up.fetch.all. """
log_hook = self.settings['rebase.log-hook']
if log_hook:
if ON_WINDOWS: # pragma: no cover
# Running a string in CMD from Python is not that easy on
# Windows. Running 'cmd /C log_hook' produces problems when
# using multiple statements or things like 'echo'. Therefore,
# we write the string to a bat file and execute it.
# In addition, we replace occurences of $1 with %1 and so forth
# in case the user is used to Bash or sh.
# If there are occurences of %something, we'll replace it with
# %%something. This is the case when running something like
# 'git log --pretty=format:"%Cred%h..."'.
# Also, we replace a semicolon with a newline, because if you
# start with 'echo' on Windows, it will simply echo the
# semicolon and the commands behind instead of echoing and then
# running other commands
# Prepare log_hook
log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
log_hook = re.sub(r'; ?', r'\n', log_hook)
# Write log_hook to an temporary file and get it's path
with NamedTemporaryFile(
prefix='PyGitUp.', suffix='.bat', delete=False
) as bat_file:
# Don't echo all commands
bat_file.file.write(b'@echo off\n')
# Run log_hook
bat_file.file.write(log_hook.encode('utf-8'))
# Run bat_file
state = subprocess.call(
[bat_file.name, branch.name, remote.name]
)
# Clean up file
os.remove(bat_file.name)
else: # pragma: no cover
# Run log_hook via 'shell -c'
state = subprocess.call(
[log_hook, 'git-up', branch.name, remote.name],
shell=True
)
if self.testing:
assert state == 0, 'log_hook returned != 0'
def version_info(self):
""" Tell, what version we're running at and if it's up to date. """
# Retrive and show local version info
package = pkg.get_distribution('git-up')
local_version_str = package.version
local_version = package.parsed_version
print('GitUp version is: ' + colored('v' + local_version_str, 'green'))
if not self.settings['updates.check']:
return
# Check for updates
print('Checking for updates...', end='')
try:
# Get version information from the PyPI JSON API
reader = codecs.getreader('utf-8')
details = json.load(reader(urlopen(PYPI_URL)))
online_version = details['info']['version']
except (HTTPError, URLError, ValueError):
recent = True # To not disturb the user with HTTP/parsing errors
else:
recent = local_version >= pkg.parse_version(online_version)
if not recent:
# noinspection PyUnboundLocalVariable
print(
'\rRecent version is: '
+ colored('v' + online_version, color='yellow', attrs=['bold'])
)
print('Run \'pip install -U git-up\' to get the update.')
else:
# Clear the update line
sys.stdout.write('\r' + ' ' * 80 + '\n')
###########################################################################
# Helpers
###########################################################################
def load_config(self):
"""
Load the configuration from git config.
"""
for key in self.settings:
value = self.config(key)
# Parse true/false
if value == '' or value is None:
continue # Not set by user, go on
if value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
elif value:
pass # A user-defined string, store the value later
self.settings[key] = value
def config(self, key):
""" Get a git-up-specific config value. """
return self.git.config('git-up.{0}'.format(key))
def is_prune(self):
"""
Return True, if `git fetch --prune` is allowed.
Because of possible incompatibilities, this requires special
treatment.
"""
required_version = "1.6.6"
config_value = self.settings['fetch.prune']
if self.git.is_version_min(required_version):
return config_value is not False
else: # pragma: no cover
if config_value == 'true':
print(colored(
"Warning: fetch.prune is set to 'true' but your git"
"version doesn't seem to support it ({0} < {1})."
"Defaulting to 'false'.".format(self.git.version,
required_version),
'yellow'
))
###########################################################################
# Gemfile Checking
###########################################################################
def with_bundler(self):
"""
Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool
"""
def gemfile_exists():
"""
Check, if a Gemfile exists in the current repo.
"""
return os.path.exists('Gemfile')
if 'GIT_UP_BUNDLER_CHECK' in os.environ:
print(colored(
'''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.
You can now tell git-up to check (or not check) for missing
gems on a per-project basis using git's config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
project's directory:
git config git-up.bundler.check true
Replace 'true' with 'false' to disable checking.''', 'yellow'))
if self.settings['bundler.check']:
return gemfile_exists()
if ('GIT_UP_BUNDLER_CHECK' in os.environ
and os.environ['GIT_UP_BUNDLER_CHECK'] == 'true'):
return gemfile_exists()
return False
def check_bundler(self):
"""
Run the bundler check.
"""
def get_config(name):
return name if self.config('bundler.' + name) else ''
from pkg_resources import Requirement, resource_filename
relative_path = os.path.join('PyGitUp', 'check-bundler.rb')
bundler_script = resource_filename(Requirement.parse('git-up'),
relative_path)
assert os.path.exists(bundler_script), 'check-bundler.rb doesn\'t ' \
'exist!'
return_value = subprocess.call(
['ruby', bundler_script, get_config('autoinstall'),
get_config('local'), get_config('rbenv')]
)
if self.testing:
assert return_value == 0, 'Errors while executing check-bundler.rb'
def print_error(self, error):
"""
Print more information about an error.
:type error: GitError
"""
print(colored(error.message, 'red'), file=self.stderr)
if error.stdout or error.stderr:
print(file=self.stderr)
print("Here's what git said:", file=self.stderr)
print(file=self.stderr)
if error.stdout:
print(error.stdout, file=self.stderr)
if error.stderr:
print(error.stderr, file=self.stderr)
if error.details:
print(file=self.stderr)
print("Here's what we know:", file=self.stderr)
print(str(error.details), file=self.stderr)
print(file=self.stderr)
|
msiemens/PyGitUp | PyGitUp/gitup.py | GitUp.check_bundler | python | def check_bundler(self):
"""
Run the bundler check.
"""
def get_config(name):
return name if self.config('bundler.' + name) else ''
from pkg_resources import Requirement, resource_filename
relative_path = os.path.join('PyGitUp', 'check-bundler.rb')
bundler_script = resource_filename(Requirement.parse('git-up'),
relative_path)
assert os.path.exists(bundler_script), 'check-bundler.rb doesn\'t ' \
'exist!'
return_value = subprocess.call(
['ruby', bundler_script, get_config('autoinstall'),
get_config('local'), get_config('rbenv')]
)
if self.testing:
assert return_value == 0, 'Errors while executing check-bundler.rb' | Run the bundler check. | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/gitup.py#L555-L576 | [
"def get_config(name):\n return name if self.config('bundler.' + name) else ''\n"
] | class GitUp(object):
""" Conainter class for GitUp methods """
default_settings = {
'bundler.check': False,
'bundler.autoinstall': False,
'bundler.local': False,
'bundler.rbenv': False,
'fetch.prune': True,
'fetch.all': False,
'rebase.show-hashes': False,
'rebase.arguments': None,
'rebase.auto': True,
'rebase.log-hook': None,
'updates.check': True,
'push.auto': False,
'push.tags': False,
'push.all': False,
}
def __init__(self, testing=False, sparse=False):
# Sparse init: config only
if sparse:
self.git = GitWrapper(None)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
return
# Testing: redirect stderr to stdout
self.testing = testing
if self.testing:
self.stderr = sys.stdout # Quiet testing
else: # pragma: no cover
self.stderr = sys.stderr
self.states = []
self.should_fetch = True
self.pushed = False
# Check, if we're in a git repo
try:
repo_dir = get_git_dir()
except (EnvironmentError, OSError, GitCommandNotFound) as e:
if isinstance(e, GitCommandNotFound) or e.errno == errno.ENOENT:
exc = GitError("The git executable could not be found")
raise exc
else:
raise
else:
if repo_dir is None:
exc = GitError("We don't seem to be in a git repository.")
raise exc
self.repo = Repo(repo_dir, odbt=GitCmdObjectDB)
# Check for branch tracking informatino
if not any(b.tracking_branch() for b in self.repo.branches):
exc = GitError("Can\'t update your repo because it doesn\'t has "
"any branches with tracking information.")
self.print_error(exc)
raise exc
self.git = GitWrapper(self.repo)
# target_map: map local branch names to remote tracking branches
#: :type: dict[str, git.refs.remote.RemoteReference]
self.target_map = dict()
for branch in self.repo.branches:
target = branch.tracking_branch()
if target:
if target.name.startswith('./'):
# Tracking branch is in local repo
target.is_local = True
else:
target.is_local = False
self.target_map[branch.name] = target
# branches: all local branches with tracking information
#: :type: list[git.refs.head.Head]
self.branches = [b for b in self.repo.branches if b.tracking_branch()]
self.branches.sort(key=lambda br: br.name)
# remotes: all remotes that are associated with local branches
#: :type: list[git.refs.remote.RemoteReference]
self.remotes = uniq(
# name = '<remote>/<branch>' -> '<remote>'
[r.name.split('/', 2)[0]
for r in list(self.target_map.values())]
)
# change_count: Number of unstaged changes
self.change_count = len(
self.git.status(porcelain=True, untracked_files='no').split('\n')
)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
def run(self):
""" Run all the git-up stuff. """
try:
if self.should_fetch:
self.fetch()
self.rebase_all_branches()
if self.with_bundler():
self.check_bundler()
if self.settings['push.auto']:
self.push()
except GitError as error:
self.print_error(error)
# Used for test cases
if self.testing:
raise
else: # pragma: no cover
sys.exit(1)
def rebase_all_branches(self):
""" Rebase all branches, if possible. """
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
original_branch = self.repo.active_branch
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
self.log(branch, target)
if fast_fastforward:
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout()
def fetch(self):
"""
Fetch the recent refs from the remotes.
Unless git-up.fetch.all is set to true, all remotes with
locally existent branches will be fetched.
"""
fetch_kwargs = {'multiple': True}
fetch_args = []
if self.is_prune():
fetch_kwargs['prune'] = True
if self.settings['fetch.all']:
fetch_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git fetch --multiple` will fail
return
fetch_args.append(self.remotes)
try:
self.git.fetch(*fetch_args, **fetch_kwargs)
except GitError as error:
error.message = "`git fetch` failed"
raise error
def push(self):
"""
Push the changes back to the remote(s) after fetching
"""
print('pushing...')
push_kwargs = {}
push_args = []
if self.settings['push.tags']:
push_kwargs['push'] = True
if self.settings['push.all']:
push_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git push` will fail
return
push_args.append(self.remotes)
try:
self.git.push(*push_args, **push_kwargs)
self.pushed = True
except GitError as error:
error.message = "`git push` failed"
raise error
def log(self, branch, remote):
""" Call a log-command, if set by git-up.fetch.all. """
log_hook = self.settings['rebase.log-hook']
if log_hook:
if ON_WINDOWS: # pragma: no cover
# Running a string in CMD from Python is not that easy on
# Windows. Running 'cmd /C log_hook' produces problems when
# using multiple statements or things like 'echo'. Therefore,
# we write the string to a bat file and execute it.
# In addition, we replace occurences of $1 with %1 and so forth
# in case the user is used to Bash or sh.
# If there are occurences of %something, we'll replace it with
# %%something. This is the case when running something like
# 'git log --pretty=format:"%Cred%h..."'.
# Also, we replace a semicolon with a newline, because if you
# start with 'echo' on Windows, it will simply echo the
# semicolon and the commands behind instead of echoing and then
# running other commands
# Prepare log_hook
log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
log_hook = re.sub(r'; ?', r'\n', log_hook)
# Write log_hook to an temporary file and get it's path
with NamedTemporaryFile(
prefix='PyGitUp.', suffix='.bat', delete=False
) as bat_file:
# Don't echo all commands
bat_file.file.write(b'@echo off\n')
# Run log_hook
bat_file.file.write(log_hook.encode('utf-8'))
# Run bat_file
state = subprocess.call(
[bat_file.name, branch.name, remote.name]
)
# Clean up file
os.remove(bat_file.name)
else: # pragma: no cover
# Run log_hook via 'shell -c'
state = subprocess.call(
[log_hook, 'git-up', branch.name, remote.name],
shell=True
)
if self.testing:
assert state == 0, 'log_hook returned != 0'
def version_info(self):
""" Tell, what version we're running at and if it's up to date. """
# Retrive and show local version info
package = pkg.get_distribution('git-up')
local_version_str = package.version
local_version = package.parsed_version
print('GitUp version is: ' + colored('v' + local_version_str, 'green'))
if not self.settings['updates.check']:
return
# Check for updates
print('Checking for updates...', end='')
try:
# Get version information from the PyPI JSON API
reader = codecs.getreader('utf-8')
details = json.load(reader(urlopen(PYPI_URL)))
online_version = details['info']['version']
except (HTTPError, URLError, ValueError):
recent = True # To not disturb the user with HTTP/parsing errors
else:
recent = local_version >= pkg.parse_version(online_version)
if not recent:
# noinspection PyUnboundLocalVariable
print(
'\rRecent version is: '
+ colored('v' + online_version, color='yellow', attrs=['bold'])
)
print('Run \'pip install -U git-up\' to get the update.')
else:
# Clear the update line
sys.stdout.write('\r' + ' ' * 80 + '\n')
###########################################################################
# Helpers
###########################################################################
def load_config(self):
"""
Load the configuration from git config.
"""
for key in self.settings:
value = self.config(key)
# Parse true/false
if value == '' or value is None:
continue # Not set by user, go on
if value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
elif value:
pass # A user-defined string, store the value later
self.settings[key] = value
def config(self, key):
""" Get a git-up-specific config value. """
return self.git.config('git-up.{0}'.format(key))
def is_prune(self):
"""
Return True, if `git fetch --prune` is allowed.
Because of possible incompatibilities, this requires special
treatment.
"""
required_version = "1.6.6"
config_value = self.settings['fetch.prune']
if self.git.is_version_min(required_version):
return config_value is not False
else: # pragma: no cover
if config_value == 'true':
print(colored(
"Warning: fetch.prune is set to 'true' but your git"
"version doesn't seem to support it ({0} < {1})."
"Defaulting to 'false'.".format(self.git.version,
required_version),
'yellow'
))
###########################################################################
# Gemfile Checking
###########################################################################
def with_bundler(self):
"""
Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool
"""
def gemfile_exists():
"""
Check, if a Gemfile exists in the current repo.
"""
return os.path.exists('Gemfile')
if 'GIT_UP_BUNDLER_CHECK' in os.environ:
print(colored(
'''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.
You can now tell git-up to check (or not check) for missing
gems on a per-project basis using git's config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
project's directory:
git config git-up.bundler.check true
Replace 'true' with 'false' to disable checking.''', 'yellow'))
if self.settings['bundler.check']:
return gemfile_exists()
if ('GIT_UP_BUNDLER_CHECK' in os.environ
and os.environ['GIT_UP_BUNDLER_CHECK'] == 'true'):
return gemfile_exists()
return False
def check_bundler(self):
"""
Run the bundler check.
"""
def get_config(name):
return name if self.config('bundler.' + name) else ''
from pkg_resources import Requirement, resource_filename
relative_path = os.path.join('PyGitUp', 'check-bundler.rb')
bundler_script = resource_filename(Requirement.parse('git-up'),
relative_path)
assert os.path.exists(bundler_script), 'check-bundler.rb doesn\'t ' \
'exist!'
return_value = subprocess.call(
['ruby', bundler_script, get_config('autoinstall'),
get_config('local'), get_config('rbenv')]
)
if self.testing:
assert return_value == 0, 'Errors while executing check-bundler.rb'
def print_error(self, error):
"""
Print more information about an error.
:type error: GitError
"""
print(colored(error.message, 'red'), file=self.stderr)
if error.stdout or error.stderr:
print(file=self.stderr)
print("Here's what git said:", file=self.stderr)
print(file=self.stderr)
if error.stdout:
print(error.stdout, file=self.stderr)
if error.stderr:
print(error.stderr, file=self.stderr)
if error.details:
print(file=self.stderr)
print("Here's what we know:", file=self.stderr)
print(str(error.details), file=self.stderr)
print(file=self.stderr)
|
msiemens/PyGitUp | PyGitUp/gitup.py | GitUp.print_error | python | def print_error(self, error):
"""
Print more information about an error.
:type error: GitError
"""
print(colored(error.message, 'red'), file=self.stderr)
if error.stdout or error.stderr:
print(file=self.stderr)
print("Here's what git said:", file=self.stderr)
print(file=self.stderr)
if error.stdout:
print(error.stdout, file=self.stderr)
if error.stderr:
print(error.stderr, file=self.stderr)
if error.details:
print(file=self.stderr)
print("Here's what we know:", file=self.stderr)
print(str(error.details), file=self.stderr)
print(file=self.stderr) | Print more information about an error.
:type error: GitError | train | https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/gitup.py#L578-L600 | null | class GitUp(object):
""" Conainter class for GitUp methods """
default_settings = {
'bundler.check': False,
'bundler.autoinstall': False,
'bundler.local': False,
'bundler.rbenv': False,
'fetch.prune': True,
'fetch.all': False,
'rebase.show-hashes': False,
'rebase.arguments': None,
'rebase.auto': True,
'rebase.log-hook': None,
'updates.check': True,
'push.auto': False,
'push.tags': False,
'push.all': False,
}
def __init__(self, testing=False, sparse=False):
# Sparse init: config only
if sparse:
self.git = GitWrapper(None)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
return
# Testing: redirect stderr to stdout
self.testing = testing
if self.testing:
self.stderr = sys.stdout # Quiet testing
else: # pragma: no cover
self.stderr = sys.stderr
self.states = []
self.should_fetch = True
self.pushed = False
# Check, if we're in a git repo
try:
repo_dir = get_git_dir()
except (EnvironmentError, OSError, GitCommandNotFound) as e:
if isinstance(e, GitCommandNotFound) or e.errno == errno.ENOENT:
exc = GitError("The git executable could not be found")
raise exc
else:
raise
else:
if repo_dir is None:
exc = GitError("We don't seem to be in a git repository.")
raise exc
self.repo = Repo(repo_dir, odbt=GitCmdObjectDB)
# Check for branch tracking informatino
if not any(b.tracking_branch() for b in self.repo.branches):
exc = GitError("Can\'t update your repo because it doesn\'t has "
"any branches with tracking information.")
self.print_error(exc)
raise exc
self.git = GitWrapper(self.repo)
# target_map: map local branch names to remote tracking branches
#: :type: dict[str, git.refs.remote.RemoteReference]
self.target_map = dict()
for branch in self.repo.branches:
target = branch.tracking_branch()
if target:
if target.name.startswith('./'):
# Tracking branch is in local repo
target.is_local = True
else:
target.is_local = False
self.target_map[branch.name] = target
# branches: all local branches with tracking information
#: :type: list[git.refs.head.Head]
self.branches = [b for b in self.repo.branches if b.tracking_branch()]
self.branches.sort(key=lambda br: br.name)
# remotes: all remotes that are associated with local branches
#: :type: list[git.refs.remote.RemoteReference]
self.remotes = uniq(
# name = '<remote>/<branch>' -> '<remote>'
[r.name.split('/', 2)[0]
for r in list(self.target_map.values())]
)
# change_count: Number of unstaged changes
self.change_count = len(
self.git.status(porcelain=True, untracked_files='no').split('\n')
)
# Load configuration
self.settings = self.default_settings.copy()
self.load_config()
def run(self):
""" Run all the git-up stuff. """
try:
if self.should_fetch:
self.fetch()
self.rebase_all_branches()
if self.with_bundler():
self.check_bundler()
if self.settings['push.auto']:
self.push()
except GitError as error:
self.print_error(error)
# Used for test cases
if self.testing:
raise
else: # pragma: no cover
sys.exit(1)
def rebase_all_branches(self):
""" Rebase all branches, if possible. """
col_width = max(len(b.name) for b in self.branches) + 1
if self.repo.head.is_detached:
raise GitError("You're not currently on a branch. I'm exiting"
" in case you're in the middle of something.")
original_branch = self.repo.active_branch
with self.git.stasher() as stasher:
for branch in self.branches:
target = self.target_map[branch.name]
# Print branch name
if branch.name == original_branch.name:
attrs = ['bold']
else:
attrs = []
print(colored(branch.name.ljust(col_width), attrs=attrs),
end=' ')
# Check, if target branch exists
try:
if target.name.startswith('./'):
# Check, if local branch exists
self.git.rev_parse(target.name[2:])
else:
# Check, if remote branch exists
_ = target.commit
except (ValueError, GitError):
# Remote branch doesn't exist!
print(colored('error: remote branch doesn\'t exist', 'red'))
self.states.append('remote branch doesn\'t exist')
continue
# Get tracking branch
if target.is_local:
target = find(self.repo.branches,
lambda b: b.name == target.name[2:])
# Check status and act appropriately
if target.commit.hexsha == branch.commit.hexsha:
print(colored('up to date', 'green'))
self.states.append('up to date')
continue # Do not do anything
base = self.git.merge_base(branch.name, target.name)
if base == target.commit.hexsha:
print(colored('ahead of upstream', 'cyan'))
self.states.append('ahead')
continue # Do not do anything
fast_fastforward = False
if base == branch.commit.hexsha:
print(colored('fast-forwarding...', 'yellow'), end='')
self.states.append('fast-forwarding')
# Don't fast fast-forward the currently checked-out branch
fast_fastforward = (branch.name !=
self.repo.active_branch.name)
elif not self.settings['rebase.auto']:
print(colored('diverged', 'red'))
self.states.append('diverged')
continue # Do not do anything
else:
print(colored('rebasing', 'yellow'), end='')
self.states.append('rebasing')
if self.settings['rebase.show-hashes']:
print(' {}..{}'.format(base[0:7],
target.commit.hexsha[0:7]))
else:
print()
self.log(branch, target)
if fast_fastforward:
branch.commit = target.commit
else:
stasher()
self.git.checkout(branch.name)
self.git.rebase(target)
if (self.repo.head.is_detached # Only on Travis CI,
# we get a detached head after doing our rebase *confused*.
# Running self.repo.active_branch would fail.
or not self.repo.active_branch.name == original_branch.name):
print(colored('returning to {0}'.format(original_branch.name),
'magenta'))
original_branch.checkout()
def fetch(self):
"""
Fetch the recent refs from the remotes.
Unless git-up.fetch.all is set to true, all remotes with
locally existent branches will be fetched.
"""
fetch_kwargs = {'multiple': True}
fetch_args = []
if self.is_prune():
fetch_kwargs['prune'] = True
if self.settings['fetch.all']:
fetch_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git fetch --multiple` will fail
return
fetch_args.append(self.remotes)
try:
self.git.fetch(*fetch_args, **fetch_kwargs)
except GitError as error:
error.message = "`git fetch` failed"
raise error
def push(self):
"""
Push the changes back to the remote(s) after fetching
"""
print('pushing...')
push_kwargs = {}
push_args = []
if self.settings['push.tags']:
push_kwargs['push'] = True
if self.settings['push.all']:
push_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git push` will fail
return
push_args.append(self.remotes)
try:
self.git.push(*push_args, **push_kwargs)
self.pushed = True
except GitError as error:
error.message = "`git push` failed"
raise error
def log(self, branch, remote):
""" Call a log-command, if set by git-up.fetch.all. """
log_hook = self.settings['rebase.log-hook']
if log_hook:
if ON_WINDOWS: # pragma: no cover
# Running a string in CMD from Python is not that easy on
# Windows. Running 'cmd /C log_hook' produces problems when
# using multiple statements or things like 'echo'. Therefore,
# we write the string to a bat file and execute it.
# In addition, we replace occurences of $1 with %1 and so forth
# in case the user is used to Bash or sh.
# If there are occurences of %something, we'll replace it with
# %%something. This is the case when running something like
# 'git log --pretty=format:"%Cred%h..."'.
# Also, we replace a semicolon with a newline, because if you
# start with 'echo' on Windows, it will simply echo the
# semicolon and the commands behind instead of echoing and then
# running other commands
# Prepare log_hook
log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
log_hook = re.sub(r'; ?', r'\n', log_hook)
# Write log_hook to an temporary file and get it's path
with NamedTemporaryFile(
prefix='PyGitUp.', suffix='.bat', delete=False
) as bat_file:
# Don't echo all commands
bat_file.file.write(b'@echo off\n')
# Run log_hook
bat_file.file.write(log_hook.encode('utf-8'))
# Run bat_file
state = subprocess.call(
[bat_file.name, branch.name, remote.name]
)
# Clean up file
os.remove(bat_file.name)
else: # pragma: no cover
# Run log_hook via 'shell -c'
state = subprocess.call(
[log_hook, 'git-up', branch.name, remote.name],
shell=True
)
if self.testing:
assert state == 0, 'log_hook returned != 0'
def version_info(self):
""" Tell, what version we're running at and if it's up to date. """
# Retrive and show local version info
package = pkg.get_distribution('git-up')
local_version_str = package.version
local_version = package.parsed_version
print('GitUp version is: ' + colored('v' + local_version_str, 'green'))
if not self.settings['updates.check']:
return
# Check for updates
print('Checking for updates...', end='')
try:
# Get version information from the PyPI JSON API
reader = codecs.getreader('utf-8')
details = json.load(reader(urlopen(PYPI_URL)))
online_version = details['info']['version']
except (HTTPError, URLError, ValueError):
recent = True # To not disturb the user with HTTP/parsing errors
else:
recent = local_version >= pkg.parse_version(online_version)
if not recent:
# noinspection PyUnboundLocalVariable
print(
'\rRecent version is: '
+ colored('v' + online_version, color='yellow', attrs=['bold'])
)
print('Run \'pip install -U git-up\' to get the update.')
else:
# Clear the update line
sys.stdout.write('\r' + ' ' * 80 + '\n')
###########################################################################
# Helpers
###########################################################################
def load_config(self):
"""
Load the configuration from git config.
"""
for key in self.settings:
value = self.config(key)
# Parse true/false
if value == '' or value is None:
continue # Not set by user, go on
if value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
elif value:
pass # A user-defined string, store the value later
self.settings[key] = value
def config(self, key):
""" Get a git-up-specific config value. """
return self.git.config('git-up.{0}'.format(key))
def is_prune(self):
"""
Return True, if `git fetch --prune` is allowed.
Because of possible incompatibilities, this requires special
treatment.
"""
required_version = "1.6.6"
config_value = self.settings['fetch.prune']
if self.git.is_version_min(required_version):
return config_value is not False
else: # pragma: no cover
if config_value == 'true':
print(colored(
"Warning: fetch.prune is set to 'true' but your git"
"version doesn't seem to support it ({0} < {1})."
"Defaulting to 'false'.".format(self.git.version,
required_version),
'yellow'
))
###########################################################################
# Gemfile Checking
###########################################################################
def with_bundler(self):
"""
Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool
"""
def gemfile_exists():
"""
Check, if a Gemfile exists in the current repo.
"""
return os.path.exists('Gemfile')
if 'GIT_UP_BUNDLER_CHECK' in os.environ:
print(colored(
'''The GIT_UP_BUNDLER_CHECK environment variable is deprecated.
You can now tell git-up to check (or not check) for missing
gems on a per-project basis using git's config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
project's directory:
git config git-up.bundler.check true
Replace 'true' with 'false' to disable checking.''', 'yellow'))
if self.settings['bundler.check']:
return gemfile_exists()
if ('GIT_UP_BUNDLER_CHECK' in os.environ
and os.environ['GIT_UP_BUNDLER_CHECK'] == 'true'):
return gemfile_exists()
return False
def check_bundler(self):
"""
Run the bundler check.
"""
def get_config(name):
return name if self.config('bundler.' + name) else ''
from pkg_resources import Requirement, resource_filename
relative_path = os.path.join('PyGitUp', 'check-bundler.rb')
bundler_script = resource_filename(Requirement.parse('git-up'),
relative_path)
assert os.path.exists(bundler_script), 'check-bundler.rb doesn\'t ' \
'exist!'
return_value = subprocess.call(
['ruby', bundler_script, get_config('autoinstall'),
get_config('local'), get_config('rbenv')]
)
if self.testing:
assert return_value == 0, 'Errors while executing check-bundler.rb'
def print_error(self, error):
"""
Print more information about an error.
:type error: GitError
"""
print(colored(error.message, 'red'), file=self.stderr)
if error.stdout or error.stderr:
print(file=self.stderr)
print("Here's what git said:", file=self.stderr)
print(file=self.stderr)
if error.stdout:
print(error.stdout, file=self.stderr)
if error.stderr:
print(error.stderr, file=self.stderr)
if error.details:
print(file=self.stderr)
print("Here's what we know:", file=self.stderr)
print(str(error.details), file=self.stderr)
print(file=self.stderr)
|
Yelp/venv-update | venv_update.py | parseargs | python | def parseargs(argv):
'''handle --help, --version and our double-equal ==options'''
args = []
options = {}
key = None
for arg in argv:
if arg in DEFAULT_OPTION_VALUES:
key = arg.strip('=').replace('-', '_')
options[key] = ()
elif key is None:
args.append(arg)
else:
options[key] += (arg,)
if set(args) & {'-h', '--help'}:
print(__doc__, end='')
exit(0)
elif set(args) & {'-V', '--version'}:
print(__version__)
exit(0)
elif args:
exit('invalid option: %s\nTry --help for more information.' % args[0])
return options | handle --help, --version and our double-equal ==options | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/venv_update.py#L78-L101 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''\
usage: venv-update [-hV] [options]
Update a (possibly non-existent) virtualenv directory using a pip requirements
file. When this script completes, the virtualenv directory should contain the
same packages as if it were deleted then rebuilt.
venv-update uses "trailing equal" options (e.g. venv=) to delimit groups of
(conventional, dashed) options to pass to wrapped commands (virtualenv and pip).
Options:
venv= parameters are passed to virtualenv
default: {venv=}
install= options to pip-command
default: {install=}
pip-command= is run after the virtualenv directory is bootstrapped
default: {pip-command=}
bootstrap-deps= dependencies to install before pip-command= is run
default: {bootstrap-deps=}
Examples:
# install requirements.txt to "venv"
venv-update
# install requirements.txt to "myenv"
venv-update venv= myenv
# install requirements.txt to "myenv" using Python 3.4
venv-update venv= -ppython3.4 myenv
# install myreqs.txt to "venv"
venv-update install= -r myreqs.txt
# install requirements.txt to "venv", verbosely
venv-update venv= venv -vvv install= -r requirements.txt -vvv
# install requirements.txt to "venv", without pip-faster --update --prune
venv-update pip-command= pip install
We strongly recommend that you keep the default value of pip-command= in order
to quickly and reproducibly install your requirements. You can override the
packages installed during bootstrapping, prior to pip-command=, by setting
bootstrap-deps=
Pip options are also controllable via environment variables.
See https://pip.readthedocs.org/en/stable/user_guide/#environment-variables
For example:
PIP_INDEX_URL=https://pypi.example.com/simple venv-update
Please send issues to: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os.path import exists
from os.path import join
from subprocess import CalledProcessError
__version__ = '3.2.2'
DEFAULT_VIRTUALENV_PATH = 'venv'
DEFAULT_OPTION_VALUES = {
'venv=': (DEFAULT_VIRTUALENV_PATH,),
'install=': ('-r', 'requirements.txt',),
'pip-command=': ('pip-faster', 'install', '--upgrade', '--prune'),
'bootstrap-deps=': ('venv-update==' + __version__,),
}
__doc__ = __doc__.format(
**{key: ' '.join(val) for key, val in DEFAULT_OPTION_VALUES.items()}
)
# This script must not rely on anything other than
# stdlib>=2.6 and virtualenv>1.11
def timid_relpath(arg):
"""convert an argument to a relative path, carefully"""
# TODO-TEST: unit tests
from os.path import isabs, relpath, sep
if isabs(arg):
result = relpath(arg)
if result.count(sep) + 1 < arg.count(sep):
return result
return arg
def shellescape(args):
from pipes import quote
return ' '.join(quote(timid_relpath(arg)) for arg in args)
def colorize(cmd):
from os import isatty
if isatty(1):
template = '\033[36m>\033[m \033[32m{0}\033[m'
else:
template = '> {0}'
return template.format(shellescape(cmd))
def run(cmd):
from subprocess import check_call
check_call(('echo', colorize(cmd)))
check_call(cmd)
def info(msg):
# use a subprocess to ensure correct output interleaving.
from subprocess import check_call
check_call(('echo', msg))
def check_output(cmd):
from subprocess import Popen, PIPE
process = Popen(cmd, stdout=PIPE)
output, _ = process.communicate()
if process.returncode:
raise CalledProcessError(process.returncode, cmd)
else:
assert process.returncode == 0
return output.decode('UTF-8')
def samefile(file1, file2):
if not exists(file1) or not exists(file2):
return False
else:
from os.path import samefile
return samefile(file1, file2)
def exec_(argv): # never returns
"""Wrapper to os.execv which shows the command and runs any atexit handlers (for coverage's sake).
Like os.execv, this function never returns.
"""
# info('EXEC' + colorize(argv)) # TODO: debug logging by environment variable
# in python3, sys.exitfunc has gone away, and atexit._run_exitfuncs seems to be the only pubic-ish interface
# https://hg.python.org/cpython/file/3.4/Modules/atexitmodule.c#l289
import atexit
atexit._run_exitfuncs()
from os import execv
execv(argv[0], argv)
class Scratch(object):
def __init__(self):
self.dir = join(user_cache_dir(), 'venv-update', __version__)
self.venv = join(self.dir, 'venv')
self.python = venv_python(self.venv)
self.src = join(self.dir, 'src')
def exec_scratch_virtualenv(args):
"""
goals:
- get any random site-packages off of the pythonpath
- ensure we can import virtualenv
- ensure that we're not using the interpreter that we may need to delete
- idempotency: do nothing if the above goals are already met
"""
scratch = Scratch()
if not exists(scratch.python):
run(('virtualenv', scratch.venv))
if not exists(join(scratch.src, 'virtualenv.py')):
scratch_python = venv_python(scratch.venv)
# TODO: do we allow user-defined override of which version of virtualenv to install?
tmp = scratch.src + '.tmp'
run((scratch_python, '-m', 'pip.__main__', 'install', 'virtualenv', '--target', tmp))
from os import rename
rename(tmp, scratch.src)
import sys
from os.path import realpath
# We want to compare the paths themselves as sometimes sys.path is the same
# as scratch.venv, but with a suffix of bin/..
if realpath(sys.prefix) != realpath(scratch.venv):
# TODO-TEST: sometimes we would get a stale version of venv-update
exec_((scratch.python, dotpy(__file__)) + args) # never returns
# TODO-TEST: the original venv-update's directory was on sys.path (when using symlinking)
sys.path[0] = scratch.src
def get_original_path(venv_path): # TODO-TEST: a unit test
"""This helps us know whether someone has tried to relocate the virtualenv"""
return check_output(('sh', '-c', '. %s; printf "$VIRTUAL_ENV"' % venv_executable(venv_path, 'activate')))
def has_system_site_packages(interpreter):
# TODO: unit-test
system_site_packages = check_output((
interpreter,
'-c',
# stolen directly from virtualenv's site.py
"""\
import site, os.path
print(
0
if os.path.exists(
os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt')
) else
1
)"""
))
system_site_packages = int(system_site_packages)
assert system_site_packages in (0, 1)
return bool(system_site_packages)
def get_python_version(interpreter):
if not exists(interpreter):
return None
cmd = (interpreter, '-c', 'import sys; print(sys.version)')
return check_output(cmd)
def invalid_virtualenv_reason(venv_path, source_python, destination_python, options):
try:
orig_path = get_original_path(venv_path)
except CalledProcessError:
return 'could not inspect metadata'
if not samefile(orig_path, venv_path):
return 'virtualenv moved {} -> {}'.format(timid_relpath(orig_path), timid_relpath(venv_path))
elif has_system_site_packages(destination_python) != options.system_site_packages:
return 'system-site-packages changed, to %s' % options.system_site_packages
if source_python is None:
return
destination_version = get_python_version(destination_python)
source_version = get_python_version(source_python)
if source_version != destination_version:
return 'python version changed {} -> {}'.format(destination_version, source_version)
def ensure_virtualenv(args, return_values):
"""Ensure we have a valid virtualenv."""
def adjust_options(options, args):
# TODO-TEST: proper error message with no arguments
venv_path = return_values.venv_path = args[0]
if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':
from os.path import abspath, basename, dirname
options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))
# end of option munging.
# there are two python interpreters involved here:
# 1) the interpreter we're instructing virtualenv to copy
if options.python is None:
source_python = None
else:
source_python = virtualenv.resolve_interpreter(options.python)
# 2) the interpreter virtualenv will create
destination_python = venv_python(venv_path)
if exists(destination_python):
reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)
if reason:
info('Removing invalidated virtualenv. (%s)' % reason)
run(('rm', '-rf', venv_path))
else:
info('Keeping valid virtualenv from previous run.')
raise SystemExit(0) # looks good! we're done here.
# this is actually a documented extension point:
# http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options
import virtualenv
virtualenv.adjust_options = adjust_options
from sys import argv
argv[:] = ('virtualenv',) + args
info(colorize(argv))
raise_on_failure(virtualenv.main)
# There might not be a venv_path if doing something like "venv= --version"
# and not actually asking virtualenv to make a venv.
if return_values.venv_path is not None:
run(('rm', '-rf', join(return_values.venv_path, 'local')))
def wait_for_all_subprocesses():
from os import wait
try:
while True:
wait()
except OSError as error:
if error.errno == 10: # no child processes
return
else:
raise
def touch(filename, timestamp):
"""set the mtime of a file"""
if timestamp is not None:
timestamp = (timestamp, timestamp) # atime, mtime
from os import utime
utime(filename, timestamp)
def mark_venv_valid(venv_path):
wait_for_all_subprocesses()
touch(venv_path, None)
def mark_venv_invalid(venv_path):
# LBYL, to attempt to avoid any exception during exception handling
from os.path import isdir
if venv_path and isdir(venv_path):
info('')
info("Something went wrong! Sending '%s' back in time, so make knows it's invalid." % timid_relpath(venv_path))
wait_for_all_subprocesses()
touch(venv_path, 0)
def dotpy(filename):
if filename.endswith(('.pyc', '.pyo', '.pyd')):
return filename[:-1]
else:
return filename
def venv_executable(venv_path, executable):
return join(venv_path, 'bin', executable)
def venv_python(venv_path):
return venv_executable(venv_path, 'python')
def user_cache_dir():
# stolen from pip.utils.appdirs.user_cache_dir
from os import getenv
from os.path import expanduser
return getenv('XDG_CACHE_HOME', expanduser('~/.cache'))
def venv_update(
venv=DEFAULT_OPTION_VALUES['venv='],
install=DEFAULT_OPTION_VALUES['install='],
pip_command=DEFAULT_OPTION_VALUES['pip-command='],
bootstrap_deps=DEFAULT_OPTION_VALUES['bootstrap-deps='],
):
"""we have an arbitrary python interpreter active, (possibly) outside the virtualenv we want.
make a fresh venv at the right spot, make sure it has pip-faster, and use it
"""
# SMELL: mutable argument as return value
class return_values(object):
venv_path = None
try:
ensure_virtualenv(venv, return_values)
if return_values.venv_path is None:
return
# invariant: the final virtualenv exists, with the right python version
raise_on_failure(lambda: pip_faster(return_values.venv_path, pip_command, install, bootstrap_deps))
except BaseException:
mark_venv_invalid(return_values.venv_path)
raise
else:
mark_venv_valid(return_values.venv_path)
def execfile_(filename):
with open(filename) as code:
code = compile(code.read(), filename, 'exec')
exec(code, {'__file__': filename})
def pip_faster(venv_path, pip_command, install, bootstrap_deps):
"""install and run pip-faster"""
# activate the virtualenv
execfile_(venv_executable(venv_path, 'activate_this.py'))
# disable a useless warning
# FIXME: ensure a "true SSLContext" is available
from os import environ
environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'
# we always have to run the bootstrap, because the presense of an
# executable doesn't imply the right version. pip is able to validate the
# version in the fastpath case quickly anyway.
run(('pip', 'install') + bootstrap_deps)
run(pip_command + install)
def raise_on_failure(mainfunc):
"""raise if and only if mainfunc fails"""
try:
errors = mainfunc()
if errors:
exit(errors)
except CalledProcessError as error:
exit(error.returncode)
except SystemExit as error:
if error.code:
raise
except KeyboardInterrupt: # I don't plan to test-cover this. :pragma:nocover:
exit(1)
def main():
from sys import argv
args = tuple(argv[1:])
# process --help before we create any side-effects.
options = parseargs(args)
exec_scratch_virtualenv(args)
return venv_update(**options)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | venv_update.py | timid_relpath | python | def timid_relpath(arg):
# TODO-TEST: unit tests
from os.path import isabs, relpath, sep
if isabs(arg):
result = relpath(arg)
if result.count(sep) + 1 < arg.count(sep):
return result
return arg | convert an argument to a relative path, carefully | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/venv_update.py#L104-L113 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''\
usage: venv-update [-hV] [options]
Update a (possibly non-existent) virtualenv directory using a pip requirements
file. When this script completes, the virtualenv directory should contain the
same packages as if it were deleted then rebuilt.
venv-update uses "trailing equal" options (e.g. venv=) to delimit groups of
(conventional, dashed) options to pass to wrapped commands (virtualenv and pip).
Options:
venv= parameters are passed to virtualenv
default: {venv=}
install= options to pip-command
default: {install=}
pip-command= is run after the virtualenv directory is bootstrapped
default: {pip-command=}
bootstrap-deps= dependencies to install before pip-command= is run
default: {bootstrap-deps=}
Examples:
# install requirements.txt to "venv"
venv-update
# install requirements.txt to "myenv"
venv-update venv= myenv
# install requirements.txt to "myenv" using Python 3.4
venv-update venv= -ppython3.4 myenv
# install myreqs.txt to "venv"
venv-update install= -r myreqs.txt
# install requirements.txt to "venv", verbosely
venv-update venv= venv -vvv install= -r requirements.txt -vvv
# install requirements.txt to "venv", without pip-faster --update --prune
venv-update pip-command= pip install
We strongly recommend that you keep the default value of pip-command= in order
to quickly and reproducibly install your requirements. You can override the
packages installed during bootstrapping, prior to pip-command=, by setting
bootstrap-deps=
Pip options are also controllable via environment variables.
See https://pip.readthedocs.org/en/stable/user_guide/#environment-variables
For example:
PIP_INDEX_URL=https://pypi.example.com/simple venv-update
Please send issues to: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os.path import exists
from os.path import join
from subprocess import CalledProcessError
__version__ = '3.2.2'
DEFAULT_VIRTUALENV_PATH = 'venv'
DEFAULT_OPTION_VALUES = {
'venv=': (DEFAULT_VIRTUALENV_PATH,),
'install=': ('-r', 'requirements.txt',),
'pip-command=': ('pip-faster', 'install', '--upgrade', '--prune'),
'bootstrap-deps=': ('venv-update==' + __version__,),
}
__doc__ = __doc__.format(
**{key: ' '.join(val) for key, val in DEFAULT_OPTION_VALUES.items()}
)
# This script must not rely on anything other than
# stdlib>=2.6 and virtualenv>1.11
def parseargs(argv):
'''handle --help, --version and our double-equal ==options'''
args = []
options = {}
key = None
for arg in argv:
if arg in DEFAULT_OPTION_VALUES:
key = arg.strip('=').replace('-', '_')
options[key] = ()
elif key is None:
args.append(arg)
else:
options[key] += (arg,)
if set(args) & {'-h', '--help'}:
print(__doc__, end='')
exit(0)
elif set(args) & {'-V', '--version'}:
print(__version__)
exit(0)
elif args:
exit('invalid option: %s\nTry --help for more information.' % args[0])
return options
def shellescape(args):
from pipes import quote
return ' '.join(quote(timid_relpath(arg)) for arg in args)
def colorize(cmd):
from os import isatty
if isatty(1):
template = '\033[36m>\033[m \033[32m{0}\033[m'
else:
template = '> {0}'
return template.format(shellescape(cmd))
def run(cmd):
from subprocess import check_call
check_call(('echo', colorize(cmd)))
check_call(cmd)
def info(msg):
# use a subprocess to ensure correct output interleaving.
from subprocess import check_call
check_call(('echo', msg))
def check_output(cmd):
from subprocess import Popen, PIPE
process = Popen(cmd, stdout=PIPE)
output, _ = process.communicate()
if process.returncode:
raise CalledProcessError(process.returncode, cmd)
else:
assert process.returncode == 0
return output.decode('UTF-8')
def samefile(file1, file2):
if not exists(file1) or not exists(file2):
return False
else:
from os.path import samefile
return samefile(file1, file2)
def exec_(argv): # never returns
"""Wrapper to os.execv which shows the command and runs any atexit handlers (for coverage's sake).
Like os.execv, this function never returns.
"""
# info('EXEC' + colorize(argv)) # TODO: debug logging by environment variable
# in python3, sys.exitfunc has gone away, and atexit._run_exitfuncs seems to be the only pubic-ish interface
# https://hg.python.org/cpython/file/3.4/Modules/atexitmodule.c#l289
import atexit
atexit._run_exitfuncs()
from os import execv
execv(argv[0], argv)
class Scratch(object):
def __init__(self):
self.dir = join(user_cache_dir(), 'venv-update', __version__)
self.venv = join(self.dir, 'venv')
self.python = venv_python(self.venv)
self.src = join(self.dir, 'src')
def exec_scratch_virtualenv(args):
"""
goals:
- get any random site-packages off of the pythonpath
- ensure we can import virtualenv
- ensure that we're not using the interpreter that we may need to delete
- idempotency: do nothing if the above goals are already met
"""
scratch = Scratch()
if not exists(scratch.python):
run(('virtualenv', scratch.venv))
if not exists(join(scratch.src, 'virtualenv.py')):
scratch_python = venv_python(scratch.venv)
# TODO: do we allow user-defined override of which version of virtualenv to install?
tmp = scratch.src + '.tmp'
run((scratch_python, '-m', 'pip.__main__', 'install', 'virtualenv', '--target', tmp))
from os import rename
rename(tmp, scratch.src)
import sys
from os.path import realpath
# We want to compare the paths themselves as sometimes sys.path is the same
# as scratch.venv, but with a suffix of bin/..
if realpath(sys.prefix) != realpath(scratch.venv):
# TODO-TEST: sometimes we would get a stale version of venv-update
exec_((scratch.python, dotpy(__file__)) + args) # never returns
# TODO-TEST: the original venv-update's directory was on sys.path (when using symlinking)
sys.path[0] = scratch.src
def get_original_path(venv_path): # TODO-TEST: a unit test
"""This helps us know whether someone has tried to relocate the virtualenv"""
return check_output(('sh', '-c', '. %s; printf "$VIRTUAL_ENV"' % venv_executable(venv_path, 'activate')))
def has_system_site_packages(interpreter):
# TODO: unit-test
system_site_packages = check_output((
interpreter,
'-c',
# stolen directly from virtualenv's site.py
"""\
import site, os.path
print(
0
if os.path.exists(
os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt')
) else
1
)"""
))
system_site_packages = int(system_site_packages)
assert system_site_packages in (0, 1)
return bool(system_site_packages)
def get_python_version(interpreter):
if not exists(interpreter):
return None
cmd = (interpreter, '-c', 'import sys; print(sys.version)')
return check_output(cmd)
def invalid_virtualenv_reason(venv_path, source_python, destination_python, options):
try:
orig_path = get_original_path(venv_path)
except CalledProcessError:
return 'could not inspect metadata'
if not samefile(orig_path, venv_path):
return 'virtualenv moved {} -> {}'.format(timid_relpath(orig_path), timid_relpath(venv_path))
elif has_system_site_packages(destination_python) != options.system_site_packages:
return 'system-site-packages changed, to %s' % options.system_site_packages
if source_python is None:
return
destination_version = get_python_version(destination_python)
source_version = get_python_version(source_python)
if source_version != destination_version:
return 'python version changed {} -> {}'.format(destination_version, source_version)
def ensure_virtualenv(args, return_values):
"""Ensure we have a valid virtualenv."""
def adjust_options(options, args):
# TODO-TEST: proper error message with no arguments
venv_path = return_values.venv_path = args[0]
if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':
from os.path import abspath, basename, dirname
options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))
# end of option munging.
# there are two python interpreters involved here:
# 1) the interpreter we're instructing virtualenv to copy
if options.python is None:
source_python = None
else:
source_python = virtualenv.resolve_interpreter(options.python)
# 2) the interpreter virtualenv will create
destination_python = venv_python(venv_path)
if exists(destination_python):
reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)
if reason:
info('Removing invalidated virtualenv. (%s)' % reason)
run(('rm', '-rf', venv_path))
else:
info('Keeping valid virtualenv from previous run.')
raise SystemExit(0) # looks good! we're done here.
# this is actually a documented extension point:
# http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options
import virtualenv
virtualenv.adjust_options = adjust_options
from sys import argv
argv[:] = ('virtualenv',) + args
info(colorize(argv))
raise_on_failure(virtualenv.main)
# There might not be a venv_path if doing something like "venv= --version"
# and not actually asking virtualenv to make a venv.
if return_values.venv_path is not None:
run(('rm', '-rf', join(return_values.venv_path, 'local')))
def wait_for_all_subprocesses():
from os import wait
try:
while True:
wait()
except OSError as error:
if error.errno == 10: # no child processes
return
else:
raise
def touch(filename, timestamp):
"""set the mtime of a file"""
if timestamp is not None:
timestamp = (timestamp, timestamp) # atime, mtime
from os import utime
utime(filename, timestamp)
def mark_venv_valid(venv_path):
wait_for_all_subprocesses()
touch(venv_path, None)
def mark_venv_invalid(venv_path):
# LBYL, to attempt to avoid any exception during exception handling
from os.path import isdir
if venv_path and isdir(venv_path):
info('')
info("Something went wrong! Sending '%s' back in time, so make knows it's invalid." % timid_relpath(venv_path))
wait_for_all_subprocesses()
touch(venv_path, 0)
def dotpy(filename):
if filename.endswith(('.pyc', '.pyo', '.pyd')):
return filename[:-1]
else:
return filename
def venv_executable(venv_path, executable):
return join(venv_path, 'bin', executable)
def venv_python(venv_path):
return venv_executable(venv_path, 'python')
def user_cache_dir():
# stolen from pip.utils.appdirs.user_cache_dir
from os import getenv
from os.path import expanduser
return getenv('XDG_CACHE_HOME', expanduser('~/.cache'))
def venv_update(
venv=DEFAULT_OPTION_VALUES['venv='],
install=DEFAULT_OPTION_VALUES['install='],
pip_command=DEFAULT_OPTION_VALUES['pip-command='],
bootstrap_deps=DEFAULT_OPTION_VALUES['bootstrap-deps='],
):
"""we have an arbitrary python interpreter active, (possibly) outside the virtualenv we want.
make a fresh venv at the right spot, make sure it has pip-faster, and use it
"""
# SMELL: mutable argument as return value
class return_values(object):
venv_path = None
try:
ensure_virtualenv(venv, return_values)
if return_values.venv_path is None:
return
# invariant: the final virtualenv exists, with the right python version
raise_on_failure(lambda: pip_faster(return_values.venv_path, pip_command, install, bootstrap_deps))
except BaseException:
mark_venv_invalid(return_values.venv_path)
raise
else:
mark_venv_valid(return_values.venv_path)
def execfile_(filename):
with open(filename) as code:
code = compile(code.read(), filename, 'exec')
exec(code, {'__file__': filename})
def pip_faster(venv_path, pip_command, install, bootstrap_deps):
"""install and run pip-faster"""
# activate the virtualenv
execfile_(venv_executable(venv_path, 'activate_this.py'))
# disable a useless warning
# FIXME: ensure a "true SSLContext" is available
from os import environ
environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'
# we always have to run the bootstrap, because the presense of an
# executable doesn't imply the right version. pip is able to validate the
# version in the fastpath case quickly anyway.
run(('pip', 'install') + bootstrap_deps)
run(pip_command + install)
def raise_on_failure(mainfunc):
"""raise if and only if mainfunc fails"""
try:
errors = mainfunc()
if errors:
exit(errors)
except CalledProcessError as error:
exit(error.returncode)
except SystemExit as error:
if error.code:
raise
except KeyboardInterrupt: # I don't plan to test-cover this. :pragma:nocover:
exit(1)
def main():
from sys import argv
args = tuple(argv[1:])
# process --help before we create any side-effects.
options = parseargs(args)
exec_scratch_virtualenv(args)
return venv_update(**options)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | venv_update.py | exec_ | python | def exec_(argv): # never returns
# info('EXEC' + colorize(argv)) # TODO: debug logging by environment variable
# in python3, sys.exitfunc has gone away, and atexit._run_exitfuncs seems to be the only pubic-ish interface
# https://hg.python.org/cpython/file/3.4/Modules/atexitmodule.c#l289
import atexit
atexit._run_exitfuncs()
from os import execv
execv(argv[0], argv) | Wrapper to os.execv which shows the command and runs any atexit handlers (for coverage's sake).
Like os.execv, this function never returns. | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/venv_update.py#L163-L175 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''\
usage: venv-update [-hV] [options]
Update a (possibly non-existent) virtualenv directory using a pip requirements
file. When this script completes, the virtualenv directory should contain the
same packages as if it were deleted then rebuilt.
venv-update uses "trailing equal" options (e.g. venv=) to delimit groups of
(conventional, dashed) options to pass to wrapped commands (virtualenv and pip).
Options:
venv= parameters are passed to virtualenv
default: {venv=}
install= options to pip-command
default: {install=}
pip-command= is run after the virtualenv directory is bootstrapped
default: {pip-command=}
bootstrap-deps= dependencies to install before pip-command= is run
default: {bootstrap-deps=}
Examples:
# install requirements.txt to "venv"
venv-update
# install requirements.txt to "myenv"
venv-update venv= myenv
# install requirements.txt to "myenv" using Python 3.4
venv-update venv= -ppython3.4 myenv
# install myreqs.txt to "venv"
venv-update install= -r myreqs.txt
# install requirements.txt to "venv", verbosely
venv-update venv= venv -vvv install= -r requirements.txt -vvv
# install requirements.txt to "venv", without pip-faster --update --prune
venv-update pip-command= pip install
We strongly recommend that you keep the default value of pip-command= in order
to quickly and reproducibly install your requirements. You can override the
packages installed during bootstrapping, prior to pip-command=, by setting
bootstrap-deps=
Pip options are also controllable via environment variables.
See https://pip.readthedocs.org/en/stable/user_guide/#environment-variables
For example:
PIP_INDEX_URL=https://pypi.example.com/simple venv-update
Please send issues to: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os.path import exists
from os.path import join
from subprocess import CalledProcessError
__version__ = '3.2.2'
DEFAULT_VIRTUALENV_PATH = 'venv'
DEFAULT_OPTION_VALUES = {
'venv=': (DEFAULT_VIRTUALENV_PATH,),
'install=': ('-r', 'requirements.txt',),
'pip-command=': ('pip-faster', 'install', '--upgrade', '--prune'),
'bootstrap-deps=': ('venv-update==' + __version__,),
}
__doc__ = __doc__.format(
**{key: ' '.join(val) for key, val in DEFAULT_OPTION_VALUES.items()}
)
# This script must not rely on anything other than
# stdlib>=2.6 and virtualenv>1.11
def parseargs(argv):
'''handle --help, --version and our double-equal ==options'''
args = []
options = {}
key = None
for arg in argv:
if arg in DEFAULT_OPTION_VALUES:
key = arg.strip('=').replace('-', '_')
options[key] = ()
elif key is None:
args.append(arg)
else:
options[key] += (arg,)
if set(args) & {'-h', '--help'}:
print(__doc__, end='')
exit(0)
elif set(args) & {'-V', '--version'}:
print(__version__)
exit(0)
elif args:
exit('invalid option: %s\nTry --help for more information.' % args[0])
return options
def timid_relpath(arg):
"""convert an argument to a relative path, carefully"""
# TODO-TEST: unit tests
from os.path import isabs, relpath, sep
if isabs(arg):
result = relpath(arg)
if result.count(sep) + 1 < arg.count(sep):
return result
return arg
def shellescape(args):
from pipes import quote
return ' '.join(quote(timid_relpath(arg)) for arg in args)
def colorize(cmd):
from os import isatty
if isatty(1):
template = '\033[36m>\033[m \033[32m{0}\033[m'
else:
template = '> {0}'
return template.format(shellescape(cmd))
def run(cmd):
from subprocess import check_call
check_call(('echo', colorize(cmd)))
check_call(cmd)
def info(msg):
# use a subprocess to ensure correct output interleaving.
from subprocess import check_call
check_call(('echo', msg))
def check_output(cmd):
from subprocess import Popen, PIPE
process = Popen(cmd, stdout=PIPE)
output, _ = process.communicate()
if process.returncode:
raise CalledProcessError(process.returncode, cmd)
else:
assert process.returncode == 0
return output.decode('UTF-8')
def samefile(file1, file2):
if not exists(file1) or not exists(file2):
return False
else:
from os.path import samefile
return samefile(file1, file2)
class Scratch(object):
def __init__(self):
self.dir = join(user_cache_dir(), 'venv-update', __version__)
self.venv = join(self.dir, 'venv')
self.python = venv_python(self.venv)
self.src = join(self.dir, 'src')
def exec_scratch_virtualenv(args):
"""
goals:
- get any random site-packages off of the pythonpath
- ensure we can import virtualenv
- ensure that we're not using the interpreter that we may need to delete
- idempotency: do nothing if the above goals are already met
"""
scratch = Scratch()
if not exists(scratch.python):
run(('virtualenv', scratch.venv))
if not exists(join(scratch.src, 'virtualenv.py')):
scratch_python = venv_python(scratch.venv)
# TODO: do we allow user-defined override of which version of virtualenv to install?
tmp = scratch.src + '.tmp'
run((scratch_python, '-m', 'pip.__main__', 'install', 'virtualenv', '--target', tmp))
from os import rename
rename(tmp, scratch.src)
import sys
from os.path import realpath
# We want to compare the paths themselves as sometimes sys.path is the same
# as scratch.venv, but with a suffix of bin/..
if realpath(sys.prefix) != realpath(scratch.venv):
# TODO-TEST: sometimes we would get a stale version of venv-update
exec_((scratch.python, dotpy(__file__)) + args) # never returns
# TODO-TEST: the original venv-update's directory was on sys.path (when using symlinking)
sys.path[0] = scratch.src
def get_original_path(venv_path): # TODO-TEST: a unit test
"""This helps us know whether someone has tried to relocate the virtualenv"""
return check_output(('sh', '-c', '. %s; printf "$VIRTUAL_ENV"' % venv_executable(venv_path, 'activate')))
def has_system_site_packages(interpreter):
# TODO: unit-test
system_site_packages = check_output((
interpreter,
'-c',
# stolen directly from virtualenv's site.py
"""\
import site, os.path
print(
0
if os.path.exists(
os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt')
) else
1
)"""
))
system_site_packages = int(system_site_packages)
assert system_site_packages in (0, 1)
return bool(system_site_packages)
def get_python_version(interpreter):
if not exists(interpreter):
return None
cmd = (interpreter, '-c', 'import sys; print(sys.version)')
return check_output(cmd)
def invalid_virtualenv_reason(venv_path, source_python, destination_python, options):
try:
orig_path = get_original_path(venv_path)
except CalledProcessError:
return 'could not inspect metadata'
if not samefile(orig_path, venv_path):
return 'virtualenv moved {} -> {}'.format(timid_relpath(orig_path), timid_relpath(venv_path))
elif has_system_site_packages(destination_python) != options.system_site_packages:
return 'system-site-packages changed, to %s' % options.system_site_packages
if source_python is None:
return
destination_version = get_python_version(destination_python)
source_version = get_python_version(source_python)
if source_version != destination_version:
return 'python version changed {} -> {}'.format(destination_version, source_version)
def ensure_virtualenv(args, return_values):
"""Ensure we have a valid virtualenv."""
def adjust_options(options, args):
# TODO-TEST: proper error message with no arguments
venv_path = return_values.venv_path = args[0]
if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':
from os.path import abspath, basename, dirname
options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))
# end of option munging.
# there are two python interpreters involved here:
# 1) the interpreter we're instructing virtualenv to copy
if options.python is None:
source_python = None
else:
source_python = virtualenv.resolve_interpreter(options.python)
# 2) the interpreter virtualenv will create
destination_python = venv_python(venv_path)
if exists(destination_python):
reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)
if reason:
info('Removing invalidated virtualenv. (%s)' % reason)
run(('rm', '-rf', venv_path))
else:
info('Keeping valid virtualenv from previous run.')
raise SystemExit(0) # looks good! we're done here.
# this is actually a documented extension point:
# http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options
import virtualenv
virtualenv.adjust_options = adjust_options
from sys import argv
argv[:] = ('virtualenv',) + args
info(colorize(argv))
raise_on_failure(virtualenv.main)
# There might not be a venv_path if doing something like "venv= --version"
# and not actually asking virtualenv to make a venv.
if return_values.venv_path is not None:
run(('rm', '-rf', join(return_values.venv_path, 'local')))
def wait_for_all_subprocesses():
from os import wait
try:
while True:
wait()
except OSError as error:
if error.errno == 10: # no child processes
return
else:
raise
def touch(filename, timestamp):
"""set the mtime of a file"""
if timestamp is not None:
timestamp = (timestamp, timestamp) # atime, mtime
from os import utime
utime(filename, timestamp)
def mark_venv_valid(venv_path):
wait_for_all_subprocesses()
touch(venv_path, None)
def mark_venv_invalid(venv_path):
# LBYL, to attempt to avoid any exception during exception handling
from os.path import isdir
if venv_path and isdir(venv_path):
info('')
info("Something went wrong! Sending '%s' back in time, so make knows it's invalid." % timid_relpath(venv_path))
wait_for_all_subprocesses()
touch(venv_path, 0)
def dotpy(filename):
if filename.endswith(('.pyc', '.pyo', '.pyd')):
return filename[:-1]
else:
return filename
def venv_executable(venv_path, executable):
return join(venv_path, 'bin', executable)
def venv_python(venv_path):
return venv_executable(venv_path, 'python')
def user_cache_dir():
# stolen from pip.utils.appdirs.user_cache_dir
from os import getenv
from os.path import expanduser
return getenv('XDG_CACHE_HOME', expanduser('~/.cache'))
def venv_update(
venv=DEFAULT_OPTION_VALUES['venv='],
install=DEFAULT_OPTION_VALUES['install='],
pip_command=DEFAULT_OPTION_VALUES['pip-command='],
bootstrap_deps=DEFAULT_OPTION_VALUES['bootstrap-deps='],
):
"""we have an arbitrary python interpreter active, (possibly) outside the virtualenv we want.
make a fresh venv at the right spot, make sure it has pip-faster, and use it
"""
# SMELL: mutable argument as return value
class return_values(object):
venv_path = None
try:
ensure_virtualenv(venv, return_values)
if return_values.venv_path is None:
return
# invariant: the final virtualenv exists, with the right python version
raise_on_failure(lambda: pip_faster(return_values.venv_path, pip_command, install, bootstrap_deps))
except BaseException:
mark_venv_invalid(return_values.venv_path)
raise
else:
mark_venv_valid(return_values.venv_path)
def execfile_(filename):
with open(filename) as code:
code = compile(code.read(), filename, 'exec')
exec(code, {'__file__': filename})
def pip_faster(venv_path, pip_command, install, bootstrap_deps):
"""install and run pip-faster"""
# activate the virtualenv
execfile_(venv_executable(venv_path, 'activate_this.py'))
# disable a useless warning
# FIXME: ensure a "true SSLContext" is available
from os import environ
environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'
# we always have to run the bootstrap, because the presense of an
# executable doesn't imply the right version. pip is able to validate the
# version in the fastpath case quickly anyway.
run(('pip', 'install') + bootstrap_deps)
run(pip_command + install)
def raise_on_failure(mainfunc):
"""raise if and only if mainfunc fails"""
try:
errors = mainfunc()
if errors:
exit(errors)
except CalledProcessError as error:
exit(error.returncode)
except SystemExit as error:
if error.code:
raise
except KeyboardInterrupt: # I don't plan to test-cover this. :pragma:nocover:
exit(1)
def main():
from sys import argv
args = tuple(argv[1:])
# process --help before we create any side-effects.
options = parseargs(args)
exec_scratch_virtualenv(args)
return venv_update(**options)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | venv_update.py | exec_scratch_virtualenv | python | def exec_scratch_virtualenv(args):
scratch = Scratch()
if not exists(scratch.python):
run(('virtualenv', scratch.venv))
if not exists(join(scratch.src, 'virtualenv.py')):
scratch_python = venv_python(scratch.venv)
# TODO: do we allow user-defined override of which version of virtualenv to install?
tmp = scratch.src + '.tmp'
run((scratch_python, '-m', 'pip.__main__', 'install', 'virtualenv', '--target', tmp))
from os import rename
rename(tmp, scratch.src)
import sys
from os.path import realpath
# We want to compare the paths themselves as sometimes sys.path is the same
# as scratch.venv, but with a suffix of bin/..
if realpath(sys.prefix) != realpath(scratch.venv):
# TODO-TEST: sometimes we would get a stale version of venv-update
exec_((scratch.python, dotpy(__file__)) + args) # never returns
# TODO-TEST: the original venv-update's directory was on sys.path (when using symlinking)
sys.path[0] = scratch.src | goals:
- get any random site-packages off of the pythonpath
- ensure we can import virtualenv
- ensure that we're not using the interpreter that we may need to delete
- idempotency: do nothing if the above goals are already met | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/venv_update.py#L187-L217 | [
"def run(cmd):\n from subprocess import check_call\n check_call(('echo', colorize(cmd)))\n check_call(cmd)\n",
"def dotpy(filename):\n if filename.endswith(('.pyc', '.pyo', '.pyd')):\n return filename[:-1]\n else:\n return filename\n",
"def exec_(argv): # never returns\n \"\"\"Wrapper to os.execv which shows the command and runs any atexit handlers (for coverage's sake).\n Like os.execv, this function never returns.\n \"\"\"\n # info('EXEC' + colorize(argv)) # TODO: debug logging by environment variable\n\n # in python3, sys.exitfunc has gone away, and atexit._run_exitfuncs seems to be the only pubic-ish interface\n # https://hg.python.org/cpython/file/3.4/Modules/atexitmodule.c#l289\n import atexit\n atexit._run_exitfuncs()\n\n from os import execv\n execv(argv[0], argv)\n",
"def venv_python(venv_path):\n return venv_executable(venv_path, 'python')\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''\
usage: venv-update [-hV] [options]
Update a (possibly non-existent) virtualenv directory using a pip requirements
file. When this script completes, the virtualenv directory should contain the
same packages as if it were deleted then rebuilt.
venv-update uses "trailing equal" options (e.g. venv=) to delimit groups of
(conventional, dashed) options to pass to wrapped commands (virtualenv and pip).
Options:
venv= parameters are passed to virtualenv
default: {venv=}
install= options to pip-command
default: {install=}
pip-command= is run after the virtualenv directory is bootstrapped
default: {pip-command=}
bootstrap-deps= dependencies to install before pip-command= is run
default: {bootstrap-deps=}
Examples:
# install requirements.txt to "venv"
venv-update
# install requirements.txt to "myenv"
venv-update venv= myenv
# install requirements.txt to "myenv" using Python 3.4
venv-update venv= -ppython3.4 myenv
# install myreqs.txt to "venv"
venv-update install= -r myreqs.txt
# install requirements.txt to "venv", verbosely
venv-update venv= venv -vvv install= -r requirements.txt -vvv
# install requirements.txt to "venv", without pip-faster --update --prune
venv-update pip-command= pip install
We strongly recommend that you keep the default value of pip-command= in order
to quickly and reproducibly install your requirements. You can override the
packages installed during bootstrapping, prior to pip-command=, by setting
bootstrap-deps=
Pip options are also controllable via environment variables.
See https://pip.readthedocs.org/en/stable/user_guide/#environment-variables
For example:
PIP_INDEX_URL=https://pypi.example.com/simple venv-update
Please send issues to: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os.path import exists
from os.path import join
from subprocess import CalledProcessError
__version__ = '3.2.2'
DEFAULT_VIRTUALENV_PATH = 'venv'
DEFAULT_OPTION_VALUES = {
'venv=': (DEFAULT_VIRTUALENV_PATH,),
'install=': ('-r', 'requirements.txt',),
'pip-command=': ('pip-faster', 'install', '--upgrade', '--prune'),
'bootstrap-deps=': ('venv-update==' + __version__,),
}
__doc__ = __doc__.format(
**{key: ' '.join(val) for key, val in DEFAULT_OPTION_VALUES.items()}
)
# This script must not rely on anything other than
# stdlib>=2.6 and virtualenv>1.11
def parseargs(argv):
'''handle --help, --version and our double-equal ==options'''
args = []
options = {}
key = None
for arg in argv:
if arg in DEFAULT_OPTION_VALUES:
key = arg.strip('=').replace('-', '_')
options[key] = ()
elif key is None:
args.append(arg)
else:
options[key] += (arg,)
if set(args) & {'-h', '--help'}:
print(__doc__, end='')
exit(0)
elif set(args) & {'-V', '--version'}:
print(__version__)
exit(0)
elif args:
exit('invalid option: %s\nTry --help for more information.' % args[0])
return options
def timid_relpath(arg):
"""convert an argument to a relative path, carefully"""
# TODO-TEST: unit tests
from os.path import isabs, relpath, sep
if isabs(arg):
result = relpath(arg)
if result.count(sep) + 1 < arg.count(sep):
return result
return arg
def shellescape(args):
from pipes import quote
return ' '.join(quote(timid_relpath(arg)) for arg in args)
def colorize(cmd):
from os import isatty
if isatty(1):
template = '\033[36m>\033[m \033[32m{0}\033[m'
else:
template = '> {0}'
return template.format(shellescape(cmd))
def run(cmd):
from subprocess import check_call
check_call(('echo', colorize(cmd)))
check_call(cmd)
def info(msg):
# use a subprocess to ensure correct output interleaving.
from subprocess import check_call
check_call(('echo', msg))
def check_output(cmd):
from subprocess import Popen, PIPE
process = Popen(cmd, stdout=PIPE)
output, _ = process.communicate()
if process.returncode:
raise CalledProcessError(process.returncode, cmd)
else:
assert process.returncode == 0
return output.decode('UTF-8')
def samefile(file1, file2):
if not exists(file1) or not exists(file2):
return False
else:
from os.path import samefile
return samefile(file1, file2)
def exec_(argv): # never returns
"""Wrapper to os.execv which shows the command and runs any atexit handlers (for coverage's sake).
Like os.execv, this function never returns.
"""
# info('EXEC' + colorize(argv)) # TODO: debug logging by environment variable
    # in python3, sys.exitfunc has gone away, and atexit._run_exitfuncs seems to be the only public-ish interface
# https://hg.python.org/cpython/file/3.4/Modules/atexitmodule.c#l289
import atexit
atexit._run_exitfuncs()
from os import execv
execv(argv[0], argv)
class Scratch(object):
def __init__(self):
self.dir = join(user_cache_dir(), 'venv-update', __version__)
self.venv = join(self.dir, 'venv')
self.python = venv_python(self.venv)
self.src = join(self.dir, 'src')
def get_original_path(venv_path): # TODO-TEST: a unit test
"""This helps us know whether someone has tried to relocate the virtualenv"""
return check_output(('sh', '-c', '. %s; printf "$VIRTUAL_ENV"' % venv_executable(venv_path, 'activate')))
def has_system_site_packages(interpreter):
# TODO: unit-test
system_site_packages = check_output((
interpreter,
'-c',
# stolen directly from virtualenv's site.py
"""\
import site, os.path
print(
0
if os.path.exists(
os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt')
) else
1
)"""
))
system_site_packages = int(system_site_packages)
assert system_site_packages in (0, 1)
return bool(system_site_packages)
def get_python_version(interpreter):
if not exists(interpreter):
return None
cmd = (interpreter, '-c', 'import sys; print(sys.version)')
return check_output(cmd)
def invalid_virtualenv_reason(venv_path, source_python, destination_python, options):
try:
orig_path = get_original_path(venv_path)
except CalledProcessError:
return 'could not inspect metadata'
if not samefile(orig_path, venv_path):
return 'virtualenv moved {} -> {}'.format(timid_relpath(orig_path), timid_relpath(venv_path))
elif has_system_site_packages(destination_python) != options.system_site_packages:
return 'system-site-packages changed, to %s' % options.system_site_packages
if source_python is None:
return
destination_version = get_python_version(destination_python)
source_version = get_python_version(source_python)
if source_version != destination_version:
return 'python version changed {} -> {}'.format(destination_version, source_version)
def ensure_virtualenv(args, return_values):
"""Ensure we have a valid virtualenv."""
def adjust_options(options, args):
# TODO-TEST: proper error message with no arguments
venv_path = return_values.venv_path = args[0]
if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':
from os.path import abspath, basename, dirname
options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))
# end of option munging.
# there are two python interpreters involved here:
# 1) the interpreter we're instructing virtualenv to copy
if options.python is None:
source_python = None
else:
source_python = virtualenv.resolve_interpreter(options.python)
# 2) the interpreter virtualenv will create
destination_python = venv_python(venv_path)
if exists(destination_python):
reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)
if reason:
info('Removing invalidated virtualenv. (%s)' % reason)
run(('rm', '-rf', venv_path))
else:
info('Keeping valid virtualenv from previous run.')
raise SystemExit(0) # looks good! we're done here.
# this is actually a documented extension point:
# http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options
import virtualenv
virtualenv.adjust_options = adjust_options
from sys import argv
argv[:] = ('virtualenv',) + args
info(colorize(argv))
raise_on_failure(virtualenv.main)
# There might not be a venv_path if doing something like "venv= --version"
# and not actually asking virtualenv to make a venv.
if return_values.venv_path is not None:
run(('rm', '-rf', join(return_values.venv_path, 'local')))
def wait_for_all_subprocesses():
from os import wait
try:
while True:
wait()
except OSError as error:
if error.errno == 10: # no child processes
return
else:
raise
def touch(filename, timestamp):
"""set the mtime of a file"""
if timestamp is not None:
timestamp = (timestamp, timestamp) # atime, mtime
from os import utime
utime(filename, timestamp)
def mark_venv_valid(venv_path):
wait_for_all_subprocesses()
touch(venv_path, None)
def mark_venv_invalid(venv_path):
# LBYL, to attempt to avoid any exception during exception handling
from os.path import isdir
if venv_path and isdir(venv_path):
info('')
info("Something went wrong! Sending '%s' back in time, so make knows it's invalid." % timid_relpath(venv_path))
wait_for_all_subprocesses()
touch(venv_path, 0)
def dotpy(filename):
if filename.endswith(('.pyc', '.pyo', '.pyd')):
return filename[:-1]
else:
return filename
def venv_executable(venv_path, executable):
return join(venv_path, 'bin', executable)
def venv_python(venv_path):
return venv_executable(venv_path, 'python')
def user_cache_dir():
# stolen from pip.utils.appdirs.user_cache_dir
from os import getenv
from os.path import expanduser
return getenv('XDG_CACHE_HOME', expanduser('~/.cache'))
def venv_update(
venv=DEFAULT_OPTION_VALUES['venv='],
install=DEFAULT_OPTION_VALUES['install='],
pip_command=DEFAULT_OPTION_VALUES['pip-command='],
bootstrap_deps=DEFAULT_OPTION_VALUES['bootstrap-deps='],
):
"""we have an arbitrary python interpreter active, (possibly) outside the virtualenv we want.
make a fresh venv at the right spot, make sure it has pip-faster, and use it
"""
# SMELL: mutable argument as return value
class return_values(object):
venv_path = None
try:
ensure_virtualenv(venv, return_values)
if return_values.venv_path is None:
return
# invariant: the final virtualenv exists, with the right python version
raise_on_failure(lambda: pip_faster(return_values.venv_path, pip_command, install, bootstrap_deps))
except BaseException:
mark_venv_invalid(return_values.venv_path)
raise
else:
mark_venv_valid(return_values.venv_path)
def execfile_(filename):
with open(filename) as code:
code = compile(code.read(), filename, 'exec')
exec(code, {'__file__': filename})
def pip_faster(venv_path, pip_command, install, bootstrap_deps):
"""install and run pip-faster"""
# activate the virtualenv
execfile_(venv_executable(venv_path, 'activate_this.py'))
# disable a useless warning
# FIXME: ensure a "true SSLContext" is available
from os import environ
environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'
    # we always have to run the bootstrap, because the presence of an
# executable doesn't imply the right version. pip is able to validate the
# version in the fastpath case quickly anyway.
run(('pip', 'install') + bootstrap_deps)
run(pip_command + install)
def raise_on_failure(mainfunc):
"""raise if and only if mainfunc fails"""
try:
errors = mainfunc()
if errors:
exit(errors)
except CalledProcessError as error:
exit(error.returncode)
except SystemExit as error:
if error.code:
raise
except KeyboardInterrupt: # I don't plan to test-cover this. :pragma:nocover:
exit(1)
def main():
from sys import argv
args = tuple(argv[1:])
# process --help before we create any side-effects.
options = parseargs(args)
exec_scratch_virtualenv(args)
return venv_update(**options)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | venv_update.py | ensure_virtualenv | python | def ensure_virtualenv(args, return_values):
def adjust_options(options, args):
# TODO-TEST: proper error message with no arguments
venv_path = return_values.venv_path = args[0]
if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':
from os.path import abspath, basename, dirname
options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))
# end of option munging.
# there are two python interpreters involved here:
# 1) the interpreter we're instructing virtualenv to copy
if options.python is None:
source_python = None
else:
source_python = virtualenv.resolve_interpreter(options.python)
# 2) the interpreter virtualenv will create
destination_python = venv_python(venv_path)
if exists(destination_python):
reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)
if reason:
info('Removing invalidated virtualenv. (%s)' % reason)
run(('rm', '-rf', venv_path))
else:
info('Keeping valid virtualenv from previous run.')
raise SystemExit(0) # looks good! we're done here.
# this is actually a documented extension point:
# http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options
import virtualenv
virtualenv.adjust_options = adjust_options
from sys import argv
argv[:] = ('virtualenv',) + args
info(colorize(argv))
raise_on_failure(virtualenv.main)
# There might not be a venv_path if doing something like "venv= --version"
# and not actually asking virtualenv to make a venv.
if return_values.venv_path is not None:
run(('rm', '-rf', join(return_values.venv_path, 'local'))) | Ensure we have a valid virtualenv. | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/venv_update.py#L272-L313 | [
"def info(msg):\n # use a subprocess to ensure correct output interleaving.\n from subprocess import check_call\n check_call(('echo', msg))\n",
"def run(cmd):\n from subprocess import check_call\n check_call(('echo', colorize(cmd)))\n check_call(cmd)\n",
"def colorize(cmd):\n from os import isatty\n\n if isatty(1):\n template = '\\033[36m>\\033[m \\033[32m{0}\\033[m'\n else:\n template = '> {0}'\n\n return template.format(shellescape(cmd))\n",
"def raise_on_failure(mainfunc):\n \"\"\"raise if and only if mainfunc fails\"\"\"\n try:\n errors = mainfunc()\n if errors:\n exit(errors)\n except CalledProcessError as error:\n exit(error.returncode)\n except SystemExit as error:\n if error.code:\n raise\n except KeyboardInterrupt: # I don't plan to test-cover this. :pragma:nocover:\n exit(1)\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''\
usage: venv-update [-hV] [options]
Update a (possibly non-existent) virtualenv directory using a pip requirements
file. When this script completes, the virtualenv directory should contain the
same packages as if it were deleted then rebuilt.
venv-update uses "trailing equal" options (e.g. venv=) to delimit groups of
(conventional, dashed) options to pass to wrapped commands (virtualenv and pip).
Options:
venv= parameters are passed to virtualenv
default: {venv=}
install= options to pip-command
default: {install=}
pip-command= is run after the virtualenv directory is bootstrapped
default: {pip-command=}
bootstrap-deps= dependencies to install before pip-command= is run
default: {bootstrap-deps=}
Examples:
# install requirements.txt to "venv"
venv-update
# install requirements.txt to "myenv"
venv-update venv= myenv
# install requirements.txt to "myenv" using Python 3.4
venv-update venv= -ppython3.4 myenv
# install myreqs.txt to "venv"
venv-update install= -r myreqs.txt
# install requirements.txt to "venv", verbosely
venv-update venv= venv -vvv install= -r requirements.txt -vvv
# install requirements.txt to "venv", without pip-faster --update --prune
venv-update pip-command= pip install
We strongly recommend that you keep the default value of pip-command= in order
to quickly and reproducibly install your requirements. You can override the
packages installed during bootstrapping, prior to pip-command=, by setting
bootstrap-deps=
Pip options are also controllable via environment variables.
See https://pip.readthedocs.org/en/stable/user_guide/#environment-variables
For example:
PIP_INDEX_URL=https://pypi.example.com/simple venv-update
Please send issues to: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os.path import exists
from os.path import join
from subprocess import CalledProcessError
__version__ = '3.2.2'
DEFAULT_VIRTUALENV_PATH = 'venv'
DEFAULT_OPTION_VALUES = {
'venv=': (DEFAULT_VIRTUALENV_PATH,),
'install=': ('-r', 'requirements.txt',),
'pip-command=': ('pip-faster', 'install', '--upgrade', '--prune'),
'bootstrap-deps=': ('venv-update==' + __version__,),
}
__doc__ = __doc__.format(
**{key: ' '.join(val) for key, val in DEFAULT_OPTION_VALUES.items()}
)
# This script must not rely on anything other than
# stdlib>=2.6 and virtualenv>1.11
def parseargs(argv):
'''handle --help, --version and our double-equal ==options'''
args = []
options = {}
key = None
for arg in argv:
if arg in DEFAULT_OPTION_VALUES:
key = arg.strip('=').replace('-', '_')
options[key] = ()
elif key is None:
args.append(arg)
else:
options[key] += (arg,)
if set(args) & {'-h', '--help'}:
print(__doc__, end='')
exit(0)
elif set(args) & {'-V', '--version'}:
print(__version__)
exit(0)
elif args:
exit('invalid option: %s\nTry --help for more information.' % args[0])
return options
def timid_relpath(arg):
"""convert an argument to a relative path, carefully"""
# TODO-TEST: unit tests
from os.path import isabs, relpath, sep
if isabs(arg):
result = relpath(arg)
if result.count(sep) + 1 < arg.count(sep):
return result
return arg
def shellescape(args):
from pipes import quote
return ' '.join(quote(timid_relpath(arg)) for arg in args)
def colorize(cmd):
from os import isatty
if isatty(1):
template = '\033[36m>\033[m \033[32m{0}\033[m'
else:
template = '> {0}'
return template.format(shellescape(cmd))
def run(cmd):
from subprocess import check_call
check_call(('echo', colorize(cmd)))
check_call(cmd)
def info(msg):
# use a subprocess to ensure correct output interleaving.
from subprocess import check_call
check_call(('echo', msg))
def check_output(cmd):
from subprocess import Popen, PIPE
process = Popen(cmd, stdout=PIPE)
output, _ = process.communicate()
if process.returncode:
raise CalledProcessError(process.returncode, cmd)
else:
assert process.returncode == 0
return output.decode('UTF-8')
def samefile(file1, file2):
if not exists(file1) or not exists(file2):
return False
else:
from os.path import samefile
return samefile(file1, file2)
def exec_(argv): # never returns
"""Wrapper to os.execv which shows the command and runs any atexit handlers (for coverage's sake).
Like os.execv, this function never returns.
"""
# info('EXEC' + colorize(argv)) # TODO: debug logging by environment variable
    # in python3, sys.exitfunc has gone away, and atexit._run_exitfuncs seems to be the only public-ish interface
# https://hg.python.org/cpython/file/3.4/Modules/atexitmodule.c#l289
import atexit
atexit._run_exitfuncs()
from os import execv
execv(argv[0], argv)
class Scratch(object):
def __init__(self):
self.dir = join(user_cache_dir(), 'venv-update', __version__)
self.venv = join(self.dir, 'venv')
self.python = venv_python(self.venv)
self.src = join(self.dir, 'src')
def exec_scratch_virtualenv(args):
"""
goals:
- get any random site-packages off of the pythonpath
- ensure we can import virtualenv
- ensure that we're not using the interpreter that we may need to delete
- idempotency: do nothing if the above goals are already met
"""
scratch = Scratch()
if not exists(scratch.python):
run(('virtualenv', scratch.venv))
if not exists(join(scratch.src, 'virtualenv.py')):
scratch_python = venv_python(scratch.venv)
# TODO: do we allow user-defined override of which version of virtualenv to install?
tmp = scratch.src + '.tmp'
run((scratch_python, '-m', 'pip.__main__', 'install', 'virtualenv', '--target', tmp))
from os import rename
rename(tmp, scratch.src)
import sys
from os.path import realpath
# We want to compare the paths themselves as sometimes sys.path is the same
# as scratch.venv, but with a suffix of bin/..
if realpath(sys.prefix) != realpath(scratch.venv):
# TODO-TEST: sometimes we would get a stale version of venv-update
exec_((scratch.python, dotpy(__file__)) + args) # never returns
# TODO-TEST: the original venv-update's directory was on sys.path (when using symlinking)
sys.path[0] = scratch.src
def get_original_path(venv_path): # TODO-TEST: a unit test
"""This helps us know whether someone has tried to relocate the virtualenv"""
return check_output(('sh', '-c', '. %s; printf "$VIRTUAL_ENV"' % venv_executable(venv_path, 'activate')))
def has_system_site_packages(interpreter):
# TODO: unit-test
system_site_packages = check_output((
interpreter,
'-c',
# stolen directly from virtualenv's site.py
"""\
import site, os.path
print(
0
if os.path.exists(
os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt')
) else
1
)"""
))
system_site_packages = int(system_site_packages)
assert system_site_packages in (0, 1)
return bool(system_site_packages)
def get_python_version(interpreter):
if not exists(interpreter):
return None
cmd = (interpreter, '-c', 'import sys; print(sys.version)')
return check_output(cmd)
def invalid_virtualenv_reason(venv_path, source_python, destination_python, options):
try:
orig_path = get_original_path(venv_path)
except CalledProcessError:
return 'could not inspect metadata'
if not samefile(orig_path, venv_path):
return 'virtualenv moved {} -> {}'.format(timid_relpath(orig_path), timid_relpath(venv_path))
elif has_system_site_packages(destination_python) != options.system_site_packages:
return 'system-site-packages changed, to %s' % options.system_site_packages
if source_python is None:
return
destination_version = get_python_version(destination_python)
source_version = get_python_version(source_python)
if source_version != destination_version:
return 'python version changed {} -> {}'.format(destination_version, source_version)
def wait_for_all_subprocesses():
from os import wait
try:
while True:
wait()
except OSError as error:
if error.errno == 10: # no child processes
return
else:
raise
def touch(filename, timestamp):
"""set the mtime of a file"""
if timestamp is not None:
timestamp = (timestamp, timestamp) # atime, mtime
from os import utime
utime(filename, timestamp)
def mark_venv_valid(venv_path):
wait_for_all_subprocesses()
touch(venv_path, None)
def mark_venv_invalid(venv_path):
# LBYL, to attempt to avoid any exception during exception handling
from os.path import isdir
if venv_path and isdir(venv_path):
info('')
info("Something went wrong! Sending '%s' back in time, so make knows it's invalid." % timid_relpath(venv_path))
wait_for_all_subprocesses()
touch(venv_path, 0)
def dotpy(filename):
if filename.endswith(('.pyc', '.pyo', '.pyd')):
return filename[:-1]
else:
return filename
def venv_executable(venv_path, executable):
return join(venv_path, 'bin', executable)
def venv_python(venv_path):
return venv_executable(venv_path, 'python')
def user_cache_dir():
# stolen from pip.utils.appdirs.user_cache_dir
from os import getenv
from os.path import expanduser
return getenv('XDG_CACHE_HOME', expanduser('~/.cache'))
def venv_update(
venv=DEFAULT_OPTION_VALUES['venv='],
install=DEFAULT_OPTION_VALUES['install='],
pip_command=DEFAULT_OPTION_VALUES['pip-command='],
bootstrap_deps=DEFAULT_OPTION_VALUES['bootstrap-deps='],
):
"""we have an arbitrary python interpreter active, (possibly) outside the virtualenv we want.
make a fresh venv at the right spot, make sure it has pip-faster, and use it
"""
# SMELL: mutable argument as return value
class return_values(object):
venv_path = None
try:
ensure_virtualenv(venv, return_values)
if return_values.venv_path is None:
return
# invariant: the final virtualenv exists, with the right python version
raise_on_failure(lambda: pip_faster(return_values.venv_path, pip_command, install, bootstrap_deps))
except BaseException:
mark_venv_invalid(return_values.venv_path)
raise
else:
mark_venv_valid(return_values.venv_path)
def execfile_(filename):
with open(filename) as code:
code = compile(code.read(), filename, 'exec')
exec(code, {'__file__': filename})
def pip_faster(venv_path, pip_command, install, bootstrap_deps):
"""install and run pip-faster"""
# activate the virtualenv
execfile_(venv_executable(venv_path, 'activate_this.py'))
# disable a useless warning
# FIXME: ensure a "true SSLContext" is available
from os import environ
environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'
    # we always have to run the bootstrap, because the presence of an
# executable doesn't imply the right version. pip is able to validate the
# version in the fastpath case quickly anyway.
run(('pip', 'install') + bootstrap_deps)
run(pip_command + install)
def raise_on_failure(mainfunc):
"""raise if and only if mainfunc fails"""
try:
errors = mainfunc()
if errors:
exit(errors)
except CalledProcessError as error:
exit(error.returncode)
except SystemExit as error:
if error.code:
raise
except KeyboardInterrupt: # I don't plan to test-cover this. :pragma:nocover:
exit(1)
def main():
from sys import argv
args = tuple(argv[1:])
# process --help before we create any side-effects.
options = parseargs(args)
exec_scratch_virtualenv(args)
return venv_update(**options)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | venv_update.py | touch | python | def touch(filename, timestamp):
if timestamp is not None:
timestamp = (timestamp, timestamp) # atime, mtime
from os import utime
utime(filename, timestamp) | set the mtime of a file | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/venv_update.py#L328-L334 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''\
usage: venv-update [-hV] [options]
Update a (possibly non-existent) virtualenv directory using a pip requirements
file. When this script completes, the virtualenv directory should contain the
same packages as if it were deleted then rebuilt.
venv-update uses "trailing equal" options (e.g. venv=) to delimit groups of
(conventional, dashed) options to pass to wrapped commands (virtualenv and pip).
Options:
venv= parameters are passed to virtualenv
default: {venv=}
install= options to pip-command
default: {install=}
pip-command= is run after the virtualenv directory is bootstrapped
default: {pip-command=}
bootstrap-deps= dependencies to install before pip-command= is run
default: {bootstrap-deps=}
Examples:
# install requirements.txt to "venv"
venv-update
# install requirements.txt to "myenv"
venv-update venv= myenv
# install requirements.txt to "myenv" using Python 3.4
venv-update venv= -ppython3.4 myenv
# install myreqs.txt to "venv"
venv-update install= -r myreqs.txt
# install requirements.txt to "venv", verbosely
venv-update venv= venv -vvv install= -r requirements.txt -vvv
# install requirements.txt to "venv", without pip-faster --update --prune
venv-update pip-command= pip install
We strongly recommend that you keep the default value of pip-command= in order
to quickly and reproducibly install your requirements. You can override the
packages installed during bootstrapping, prior to pip-command=, by setting
bootstrap-deps=
Pip options are also controllable via environment variables.
See https://pip.readthedocs.org/en/stable/user_guide/#environment-variables
For example:
PIP_INDEX_URL=https://pypi.example.com/simple venv-update
Please send issues to: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os.path import exists
from os.path import join
from subprocess import CalledProcessError
__version__ = '3.2.2'
DEFAULT_VIRTUALENV_PATH = 'venv'
DEFAULT_OPTION_VALUES = {
'venv=': (DEFAULT_VIRTUALENV_PATH,),
'install=': ('-r', 'requirements.txt',),
'pip-command=': ('pip-faster', 'install', '--upgrade', '--prune'),
'bootstrap-deps=': ('venv-update==' + __version__,),
}
__doc__ = __doc__.format(
**{key: ' '.join(val) for key, val in DEFAULT_OPTION_VALUES.items()}
)
# This script must not rely on anything other than
# stdlib>=2.6 and virtualenv>1.11
def parseargs(argv):
'''handle --help, --version and our double-equal ==options'''
args = []
options = {}
key = None
for arg in argv:
if arg in DEFAULT_OPTION_VALUES:
key = arg.strip('=').replace('-', '_')
options[key] = ()
elif key is None:
args.append(arg)
else:
options[key] += (arg,)
if set(args) & {'-h', '--help'}:
print(__doc__, end='')
exit(0)
elif set(args) & {'-V', '--version'}:
print(__version__)
exit(0)
elif args:
exit('invalid option: %s\nTry --help for more information.' % args[0])
return options
def timid_relpath(arg):
"""convert an argument to a relative path, carefully"""
# TODO-TEST: unit tests
from os.path import isabs, relpath, sep
if isabs(arg):
result = relpath(arg)
if result.count(sep) + 1 < arg.count(sep):
return result
return arg
def shellescape(args):
from pipes import quote
return ' '.join(quote(timid_relpath(arg)) for arg in args)
def colorize(cmd):
from os import isatty
if isatty(1):
template = '\033[36m>\033[m \033[32m{0}\033[m'
else:
template = '> {0}'
return template.format(shellescape(cmd))
def run(cmd):
from subprocess import check_call
check_call(('echo', colorize(cmd)))
check_call(cmd)
def info(msg):
# use a subprocess to ensure correct output interleaving.
from subprocess import check_call
check_call(('echo', msg))
def check_output(cmd):
from subprocess import Popen, PIPE
process = Popen(cmd, stdout=PIPE)
output, _ = process.communicate()
if process.returncode:
raise CalledProcessError(process.returncode, cmd)
else:
assert process.returncode == 0
return output.decode('UTF-8')
def samefile(file1, file2):
if not exists(file1) or not exists(file2):
return False
else:
from os.path import samefile
return samefile(file1, file2)
def exec_(argv): # never returns
"""Wrapper to os.execv which shows the command and runs any atexit handlers (for coverage's sake).
Like os.execv, this function never returns.
"""
# info('EXEC' + colorize(argv)) # TODO: debug logging by environment variable
# in python3, sys.exitfunc has gone away, and atexit._run_exitfuncs seems to be the only pubic-ish interface
# https://hg.python.org/cpython/file/3.4/Modules/atexitmodule.c#l289
import atexit
atexit._run_exitfuncs()
from os import execv
execv(argv[0], argv)
class Scratch(object):
def __init__(self):
self.dir = join(user_cache_dir(), 'venv-update', __version__)
self.venv = join(self.dir, 'venv')
self.python = venv_python(self.venv)
self.src = join(self.dir, 'src')
def exec_scratch_virtualenv(args):
"""
goals:
- get any random site-packages off of the pythonpath
- ensure we can import virtualenv
- ensure that we're not using the interpreter that we may need to delete
- idempotency: do nothing if the above goals are already met
"""
scratch = Scratch()
if not exists(scratch.python):
run(('virtualenv', scratch.venv))
if not exists(join(scratch.src, 'virtualenv.py')):
scratch_python = venv_python(scratch.venv)
# TODO: do we allow user-defined override of which version of virtualenv to install?
tmp = scratch.src + '.tmp'
run((scratch_python, '-m', 'pip.__main__', 'install', 'virtualenv', '--target', tmp))
from os import rename
rename(tmp, scratch.src)
import sys
from os.path import realpath
# We want to compare the paths themselves as sometimes sys.path is the same
# as scratch.venv, but with a suffix of bin/..
if realpath(sys.prefix) != realpath(scratch.venv):
# TODO-TEST: sometimes we would get a stale version of venv-update
exec_((scratch.python, dotpy(__file__)) + args) # never returns
# TODO-TEST: the original venv-update's directory was on sys.path (when using symlinking)
sys.path[0] = scratch.src
def get_original_path(venv_path): # TODO-TEST: a unit test
"""This helps us know whether someone has tried to relocate the virtualenv"""
return check_output(('sh', '-c', '. %s; printf "$VIRTUAL_ENV"' % venv_executable(venv_path, 'activate')))
def has_system_site_packages(interpreter):
# TODO: unit-test
system_site_packages = check_output((
interpreter,
'-c',
# stolen directly from virtualenv's site.py
"""\
import site, os.path
print(
0
if os.path.exists(
os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt')
) else
1
)"""
))
system_site_packages = int(system_site_packages)
assert system_site_packages in (0, 1)
return bool(system_site_packages)
def get_python_version(interpreter):
if not exists(interpreter):
return None
cmd = (interpreter, '-c', 'import sys; print(sys.version)')
return check_output(cmd)
def invalid_virtualenv_reason(venv_path, source_python, destination_python, options):
try:
orig_path = get_original_path(venv_path)
except CalledProcessError:
return 'could not inspect metadata'
if not samefile(orig_path, venv_path):
return 'virtualenv moved {} -> {}'.format(timid_relpath(orig_path), timid_relpath(venv_path))
elif has_system_site_packages(destination_python) != options.system_site_packages:
return 'system-site-packages changed, to %s' % options.system_site_packages
if source_python is None:
return
destination_version = get_python_version(destination_python)
source_version = get_python_version(source_python)
if source_version != destination_version:
return 'python version changed {} -> {}'.format(destination_version, source_version)
def ensure_virtualenv(args, return_values):
"""Ensure we have a valid virtualenv."""
def adjust_options(options, args):
# TODO-TEST: proper error message with no arguments
venv_path = return_values.venv_path = args[0]
if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':
from os.path import abspath, basename, dirname
options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))
# end of option munging.
# there are two python interpreters involved here:
# 1) the interpreter we're instructing virtualenv to copy
if options.python is None:
source_python = None
else:
source_python = virtualenv.resolve_interpreter(options.python)
# 2) the interpreter virtualenv will create
destination_python = venv_python(venv_path)
if exists(destination_python):
reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)
if reason:
info('Removing invalidated virtualenv. (%s)' % reason)
run(('rm', '-rf', venv_path))
else:
info('Keeping valid virtualenv from previous run.')
raise SystemExit(0) # looks good! we're done here.
# this is actually a documented extension point:
# http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options
import virtualenv
virtualenv.adjust_options = adjust_options
from sys import argv
argv[:] = ('virtualenv',) + args
info(colorize(argv))
raise_on_failure(virtualenv.main)
# There might not be a venv_path if doing something like "venv= --version"
# and not actually asking virtualenv to make a venv.
if return_values.venv_path is not None:
run(('rm', '-rf', join(return_values.venv_path, 'local')))
def wait_for_all_subprocesses():
from os import wait
try:
while True:
wait()
except OSError as error:
if error.errno == 10: # no child processes
return
else:
raise
def mark_venv_valid(venv_path):
wait_for_all_subprocesses()
touch(venv_path, None)
def mark_venv_invalid(venv_path):
# LBYL, to attempt to avoid any exception during exception handling
from os.path import isdir
if venv_path and isdir(venv_path):
info('')
info("Something went wrong! Sending '%s' back in time, so make knows it's invalid." % timid_relpath(venv_path))
wait_for_all_subprocesses()
touch(venv_path, 0)
def dotpy(filename):
if filename.endswith(('.pyc', '.pyo', '.pyd')):
return filename[:-1]
else:
return filename
def venv_executable(venv_path, executable):
return join(venv_path, 'bin', executable)
def venv_python(venv_path):
return venv_executable(venv_path, 'python')
def user_cache_dir():
# stolen from pip.utils.appdirs.user_cache_dir
from os import getenv
from os.path import expanduser
return getenv('XDG_CACHE_HOME', expanduser('~/.cache'))
def venv_update(
venv=DEFAULT_OPTION_VALUES['venv='],
install=DEFAULT_OPTION_VALUES['install='],
pip_command=DEFAULT_OPTION_VALUES['pip-command='],
bootstrap_deps=DEFAULT_OPTION_VALUES['bootstrap-deps='],
):
"""we have an arbitrary python interpreter active, (possibly) outside the virtualenv we want.
make a fresh venv at the right spot, make sure it has pip-faster, and use it
"""
# SMELL: mutable argument as return value
class return_values(object):
venv_path = None
try:
ensure_virtualenv(venv, return_values)
if return_values.venv_path is None:
return
# invariant: the final virtualenv exists, with the right python version
raise_on_failure(lambda: pip_faster(return_values.venv_path, pip_command, install, bootstrap_deps))
except BaseException:
mark_venv_invalid(return_values.venv_path)
raise
else:
mark_venv_valid(return_values.venv_path)
def execfile_(filename):
with open(filename) as code:
code = compile(code.read(), filename, 'exec')
exec(code, {'__file__': filename})
def pip_faster(venv_path, pip_command, install, bootstrap_deps):
"""install and run pip-faster"""
# activate the virtualenv
execfile_(venv_executable(venv_path, 'activate_this.py'))
# disable a useless warning
# FIXME: ensure a "true SSLContext" is available
from os import environ
environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'
# we always have to run the bootstrap, because the presense of an
# executable doesn't imply the right version. pip is able to validate the
# version in the fastpath case quickly anyway.
run(('pip', 'install') + bootstrap_deps)
run(pip_command + install)
def raise_on_failure(mainfunc):
"""raise if and only if mainfunc fails"""
try:
errors = mainfunc()
if errors:
exit(errors)
except CalledProcessError as error:
exit(error.returncode)
except SystemExit as error:
if error.code:
raise
except KeyboardInterrupt: # I don't plan to test-cover this. :pragma:nocover:
exit(1)
def main():
from sys import argv
args = tuple(argv[1:])
# process --help before we create any side-effects.
options = parseargs(args)
exec_scratch_virtualenv(args)
return venv_update(**options)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | venv_update.py | venv_update | python | def venv_update(
venv=DEFAULT_OPTION_VALUES['venv='],
install=DEFAULT_OPTION_VALUES['install='],
pip_command=DEFAULT_OPTION_VALUES['pip-command='],
bootstrap_deps=DEFAULT_OPTION_VALUES['bootstrap-deps='],
):
# SMELL: mutable argument as return value
class return_values(object):
venv_path = None
try:
ensure_virtualenv(venv, return_values)
if return_values.venv_path is None:
return
# invariant: the final virtualenv exists, with the right python version
raise_on_failure(lambda: pip_faster(return_values.venv_path, pip_command, install, bootstrap_deps))
except BaseException:
mark_venv_invalid(return_values.venv_path)
raise
else:
mark_venv_valid(return_values.venv_path) | we have an arbitrary python interpreter active, (possibly) outside the virtualenv we want.
make a fresh venv at the right spot, make sure it has pip-faster, and use it | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/venv_update.py#L374-L399 | [
"def ensure_virtualenv(args, return_values):\n \"\"\"Ensure we have a valid virtualenv.\"\"\"\n def adjust_options(options, args):\n # TODO-TEST: proper error message with no arguments\n venv_path = return_values.venv_path = args[0]\n\n if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':\n from os.path import abspath, basename, dirname\n options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))\n # end of option munging.\n\n # there are two python interpreters involved here:\n # 1) the interpreter we're instructing virtualenv to copy\n if options.python is None:\n source_python = None\n else:\n source_python = virtualenv.resolve_interpreter(options.python)\n # 2) the interpreter virtualenv will create\n destination_python = venv_python(venv_path)\n\n if exists(destination_python):\n reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)\n if reason:\n info('Removing invalidated virtualenv. (%s)' % reason)\n run(('rm', '-rf', venv_path))\n else:\n info('Keeping valid virtualenv from previous run.')\n raise SystemExit(0) # looks good! we're done here.\n\n # this is actually a documented extension point:\n # http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options\n import virtualenv\n virtualenv.adjust_options = adjust_options\n\n from sys import argv\n argv[:] = ('virtualenv',) + args\n info(colorize(argv))\n raise_on_failure(virtualenv.main)\n # There might not be a venv_path if doing something like \"venv= --version\"\n # and not actually asking virtualenv to make a venv.\n if return_values.venv_path is not None:\n run(('rm', '-rf', join(return_values.venv_path, 'local')))\n",
"def mark_venv_invalid(venv_path):\n # LBYL, to attempt to avoid any exception during exception handling\n from os.path import isdir\n if venv_path and isdir(venv_path):\n info('')\n info(\"Something went wrong! Sending '%s' back in time, so make knows it's invalid.\" % timid_relpath(venv_path))\n wait_for_all_subprocesses()\n touch(venv_path, 0)\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''\
usage: venv-update [-hV] [options]
Update a (possibly non-existent) virtualenv directory using a pip requirements
file. When this script completes, the virtualenv directory should contain the
same packages as if it were deleted then rebuilt.
venv-update uses "trailing equal" options (e.g. venv=) to delimit groups of
(conventional, dashed) options to pass to wrapped commands (virtualenv and pip).
Options:
venv= parameters are passed to virtualenv
default: {venv=}
install= options to pip-command
default: {install=}
pip-command= is run after the virtualenv directory is bootstrapped
default: {pip-command=}
bootstrap-deps= dependencies to install before pip-command= is run
default: {bootstrap-deps=}
Examples:
# install requirements.txt to "venv"
venv-update
# install requirements.txt to "myenv"
venv-update venv= myenv
# install requirements.txt to "myenv" using Python 3.4
venv-update venv= -ppython3.4 myenv
# install myreqs.txt to "venv"
venv-update install= -r myreqs.txt
# install requirements.txt to "venv", verbosely
venv-update venv= venv -vvv install= -r requirements.txt -vvv
# install requirements.txt to "venv", without pip-faster --update --prune
venv-update pip-command= pip install
We strongly recommend that you keep the default value of pip-command= in order
to quickly and reproducibly install your requirements. You can override the
packages installed during bootstrapping, prior to pip-command=, by setting
bootstrap-deps=
Pip options are also controllable via environment variables.
See https://pip.readthedocs.org/en/stable/user_guide/#environment-variables
For example:
PIP_INDEX_URL=https://pypi.example.com/simple venv-update
Please send issues to: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os.path import exists
from os.path import join
from subprocess import CalledProcessError
__version__ = '3.2.2'
DEFAULT_VIRTUALENV_PATH = 'venv'
DEFAULT_OPTION_VALUES = {
'venv=': (DEFAULT_VIRTUALENV_PATH,),
'install=': ('-r', 'requirements.txt',),
'pip-command=': ('pip-faster', 'install', '--upgrade', '--prune'),
'bootstrap-deps=': ('venv-update==' + __version__,),
}
__doc__ = __doc__.format(
**{key: ' '.join(val) for key, val in DEFAULT_OPTION_VALUES.items()}
)
# This script must not rely on anything other than
# stdlib>=2.6 and virtualenv>1.11
def parseargs(argv):
'''handle --help, --version and our double-equal ==options'''
args = []
options = {}
key = None
for arg in argv:
if arg in DEFAULT_OPTION_VALUES:
key = arg.strip('=').replace('-', '_')
options[key] = ()
elif key is None:
args.append(arg)
else:
options[key] += (arg,)
if set(args) & {'-h', '--help'}:
print(__doc__, end='')
exit(0)
elif set(args) & {'-V', '--version'}:
print(__version__)
exit(0)
elif args:
exit('invalid option: %s\nTry --help for more information.' % args[0])
return options
def timid_relpath(arg):
"""convert an argument to a relative path, carefully"""
# TODO-TEST: unit tests
from os.path import isabs, relpath, sep
if isabs(arg):
result = relpath(arg)
if result.count(sep) + 1 < arg.count(sep):
return result
return arg
def shellescape(args):
from pipes import quote
return ' '.join(quote(timid_relpath(arg)) for arg in args)
def colorize(cmd):
from os import isatty
if isatty(1):
template = '\033[36m>\033[m \033[32m{0}\033[m'
else:
template = '> {0}'
return template.format(shellescape(cmd))
def run(cmd):
from subprocess import check_call
check_call(('echo', colorize(cmd)))
check_call(cmd)
def info(msg):
# use a subprocess to ensure correct output interleaving.
from subprocess import check_call
check_call(('echo', msg))
def check_output(cmd):
from subprocess import Popen, PIPE
process = Popen(cmd, stdout=PIPE)
output, _ = process.communicate()
if process.returncode:
raise CalledProcessError(process.returncode, cmd)
else:
assert process.returncode == 0
return output.decode('UTF-8')
def samefile(file1, file2):
if not exists(file1) or not exists(file2):
return False
else:
from os.path import samefile
return samefile(file1, file2)
def exec_(argv): # never returns
"""Wrapper to os.execv which shows the command and runs any atexit handlers (for coverage's sake).
Like os.execv, this function never returns.
"""
# info('EXEC' + colorize(argv)) # TODO: debug logging by environment variable
# in python3, sys.exitfunc has gone away, and atexit._run_exitfuncs seems to be the only pubic-ish interface
# https://hg.python.org/cpython/file/3.4/Modules/atexitmodule.c#l289
import atexit
atexit._run_exitfuncs()
from os import execv
execv(argv[0], argv)
class Scratch(object):
def __init__(self):
self.dir = join(user_cache_dir(), 'venv-update', __version__)
self.venv = join(self.dir, 'venv')
self.python = venv_python(self.venv)
self.src = join(self.dir, 'src')
def exec_scratch_virtualenv(args):
"""
goals:
- get any random site-packages off of the pythonpath
- ensure we can import virtualenv
- ensure that we're not using the interpreter that we may need to delete
- idempotency: do nothing if the above goals are already met
"""
scratch = Scratch()
if not exists(scratch.python):
run(('virtualenv', scratch.venv))
if not exists(join(scratch.src, 'virtualenv.py')):
scratch_python = venv_python(scratch.venv)
# TODO: do we allow user-defined override of which version of virtualenv to install?
tmp = scratch.src + '.tmp'
run((scratch_python, '-m', 'pip.__main__', 'install', 'virtualenv', '--target', tmp))
from os import rename
rename(tmp, scratch.src)
import sys
from os.path import realpath
# We want to compare the paths themselves as sometimes sys.path is the same
# as scratch.venv, but with a suffix of bin/..
if realpath(sys.prefix) != realpath(scratch.venv):
# TODO-TEST: sometimes we would get a stale version of venv-update
exec_((scratch.python, dotpy(__file__)) + args) # never returns
# TODO-TEST: the original venv-update's directory was on sys.path (when using symlinking)
sys.path[0] = scratch.src
def get_original_path(venv_path): # TODO-TEST: a unit test
"""This helps us know whether someone has tried to relocate the virtualenv"""
return check_output(('sh', '-c', '. %s; printf "$VIRTUAL_ENV"' % venv_executable(venv_path, 'activate')))
def has_system_site_packages(interpreter):
# TODO: unit-test
system_site_packages = check_output((
interpreter,
'-c',
# stolen directly from virtualenv's site.py
"""\
import site, os.path
print(
0
if os.path.exists(
os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt')
) else
1
)"""
))
system_site_packages = int(system_site_packages)
assert system_site_packages in (0, 1)
return bool(system_site_packages)
def get_python_version(interpreter):
if not exists(interpreter):
return None
cmd = (interpreter, '-c', 'import sys; print(sys.version)')
return check_output(cmd)
def invalid_virtualenv_reason(venv_path, source_python, destination_python, options):
try:
orig_path = get_original_path(venv_path)
except CalledProcessError:
return 'could not inspect metadata'
if not samefile(orig_path, venv_path):
return 'virtualenv moved {} -> {}'.format(timid_relpath(orig_path), timid_relpath(venv_path))
elif has_system_site_packages(destination_python) != options.system_site_packages:
return 'system-site-packages changed, to %s' % options.system_site_packages
if source_python is None:
return
destination_version = get_python_version(destination_python)
source_version = get_python_version(source_python)
if source_version != destination_version:
return 'python version changed {} -> {}'.format(destination_version, source_version)
def ensure_virtualenv(args, return_values):
"""Ensure we have a valid virtualenv."""
def adjust_options(options, args):
# TODO-TEST: proper error message with no arguments
venv_path = return_values.venv_path = args[0]
if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':
from os.path import abspath, basename, dirname
options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))
# end of option munging.
# there are two python interpreters involved here:
# 1) the interpreter we're instructing virtualenv to copy
if options.python is None:
source_python = None
else:
source_python = virtualenv.resolve_interpreter(options.python)
# 2) the interpreter virtualenv will create
destination_python = venv_python(venv_path)
if exists(destination_python):
reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)
if reason:
info('Removing invalidated virtualenv. (%s)' % reason)
run(('rm', '-rf', venv_path))
else:
info('Keeping valid virtualenv from previous run.')
raise SystemExit(0) # looks good! we're done here.
# this is actually a documented extension point:
# http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options
import virtualenv
virtualenv.adjust_options = adjust_options
from sys import argv
argv[:] = ('virtualenv',) + args
info(colorize(argv))
raise_on_failure(virtualenv.main)
# There might not be a venv_path if doing something like "venv= --version"
# and not actually asking virtualenv to make a venv.
if return_values.venv_path is not None:
run(('rm', '-rf', join(return_values.venv_path, 'local')))
def wait_for_all_subprocesses():
from os import wait
try:
while True:
wait()
except OSError as error:
if error.errno == 10: # no child processes
return
else:
raise
def touch(filename, timestamp):
"""set the mtime of a file"""
if timestamp is not None:
timestamp = (timestamp, timestamp) # atime, mtime
from os import utime
utime(filename, timestamp)
def mark_venv_valid(venv_path):
wait_for_all_subprocesses()
touch(venv_path, None)
def mark_venv_invalid(venv_path):
# LBYL, to attempt to avoid any exception during exception handling
from os.path import isdir
if venv_path and isdir(venv_path):
info('')
info("Something went wrong! Sending '%s' back in time, so make knows it's invalid." % timid_relpath(venv_path))
wait_for_all_subprocesses()
touch(venv_path, 0)
def dotpy(filename):
if filename.endswith(('.pyc', '.pyo', '.pyd')):
return filename[:-1]
else:
return filename
def venv_executable(venv_path, executable):
return join(venv_path, 'bin', executable)
def venv_python(venv_path):
return venv_executable(venv_path, 'python')
def user_cache_dir():
# stolen from pip.utils.appdirs.user_cache_dir
from os import getenv
from os.path import expanduser
return getenv('XDG_CACHE_HOME', expanduser('~/.cache'))
def execfile_(filename):
with open(filename) as code:
code = compile(code.read(), filename, 'exec')
exec(code, {'__file__': filename})
def pip_faster(venv_path, pip_command, install, bootstrap_deps):
"""install and run pip-faster"""
# activate the virtualenv
execfile_(venv_executable(venv_path, 'activate_this.py'))
# disable a useless warning
# FIXME: ensure a "true SSLContext" is available
from os import environ
environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'
# we always have to run the bootstrap, because the presense of an
# executable doesn't imply the right version. pip is able to validate the
# version in the fastpath case quickly anyway.
run(('pip', 'install') + bootstrap_deps)
run(pip_command + install)
def raise_on_failure(mainfunc):
"""raise if and only if mainfunc fails"""
try:
errors = mainfunc()
if errors:
exit(errors)
except CalledProcessError as error:
exit(error.returncode)
except SystemExit as error:
if error.code:
raise
except KeyboardInterrupt: # I don't plan to test-cover this. :pragma:nocover:
exit(1)
def main():
from sys import argv
args = tuple(argv[1:])
# process --help before we create any side-effects.
options = parseargs(args)
exec_scratch_virtualenv(args)
return venv_update(**options)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | venv_update.py | pip_faster | python | def pip_faster(venv_path, pip_command, install, bootstrap_deps):
# activate the virtualenv
execfile_(venv_executable(venv_path, 'activate_this.py'))
# disable a useless warning
# FIXME: ensure a "true SSLContext" is available
from os import environ
environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'
# we always have to run the bootstrap, because the presense of an
# executable doesn't imply the right version. pip is able to validate the
# version in the fastpath case quickly anyway.
run(('pip', 'install') + bootstrap_deps)
run(pip_command + install) | install and run pip-faster | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/venv_update.py#L408-L423 | [
"def run(cmd):\n from subprocess import check_call\n check_call(('echo', colorize(cmd)))\n check_call(cmd)\n",
"def venv_executable(venv_path, executable):\n return join(venv_path, 'bin', executable)\n",
"def execfile_(filename):\n with open(filename) as code:\n code = compile(code.read(), filename, 'exec')\n exec(code, {'__file__': filename})\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''\
usage: venv-update [-hV] [options]
Update a (possibly non-existent) virtualenv directory using a pip requirements
file. When this script completes, the virtualenv directory should contain the
same packages as if it were deleted then rebuilt.
venv-update uses "trailing equal" options (e.g. venv=) to delimit groups of
(conventional, dashed) options to pass to wrapped commands (virtualenv and pip).
Options:
venv= parameters are passed to virtualenv
default: {venv=}
install= options to pip-command
default: {install=}
pip-command= is run after the virtualenv directory is bootstrapped
default: {pip-command=}
bootstrap-deps= dependencies to install before pip-command= is run
default: {bootstrap-deps=}
Examples:
# install requirements.txt to "venv"
venv-update
# install requirements.txt to "myenv"
venv-update venv= myenv
# install requirements.txt to "myenv" using Python 3.4
venv-update venv= -ppython3.4 myenv
# install myreqs.txt to "venv"
venv-update install= -r myreqs.txt
# install requirements.txt to "venv", verbosely
venv-update venv= venv -vvv install= -r requirements.txt -vvv
# install requirements.txt to "venv", without pip-faster --update --prune
venv-update pip-command= pip install
We strongly recommend that you keep the default value of pip-command= in order
to quickly and reproducibly install your requirements. You can override the
packages installed during bootstrapping, prior to pip-command=, by setting
bootstrap-deps=
Pip options are also controllable via environment variables.
See https://pip.readthedocs.org/en/stable/user_guide/#environment-variables
For example:
PIP_INDEX_URL=https://pypi.example.com/simple venv-update
Please send issues to: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os.path import exists
from os.path import join
from subprocess import CalledProcessError
__version__ = '3.2.2'
DEFAULT_VIRTUALENV_PATH = 'venv'
DEFAULT_OPTION_VALUES = {
'venv=': (DEFAULT_VIRTUALENV_PATH,),
'install=': ('-r', 'requirements.txt',),
'pip-command=': ('pip-faster', 'install', '--upgrade', '--prune'),
'bootstrap-deps=': ('venv-update==' + __version__,),
}
__doc__ = __doc__.format(
**{key: ' '.join(val) for key, val in DEFAULT_OPTION_VALUES.items()}
)
# This script must not rely on anything other than
# stdlib>=2.6 and virtualenv>1.11
def parseargs(argv):
'''handle --help, --version and our double-equal ==options'''
args = []
options = {}
key = None
for arg in argv:
if arg in DEFAULT_OPTION_VALUES:
key = arg.strip('=').replace('-', '_')
options[key] = ()
elif key is None:
args.append(arg)
else:
options[key] += (arg,)
if set(args) & {'-h', '--help'}:
print(__doc__, end='')
exit(0)
elif set(args) & {'-V', '--version'}:
print(__version__)
exit(0)
elif args:
exit('invalid option: %s\nTry --help for more information.' % args[0])
return options
def timid_relpath(arg):
"""convert an argument to a relative path, carefully"""
# TODO-TEST: unit tests
from os.path import isabs, relpath, sep
if isabs(arg):
result = relpath(arg)
if result.count(sep) + 1 < arg.count(sep):
return result
return arg
def shellescape(args):
from pipes import quote
return ' '.join(quote(timid_relpath(arg)) for arg in args)
def colorize(cmd):
from os import isatty
if isatty(1):
template = '\033[36m>\033[m \033[32m{0}\033[m'
else:
template = '> {0}'
return template.format(shellescape(cmd))
def run(cmd):
from subprocess import check_call
check_call(('echo', colorize(cmd)))
check_call(cmd)
def info(msg):
# use a subprocess to ensure correct output interleaving.
from subprocess import check_call
check_call(('echo', msg))
def check_output(cmd):
from subprocess import Popen, PIPE
process = Popen(cmd, stdout=PIPE)
output, _ = process.communicate()
if process.returncode:
raise CalledProcessError(process.returncode, cmd)
else:
assert process.returncode == 0
return output.decode('UTF-8')
def samefile(file1, file2):
if not exists(file1) or not exists(file2):
return False
else:
from os.path import samefile
return samefile(file1, file2)
def exec_(argv): # never returns
"""Wrapper to os.execv which shows the command and runs any atexit handlers (for coverage's sake).
Like os.execv, this function never returns.
"""
# info('EXEC' + colorize(argv)) # TODO: debug logging by environment variable
# in python3, sys.exitfunc has gone away, and atexit._run_exitfuncs seems to be the only pubic-ish interface
# https://hg.python.org/cpython/file/3.4/Modules/atexitmodule.c#l289
import atexit
atexit._run_exitfuncs()
from os import execv
execv(argv[0], argv)
class Scratch(object):
def __init__(self):
self.dir = join(user_cache_dir(), 'venv-update', __version__)
self.venv = join(self.dir, 'venv')
self.python = venv_python(self.venv)
self.src = join(self.dir, 'src')
def exec_scratch_virtualenv(args):
"""
goals:
- get any random site-packages off of the pythonpath
- ensure we can import virtualenv
- ensure that we're not using the interpreter that we may need to delete
- idempotency: do nothing if the above goals are already met
"""
scratch = Scratch()
if not exists(scratch.python):
run(('virtualenv', scratch.venv))
if not exists(join(scratch.src, 'virtualenv.py')):
scratch_python = venv_python(scratch.venv)
# TODO: do we allow user-defined override of which version of virtualenv to install?
tmp = scratch.src + '.tmp'
run((scratch_python, '-m', 'pip.__main__', 'install', 'virtualenv', '--target', tmp))
from os import rename
rename(tmp, scratch.src)
import sys
from os.path import realpath
# We want to compare the paths themselves as sometimes sys.path is the same
# as scratch.venv, but with a suffix of bin/..
if realpath(sys.prefix) != realpath(scratch.venv):
# TODO-TEST: sometimes we would get a stale version of venv-update
exec_((scratch.python, dotpy(__file__)) + args) # never returns
# TODO-TEST: the original venv-update's directory was on sys.path (when using symlinking)
sys.path[0] = scratch.src
def get_original_path(venv_path): # TODO-TEST: a unit test
"""This helps us know whether someone has tried to relocate the virtualenv"""
return check_output(('sh', '-c', '. %s; printf "$VIRTUAL_ENV"' % venv_executable(venv_path, 'activate')))
def has_system_site_packages(interpreter):
# TODO: unit-test
system_site_packages = check_output((
interpreter,
'-c',
# stolen directly from virtualenv's site.py
"""\
import site, os.path
print(
0
if os.path.exists(
os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt')
) else
1
)"""
))
system_site_packages = int(system_site_packages)
assert system_site_packages in (0, 1)
return bool(system_site_packages)
def get_python_version(interpreter):
if not exists(interpreter):
return None
cmd = (interpreter, '-c', 'import sys; print(sys.version)')
return check_output(cmd)
def invalid_virtualenv_reason(venv_path, source_python, destination_python, options):
try:
orig_path = get_original_path(venv_path)
except CalledProcessError:
return 'could not inspect metadata'
if not samefile(orig_path, venv_path):
return 'virtualenv moved {} -> {}'.format(timid_relpath(orig_path), timid_relpath(venv_path))
elif has_system_site_packages(destination_python) != options.system_site_packages:
return 'system-site-packages changed, to %s' % options.system_site_packages
if source_python is None:
return
destination_version = get_python_version(destination_python)
source_version = get_python_version(source_python)
if source_version != destination_version:
return 'python version changed {} -> {}'.format(destination_version, source_version)
def ensure_virtualenv(args, return_values):
"""Ensure we have a valid virtualenv."""
def adjust_options(options, args):
# TODO-TEST: proper error message with no arguments
venv_path = return_values.venv_path = args[0]
if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':
from os.path import abspath, basename, dirname
options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))
# end of option munging.
# there are two python interpreters involved here:
# 1) the interpreter we're instructing virtualenv to copy
if options.python is None:
source_python = None
else:
source_python = virtualenv.resolve_interpreter(options.python)
# 2) the interpreter virtualenv will create
destination_python = venv_python(venv_path)
if exists(destination_python):
reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)
if reason:
info('Removing invalidated virtualenv. (%s)' % reason)
run(('rm', '-rf', venv_path))
else:
info('Keeping valid virtualenv from previous run.')
raise SystemExit(0) # looks good! we're done here.
# this is actually a documented extension point:
# http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options
import virtualenv
virtualenv.adjust_options = adjust_options
from sys import argv
argv[:] = ('virtualenv',) + args
info(colorize(argv))
raise_on_failure(virtualenv.main)
# There might not be a venv_path if doing something like "venv= --version"
# and not actually asking virtualenv to make a venv.
if return_values.venv_path is not None:
run(('rm', '-rf', join(return_values.venv_path, 'local')))
def wait_for_all_subprocesses():
from os import wait
try:
while True:
wait()
except OSError as error:
if error.errno == 10: # no child processes
return
else:
raise
def touch(filename, timestamp):
"""set the mtime of a file"""
if timestamp is not None:
timestamp = (timestamp, timestamp) # atime, mtime
from os import utime
utime(filename, timestamp)
def mark_venv_valid(venv_path):
wait_for_all_subprocesses()
touch(venv_path, None)
def mark_venv_invalid(venv_path):
# LBYL, to attempt to avoid any exception during exception handling
from os.path import isdir
if venv_path and isdir(venv_path):
info('')
info("Something went wrong! Sending '%s' back in time, so make knows it's invalid." % timid_relpath(venv_path))
wait_for_all_subprocesses()
touch(venv_path, 0)
def dotpy(filename):
if filename.endswith(('.pyc', '.pyo', '.pyd')):
return filename[:-1]
else:
return filename
def venv_executable(venv_path, executable):
return join(venv_path, 'bin', executable)
def venv_python(venv_path):
return venv_executable(venv_path, 'python')
def user_cache_dir():
# stolen from pip.utils.appdirs.user_cache_dir
from os import getenv
from os.path import expanduser
return getenv('XDG_CACHE_HOME', expanduser('~/.cache'))
def venv_update(
venv=DEFAULT_OPTION_VALUES['venv='],
install=DEFAULT_OPTION_VALUES['install='],
pip_command=DEFAULT_OPTION_VALUES['pip-command='],
bootstrap_deps=DEFAULT_OPTION_VALUES['bootstrap-deps='],
):
"""we have an arbitrary python interpreter active, (possibly) outside the virtualenv we want.
make a fresh venv at the right spot, make sure it has pip-faster, and use it
"""
# SMELL: mutable argument as return value
class return_values(object):
venv_path = None
try:
ensure_virtualenv(venv, return_values)
if return_values.venv_path is None:
return
# invariant: the final virtualenv exists, with the right python version
raise_on_failure(lambda: pip_faster(return_values.venv_path, pip_command, install, bootstrap_deps))
except BaseException:
mark_venv_invalid(return_values.venv_path)
raise
else:
mark_venv_valid(return_values.venv_path)
def execfile_(filename):
with open(filename) as code:
code = compile(code.read(), filename, 'exec')
exec(code, {'__file__': filename})
def raise_on_failure(mainfunc):
"""raise if and only if mainfunc fails"""
try:
errors = mainfunc()
if errors:
exit(errors)
except CalledProcessError as error:
exit(error.returncode)
except SystemExit as error:
if error.code:
raise
except KeyboardInterrupt: # I don't plan to test-cover this. :pragma:nocover:
exit(1)
def main():
from sys import argv
args = tuple(argv[1:])
# process --help before we create any side-effects.
options = parseargs(args)
exec_scratch_virtualenv(args)
return venv_update(**options)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | venv_update.py | raise_on_failure | python | def raise_on_failure(mainfunc):
try:
errors = mainfunc()
if errors:
exit(errors)
except CalledProcessError as error:
exit(error.returncode)
except SystemExit as error:
if error.code:
raise
except KeyboardInterrupt: # I don't plan to test-cover this. :pragma:nocover:
exit(1) | raise if and only if mainfunc fails | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/venv_update.py#L426-L438 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''\
usage: venv-update [-hV] [options]
Update a (possibly non-existent) virtualenv directory using a pip requirements
file. When this script completes, the virtualenv directory should contain the
same packages as if it were deleted then rebuilt.
venv-update uses "trailing equal" options (e.g. venv=) to delimit groups of
(conventional, dashed) options to pass to wrapped commands (virtualenv and pip).
Options:
venv= parameters are passed to virtualenv
default: {venv=}
install= options to pip-command
default: {install=}
pip-command= is run after the virtualenv directory is bootstrapped
default: {pip-command=}
bootstrap-deps= dependencies to install before pip-command= is run
default: {bootstrap-deps=}
Examples:
# install requirements.txt to "venv"
venv-update
# install requirements.txt to "myenv"
venv-update venv= myenv
# install requirements.txt to "myenv" using Python 3.4
venv-update venv= -ppython3.4 myenv
# install myreqs.txt to "venv"
venv-update install= -r myreqs.txt
# install requirements.txt to "venv", verbosely
venv-update venv= venv -vvv install= -r requirements.txt -vvv
# install requirements.txt to "venv", without pip-faster --update --prune
venv-update pip-command= pip install
We strongly recommend that you keep the default value of pip-command= in order
to quickly and reproducibly install your requirements. You can override the
packages installed during bootstrapping, prior to pip-command=, by setting
bootstrap-deps=
Pip options are also controllable via environment variables.
See https://pip.readthedocs.org/en/stable/user_guide/#environment-variables
For example:
PIP_INDEX_URL=https://pypi.example.com/simple venv-update
Please send issues to: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os.path import exists
from os.path import join
from subprocess import CalledProcessError
__version__ = '3.2.2'
DEFAULT_VIRTUALENV_PATH = 'venv'
DEFAULT_OPTION_VALUES = {
'venv=': (DEFAULT_VIRTUALENV_PATH,),
'install=': ('-r', 'requirements.txt',),
'pip-command=': ('pip-faster', 'install', '--upgrade', '--prune'),
'bootstrap-deps=': ('venv-update==' + __version__,),
}
__doc__ = __doc__.format(
**{key: ' '.join(val) for key, val in DEFAULT_OPTION_VALUES.items()}
)
# This script must not rely on anything other than
# stdlib>=2.6 and virtualenv>1.11
def parseargs(argv):
    '''handle --help, --version and our double-equal ==options'''
    positional = []
    options = {}
    current_key = None
    for token in argv:
        if token in DEFAULT_OPTION_VALUES:
            # start collecting values for this trailing-equal option group
            current_key = token.strip('=').replace('-', '_')
            options[current_key] = ()
        elif current_key is None:
            positional.append(token)
        else:
            options[current_key] += (token,)

    flags = set(positional)
    if flags & {'-h', '--help'}:
        print(__doc__, end='')
        exit(0)
    elif flags & {'-V', '--version'}:
        print(__version__)
        exit(0)
    elif positional:
        exit('invalid option: %s\nTry --help for more information.' % positional[0])

    return options
def timid_relpath(arg):
    """convert an argument to a relative path, carefully"""
    # TODO-TEST: unit tests
    from os.path import isabs, relpath, sep

    if not isabs(arg):
        return arg
    candidate = relpath(arg)
    # only prefer the relative spelling when it is strictly shallower
    if candidate.count(sep) + 1 < arg.count(sep):
        return candidate
    return arg
def shellescape(args):
    """Render *args* as one shell-safe, copy-pasteable command string.

    Each argument is cosmetically shortened via timid_relpath, then quoted.
    """
    from pipes import quote
    return ' '.join(quote(timid_relpath(arg)) for arg in args)
def colorize(cmd):
    """Format *cmd* as a "> cmd" line, ANSI-colored when stdout is a tty."""
    from os import isatty

    if isatty(1):
        template = '\033[36m>\033[m \033[32m{0}\033[m'
    else:
        template = '> {0}'

    return template.format(shellescape(cmd))


def run(cmd):
    """Echo *cmd* (colorized), then execute it; raises on nonzero exit."""
    from subprocess import check_call
    check_call(('echo', colorize(cmd)))
    check_call(cmd)


def info(msg):
    """Print *msg* to stdout."""
    # use a subprocess to ensure correct output interleaving.
    from subprocess import check_call
    check_call(('echo', msg))
def check_output(cmd):
    """Run *cmd*, return its decoded stdout; raise CalledProcessError on failure."""
    from subprocess import PIPE, Popen

    proc = Popen(cmd, stdout=PIPE)
    stdout, _ = proc.communicate()
    if proc.returncode != 0:
        raise CalledProcessError(proc.returncode, cmd)
    return stdout.decode('UTF-8')
def samefile(file1, file2):
    """os.path.samefile, except a missing path simply compares as "not same"."""
    if exists(file1) and exists(file2):
        from os.path import samefile as _samefile
        return _samefile(file1, file2)
    return False
def exec_(argv):  # never returns
    """Wrapper to os.execv which shows the command and runs any atexit handlers (for coverage's sake).

    Like os.execv, this function never returns.
    """
    # info('EXEC' + colorize(argv))  # TODO: debug logging by environment variable

    # in python3, sys.exitfunc has gone away, and atexit._run_exitfuncs seems to be the only pubic-ish interface
    #   https://hg.python.org/cpython/file/3.4/Modules/atexitmodule.c#l289
    import atexit
    atexit._run_exitfuncs()

    from os import execv
    execv(argv[0], argv)  # replaces the current process image
class Scratch(object):
    """Paths of venv-update's private scratch area under the user cache dir."""

    def __init__(self):
        # versioned directory, so different venv-update releases don't collide
        self.dir = join(user_cache_dir(), 'venv-update', __version__)
        self.venv = join(self.dir, 'venv')      # bootstrap virtualenv
        self.python = venv_python(self.venv)    # its python interpreter
        self.src = join(self.dir, 'src')        # vendored copy of virtualenv
def exec_scratch_virtualenv(args):
    """
    goals:
        - get any random site-packages off of the pythonpath
        - ensure we can import virtualenv
        - ensure that we're not using the interpreter that we may need to delete
        - idempotency: do nothing if the above goals are already met
    """
    scratch = Scratch()
    if not exists(scratch.python):
        run(('virtualenv', scratch.venv))

    if not exists(join(scratch.src, 'virtualenv.py')):
        scratch_python = venv_python(scratch.venv)
        # TODO: do we allow user-defined override of which version of virtualenv to install?
        tmp = scratch.src + '.tmp'
        run((scratch_python, '-m', 'pip.__main__', 'install', 'virtualenv', '--target', tmp))

        from os import rename
        rename(tmp, scratch.src)

    import sys
    from os.path import realpath
    # We want to compare the paths themselves as sometimes sys.path is the same
    # as scratch.venv, but with a suffix of bin/..
    if realpath(sys.prefix) != realpath(scratch.venv):
        # TODO-TEST: sometimes we would get a stale version of venv-update
        exec_((scratch.python, dotpy(__file__)) + args)  # never returns

    # TODO-TEST: the original venv-update's directory was on sys.path (when using symlinking)
    sys.path[0] = scratch.src
def get_original_path(venv_path):  # TODO-TEST: a unit test
    """This helps us know whether someone has tried to relocate the virtualenv"""
    # Sourcing the venv's activate script makes the shell export the
    # $VIRTUAL_ENV the venv was created for; the caller compares it to venv_path.
    return check_output(('sh', '-c', '. %s; printf "$VIRTUAL_ENV"' % venv_executable(venv_path, 'activate')))
def has_system_site_packages(interpreter):
    """Ask *interpreter* whether its virtualenv exposes system site-packages."""
    # TODO: unit-test
    system_site_packages = check_output((
        interpreter,
        '-c',
        # stolen directly from virtualenv's site.py
        """\
import site, os.path
print(
    0
    if os.path.exists(
        os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt')
    ) else
    1
)"""
    ))
    system_site_packages = int(system_site_packages)
    assert system_site_packages in (0, 1)
    return bool(system_site_packages)
def get_python_version(interpreter):
    """Return sys.version as reported by *interpreter*, or None if it doesn't exist."""
    if exists(interpreter):
        return check_output((interpreter, '-c', 'import sys; print(sys.version)'))
    return None
def invalid_virtualenv_reason(venv_path, source_python, destination_python, options):
    """Return a human-readable reason the existing venv must be rebuilt, or None if it's fine."""
    try:
        orig_path = get_original_path(venv_path)
    except CalledProcessError:
        # activate script missing/broken: can't even tell where it came from
        return 'could not inspect metadata'
    if not samefile(orig_path, venv_path):
        return 'virtualenv moved {} -> {}'.format(timid_relpath(orig_path), timid_relpath(venv_path))
    elif has_system_site_packages(destination_python) != options.system_site_packages:
        return 'system-site-packages changed, to %s' % options.system_site_packages

    if source_python is None:
        # no explicit -p interpreter requested; nothing more to compare
        return
    destination_version = get_python_version(destination_python)
    source_version = get_python_version(source_python)
    if source_version != destination_version:
        return 'python version changed {} -> {}'.format(destination_version, source_version)
def ensure_virtualenv(args, return_values):
    """Ensure we have a valid virtualenv.

    Runs virtualenv.main() with *args*; stores the venv path on
    *return_values*.  A still-valid venv short-circuits via SystemExit(0).
    """
    def adjust_options(options, args):
        # TODO-TEST: proper error message with no arguments
        venv_path = return_values.venv_path = args[0]
        if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':
            from os.path import abspath, basename, dirname
            options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))
        # end of option munging.

        # there are two python interpreters involved here:
        # 1) the interpreter we're instructing virtualenv to copy
        if options.python is None:
            source_python = None
        else:
            source_python = virtualenv.resolve_interpreter(options.python)
        # 2) the interpreter virtualenv will create
        destination_python = venv_python(venv_path)
        if exists(destination_python):
            reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)
            if reason:
                info('Removing invalidated virtualenv. (%s)' % reason)
                run(('rm', '-rf', venv_path))
            else:
                info('Keeping valid virtualenv from previous run.')
                raise SystemExit(0)  # looks good! we're done here.

    # this is actually a documented extension point:
    #   http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options
    import virtualenv
    virtualenv.adjust_options = adjust_options

    from sys import argv
    argv[:] = ('virtualenv',) + args
    info(colorize(argv))
    raise_on_failure(virtualenv.main)
    # There might not be a venv_path if doing something like "venv= --version"
    # and not actually asking virtualenv to make a venv.
    if return_values.venv_path is not None:
        run(('rm', '-rf', join(return_values.venv_path, 'local')))
def wait_for_all_subprocesses():
    """Block until every child process of this process has been reaped.

    Returns once os.wait() reports there are no children left.
    """
    import errno
    from os import wait
    try:
        while True:
            wait()
    except OSError as error:
        # errno.ECHILD instead of a hard-coded 10: the numeric value of
        # "no child processes" is platform-specific.
        if error.errno == errno.ECHILD:
            return
        else:
            raise
def touch(filename, timestamp):
    """set the mtime of a file"""
    from os import utime

    # utime(None) means "now"; otherwise supply (atime, mtime)
    times = None if timestamp is None else (timestamp, timestamp)
    utime(filename, times)
def mark_venv_valid(venv_path):
    """Stamp the venv directory's mtime as "now", so make sees it as fresh."""
    wait_for_all_subprocesses()
    touch(venv_path, None)


def mark_venv_invalid(venv_path):
    """Backdate the venv directory's mtime to the epoch, so make rebuilds it."""
    # LBYL, to attempt to avoid any exception during exception handling
    from os.path import isdir
    if venv_path and isdir(venv_path):
        info('')
        info("Something went wrong! Sending '%s' back in time, so make knows it's invalid." % timid_relpath(venv_path))
        wait_for_all_subprocesses()
        touch(venv_path, 0)
def dotpy(filename):
    """Map a compiled-python filename (.pyc/.pyo/.pyd) to its .py counterpart."""
    for compiled_suffix in ('.pyc', '.pyo', '.pyd'):
        if filename.endswith(compiled_suffix):
            return filename[:-1]
    return filename
def venv_executable(venv_path, executable):
    """Path of *executable* inside the virtualenv's bin/ directory."""
    return join(venv_path, 'bin', executable)


def venv_python(venv_path):
    """Path of the python interpreter inside the virtualenv."""
    return venv_executable(venv_path, 'python')
def user_cache_dir():
    """Per-user cache directory ($XDG_CACHE_HOME, defaulting to ~/.cache)."""
    # stolen from pip.utils.appdirs.user_cache_dir
    from os import getenv
    from os.path import expanduser

    fallback = expanduser('~/.cache')
    return getenv('XDG_CACHE_HOME', fallback)
def venv_update(
    venv=DEFAULT_OPTION_VALUES['venv='],
    install=DEFAULT_OPTION_VALUES['install='],
    pip_command=DEFAULT_OPTION_VALUES['pip-command='],
    bootstrap_deps=DEFAULT_OPTION_VALUES['bootstrap-deps='],
):
    """we have an arbitrary python interpreter active, (possibly) outside the virtualenv we want.

    make a fresh venv at the right spot, make sure it has pip-faster, and use it
    """
    # SMELL: mutable argument as return value
    class return_values(object):
        venv_path = None

    try:
        ensure_virtualenv(venv, return_values)
        if return_values.venv_path is None:
            return
        # invariant: the final virtualenv exists, with the right python version
        raise_on_failure(lambda: pip_faster(return_values.venv_path, pip_command, install, bootstrap_deps))
    except BaseException:
        # any failure (even KeyboardInterrupt) marks the venv stale for make
        mark_venv_invalid(return_values.venv_path)
        raise
    else:
        mark_venv_valid(return_values.venv_path)
def execfile_(filename):
    """py2/py3 execfile replacement: run *filename* with __file__ set."""
    with open(filename) as source:
        code_obj = compile(source.read(), filename, 'exec')
        exec(code_obj, {'__file__': filename})
def pip_faster(venv_path, pip_command, install, bootstrap_deps):
    """install and run pip-faster"""
    # activate the virtualenv
    execfile_(venv_executable(venv_path, 'activate_this.py'))

    # disable a useless warning
    # FIXME: ensure a "true SSLContext" is available
    from os import environ
    environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'

    # we always have to run the bootstrap, because the presence of an
    # executable doesn't imply the right version. pip is able to validate the
    # version in the fastpath case quickly anyway.
    run(('pip', 'install') + bootstrap_deps)

    run(pip_command + install)
def main():
    """CLI entry point: parse args, re-exec into the scratch venv, then update."""
    from sys import argv
    args = tuple(argv[1:])
    # process --help before we create any side-effects.
    options = parseargs(args)
    exec_scratch_virtualenv(args)
    return venv_update(**options)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | pip_faster.py | cache_installed_wheels | python | def cache_installed_wheels(index_url, installed_packages):
for installed_package in installed_packages:
if not _can_be_cached(installed_package):
continue
_store_wheel_in_cache(installed_package.link.path, index_url) | After installation, pip tells us what it installed and from where.
We build a structure that looks like
.cache/pip-faster/wheelhouse/$index_url/$wheel | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L171-L181 | [
"def _can_be_cached(package):\n return (\n package.is_wheel and\n # An assertion that we're looking in the pip wheel dir\n package.link.path.startswith(CACHE.pip_wheelhouse)\n )\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pip-faster is a thin wrapper around pip.
It only adds a --prune option to the `install` subcommand.
`pip-faster install --prune` will *uninstall* any installed packages that are
not required.
Otherwise, you should find that pip-faster gives the same results as pip, just
more quickly, especially in the case of pinned requirements (e.g.
package-x==1.2.3).
Version control at: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import os
import random
import re
import shutil
import sys
from contextlib import contextmanager
import pip as pipmodule
from pip._internal import logger
from pip._internal.commands.install import InstallCommand
from pip._internal.exceptions import DistributionNotFound
from pip._internal.exceptions import InstallationError
from pip._internal.index import BestVersionAlreadyInstalled
from pip._internal.index import HTMLPage
from pip._internal.index import Link
from pip._internal.index import PackageFinder
from pip._internal.req import InstallRequirement
from pip._internal.wheel import Wheel
from venv_update import colorize
from venv_update import raise_on_failure
from venv_update import timid_relpath
from venv_update import user_cache_dir
# Debian de-vendorizes the version of pip it ships
try: # :pragma:nocover: non-debian
from pip._vendor import pkg_resources
except ImportError: # :pragma:nocover: debian
import pkg_resources
try: # :pragma:nocover: pip>=18.1
from pip._internal.req.constructors import install_req_from_line
except ImportError: # :pragma:nocover: pip<18.1
install_req_from_line = InstallRequirement.from_line
# Thanks six!
# py2 detection without importing six; on py2 the 3-arg raise syntax must be
# hidden inside exec() because it is a SyntaxError on py3.
PY2 = str is bytes
if PY2:  # :pragma:nocover:
    _reraise_src = 'def reraise(tp, value, tb=None): raise tp, value, tb'
    exec(_reraise_src)
else:  # :pragma:nocover:
    def reraise(tp, value, tb=None):
        """Re-raise *value* (of type *tp*) with traceback *tb*."""
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
class CACHE(object):
    """Filesystem locations of the wheel caches: ours, and pip's own."""
    _cache_dir = user_cache_dir()
    # our per-index-url wheelhouse
    wheelhouse = os.path.join(_cache_dir, 'pip-faster', 'wheelhouse')
    # where pip itself puts wheels it builds
    pip_wheelhouse = os.path.join(_cache_dir, 'pip', 'wheels')
def ignorecase_glob(glob):
    """Make a glob pattern case-insensitive: each letter becomes a [xX] class."""
    pieces = []
    for char in glob:
        if char.isalpha():
            pieces.append('[{}{}]'.format(char.lower(), char.upper()))
        else:
            pieces.append(char)
    return ''.join(pieces)
def optimistic_wheel_search(req, index_urls):
    """Search our local wheelhouse for a cached wheel satisfying *req*.

    Returns a pip Link to the first matching, platform-supported wheel,
    or None if nothing cached fits.
    """
    name = req.name.replace('-', '_').lower()
    for index_url in index_urls:
        # wheel filenames use underscores; match the name case-insensitively
        expected_location = os.path.join(
            CACHE.wheelhouse, index_url, ignorecase_glob(name) + '-*.whl',
        )
        for link in glob.glob(expected_location):
            link = Link('file:' + link)
            wheel = Wheel(link.filename)
            if req.specifier.contains(wheel.version) and wheel.supported():
                return link
def is_req_pinned(requirement):
    """True only for requirements pinned by an exact `==x.y.z` specifier."""
    if not requirement:
        # url-style requirement
        return False
    return any(
        spec.operator == '==' and not spec.version.endswith('.*')
        for spec in requirement.specifier
    )
class FasterPackageFinder(PackageFinder):
    """PackageFinder that short-circuits the index search for pinned requirements."""

    def find_requirement(self, req, upgrade):
        if is_req_pinned(req.req):
            # if the version is pinned-down by a ==
            # first try to use any installed package that satisfies the req
            if req.satisfied_by:
                logger.info('Faster! pinned requirement already installed.')
                raise BestVersionAlreadyInstalled

            # then try an optimistic search for a .whl file:
            link = optimistic_wheel_search(req.req, self.index_urls)
            if link is None:
                # The wheel will be built during prepare_files
                logger.debug('No wheel found locally for pinned requirement %s', req)
            else:
                logger.info('Faster! Pinned wheel found, without hitting PyPI.')
                return link
        else:
            # unpinned requirements aren't very notable. only show with -v
            logger.info('slow: full search for unpinned requirement %s', req)

        # otherwise, do the full network search, per usual
        try:
            return super(FasterPackageFinder, self).find_requirement(req, upgrade)
        except DistributionNotFound:
            exc_info = sys.exc_info()
            # Best effort: try and install from suitable version on-disk
            link = optimistic_wheel_search(req.req, self.index_urls)
            if link:
                return link
            else:
                # nothing cached either: re-raise the original failure
                reraise(*exc_info)
def _can_be_cached(package):
    """True if *package* is a wheel located in pip's own wheel-build directory."""
    return (
        package.is_wheel and
        # An assertion that we're looking in the pip wheel dir
        package.link.path.startswith(CACHE.pip_wheelhouse)
    )
def mkdirp(pth):
    """mkdir -p: create *pth* (and parents), tolerating an existing directory."""
    try:
        os.makedirs(pth)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        raise
def _store_wheel_in_cache(file_path, index_url):
    """Copy the wheel at *file_path* into our wheelhouse, keyed by *index_url*."""
    filename = os.path.basename(file_path)
    cache = os.path.join(CACHE.wheelhouse, index_url, filename)
    cache_tmp = '{}.{}'.format(cache, random.randint(0, sys.maxsize))
    cache_dir = os.path.dirname(cache)
    mkdirp(cache_dir)
    # Atomicity: copy to a unique temp name first, then rename into place.
    shutil.copy(file_path, cache_tmp)
    os.rename(cache_tmp, cache)
def get_patched_download_http_url(orig_download_http_url, index_urls):
    """Wrap pip's _download_http_url so wheels it fetches also land in our cache."""
    def pipfaster_download_http_url(link, *args, **kwargs):
        file_path, content_type = orig_download_http_url(link, *args, **kwargs)
        if link.is_wheel:
            # only cache wheels we can attribute to one of our index urls
            for index_url in index_urls:
                if (
                        # pip <18.1
                        isinstance(link.comes_from, HTMLPage) and
                        link.comes_from.url.startswith(index_url)
                ) or (
                        # pip >= 18.1
                        isinstance(link.comes_from, (str, type(''))) and
                        link.comes_from.startswith(index_url)
                ):
                    _store_wheel_in_cache(file_path, index_url)
                    break
        return file_path, content_type
    return pipfaster_download_http_url
def pip(args):
    """Run pip, in-process."""
    # echo the equivalent command line before running, like run() does
    from sys import stdout
    stdout.write(colorize(('pip',) + args))
    stdout.write('\n')
    stdout.flush()

    # returns pip's integer exit status
    return pipmodule._internal.main(list(args))
def dist_to_req(dist):
    """Make a pip.FrozenRequirement from a pkg_resources distribution object"""
    try:  # :pragma:nocover: (pip>=10)
        from pip._internal.operations.freeze import FrozenRequirement
    except ImportError:  # :pragma:nocover: (pip<10)
        from pip import FrozenRequirement

    # normalize the casing, dashes in the req name
    orig_name, dist.project_name = dist.project_name, dist.key
    result = FrozenRequirement.from_dist(dist, [])
    # put things back the way we found it.
    dist.project_name = orig_name

    return result
def pip_get_installed():
    """Code extracted from the middle of the pip freeze command.

    Returns a tuple of FrozenRequirement for every locally-installed,
    non-interpreter distribution.

    FIXME: does not list anything installed via -e
    """
    from pip._internal.utils.misc import dist_is_local

    return tuple(
        dist_to_req(dist)
        for dist in fresh_working_set()
        if dist_is_local(dist)
        if dist.key != 'python'  # See #220
    )
def normalize_name(name):
    """Normalize a python package name a la PEP 503"""
    # https://www.python.org/dev/peps/pep-0503/#normalized-names
    collapsed = re.sub('[-_.]+', '-', name)
    return collapsed.lower()
def fresh_working_set():
    """return a pkg_resources "working set", representing the *currently* installed packages"""
    class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):

        def __init__(self, *args, **kwargs):
            # maps PEP-503-normalized names back to the keys pkg_resources uses
            self._normalized_name_mapping = {}
            super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)

        def add_entry(self, entry):
            """Same as the original .add_entry, but sets only=False, so that egg-links are honored."""
            logger.debug('working-set entry: %r', entry)
            self.entry_keys.setdefault(entry, [])
            self.entries.append(entry)
            for dist in pkg_resources.find_distributions(entry, False):

                # eggs override anything that's installed normally
                # fun fact: pkg_resources.working_set's results depend on the
                # ordering of os.listdir since the order of os.listdir is
                # entirely arbitrary (an implementation detail of file system),
                # without calling site.main(), an .egg-link file may or may not
                # be honored, depending on the filesystem
                replace = (dist.precedence == pkg_resources.EGG_DIST)
                self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
                self.add(dist, entry, False, replace=replace)

        def find_normalized(self, req):
            # look up by normalized name so 'Foo_Bar' and 'foo-bar' both resolve
            req = _package_req_to_pkg_resources_req(str(req))
            req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
            return self.find(req)

    return WorkingSetPlusEditableInstalls()
def req_cycle(req):
    """is this requirement cyclic?"""
    cls = req.__class__
    seen = {req.name}
    node = req
    # walk the comes_from chain until it leaves requirement-land
    while isinstance(node.comes_from, cls):
        node = node.comes_from
        if node.name in seen:
            return True
        seen.add(node.name)
    return False
def pretty_req(req):
    """
    return a copy of a pip requirement that is a bit more readable,
    at the expense of removing some of its data
    """
    from copy import copy

    clone = copy(req)
    clone.link = None
    clone.satisfied_by = None
    return clone
def _package_req_to_pkg_resources_req(req):
    """Re-parse a requirement through pkg_resources so both APIs agree on its shape."""
    return pkg_resources.Requirement.parse(str(req))
def trace_requirements(requirements):
    """given an iterable of pip InstallRequirements,
    return the set of required packages, given their transitive requirements.

    Raises InstallationError if any version conflicts are found.
    """
    requirements = tuple(pretty_req(r) for r in requirements)
    working_set = fresh_working_set()

    # breadth-first traversal:
    from collections import deque
    queue = deque(requirements)
    queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
    errors = []
    result = []
    while queue:
        req = queue.popleft()
        logger.debug('tracing: %s', req)
        try:
            dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
        except pkg_resources.VersionConflict as conflict:
            # collect all conflicts rather than failing on the first one
            dist = conflict.args[0]
            errors.append('Error: version conflict: {} ({}) <-> {}'.format(
                dist, timid_relpath(dist.location), req
            ))

        assert dist is not None, 'Should be unreachable in pip8+'
        result.append(dist_to_req(dist))

        # TODO: pip does no validation of extras. should we?
        extras = [extra for extra in req.extras if extra in dist.extras]
        for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
            sub_req = InstallRequirement(sub_req, req)

            if req_cycle(sub_req):
                logger.warning('Circular dependency! %s', sub_req)
                continue
            elif sub_req.req in queued:
                logger.debug('already queued: %s', sub_req)
                continue
            else:
                logger.debug('adding sub-requirement %s', sub_req)
                queue.append(sub_req)
                queued.add(sub_req.req)

    if errors:
        raise InstallationError('\n'.join(errors))

    return result
def reqnames(reqs):
    """Collect the set of requirement names from an iterable of requirements."""
    names = set()
    for req in reqs:
        names.add(req.name)
    return names
class FasterInstallCommand(InstallCommand):
    """pip's install command, extended with a --prune/--no-prune option."""

    def __init__(self, *args, **kw):
        super(FasterInstallCommand, self).__init__(*args, **kw)

        cmd_opts = self.cmd_opts
        cmd_opts.add_option(
            '--prune',
            action='store_true',
            dest='prune',
            default=False,
            help='Uninstall any non-required packages.',
        )

        cmd_opts.add_option(
            '--no-prune',
            action='store_false',
            dest='prune',
            help='Do not uninstall any non-required packages.',
        )

    def run(self, options, args):
        """update install options with caching values"""
        if options.prune:
            # snapshot what was installed *before* this run, for pruning later
            previously_installed = pip_get_installed()

        index_urls = [options.index_url] + options.extra_index_urls
        with pipfaster_download_cacher(index_urls):
            requirement_set = super(FasterInstallCommand, self).run(
                options, args,
            )

        required = requirement_set.requirements.values()

        # With extra_index_urls we don't know where the wheel is from
        if not options.extra_index_urls:
            cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)

        if not options.ignore_dependencies:
            # transitive requirements, previously installed, are also required
            # this has a side-effect of finding any missing / conflicting requirements
            required = trace_requirements(required)

        if not options.prune:
            return requirement_set

        extraneous = (
            reqnames(previously_installed) -
            reqnames(required) -
            # the stage1 bootstrap packages
            reqnames(trace_requirements([install_req_from_line('venv-update')])) -
            # See #186
            frozenset(('pkg-resources',))
        )

        if extraneous:
            extraneous = sorted(extraneous)
            pip(('uninstall', '--yes') + tuple(extraneous))
# TODO: Cleanup: remove stale values from the cache and wheelhouse that have not been accessed in a week.
# TODO: a pip_faster.patch module
def patch(attrs, updates):
    """Apply *updates* (name, value pairs) to *attrs*, returning the displaced originals."""
    originals = {}
    for name, new_value in updates:
        originals[name] = attrs[name]
        attrs[name] = new_value
    return originals
@contextmanager
def patched(attrs, updates):
    """A context in which some attributes temporarily have a modified value."""
    orig = patch(attrs, updates.items())
    try:
        yield orig
    finally:
        # restore the original values even if the body raised
        patch(attrs, orig.items())
# END: pip_faster.patch module
def pipfaster_install_prune_option():
    """Context: temporarily route `pip install` to FasterInstallCommand (adds --prune)."""
    return patched(pipmodule._internal.commands.commands_dict, {FasterInstallCommand.name: FasterInstallCommand})
def pipfaster_packagefinder():
    """Provide a short-circuited search when the requirement is pinned and appears on disk.

    Suggested upstream at: https://github.com/pypa/pip/pull/2114
    """
    # A poor man's dependency injection: monkeypatch :(
    try:  # :pragma:nocover: pip>=18.1
        from pip._internal.cli import base_command
    except ImportError:  # :pragma:nocover: pip<18.1
        from pip._internal import basecommand as base_command
    return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
def pipfaster_download_cacher(index_urls):
    """vanilla pip stores a cache of the http session in its cache and not the
    wheel files. We intercept the download and save those files into our
    cache
    """
    from pip._internal import download
    orig = download._download_http_url
    patched_fn = get_patched_download_http_url(orig, index_urls)
    return patched(vars(download), {'_download_http_url': patched_fn})
def main():
    """Entry point: run pip with --prune support and the fast package finder installed."""
    with pipfaster_install_prune_option():
        with pipfaster_packagefinder():
            raise_on_failure(pipmodule._internal.main)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | pip_faster.py | pip | python | def pip(args):
from sys import stdout
stdout.write(colorize(('pip',) + args))
stdout.write('\n')
stdout.flush()
return pipmodule._internal.main(list(args)) | Run pip, in-process. | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L204-L211 | [
"def colorize(cmd):\n from os import isatty\n\n if isatty(1):\n template = '\\033[36m>\\033[m \\033[32m{0}\\033[m'\n else:\n template = '> {0}'\n\n return template.format(shellescape(cmd))\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pip-faster is a thin wrapper around pip.
It only adds a --prune option to the `install` subcommand.
`pip-faster install --prune` will *uninstall* any installed packages that are
not required.
Otherwise, you should find that pip-faster gives the same results as pip, just
more quickly, especially in the case of pinned requirements (e.g.
package-x==1.2.3).
Version control at: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import os
import random
import re
import shutil
import sys
from contextlib import contextmanager
import pip as pipmodule
from pip._internal import logger
from pip._internal.commands.install import InstallCommand
from pip._internal.exceptions import DistributionNotFound
from pip._internal.exceptions import InstallationError
from pip._internal.index import BestVersionAlreadyInstalled
from pip._internal.index import HTMLPage
from pip._internal.index import Link
from pip._internal.index import PackageFinder
from pip._internal.req import InstallRequirement
from pip._internal.wheel import Wheel
from venv_update import colorize
from venv_update import raise_on_failure
from venv_update import timid_relpath
from venv_update import user_cache_dir
# Debian de-vendorizes the version of pip it ships
try: # :pragma:nocover: non-debian
from pip._vendor import pkg_resources
except ImportError: # :pragma:nocover: debian
import pkg_resources
try: # :pragma:nocover: pip>=18.1
from pip._internal.req.constructors import install_req_from_line
except ImportError: # :pragma:nocover: pip<18.1
install_req_from_line = InstallRequirement.from_line
# Thanks six!
PY2 = str is bytes
if PY2: # :pragma:nocover:
_reraise_src = 'def reraise(tp, value, tb=None): raise tp, value, tb'
exec(_reraise_src)
else: # :pragma:nocover:
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
class CACHE(object):
_cache_dir = user_cache_dir()
wheelhouse = os.path.join(_cache_dir, 'pip-faster', 'wheelhouse')
pip_wheelhouse = os.path.join(_cache_dir, 'pip', 'wheels')
def ignorecase_glob(glob):
return ''.join([
'[{}{}]'.format(char.lower(), char.upper())
if char.isalpha() else char
for char in glob
])
def optimistic_wheel_search(req, index_urls):
name = req.name.replace('-', '_').lower()
for index_url in index_urls:
expected_location = os.path.join(
CACHE.wheelhouse, index_url, ignorecase_glob(name) + '-*.whl',
)
for link in glob.glob(expected_location):
link = Link('file:' + link)
wheel = Wheel(link.filename)
if req.specifier.contains(wheel.version) and wheel.supported():
return link
def is_req_pinned(requirement):
if not requirement:
# url-style requirement
return False
for spec in requirement.specifier:
if spec.operator == '==' and not spec.version.endswith('.*'):
return True
return False
class FasterPackageFinder(PackageFinder):
def find_requirement(self, req, upgrade):
if is_req_pinned(req.req):
# if the version is pinned-down by a ==
# first try to use any installed package that satisfies the req
if req.satisfied_by:
logger.info('Faster! pinned requirement already installed.')
raise BestVersionAlreadyInstalled
# then try an optimistic search for a .whl file:
link = optimistic_wheel_search(req.req, self.index_urls)
if link is None:
# The wheel will be built during prepare_files
logger.debug('No wheel found locally for pinned requirement %s', req)
else:
logger.info('Faster! Pinned wheel found, without hitting PyPI.')
return link
else:
# unpinned requirements aren't very notable. only show with -v
logger.info('slow: full search for unpinned requirement %s', req)
# otherwise, do the full network search, per usual
try:
return super(FasterPackageFinder, self).find_requirement(req, upgrade)
except DistributionNotFound:
exc_info = sys.exc_info()
# Best effort: try and install from suitable version on-disk
link = optimistic_wheel_search(req.req, self.index_urls)
if link:
return link
else:
reraise(*exc_info)
def _can_be_cached(package):
return (
package.is_wheel and
# An assertion that we're looking in the pip wheel dir
package.link.path.startswith(CACHE.pip_wheelhouse)
)
def mkdirp(pth):
try:
os.makedirs(pth)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _store_wheel_in_cache(file_path, index_url):
filename = os.path.basename(file_path)
cache = os.path.join(CACHE.wheelhouse, index_url, filename)
cache_tmp = '{}.{}'.format(cache, random.randint(0, sys.maxsize))
cache_dir = os.path.dirname(cache)
mkdirp(cache_dir)
# Atomicity
shutil.copy(file_path, cache_tmp)
os.rename(cache_tmp, cache)
def cache_installed_wheels(index_url, installed_packages):
"""After installation, pip tells us what it installed and from where.
We build a structure that looks like
.cache/pip-faster/wheelhouse/$index_url/$wheel
"""
for installed_package in installed_packages:
if not _can_be_cached(installed_package):
continue
_store_wheel_in_cache(installed_package.link.path, index_url)
def get_patched_download_http_url(orig_download_http_url, index_urls):
def pipfaster_download_http_url(link, *args, **kwargs):
file_path, content_type = orig_download_http_url(link, *args, **kwargs)
if link.is_wheel:
for index_url in index_urls:
if (
# pip <18.1
isinstance(link.comes_from, HTMLPage) and
link.comes_from.url.startswith(index_url)
) or (
# pip >= 18.1
isinstance(link.comes_from, (str, type(''))) and
link.comes_from.startswith(index_url)
):
_store_wheel_in_cache(file_path, index_url)
break
return file_path, content_type
return pipfaster_download_http_url
def dist_to_req(dist):
"""Make a pip.FrozenRequirement from a pkg_resources distribution object"""
try: # :pragma:nocover: (pip>=10)
from pip._internal.operations.freeze import FrozenRequirement
except ImportError: # :pragma:nocover: (pip<10)
from pip import FrozenRequirement
# normalize the casing, dashes in the req name
orig_name, dist.project_name = dist.project_name, dist.key
result = FrozenRequirement.from_dist(dist, [])
# put things back the way we found it.
dist.project_name = orig_name
return result
def pip_get_installed():
"""Code extracted from the middle of the pip freeze command.
FIXME: does not list anything installed via -e
"""
from pip._internal.utils.misc import dist_is_local
return tuple(
dist_to_req(dist)
for dist in fresh_working_set()
if dist_is_local(dist)
if dist.key != 'python' # See #220
)
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
def fresh_working_set():
"""return a pkg_resources "working set", representing the *currently* installed packages"""
class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):
def __init__(self, *args, **kwargs):
self._normalized_name_mapping = {}
super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)
def add_entry(self, entry):
"""Same as the original .add_entry, but sets only=False, so that egg-links are honored."""
logger.debug('working-set entry: %r', entry)
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in pkg_resources.find_distributions(entry, False):
# eggs override anything that's installed normally
# fun fact: pkg_resources.working_set's results depend on the
# ordering of os.listdir since the order of os.listdir is
# entirely arbitrary (an implemenation detail of file system),
# without calling site.main(), an .egg-link file may or may not
# be honored, depending on the filesystem
replace = (dist.precedence == pkg_resources.EGG_DIST)
self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
self.add(dist, entry, False, replace=replace)
def find_normalized(self, req):
req = _package_req_to_pkg_resources_req(str(req))
req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
return self.find(req)
return WorkingSetPlusEditableInstalls()
def req_cycle(req):
"""is this requirement cyclic?"""
cls = req.__class__
seen = {req.name}
while isinstance(req.comes_from, cls):
req = req.comes_from
if req.name in seen:
return True
else:
seen.add(req.name)
return False
def pretty_req(req):
"""
return a copy of a pip requirement that is a bit more readable,
at the expense of removing some of its data
"""
from copy import copy
req = copy(req)
req.link = None
req.satisfied_by = None
return req
def _package_req_to_pkg_resources_req(req):
return pkg_resources.Requirement.parse(str(req))
def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
"""
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result
def reqnames(reqs):
return {req.name for req in reqs}
class FasterInstallCommand(InstallCommand):
def __init__(self, *args, **kw):
super(FasterInstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--prune',
action='store_true',
dest='prune',
default=False,
help='Uninstall any non-required packages.',
)
cmd_opts.add_option(
'--no-prune',
action='store_false',
dest='prune',
help='Do not uninstall any non-required packages.',
)
def run(self, options, args):
"""update install options with caching values"""
if options.prune:
previously_installed = pip_get_installed()
index_urls = [options.index_url] + options.extra_index_urls
with pipfaster_download_cacher(index_urls):
requirement_set = super(FasterInstallCommand, self).run(
options, args,
)
required = requirement_set.requirements.values()
# With extra_index_urls we don't know where the wheel is from
if not options.extra_index_urls:
cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
if not options.ignore_dependencies:
# transitive requirements, previously installed, are also required
# this has a side-effect of finding any missing / conflicting requirements
required = trace_requirements(required)
if not options.prune:
return requirement_set
extraneous = (
reqnames(previously_installed) -
reqnames(required) -
# the stage1 bootstrap packages
reqnames(trace_requirements([install_req_from_line('venv-update')])) -
# See #186
frozenset(('pkg-resources',))
)
if extraneous:
extraneous = sorted(extraneous)
pip(('uninstall', '--yes') + tuple(extraneous))
# TODO: Cleanup: remove stale values from the cache and wheelhouse that have not been accessed in a week.
# TODO: a pip_faster.patch module
def patch(attrs, updates):
"""Perform a set of updates to a attribute dictionary, return the original values."""
orig = {}
for attr, value in updates:
orig[attr] = attrs[attr]
attrs[attr] = value
return orig
@contextmanager
def patched(attrs, updates):
"""A context in which some attributes temporarily have a modified value."""
orig = patch(attrs, updates.items())
try:
yield orig
finally:
patch(attrs, orig.items())
# END: pip_faster.patch module
def pipfaster_install_prune_option():
return patched(pipmodule._internal.commands.commands_dict, {FasterInstallCommand.name: FasterInstallCommand})
def pipfaster_packagefinder():
"""Provide a short-circuited search when the requirement is pinned and appears on disk.
Suggested upstream at: https://github.com/pypa/pip/pull/2114
"""
# A poor man's dependency injection: monkeypatch :(
try: # :pragma:nocover: pip>=18.1
from pip._internal.cli import base_command
except ImportError: # :pragma:nocover: pip<18.1
from pip._internal import basecommand as base_command
return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
def pipfaster_download_cacher(index_urls):
"""vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache
"""
from pip._internal import download
orig = download._download_http_url
patched_fn = get_patched_download_http_url(orig, index_urls)
return patched(vars(download), {'_download_http_url': patched_fn})
def main():
with pipfaster_install_prune_option():
with pipfaster_packagefinder():
raise_on_failure(pipmodule._internal.main)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | pip_faster.py | dist_to_req | python | def dist_to_req(dist):
try: # :pragma:nocover: (pip>=10)
from pip._internal.operations.freeze import FrozenRequirement
except ImportError: # :pragma:nocover: (pip<10)
from pip import FrozenRequirement
# normalize the casing, dashes in the req name
orig_name, dist.project_name = dist.project_name, dist.key
result = FrozenRequirement.from_dist(dist, [])
# put things back the way we found it.
dist.project_name = orig_name
return result | Make a pip.FrozenRequirement from a pkg_resources distribution object | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L214-L227 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pip-faster is a thin wrapper around pip.
It only adds a --prune option to the `install` subcommand.
`pip-faster install --prune` will *uninstall* any installed packages that are
not required.
Otherwise, you should find that pip-faster gives the same results as pip, just
more quickly, especially in the case of pinned requirements (e.g.
package-x==1.2.3).
Version control at: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import os
import random
import re
import shutil
import sys
from contextlib import contextmanager
import pip as pipmodule
from pip._internal import logger
from pip._internal.commands.install import InstallCommand
from pip._internal.exceptions import DistributionNotFound
from pip._internal.exceptions import InstallationError
from pip._internal.index import BestVersionAlreadyInstalled
from pip._internal.index import HTMLPage
from pip._internal.index import Link
from pip._internal.index import PackageFinder
from pip._internal.req import InstallRequirement
from pip._internal.wheel import Wheel
from venv_update import colorize
from venv_update import raise_on_failure
from venv_update import timid_relpath
from venv_update import user_cache_dir
# Debian de-vendorizes the version of pip it ships
try: # :pragma:nocover: non-debian
from pip._vendor import pkg_resources
except ImportError: # :pragma:nocover: debian
import pkg_resources
try: # :pragma:nocover: pip>=18.1
from pip._internal.req.constructors import install_req_from_line
except ImportError: # :pragma:nocover: pip<18.1
install_req_from_line = InstallRequirement.from_line
# Thanks six!
PY2 = str is bytes
if PY2: # :pragma:nocover:
_reraise_src = 'def reraise(tp, value, tb=None): raise tp, value, tb'
exec(_reraise_src)
else: # :pragma:nocover:
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
class CACHE(object):
_cache_dir = user_cache_dir()
wheelhouse = os.path.join(_cache_dir, 'pip-faster', 'wheelhouse')
pip_wheelhouse = os.path.join(_cache_dir, 'pip', 'wheels')
def ignorecase_glob(glob):
return ''.join([
'[{}{}]'.format(char.lower(), char.upper())
if char.isalpha() else char
for char in glob
])
def optimistic_wheel_search(req, index_urls):
name = req.name.replace('-', '_').lower()
for index_url in index_urls:
expected_location = os.path.join(
CACHE.wheelhouse, index_url, ignorecase_glob(name) + '-*.whl',
)
for link in glob.glob(expected_location):
link = Link('file:' + link)
wheel = Wheel(link.filename)
if req.specifier.contains(wheel.version) and wheel.supported():
return link
def is_req_pinned(requirement):
if not requirement:
# url-style requirement
return False
for spec in requirement.specifier:
if spec.operator == '==' and not spec.version.endswith('.*'):
return True
return False
class FasterPackageFinder(PackageFinder):
def find_requirement(self, req, upgrade):
if is_req_pinned(req.req):
# if the version is pinned-down by a ==
# first try to use any installed package that satisfies the req
if req.satisfied_by:
logger.info('Faster! pinned requirement already installed.')
raise BestVersionAlreadyInstalled
# then try an optimistic search for a .whl file:
link = optimistic_wheel_search(req.req, self.index_urls)
if link is None:
# The wheel will be built during prepare_files
logger.debug('No wheel found locally for pinned requirement %s', req)
else:
logger.info('Faster! Pinned wheel found, without hitting PyPI.')
return link
else:
# unpinned requirements aren't very notable. only show with -v
logger.info('slow: full search for unpinned requirement %s', req)
# otherwise, do the full network search, per usual
try:
return super(FasterPackageFinder, self).find_requirement(req, upgrade)
except DistributionNotFound:
exc_info = sys.exc_info()
# Best effort: try and install from suitable version on-disk
link = optimistic_wheel_search(req.req, self.index_urls)
if link:
return link
else:
reraise(*exc_info)
def _can_be_cached(package):
return (
package.is_wheel and
# An assertion that we're looking in the pip wheel dir
package.link.path.startswith(CACHE.pip_wheelhouse)
)
def mkdirp(pth):
try:
os.makedirs(pth)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _store_wheel_in_cache(file_path, index_url):
filename = os.path.basename(file_path)
cache = os.path.join(CACHE.wheelhouse, index_url, filename)
cache_tmp = '{}.{}'.format(cache, random.randint(0, sys.maxsize))
cache_dir = os.path.dirname(cache)
mkdirp(cache_dir)
# Atomicity
shutil.copy(file_path, cache_tmp)
os.rename(cache_tmp, cache)
def cache_installed_wheels(index_url, installed_packages):
"""After installation, pip tells us what it installed and from where.
We build a structure that looks like
.cache/pip-faster/wheelhouse/$index_url/$wheel
"""
for installed_package in installed_packages:
if not _can_be_cached(installed_package):
continue
_store_wheel_in_cache(installed_package.link.path, index_url)
def get_patched_download_http_url(orig_download_http_url, index_urls):
def pipfaster_download_http_url(link, *args, **kwargs):
file_path, content_type = orig_download_http_url(link, *args, **kwargs)
if link.is_wheel:
for index_url in index_urls:
if (
# pip <18.1
isinstance(link.comes_from, HTMLPage) and
link.comes_from.url.startswith(index_url)
) or (
# pip >= 18.1
isinstance(link.comes_from, (str, type(''))) and
link.comes_from.startswith(index_url)
):
_store_wheel_in_cache(file_path, index_url)
break
return file_path, content_type
return pipfaster_download_http_url
def pip(args):
"""Run pip, in-process."""
from sys import stdout
stdout.write(colorize(('pip',) + args))
stdout.write('\n')
stdout.flush()
return pipmodule._internal.main(list(args))
def pip_get_installed():
"""Code extracted from the middle of the pip freeze command.
FIXME: does not list anything installed via -e
"""
from pip._internal.utils.misc import dist_is_local
return tuple(
dist_to_req(dist)
for dist in fresh_working_set()
if dist_is_local(dist)
if dist.key != 'python' # See #220
)
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
def fresh_working_set():
"""return a pkg_resources "working set", representing the *currently* installed packages"""
class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):
def __init__(self, *args, **kwargs):
self._normalized_name_mapping = {}
super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)
def add_entry(self, entry):
"""Same as the original .add_entry, but sets only=False, so that egg-links are honored."""
logger.debug('working-set entry: %r', entry)
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in pkg_resources.find_distributions(entry, False):
# eggs override anything that's installed normally
# fun fact: pkg_resources.working_set's results depend on the
# ordering of os.listdir since the order of os.listdir is
# entirely arbitrary (an implemenation detail of file system),
# without calling site.main(), an .egg-link file may or may not
# be honored, depending on the filesystem
replace = (dist.precedence == pkg_resources.EGG_DIST)
self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
self.add(dist, entry, False, replace=replace)
def find_normalized(self, req):
req = _package_req_to_pkg_resources_req(str(req))
req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
return self.find(req)
return WorkingSetPlusEditableInstalls()
def req_cycle(req):
"""is this requirement cyclic?"""
cls = req.__class__
seen = {req.name}
while isinstance(req.comes_from, cls):
req = req.comes_from
if req.name in seen:
return True
else:
seen.add(req.name)
return False
def pretty_req(req):
"""
return a copy of a pip requirement that is a bit more readable,
at the expense of removing some of its data
"""
from copy import copy
req = copy(req)
req.link = None
req.satisfied_by = None
return req
def _package_req_to_pkg_resources_req(req):
return pkg_resources.Requirement.parse(str(req))
def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
"""
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result
def reqnames(reqs):
return {req.name for req in reqs}
class FasterInstallCommand(InstallCommand):
def __init__(self, *args, **kw):
super(FasterInstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--prune',
action='store_true',
dest='prune',
default=False,
help='Uninstall any non-required packages.',
)
cmd_opts.add_option(
'--no-prune',
action='store_false',
dest='prune',
help='Do not uninstall any non-required packages.',
)
def run(self, options, args):
"""update install options with caching values"""
if options.prune:
previously_installed = pip_get_installed()
index_urls = [options.index_url] + options.extra_index_urls
with pipfaster_download_cacher(index_urls):
requirement_set = super(FasterInstallCommand, self).run(
options, args,
)
required = requirement_set.requirements.values()
# With extra_index_urls we don't know where the wheel is from
if not options.extra_index_urls:
cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
if not options.ignore_dependencies:
# transitive requirements, previously installed, are also required
# this has a side-effect of finding any missing / conflicting requirements
required = trace_requirements(required)
if not options.prune:
return requirement_set
extraneous = (
reqnames(previously_installed) -
reqnames(required) -
# the stage1 bootstrap packages
reqnames(trace_requirements([install_req_from_line('venv-update')])) -
# See #186
frozenset(('pkg-resources',))
)
if extraneous:
extraneous = sorted(extraneous)
pip(('uninstall', '--yes') + tuple(extraneous))
# TODO: Cleanup: remove stale values from the cache and wheelhouse that have not been accessed in a week.
# TODO: a pip_faster.patch module
def patch(attrs, updates):
"""Perform a set of updates to a attribute dictionary, return the original values."""
orig = {}
for attr, value in updates:
orig[attr] = attrs[attr]
attrs[attr] = value
return orig
@contextmanager
def patched(attrs, updates):
"""A context in which some attributes temporarily have a modified value."""
orig = patch(attrs, updates.items())
try:
yield orig
finally:
patch(attrs, orig.items())
# END: pip_faster.patch module
def pipfaster_install_prune_option():
return patched(pipmodule._internal.commands.commands_dict, {FasterInstallCommand.name: FasterInstallCommand})
def pipfaster_packagefinder():
"""Provide a short-circuited search when the requirement is pinned and appears on disk.
Suggested upstream at: https://github.com/pypa/pip/pull/2114
"""
# A poor man's dependency injection: monkeypatch :(
try: # :pragma:nocover: pip>=18.1
from pip._internal.cli import base_command
except ImportError: # :pragma:nocover: pip<18.1
from pip._internal import basecommand as base_command
return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
def pipfaster_download_cacher(index_urls):
"""vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache
"""
from pip._internal import download
orig = download._download_http_url
patched_fn = get_patched_download_http_url(orig, index_urls)
return patched(vars(download), {'_download_http_url': patched_fn})
def main():
with pipfaster_install_prune_option():
with pipfaster_packagefinder():
raise_on_failure(pipmodule._internal.main)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | pip_faster.py | pip_get_installed | python | def pip_get_installed():
from pip._internal.utils.misc import dist_is_local
return tuple(
dist_to_req(dist)
for dist in fresh_working_set()
if dist_is_local(dist)
if dist.key != 'python' # See #220
) | Code extracted from the middle of the pip freeze command.
FIXME: does not list anything installed via -e | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L230-L241 | [
"def fresh_working_set():\n \"\"\"return a pkg_resources \"working set\", representing the *currently* installed packages\"\"\"\n class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):\n\n def __init__(self, *args, **kwargs):\n self._normalized_name_mapping = {}\n super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)\n\n def add_entry(self, entry):\n \"\"\"Same as the original .add_entry, but sets only=False, so that egg-links are honored.\"\"\"\n logger.debug('working-set entry: %r', entry)\n self.entry_keys.setdefault(entry, [])\n self.entries.append(entry)\n for dist in pkg_resources.find_distributions(entry, False):\n\n # eggs override anything that's installed normally\n # fun fact: pkg_resources.working_set's results depend on the\n # ordering of os.listdir since the order of os.listdir is\n # entirely arbitrary (an implemenation detail of file system),\n # without calling site.main(), an .egg-link file may or may not\n # be honored, depending on the filesystem\n replace = (dist.precedence == pkg_resources.EGG_DIST)\n self._normalized_name_mapping[normalize_name(dist.key)] = dist.key\n self.add(dist, entry, False, replace=replace)\n\n def find_normalized(self, req):\n req = _package_req_to_pkg_resources_req(str(req))\n req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)\n return self.find(req)\n\n return WorkingSetPlusEditableInstalls()\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pip-faster is a thin wrapper around pip.
It only adds a --prune option to the `install` subcommand.
`pip-faster install --prune` will *uninstall* any installed packages that are
not required.
Otherwise, you should find that pip-faster gives the same results as pip, just
more quickly, especially in the case of pinned requirements (e.g.
package-x==1.2.3).
Version control at: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import os
import random
import re
import shutil
import sys
from contextlib import contextmanager
import pip as pipmodule
from pip._internal import logger
from pip._internal.commands.install import InstallCommand
from pip._internal.exceptions import DistributionNotFound
from pip._internal.exceptions import InstallationError
from pip._internal.index import BestVersionAlreadyInstalled
from pip._internal.index import HTMLPage
from pip._internal.index import Link
from pip._internal.index import PackageFinder
from pip._internal.req import InstallRequirement
from pip._internal.wheel import Wheel
from venv_update import colorize
from venv_update import raise_on_failure
from venv_update import timid_relpath
from venv_update import user_cache_dir
# Debian de-vendorizes the version of pip it ships
try: # :pragma:nocover: non-debian
from pip._vendor import pkg_resources
except ImportError: # :pragma:nocover: debian
import pkg_resources
try: # :pragma:nocover: pip>=18.1
from pip._internal.req.constructors import install_req_from_line
except ImportError: # :pragma:nocover: pip<18.1
install_req_from_line = InstallRequirement.from_line
# Thanks six!
PY2 = str is bytes
if PY2: # :pragma:nocover:
_reraise_src = 'def reraise(tp, value, tb=None): raise tp, value, tb'
exec(_reraise_src)
else: # :pragma:nocover:
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
class CACHE(object):
_cache_dir = user_cache_dir()
wheelhouse = os.path.join(_cache_dir, 'pip-faster', 'wheelhouse')
pip_wheelhouse = os.path.join(_cache_dir, 'pip', 'wheels')
def ignorecase_glob(glob):
return ''.join([
'[{}{}]'.format(char.lower(), char.upper())
if char.isalpha() else char
for char in glob
])
def optimistic_wheel_search(req, index_urls):
name = req.name.replace('-', '_').lower()
for index_url in index_urls:
expected_location = os.path.join(
CACHE.wheelhouse, index_url, ignorecase_glob(name) + '-*.whl',
)
for link in glob.glob(expected_location):
link = Link('file:' + link)
wheel = Wheel(link.filename)
if req.specifier.contains(wheel.version) and wheel.supported():
return link
def is_req_pinned(requirement):
if not requirement:
# url-style requirement
return False
for spec in requirement.specifier:
if spec.operator == '==' and not spec.version.endswith('.*'):
return True
return False
class FasterPackageFinder(PackageFinder):
def find_requirement(self, req, upgrade):
if is_req_pinned(req.req):
# if the version is pinned-down by a ==
# first try to use any installed package that satisfies the req
if req.satisfied_by:
logger.info('Faster! pinned requirement already installed.')
raise BestVersionAlreadyInstalled
# then try an optimistic search for a .whl file:
link = optimistic_wheel_search(req.req, self.index_urls)
if link is None:
# The wheel will be built during prepare_files
logger.debug('No wheel found locally for pinned requirement %s', req)
else:
logger.info('Faster! Pinned wheel found, without hitting PyPI.')
return link
else:
# unpinned requirements aren't very notable. only show with -v
logger.info('slow: full search for unpinned requirement %s', req)
# otherwise, do the full network search, per usual
try:
return super(FasterPackageFinder, self).find_requirement(req, upgrade)
except DistributionNotFound:
exc_info = sys.exc_info()
# Best effort: try and install from suitable version on-disk
link = optimistic_wheel_search(req.req, self.index_urls)
if link:
return link
else:
reraise(*exc_info)
def _can_be_cached(package):
    """A package is cacheable only when it is a wheel sitting in pip's own wheel dir."""
    if not package.is_wheel:
        return False
    # An assertion that we're looking in the pip wheel dir
    return package.link.path.startswith(CACHE.pip_wheelhouse)
def mkdirp(pth):
    """``mkdir -p``: create *pth* and any missing parents; an existing dir is fine."""
    try:
        os.makedirs(pth)
    except OSError as error:
        # Only "already exists" is benign; anything else propagates.
        if error.errno != errno.EEXIST:
            raise
def _store_wheel_in_cache(file_path, index_url):
    """Copy the wheel at *file_path* into the wheelhouse for *index_url*.

    The copy is staged under a unique temporary name in the destination
    directory and then ``os.rename``d into place, so concurrent readers
    never observe a partially-written wheel.
    """
    wheel_name = os.path.basename(file_path)
    destination = os.path.join(CACHE.wheelhouse, index_url, wheel_name)
    mkdirp(os.path.dirname(destination))
    # Atomicity: stage under a random suffix, then rename into place.
    staging_path = '{}.{}'.format(destination, random.randint(0, sys.maxsize))
    shutil.copy(file_path, staging_path)
    os.rename(staging_path, destination)
def cache_installed_wheels(index_url, installed_packages):
    """After installation, pip tells us what it installed and from where.

    We build a structure that looks like
        .cache/pip-faster/wheelhouse/$index_url/$wheel
    """
    for package in installed_packages:
        if _can_be_cached(package):
            _store_wheel_in_cache(package.link.path, index_url)
def get_patched_download_http_url(orig_download_http_url, index_urls):
    """Wrap pip's ``_download_http_url`` so downloaded wheels are cached.

    Returns a replacement function that delegates to
    *orig_download_http_url* and, when the downloaded link is a wheel
    originating from one of *index_urls*, copies the wheel file into the
    pip-faster wheelhouse for that index.
    """
    def pipfaster_download_http_url(link, *args, **kwargs):
        file_path, content_type = orig_download_http_url(link, *args, **kwargs)
        if link.is_wheel:
            for index_url in index_urls:
                if (
                    # pip <18.1: link.comes_from is an HTMLPage object
                    isinstance(link.comes_from, HTMLPage) and
                    link.comes_from.url.startswith(index_url)
                ) or (
                    # pip >= 18.1: link.comes_from is a plain URL string
                    isinstance(link.comes_from, (str, type(''))) and
                    link.comes_from.startswith(index_url)
                ):
                    _store_wheel_in_cache(file_path, index_url)
                    break
        return file_path, content_type
    return pipfaster_download_http_url
def pip(args):
    """Run pip, in-process, after echoing the colorized command line."""
    sys.stdout.write(colorize(('pip',) + args))
    sys.stdout.write('\n')
    sys.stdout.flush()
    return pipmodule._internal.main(list(args))
def dist_to_req(dist):
    """Make a pip.FrozenRequirement from a pkg_resources distribution object."""
    try:  # :pragma:nocover: (pip>=10)
        from pip._internal.operations.freeze import FrozenRequirement
    except ImportError:  # :pragma:nocover: (pip<10)
        from pip import FrozenRequirement
    # Temporarily normalize the casing and dashes in the req name
    # (dist.key is the normalized form of dist.project_name).
    orig_name, dist.project_name = dist.project_name, dist.key
    try:
        return FrozenRequirement.from_dist(dist, [])
    finally:
        # Restore even when from_dist raises: *dist* is a shared
        # pkg_resources object and must not stay mutated on failure.
        dist.project_name = orig_name
def normalize_name(name):
    """Normalize a python package name a la PEP 503."""
    # https://www.python.org/dev/peps/pep-0503/#normalized-names
    collapsed = re.sub(r'[-_.]+', '-', name)
    return collapsed.lower()
def fresh_working_set():
    """return a pkg_resources "working set", representing the *currently* installed packages"""
    class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):
        def __init__(self, *args, **kwargs):
            # maps PEP 503-normalized names -> the key pkg_resources uses
            self._normalized_name_mapping = {}
            super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)

        def add_entry(self, entry):
            """Same as the original .add_entry, but sets only=False, so that egg-links are honored."""
            logger.debug('working-set entry: %r', entry)
            self.entry_keys.setdefault(entry, [])
            self.entries.append(entry)
            for dist in pkg_resources.find_distributions(entry, False):
                # eggs override anything that's installed normally
                # fun fact: pkg_resources.working_set's results depend on the
                # ordering of os.listdir since the order of os.listdir is
                # entirely arbitrary (an implemenation detail of file system),
                # without calling site.main(), an .egg-link file may or may not
                # be honored, depending on the filesystem
                replace = (dist.precedence == pkg_resources.EGG_DIST)
                self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
                self.add(dist, entry, False, replace=replace)

        def find_normalized(self, req):
            # Re-parse so we can rewrite the key without mutating the caller's req.
            req = _package_req_to_pkg_resources_req(str(req))
            req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
            return self.find(req)

    return WorkingSetPlusEditableInstalls()
def req_cycle(req):
    """is this requirement cyclic?

    Walks the ``comes_from`` chain (as long as each parent is the same
    class) and reports True when a requirement name repeats.
    """
    cls = req.__class__
    seen = {req.name}
    node = req.comes_from
    while isinstance(node, cls):
        if node.name in seen:
            return True
        seen.add(node.name)
        node = node.comes_from
    return False
def pretty_req(req):
    """
    return a copy of a pip requirement that is a bit more readable,
    at the expense of removing some of its data
    """
    from copy import copy
    clone = copy(req)
    # Drop the noisy fields; the original req is left untouched.
    clone.link = None
    clone.satisfied_by = None
    return clone
def _package_req_to_pkg_resources_req(req):
    """Round-trip *req* through its string form into a pkg_resources Requirement."""
    return pkg_resources.Requirement.parse(str(req))
def trace_requirements(requirements):
    """given an iterable of pip InstallRequirements,
    return the set of required packages, given their transitive requirements.

    Returns a list of FrozenRequirement for every (transitively) required
    installed distribution.  Raises InstallationError if any requirement
    conflicts with what is actually installed.
    """
    requirements = tuple(pretty_req(r) for r in requirements)
    working_set = fresh_working_set()

    # breadth-first traversal:
    from collections import deque
    queue = deque(requirements)
    # `queued` prevents re-visiting a requirement already scheduled
    queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
    errors = []
    result = []
    while queue:
        req = queue.popleft()
        logger.debug('tracing: %s', req)
        try:
            dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
        except pkg_resources.VersionConflict as conflict:
            # Record the conflict but keep tracing with the installed dist.
            dist = conflict.args[0]
            errors.append('Error: version conflict: {} ({}) <-> {}'.format(
                dist, timid_relpath(dist.location), req
            ))
        assert dist is not None, 'Should be unreachable in pip8+'
        result.append(dist_to_req(dist))

        # TODO: pip does no validation of extras. should we?
        extras = [extra for extra in req.extras if extra in dist.extras]
        # sorted for deterministic traversal order
        for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
            sub_req = InstallRequirement(sub_req, req)
            if req_cycle(sub_req):
                logger.warning('Circular dependency! %s', sub_req)
                continue
            elif sub_req.req in queued:
                logger.debug('already queued: %s', sub_req)
                continue
            else:
                logger.debug('adding sub-requirement %s', sub_req)
                queue.append(sub_req)
                queued.add(sub_req.req)

    if errors:
        raise InstallationError('\n'.join(errors))

    return result
def reqnames(reqs):
    """Return the set of ``.name`` attributes of the given requirements."""
    return set(req.name for req in reqs)
class FasterInstallCommand(InstallCommand):
    """pip's ``install`` command plus a ``--prune`` option.

    With ``--prune``, any installed package that is not (transitively)
    required after the install is uninstalled afterwards.
    """

    def __init__(self, *args, **kw):
        super(FasterInstallCommand, self).__init__(*args, **kw)

        cmd_opts = self.cmd_opts
        cmd_opts.add_option(
            '--prune',
            action='store_true',
            dest='prune',
            default=False,
            help='Uninstall any non-required packages.',
        )

        cmd_opts.add_option(
            '--no-prune',
            action='store_false',
            dest='prune',
            help='Do not uninstall any non-required packages.',
        )

    def run(self, options, args):
        """update install options with caching values"""
        if options.prune:
            # Snapshot what is installed *before* the install so we can
            # later compute what has become extraneous.
            previously_installed = pip_get_installed()

        index_urls = [options.index_url] + options.extra_index_urls
        # Intercept wheel downloads so they land in our wheelhouse too.
        with pipfaster_download_cacher(index_urls):
            requirement_set = super(FasterInstallCommand, self).run(
                options, args,
            )

        required = requirement_set.requirements.values()

        # With extra_index_urls we don't know where the wheel is from
        if not options.extra_index_urls:
            cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)

        if not options.ignore_dependencies:
            # transitive requirements, previously installed, are also required
            # this has a side-effect of finding any missing / conflicting requirements
            required = trace_requirements(required)

        if not options.prune:
            return requirement_set

        extraneous = (
            reqnames(previously_installed) -
            reqnames(required) -
            # the stage1 bootstrap packages
            reqnames(trace_requirements([install_req_from_line('venv-update')])) -
            # See #186
            frozenset(('pkg-resources',))
        )

        if extraneous:
            extraneous = sorted(extraneous)
            pip(('uninstall', '--yes') + tuple(extraneous))
# TODO: Cleanup: remove stale values from the cache and wheelhouse that have not been accessed in a week.
# TODO: a pip_faster.patch module
def patch(attrs, updates):
    """Apply (attr, value) pairs to the *attrs* mapping; return the displaced values."""
    displaced = {}
    for attr, value in updates:
        displaced[attr] = attrs[attr]
        attrs[attr] = value
    return displaced
@contextmanager
def patched(attrs, updates):
    """A context in which some attributes temporarily have a modified value."""
    displaced = patch(attrs, updates.items())
    try:
        yield displaced
    finally:
        # Restore whatever we displaced, even when the body raised.
        patch(attrs, displaced.items())
# END: pip_faster.patch module
def pipfaster_install_prune_option():
    """Context manager: register FasterInstallCommand as pip's 'install' command.

    NOTE(review): relies on ``FasterInstallCommand.name`` being inherited
    from pip's InstallCommand -- verify against the pinned pip version.
    """
    return patched(pipmodule._internal.commands.commands_dict, {FasterInstallCommand.name: FasterInstallCommand})
def pipfaster_packagefinder():
    """Provide a short-circuited search when the requirement is pinned and appears on disk.

    Suggested upstream at: https://github.com/pypa/pip/pull/2114
    """
    # A poor man's dependency injection: monkeypatch :(
    try:  # :pragma:nocover: pip>=18.1
        from pip._internal.cli import base_command
    except ImportError:  # :pragma:nocover: pip<18.1
        from pip._internal import basecommand as base_command
    # Swap in our finder for the duration of the returned context.
    return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
def pipfaster_download_cacher(index_urls):
    """vanilla pip stores a cache of the http session in its cache and not the
    wheel files. We intercept the download and save those files into our
    cache

    Returns a ``patched`` context manager that swaps pip's
    ``_download_http_url`` for the caching wrapper while active.
    """
    from pip._internal import download
    orig = download._download_http_url
    patched_fn = get_patched_download_http_url(orig, index_urls)
    return patched(vars(download), {'_download_http_url': patched_fn})
def main():
    """Entry point: run pip's CLI with the pip-faster patches applied."""
    with pipfaster_install_prune_option():
        with pipfaster_packagefinder():
            raise_on_failure(pipmodule._internal.main)
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    exit(main())
|
Yelp/venv-update | pip_faster.py | fresh_working_set | python | def fresh_working_set():
class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):
def __init__(self, *args, **kwargs):
self._normalized_name_mapping = {}
super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)
def add_entry(self, entry):
"""Same as the original .add_entry, but sets only=False, so that egg-links are honored."""
logger.debug('working-set entry: %r', entry)
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in pkg_resources.find_distributions(entry, False):
# eggs override anything that's installed normally
# fun fact: pkg_resources.working_set's results depend on the
# ordering of os.listdir since the order of os.listdir is
# entirely arbitrary (an implemenation detail of file system),
# without calling site.main(), an .egg-link file may or may not
# be honored, depending on the filesystem
replace = (dist.precedence == pkg_resources.EGG_DIST)
self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
self.add(dist, entry, False, replace=replace)
def find_normalized(self, req):
req = _package_req_to_pkg_resources_req(str(req))
req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
return self.find(req)
return WorkingSetPlusEditableInstalls() | return a pkg_resources "working set", representing the *currently* installed packages | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L250-L280 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pip-faster is a thin wrapper around pip.
It only adds a --prune option to the `install` subcommand.
`pip-faster install --prune` will *uninstall* any installed packages that are
not required.
Otherwise, you should find that pip-faster gives the same results as pip, just
more quickly, especially in the case of pinned requirements (e.g.
package-x==1.2.3).
Version control at: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import os
import random
import re
import shutil
import sys
from contextlib import contextmanager
import pip as pipmodule
from pip._internal import logger
from pip._internal.commands.install import InstallCommand
from pip._internal.exceptions import DistributionNotFound
from pip._internal.exceptions import InstallationError
from pip._internal.index import BestVersionAlreadyInstalled
from pip._internal.index import HTMLPage
from pip._internal.index import Link
from pip._internal.index import PackageFinder
from pip._internal.req import InstallRequirement
from pip._internal.wheel import Wheel
from venv_update import colorize
from venv_update import raise_on_failure
from venv_update import timid_relpath
from venv_update import user_cache_dir
# Debian de-vendorizes the version of pip it ships
try: # :pragma:nocover: non-debian
from pip._vendor import pkg_resources
except ImportError: # :pragma:nocover: debian
import pkg_resources
try: # :pragma:nocover: pip>=18.1
from pip._internal.req.constructors import install_req_from_line
except ImportError: # :pragma:nocover: pip<18.1
install_req_from_line = InstallRequirement.from_line
# Thanks six!
PY2 = str is bytes
if PY2: # :pragma:nocover:
_reraise_src = 'def reraise(tp, value, tb=None): raise tp, value, tb'
exec(_reraise_src)
else: # :pragma:nocover:
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
class CACHE(object):
_cache_dir = user_cache_dir()
wheelhouse = os.path.join(_cache_dir, 'pip-faster', 'wheelhouse')
pip_wheelhouse = os.path.join(_cache_dir, 'pip', 'wheels')
def ignorecase_glob(glob):
return ''.join([
'[{}{}]'.format(char.lower(), char.upper())
if char.isalpha() else char
for char in glob
])
def optimistic_wheel_search(req, index_urls):
name = req.name.replace('-', '_').lower()
for index_url in index_urls:
expected_location = os.path.join(
CACHE.wheelhouse, index_url, ignorecase_glob(name) + '-*.whl',
)
for link in glob.glob(expected_location):
link = Link('file:' + link)
wheel = Wheel(link.filename)
if req.specifier.contains(wheel.version) and wheel.supported():
return link
def is_req_pinned(requirement):
if not requirement:
# url-style requirement
return False
for spec in requirement.specifier:
if spec.operator == '==' and not spec.version.endswith('.*'):
return True
return False
class FasterPackageFinder(PackageFinder):
def find_requirement(self, req, upgrade):
if is_req_pinned(req.req):
# if the version is pinned-down by a ==
# first try to use any installed package that satisfies the req
if req.satisfied_by:
logger.info('Faster! pinned requirement already installed.')
raise BestVersionAlreadyInstalled
# then try an optimistic search for a .whl file:
link = optimistic_wheel_search(req.req, self.index_urls)
if link is None:
# The wheel will be built during prepare_files
logger.debug('No wheel found locally for pinned requirement %s', req)
else:
logger.info('Faster! Pinned wheel found, without hitting PyPI.')
return link
else:
# unpinned requirements aren't very notable. only show with -v
logger.info('slow: full search for unpinned requirement %s', req)
# otherwise, do the full network search, per usual
try:
return super(FasterPackageFinder, self).find_requirement(req, upgrade)
except DistributionNotFound:
exc_info = sys.exc_info()
# Best effort: try and install from suitable version on-disk
link = optimistic_wheel_search(req.req, self.index_urls)
if link:
return link
else:
reraise(*exc_info)
def _can_be_cached(package):
return (
package.is_wheel and
# An assertion that we're looking in the pip wheel dir
package.link.path.startswith(CACHE.pip_wheelhouse)
)
def mkdirp(pth):
try:
os.makedirs(pth)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _store_wheel_in_cache(file_path, index_url):
filename = os.path.basename(file_path)
cache = os.path.join(CACHE.wheelhouse, index_url, filename)
cache_tmp = '{}.{}'.format(cache, random.randint(0, sys.maxsize))
cache_dir = os.path.dirname(cache)
mkdirp(cache_dir)
# Atomicity
shutil.copy(file_path, cache_tmp)
os.rename(cache_tmp, cache)
def cache_installed_wheels(index_url, installed_packages):
"""After installation, pip tells us what it installed and from where.
We build a structure that looks like
.cache/pip-faster/wheelhouse/$index_url/$wheel
"""
for installed_package in installed_packages:
if not _can_be_cached(installed_package):
continue
_store_wheel_in_cache(installed_package.link.path, index_url)
def get_patched_download_http_url(orig_download_http_url, index_urls):
def pipfaster_download_http_url(link, *args, **kwargs):
file_path, content_type = orig_download_http_url(link, *args, **kwargs)
if link.is_wheel:
for index_url in index_urls:
if (
# pip <18.1
isinstance(link.comes_from, HTMLPage) and
link.comes_from.url.startswith(index_url)
) or (
# pip >= 18.1
isinstance(link.comes_from, (str, type(''))) and
link.comes_from.startswith(index_url)
):
_store_wheel_in_cache(file_path, index_url)
break
return file_path, content_type
return pipfaster_download_http_url
def pip(args):
"""Run pip, in-process."""
from sys import stdout
stdout.write(colorize(('pip',) + args))
stdout.write('\n')
stdout.flush()
return pipmodule._internal.main(list(args))
def dist_to_req(dist):
"""Make a pip.FrozenRequirement from a pkg_resources distribution object"""
try: # :pragma:nocover: (pip>=10)
from pip._internal.operations.freeze import FrozenRequirement
except ImportError: # :pragma:nocover: (pip<10)
from pip import FrozenRequirement
# normalize the casing, dashes in the req name
orig_name, dist.project_name = dist.project_name, dist.key
result = FrozenRequirement.from_dist(dist, [])
# put things back the way we found it.
dist.project_name = orig_name
return result
def pip_get_installed():
"""Code extracted from the middle of the pip freeze command.
FIXME: does not list anything installed via -e
"""
from pip._internal.utils.misc import dist_is_local
return tuple(
dist_to_req(dist)
for dist in fresh_working_set()
if dist_is_local(dist)
if dist.key != 'python' # See #220
)
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
def req_cycle(req):
"""is this requirement cyclic?"""
cls = req.__class__
seen = {req.name}
while isinstance(req.comes_from, cls):
req = req.comes_from
if req.name in seen:
return True
else:
seen.add(req.name)
return False
def pretty_req(req):
"""
return a copy of a pip requirement that is a bit more readable,
at the expense of removing some of its data
"""
from copy import copy
req = copy(req)
req.link = None
req.satisfied_by = None
return req
def _package_req_to_pkg_resources_req(req):
return pkg_resources.Requirement.parse(str(req))
def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
"""
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result
def reqnames(reqs):
return {req.name for req in reqs}
class FasterInstallCommand(InstallCommand):
def __init__(self, *args, **kw):
super(FasterInstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--prune',
action='store_true',
dest='prune',
default=False,
help='Uninstall any non-required packages.',
)
cmd_opts.add_option(
'--no-prune',
action='store_false',
dest='prune',
help='Do not uninstall any non-required packages.',
)
def run(self, options, args):
"""update install options with caching values"""
if options.prune:
previously_installed = pip_get_installed()
index_urls = [options.index_url] + options.extra_index_urls
with pipfaster_download_cacher(index_urls):
requirement_set = super(FasterInstallCommand, self).run(
options, args,
)
required = requirement_set.requirements.values()
# With extra_index_urls we don't know where the wheel is from
if not options.extra_index_urls:
cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
if not options.ignore_dependencies:
# transitive requirements, previously installed, are also required
# this has a side-effect of finding any missing / conflicting requirements
required = trace_requirements(required)
if not options.prune:
return requirement_set
extraneous = (
reqnames(previously_installed) -
reqnames(required) -
# the stage1 bootstrap packages
reqnames(trace_requirements([install_req_from_line('venv-update')])) -
# See #186
frozenset(('pkg-resources',))
)
if extraneous:
extraneous = sorted(extraneous)
pip(('uninstall', '--yes') + tuple(extraneous))
# TODO: Cleanup: remove stale values from the cache and wheelhouse that have not been accessed in a week.
# TODO: a pip_faster.patch module
def patch(attrs, updates):
"""Perform a set of updates to a attribute dictionary, return the original values."""
orig = {}
for attr, value in updates:
orig[attr] = attrs[attr]
attrs[attr] = value
return orig
@contextmanager
def patched(attrs, updates):
"""A context in which some attributes temporarily have a modified value."""
orig = patch(attrs, updates.items())
try:
yield orig
finally:
patch(attrs, orig.items())
# END: pip_faster.patch module
def pipfaster_install_prune_option():
return patched(pipmodule._internal.commands.commands_dict, {FasterInstallCommand.name: FasterInstallCommand})
def pipfaster_packagefinder():
"""Provide a short-circuited search when the requirement is pinned and appears on disk.
Suggested upstream at: https://github.com/pypa/pip/pull/2114
"""
# A poor man's dependency injection: monkeypatch :(
try: # :pragma:nocover: pip>=18.1
from pip._internal.cli import base_command
except ImportError: # :pragma:nocover: pip<18.1
from pip._internal import basecommand as base_command
return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
def pipfaster_download_cacher(index_urls):
"""vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache
"""
from pip._internal import download
orig = download._download_http_url
patched_fn = get_patched_download_http_url(orig, index_urls)
return patched(vars(download), {'_download_http_url': patched_fn})
def main():
with pipfaster_install_prune_option():
with pipfaster_packagefinder():
raise_on_failure(pipmodule._internal.main)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | pip_faster.py | req_cycle | python | def req_cycle(req):
cls = req.__class__
seen = {req.name}
while isinstance(req.comes_from, cls):
req = req.comes_from
if req.name in seen:
return True
else:
seen.add(req.name)
return False | is this requirement cyclic? | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L283-L293 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pip-faster is a thin wrapper around pip.
It only adds a --prune option to the `install` subcommand.
`pip-faster install --prune` will *uninstall* any installed packages that are
not required.
Otherwise, you should find that pip-faster gives the same results as pip, just
more quickly, especially in the case of pinned requirements (e.g.
package-x==1.2.3).
Version control at: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import os
import random
import re
import shutil
import sys
from contextlib import contextmanager
import pip as pipmodule
from pip._internal import logger
from pip._internal.commands.install import InstallCommand
from pip._internal.exceptions import DistributionNotFound
from pip._internal.exceptions import InstallationError
from pip._internal.index import BestVersionAlreadyInstalled
from pip._internal.index import HTMLPage
from pip._internal.index import Link
from pip._internal.index import PackageFinder
from pip._internal.req import InstallRequirement
from pip._internal.wheel import Wheel
from venv_update import colorize
from venv_update import raise_on_failure
from venv_update import timid_relpath
from venv_update import user_cache_dir
# Debian de-vendorizes the version of pip it ships
try: # :pragma:nocover: non-debian
from pip._vendor import pkg_resources
except ImportError: # :pragma:nocover: debian
import pkg_resources
try: # :pragma:nocover: pip>=18.1
from pip._internal.req.constructors import install_req_from_line
except ImportError: # :pragma:nocover: pip<18.1
install_req_from_line = InstallRequirement.from_line
# Thanks six!
PY2 = str is bytes
if PY2: # :pragma:nocover:
_reraise_src = 'def reraise(tp, value, tb=None): raise tp, value, tb'
exec(_reraise_src)
else: # :pragma:nocover:
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
class CACHE(object):
_cache_dir = user_cache_dir()
wheelhouse = os.path.join(_cache_dir, 'pip-faster', 'wheelhouse')
pip_wheelhouse = os.path.join(_cache_dir, 'pip', 'wheels')
def ignorecase_glob(glob):
return ''.join([
'[{}{}]'.format(char.lower(), char.upper())
if char.isalpha() else char
for char in glob
])
def optimistic_wheel_search(req, index_urls):
name = req.name.replace('-', '_').lower()
for index_url in index_urls:
expected_location = os.path.join(
CACHE.wheelhouse, index_url, ignorecase_glob(name) + '-*.whl',
)
for link in glob.glob(expected_location):
link = Link('file:' + link)
wheel = Wheel(link.filename)
if req.specifier.contains(wheel.version) and wheel.supported():
return link
def is_req_pinned(requirement):
if not requirement:
# url-style requirement
return False
for spec in requirement.specifier:
if spec.operator == '==' and not spec.version.endswith('.*'):
return True
return False
class FasterPackageFinder(PackageFinder):
def find_requirement(self, req, upgrade):
if is_req_pinned(req.req):
# if the version is pinned-down by a ==
# first try to use any installed package that satisfies the req
if req.satisfied_by:
logger.info('Faster! pinned requirement already installed.')
raise BestVersionAlreadyInstalled
# then try an optimistic search for a .whl file:
link = optimistic_wheel_search(req.req, self.index_urls)
if link is None:
# The wheel will be built during prepare_files
logger.debug('No wheel found locally for pinned requirement %s', req)
else:
logger.info('Faster! Pinned wheel found, without hitting PyPI.')
return link
else:
# unpinned requirements aren't very notable. only show with -v
logger.info('slow: full search for unpinned requirement %s', req)
# otherwise, do the full network search, per usual
try:
return super(FasterPackageFinder, self).find_requirement(req, upgrade)
except DistributionNotFound:
exc_info = sys.exc_info()
# Best effort: try and install from suitable version on-disk
link = optimistic_wheel_search(req.req, self.index_urls)
if link:
return link
else:
reraise(*exc_info)
def _can_be_cached(package):
return (
package.is_wheel and
# An assertion that we're looking in the pip wheel dir
package.link.path.startswith(CACHE.pip_wheelhouse)
)
def mkdirp(pth):
try:
os.makedirs(pth)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _store_wheel_in_cache(file_path, index_url):
filename = os.path.basename(file_path)
cache = os.path.join(CACHE.wheelhouse, index_url, filename)
cache_tmp = '{}.{}'.format(cache, random.randint(0, sys.maxsize))
cache_dir = os.path.dirname(cache)
mkdirp(cache_dir)
# Atomicity
shutil.copy(file_path, cache_tmp)
os.rename(cache_tmp, cache)
def cache_installed_wheels(index_url, installed_packages):
"""After installation, pip tells us what it installed and from where.
We build a structure that looks like
.cache/pip-faster/wheelhouse/$index_url/$wheel
"""
for installed_package in installed_packages:
if not _can_be_cached(installed_package):
continue
_store_wheel_in_cache(installed_package.link.path, index_url)
def get_patched_download_http_url(orig_download_http_url, index_urls):
def pipfaster_download_http_url(link, *args, **kwargs):
file_path, content_type = orig_download_http_url(link, *args, **kwargs)
if link.is_wheel:
for index_url in index_urls:
if (
# pip <18.1
isinstance(link.comes_from, HTMLPage) and
link.comes_from.url.startswith(index_url)
) or (
# pip >= 18.1
isinstance(link.comes_from, (str, type(''))) and
link.comes_from.startswith(index_url)
):
_store_wheel_in_cache(file_path, index_url)
break
return file_path, content_type
return pipfaster_download_http_url
def pip(args):
"""Run pip, in-process."""
from sys import stdout
stdout.write(colorize(('pip',) + args))
stdout.write('\n')
stdout.flush()
return pipmodule._internal.main(list(args))
def dist_to_req(dist):
"""Make a pip.FrozenRequirement from a pkg_resources distribution object"""
try: # :pragma:nocover: (pip>=10)
from pip._internal.operations.freeze import FrozenRequirement
except ImportError: # :pragma:nocover: (pip<10)
from pip import FrozenRequirement
# normalize the casing, dashes in the req name
orig_name, dist.project_name = dist.project_name, dist.key
result = FrozenRequirement.from_dist(dist, [])
# put things back the way we found it.
dist.project_name = orig_name
return result
def pip_get_installed():
"""Code extracted from the middle of the pip freeze command.
FIXME: does not list anything installed via -e
"""
from pip._internal.utils.misc import dist_is_local
return tuple(
dist_to_req(dist)
for dist in fresh_working_set()
if dist_is_local(dist)
if dist.key != 'python' # See #220
)
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
def fresh_working_set():
"""return a pkg_resources "working set", representing the *currently* installed packages"""
class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):
def __init__(self, *args, **kwargs):
self._normalized_name_mapping = {}
super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)
def add_entry(self, entry):
"""Same as the original .add_entry, but sets only=False, so that egg-links are honored."""
logger.debug('working-set entry: %r', entry)
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in pkg_resources.find_distributions(entry, False):
# eggs override anything that's installed normally
# fun fact: pkg_resources.working_set's results depend on the
# ordering of os.listdir since the order of os.listdir is
# entirely arbitrary (an implemenation detail of file system),
# without calling site.main(), an .egg-link file may or may not
# be honored, depending on the filesystem
replace = (dist.precedence == pkg_resources.EGG_DIST)
self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
self.add(dist, entry, False, replace=replace)
def find_normalized(self, req):
req = _package_req_to_pkg_resources_req(str(req))
req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
return self.find(req)
return WorkingSetPlusEditableInstalls()
def pretty_req(req):
"""
return a copy of a pip requirement that is a bit more readable,
at the expense of removing some of its data
"""
from copy import copy
req = copy(req)
req.link = None
req.satisfied_by = None
return req
def _package_req_to_pkg_resources_req(req):
return pkg_resources.Requirement.parse(str(req))
def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
"""
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result
def reqnames(reqs):
return {req.name for req in reqs}
class FasterInstallCommand(InstallCommand):
def __init__(self, *args, **kw):
super(FasterInstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--prune',
action='store_true',
dest='prune',
default=False,
help='Uninstall any non-required packages.',
)
cmd_opts.add_option(
'--no-prune',
action='store_false',
dest='prune',
help='Do not uninstall any non-required packages.',
)
def run(self, options, args):
"""update install options with caching values"""
if options.prune:
previously_installed = pip_get_installed()
index_urls = [options.index_url] + options.extra_index_urls
with pipfaster_download_cacher(index_urls):
requirement_set = super(FasterInstallCommand, self).run(
options, args,
)
required = requirement_set.requirements.values()
# With extra_index_urls we don't know where the wheel is from
if not options.extra_index_urls:
cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
if not options.ignore_dependencies:
# transitive requirements, previously installed, are also required
# this has a side-effect of finding any missing / conflicting requirements
required = trace_requirements(required)
if not options.prune:
return requirement_set
extraneous = (
reqnames(previously_installed) -
reqnames(required) -
# the stage1 bootstrap packages
reqnames(trace_requirements([install_req_from_line('venv-update')])) -
# See #186
frozenset(('pkg-resources',))
)
if extraneous:
extraneous = sorted(extraneous)
pip(('uninstall', '--yes') + tuple(extraneous))
# TODO: Cleanup: remove stale values from the cache and wheelhouse that have not been accessed in a week.
# TODO: a pip_faster.patch module
def patch(attrs, updates):
"""Perform a set of updates to a attribute dictionary, return the original values."""
orig = {}
for attr, value in updates:
orig[attr] = attrs[attr]
attrs[attr] = value
return orig
@contextmanager
def patched(attrs, updates):
"""A context in which some attributes temporarily have a modified value."""
orig = patch(attrs, updates.items())
try:
yield orig
finally:
patch(attrs, orig.items())
# END: pip_faster.patch module
def pipfaster_install_prune_option():
return patched(pipmodule._internal.commands.commands_dict, {FasterInstallCommand.name: FasterInstallCommand})
def pipfaster_packagefinder():
"""Provide a short-circuited search when the requirement is pinned and appears on disk.
Suggested upstream at: https://github.com/pypa/pip/pull/2114
"""
# A poor man's dependency injection: monkeypatch :(
try: # :pragma:nocover: pip>=18.1
from pip._internal.cli import base_command
except ImportError: # :pragma:nocover: pip<18.1
from pip._internal import basecommand as base_command
return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
def pipfaster_download_cacher(index_urls):
"""vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache
"""
from pip._internal import download
orig = download._download_http_url
patched_fn = get_patched_download_http_url(orig, index_urls)
return patched(vars(download), {'_download_http_url': patched_fn})
def main():
with pipfaster_install_prune_option():
with pipfaster_packagefinder():
raise_on_failure(pipmodule._internal.main)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | pip_faster.py | pretty_req | python | def pretty_req(req):
from copy import copy
req = copy(req)
req.link = None
req.satisfied_by = None
return req | return a copy of a pip requirement that is a bit more readable,
at the expense of removing some of its data | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L296-L305 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pip-faster is a thin wrapper around pip.
It only adds a --prune option to the `install` subcommand.
`pip-faster install --prune` will *uninstall* any installed packages that are
not required.
Otherwise, you should find that pip-faster gives the same results as pip, just
more quickly, especially in the case of pinned requirements (e.g.
package-x==1.2.3).
Version control at: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import os
import random
import re
import shutil
import sys
from contextlib import contextmanager
import pip as pipmodule
from pip._internal import logger
from pip._internal.commands.install import InstallCommand
from pip._internal.exceptions import DistributionNotFound
from pip._internal.exceptions import InstallationError
from pip._internal.index import BestVersionAlreadyInstalled
from pip._internal.index import HTMLPage
from pip._internal.index import Link
from pip._internal.index import PackageFinder
from pip._internal.req import InstallRequirement
from pip._internal.wheel import Wheel
from venv_update import colorize
from venv_update import raise_on_failure
from venv_update import timid_relpath
from venv_update import user_cache_dir
# Debian de-vendorizes the version of pip it ships
try: # :pragma:nocover: non-debian
from pip._vendor import pkg_resources
except ImportError: # :pragma:nocover: debian
import pkg_resources
try: # :pragma:nocover: pip>=18.1
from pip._internal.req.constructors import install_req_from_line
except ImportError: # :pragma:nocover: pip<18.1
install_req_from_line = InstallRequirement.from_line
# Thanks six!
PY2 = str is bytes
if PY2: # :pragma:nocover:
_reraise_src = 'def reraise(tp, value, tb=None): raise tp, value, tb'
exec(_reraise_src)
else: # :pragma:nocover:
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
class CACHE(object):
_cache_dir = user_cache_dir()
wheelhouse = os.path.join(_cache_dir, 'pip-faster', 'wheelhouse')
pip_wheelhouse = os.path.join(_cache_dir, 'pip', 'wheels')
def ignorecase_glob(glob):
return ''.join([
'[{}{}]'.format(char.lower(), char.upper())
if char.isalpha() else char
for char in glob
])
def optimistic_wheel_search(req, index_urls):
name = req.name.replace('-', '_').lower()
for index_url in index_urls:
expected_location = os.path.join(
CACHE.wheelhouse, index_url, ignorecase_glob(name) + '-*.whl',
)
for link in glob.glob(expected_location):
link = Link('file:' + link)
wheel = Wheel(link.filename)
if req.specifier.contains(wheel.version) and wheel.supported():
return link
def is_req_pinned(requirement):
if not requirement:
# url-style requirement
return False
for spec in requirement.specifier:
if spec.operator == '==' and not spec.version.endswith('.*'):
return True
return False
class FasterPackageFinder(PackageFinder):
def find_requirement(self, req, upgrade):
if is_req_pinned(req.req):
# if the version is pinned-down by a ==
# first try to use any installed package that satisfies the req
if req.satisfied_by:
logger.info('Faster! pinned requirement already installed.')
raise BestVersionAlreadyInstalled
# then try an optimistic search for a .whl file:
link = optimistic_wheel_search(req.req, self.index_urls)
if link is None:
# The wheel will be built during prepare_files
logger.debug('No wheel found locally for pinned requirement %s', req)
else:
logger.info('Faster! Pinned wheel found, without hitting PyPI.')
return link
else:
# unpinned requirements aren't very notable. only show with -v
logger.info('slow: full search for unpinned requirement %s', req)
# otherwise, do the full network search, per usual
try:
return super(FasterPackageFinder, self).find_requirement(req, upgrade)
except DistributionNotFound:
exc_info = sys.exc_info()
# Best effort: try and install from suitable version on-disk
link = optimistic_wheel_search(req.req, self.index_urls)
if link:
return link
else:
reraise(*exc_info)
def _can_be_cached(package):
return (
package.is_wheel and
# An assertion that we're looking in the pip wheel dir
package.link.path.startswith(CACHE.pip_wheelhouse)
)
def mkdirp(pth):
try:
os.makedirs(pth)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _store_wheel_in_cache(file_path, index_url):
filename = os.path.basename(file_path)
cache = os.path.join(CACHE.wheelhouse, index_url, filename)
cache_tmp = '{}.{}'.format(cache, random.randint(0, sys.maxsize))
cache_dir = os.path.dirname(cache)
mkdirp(cache_dir)
# Atomicity
shutil.copy(file_path, cache_tmp)
os.rename(cache_tmp, cache)
def cache_installed_wheels(index_url, installed_packages):
"""After installation, pip tells us what it installed and from where.
We build a structure that looks like
.cache/pip-faster/wheelhouse/$index_url/$wheel
"""
for installed_package in installed_packages:
if not _can_be_cached(installed_package):
continue
_store_wheel_in_cache(installed_package.link.path, index_url)
def get_patched_download_http_url(orig_download_http_url, index_urls):
def pipfaster_download_http_url(link, *args, **kwargs):
file_path, content_type = orig_download_http_url(link, *args, **kwargs)
if link.is_wheel:
for index_url in index_urls:
if (
# pip <18.1
isinstance(link.comes_from, HTMLPage) and
link.comes_from.url.startswith(index_url)
) or (
# pip >= 18.1
isinstance(link.comes_from, (str, type(''))) and
link.comes_from.startswith(index_url)
):
_store_wheel_in_cache(file_path, index_url)
break
return file_path, content_type
return pipfaster_download_http_url
def pip(args):
"""Run pip, in-process."""
from sys import stdout
stdout.write(colorize(('pip',) + args))
stdout.write('\n')
stdout.flush()
return pipmodule._internal.main(list(args))
def dist_to_req(dist):
"""Make a pip.FrozenRequirement from a pkg_resources distribution object"""
try: # :pragma:nocover: (pip>=10)
from pip._internal.operations.freeze import FrozenRequirement
except ImportError: # :pragma:nocover: (pip<10)
from pip import FrozenRequirement
# normalize the casing, dashes in the req name
orig_name, dist.project_name = dist.project_name, dist.key
result = FrozenRequirement.from_dist(dist, [])
# put things back the way we found it.
dist.project_name = orig_name
return result
def pip_get_installed():
"""Code extracted from the middle of the pip freeze command.
FIXME: does not list anything installed via -e
"""
from pip._internal.utils.misc import dist_is_local
return tuple(
dist_to_req(dist)
for dist in fresh_working_set()
if dist_is_local(dist)
if dist.key != 'python' # See #220
)
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
def fresh_working_set():
"""return a pkg_resources "working set", representing the *currently* installed packages"""
class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):
def __init__(self, *args, **kwargs):
self._normalized_name_mapping = {}
super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)
def add_entry(self, entry):
"""Same as the original .add_entry, but sets only=False, so that egg-links are honored."""
logger.debug('working-set entry: %r', entry)
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in pkg_resources.find_distributions(entry, False):
# eggs override anything that's installed normally
# fun fact: pkg_resources.working_set's results depend on the
# ordering of os.listdir since the order of os.listdir is
# entirely arbitrary (an implemenation detail of file system),
# without calling site.main(), an .egg-link file may or may not
# be honored, depending on the filesystem
replace = (dist.precedence == pkg_resources.EGG_DIST)
self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
self.add(dist, entry, False, replace=replace)
def find_normalized(self, req):
req = _package_req_to_pkg_resources_req(str(req))
req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
return self.find(req)
return WorkingSetPlusEditableInstalls()
def req_cycle(req):
"""is this requirement cyclic?"""
cls = req.__class__
seen = {req.name}
while isinstance(req.comes_from, cls):
req = req.comes_from
if req.name in seen:
return True
else:
seen.add(req.name)
return False
def _package_req_to_pkg_resources_req(req):
return pkg_resources.Requirement.parse(str(req))
def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
"""
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result
def reqnames(reqs):
return {req.name for req in reqs}
class FasterInstallCommand(InstallCommand):
def __init__(self, *args, **kw):
super(FasterInstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--prune',
action='store_true',
dest='prune',
default=False,
help='Uninstall any non-required packages.',
)
cmd_opts.add_option(
'--no-prune',
action='store_false',
dest='prune',
help='Do not uninstall any non-required packages.',
)
def run(self, options, args):
"""update install options with caching values"""
if options.prune:
previously_installed = pip_get_installed()
index_urls = [options.index_url] + options.extra_index_urls
with pipfaster_download_cacher(index_urls):
requirement_set = super(FasterInstallCommand, self).run(
options, args,
)
required = requirement_set.requirements.values()
# With extra_index_urls we don't know where the wheel is from
if not options.extra_index_urls:
cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
if not options.ignore_dependencies:
# transitive requirements, previously installed, are also required
# this has a side-effect of finding any missing / conflicting requirements
required = trace_requirements(required)
if not options.prune:
return requirement_set
extraneous = (
reqnames(previously_installed) -
reqnames(required) -
# the stage1 bootstrap packages
reqnames(trace_requirements([install_req_from_line('venv-update')])) -
# See #186
frozenset(('pkg-resources',))
)
if extraneous:
extraneous = sorted(extraneous)
pip(('uninstall', '--yes') + tuple(extraneous))
# TODO: Cleanup: remove stale values from the cache and wheelhouse that have not been accessed in a week.
# TODO: a pip_faster.patch module
def patch(attrs, updates):
"""Perform a set of updates to a attribute dictionary, return the original values."""
orig = {}
for attr, value in updates:
orig[attr] = attrs[attr]
attrs[attr] = value
return orig
@contextmanager
def patched(attrs, updates):
"""A context in which some attributes temporarily have a modified value."""
orig = patch(attrs, updates.items())
try:
yield orig
finally:
patch(attrs, orig.items())
# END: pip_faster.patch module
def pipfaster_install_prune_option():
return patched(pipmodule._internal.commands.commands_dict, {FasterInstallCommand.name: FasterInstallCommand})
def pipfaster_packagefinder():
"""Provide a short-circuited search when the requirement is pinned and appears on disk.
Suggested upstream at: https://github.com/pypa/pip/pull/2114
"""
# A poor man's dependency injection: monkeypatch :(
try: # :pragma:nocover: pip>=18.1
from pip._internal.cli import base_command
except ImportError: # :pragma:nocover: pip<18.1
from pip._internal import basecommand as base_command
return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
def pipfaster_download_cacher(index_urls):
"""vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache
"""
from pip._internal import download
orig = download._download_http_url
patched_fn = get_patched_download_http_url(orig, index_urls)
return patched(vars(download), {'_download_http_url': patched_fn})
def main():
with pipfaster_install_prune_option():
with pipfaster_packagefinder():
raise_on_failure(pipmodule._internal.main)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | pip_faster.py | trace_requirements | python | def trace_requirements(requirements):
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result | given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements. | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L312-L359 | [
"def timid_relpath(arg):\n \"\"\"convert an argument to a relative path, carefully\"\"\"\n # TODO-TEST: unit tests\n from os.path import isabs, relpath, sep\n if isabs(arg):\n result = relpath(arg)\n if result.count(sep) + 1 < arg.count(sep):\n return result\n\n return arg\n",
"def fresh_working_set():\n \"\"\"return a pkg_resources \"working set\", representing the *currently* installed packages\"\"\"\n class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):\n\n def __init__(self, *args, **kwargs):\n self._normalized_name_mapping = {}\n super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)\n\n def add_entry(self, entry):\n \"\"\"Same as the original .add_entry, but sets only=False, so that egg-links are honored.\"\"\"\n logger.debug('working-set entry: %r', entry)\n self.entry_keys.setdefault(entry, [])\n self.entries.append(entry)\n for dist in pkg_resources.find_distributions(entry, False):\n\n # eggs override anything that's installed normally\n # fun fact: pkg_resources.working_set's results depend on the\n # ordering of os.listdir since the order of os.listdir is\n # entirely arbitrary (an implemenation detail of file system),\n # without calling site.main(), an .egg-link file may or may not\n # be honored, depending on the filesystem\n replace = (dist.precedence == pkg_resources.EGG_DIST)\n self._normalized_name_mapping[normalize_name(dist.key)] = dist.key\n self.add(dist, entry, False, replace=replace)\n\n def find_normalized(self, req):\n req = _package_req_to_pkg_resources_req(str(req))\n req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)\n return self.find(req)\n\n return WorkingSetPlusEditableInstalls()\n",
"def _package_req_to_pkg_resources_req(req):\n return pkg_resources.Requirement.parse(str(req))\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pip-faster is a thin wrapper around pip.
It only adds a --prune option to the `install` subcommand.
`pip-faster install --prune` will *uninstall* any installed packages that are
not required.
Otherwise, you should find that pip-faster gives the same results as pip, just
more quickly, especially in the case of pinned requirements (e.g.
package-x==1.2.3).
Version control at: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import os
import random
import re
import shutil
import sys
from contextlib import contextmanager
import pip as pipmodule
from pip._internal import logger
from pip._internal.commands.install import InstallCommand
from pip._internal.exceptions import DistributionNotFound
from pip._internal.exceptions import InstallationError
from pip._internal.index import BestVersionAlreadyInstalled
from pip._internal.index import HTMLPage
from pip._internal.index import Link
from pip._internal.index import PackageFinder
from pip._internal.req import InstallRequirement
from pip._internal.wheel import Wheel
from venv_update import colorize
from venv_update import raise_on_failure
from venv_update import timid_relpath
from venv_update import user_cache_dir
# Debian de-vendorizes the version of pip it ships
try: # :pragma:nocover: non-debian
from pip._vendor import pkg_resources
except ImportError: # :pragma:nocover: debian
import pkg_resources
try: # :pragma:nocover: pip>=18.1
from pip._internal.req.constructors import install_req_from_line
except ImportError: # :pragma:nocover: pip<18.1
install_req_from_line = InstallRequirement.from_line
# Thanks six!
PY2 = str is bytes
if PY2: # :pragma:nocover:
_reraise_src = 'def reraise(tp, value, tb=None): raise tp, value, tb'
exec(_reraise_src)
else: # :pragma:nocover:
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
class CACHE(object):
_cache_dir = user_cache_dir()
wheelhouse = os.path.join(_cache_dir, 'pip-faster', 'wheelhouse')
pip_wheelhouse = os.path.join(_cache_dir, 'pip', 'wheels')
def ignorecase_glob(glob):
return ''.join([
'[{}{}]'.format(char.lower(), char.upper())
if char.isalpha() else char
for char in glob
])
def optimistic_wheel_search(req, index_urls):
name = req.name.replace('-', '_').lower()
for index_url in index_urls:
expected_location = os.path.join(
CACHE.wheelhouse, index_url, ignorecase_glob(name) + '-*.whl',
)
for link in glob.glob(expected_location):
link = Link('file:' + link)
wheel = Wheel(link.filename)
if req.specifier.contains(wheel.version) and wheel.supported():
return link
def is_req_pinned(requirement):
if not requirement:
# url-style requirement
return False
for spec in requirement.specifier:
if spec.operator == '==' and not spec.version.endswith('.*'):
return True
return False
class FasterPackageFinder(PackageFinder):
def find_requirement(self, req, upgrade):
if is_req_pinned(req.req):
# if the version is pinned-down by a ==
# first try to use any installed package that satisfies the req
if req.satisfied_by:
logger.info('Faster! pinned requirement already installed.')
raise BestVersionAlreadyInstalled
# then try an optimistic search for a .whl file:
link = optimistic_wheel_search(req.req, self.index_urls)
if link is None:
# The wheel will be built during prepare_files
logger.debug('No wheel found locally for pinned requirement %s', req)
else:
logger.info('Faster! Pinned wheel found, without hitting PyPI.')
return link
else:
# unpinned requirements aren't very notable. only show with -v
logger.info('slow: full search for unpinned requirement %s', req)
# otherwise, do the full network search, per usual
try:
return super(FasterPackageFinder, self).find_requirement(req, upgrade)
except DistributionNotFound:
exc_info = sys.exc_info()
# Best effort: try and install from suitable version on-disk
link = optimistic_wheel_search(req.req, self.index_urls)
if link:
return link
else:
reraise(*exc_info)
def _can_be_cached(package):
return (
package.is_wheel and
# An assertion that we're looking in the pip wheel dir
package.link.path.startswith(CACHE.pip_wheelhouse)
)
def mkdirp(pth):
try:
os.makedirs(pth)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _store_wheel_in_cache(file_path, index_url):
filename = os.path.basename(file_path)
cache = os.path.join(CACHE.wheelhouse, index_url, filename)
cache_tmp = '{}.{}'.format(cache, random.randint(0, sys.maxsize))
cache_dir = os.path.dirname(cache)
mkdirp(cache_dir)
# Atomicity
shutil.copy(file_path, cache_tmp)
os.rename(cache_tmp, cache)
def cache_installed_wheels(index_url, installed_packages):
"""After installation, pip tells us what it installed and from where.
We build a structure that looks like
.cache/pip-faster/wheelhouse/$index_url/$wheel
"""
for installed_package in installed_packages:
if not _can_be_cached(installed_package):
continue
_store_wheel_in_cache(installed_package.link.path, index_url)
def get_patched_download_http_url(orig_download_http_url, index_urls):
def pipfaster_download_http_url(link, *args, **kwargs):
file_path, content_type = orig_download_http_url(link, *args, **kwargs)
if link.is_wheel:
for index_url in index_urls:
if (
# pip <18.1
isinstance(link.comes_from, HTMLPage) and
link.comes_from.url.startswith(index_url)
) or (
# pip >= 18.1
isinstance(link.comes_from, (str, type(''))) and
link.comes_from.startswith(index_url)
):
_store_wheel_in_cache(file_path, index_url)
break
return file_path, content_type
return pipfaster_download_http_url
def pip(args):
"""Run pip, in-process."""
from sys import stdout
stdout.write(colorize(('pip',) + args))
stdout.write('\n')
stdout.flush()
return pipmodule._internal.main(list(args))
def dist_to_req(dist):
"""Make a pip.FrozenRequirement from a pkg_resources distribution object"""
try: # :pragma:nocover: (pip>=10)
from pip._internal.operations.freeze import FrozenRequirement
except ImportError: # :pragma:nocover: (pip<10)
from pip import FrozenRequirement
# normalize the casing, dashes in the req name
orig_name, dist.project_name = dist.project_name, dist.key
result = FrozenRequirement.from_dist(dist, [])
# put things back the way we found it.
dist.project_name = orig_name
return result
def pip_get_installed():
"""Code extracted from the middle of the pip freeze command.
FIXME: does not list anything installed via -e
"""
from pip._internal.utils.misc import dist_is_local
return tuple(
dist_to_req(dist)
for dist in fresh_working_set()
if dist_is_local(dist)
if dist.key != 'python' # See #220
)
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
def fresh_working_set():
"""return a pkg_resources "working set", representing the *currently* installed packages"""
class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):
def __init__(self, *args, **kwargs):
self._normalized_name_mapping = {}
super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)
def add_entry(self, entry):
"""Same as the original .add_entry, but sets only=False, so that egg-links are honored."""
logger.debug('working-set entry: %r', entry)
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in pkg_resources.find_distributions(entry, False):
# eggs override anything that's installed normally
# fun fact: pkg_resources.working_set's results depend on the
# ordering of os.listdir since the order of os.listdir is
# entirely arbitrary (an implemenation detail of file system),
# without calling site.main(), an .egg-link file may or may not
# be honored, depending on the filesystem
replace = (dist.precedence == pkg_resources.EGG_DIST)
self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
self.add(dist, entry, False, replace=replace)
def find_normalized(self, req):
req = _package_req_to_pkg_resources_req(str(req))
req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
return self.find(req)
return WorkingSetPlusEditableInstalls()
def req_cycle(req):
"""is this requirement cyclic?"""
cls = req.__class__
seen = {req.name}
while isinstance(req.comes_from, cls):
req = req.comes_from
if req.name in seen:
return True
else:
seen.add(req.name)
return False
def pretty_req(req):
"""
return a copy of a pip requirement that is a bit more readable,
at the expense of removing some of its data
"""
from copy import copy
req = copy(req)
req.link = None
req.satisfied_by = None
return req
def _package_req_to_pkg_resources_req(req):
return pkg_resources.Requirement.parse(str(req))
def reqnames(reqs):
return {req.name for req in reqs}
class FasterInstallCommand(InstallCommand):
def __init__(self, *args, **kw):
super(FasterInstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--prune',
action='store_true',
dest='prune',
default=False,
help='Uninstall any non-required packages.',
)
cmd_opts.add_option(
'--no-prune',
action='store_false',
dest='prune',
help='Do not uninstall any non-required packages.',
)
def run(self, options, args):
"""update install options with caching values"""
if options.prune:
previously_installed = pip_get_installed()
index_urls = [options.index_url] + options.extra_index_urls
with pipfaster_download_cacher(index_urls):
requirement_set = super(FasterInstallCommand, self).run(
options, args,
)
required = requirement_set.requirements.values()
# With extra_index_urls we don't know where the wheel is from
if not options.extra_index_urls:
cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
if not options.ignore_dependencies:
# transitive requirements, previously installed, are also required
# this has a side-effect of finding any missing / conflicting requirements
required = trace_requirements(required)
if not options.prune:
return requirement_set
extraneous = (
reqnames(previously_installed) -
reqnames(required) -
# the stage1 bootstrap packages
reqnames(trace_requirements([install_req_from_line('venv-update')])) -
# See #186
frozenset(('pkg-resources',))
)
if extraneous:
extraneous = sorted(extraneous)
pip(('uninstall', '--yes') + tuple(extraneous))
# TODO: Cleanup: remove stale values from the cache and wheelhouse that have not been accessed in a week.
# TODO: a pip_faster.patch module
def patch(attrs, updates):
    """Apply (attribute, value) pairs to a mapping; return the prior values.

    Every key in *updates* must already exist in *attrs*.  The returned
    dict maps each touched attribute to the value it held beforehand, so
    a second call with that dict restores the original state.
    """
    previous = {}
    for name, new_value in updates:
        previous[name] = attrs[name]
        attrs[name] = new_value
    return previous
@contextmanager
def patched(attrs, updates):
    """A context in which some attributes temporarily have a modified value.

    Yields the dict of original values; on exit the originals are
    restored even if the body raised.
    """
    orig = patch(attrs, updates.items())
    try:
        yield orig
    finally:
        patch(attrs, orig.items())
# END: pip_faster.patch module
def pipfaster_install_prune_option():
    """Context manager: swap pip's ``install`` command for FasterInstallCommand (which adds ``--prune``)."""
    return patched(pipmodule._internal.commands.commands_dict, {FasterInstallCommand.name: FasterInstallCommand})
def pipfaster_packagefinder():
    """Provide a short-circuited search when the requirement is pinned and appears on disk.

    Returns a context manager; while active, pip's base command module
    uses FasterPackageFinder instead of the stock PackageFinder.
    Suggested upstream at: https://github.com/pypa/pip/pull/2114
    """
    # A poor man's dependency injection: monkeypatch :(
    try:  # :pragma:nocover: pip>=18.1
        from pip._internal.cli import base_command
    except ImportError:  # :pragma:nocover: pip<18.1
        from pip._internal import basecommand as base_command
    return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
def pipfaster_download_cacher(index_urls):
    """vanilla pip stores a cache of the http session in its cache and not the
    wheel files. We intercept the download and save those files into our
    cache
    """
    from pip._internal import download
    orig = download._download_http_url
    # Wrap the downloader so fetched wheels are also copied into our wheelhouse.
    patched_fn = get_patched_download_http_url(orig, index_urls)
    return patched(vars(download), {'_download_http_url': patched_fn})
def main():
    """Entry point: run pip with the --prune option and the fast package finder patched in."""
    with pipfaster_install_prune_option():
        with pipfaster_packagefinder():
            raise_on_failure(pipmodule._internal.main)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | pip_faster.py | patch | python | def patch(attrs, updates):
orig = {}
for attr, value in updates:
orig[attr] = attrs[attr]
attrs[attr] = value
return orig | Perform a set of updates to an attribute dictionary, return the original values. | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L430-L436 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pip-faster is a thin wrapper around pip.
It only adds a --prune option to the `install` subcommand.
`pip-faster install --prune` will *uninstall* any installed packages that are
not required.
Otherwise, you should find that pip-faster gives the same results as pip, just
more quickly, especially in the case of pinned requirements (e.g.
package-x==1.2.3).
Version control at: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import os
import random
import re
import shutil
import sys
from contextlib import contextmanager
import pip as pipmodule
from pip._internal import logger
from pip._internal.commands.install import InstallCommand
from pip._internal.exceptions import DistributionNotFound
from pip._internal.exceptions import InstallationError
from pip._internal.index import BestVersionAlreadyInstalled
from pip._internal.index import HTMLPage
from pip._internal.index import Link
from pip._internal.index import PackageFinder
from pip._internal.req import InstallRequirement
from pip._internal.wheel import Wheel
from venv_update import colorize
from venv_update import raise_on_failure
from venv_update import timid_relpath
from venv_update import user_cache_dir
# Debian de-vendorizes the version of pip it ships
try: # :pragma:nocover: non-debian
from pip._vendor import pkg_resources
except ImportError: # :pragma:nocover: debian
import pkg_resources
try: # :pragma:nocover: pip>=18.1
from pip._internal.req.constructors import install_req_from_line
except ImportError: # :pragma:nocover: pip<18.1
install_req_from_line = InstallRequirement.from_line
# Thanks six!
PY2 = str is bytes
if PY2: # :pragma:nocover:
_reraise_src = 'def reraise(tp, value, tb=None): raise tp, value, tb'
exec(_reraise_src)
else: # :pragma:nocover:
    def reraise(tp, value, tb=None):
        """Re-raise *value* (an instance of *tp*) with traceback *tb* — the Python 3 half of a six-style reraise."""
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
class CACHE(object):
    """Filesystem locations of the wheel caches pip-faster reads and writes."""
    _cache_dir = user_cache_dir()
    # our own wheelhouse; wheels are stored per index-url underneath it
    wheelhouse = os.path.join(_cache_dir, 'pip-faster', 'wheelhouse')
    # vanilla pip's wheel cache, which we harvest after installs
    pip_wheelhouse = os.path.join(_cache_dir, 'pip', 'wheels')
def ignorecase_glob(glob):
    """Make a glob pattern case-insensitive.

    Every alphabetic character is replaced with a two-character class
    containing its lower- and upper-case forms; all other characters
    pass through unchanged.
    """
    pieces = []
    for ch in glob:
        if ch.isalpha():
            pieces.append('[{}{}]'.format(ch.lower(), ch.upper()))
        else:
            pieces.append(ch)
    return ''.join(pieces)
def optimistic_wheel_search(req, index_urls):
    """Look in the local wheelhouse for a wheel satisfying *req*, with no network I/O.

    Scans each index-url's cache directory in order and returns a Link
    to the first wheel whose version satisfies the requirement and which
    is supported on this platform; returns None when nothing matches.
    """
    # wheel filenames use underscores; match the name case-insensitively
    name = req.name.replace('-', '_').lower()
    for index_url in index_urls:
        expected_location = os.path.join(
            CACHE.wheelhouse, index_url, ignorecase_glob(name) + '-*.whl',
        )
        for link in glob.glob(expected_location):
            link = Link('file:' + link)
            wheel = Wheel(link.filename)
            if req.specifier.contains(wheel.version) and wheel.supported():
                return link
def is_req_pinned(requirement):
    """Is this requirement pinned to one exact version (``==x.y.z``)?

    A falsy requirement (e.g. a bare URL requirement) is never pinned,
    and a wildcard pin such as ``==1.2.*`` does not count.
    """
    if not requirement:
        # url-style requirement: no version specifier at all
        return False
    return any(
        spec.operator == '==' and not spec.version.endswith('.*')
        for spec in requirement.specifier
    )
class FasterPackageFinder(PackageFinder):
    """PackageFinder that avoids network round-trips for pinned requirements.

    For a ``==``-pinned requirement it first accepts an already-installed
    match, then a wheel from the local wheelhouse; only after that (or
    for unpinned requirements) does it run the normal index search.
    """
    def find_requirement(self, req, upgrade):
        """Find a link for *req*, preferring local results when *req* is pinned.

        Raises BestVersionAlreadyInstalled when the pinned version is
        already installed; on DistributionNotFound from the full search,
        falls back to any suitable on-disk wheel before re-raising.
        """
        if is_req_pinned(req.req):
            # if the version is pinned-down by a ==
            # first try to use any installed package that satisfies the req
            if req.satisfied_by:
                logger.info('Faster! pinned requirement already installed.')
                raise BestVersionAlreadyInstalled
            # then try an optimistic search for a .whl file:
            link = optimistic_wheel_search(req.req, self.index_urls)
            if link is None:
                # The wheel will be built during prepare_files
                logger.debug('No wheel found locally for pinned requirement %s', req)
            else:
                logger.info('Faster! Pinned wheel found, without hitting PyPI.')
                return link
        else:
            # unpinned requirements aren't very notable. only show with -v
            logger.info('slow: full search for unpinned requirement %s', req)
        # otherwise, do the full network search, per usual
        try:
            return super(FasterPackageFinder, self).find_requirement(req, upgrade)
        except DistributionNotFound:
            exc_info = sys.exc_info()
            # Best effort: try and install from suitable version on-disk
            link = optimistic_wheel_search(req.req, self.index_urls)
            if link:
                return link
            else:
                reraise(*exc_info)
def _can_be_cached(package):
    """Should this installed package's wheel be copied into our wheelhouse?

    True only for wheels whose file lives under pip's own wheel cache —
    i.e. wheels pip built or downloaded itself.
    """
    return (
        package.is_wheel and
        # An assertion that we're looking in the pip wheel dir
        package.link.path.startswith(CACHE.pip_wheelhouse)
    )
def mkdirp(pth):
    """Create *pth* and any missing parents, like ``mkdir -p``.

    An already-existing path is silently accepted; every other OS error
    (permissions, bad parent, ...) propagates.
    """
    try:
        os.makedirs(pth)
    except OSError as error:
        # Only swallow "already exists"; re-raise anything else.
        if error.errno == errno.EEXIST:
            pass
        else:
            raise
def _store_wheel_in_cache(file_path, index_url):
    """Copy a wheel file into the pip-faster wheelhouse, keyed by index URL.

    The wheel is copied to a uniquely-named temp file first and then
    os.rename()d into place, so concurrent writers never observe a
    half-written wheel.
    """
    filename = os.path.basename(file_path)
    cache = os.path.join(CACHE.wheelhouse, index_url, filename)
    # random suffix keeps concurrent processes off each other's temp file
    cache_tmp = '{}.{}'.format(cache, random.randint(0, sys.maxsize))
    cache_dir = os.path.dirname(cache)
    mkdirp(cache_dir)
    # Atomicity
    shutil.copy(file_path, cache_tmp)
    os.rename(cache_tmp, cache)
def cache_installed_wheels(index_url, installed_packages):
    """After installation, pip tells us what it installed and from where.

    We build a structure that looks like
    .cache/pip-faster/wheelhouse/$index_url/$wheel
    """
    for installed_package in installed_packages:
        # only wheels that came out of pip's own wheel cache qualify
        if not _can_be_cached(installed_package):
            continue
        _store_wheel_in_cache(installed_package.link.path, index_url)
def get_patched_download_http_url(orig_download_http_url, index_urls):
    """Wrap pip's ``_download_http_url`` so downloaded wheels also land in our wheelhouse.

    Returns a drop-in replacement with the same signature; any wheel
    fetched from one of *index_urls* is additionally stored in the
    pip-faster cache directory for that index.
    """
    def pipfaster_download_http_url(link, *args, **kwargs):
        file_path, content_type = orig_download_http_url(link, *args, **kwargs)
        if link.is_wheel:
            # match the wheel back to the configured index it came from
            for index_url in index_urls:
                if (
                    # pip <18.1
                    isinstance(link.comes_from, HTMLPage) and
                    link.comes_from.url.startswith(index_url)
                ) or (
                    # pip >= 18.1
                    isinstance(link.comes_from, (str, type(''))) and
                    link.comes_from.startswith(index_url)
                ):
                    _store_wheel_in_cache(file_path, index_url)
                    break
        return file_path, content_type
    return pipfaster_download_http_url
def pip(args):
    """Run pip, in-process."""
    from sys import stdout
    # echo the colorized command line so users see what is being run
    stdout.write(colorize(('pip',) + args))
    stdout.write('\n')
    stdout.flush()
    return pipmodule._internal.main(list(args))
def dist_to_req(dist):
    """Make a pip.FrozenRequirement from a pkg_resources distribution object"""
    try: # :pragma:nocover: (pip>=10)
        from pip._internal.operations.freeze import FrozenRequirement
    except ImportError: # :pragma:nocover: (pip<10)
        from pip import FrozenRequirement
    # normalize the casing, dashes in the req name
    # NOTE: dist is temporarily mutated here and restored below.
    orig_name, dist.project_name = dist.project_name, dist.key
    result = FrozenRequirement.from_dist(dist, [])
    # put things back the way we found it.
    dist.project_name = orig_name
    return result
def pip_get_installed():
    """Code extracted from the middle of the pip freeze command.
    FIXME: does not list anything installed via -e
    """
    from pip._internal.utils.misc import dist_is_local
    # only distributions local to this environment; 'python' itself is skipped
    return tuple(
        dist_to_req(dist)
        for dist in fresh_working_set()
        if dist_is_local(dist)
        if dist.key != 'python' # See #220
    )
def normalize_name(name):
    """Normalize a Python package name per PEP 503.

    Runs of ``-``, ``_`` and ``.`` collapse to a single dash and the
    result is lower-cased, e.g. ``Foo.Bar_baz`` -> ``foo-bar-baz``.
    """
    # https://www.python.org/dev/peps/pep-0503/#normalized-names
    collapsed = re.sub('[-_.]+', '-', name)
    return collapsed.lower()
def fresh_working_set():
    """return a pkg_resources "working set", representing the *currently* installed packages"""
    class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):
        """WorkingSet that honors egg-links and remembers PEP 503 name mappings."""
        def __init__(self, *args, **kwargs):
            # maps normalized (PEP 503) names back to each dist's actual key
            self._normalized_name_mapping = {}
            super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)
        def add_entry(self, entry):
            """Same as the original .add_entry, but sets only=False, so that egg-links are honored."""
            logger.debug('working-set entry: %r', entry)
            self.entry_keys.setdefault(entry, [])
            self.entries.append(entry)
            for dist in pkg_resources.find_distributions(entry, False):
                # eggs override anything that's installed normally
                # fun fact: pkg_resources.working_set's results depend on the
                # ordering of os.listdir since the order of os.listdir is
                # entirely arbitrary (an implementation detail of the file system);
                # without calling site.main(), an .egg-link file may or may not
                # be honored, depending on the filesystem
                replace = (dist.precedence == pkg_resources.EGG_DIST)
                self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
                self.add(dist, entry, False, replace=replace)
        def find_normalized(self, req):
            """Like .find(), but tolerant of non-normalized requirement names."""
            req = _package_req_to_pkg_resources_req(str(req))
            req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
            return self.find(req)
    return WorkingSetPlusEditableInstalls()
def req_cycle(req):
    """Detect a cycle in a requirement's ``comes_from`` chain.

    Walks parent links for as long as each parent is the same
    requirement type; returns True as soon as a package name repeats.
    """
    requirement_type = req.__class__
    visited = {req.name}
    current = req
    while isinstance(current.comes_from, requirement_type):
        current = current.comes_from
        if current.name in visited:
            return True
        visited.add(current.name)
    return False
def pretty_req(req):
    """Return a shallow copy of a pip requirement with noisy fields blanked.

    The copy has ``link`` and ``satisfied_by`` cleared so its repr is
    shorter; the original requirement object is left untouched.
    """
    from copy import copy
    clone = copy(req)
    clone.link = None
    clone.satisfied_by = None
    return clone
def _package_req_to_pkg_resources_req(req):
return pkg_resources.Requirement.parse(str(req))
def trace_requirements(requirements):
    """given an iterable of pip InstallRequirements,
    return the set of required packages, given their transitive requirements.
    """
    requirements = tuple(pretty_req(r) for r in requirements)
    working_set = fresh_working_set()
    # breadth-first traversal:
    from collections import deque
    queue = deque(requirements)
    # `queued` tracks everything ever enqueued, so each req is visited once
    queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
    errors = []
    result = []
    while queue:
        req = queue.popleft()
        logger.debug('tracing: %s', req)
        try:
            dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
        except pkg_resources.VersionConflict as conflict:
            # record the conflict but keep tracing with the installed dist
            dist = conflict.args[0]
            errors.append('Error: version conflict: {} ({}) <-> {}'.format(
                dist, timid_relpath(dist.location), req
            ))
        assert dist is not None, 'Should be unreachable in pip8+'
        result.append(dist_to_req(dist))
        # TODO: pip does no validation of extras. should we?
        extras = [extra for extra in req.extras if extra in dist.extras]
        for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
            sub_req = InstallRequirement(sub_req, req)
            if req_cycle(sub_req):
                logger.warning('Circular dependency! %s', sub_req)
                continue
            elif sub_req.req in queued:
                logger.debug('already queued: %s', sub_req)
                continue
            else:
                logger.debug('adding sub-requirement %s', sub_req)
                queue.append(sub_req)
                queued.add(sub_req.req)
    if errors:
        # all conflicts are reported together, after the full traversal
        raise InstallationError('\n'.join(errors))
    return result
def reqnames(reqs):
return {req.name for req in reqs}
class FasterInstallCommand(InstallCommand):
def __init__(self, *args, **kw):
super(FasterInstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--prune',
action='store_true',
dest='prune',
default=False,
help='Uninstall any non-required packages.',
)
cmd_opts.add_option(
'--no-prune',
action='store_false',
dest='prune',
help='Do not uninstall any non-required packages.',
)
def run(self, options, args):
"""update install options with caching values"""
if options.prune:
previously_installed = pip_get_installed()
index_urls = [options.index_url] + options.extra_index_urls
with pipfaster_download_cacher(index_urls):
requirement_set = super(FasterInstallCommand, self).run(
options, args,
)
required = requirement_set.requirements.values()
# With extra_index_urls we don't know where the wheel is from
if not options.extra_index_urls:
cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
if not options.ignore_dependencies:
# transitive requirements, previously installed, are also required
# this has a side-effect of finding any missing / conflicting requirements
required = trace_requirements(required)
if not options.prune:
return requirement_set
extraneous = (
reqnames(previously_installed) -
reqnames(required) -
# the stage1 bootstrap packages
reqnames(trace_requirements([install_req_from_line('venv-update')])) -
# See #186
frozenset(('pkg-resources',))
)
if extraneous:
extraneous = sorted(extraneous)
pip(('uninstall', '--yes') + tuple(extraneous))
# TODO: Cleanup: remove stale values from the cache and wheelhouse that have not been accessed in a week.
# TODO: a pip_faster.patch module
@contextmanager
def patched(attrs, updates):
"""A context in which some attributes temporarily have a modified value."""
orig = patch(attrs, updates.items())
try:
yield orig
finally:
patch(attrs, orig.items())
# END: pip_faster.patch module
def pipfaster_install_prune_option():
return patched(pipmodule._internal.commands.commands_dict, {FasterInstallCommand.name: FasterInstallCommand})
def pipfaster_packagefinder():
"""Provide a short-circuited search when the requirement is pinned and appears on disk.
Suggested upstream at: https://github.com/pypa/pip/pull/2114
"""
# A poor man's dependency injection: monkeypatch :(
try: # :pragma:nocover: pip>=18.1
from pip._internal.cli import base_command
except ImportError: # :pragma:nocover: pip<18.1
from pip._internal import basecommand as base_command
return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
def pipfaster_download_cacher(index_urls):
"""vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache
"""
from pip._internal import download
orig = download._download_http_url
patched_fn = get_patched_download_http_url(orig, index_urls)
return patched(vars(download), {'_download_http_url': patched_fn})
def main():
with pipfaster_install_prune_option():
with pipfaster_packagefinder():
raise_on_failure(pipmodule._internal.main)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | pip_faster.py | patched | python | def patched(attrs, updates):
orig = patch(attrs, updates.items())
try:
yield orig
finally:
patch(attrs, orig.items()) | A context in which some attributes temporarily have a modified value. | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L440-L446 | [
"def patch(attrs, updates):\n \"\"\"Perform a set of updates to a attribute dictionary, return the original values.\"\"\"\n orig = {}\n for attr, value in updates:\n orig[attr] = attrs[attr]\n attrs[attr] = value\n return orig\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pip-faster is a thin wrapper around pip.
It only adds a --prune option to the `install` subcommand.
`pip-faster install --prune` will *uninstall* any installed packages that are
not required.
Otherwise, you should find that pip-faster gives the same results as pip, just
more quickly, especially in the case of pinned requirements (e.g.
package-x==1.2.3).
Version control at: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import os
import random
import re
import shutil
import sys
from contextlib import contextmanager
import pip as pipmodule
from pip._internal import logger
from pip._internal.commands.install import InstallCommand
from pip._internal.exceptions import DistributionNotFound
from pip._internal.exceptions import InstallationError
from pip._internal.index import BestVersionAlreadyInstalled
from pip._internal.index import HTMLPage
from pip._internal.index import Link
from pip._internal.index import PackageFinder
from pip._internal.req import InstallRequirement
from pip._internal.wheel import Wheel
from venv_update import colorize
from venv_update import raise_on_failure
from venv_update import timid_relpath
from venv_update import user_cache_dir
# Debian de-vendorizes the version of pip it ships
try: # :pragma:nocover: non-debian
from pip._vendor import pkg_resources
except ImportError: # :pragma:nocover: debian
import pkg_resources
try: # :pragma:nocover: pip>=18.1
from pip._internal.req.constructors import install_req_from_line
except ImportError: # :pragma:nocover: pip<18.1
install_req_from_line = InstallRequirement.from_line
# Thanks six!
PY2 = str is bytes
if PY2: # :pragma:nocover:
_reraise_src = 'def reraise(tp, value, tb=None): raise tp, value, tb'
exec(_reraise_src)
else: # :pragma:nocover:
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
class CACHE(object):
_cache_dir = user_cache_dir()
wheelhouse = os.path.join(_cache_dir, 'pip-faster', 'wheelhouse')
pip_wheelhouse = os.path.join(_cache_dir, 'pip', 'wheels')
def ignorecase_glob(glob):
return ''.join([
'[{}{}]'.format(char.lower(), char.upper())
if char.isalpha() else char
for char in glob
])
def optimistic_wheel_search(req, index_urls):
name = req.name.replace('-', '_').lower()
for index_url in index_urls:
expected_location = os.path.join(
CACHE.wheelhouse, index_url, ignorecase_glob(name) + '-*.whl',
)
for link in glob.glob(expected_location):
link = Link('file:' + link)
wheel = Wheel(link.filename)
if req.specifier.contains(wheel.version) and wheel.supported():
return link
def is_req_pinned(requirement):
if not requirement:
# url-style requirement
return False
for spec in requirement.specifier:
if spec.operator == '==' and not spec.version.endswith('.*'):
return True
return False
class FasterPackageFinder(PackageFinder):
def find_requirement(self, req, upgrade):
if is_req_pinned(req.req):
# if the version is pinned-down by a ==
# first try to use any installed package that satisfies the req
if req.satisfied_by:
logger.info('Faster! pinned requirement already installed.')
raise BestVersionAlreadyInstalled
# then try an optimistic search for a .whl file:
link = optimistic_wheel_search(req.req, self.index_urls)
if link is None:
# The wheel will be built during prepare_files
logger.debug('No wheel found locally for pinned requirement %s', req)
else:
logger.info('Faster! Pinned wheel found, without hitting PyPI.')
return link
else:
# unpinned requirements aren't very notable. only show with -v
logger.info('slow: full search for unpinned requirement %s', req)
# otherwise, do the full network search, per usual
try:
return super(FasterPackageFinder, self).find_requirement(req, upgrade)
except DistributionNotFound:
exc_info = sys.exc_info()
# Best effort: try and install from suitable version on-disk
link = optimistic_wheel_search(req.req, self.index_urls)
if link:
return link
else:
reraise(*exc_info)
def _can_be_cached(package):
return (
package.is_wheel and
# An assertion that we're looking in the pip wheel dir
package.link.path.startswith(CACHE.pip_wheelhouse)
)
def mkdirp(pth):
try:
os.makedirs(pth)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _store_wheel_in_cache(file_path, index_url):
filename = os.path.basename(file_path)
cache = os.path.join(CACHE.wheelhouse, index_url, filename)
cache_tmp = '{}.{}'.format(cache, random.randint(0, sys.maxsize))
cache_dir = os.path.dirname(cache)
mkdirp(cache_dir)
# Atomicity
shutil.copy(file_path, cache_tmp)
os.rename(cache_tmp, cache)
def cache_installed_wheels(index_url, installed_packages):
"""After installation, pip tells us what it installed and from where.
We build a structure that looks like
.cache/pip-faster/wheelhouse/$index_url/$wheel
"""
for installed_package in installed_packages:
if not _can_be_cached(installed_package):
continue
_store_wheel_in_cache(installed_package.link.path, index_url)
def get_patched_download_http_url(orig_download_http_url, index_urls):
def pipfaster_download_http_url(link, *args, **kwargs):
file_path, content_type = orig_download_http_url(link, *args, **kwargs)
if link.is_wheel:
for index_url in index_urls:
if (
# pip <18.1
isinstance(link.comes_from, HTMLPage) and
link.comes_from.url.startswith(index_url)
) or (
# pip >= 18.1
isinstance(link.comes_from, (str, type(''))) and
link.comes_from.startswith(index_url)
):
_store_wheel_in_cache(file_path, index_url)
break
return file_path, content_type
return pipfaster_download_http_url
def pip(args):
"""Run pip, in-process."""
from sys import stdout
stdout.write(colorize(('pip',) + args))
stdout.write('\n')
stdout.flush()
return pipmodule._internal.main(list(args))
def dist_to_req(dist):
"""Make a pip.FrozenRequirement from a pkg_resources distribution object"""
try: # :pragma:nocover: (pip>=10)
from pip._internal.operations.freeze import FrozenRequirement
except ImportError: # :pragma:nocover: (pip<10)
from pip import FrozenRequirement
# normalize the casing, dashes in the req name
orig_name, dist.project_name = dist.project_name, dist.key
result = FrozenRequirement.from_dist(dist, [])
# put things back the way we found it.
dist.project_name = orig_name
return result
def pip_get_installed():
"""Code extracted from the middle of the pip freeze command.
FIXME: does not list anything installed via -e
"""
from pip._internal.utils.misc import dist_is_local
return tuple(
dist_to_req(dist)
for dist in fresh_working_set()
if dist_is_local(dist)
if dist.key != 'python' # See #220
)
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
def fresh_working_set():
"""return a pkg_resources "working set", representing the *currently* installed packages"""
class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):
def __init__(self, *args, **kwargs):
self._normalized_name_mapping = {}
super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)
def add_entry(self, entry):
"""Same as the original .add_entry, but sets only=False, so that egg-links are honored."""
logger.debug('working-set entry: %r', entry)
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in pkg_resources.find_distributions(entry, False):
# eggs override anything that's installed normally
# fun fact: pkg_resources.working_set's results depend on the
# ordering of os.listdir since the order of os.listdir is
# entirely arbitrary (an implemenation detail of file system),
# without calling site.main(), an .egg-link file may or may not
# be honored, depending on the filesystem
replace = (dist.precedence == pkg_resources.EGG_DIST)
self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
self.add(dist, entry, False, replace=replace)
def find_normalized(self, req):
req = _package_req_to_pkg_resources_req(str(req))
req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
return self.find(req)
return WorkingSetPlusEditableInstalls()
def req_cycle(req):
"""is this requirement cyclic?"""
cls = req.__class__
seen = {req.name}
while isinstance(req.comes_from, cls):
req = req.comes_from
if req.name in seen:
return True
else:
seen.add(req.name)
return False
def pretty_req(req):
"""
return a copy of a pip requirement that is a bit more readable,
at the expense of removing some of its data
"""
from copy import copy
req = copy(req)
req.link = None
req.satisfied_by = None
return req
def _package_req_to_pkg_resources_req(req):
return pkg_resources.Requirement.parse(str(req))
def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
"""
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result
def reqnames(reqs):
return {req.name for req in reqs}
class FasterInstallCommand(InstallCommand):
def __init__(self, *args, **kw):
super(FasterInstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--prune',
action='store_true',
dest='prune',
default=False,
help='Uninstall any non-required packages.',
)
cmd_opts.add_option(
'--no-prune',
action='store_false',
dest='prune',
help='Do not uninstall any non-required packages.',
)
def run(self, options, args):
"""update install options with caching values"""
if options.prune:
previously_installed = pip_get_installed()
index_urls = [options.index_url] + options.extra_index_urls
with pipfaster_download_cacher(index_urls):
requirement_set = super(FasterInstallCommand, self).run(
options, args,
)
required = requirement_set.requirements.values()
# With extra_index_urls we don't know where the wheel is from
if not options.extra_index_urls:
cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
if not options.ignore_dependencies:
# transitive requirements, previously installed, are also required
# this has a side-effect of finding any missing / conflicting requirements
required = trace_requirements(required)
if not options.prune:
return requirement_set
extraneous = (
reqnames(previously_installed) -
reqnames(required) -
# the stage1 bootstrap packages
reqnames(trace_requirements([install_req_from_line('venv-update')])) -
# See #186
frozenset(('pkg-resources',))
)
if extraneous:
extraneous = sorted(extraneous)
pip(('uninstall', '--yes') + tuple(extraneous))
# TODO: Cleanup: remove stale values from the cache and wheelhouse that have not been accessed in a week.
# TODO: a pip_faster.patch module
def patch(attrs, updates):
"""Perform a set of updates to a attribute dictionary, return the original values."""
orig = {}
for attr, value in updates:
orig[attr] = attrs[attr]
attrs[attr] = value
return orig
@contextmanager
# END: pip_faster.patch module
def pipfaster_install_prune_option():
return patched(pipmodule._internal.commands.commands_dict, {FasterInstallCommand.name: FasterInstallCommand})
def pipfaster_packagefinder():
"""Provide a short-circuited search when the requirement is pinned and appears on disk.
Suggested upstream at: https://github.com/pypa/pip/pull/2114
"""
# A poor man's dependency injection: monkeypatch :(
try: # :pragma:nocover: pip>=18.1
from pip._internal.cli import base_command
except ImportError: # :pragma:nocover: pip<18.1
from pip._internal import basecommand as base_command
return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
def pipfaster_download_cacher(index_urls):
"""vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache
"""
from pip._internal import download
orig = download._download_http_url
patched_fn = get_patched_download_http_url(orig, index_urls)
return patched(vars(download), {'_download_http_url': patched_fn})
def main():
with pipfaster_install_prune_option():
with pipfaster_packagefinder():
raise_on_failure(pipmodule._internal.main)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | pip_faster.py | pipfaster_packagefinder | python | def pipfaster_packagefinder():
# A poor man's dependency injection: monkeypatch :(
try: # :pragma:nocover: pip>=18.1
from pip._internal.cli import base_command
except ImportError: # :pragma:nocover: pip<18.1
from pip._internal import basecommand as base_command
return patched(vars(base_command), {'PackageFinder': FasterPackageFinder}) | Provide a short-circuited search when the requirement is pinned and appears on disk.
Suggested upstream at: https://github.com/pypa/pip/pull/2114 | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L454-L464 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pip-faster is a thin wrapper around pip.
It only adds a --prune option to the `install` subcommand.
`pip-faster install --prune` will *uninstall* any installed packages that are
not required.
Otherwise, you should find that pip-faster gives the same results as pip, just
more quickly, especially in the case of pinned requirements (e.g.
package-x==1.2.3).
Version control at: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import os
import random
import re
import shutil
import sys
from contextlib import contextmanager
import pip as pipmodule
from pip._internal import logger
from pip._internal.commands.install import InstallCommand
from pip._internal.exceptions import DistributionNotFound
from pip._internal.exceptions import InstallationError
from pip._internal.index import BestVersionAlreadyInstalled
from pip._internal.index import HTMLPage
from pip._internal.index import Link
from pip._internal.index import PackageFinder
from pip._internal.req import InstallRequirement
from pip._internal.wheel import Wheel
from venv_update import colorize
from venv_update import raise_on_failure
from venv_update import timid_relpath
from venv_update import user_cache_dir
# Debian de-vendorizes the version of pip it ships
try: # :pragma:nocover: non-debian
from pip._vendor import pkg_resources
except ImportError: # :pragma:nocover: debian
import pkg_resources
try: # :pragma:nocover: pip>=18.1
from pip._internal.req.constructors import install_req_from_line
except ImportError: # :pragma:nocover: pip<18.1
install_req_from_line = InstallRequirement.from_line
# Thanks six!
PY2 = str is bytes
if PY2: # :pragma:nocover:
_reraise_src = 'def reraise(tp, value, tb=None): raise tp, value, tb'
exec(_reraise_src)
else: # :pragma:nocover:
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
class CACHE(object):
_cache_dir = user_cache_dir()
wheelhouse = os.path.join(_cache_dir, 'pip-faster', 'wheelhouse')
pip_wheelhouse = os.path.join(_cache_dir, 'pip', 'wheels')
def ignorecase_glob(glob):
return ''.join([
'[{}{}]'.format(char.lower(), char.upper())
if char.isalpha() else char
for char in glob
])
def optimistic_wheel_search(req, index_urls):
name = req.name.replace('-', '_').lower()
for index_url in index_urls:
expected_location = os.path.join(
CACHE.wheelhouse, index_url, ignorecase_glob(name) + '-*.whl',
)
for link in glob.glob(expected_location):
link = Link('file:' + link)
wheel = Wheel(link.filename)
if req.specifier.contains(wheel.version) and wheel.supported():
return link
def is_req_pinned(requirement):
if not requirement:
# url-style requirement
return False
for spec in requirement.specifier:
if spec.operator == '==' and not spec.version.endswith('.*'):
return True
return False
class FasterPackageFinder(PackageFinder):
def find_requirement(self, req, upgrade):
if is_req_pinned(req.req):
# if the version is pinned-down by a ==
# first try to use any installed package that satisfies the req
if req.satisfied_by:
logger.info('Faster! pinned requirement already installed.')
raise BestVersionAlreadyInstalled
# then try an optimistic search for a .whl file:
link = optimistic_wheel_search(req.req, self.index_urls)
if link is None:
# The wheel will be built during prepare_files
logger.debug('No wheel found locally for pinned requirement %s', req)
else:
logger.info('Faster! Pinned wheel found, without hitting PyPI.')
return link
else:
# unpinned requirements aren't very notable. only show with -v
logger.info('slow: full search for unpinned requirement %s', req)
# otherwise, do the full network search, per usual
try:
return super(FasterPackageFinder, self).find_requirement(req, upgrade)
except DistributionNotFound:
exc_info = sys.exc_info()
# Best effort: try and install from suitable version on-disk
link = optimistic_wheel_search(req.req, self.index_urls)
if link:
return link
else:
reraise(*exc_info)
def _can_be_cached(package):
return (
package.is_wheel and
# An assertion that we're looking in the pip wheel dir
package.link.path.startswith(CACHE.pip_wheelhouse)
)
def mkdirp(pth):
try:
os.makedirs(pth)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _store_wheel_in_cache(file_path, index_url):
filename = os.path.basename(file_path)
cache = os.path.join(CACHE.wheelhouse, index_url, filename)
cache_tmp = '{}.{}'.format(cache, random.randint(0, sys.maxsize))
cache_dir = os.path.dirname(cache)
mkdirp(cache_dir)
# Atomicity
shutil.copy(file_path, cache_tmp)
os.rename(cache_tmp, cache)
def cache_installed_wheels(index_url, installed_packages):
"""After installation, pip tells us what it installed and from where.
We build a structure that looks like
.cache/pip-faster/wheelhouse/$index_url/$wheel
"""
for installed_package in installed_packages:
if not _can_be_cached(installed_package):
continue
_store_wheel_in_cache(installed_package.link.path, index_url)
def get_patched_download_http_url(orig_download_http_url, index_urls):
def pipfaster_download_http_url(link, *args, **kwargs):
file_path, content_type = orig_download_http_url(link, *args, **kwargs)
if link.is_wheel:
for index_url in index_urls:
if (
# pip <18.1
isinstance(link.comes_from, HTMLPage) and
link.comes_from.url.startswith(index_url)
) or (
# pip >= 18.1
isinstance(link.comes_from, (str, type(''))) and
link.comes_from.startswith(index_url)
):
_store_wheel_in_cache(file_path, index_url)
break
return file_path, content_type
return pipfaster_download_http_url
def pip(args):
"""Run pip, in-process."""
from sys import stdout
stdout.write(colorize(('pip',) + args))
stdout.write('\n')
stdout.flush()
return pipmodule._internal.main(list(args))
def dist_to_req(dist):
"""Make a pip.FrozenRequirement from a pkg_resources distribution object"""
try: # :pragma:nocover: (pip>=10)
from pip._internal.operations.freeze import FrozenRequirement
except ImportError: # :pragma:nocover: (pip<10)
from pip import FrozenRequirement
# normalize the casing, dashes in the req name
orig_name, dist.project_name = dist.project_name, dist.key
result = FrozenRequirement.from_dist(dist, [])
# put things back the way we found it.
dist.project_name = orig_name
return result
def pip_get_installed():
"""Code extracted from the middle of the pip freeze command.
FIXME: does not list anything installed via -e
"""
from pip._internal.utils.misc import dist_is_local
return tuple(
dist_to_req(dist)
for dist in fresh_working_set()
if dist_is_local(dist)
if dist.key != 'python' # See #220
)
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
def fresh_working_set():
"""return a pkg_resources "working set", representing the *currently* installed packages"""
class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):
def __init__(self, *args, **kwargs):
self._normalized_name_mapping = {}
super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)
def add_entry(self, entry):
"""Same as the original .add_entry, but sets only=False, so that egg-links are honored."""
logger.debug('working-set entry: %r', entry)
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in pkg_resources.find_distributions(entry, False):
# eggs override anything that's installed normally
# fun fact: pkg_resources.working_set's results depend on the
# ordering of os.listdir since the order of os.listdir is
# entirely arbitrary (an implemenation detail of file system),
# without calling site.main(), an .egg-link file may or may not
# be honored, depending on the filesystem
replace = (dist.precedence == pkg_resources.EGG_DIST)
self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
self.add(dist, entry, False, replace=replace)
def find_normalized(self, req):
req = _package_req_to_pkg_resources_req(str(req))
req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
return self.find(req)
return WorkingSetPlusEditableInstalls()
def req_cycle(req):
"""is this requirement cyclic?"""
cls = req.__class__
seen = {req.name}
while isinstance(req.comes_from, cls):
req = req.comes_from
if req.name in seen:
return True
else:
seen.add(req.name)
return False
def pretty_req(req):
"""
return a copy of a pip requirement that is a bit more readable,
at the expense of removing some of its data
"""
from copy import copy
req = copy(req)
req.link = None
req.satisfied_by = None
return req
def _package_req_to_pkg_resources_req(req):
return pkg_resources.Requirement.parse(str(req))
def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
"""
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result
def reqnames(reqs):
return {req.name for req in reqs}
class FasterInstallCommand(InstallCommand):
def __init__(self, *args, **kw):
super(FasterInstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--prune',
action='store_true',
dest='prune',
default=False,
help='Uninstall any non-required packages.',
)
cmd_opts.add_option(
'--no-prune',
action='store_false',
dest='prune',
help='Do not uninstall any non-required packages.',
)
def run(self, options, args):
"""update install options with caching values"""
if options.prune:
previously_installed = pip_get_installed()
index_urls = [options.index_url] + options.extra_index_urls
with pipfaster_download_cacher(index_urls):
requirement_set = super(FasterInstallCommand, self).run(
options, args,
)
required = requirement_set.requirements.values()
# With extra_index_urls we don't know where the wheel is from
if not options.extra_index_urls:
cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
if not options.ignore_dependencies:
# transitive requirements, previously installed, are also required
# this has a side-effect of finding any missing / conflicting requirements
required = trace_requirements(required)
if not options.prune:
return requirement_set
extraneous = (
reqnames(previously_installed) -
reqnames(required) -
# the stage1 bootstrap packages
reqnames(trace_requirements([install_req_from_line('venv-update')])) -
# See #186
frozenset(('pkg-resources',))
)
if extraneous:
extraneous = sorted(extraneous)
pip(('uninstall', '--yes') + tuple(extraneous))
# TODO: Cleanup: remove stale values from the cache and wheelhouse that have not been accessed in a week.
# TODO: a pip_faster.patch module
def patch(attrs, updates):
"""Perform a set of updates to a attribute dictionary, return the original values."""
orig = {}
for attr, value in updates:
orig[attr] = attrs[attr]
attrs[attr] = value
return orig
@contextmanager
def patched(attrs, updates):
"""A context in which some attributes temporarily have a modified value."""
orig = patch(attrs, updates.items())
try:
yield orig
finally:
patch(attrs, orig.items())
# END: pip_faster.patch module
def pipfaster_install_prune_option():
return patched(pipmodule._internal.commands.commands_dict, {FasterInstallCommand.name: FasterInstallCommand})
def pipfaster_download_cacher(index_urls):
"""vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache
"""
from pip._internal import download
orig = download._download_http_url
patched_fn = get_patched_download_http_url(orig, index_urls)
return patched(vars(download), {'_download_http_url': patched_fn})
def main():
with pipfaster_install_prune_option():
with pipfaster_packagefinder():
raise_on_failure(pipmodule._internal.main)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | pip_faster.py | pipfaster_download_cacher | python | def pipfaster_download_cacher(index_urls):
from pip._internal import download
orig = download._download_http_url
patched_fn = get_patched_download_http_url(orig, index_urls)
return patched(vars(download), {'_download_http_url': patched_fn}) | vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L467-L475 | [
"def get_patched_download_http_url(orig_download_http_url, index_urls):\n def pipfaster_download_http_url(link, *args, **kwargs):\n file_path, content_type = orig_download_http_url(link, *args, **kwargs)\n if link.is_wheel:\n for index_url in index_urls:\n if (\n # pip <18.1\n isinstance(link.comes_from, HTMLPage) and\n link.comes_from.url.startswith(index_url)\n ) or (\n # pip >= 18.1\n isinstance(link.comes_from, (str, type(''))) and\n link.comes_from.startswith(index_url)\n ):\n _store_wheel_in_cache(file_path, index_url)\n break\n return file_path, content_type\n return pipfaster_download_http_url\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pip-faster is a thin wrapper around pip.
It only adds a --prune option to the `install` subcommand.
`pip-faster install --prune` will *uninstall* any installed packages that are
not required.
Otherwise, you should find that pip-faster gives the same results as pip, just
more quickly, especially in the case of pinned requirements (e.g.
package-x==1.2.3).
Version control at: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import os
import random
import re
import shutil
import sys
from contextlib import contextmanager
import pip as pipmodule
from pip._internal import logger
from pip._internal.commands.install import InstallCommand
from pip._internal.exceptions import DistributionNotFound
from pip._internal.exceptions import InstallationError
from pip._internal.index import BestVersionAlreadyInstalled
from pip._internal.index import HTMLPage
from pip._internal.index import Link
from pip._internal.index import PackageFinder
from pip._internal.req import InstallRequirement
from pip._internal.wheel import Wheel
from venv_update import colorize
from venv_update import raise_on_failure
from venv_update import timid_relpath
from venv_update import user_cache_dir
# Debian de-vendorizes the version of pip it ships
try: # :pragma:nocover: non-debian
from pip._vendor import pkg_resources
except ImportError: # :pragma:nocover: debian
import pkg_resources
try: # :pragma:nocover: pip>=18.1
from pip._internal.req.constructors import install_req_from_line
except ImportError: # :pragma:nocover: pip<18.1
install_req_from_line = InstallRequirement.from_line
# Thanks six!
PY2 = str is bytes
if PY2: # :pragma:nocover:
_reraise_src = 'def reraise(tp, value, tb=None): raise tp, value, tb'
exec(_reraise_src)
else: # :pragma:nocover:
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
class CACHE(object):
_cache_dir = user_cache_dir()
wheelhouse = os.path.join(_cache_dir, 'pip-faster', 'wheelhouse')
pip_wheelhouse = os.path.join(_cache_dir, 'pip', 'wheels')
def ignorecase_glob(glob):
return ''.join([
'[{}{}]'.format(char.lower(), char.upper())
if char.isalpha() else char
for char in glob
])
def optimistic_wheel_search(req, index_urls):
name = req.name.replace('-', '_').lower()
for index_url in index_urls:
expected_location = os.path.join(
CACHE.wheelhouse, index_url, ignorecase_glob(name) + '-*.whl',
)
for link in glob.glob(expected_location):
link = Link('file:' + link)
wheel = Wheel(link.filename)
if req.specifier.contains(wheel.version) and wheel.supported():
return link
def is_req_pinned(requirement):
if not requirement:
# url-style requirement
return False
for spec in requirement.specifier:
if spec.operator == '==' and not spec.version.endswith('.*'):
return True
return False
class FasterPackageFinder(PackageFinder):
def find_requirement(self, req, upgrade):
if is_req_pinned(req.req):
# if the version is pinned-down by a ==
# first try to use any installed package that satisfies the req
if req.satisfied_by:
logger.info('Faster! pinned requirement already installed.')
raise BestVersionAlreadyInstalled
# then try an optimistic search for a .whl file:
link = optimistic_wheel_search(req.req, self.index_urls)
if link is None:
# The wheel will be built during prepare_files
logger.debug('No wheel found locally for pinned requirement %s', req)
else:
logger.info('Faster! Pinned wheel found, without hitting PyPI.')
return link
else:
# unpinned requirements aren't very notable. only show with -v
logger.info('slow: full search for unpinned requirement %s', req)
# otherwise, do the full network search, per usual
try:
return super(FasterPackageFinder, self).find_requirement(req, upgrade)
except DistributionNotFound:
exc_info = sys.exc_info()
# Best effort: try and install from suitable version on-disk
link = optimistic_wheel_search(req.req, self.index_urls)
if link:
return link
else:
reraise(*exc_info)
def _can_be_cached(package):
return (
package.is_wheel and
# An assertion that we're looking in the pip wheel dir
package.link.path.startswith(CACHE.pip_wheelhouse)
)
def mkdirp(pth):
try:
os.makedirs(pth)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _store_wheel_in_cache(file_path, index_url):
filename = os.path.basename(file_path)
cache = os.path.join(CACHE.wheelhouse, index_url, filename)
cache_tmp = '{}.{}'.format(cache, random.randint(0, sys.maxsize))
cache_dir = os.path.dirname(cache)
mkdirp(cache_dir)
# Atomicity
shutil.copy(file_path, cache_tmp)
os.rename(cache_tmp, cache)
def cache_installed_wheels(index_url, installed_packages):
"""After installation, pip tells us what it installed and from where.
We build a structure that looks like
.cache/pip-faster/wheelhouse/$index_url/$wheel
"""
for installed_package in installed_packages:
if not _can_be_cached(installed_package):
continue
_store_wheel_in_cache(installed_package.link.path, index_url)
def get_patched_download_http_url(orig_download_http_url, index_urls):
def pipfaster_download_http_url(link, *args, **kwargs):
file_path, content_type = orig_download_http_url(link, *args, **kwargs)
if link.is_wheel:
for index_url in index_urls:
if (
# pip <18.1
isinstance(link.comes_from, HTMLPage) and
link.comes_from.url.startswith(index_url)
) or (
# pip >= 18.1
isinstance(link.comes_from, (str, type(''))) and
link.comes_from.startswith(index_url)
):
_store_wheel_in_cache(file_path, index_url)
break
return file_path, content_type
return pipfaster_download_http_url
def pip(args):
"""Run pip, in-process."""
from sys import stdout
stdout.write(colorize(('pip',) + args))
stdout.write('\n')
stdout.flush()
return pipmodule._internal.main(list(args))
def dist_to_req(dist):
"""Make a pip.FrozenRequirement from a pkg_resources distribution object"""
try: # :pragma:nocover: (pip>=10)
from pip._internal.operations.freeze import FrozenRequirement
except ImportError: # :pragma:nocover: (pip<10)
from pip import FrozenRequirement
# normalize the casing, dashes in the req name
orig_name, dist.project_name = dist.project_name, dist.key
result = FrozenRequirement.from_dist(dist, [])
# put things back the way we found it.
dist.project_name = orig_name
return result
def pip_get_installed():
"""Code extracted from the middle of the pip freeze command.
FIXME: does not list anything installed via -e
"""
from pip._internal.utils.misc import dist_is_local
return tuple(
dist_to_req(dist)
for dist in fresh_working_set()
if dist_is_local(dist)
if dist.key != 'python' # See #220
)
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
def fresh_working_set():
"""return a pkg_resources "working set", representing the *currently* installed packages"""
class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):
def __init__(self, *args, **kwargs):
self._normalized_name_mapping = {}
super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)
def add_entry(self, entry):
"""Same as the original .add_entry, but sets only=False, so that egg-links are honored."""
logger.debug('working-set entry: %r', entry)
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in pkg_resources.find_distributions(entry, False):
# eggs override anything that's installed normally
# fun fact: pkg_resources.working_set's results depend on the
# ordering of os.listdir since the order of os.listdir is
# entirely arbitrary (an implemenation detail of file system),
# without calling site.main(), an .egg-link file may or may not
# be honored, depending on the filesystem
replace = (dist.precedence == pkg_resources.EGG_DIST)
self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
self.add(dist, entry, False, replace=replace)
def find_normalized(self, req):
req = _package_req_to_pkg_resources_req(str(req))
req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
return self.find(req)
return WorkingSetPlusEditableInstalls()
def req_cycle(req):
"""is this requirement cyclic?"""
cls = req.__class__
seen = {req.name}
while isinstance(req.comes_from, cls):
req = req.comes_from
if req.name in seen:
return True
else:
seen.add(req.name)
return False
def pretty_req(req):
"""
return a copy of a pip requirement that is a bit more readable,
at the expense of removing some of its data
"""
from copy import copy
req = copy(req)
req.link = None
req.satisfied_by = None
return req
def _package_req_to_pkg_resources_req(req):
return pkg_resources.Requirement.parse(str(req))
def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
"""
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result
def reqnames(reqs):
return {req.name for req in reqs}
class FasterInstallCommand(InstallCommand):
def __init__(self, *args, **kw):
super(FasterInstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--prune',
action='store_true',
dest='prune',
default=False,
help='Uninstall any non-required packages.',
)
cmd_opts.add_option(
'--no-prune',
action='store_false',
dest='prune',
help='Do not uninstall any non-required packages.',
)
def run(self, options, args):
"""update install options with caching values"""
if options.prune:
previously_installed = pip_get_installed()
index_urls = [options.index_url] + options.extra_index_urls
with pipfaster_download_cacher(index_urls):
requirement_set = super(FasterInstallCommand, self).run(
options, args,
)
required = requirement_set.requirements.values()
# With extra_index_urls we don't know where the wheel is from
if not options.extra_index_urls:
cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
if not options.ignore_dependencies:
# transitive requirements, previously installed, are also required
# this has a side-effect of finding any missing / conflicting requirements
required = trace_requirements(required)
if not options.prune:
return requirement_set
extraneous = (
reqnames(previously_installed) -
reqnames(required) -
# the stage1 bootstrap packages
reqnames(trace_requirements([install_req_from_line('venv-update')])) -
# See #186
frozenset(('pkg-resources',))
)
if extraneous:
extraneous = sorted(extraneous)
pip(('uninstall', '--yes') + tuple(extraneous))
# TODO: Cleanup: remove stale values from the cache and wheelhouse that have not been accessed in a week.
# TODO: a pip_faster.patch module
def patch(attrs, updates):
"""Perform a set of updates to a attribute dictionary, return the original values."""
orig = {}
for attr, value in updates:
orig[attr] = attrs[attr]
attrs[attr] = value
return orig
@contextmanager
def patched(attrs, updates):
"""A context in which some attributes temporarily have a modified value."""
orig = patch(attrs, updates.items())
try:
yield orig
finally:
patch(attrs, orig.items())
# END: pip_faster.patch module
def pipfaster_install_prune_option():
return patched(pipmodule._internal.commands.commands_dict, {FasterInstallCommand.name: FasterInstallCommand})
def pipfaster_packagefinder():
"""Provide a short-circuited search when the requirement is pinned and appears on disk.
Suggested upstream at: https://github.com/pypa/pip/pull/2114
"""
# A poor man's dependency injection: monkeypatch :(
try: # :pragma:nocover: pip>=18.1
from pip._internal.cli import base_command
except ImportError: # :pragma:nocover: pip<18.1
from pip._internal import basecommand as base_command
return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
def main():
with pipfaster_install_prune_option():
with pipfaster_packagefinder():
raise_on_failure(pipmodule._internal.main)
if __name__ == '__main__':
exit(main())
|
Yelp/venv-update | pip_faster.py | FasterInstallCommand.run | python | def run(self, options, args):
if options.prune:
previously_installed = pip_get_installed()
index_urls = [options.index_url] + options.extra_index_urls
with pipfaster_download_cacher(index_urls):
requirement_set = super(FasterInstallCommand, self).run(
options, args,
)
required = requirement_set.requirements.values()
# With extra_index_urls we don't know where the wheel is from
if not options.extra_index_urls:
cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
if not options.ignore_dependencies:
# transitive requirements, previously installed, are also required
# this has a side-effect of finding any missing / conflicting requirements
required = trace_requirements(required)
if not options.prune:
return requirement_set
extraneous = (
reqnames(previously_installed) -
reqnames(required) -
# the stage1 bootstrap packages
reqnames(trace_requirements([install_req_from_line('venv-update')])) -
# See #186
frozenset(('pkg-resources',))
)
if extraneous:
extraneous = sorted(extraneous)
pip(('uninstall', '--yes') + tuple(extraneous)) | update install options with caching values | train | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L387-L423 | [
"def pip(args):\n \"\"\"Run pip, in-process.\"\"\"\n from sys import stdout\n stdout.write(colorize(('pip',) + args))\n stdout.write('\\n')\n stdout.flush()\n\n return pipmodule._internal.main(list(args))\n",
"def cache_installed_wheels(index_url, installed_packages):\n \"\"\"After installation, pip tells us what it installed and from where.\n\n We build a structure that looks like\n\n .cache/pip-faster/wheelhouse/$index_url/$wheel\n \"\"\"\n for installed_package in installed_packages:\n if not _can_be_cached(installed_package):\n continue\n _store_wheel_in_cache(installed_package.link.path, index_url)\n",
"def pip_get_installed():\n \"\"\"Code extracted from the middle of the pip freeze command.\n FIXME: does not list anything installed via -e\n \"\"\"\n from pip._internal.utils.misc import dist_is_local\n\n return tuple(\n dist_to_req(dist)\n for dist in fresh_working_set()\n if dist_is_local(dist)\n if dist.key != 'python' # See #220\n )\n",
"def trace_requirements(requirements):\n \"\"\"given an iterable of pip InstallRequirements,\n return the set of required packages, given their transitive requirements.\n \"\"\"\n requirements = tuple(pretty_req(r) for r in requirements)\n working_set = fresh_working_set()\n\n # breadth-first traversal:\n from collections import deque\n queue = deque(requirements)\n queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}\n errors = []\n result = []\n while queue:\n req = queue.popleft()\n\n logger.debug('tracing: %s', req)\n try:\n dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))\n except pkg_resources.VersionConflict as conflict:\n dist = conflict.args[0]\n errors.append('Error: version conflict: {} ({}) <-> {}'.format(\n dist, timid_relpath(dist.location), req\n ))\n\n assert dist is not None, 'Should be unreachable in pip8+'\n result.append(dist_to_req(dist))\n\n # TODO: pip does no validation of extras. should we?\n extras = [extra for extra in req.extras if extra in dist.extras]\n for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):\n sub_req = InstallRequirement(sub_req, req)\n\n if req_cycle(sub_req):\n logger.warning('Circular dependency! %s', sub_req)\n continue\n elif sub_req.req in queued:\n logger.debug('already queued: %s', sub_req)\n continue\n else:\n logger.debug('adding sub-requirement %s', sub_req)\n queue.append(sub_req)\n queued.add(sub_req.req)\n\n if errors:\n raise InstallationError('\\n'.join(errors))\n\n return result\n",
"def reqnames(reqs):\n return {req.name for req in reqs}\n",
"def pipfaster_download_cacher(index_urls):\n \"\"\"vanilla pip stores a cache of the http session in its cache and not the\n wheel files. We intercept the download and save those files into our\n cache\n \"\"\"\n from pip._internal import download\n orig = download._download_http_url\n patched_fn = get_patched_download_http_url(orig, index_urls)\n return patched(vars(download), {'_download_http_url': patched_fn})\n"
] | class FasterInstallCommand(InstallCommand):
def __init__(self, *args, **kw):
super(FasterInstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--prune',
action='store_true',
dest='prune',
default=False,
help='Uninstall any non-required packages.',
)
cmd_opts.add_option(
'--no-prune',
action='store_false',
dest='prune',
help='Do not uninstall any non-required packages.',
)
|
fabiobatalha/crossrefapi | crossref/restful.py | Endpoint.version | python | def version(self):
request_params = dict(self.request_params)
request_url = str(self.request_url)
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
).json()
return result['message-version'] | This attribute retrieve the API version.
>>> Works().version
'1.0.0' | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L157-L174 | null | class Endpoint:
CURSOR_AS_ITER_METHOD = False
def __init__(self, request_url=None, request_params=None, context=None, etiquette=None, throttle=True):
self.do_http_request = HTTPRequest(throttle=throttle).do_http_request
self.etiquette = etiquette or Etiquette()
self.request_url = request_url or build_url_endpoint(self.ENDPOINT, context)
self.request_params = request_params or dict()
self.context = context or ''
@property
def _rate_limits(self):
request_params = dict(self.request_params)
request_url = str(self.request_url)
result = self.do_http_request(
'get',
request_url,
only_headers=True,
custom_header=str(self.etiquette),
throttle=False
)
rate_limits = {
'X-Rate-Limit-Limit': result.headers.get('X-Rate-Limit-Limit', 'undefined'),
'X-Rate-Limit-Interval': result.headers.get('X-Rate-Limit-Interval', 'undefined')
}
return rate_limits
def _escaped_pagging(self):
escape_pagging = ['offset', 'rows']
request_params = dict(self.request_params)
for item in escape_pagging:
try:
del(request_params[item])
except KeyError:
pass
return request_params
@property
@property
def x_rate_limit_limit(self):
return self._rate_limits.get('X-Rate-Limit-Limit', 'undefined')
@property
def x_rate_limit_interval(self):
return self._rate_limits.get('X-Rate-Limit-Interval', 'undefined')
def count(self):
"""
This method retrieve the total of records resulting from a given query.
This attribute can be used compounded with query, filter,
sort, order and facet methods.
Examples:
>>> from crossref.restful import Works
>>> Works().query('zika').count()
3597
>>> Works().query('zika').filter(prefix='10.1590').count()
61
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').count()
14
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').count()
1
"""
request_params = dict(self.request_params)
request_url = str(self.request_url)
request_params['rows'] = 0
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
).json()
return int(result['message']['total-results'])
@property
def url(self):
"""
This attribute retrieve the url that will be used as a HTTP request to
the Crossref API.
This attribute can be used compounded with query, filter,
sort, order and facet methods.
Examples:
>>> from crossref.restful import Works
>>> Works().query('zika').url
'https://api.crossref.org/works?query=zika'
>>> Works().query('zika').filter(prefix='10.1590').url
'https://api.crossref.org/works?query=zika&filter=prefix%3A10.1590'
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').url
'https://api.crossref.org/works?sort=published&order=desc&query=zika&filter=prefix%3A10.1590'
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').url
'https://api.crossref.org/works?sort=published&filter=prefix%3A10.1590%2Chas-abstract%3Atrue&query=zika&order=desc&query.author=Marli'
"""
request_params = self._escaped_pagging()
sorted_request_params = sorted([(k, v) for k, v in request_params.items()])
req = requests.Request(
'get', self.request_url, params=sorted_request_params).prepare()
return req.url
def all(self):
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = {}
return iter(self.__class__(request_url, request_params, context, self.etiquette))
def __iter__(self):
request_url = str(self.request_url)
if 'sample' in self.request_params:
request_params = self._escaped_pagging()
result = self.do_http_request(
'get',
self.request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
raise StopIteration()
result = result.json()
for item in result['message']['items']:
yield item
return
if self.CURSOR_AS_ITER_METHOD is True:
request_params = dict(self.request_params)
request_params['cursor'] = '*'
request_params['rows'] = LIMIT
while True:
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
raise StopIteration()
result = result.json()
if len(result['message']['items']) == 0:
return
for item in result['message']['items']:
yield item
request_params['cursor'] = result['message']['next-cursor']
else:
request_params = dict(self.request_params)
request_params['offset'] = 0
request_params['rows'] = LIMIT
while True:
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
raise StopIteration()
result = result.json()
if len(result['message']['items']) == 0:
return
for item in result['message']['items']:
yield item
request_params['offset'] += LIMIT + 1
if request_params['offset'] >= MAXOFFSET:
raise MaxOffsetError(
'Offset exceded the max offset of %d',
MAXOFFSET
)
|
fabiobatalha/crossrefapi | crossref/restful.py | Endpoint.count | python | def count(self):
request_params = dict(self.request_params)
request_url = str(self.request_url)
request_params['rows'] = 0
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
).json()
return int(result['message']['total-results']) | This method retrieve the total of records resulting from a given query.
This attribute can be used compounded with query, filter,
sort, order and facet methods.
Examples:
>>> from crossref.restful import Works
>>> Works().query('zika').count()
3597
>>> Works().query('zika').filter(prefix='10.1590').count()
61
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').count()
14
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').count()
1 | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L186-L215 | null | class Endpoint:
CURSOR_AS_ITER_METHOD = False
def __init__(self, request_url=None, request_params=None, context=None, etiquette=None, throttle=True):
self.do_http_request = HTTPRequest(throttle=throttle).do_http_request
self.etiquette = etiquette or Etiquette()
self.request_url = request_url or build_url_endpoint(self.ENDPOINT, context)
self.request_params = request_params or dict()
self.context = context or ''
@property
def _rate_limits(self):
request_params = dict(self.request_params)
request_url = str(self.request_url)
result = self.do_http_request(
'get',
request_url,
only_headers=True,
custom_header=str(self.etiquette),
throttle=False
)
rate_limits = {
'X-Rate-Limit-Limit': result.headers.get('X-Rate-Limit-Limit', 'undefined'),
'X-Rate-Limit-Interval': result.headers.get('X-Rate-Limit-Interval', 'undefined')
}
return rate_limits
def _escaped_pagging(self):
escape_pagging = ['offset', 'rows']
request_params = dict(self.request_params)
for item in escape_pagging:
try:
del(request_params[item])
except KeyError:
pass
return request_params
@property
def version(self):
"""
This attribute retrieve the API version.
>>> Works().version
'1.0.0'
"""
request_params = dict(self.request_params)
request_url = str(self.request_url)
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
).json()
return result['message-version']
@property
def x_rate_limit_limit(self):
return self._rate_limits.get('X-Rate-Limit-Limit', 'undefined')
@property
def x_rate_limit_interval(self):
return self._rate_limits.get('X-Rate-Limit-Interval', 'undefined')
@property
def url(self):
"""
This attribute retrieve the url that will be used as a HTTP request to
the Crossref API.
This attribute can be used compounded with query, filter,
sort, order and facet methods.
Examples:
>>> from crossref.restful import Works
>>> Works().query('zika').url
'https://api.crossref.org/works?query=zika'
>>> Works().query('zika').filter(prefix='10.1590').url
'https://api.crossref.org/works?query=zika&filter=prefix%3A10.1590'
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').url
'https://api.crossref.org/works?sort=published&order=desc&query=zika&filter=prefix%3A10.1590'
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').url
'https://api.crossref.org/works?sort=published&filter=prefix%3A10.1590%2Chas-abstract%3Atrue&query=zika&order=desc&query.author=Marli'
"""
request_params = self._escaped_pagging()
sorted_request_params = sorted([(k, v) for k, v in request_params.items()])
req = requests.Request(
'get', self.request_url, params=sorted_request_params).prepare()
return req.url
def all(self):
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = {}
return iter(self.__class__(request_url, request_params, context, self.etiquette))
def __iter__(self):
request_url = str(self.request_url)
if 'sample' in self.request_params:
request_params = self._escaped_pagging()
result = self.do_http_request(
'get',
self.request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
raise StopIteration()
result = result.json()
for item in result['message']['items']:
yield item
return
if self.CURSOR_AS_ITER_METHOD is True:
request_params = dict(self.request_params)
request_params['cursor'] = '*'
request_params['rows'] = LIMIT
while True:
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
raise StopIteration()
result = result.json()
if len(result['message']['items']) == 0:
return
for item in result['message']['items']:
yield item
request_params['cursor'] = result['message']['next-cursor']
else:
request_params = dict(self.request_params)
request_params['offset'] = 0
request_params['rows'] = LIMIT
while True:
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
raise StopIteration()
result = result.json()
if len(result['message']['items']) == 0:
return
for item in result['message']['items']:
yield item
request_params['offset'] += LIMIT + 1
if request_params['offset'] >= MAXOFFSET:
raise MaxOffsetError(
'Offset exceded the max offset of %d',
MAXOFFSET
)
|
fabiobatalha/crossrefapi | crossref/restful.py | Endpoint.url | python | def url(self):
request_params = self._escaped_pagging()
sorted_request_params = sorted([(k, v) for k, v in request_params.items()])
req = requests.Request(
'get', self.request_url, params=sorted_request_params).prepare()
return req.url | This attribute retrieve the url that will be used as a HTTP request to
the Crossref API.
This attribute can be used compounded with query, filter,
sort, order and facet methods.
Examples:
>>> from crossref.restful import Works
>>> Works().query('zika').url
'https://api.crossref.org/works?query=zika'
>>> Works().query('zika').filter(prefix='10.1590').url
'https://api.crossref.org/works?query=zika&filter=prefix%3A10.1590'
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').url
'https://api.crossref.org/works?sort=published&order=desc&query=zika&filter=prefix%3A10.1590'
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').url
'https://api.crossref.org/works?sort=published&filter=prefix%3A10.1590%2Chas-abstract%3Atrue&query=zika&order=desc&query.author=Marli' | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L218-L243 | [
"def _escaped_pagging(self):\n escape_pagging = ['offset', 'rows']\n request_params = dict(self.request_params)\n\n for item in escape_pagging:\n try:\n del(request_params[item])\n except KeyError:\n pass\n\n return request_params\n"
] | class Endpoint:
CURSOR_AS_ITER_METHOD = False
def __init__(self, request_url=None, request_params=None, context=None, etiquette=None, throttle=True):
self.do_http_request = HTTPRequest(throttle=throttle).do_http_request
self.etiquette = etiquette or Etiquette()
self.request_url = request_url or build_url_endpoint(self.ENDPOINT, context)
self.request_params = request_params or dict()
self.context = context or ''
@property
def _rate_limits(self):
request_params = dict(self.request_params)
request_url = str(self.request_url)
result = self.do_http_request(
'get',
request_url,
only_headers=True,
custom_header=str(self.etiquette),
throttle=False
)
rate_limits = {
'X-Rate-Limit-Limit': result.headers.get('X-Rate-Limit-Limit', 'undefined'),
'X-Rate-Limit-Interval': result.headers.get('X-Rate-Limit-Interval', 'undefined')
}
return rate_limits
def _escaped_pagging(self):
escape_pagging = ['offset', 'rows']
request_params = dict(self.request_params)
for item in escape_pagging:
try:
del(request_params[item])
except KeyError:
pass
return request_params
@property
def version(self):
"""
This attribute retrieve the API version.
>>> Works().version
'1.0.0'
"""
request_params = dict(self.request_params)
request_url = str(self.request_url)
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
).json()
return result['message-version']
@property
def x_rate_limit_limit(self):
return self._rate_limits.get('X-Rate-Limit-Limit', 'undefined')
@property
def x_rate_limit_interval(self):
return self._rate_limits.get('X-Rate-Limit-Interval', 'undefined')
def count(self):
"""
This method retrieve the total of records resulting from a given query.
This attribute can be used compounded with query, filter,
sort, order and facet methods.
Examples:
>>> from crossref.restful import Works
>>> Works().query('zika').count()
3597
>>> Works().query('zika').filter(prefix='10.1590').count()
61
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').count()
14
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').count()
1
"""
request_params = dict(self.request_params)
request_url = str(self.request_url)
request_params['rows'] = 0
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
).json()
return int(result['message']['total-results'])
@property
def all(self):
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = {}
return iter(self.__class__(request_url, request_params, context, self.etiquette))
def __iter__(self):
request_url = str(self.request_url)
if 'sample' in self.request_params:
request_params = self._escaped_pagging()
result = self.do_http_request(
'get',
self.request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
raise StopIteration()
result = result.json()
for item in result['message']['items']:
yield item
return
if self.CURSOR_AS_ITER_METHOD is True:
request_params = dict(self.request_params)
request_params['cursor'] = '*'
request_params['rows'] = LIMIT
while True:
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
raise StopIteration()
result = result.json()
if len(result['message']['items']) == 0:
return
for item in result['message']['items']:
yield item
request_params['cursor'] = result['message']['next-cursor']
else:
request_params = dict(self.request_params)
request_params['offset'] = 0
request_params['rows'] = LIMIT
while True:
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
raise StopIteration()
result = result.json()
if len(result['message']['items']) == 0:
return
for item in result['message']['items']:
yield item
request_params['offset'] += LIMIT + 1
if request_params['offset'] >= MAXOFFSET:
raise MaxOffsetError(
'Offset exceded the max offset of %d',
MAXOFFSET
)
|
fabiobatalha/crossrefapi | crossref/restful.py | Works.order | python | def order(self, order='asc'):
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
if order not in self.ORDER_VALUES:
raise UrlSyntaxError(
'Sort order specified as %s but must be one of: %s' % (
str(order),
', '.join(self.ORDER_VALUES)
)
)
request_params['order'] = order
return self.__class__(request_url, request_params, context, self.etiquette) | This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded with query, filter,
sort and facet methods.
kwargs: valid SORT_VALUES arguments.
return: iterable object of Works metadata
Example 1:
>>> from crossref.restful import Works
>>> works.query('zika').sort('deposited').order('asc').url
'https://api.crossref.org/works?sort=deposited&query=zika&order=asc'
>>> query = works.query('zika').sort('deposited').order('asc')
>>> for item in query:
... print(item['title'], item['deposited']['date-time'])
...
['A Facile Preparation of 1-(6-Hydroxyindol-1-yl)-2,2-dimethylpropan-1-one'] 2007-02-13T20:56:13Z
['Contributions to the Flora of the Lake Champlain Valley, New York and Vermont, III'] 2007-02-13T20:56:13Z
['Pilularia americana A. Braun in Klamath County, Oregon'] 2007-02-13T20:56:13Z
...
Example 2:
>>> from crossref.restful import Works
>>> works.query('zika').sort('deposited').order('desc').url
'https://api.crossref.org/works?sort=deposited&query=zika&order=desc'
>>> query = works.query('zika').sort('deposited').order('desc')
>>> for item in query:
... print(item['title'], item['deposited']['date-time'])
...
["Planning for the unexpected: Ebola virus, Zika virus, what's next?"] 2017-05-29T12:55:53Z
['Sensitivity of RT-PCR method in samples shown to be positive for Zika virus by RT-qPCR in vector competence studies'] 2017-05-29T12:53:54Z
['Re-evaluation of routine dengue virus serology in travelers in the era of Zika virus emergence'] 2017-05-29T10:46:11Z
... | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L535-L589 | [
"def build_url_endpoint(endpoint, context=None):\n\n endpoint = '/'.join([i for i in [context, endpoint] if i])\n\n return 'https://%s/%s' % (API, endpoint)\n"
] | class Works(Endpoint):
CURSOR_AS_ITER_METHOD = True
ENDPOINT = 'works'
ORDER_VALUES = ('asc', 'desc', '1', '-1')
SORT_VALUES = (
'created',
'deposited',
'indexed',
'is-referenced-by-count',
'issued',
'published',
'published-online',
'published-print',
'references-count',
'relevance',
'score',
'submitted',
'updated'
)
FIELDS_QUERY = (
'affiliation',
'author',
'bibliographic',
'chair',
'container_title',
'contributor',
'editor',
'event_acronym',
'event_location',
'event_name',
'event_sponsor',
'event_theme',
'funder_name',
'publisher_location',
'publisher_name',
'title',
'translator'
)
FIELDS_SELECT = (
'abstract',
'URL',
'member',
'posted',
'score',
'created',
'degree',
'update-policy',
'short-title',
'license',
'ISSN',
'container-title',
'issued',
'update-to',
'issue',
'prefix',
'approved',
'indexed',
'article-number',
'clinical-trial-number',
'accepted',
'author',
'group-title',
'DOI',
'is-referenced-by-count',
'updated-by',
'event',
'chair',
'standards-body',
'original-title',
'funder',
'translator',
'archive',
'published-print',
'alternative-id',
'subject',
'subtitle',
'published-online',
'publisher-location',
'content-domain',
'reference',
'title',
'link',
'type',
'publisher',
'volume',
'references-count',
'ISBN',
'issn-type',
'assertion',
'deposited',
'page',
'content-created',
'short-container-title',
'relation',
'editor'
)
FILTER_VALIDATOR = {
'alternative_id': None,
'archive': validators.archive,
'article_number': None,
'assertion': None,
'assertion-group': None,
'award.funder': None,
'award.number': None,
'category-name': None,
'clinical-trial-number': None,
'container-title': None,
'content-domain': None,
'directory': validators.directory,
'doi': None,
'from-accepted-date': validators.is_date,
'from-created-date': validators.is_date,
'from-deposit-date': validators.is_date,
'from-event-end-date': validators.is_date,
'from-event-start-date': validators.is_date,
'from-index-date': validators.is_date,
'from-issued-date': validators.is_date,
'from-online-pub-date': validators.is_date,
'from-posted-date': validators.is_date,
'from-print-pub-date': validators.is_date,
'from-pub-date': validators.is_date,
'from-update-date': validators.is_date,
'full-text.application': None,
'full-text.type': None,
'full-text.version': None,
'funder': None,
'funder-doi-asserted-by': None,
'group-title': None,
'has-abstract': validators.is_bool,
'has-affiliation': validators.is_bool,
'has-archive': validators.is_bool,
'has-assertion': validators.is_bool,
'has-authenticated-orcid': validators.is_bool,
'has-award': validators.is_bool,
'has-clinical-trial-number': validators.is_bool,
'has-content-domain': validators.is_bool,
'has-domain-restriction': validators.is_bool,
'has-event': validators.is_bool,
'has-full-text': validators.is_bool,
'has-funder': validators.is_bool,
'has-funder-doi': validators.is_bool,
'has-license': validators.is_bool,
'has-orcid': validators.is_bool,
'has-references': validators.is_bool,
'has-relation': validators.is_bool,
'has-update': validators.is_bool,
'has-update-policy': validators.is_bool,
'is-update': validators.is_bool,
'isbn': None,
'issn': None,
'license.delay': validators.is_integer,
'license.url': None,
'license.version': None,
'location': None,
'member': validators.is_integer,
'orcid': None,
'prefix': None,
'relation.object': None,
'relation.object-type': None,
'relation.type': None,
'type': validators.document_type,
'type-name': None,
'until-accepted-date': validators.is_date,
'until-created-date': validators.is_date,
'until-deposit-date': validators.is_date,
'until-event-end-date': validators.is_date,
'until-event-start-date': validators.is_date,
'until-index-date': validators.is_date,
'until-issued-date': validators.is_date,
'until-online-pub-date': validators.is_date,
'until-posted-date': validators.is_date,
'until-print-pub-date': validators.is_date,
'until-pub-date': validators.is_date,
'until-update-date': validators.is_date,
'update-type': None,
'updates': None
}
FACET_VALUES = {
'archive': None,
'affiliation': None,
'assertion': None,
'assertion-group': None,
'category-name': None,
'container-title': 1000,
'license': None,
'funder-doi': None,
'funder-name': None,
'issn': 1000,
'orcid': 1000,
'published': None,
'publisher-name': None,
'relation-type': None,
'source': None,
'type-name': None,
'update-type': None
}
def select(self, *args):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded with query, filter,
sort and facet methods.
args: valid FIELDS_SELECT arguments.
return: iterable object of Works metadata
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> for i in works.filter(has_funder='true', has_license='true').sample(5).select('DOI, prefix'):
... print(i)
...
{'DOI': '10.1016/j.jdiacomp.2016.06.005', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.mssp.2015.07.076', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1002/slct.201700168', 'prefix': '10.1002', 'member': 'http://id.crossref.org/member/311'}
{'DOI': '10.1016/j.actbio.2017.01.034', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.optcom.2013.11.013', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
...
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> for i in works.filter(has_funder='true', has_license='true').sample(5).select('DOI').select('prefix'):
>>> print(i)
...
{'DOI': '10.1016/j.sajb.2016.03.010', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.jneumeth.2009.08.017', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.tetlet.2016.05.058', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1007/s00170-017-0689-z', 'prefix': '10.1007', 'member': 'http://id.crossref.org/member/297'}
{'DOI': '10.1016/j.dsr.2016.03.004', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
...
Example: 3:
>>> from crossref.restful import Works
>>> works = Works()
>>>: for i in works.filter(has_funder='true', has_license='true').sample(5).select(['DOI', 'prefix']):
>>> print(i)
...
{'DOI': '10.1111/zoj.12146', 'prefix': '10.1093', 'member': 'http://id.crossref.org/member/286'}
{'DOI': '10.1016/j.bios.2014.04.018', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.cej.2016.10.011', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.dci.2017.08.001', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.icheatmasstransfer.2016.09.012', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
...
Example: 4:
>>> from crossref.restful import Works
>>> works = Works()
>>>: for i in works.filter(has_funder='true', has_license='true').sample(5).select('DOI', 'prefix'):
>>> print(i)
...
{'DOI': '10.1111/zoj.12146', 'prefix': '10.1093', 'member': 'http://id.crossref.org/member/286'}
{'DOI': '10.1016/j.bios.2014.04.018', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.cej.2016.10.011', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.dci.2017.08.001', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.icheatmasstransfer.2016.09.012', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
select_args = []
invalid_select_args = []
for item in args:
if isinstance(item, list):
select_args += [i.strip() for i in item]
if isinstance(item, str):
select_args += [i.strip() for i in item.split(',')]
invalid_select_args = set(select_args) - set(self.FIELDS_SELECT)
if len(invalid_select_args) != 0:
raise UrlSyntaxError(
'Select field\'s specified as (%s) but must be one of: %s' % (
', '.join(invalid_select_args),
', '.join(self.FIELDS_SELECT)
)
)
request_params['select'] = ','.join(
sorted([i for i in set(request_params.get('select', '').split(',') + select_args) if i])
)
return self.__class__(request_url, request_params, context, self.etiquette)
def sort(self, sort='score'):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded with query, filter,
order and facet methods.
kwargs: valid SORT_VALUES arguments.
return: iterable object of Works metadata
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.sort('deposited')
>>> for item in query:
... print(item['title'])
...
['Integralidade e transdisciplinaridade em equipes multiprofissionais na saúde coletiva']
['Aprendizagem em grupo operativo de diabetes: uma abordagem etnográfica']
['A rotatividade de enfermeiros e médicos: um impasse na implementação da Estratégia de Saúde da Família']
...
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.sort('relevance')
>>> for item in query:
... print(item['title'])
...
['Proceedings of the American Physical Society']
['Annual Meeting of the Research Society on Alcoholism']
['Local steroid injections: Comment on the American college of rheumatology guidelines for the management of osteoarthritis of the hip and on the letter by Swezey']
['Intraventricular neurocytoma']
['Mammography accreditation']
['Temporal lobe necrosis in nasopharyngeal carcinoma: Pictorial essay']
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
if sort not in self.SORT_VALUES:
raise UrlSyntaxError(
'Sort field specified as %s but must be one of: %s' % (
str(sort),
', '.join(self.SORT_VALUES)
)
)
request_params['sort'] = sort
return self.__class__(request_url, request_params, context, self.etiquette)
def filter(self, **kwargs):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded and recursively with query, filter,
order, sort and facet methods.
kwargs: valid FILTER_VALIDATOR arguments.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.filter(has_funder='true', has_license='true')
>>> for item in query:
... print(item['title'])
...
['Design of smiling-face-shaped band-notched UWB antenna']
['Phase I clinical and pharmacokinetic study of PM01183 (a tetrahydroisoquinoline, Lurbinectedin) in combination with gemcitabine in patients with advanced solid tumors']
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
for fltr, value in kwargs.items():
decoded_fltr = fltr.replace('__', '.').replace('_', '-')
if decoded_fltr not in self.FILTER_VALIDATOR.keys():
raise UrlSyntaxError(
'Filter %s specified but there is no such filter for this route. Valid filters for this route are: %s' % (
str(decoded_fltr),
', '.join(self.FILTER_VALIDATOR.keys())
)
)
if self.FILTER_VALIDATOR[decoded_fltr] is not None:
self.FILTER_VALIDATOR[decoded_fltr](str(value))
if 'filter' not in request_params:
request_params['filter'] = decoded_fltr + ':' + str(value)
else:
request_params['filter'] += ',' + decoded_fltr + ':' + str(value)
return self.__class__(request_url, request_params, context, self.etiquette)
def facet(self, facet_name, facet_count=100):
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
request_params['rows'] = 0
if facet_name not in self.FACET_VALUES.keys():
raise UrlSyntaxError('Facet %s specified but there is no such facet for this route. Valid facets for this route are: *, affiliation, funder-name, funder-doi, publisher-name, orcid, container-title, assertion, archive, update-type, issn, published, source, type-name, license, category-name, relation-type, assertion-group' %
str(facet_name),
', '.join(self.FACET_VALUES.keys())
)
facet_count = self.FACET_VALUES[facet_name] if self.FACET_VALUES[facet_name] is not None and self.FACET_VALUES[facet_name] <= facet_count else facet_count
request_params['facet'] = '%s:%s' % (facet_name, facet_count)
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
).json()
return result['message']['facets']
def query(self, *args, **kwargs):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded and recursively with query, filter,
order, sort and facet methods.
args: strings (String)
kwargs: valid FIELDS_QUERY arguments.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.query('Zika Virus')
>>> query.url
'https://api.crossref.org/works?query=Zika+Virus'
>>> for item in query:
... print(item['title'])
...
['Zika Virus']
['Zika virus disease']
['Zika Virus: Laboratory Diagnosis']
['Spread of Zika virus disease']
['Carditis in Zika Virus Infection']
['Understanding Zika virus']
['Zika Virus: History and Infectology']
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
if args:
request_params['query'] = ' '.join([str(i) for i in args])
for field, value in kwargs.items():
if field not in self.FIELDS_QUERY:
raise UrlSyntaxError(
'Field query %s specified but there is no such field query for this route. Valid field queries for this route are: %s' % (
str(field), ', '.join(self.FIELDS_QUERY)
)
)
request_params['query.%s' % field.replace('_', '-')] = value
return self.__class__(request_url, request_params, context, self.etiquette)
def sample(self, sample_size=20):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
kwargs: sample_size (Integer) between 0 and 100.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.sample(2).url
'https://api.crossref.org/works?sample=2'
>>> [i['title'] for i in works.sample(2)]
[['A study on the hemolytic properties ofPrevotella nigrescens'],
['The geometry and the radial breathing mode of carbon nanotubes: beyond the ideal behaviour']]
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
try:
if sample_size > 100:
raise UrlSyntaxError(
'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
)
except TypeError:
raise UrlSyntaxError(
'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
)
request_params['sample'] = sample_size
return self.__class__(request_url, request_params, context, self.etiquette)
def doi(self, doi, only_message=True):
"""
This method retrieve the DOI metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi('10.1590/S0004-28032013005000001')
{'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001',
'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref',
'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [],
'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z',
'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article',
'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001',
'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z',
'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []},
'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z',
'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}],
'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'],
'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530',
'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]},
'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'],
'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'],
'abstract': '<jats:p>Significant abstract data..... .</jats:p>',
'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [
{'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk',
'given': 'Ismael'}, {'affiliation': [
{'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}],
'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [
{'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'},
{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [
{'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho',
'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result
def agency(self, doi, only_message=True):
"""
This method retrieve the DOI Agency metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.agency('10.1590/S0004-28032013005000001')
{'DOI': '10.1590/s0004-28032013005000001', 'agency': {'label': 'CrossRef', 'id': 'crossref'}}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi, 'agency'])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result
def doi_exists(self, doi):
"""
This method retrieve a boolean according to the existence of a crossref
DOI number. It returns False if the API results a 404 status code.
args: Crossref DOI id (String)
return: Boolean
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001')
True
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001_invalid_doi')
False
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
only_headers=True,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return False
return True
|
fabiobatalha/crossrefapi | crossref/restful.py | Works.select | python | def select(self, *args):
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
select_args = []
invalid_select_args = []
for item in args:
if isinstance(item, list):
select_args += [i.strip() for i in item]
if isinstance(item, str):
select_args += [i.strip() for i in item.split(',')]
invalid_select_args = set(select_args) - set(self.FIELDS_SELECT)
if len(invalid_select_args) != 0:
raise UrlSyntaxError(
'Select field\'s specified as (%s) but must be one of: %s' % (
', '.join(invalid_select_args),
', '.join(self.FIELDS_SELECT)
)
)
request_params['select'] = ','.join(
sorted([i for i in set(request_params.get('select', '').split(',') + select_args) if i])
)
return self.__class__(request_url, request_params, context, self.etiquette) | This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded with query, filter,
sort and facet methods.
args: valid FIELDS_SELECT arguments.
return: iterable object of Works metadata
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> for i in works.filter(has_funder='true', has_license='true').sample(5).select('DOI, prefix'):
... print(i)
...
{'DOI': '10.1016/j.jdiacomp.2016.06.005', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.mssp.2015.07.076', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1002/slct.201700168', 'prefix': '10.1002', 'member': 'http://id.crossref.org/member/311'}
{'DOI': '10.1016/j.actbio.2017.01.034', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.optcom.2013.11.013', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
...
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> for i in works.filter(has_funder='true', has_license='true').sample(5).select('DOI').select('prefix'):
>>> print(i)
...
{'DOI': '10.1016/j.sajb.2016.03.010', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.jneumeth.2009.08.017', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.tetlet.2016.05.058', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1007/s00170-017-0689-z', 'prefix': '10.1007', 'member': 'http://id.crossref.org/member/297'}
{'DOI': '10.1016/j.dsr.2016.03.004', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
...
Example: 3:
>>> from crossref.restful import Works
>>> works = Works()
>>>: for i in works.filter(has_funder='true', has_license='true').sample(5).select(['DOI', 'prefix']):
>>> print(i)
...
{'DOI': '10.1111/zoj.12146', 'prefix': '10.1093', 'member': 'http://id.crossref.org/member/286'}
{'DOI': '10.1016/j.bios.2014.04.018', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.cej.2016.10.011', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.dci.2017.08.001', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.icheatmasstransfer.2016.09.012', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
...
Example: 4:
>>> from crossref.restful import Works
>>> works = Works()
>>>: for i in works.filter(has_funder='true', has_license='true').sample(5).select('DOI', 'prefix'):
>>> print(i)
...
{'DOI': '10.1111/zoj.12146', 'prefix': '10.1093', 'member': 'http://id.crossref.org/member/286'}
{'DOI': '10.1016/j.bios.2014.04.018', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.cej.2016.10.011', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.dci.2017.08.001', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.icheatmasstransfer.2016.09.012', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
... | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L591-L683 | [
"def build_url_endpoint(endpoint, context=None):\n\n endpoint = '/'.join([i for i in [context, endpoint] if i])\n\n return 'https://%s/%s' % (API, endpoint)\n"
] | class Works(Endpoint):
CURSOR_AS_ITER_METHOD = True
ENDPOINT = 'works'
ORDER_VALUES = ('asc', 'desc', '1', '-1')
SORT_VALUES = (
'created',
'deposited',
'indexed',
'is-referenced-by-count',
'issued',
'published',
'published-online',
'published-print',
'references-count',
'relevance',
'score',
'submitted',
'updated'
)
FIELDS_QUERY = (
'affiliation',
'author',
'bibliographic',
'chair',
'container_title',
'contributor',
'editor',
'event_acronym',
'event_location',
'event_name',
'event_sponsor',
'event_theme',
'funder_name',
'publisher_location',
'publisher_name',
'title',
'translator'
)
FIELDS_SELECT = (
'abstract',
'URL',
'member',
'posted',
'score',
'created',
'degree',
'update-policy',
'short-title',
'license',
'ISSN',
'container-title',
'issued',
'update-to',
'issue',
'prefix',
'approved',
'indexed',
'article-number',
'clinical-trial-number',
'accepted',
'author',
'group-title',
'DOI',
'is-referenced-by-count',
'updated-by',
'event',
'chair',
'standards-body',
'original-title',
'funder',
'translator',
'archive',
'published-print',
'alternative-id',
'subject',
'subtitle',
'published-online',
'publisher-location',
'content-domain',
'reference',
'title',
'link',
'type',
'publisher',
'volume',
'references-count',
'ISBN',
'issn-type',
'assertion',
'deposited',
'page',
'content-created',
'short-container-title',
'relation',
'editor'
)
FILTER_VALIDATOR = {
'alternative_id': None,
'archive': validators.archive,
'article_number': None,
'assertion': None,
'assertion-group': None,
'award.funder': None,
'award.number': None,
'category-name': None,
'clinical-trial-number': None,
'container-title': None,
'content-domain': None,
'directory': validators.directory,
'doi': None,
'from-accepted-date': validators.is_date,
'from-created-date': validators.is_date,
'from-deposit-date': validators.is_date,
'from-event-end-date': validators.is_date,
'from-event-start-date': validators.is_date,
'from-index-date': validators.is_date,
'from-issued-date': validators.is_date,
'from-online-pub-date': validators.is_date,
'from-posted-date': validators.is_date,
'from-print-pub-date': validators.is_date,
'from-pub-date': validators.is_date,
'from-update-date': validators.is_date,
'full-text.application': None,
'full-text.type': None,
'full-text.version': None,
'funder': None,
'funder-doi-asserted-by': None,
'group-title': None,
'has-abstract': validators.is_bool,
'has-affiliation': validators.is_bool,
'has-archive': validators.is_bool,
'has-assertion': validators.is_bool,
'has-authenticated-orcid': validators.is_bool,
'has-award': validators.is_bool,
'has-clinical-trial-number': validators.is_bool,
'has-content-domain': validators.is_bool,
'has-domain-restriction': validators.is_bool,
'has-event': validators.is_bool,
'has-full-text': validators.is_bool,
'has-funder': validators.is_bool,
'has-funder-doi': validators.is_bool,
'has-license': validators.is_bool,
'has-orcid': validators.is_bool,
'has-references': validators.is_bool,
'has-relation': validators.is_bool,
'has-update': validators.is_bool,
'has-update-policy': validators.is_bool,
'is-update': validators.is_bool,
'isbn': None,
'issn': None,
'license.delay': validators.is_integer,
'license.url': None,
'license.version': None,
'location': None,
'member': validators.is_integer,
'orcid': None,
'prefix': None,
'relation.object': None,
'relation.object-type': None,
'relation.type': None,
'type': validators.document_type,
'type-name': None,
'until-accepted-date': validators.is_date,
'until-created-date': validators.is_date,
'until-deposit-date': validators.is_date,
'until-event-end-date': validators.is_date,
'until-event-start-date': validators.is_date,
'until-index-date': validators.is_date,
'until-issued-date': validators.is_date,
'until-online-pub-date': validators.is_date,
'until-posted-date': validators.is_date,
'until-print-pub-date': validators.is_date,
'until-pub-date': validators.is_date,
'until-update-date': validators.is_date,
'update-type': None,
'updates': None
}
FACET_VALUES = {
'archive': None,
'affiliation': None,
'assertion': None,
'assertion-group': None,
'category-name': None,
'container-title': 1000,
'license': None,
'funder-doi': None,
'funder-name': None,
'issn': 1000,
'orcid': 1000,
'published': None,
'publisher-name': None,
'relation-type': None,
'source': None,
'type-name': None,
'update-type': None
}
def order(self, order='asc'):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded with query, filter,
sort and facet methods.
kwargs: valid SORT_VALUES arguments.
return: iterable object of Works metadata
Example 1:
>>> from crossref.restful import Works
>>> works.query('zika').sort('deposited').order('asc').url
'https://api.crossref.org/works?sort=deposited&query=zika&order=asc'
>>> query = works.query('zika').sort('deposited').order('asc')
>>> for item in query:
... print(item['title'], item['deposited']['date-time'])
...
['A Facile Preparation of 1-(6-Hydroxyindol-1-yl)-2,2-dimethylpropan-1-one'] 2007-02-13T20:56:13Z
['Contributions to the Flora of the Lake Champlain Valley, New York and Vermont, III'] 2007-02-13T20:56:13Z
['Pilularia americana A. Braun in Klamath County, Oregon'] 2007-02-13T20:56:13Z
...
Example 2:
>>> from crossref.restful import Works
>>> works.query('zika').sort('deposited').order('desc').url
'https://api.crossref.org/works?sort=deposited&query=zika&order=desc'
>>> query = works.query('zika').sort('deposited').order('desc')
>>> for item in query:
... print(item['title'], item['deposited']['date-time'])
...
["Planning for the unexpected: Ebola virus, Zika virus, what's next?"] 2017-05-29T12:55:53Z
['Sensitivity of RT-PCR method in samples shown to be positive for Zika virus by RT-qPCR in vector competence studies'] 2017-05-29T12:53:54Z
['Re-evaluation of routine dengue virus serology in travelers in the era of Zika virus emergence'] 2017-05-29T10:46:11Z
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
if order not in self.ORDER_VALUES:
raise UrlSyntaxError(
'Sort order specified as %s but must be one of: %s' % (
str(order),
', '.join(self.ORDER_VALUES)
)
)
request_params['order'] = order
return self.__class__(request_url, request_params, context, self.etiquette)
def sort(self, sort='score'):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded with query, filter,
order and facet methods.
kwargs: valid SORT_VALUES arguments.
return: iterable object of Works metadata
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.sort('deposited')
>>> for item in query:
... print(item['title'])
...
['Integralidade e transdisciplinaridade em equipes multiprofissionais na saúde coletiva']
['Aprendizagem em grupo operativo de diabetes: uma abordagem etnográfica']
['A rotatividade de enfermeiros e médicos: um impasse na implementação da Estratégia de Saúde da Família']
...
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.sort('relevance')
>>> for item in query:
... print(item['title'])
...
['Proceedings of the American Physical Society']
['Annual Meeting of the Research Society on Alcoholism']
['Local steroid injections: Comment on the American college of rheumatology guidelines for the management of osteoarthritis of the hip and on the letter by Swezey']
['Intraventricular neurocytoma']
['Mammography accreditation']
['Temporal lobe necrosis in nasopharyngeal carcinoma: Pictorial essay']
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
if sort not in self.SORT_VALUES:
raise UrlSyntaxError(
'Sort field specified as %s but must be one of: %s' % (
str(sort),
', '.join(self.SORT_VALUES)
)
)
request_params['sort'] = sort
return self.__class__(request_url, request_params, context, self.etiquette)
def filter(self, **kwargs):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded and recursively with query, filter,
order, sort and facet methods.
kwargs: valid FILTER_VALIDATOR arguments.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.filter(has_funder='true', has_license='true')
>>> for item in query:
... print(item['title'])
...
['Design of smiling-face-shaped band-notched UWB antenna']
['Phase I clinical and pharmacokinetic study of PM01183 (a tetrahydroisoquinoline, Lurbinectedin) in combination with gemcitabine in patients with advanced solid tumors']
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
for fltr, value in kwargs.items():
decoded_fltr = fltr.replace('__', '.').replace('_', '-')
if decoded_fltr not in self.FILTER_VALIDATOR.keys():
raise UrlSyntaxError(
'Filter %s specified but there is no such filter for this route. Valid filters for this route are: %s' % (
str(decoded_fltr),
', '.join(self.FILTER_VALIDATOR.keys())
)
)
if self.FILTER_VALIDATOR[decoded_fltr] is not None:
self.FILTER_VALIDATOR[decoded_fltr](str(value))
if 'filter' not in request_params:
request_params['filter'] = decoded_fltr + ':' + str(value)
else:
request_params['filter'] += ',' + decoded_fltr + ':' + str(value)
return self.__class__(request_url, request_params, context, self.etiquette)
def facet(self, facet_name, facet_count=100):
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
request_params['rows'] = 0
if facet_name not in self.FACET_VALUES.keys():
raise UrlSyntaxError('Facet %s specified but there is no such facet for this route. Valid facets for this route are: *, affiliation, funder-name, funder-doi, publisher-name, orcid, container-title, assertion, archive, update-type, issn, published, source, type-name, license, category-name, relation-type, assertion-group' %
str(facet_name),
', '.join(self.FACET_VALUES.keys())
)
facet_count = self.FACET_VALUES[facet_name] if self.FACET_VALUES[facet_name] is not None and self.FACET_VALUES[facet_name] <= facet_count else facet_count
request_params['facet'] = '%s:%s' % (facet_name, facet_count)
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
).json()
return result['message']['facets']
def query(self, *args, **kwargs):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded and recursively with query, filter,
order, sort and facet methods.
args: strings (String)
kwargs: valid FIELDS_QUERY arguments.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.query('Zika Virus')
>>> query.url
'https://api.crossref.org/works?query=Zika+Virus'
>>> for item in query:
... print(item['title'])
...
['Zika Virus']
['Zika virus disease']
['Zika Virus: Laboratory Diagnosis']
['Spread of Zika virus disease']
['Carditis in Zika Virus Infection']
['Understanding Zika virus']
['Zika Virus: History and Infectology']
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
if args:
request_params['query'] = ' '.join([str(i) for i in args])
for field, value in kwargs.items():
if field not in self.FIELDS_QUERY:
raise UrlSyntaxError(
'Field query %s specified but there is no such field query for this route. Valid field queries for this route are: %s' % (
str(field), ', '.join(self.FIELDS_QUERY)
)
)
request_params['query.%s' % field.replace('_', '-')] = value
return self.__class__(request_url, request_params, context, self.etiquette)
def sample(self, sample_size=20):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
kwargs: sample_size (Integer) between 0 and 100.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.sample(2).url
'https://api.crossref.org/works?sample=2'
>>> [i['title'] for i in works.sample(2)]
[['A study on the hemolytic properties ofPrevotella nigrescens'],
['The geometry and the radial breathing mode of carbon nanotubes: beyond the ideal behaviour']]
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
try:
if sample_size > 100:
raise UrlSyntaxError(
'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
)
except TypeError:
raise UrlSyntaxError(
'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
)
request_params['sample'] = sample_size
return self.__class__(request_url, request_params, context, self.etiquette)
def doi(self, doi, only_message=True):
"""
This method retrieve the DOI metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi('10.1590/S0004-28032013005000001')
{'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001',
'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref',
'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [],
'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z',
'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article',
'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001',
'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z',
'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []},
'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z',
'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}],
'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'],
'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530',
'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]},
'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'],
'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'],
'abstract': '<jats:p>Significant abstract data..... .</jats:p>',
'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [
{'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk',
'given': 'Ismael'}, {'affiliation': [
{'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}],
'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [
{'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'},
{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [
{'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho',
'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result
def agency(self, doi, only_message=True):
"""
This method retrieve the DOI Agency metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.agency('10.1590/S0004-28032013005000001')
{'DOI': '10.1590/s0004-28032013005000001', 'agency': {'label': 'CrossRef', 'id': 'crossref'}}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi, 'agency'])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result
def doi_exists(self, doi):
"""
This method retrieve a boolean according to the existence of a crossref
DOI number. It returns False if the API results a 404 status code.
args: Crossref DOI id (String)
return: Boolean
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001')
True
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001_invalid_doi')
False
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
only_headers=True,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return False
return True
|
fabiobatalha/crossrefapi | crossref/restful.py | Works.sort | python | def sort(self, sort='score'):
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
if sort not in self.SORT_VALUES:
raise UrlSyntaxError(
'Sort field specified as %s but must be one of: %s' % (
str(sort),
', '.join(self.SORT_VALUES)
)
)
request_params['sort'] = sort
return self.__class__(request_url, request_params, context, self.etiquette) | This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded with query, filter,
order and facet methods.
kwargs: valid SORT_VALUES arguments.
return: iterable object of Works metadata
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.sort('deposited')
>>> for item in query:
... print(item['title'])
...
['Integralidade e transdisciplinaridade em equipes multiprofissionais na saúde coletiva']
['Aprendizagem em grupo operativo de diabetes: uma abordagem etnográfica']
['A rotatividade de enfermeiros e médicos: um impasse na implementação da Estratégia de Saúde da Família']
...
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.sort('relevance')
>>> for item in query:
... print(item['title'])
...
['Proceedings of the American Physical Society']
['Annual Meeting of the Research Society on Alcoholism']
['Local steroid injections: Comment on the American college of rheumatology guidelines for the management of osteoarthritis of the hip and on the letter by Swezey']
['Intraventricular neurocytoma']
['Mammography accreditation']
['Temporal lobe necrosis in nasopharyngeal carcinoma: Pictorial essay']
... | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L685-L739 | [
"def build_url_endpoint(endpoint, context=None):\n\n endpoint = '/'.join([i for i in [context, endpoint] if i])\n\n return 'https://%s/%s' % (API, endpoint)\n"
] | class Works(Endpoint):
CURSOR_AS_ITER_METHOD = True
ENDPOINT = 'works'
ORDER_VALUES = ('asc', 'desc', '1', '-1')
SORT_VALUES = (
'created',
'deposited',
'indexed',
'is-referenced-by-count',
'issued',
'published',
'published-online',
'published-print',
'references-count',
'relevance',
'score',
'submitted',
'updated'
)
FIELDS_QUERY = (
'affiliation',
'author',
'bibliographic',
'chair',
'container_title',
'contributor',
'editor',
'event_acronym',
'event_location',
'event_name',
'event_sponsor',
'event_theme',
'funder_name',
'publisher_location',
'publisher_name',
'title',
'translator'
)
FIELDS_SELECT = (
'abstract',
'URL',
'member',
'posted',
'score',
'created',
'degree',
'update-policy',
'short-title',
'license',
'ISSN',
'container-title',
'issued',
'update-to',
'issue',
'prefix',
'approved',
'indexed',
'article-number',
'clinical-trial-number',
'accepted',
'author',
'group-title',
'DOI',
'is-referenced-by-count',
'updated-by',
'event',
'chair',
'standards-body',
'original-title',
'funder',
'translator',
'archive',
'published-print',
'alternative-id',
'subject',
'subtitle',
'published-online',
'publisher-location',
'content-domain',
'reference',
'title',
'link',
'type',
'publisher',
'volume',
'references-count',
'ISBN',
'issn-type',
'assertion',
'deposited',
'page',
'content-created',
'short-container-title',
'relation',
'editor'
)
FILTER_VALIDATOR = {
'alternative_id': None,
'archive': validators.archive,
'article_number': None,
'assertion': None,
'assertion-group': None,
'award.funder': None,
'award.number': None,
'category-name': None,
'clinical-trial-number': None,
'container-title': None,
'content-domain': None,
'directory': validators.directory,
'doi': None,
'from-accepted-date': validators.is_date,
'from-created-date': validators.is_date,
'from-deposit-date': validators.is_date,
'from-event-end-date': validators.is_date,
'from-event-start-date': validators.is_date,
'from-index-date': validators.is_date,
'from-issued-date': validators.is_date,
'from-online-pub-date': validators.is_date,
'from-posted-date': validators.is_date,
'from-print-pub-date': validators.is_date,
'from-pub-date': validators.is_date,
'from-update-date': validators.is_date,
'full-text.application': None,
'full-text.type': None,
'full-text.version': None,
'funder': None,
'funder-doi-asserted-by': None,
'group-title': None,
'has-abstract': validators.is_bool,
'has-affiliation': validators.is_bool,
'has-archive': validators.is_bool,
'has-assertion': validators.is_bool,
'has-authenticated-orcid': validators.is_bool,
'has-award': validators.is_bool,
'has-clinical-trial-number': validators.is_bool,
'has-content-domain': validators.is_bool,
'has-domain-restriction': validators.is_bool,
'has-event': validators.is_bool,
'has-full-text': validators.is_bool,
'has-funder': validators.is_bool,
'has-funder-doi': validators.is_bool,
'has-license': validators.is_bool,
'has-orcid': validators.is_bool,
'has-references': validators.is_bool,
'has-relation': validators.is_bool,
'has-update': validators.is_bool,
'has-update-policy': validators.is_bool,
'is-update': validators.is_bool,
'isbn': None,
'issn': None,
'license.delay': validators.is_integer,
'license.url': None,
'license.version': None,
'location': None,
'member': validators.is_integer,
'orcid': None,
'prefix': None,
'relation.object': None,
'relation.object-type': None,
'relation.type': None,
'type': validators.document_type,
'type-name': None,
'until-accepted-date': validators.is_date,
'until-created-date': validators.is_date,
'until-deposit-date': validators.is_date,
'until-event-end-date': validators.is_date,
'until-event-start-date': validators.is_date,
'until-index-date': validators.is_date,
'until-issued-date': validators.is_date,
'until-online-pub-date': validators.is_date,
'until-posted-date': validators.is_date,
'until-print-pub-date': validators.is_date,
'until-pub-date': validators.is_date,
'until-update-date': validators.is_date,
'update-type': None,
'updates': None
}
FACET_VALUES = {
'archive': None,
'affiliation': None,
'assertion': None,
'assertion-group': None,
'category-name': None,
'container-title': 1000,
'license': None,
'funder-doi': None,
'funder-name': None,
'issn': 1000,
'orcid': 1000,
'published': None,
'publisher-name': None,
'relation-type': None,
'source': None,
'type-name': None,
'update-type': None
}
def order(self, order='asc'):
    """
    Return a new iterable endpoint with the result ordering applied.

    Can be chained with query, filter, sort and facet.

    order: one of ORDER_VALUES ('asc', 'desc', '1', '-1').

    return: iterable object of Works metadata

    raises: UrlSyntaxError when order is not a valid ordering.

    Example:
        >>> from crossref.restful import Works
        >>> works = Works()
        >>> works.query('zika').sort('deposited').order('asc').url
        'https://api.crossref.org/works?sort=deposited&query=zika&order=asc'
    """
    # Validate before doing any work; the error lists the accepted values.
    if order not in self.ORDER_VALUES:
        raise UrlSyntaxError(
            'Sort order specified as %s but must be one of: %s' % (
                str(order),
                ', '.join(self.ORDER_VALUES)
            )
        )

    context = str(self.context)
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    params = dict(self.request_params)
    params['order'] = order

    # Endpoints are immutable: chaining always yields a fresh instance.
    return self.__class__(endpoint_url, params, context, self.etiquette)
def select(self, *args):
    """
    Return a new iterable endpoint restricted to the given metadata fields.

    Can be chained with query, filter, sort and facet; repeated calls
    accumulate fields.

    args: field names given as strings (optionally comma separated) and/or
        lists of strings; every field must be one of FIELDS_SELECT.

    return: iterable object of Works metadata

    raises: UrlSyntaxError when an unknown field is requested.

    Example:
        >>> from crossref.restful import Works
        >>> works = Works()
        >>> works.select('DOI, prefix').url
        'https://api.crossref.org/works?select=DOI%2Cprefix'
    """
    # Normalize every argument (list or comma-separated string) into a
    # flat list of stripped field names; other argument types are ignored,
    # matching the historical behavior.
    requested = []
    for arg in args:
        if isinstance(arg, list):
            requested += [field.strip() for field in arg]
        if isinstance(arg, str):
            requested += [field.strip() for field in arg.split(',')]

    unknown = set(requested) - set(self.FIELDS_SELECT)
    if len(unknown) != 0:
        raise UrlSyntaxError(
            'Select field\'s specified as (%s) but must be one of: %s' % (
                ', '.join(unknown),
                ', '.join(self.FIELDS_SELECT)
            )
        )

    context = str(self.context)
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    params = dict(self.request_params)

    # Merge with any fields selected by earlier .select() calls, dropping
    # empties and keeping a deterministic (sorted) order.
    previous = params.get('select', '').split(',')
    merged = {field for field in previous + requested if field}
    params['select'] = ','.join(sorted(merged))

    return self.__class__(endpoint_url, params, context, self.etiquette)
def filter(self, **kwargs):
    """
    Return a new iterable endpoint with the given filters applied.

    Can be chained recursively with query, filter, order, sort and facet.
    Keyword names are decoded before lookup: ``__`` maps to ``.`` and
    ``_`` maps to ``-`` (e.g. ``license__url`` -> ``license.url``,
    ``has_funder`` -> ``has-funder``).

    kwargs: valid FILTER_VALIDATOR arguments.

    return: iterable object of Works metadata

    raises: UrlSyntaxError when a filter name is unknown for this route,
        or when its value fails the associated validator.

    Example:
        >>> from crossref.restful import Works
        >>> works = Works()
        >>> works.filter(has_funder='true', has_license='true').url
        'https://api.crossref.org/works?filter=has-funder%3Atrue%2Chas-license%3Atrue'
    """
    context = str(self.context)
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    params = dict(self.request_params)

    for name, value in kwargs.items():
        fltr = name.replace('__', '.').replace('_', '-')

        if fltr not in self.FILTER_VALIDATOR.keys():
            raise UrlSyntaxError(
                'Filter %s specified but there is no such filter for this route. Valid filters for this route are: %s' % (
                    str(fltr),
                    ', '.join(self.FILTER_VALIDATOR.keys())
                )
            )

        # A validator of None means any value is accepted for this filter.
        validator = self.FILTER_VALIDATOR[fltr]
        if validator is not None:
            validator(str(value))

        # Crossref expects all filters in one comma-separated parameter.
        entry = fltr + ':' + str(value)
        if 'filter' in params:
            params['filter'] += ',' + entry
        else:
            params['filter'] = entry

    return self.__class__(endpoint_url, params, context, self.etiquette)
def facet(self, facet_name, facet_count=100):
    """
    Retrieve facet counts for the current query.

    facet_name: one of FACET_VALUES.
    facet_count: maximum number of facet values to return; clamped to the
        per-facet limit declared in FACET_VALUES when one exists.

    return: dict with the ``facets`` content of the API response.

    raises: UrlSyntaxError when facet_name is not a valid facet for this
        route.
    """
    context = str(self.context)
    request_url = build_url_endpoint(self.ENDPOINT, context)
    request_params = dict(self.request_params)
    # Facet queries only need the aggregated counts, not the result items.
    request_params['rows'] = 0

    if facet_name not in self.FACET_VALUES.keys():
        # BUG FIX: the message previously hard-coded a stale facet list and
        # passed the real list as a stray second argument to UrlSyntaxError
        # instead of interpolating it; both values now feed the two %s
        # placeholders.
        raise UrlSyntaxError(
            'Facet %s specified but there is no such facet for this route. Valid facets for this route are: %s' % (
                str(facet_name),
                ', '.join(self.FACET_VALUES.keys())
            )
        )

    # Clamp the requested count at the API limit for this facet, if any.
    facet_count = self.FACET_VALUES[facet_name] if self.FACET_VALUES[facet_name] is not None and self.FACET_VALUES[facet_name] <= facet_count else facet_count

    request_params['facet'] = '%s:%s' % (facet_name, facet_count)

    result = self.do_http_request(
        'get',
        request_url,
        data=request_params,
        custom_header=str(self.etiquette)
    ).json()

    return result['message']['facets']
def query(self, *args, **kwargs):
    """
    Return a new iterable endpoint with free-text queries applied.

    Can be chained recursively with query, filter, order, sort and facet.

    args: free-text terms joined into the main ``query`` parameter.
    kwargs: field queries; each key must be one of FIELDS_QUERY and
        underscores in the key are translated to hyphens.

    return: iterable object of Works metadata

    raises: UrlSyntaxError when a field query is unknown for this route.

    Example:
        >>> from crossref.restful import Works
        >>> works = Works()
        >>> works.query('Zika Virus').url
        'https://api.crossref.org/works?query=Zika+Virus'
    """
    context = str(self.context)
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    params = dict(self.request_params)

    if args:
        params['query'] = ' '.join([str(term) for term in args])

    for field, value in kwargs.items():
        if field not in self.FIELDS_QUERY:
            raise UrlSyntaxError(
                'Field query %s specified but there is no such field query for this route. Valid field queries for this route are: %s' % (
                    str(field), ', '.join(self.FIELDS_QUERY)
                )
            )
        # e.g. container_title -> query.container-title
        params['query.%s' % field.replace('_', '-')] = value

    return self.__class__(endpoint_url, params, context, self.etiquette)
def sample(self, sample_size=20):
    """
    Return a new iterable endpoint that retrieves a random sample of items.

    sample_size: (Integer) number of random items, between 0 and 100.

    return: iterable object of Works metadata

    raises: UrlSyntaxError when sample_size is not a number between 0
        and 100.

    Example:
        >>> from crossref.restful import Works
        >>> works = Works()
        >>> works.sample(2).url
        'https://api.crossref.org/works?sample=2'
    """
    context = str(self.context)
    request_url = build_url_endpoint(self.ENDPOINT, context)
    request_params = dict(self.request_params)
    try:
        # BUG FIX: negative sizes used to slip through the upper-bound-only
        # check and were rejected only by the remote API; reject them
        # locally, as the error message already promises.
        if sample_size > 100 or sample_size < 0:
            raise UrlSyntaxError(
                'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
            )
    except TypeError:
        # sample_size is not comparable with an int (e.g. a string).
        raise UrlSyntaxError(
            'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
        )

    request_params['sample'] = sample_size

    return self.__class__(request_url, request_params, context, self.etiquette)
def doi(self, doi, only_message=True):
    """
    Retrieve the metadata registered for a given DOI.

    doi: Crossref DOI id (String).
    only_message: when True (default) return only the ``message`` part of
        the JSON response, otherwise return the full response.

    return: JSON dict with the document metadata, or None when the DOI is
        not registered (HTTP 404).

    Example:
        >>> from crossref.restful import Works
        >>> works = Works()
        >>> works.doi('10.1590/S0004-28032013005000001')
        {'DOI': '10.1590/s0004-28032013005000001', 'type': 'journal-article', ...}
    """
    request_url = build_url_endpoint(
        '/'.join([self.ENDPOINT, doi])
    )

    response = self.do_http_request(
        'get',
        request_url,
        data={},
        custom_header=str(self.etiquette)
    )

    # An unregistered DOI yields 404; surface that as None.
    if response.status_code == 404:
        return

    payload = response.json()
    return payload['message'] if only_message is True else payload
def agency(self, doi, only_message=True):
    """
    Retrieve the registration agency metadata for a given DOI.

    doi: Crossref DOI id (String).
    only_message: when True (default) return only the ``message`` part of
        the JSON response, otherwise return the full response.

    return: JSON dict with the agency metadata, or None when the DOI is
        not registered (HTTP 404).

    Example:
        >>> from crossref.restful import Works
        >>> works = Works()
        >>> works.agency('10.1590/S0004-28032013005000001')
        {'DOI': '10.1590/s0004-28032013005000001', 'agency': {'label': 'CrossRef', 'id': 'crossref'}}
    """
    request_url = build_url_endpoint(
        '/'.join([self.ENDPOINT, doi, 'agency'])
    )

    response = self.do_http_request(
        'get',
        request_url,
        data={},
        custom_header=str(self.etiquette)
    )

    # An unregistered DOI yields 404; surface that as None.
    if response.status_code == 404:
        return

    payload = response.json()
    return payload['message'] if only_message is True else payload
def doi_exists(self, doi):
    """
    Check whether a DOI is registered in Crossref.

    doi: Crossref DOI id (String).

    return: Boolean — False when the API answers HTTP 404, True otherwise.

    Example:
        >>> from crossref.restful import Works
        >>> works = Works()
        >>> works.doi_exists('10.1590/S0004-28032013005000001')
        True
    """
    request_url = build_url_endpoint(
        '/'.join([self.ENDPOINT, doi])
    )

    # only_headers avoids downloading the document body just to probe
    # for existence.
    response = self.do_http_request(
        'get',
        request_url,
        data={},
        only_headers=True,
        custom_header=str(self.etiquette)
    )

    return response.status_code != 404
|
fabiobatalha/crossrefapi | crossref/restful.py | Works.filter | python | def filter(self, **kwargs):
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
for fltr, value in kwargs.items():
decoded_fltr = fltr.replace('__', '.').replace('_', '-')
if decoded_fltr not in self.FILTER_VALIDATOR.keys():
raise UrlSyntaxError(
'Filter %s specified but there is no such filter for this route. Valid filters for this route are: %s' % (
str(decoded_fltr),
', '.join(self.FILTER_VALIDATOR.keys())
)
)
if self.FILTER_VALIDATOR[decoded_fltr] is not None:
self.FILTER_VALIDATOR[decoded_fltr](str(value))
if 'filter' not in request_params:
request_params['filter'] = decoded_fltr + ':' + str(value)
else:
request_params['filter'] += ',' + decoded_fltr + ':' + str(value)
return self.__class__(request_url, request_params, context, self.etiquette) | This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded and recursively with query, filter,
order, sort and facet methods.
kwargs: valid FILTER_VALIDATOR arguments.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.filter(has_funder='true', has_license='true')
>>> for item in query:
... print(item['title'])
...
['Design of smiling-face-shaped band-notched UWB antenna']
['Phase I clinical and pharmacokinetic study of PM01183 (a tetrahydroisoquinoline, Lurbinectedin) in combination with gemcitabine in patients with advanced solid tumors']
... | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L741-L787 | [
"def build_url_endpoint(endpoint, context=None):\n\n endpoint = '/'.join([i for i in [context, endpoint] if i])\n\n return 'https://%s/%s' % (API, endpoint)\n"
] | class Works(Endpoint):
CURSOR_AS_ITER_METHOD = True
ENDPOINT = 'works'
ORDER_VALUES = ('asc', 'desc', '1', '-1')
SORT_VALUES = (
'created',
'deposited',
'indexed',
'is-referenced-by-count',
'issued',
'published',
'published-online',
'published-print',
'references-count',
'relevance',
'score',
'submitted',
'updated'
)
FIELDS_QUERY = (
'affiliation',
'author',
'bibliographic',
'chair',
'container_title',
'contributor',
'editor',
'event_acronym',
'event_location',
'event_name',
'event_sponsor',
'event_theme',
'funder_name',
'publisher_location',
'publisher_name',
'title',
'translator'
)
FIELDS_SELECT = (
'abstract',
'URL',
'member',
'posted',
'score',
'created',
'degree',
'update-policy',
'short-title',
'license',
'ISSN',
'container-title',
'issued',
'update-to',
'issue',
'prefix',
'approved',
'indexed',
'article-number',
'clinical-trial-number',
'accepted',
'author',
'group-title',
'DOI',
'is-referenced-by-count',
'updated-by',
'event',
'chair',
'standards-body',
'original-title',
'funder',
'translator',
'archive',
'published-print',
'alternative-id',
'subject',
'subtitle',
'published-online',
'publisher-location',
'content-domain',
'reference',
'title',
'link',
'type',
'publisher',
'volume',
'references-count',
'ISBN',
'issn-type',
'assertion',
'deposited',
'page',
'content-created',
'short-container-title',
'relation',
'editor'
)
FILTER_VALIDATOR = {
'alternative_id': None,
'archive': validators.archive,
'article_number': None,
'assertion': None,
'assertion-group': None,
'award.funder': None,
'award.number': None,
'category-name': None,
'clinical-trial-number': None,
'container-title': None,
'content-domain': None,
'directory': validators.directory,
'doi': None,
'from-accepted-date': validators.is_date,
'from-created-date': validators.is_date,
'from-deposit-date': validators.is_date,
'from-event-end-date': validators.is_date,
'from-event-start-date': validators.is_date,
'from-index-date': validators.is_date,
'from-issued-date': validators.is_date,
'from-online-pub-date': validators.is_date,
'from-posted-date': validators.is_date,
'from-print-pub-date': validators.is_date,
'from-pub-date': validators.is_date,
'from-update-date': validators.is_date,
'full-text.application': None,
'full-text.type': None,
'full-text.version': None,
'funder': None,
'funder-doi-asserted-by': None,
'group-title': None,
'has-abstract': validators.is_bool,
'has-affiliation': validators.is_bool,
'has-archive': validators.is_bool,
'has-assertion': validators.is_bool,
'has-authenticated-orcid': validators.is_bool,
'has-award': validators.is_bool,
'has-clinical-trial-number': validators.is_bool,
'has-content-domain': validators.is_bool,
'has-domain-restriction': validators.is_bool,
'has-event': validators.is_bool,
'has-full-text': validators.is_bool,
'has-funder': validators.is_bool,
'has-funder-doi': validators.is_bool,
'has-license': validators.is_bool,
'has-orcid': validators.is_bool,
'has-references': validators.is_bool,
'has-relation': validators.is_bool,
'has-update': validators.is_bool,
'has-update-policy': validators.is_bool,
'is-update': validators.is_bool,
'isbn': None,
'issn': None,
'license.delay': validators.is_integer,
'license.url': None,
'license.version': None,
'location': None,
'member': validators.is_integer,
'orcid': None,
'prefix': None,
'relation.object': None,
'relation.object-type': None,
'relation.type': None,
'type': validators.document_type,
'type-name': None,
'until-accepted-date': validators.is_date,
'until-created-date': validators.is_date,
'until-deposit-date': validators.is_date,
'until-event-end-date': validators.is_date,
'until-event-start-date': validators.is_date,
'until-index-date': validators.is_date,
'until-issued-date': validators.is_date,
'until-online-pub-date': validators.is_date,
'until-posted-date': validators.is_date,
'until-print-pub-date': validators.is_date,
'until-pub-date': validators.is_date,
'until-update-date': validators.is_date,
'update-type': None,
'updates': None
}
FACET_VALUES = {
'archive': None,
'affiliation': None,
'assertion': None,
'assertion-group': None,
'category-name': None,
'container-title': 1000,
'license': None,
'funder-doi': None,
'funder-name': None,
'issn': 1000,
'orcid': 1000,
'published': None,
'publisher-name': None,
'relation-type': None,
'source': None,
'type-name': None,
'update-type': None
}
def order(self, order='asc'):
    """
    Return a new iterable endpoint with the result ordering applied.

    Can be chained with query, filter, sort and facet.

    order: one of ORDER_VALUES ('asc', 'desc', '1', '-1').

    return: iterable object of Works metadata

    raises: UrlSyntaxError when order is not a valid ordering.

    Example:
        >>> from crossref.restful import Works
        >>> works = Works()
        >>> works.query('zika').sort('deposited').order('desc').url
        'https://api.crossref.org/works?sort=deposited&query=zika&order=desc'
    """
    if order not in self.ORDER_VALUES:
        raise UrlSyntaxError(
            'Sort order specified as %s but must be one of: %s' % (
                str(order),
                ', '.join(self.ORDER_VALUES)
            )
        )

    context = str(self.context)
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    params = dict(self.request_params)
    params['order'] = order

    # Chaining never mutates the current endpoint; build a fresh one.
    return self.__class__(endpoint_url, params, context, self.etiquette)
def select(self, *args):
    """
    Return a new iterable endpoint restricted to the given metadata fields.

    Can be chained with query, filter, sort and facet; repeated calls
    accumulate fields.

    args: field names given as strings (optionally comma separated) and/or
        lists of strings; every field must be one of FIELDS_SELECT.

    return: iterable object of Works metadata

    raises: UrlSyntaxError when an unknown field is requested.

    Example:
        >>> from crossref.restful import Works
        >>> works = Works()
        >>> works.select(['DOI', 'prefix']).url
        'https://api.crossref.org/works?select=DOI%2Cprefix'
    """
    # Flatten every argument into a list of stripped field names; other
    # argument types are ignored, matching the historical behavior.
    requested = []
    for arg in args:
        if isinstance(arg, list):
            requested += [field.strip() for field in arg]
        if isinstance(arg, str):
            requested += [field.strip() for field in arg.split(',')]

    unknown = set(requested) - set(self.FIELDS_SELECT)
    if len(unknown) != 0:
        raise UrlSyntaxError(
            'Select field\'s specified as (%s) but must be one of: %s' % (
                ', '.join(unknown),
                ', '.join(self.FIELDS_SELECT)
            )
        )

    context = str(self.context)
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    params = dict(self.request_params)

    # Merge with previously selected fields, drop empties, sort for a
    # deterministic URL.
    previous = params.get('select', '').split(',')
    merged = {field for field in previous + requested if field}
    params['select'] = ','.join(sorted(merged))

    return self.__class__(endpoint_url, params, context, self.etiquette)
def sort(self, sort='score'):
    """
    Return a new iterable endpoint with the sort field applied.

    Can be chained with query, filter, order and facet.

    sort: one of SORT_VALUES (e.g. 'deposited', 'relevance', 'score').

    return: iterable object of Works metadata

    raises: UrlSyntaxError when sort is not a valid sort field.

    Example:
        >>> from crossref.restful import Works
        >>> works = Works()
        >>> works.sort('deposited').url
        'https://api.crossref.org/works?sort=deposited'
    """
    if sort not in self.SORT_VALUES:
        raise UrlSyntaxError(
            'Sort field specified as %s but must be one of: %s' % (
                str(sort),
                ', '.join(self.SORT_VALUES)
            )
        )

    context = str(self.context)
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    params = dict(self.request_params)
    params['sort'] = sort

    # Chaining never mutates the current endpoint; build a fresh one.
    return self.__class__(endpoint_url, params, context, self.etiquette)
def facet(self, facet_name, facet_count=100):
    """
    Retrieve facet counts for the current query.

    facet_name: one of FACET_VALUES.
    facet_count: maximum number of facet values to return; clamped to the
        per-facet limit declared in FACET_VALUES when one exists.

    return: dict with the ``facets`` content of the API response.

    raises: UrlSyntaxError when facet_name is not a valid facet for this
        route.
    """
    context = str(self.context)
    request_url = build_url_endpoint(self.ENDPOINT, context)
    request_params = dict(self.request_params)
    # Facet queries only need the aggregated counts, not the result items.
    request_params['rows'] = 0

    if facet_name not in self.FACET_VALUES.keys():
        # BUG FIX: the message previously hard-coded a stale facet list and
        # passed the real list as a stray second argument to UrlSyntaxError
        # instead of interpolating it; both values now feed the two %s
        # placeholders.
        raise UrlSyntaxError(
            'Facet %s specified but there is no such facet for this route. Valid facets for this route are: %s' % (
                str(facet_name),
                ', '.join(self.FACET_VALUES.keys())
            )
        )

    # Clamp the requested count at the API limit for this facet, if any.
    facet_count = self.FACET_VALUES[facet_name] if self.FACET_VALUES[facet_name] is not None and self.FACET_VALUES[facet_name] <= facet_count else facet_count

    request_params['facet'] = '%s:%s' % (facet_name, facet_count)

    result = self.do_http_request(
        'get',
        request_url,
        data=request_params,
        custom_header=str(self.etiquette)
    ).json()

    return result['message']['facets']
def query(self, *args, **kwargs):
    """
    Return a new iterable endpoint with free-text queries applied.

    Can be chained recursively with query, filter, order, sort and facet.

    args: free-text terms joined into the main ``query`` parameter.
    kwargs: field queries; each key must be one of FIELDS_QUERY and
        underscores in the key are translated to hyphens.

    return: iterable object of Works metadata

    raises: UrlSyntaxError when a field query is unknown for this route.

    Example:
        >>> from crossref.restful import Works
        >>> works = Works()
        >>> works.query('Zika Virus').url
        'https://api.crossref.org/works?query=Zika+Virus'
    """
    context = str(self.context)
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    params = dict(self.request_params)

    if args:
        params['query'] = ' '.join([str(term) for term in args])

    for field, value in kwargs.items():
        if field not in self.FIELDS_QUERY:
            raise UrlSyntaxError(
                'Field query %s specified but there is no such field query for this route. Valid field queries for this route are: %s' % (
                    str(field), ', '.join(self.FIELDS_QUERY)
                )
            )
        # e.g. container_title -> query.container-title
        params['query.%s' % field.replace('_', '-')] = value

    return self.__class__(endpoint_url, params, context, self.etiquette)
def sample(self, sample_size=20):
    """
    Return a new iterable endpoint that retrieves a random sample of items.

    kwargs: sample_size (Integer) between 0 and 100.

    return: iterable object of Works metadata

    raises UrlSyntaxError: if sample_size is not an integer in [0, 100].
    """
    context = str(self.context)
    request_url = build_url_endpoint(self.ENDPOINT, context)
    request_params = dict(self.request_params)
    try:
        # Also reject negatives: the previous check only caught values above
        # 100 although the error message promises a positive integer.
        if sample_size > 100 or sample_size < 0:
            raise UrlSyntaxError(
                'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
            )
    except TypeError:
        # Non-numeric values (e.g. strings) fail the comparison on Python 3.
        raise UrlSyntaxError(
            'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
        )
    request_params['sample'] = sample_size
    return self.__class__(request_url, request_params, context, self.etiquette)
def doi(self, doi, only_message=True):
    """
    Retrieve the metadata registered in Crossref for a given DOI.

    args: Crossref DOI id (String)
    only_message: when True, return only the 'message' part of the JSON.

    return: JSON dict, or None when the DOI is unknown (HTTP 404).
    """
    request_url = build_url_endpoint(
        '/'.join([self.ENDPOINT, doi])
    )

    result = self.do_http_request(
        'get',
        request_url,
        data={},
        custom_header=str(self.etiquette)
    )

    if result.status_code == 404:
        return None

    payload = result.json()
    if only_message is True:
        return payload['message']
    return payload
def agency(self, doi, only_message=True):
    """
    Retrieve the DOI registration agency metadata for a given DOI.

    args: Crossref DOI id (String)
    only_message: when True, return only the 'message' part of the JSON.

    return: JSON dict, or None when the DOI is unknown (HTTP 404).
    """
    request_url = build_url_endpoint(
        '/'.join([self.ENDPOINT, doi, 'agency'])
    )

    result = self.do_http_request(
        'get',
        request_url,
        data={},
        custom_header=str(self.etiquette)
    )

    if result.status_code == 404:
        return None

    payload = result.json()
    return payload['message'] if only_message is True else payload
def doi_exists(self, doi):
    """
    Check whether a DOI number is registered with Crossref.

    Issues a headers-only request and reports False when the API answers
    with a 404 status code.

    args: Crossref DOI id (String)

    return: Boolean
    """
    request_url = build_url_endpoint(
        '/'.join([self.ENDPOINT, doi])
    )

    result = self.do_http_request(
        'get',
        request_url,
        data={},
        only_headers=True,
        custom_header=str(self.etiquette)
    )

    return result.status_code != 404
|
fabiobatalha/crossrefapi | crossref/restful.py | Works.query | python | def query(self, *args, **kwargs):
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
if args:
request_params['query'] = ' '.join([str(i) for i in args])
for field, value in kwargs.items():
if field not in self.FIELDS_QUERY:
raise UrlSyntaxError(
'Field query %s specified but there is no such field query for this route. Valid field queries for this route are: %s' % (
str(field), ', '.join(self.FIELDS_QUERY)
)
)
request_params['query.%s' % field.replace('_', '-')] = value
return self.__class__(request_url, request_params, context, self.etiquette) | This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded and recursively with query, filter,
order, sort and facet methods.
args: strings (String)
kwargs: valid FIELDS_QUERY arguments.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.query('Zika Virus')
>>> query.url
'https://api.crossref.org/works?query=Zika+Virus'
>>> for item in query:
... print(item['title'])
...
['Zika Virus']
['Zika virus disease']
['Zika Virus: Laboratory Diagnosis']
['Spread of Zika virus disease']
['Carditis in Zika Virus Infection']
['Understanding Zika virus']
['Zika Virus: History and Infectology']
... | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L813-L862 | [
"def build_url_endpoint(endpoint, context=None):\n\n endpoint = '/'.join([i for i in [context, endpoint] if i])\n\n return 'https://%s/%s' % (API, endpoint)\n"
] | class Works(Endpoint):
CURSOR_AS_ITER_METHOD = True
ENDPOINT = 'works'
ORDER_VALUES = ('asc', 'desc', '1', '-1')
SORT_VALUES = (
'created',
'deposited',
'indexed',
'is-referenced-by-count',
'issued',
'published',
'published-online',
'published-print',
'references-count',
'relevance',
'score',
'submitted',
'updated'
)
FIELDS_QUERY = (
'affiliation',
'author',
'bibliographic',
'chair',
'container_title',
'contributor',
'editor',
'event_acronym',
'event_location',
'event_name',
'event_sponsor',
'event_theme',
'funder_name',
'publisher_location',
'publisher_name',
'title',
'translator'
)
FIELDS_SELECT = (
'abstract',
'URL',
'member',
'posted',
'score',
'created',
'degree',
'update-policy',
'short-title',
'license',
'ISSN',
'container-title',
'issued',
'update-to',
'issue',
'prefix',
'approved',
'indexed',
'article-number',
'clinical-trial-number',
'accepted',
'author',
'group-title',
'DOI',
'is-referenced-by-count',
'updated-by',
'event',
'chair',
'standards-body',
'original-title',
'funder',
'translator',
'archive',
'published-print',
'alternative-id',
'subject',
'subtitle',
'published-online',
'publisher-location',
'content-domain',
'reference',
'title',
'link',
'type',
'publisher',
'volume',
'references-count',
'ISBN',
'issn-type',
'assertion',
'deposited',
'page',
'content-created',
'short-container-title',
'relation',
'editor'
)
FILTER_VALIDATOR = {
'alternative_id': None,
'archive': validators.archive,
'article_number': None,
'assertion': None,
'assertion-group': None,
'award.funder': None,
'award.number': None,
'category-name': None,
'clinical-trial-number': None,
'container-title': None,
'content-domain': None,
'directory': validators.directory,
'doi': None,
'from-accepted-date': validators.is_date,
'from-created-date': validators.is_date,
'from-deposit-date': validators.is_date,
'from-event-end-date': validators.is_date,
'from-event-start-date': validators.is_date,
'from-index-date': validators.is_date,
'from-issued-date': validators.is_date,
'from-online-pub-date': validators.is_date,
'from-posted-date': validators.is_date,
'from-print-pub-date': validators.is_date,
'from-pub-date': validators.is_date,
'from-update-date': validators.is_date,
'full-text.application': None,
'full-text.type': None,
'full-text.version': None,
'funder': None,
'funder-doi-asserted-by': None,
'group-title': None,
'has-abstract': validators.is_bool,
'has-affiliation': validators.is_bool,
'has-archive': validators.is_bool,
'has-assertion': validators.is_bool,
'has-authenticated-orcid': validators.is_bool,
'has-award': validators.is_bool,
'has-clinical-trial-number': validators.is_bool,
'has-content-domain': validators.is_bool,
'has-domain-restriction': validators.is_bool,
'has-event': validators.is_bool,
'has-full-text': validators.is_bool,
'has-funder': validators.is_bool,
'has-funder-doi': validators.is_bool,
'has-license': validators.is_bool,
'has-orcid': validators.is_bool,
'has-references': validators.is_bool,
'has-relation': validators.is_bool,
'has-update': validators.is_bool,
'has-update-policy': validators.is_bool,
'is-update': validators.is_bool,
'isbn': None,
'issn': None,
'license.delay': validators.is_integer,
'license.url': None,
'license.version': None,
'location': None,
'member': validators.is_integer,
'orcid': None,
'prefix': None,
'relation.object': None,
'relation.object-type': None,
'relation.type': None,
'type': validators.document_type,
'type-name': None,
'until-accepted-date': validators.is_date,
'until-created-date': validators.is_date,
'until-deposit-date': validators.is_date,
'until-event-end-date': validators.is_date,
'until-event-start-date': validators.is_date,
'until-index-date': validators.is_date,
'until-issued-date': validators.is_date,
'until-online-pub-date': validators.is_date,
'until-posted-date': validators.is_date,
'until-print-pub-date': validators.is_date,
'until-pub-date': validators.is_date,
'until-update-date': validators.is_date,
'update-type': None,
'updates': None
}
FACET_VALUES = {
'archive': None,
'affiliation': None,
'assertion': None,
'assertion-group': None,
'category-name': None,
'container-title': 1000,
'license': None,
'funder-doi': None,
'funder-name': None,
'issn': 1000,
'orcid': 1000,
'published': None,
'publisher-name': None,
'relation-type': None,
'source': None,
'type-name': None,
'update-type': None
}
def order(self, order='asc'):
    """
    Return a new iterable endpoint with the given sort order applied.

    Can be compounded with query, filter, sort and facet methods.

    kwargs: order, one of ORDER_VALUES ('asc', 'desc', '1', '-1').

    return: iterable object of Works metadata

    raises UrlSyntaxError: if order is not a valid sort order.
    """
    # Validate before building the request; build_url_endpoint has no side
    # effects, so the observable behavior is unchanged.
    if order not in self.ORDER_VALUES:
        raise UrlSyntaxError(
            'Sort order specified as %s but must be one of: %s' % (
                str(order),
                ', '.join(self.ORDER_VALUES)
            )
        )

    context = str(self.context)
    request_url = build_url_endpoint(self.ENDPOINT, context)
    request_params = dict(self.request_params)
    request_params['order'] = order

    return self.__class__(request_url, request_params, context, self.etiquette)
def select(self, *args):
    """
    Return a new iterable endpoint restricted to the given metadata fields.

    args: field names given as strings (optionally comma separated) or as
    lists of strings; every name must belong to FIELDS_SELECT.

    return: iterable object of Works metadata

    raises UrlSyntaxError: if any requested field is unknown.
    """
    context = str(self.context)
    request_url = build_url_endpoint(self.ENDPOINT, context)
    request_params = dict(self.request_params)

    select_args = []
    for item in args:
        if isinstance(item, list):
            select_args.extend(entry.strip() for entry in item)
        if isinstance(item, str):
            select_args.extend(entry.strip() for entry in item.split(','))

    invalid_select_args = set(select_args) - set(self.FIELDS_SELECT)
    if len(invalid_select_args) != 0:
        raise UrlSyntaxError(
            'Select field\'s specified as (%s) but must be one of: %s' % (
                ', '.join(invalid_select_args),
                ', '.join(self.FIELDS_SELECT)
            )
        )

    # Merge with any previously selected fields, drop empty entries and
    # sort for a stable url.
    already_selected = request_params.get('select', '').split(',')
    merged = {name for name in already_selected + select_args if name}
    request_params['select'] = ','.join(sorted(merged))

    return self.__class__(request_url, request_params, context, self.etiquette)
def sort(self, sort='score'):
    """
    Return a new iterable endpoint sorted by the given field.

    Can be compounded with query, filter, order and facet methods.

    kwargs: sort, one of SORT_VALUES.

    return: iterable object of Works metadata

    raises UrlSyntaxError: if sort is not a valid sort field.
    """
    # Validate before building the request; build_url_endpoint has no side
    # effects, so the observable behavior is unchanged.
    if sort not in self.SORT_VALUES:
        raise UrlSyntaxError(
            'Sort field specified as %s but must be one of: %s' % (
                str(sort),
                ', '.join(self.SORT_VALUES)
            )
        )

    context = str(self.context)
    request_url = build_url_endpoint(self.ENDPOINT, context)
    request_params = dict(self.request_params)
    request_params['sort'] = sort

    return self.__class__(request_url, request_params, context, self.etiquette)
def filter(self, **kwargs):
    """
    Return a new iterable endpoint with the given filters applied.

    kwargs: valid FILTER_VALIDATOR arguments. In a keyword name, '__' maps
    to '.' and '_' maps to '-' to form the filter name sent to the API.

    return: iterable object of Works metadata

    raises UrlSyntaxError: if a filter name is unknown for this route.
    """
    context = str(self.context)
    request_url = build_url_endpoint(self.ENDPOINT, context)
    request_params = dict(self.request_params)

    for fltr, value in kwargs.items():
        decoded_fltr = fltr.replace('__', '.').replace('_', '-')
        if decoded_fltr not in self.FILTER_VALIDATOR.keys():
            raise UrlSyntaxError(
                'Filter %s specified but there is no such filter for this route. Valid filters for this route are: %s' % (
                    str(decoded_fltr),
                    ', '.join(self.FILTER_VALIDATOR.keys())
                )
            )

        # Run the per-filter validator when one is declared.
        validator = self.FILTER_VALIDATOR[decoded_fltr]
        if validator is not None:
            validator(str(value))

        entry = decoded_fltr + ':' + str(value)
        if 'filter' in request_params:
            request_params['filter'] += ',' + entry
        else:
            request_params['filter'] = entry

    return self.__class__(request_url, request_params, context, self.etiquette)
def facet(self, facet_name, facet_count=100):
    """
    Retrieve facet (aggregation) counts for the current query.

    args: facet_name (String), one of FACET_VALUES.
    facet_count: maximum number of facet values to return; capped at the
    per-facet limit declared in FACET_VALUES when one exists.

    return: dict with the facets portion of the API response.

    raises UrlSyntaxError: if facet_name is not a valid facet.
    """
    context = str(self.context)
    request_url = build_url_endpoint(self.ENDPOINT, context)
    request_params = dict(self.request_params)
    request_params['rows'] = 0

    if facet_name not in self.FACET_VALUES.keys():
        # Build the message from FACET_VALUES so the reported list can never
        # drift from the real one. The previous code hard-coded the facet
        # list in the message and passed the joined keys as a stray second
        # positional argument to UrlSyntaxError instead of interpolating it.
        raise UrlSyntaxError(
            'Facet %s specified but there is no such facet for this route. Valid facets for this route are: %s' % (
                str(facet_name),
                ', '.join(self.FACET_VALUES.keys())
            )
        )

    # Respect the per-facet maximum when the API defines one.
    facet_limit = self.FACET_VALUES[facet_name]
    if facet_limit is not None and facet_limit <= facet_count:
        facet_count = facet_limit

    request_params['facet'] = '%s:%s' % (facet_name, facet_count)

    result = self.do_http_request(
        'get',
        request_url,
        data=request_params,
        custom_header=str(self.etiquette)
    ).json()

    return result['message']['facets']
def sample(self, sample_size=20):
    """
    Return a new iterable endpoint that retrieves a random sample of items.

    kwargs: sample_size (Integer) between 0 and 100.

    return: iterable object of Works metadata

    raises UrlSyntaxError: if sample_size is not an integer in [0, 100].
    """
    context = str(self.context)
    request_url = build_url_endpoint(self.ENDPOINT, context)
    request_params = dict(self.request_params)
    try:
        # Also reject negatives: the previous check only caught values above
        # 100 although the error message promises a positive integer.
        if sample_size > 100 or sample_size < 0:
            raise UrlSyntaxError(
                'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
            )
    except TypeError:
        # Non-numeric values (e.g. strings) fail the comparison on Python 3.
        raise UrlSyntaxError(
            'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
        )
    request_params['sample'] = sample_size
    return self.__class__(request_url, request_params, context, self.etiquette)
def doi(self, doi, only_message=True):
    """
    Retrieve the metadata registered in Crossref for a given DOI.

    args: Crossref DOI id (String)
    only_message: when True, return only the 'message' part of the JSON.

    return: JSON dict, or None when the DOI is unknown (HTTP 404).
    """
    request_url = build_url_endpoint(
        '/'.join([self.ENDPOINT, doi])
    )

    result = self.do_http_request(
        'get',
        request_url,
        data={},
        custom_header=str(self.etiquette)
    )

    if result.status_code == 404:
        return None

    payload = result.json()
    if only_message is True:
        return payload['message']
    return payload
def agency(self, doi, only_message=True):
    """
    Retrieve the DOI registration agency metadata for a given DOI.

    args: Crossref DOI id (String)
    only_message: when True, return only the 'message' part of the JSON.

    return: JSON dict, or None when the DOI is unknown (HTTP 404).
    """
    request_url = build_url_endpoint(
        '/'.join([self.ENDPOINT, doi, 'agency'])
    )

    result = self.do_http_request(
        'get',
        request_url,
        data={},
        custom_header=str(self.etiquette)
    )

    if result.status_code == 404:
        return None

    payload = result.json()
    return payload['message'] if only_message is True else payload
def doi_exists(self, doi):
    """
    Check whether a DOI number is registered with Crossref.

    Issues a headers-only request and reports False when the API answers
    with a 404 status code.

    args: Crossref DOI id (String)

    return: Boolean
    """
    request_url = build_url_endpoint(
        '/'.join([self.ENDPOINT, doi])
    )

    result = self.do_http_request(
        'get',
        request_url,
        data={},
        only_headers=True,
        custom_header=str(self.etiquette)
    )

    return result.status_code != 404
|
fabiobatalha/crossrefapi | crossref/restful.py | Works.sample | python | def sample(self, sample_size=20):
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
try:
if sample_size > 100:
raise UrlSyntaxError(
'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
)
except TypeError:
raise UrlSyntaxError(
'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
)
request_params['sample'] = sample_size
return self.__class__(request_url, request_params, context, self.etiquette) | This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
kwargs: sample_size (Integer) between 0 and 100.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.sample(2).url
'https://api.crossref.org/works?sample=2'
>>> [i['title'] for i in works.sample(2)]
[['A study on the hemolytic properties ofPrevotella nigrescens'],
['The geometry and the radial breathing mode of carbon nanotubes: beyond the ideal behaviour']] | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L864-L899 | [
"def build_url_endpoint(endpoint, context=None):\n\n endpoint = '/'.join([i for i in [context, endpoint] if i])\n\n return 'https://%s/%s' % (API, endpoint)\n"
] | class Works(Endpoint):
CURSOR_AS_ITER_METHOD = True
ENDPOINT = 'works'
ORDER_VALUES = ('asc', 'desc', '1', '-1')
SORT_VALUES = (
'created',
'deposited',
'indexed',
'is-referenced-by-count',
'issued',
'published',
'published-online',
'published-print',
'references-count',
'relevance',
'score',
'submitted',
'updated'
)
FIELDS_QUERY = (
'affiliation',
'author',
'bibliographic',
'chair',
'container_title',
'contributor',
'editor',
'event_acronym',
'event_location',
'event_name',
'event_sponsor',
'event_theme',
'funder_name',
'publisher_location',
'publisher_name',
'title',
'translator'
)
FIELDS_SELECT = (
'abstract',
'URL',
'member',
'posted',
'score',
'created',
'degree',
'update-policy',
'short-title',
'license',
'ISSN',
'container-title',
'issued',
'update-to',
'issue',
'prefix',
'approved',
'indexed',
'article-number',
'clinical-trial-number',
'accepted',
'author',
'group-title',
'DOI',
'is-referenced-by-count',
'updated-by',
'event',
'chair',
'standards-body',
'original-title',
'funder',
'translator',
'archive',
'published-print',
'alternative-id',
'subject',
'subtitle',
'published-online',
'publisher-location',
'content-domain',
'reference',
'title',
'link',
'type',
'publisher',
'volume',
'references-count',
'ISBN',
'issn-type',
'assertion',
'deposited',
'page',
'content-created',
'short-container-title',
'relation',
'editor'
)
FILTER_VALIDATOR = {
'alternative_id': None,
'archive': validators.archive,
'article_number': None,
'assertion': None,
'assertion-group': None,
'award.funder': None,
'award.number': None,
'category-name': None,
'clinical-trial-number': None,
'container-title': None,
'content-domain': None,
'directory': validators.directory,
'doi': None,
'from-accepted-date': validators.is_date,
'from-created-date': validators.is_date,
'from-deposit-date': validators.is_date,
'from-event-end-date': validators.is_date,
'from-event-start-date': validators.is_date,
'from-index-date': validators.is_date,
'from-issued-date': validators.is_date,
'from-online-pub-date': validators.is_date,
'from-posted-date': validators.is_date,
'from-print-pub-date': validators.is_date,
'from-pub-date': validators.is_date,
'from-update-date': validators.is_date,
'full-text.application': None,
'full-text.type': None,
'full-text.version': None,
'funder': None,
'funder-doi-asserted-by': None,
'group-title': None,
'has-abstract': validators.is_bool,
'has-affiliation': validators.is_bool,
'has-archive': validators.is_bool,
'has-assertion': validators.is_bool,
'has-authenticated-orcid': validators.is_bool,
'has-award': validators.is_bool,
'has-clinical-trial-number': validators.is_bool,
'has-content-domain': validators.is_bool,
'has-domain-restriction': validators.is_bool,
'has-event': validators.is_bool,
'has-full-text': validators.is_bool,
'has-funder': validators.is_bool,
'has-funder-doi': validators.is_bool,
'has-license': validators.is_bool,
'has-orcid': validators.is_bool,
'has-references': validators.is_bool,
'has-relation': validators.is_bool,
'has-update': validators.is_bool,
'has-update-policy': validators.is_bool,
'is-update': validators.is_bool,
'isbn': None,
'issn': None,
'license.delay': validators.is_integer,
'license.url': None,
'license.version': None,
'location': None,
'member': validators.is_integer,
'orcid': None,
'prefix': None,
'relation.object': None,
'relation.object-type': None,
'relation.type': None,
'type': validators.document_type,
'type-name': None,
'until-accepted-date': validators.is_date,
'until-created-date': validators.is_date,
'until-deposit-date': validators.is_date,
'until-event-end-date': validators.is_date,
'until-event-start-date': validators.is_date,
'until-index-date': validators.is_date,
'until-issued-date': validators.is_date,
'until-online-pub-date': validators.is_date,
'until-posted-date': validators.is_date,
'until-print-pub-date': validators.is_date,
'until-pub-date': validators.is_date,
'until-update-date': validators.is_date,
'update-type': None,
'updates': None
}
FACET_VALUES = {
'archive': None,
'affiliation': None,
'assertion': None,
'assertion-group': None,
'category-name': None,
'container-title': 1000,
'license': None,
'funder-doi': None,
'funder-name': None,
'issn': 1000,
'orcid': 1000,
'published': None,
'publisher-name': None,
'relation-type': None,
'source': None,
'type-name': None,
'update-type': None
}
def order(self, order='asc'):
    """Set the sort direction for the result set.

    Meant to be chained with the query/filter/sort/facet methods.
    Returns a new instance of this endpoint class carrying every
    previously accumulated request parameter plus ``order``.

    order: one of ORDER_VALUES ('asc', 'desc', '1', '-1').
    raises UrlSyntaxError: when *order* is not a recognised direction.
    """
    context = str(self.context)
    if order not in self.ORDER_VALUES:
        raise UrlSyntaxError(
            'Sort order specified as %s but must be one of: %s' % (
                str(order),
                ', '.join(self.ORDER_VALUES)
            )
        )
    params = dict(self.request_params)
    params['order'] = order
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    return self.__class__(endpoint_url, params, context, self.etiquette)
def select(self, *args):
    """Restrict the metadata fields returned for each work.

    Field names may be given as comma separated strings, as multiple
    string arguments, or as lists of strings; repeated calls
    accumulate. Returns a new instance of this endpoint class with
    the merged, de-duplicated ``select`` parameter.

    args: field names drawn from FIELDS_SELECT.
    raises UrlSyntaxError: when any requested field is unknown.
    """
    context = str(self.context)
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    params = dict(self.request_params)
    requested = []
    for arg in args:
        if isinstance(arg, list):
            requested += [name.strip() for name in arg]
        if isinstance(arg, str):
            requested += [name.strip() for name in arg.split(',')]
    unknown = set(requested) - set(self.FIELDS_SELECT)
    if len(unknown) != 0:
        raise UrlSyntaxError(
            'Select field\'s specified as (%s) but must be one of: %s' % (
                ', '.join(unknown),
                ', '.join(self.FIELDS_SELECT)
            )
        )
    # Merge with any fields selected on a previous call, dropping empties
    # that appear when no prior 'select' parameter existed.
    previous = params.get('select', '').split(',')
    merged = {name for name in previous + requested if name}
    params['select'] = ','.join(sorted(merged))
    return self.__class__(endpoint_url, params, context, self.etiquette)
def sort(self, sort='score'):
    """Set the field used to sort the result set.

    Meant to be chained with the query/filter/order/facet methods.
    Returns a new instance of this endpoint class carrying every
    previously accumulated request parameter plus ``sort``.

    sort: one of SORT_VALUES (e.g. 'score', 'deposited', 'relevance').
    raises UrlSyntaxError: when *sort* is not a sortable field.
    """
    context = str(self.context)
    if sort not in self.SORT_VALUES:
        raise UrlSyntaxError(
            'Sort field specified as %s but must be one of: %s' % (
                str(sort),
                ', '.join(self.SORT_VALUES)
            )
        )
    params = dict(self.request_params)
    params['sort'] = sort
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    return self.__class__(endpoint_url, params, context, self.etiquette)
def filter(self, **kwargs):
    """Narrow the result set with API filters.

    Filter names are given as keyword arguments with ``__`` standing
    for '.' and ``_`` for '-' (e.g. ``from_pub_date``); repeated calls
    accumulate. Returns a new instance of this endpoint class with the
    combined ``filter`` parameter.

    kwargs: filters drawn from FILTER_VALIDATOR; each value is checked
        when a validator is registered for its filter.
    raises UrlSyntaxError: when a filter name is unknown or its value
        fails validation.
    """
    context = str(self.context)
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    params = dict(self.request_params)
    for name, value in kwargs.items():
        decoded = name.replace('__', '.').replace('_', '-')
        if decoded not in self.FILTER_VALIDATOR.keys():
            raise UrlSyntaxError(
                'Filter %s specified but there is no such filter for this route. Valid filters for this route are: %s' % (
                    str(decoded),
                    ', '.join(self.FILTER_VALIDATOR.keys())
                )
            )
        validator = self.FILTER_VALIDATOR[decoded]
        if validator is not None:
            validator(str(value))
        entry = decoded + ':' + str(value)
        if 'filter' in params:
            params['filter'] += ',' + entry
        else:
            params['filter'] = entry
    return self.__class__(endpoint_url, params, context, self.etiquette)
def facet(self, facet_name, facet_count=100):
    """Retrieve facet counts for the current query.

    facet_name: one of FACET_VALUES.
    facet_count: maximum number of facet buckets to return; clamped to
        the route's per-facet cap when FACET_VALUES defines one.
    return: dict with the ``facets`` portion of the API response message.
    raises UrlSyntaxError: if *facet_name* is not a valid facet.
    """
    context = str(self.context)
    request_url = build_url_endpoint(self.ENDPOINT, context)
    request_params = dict(self.request_params)
    request_params['rows'] = 0  # facets only; no work records needed
    if facet_name not in self.FACET_VALUES.keys():
        # Bug fix: the original format string had a single %s but a
        # hard-coded facet list, and passed the joined facet names as a
        # stray second positional argument to UrlSyntaxError. Build the
        # message from FACET_VALUES so it stays in sync with the route.
        raise UrlSyntaxError(
            'Facet %s specified but there is no such facet for this route. Valid facets for this route are: %s' % (
                str(facet_name),
                ', '.join(self.FACET_VALUES.keys())
            )
        )
    # Honour the route-specific cap on facet counts when one is defined.
    facet_count = self.FACET_VALUES[facet_name] if self.FACET_VALUES[facet_name] is not None and self.FACET_VALUES[facet_name] <= facet_count else facet_count
    request_params['facet'] = '%s:%s' % (facet_name, facet_count)
    result = self.do_http_request(
        'get',
        request_url,
        data=request_params,
        custom_header=str(self.etiquette)
    ).json()
    return result['message']['facets']
def query(self, *args, **kwargs):
    """Free-text search over the works metadata.

    Positional arguments are joined into the general ``query``
    parameter; keyword arguments target specific field queries
    (e.g. ``bibliographic='zika'``). Returns a new instance of this
    endpoint class carrying the accumulated parameters.

    args: free-text terms.
    kwargs: field queries drawn from FIELDS_QUERY.
    raises UrlSyntaxError: when a field query name is unknown.
    """
    context = str(self.context)
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    params = dict(self.request_params)
    if args:
        params['query'] = ' '.join(str(term) for term in args)
    for field, value in kwargs.items():
        if field not in self.FIELDS_QUERY:
            raise UrlSyntaxError(
                'Field query %s specified but there is no such field query for this route. Valid field queries for this route are: %s' % (
                    str(field), ', '.join(self.FIELDS_QUERY)
                )
            )
        params['query.%s' % field.replace('_', '-')] = value
    return self.__class__(endpoint_url, params, context, self.etiquette)
def doi(self, doi, only_message=True):
    """Fetch the metadata record registered for a single DOI.

    doi: a Crossref DOI identifier (string).
    only_message: when True (default) return only the ``message``
        portion of the JSON payload, otherwise the full payload.
    return: dict with the work metadata, or None when the API
        answers 404 (unknown DOI).
    """
    request_url = build_url_endpoint(
        '/'.join([self.ENDPOINT, doi])
    )
    response = self.do_http_request(
        'get',
        request_url,
        data={},
        custom_header=str(self.etiquette)
    )
    if response.status_code == 404:
        return
    payload = response.json()
    return payload['message'] if only_message is True else payload
def agency(self, doi, only_message=True):
    """Fetch the registration agency record for a DOI.

    doi: a Crossref DOI identifier (string).
    only_message: when True (default) return only the ``message``
        portion of the JSON payload, otherwise the full payload.
    return: dict such as
        {'DOI': ..., 'agency': {'id': 'crossref', 'label': 'CrossRef'}},
        or None when the API answers 404.
    """
    request_url = build_url_endpoint(
        '/'.join([self.ENDPOINT, doi, 'agency'])
    )
    response = self.do_http_request(
        'get',
        request_url,
        data={},
        custom_header=str(self.etiquette)
    )
    if response.status_code == 404:
        return
    payload = response.json()
    return payload['message'] if only_message is True else payload
def doi_exists(self, doi):
    """Return True when *doi* is registered with Crossref, else False.

    Asks do_http_request for headers only (no body download) and treats
    a 404 status as "does not exist".

    doi: a Crossref DOI identifier (string).
    return: bool.
    """
    request_url = build_url_endpoint(
        '/'.join([self.ENDPOINT, doi])
    )
    response = self.do_http_request(
        'get',
        request_url,
        data={},
        only_headers=True,
        custom_header=str(self.etiquette)
    )
    return response.status_code != 404
|
fabiobatalha/crossrefapi | crossref/restful.py | Works.doi | python | def doi(self, doi, only_message=True):
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result | This method retrieve the DOI metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi('10.1590/S0004-28032013005000001')
{'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001',
'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref',
'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [],
'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z',
'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article',
'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001',
'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z',
'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []},
'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z',
'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}],
'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'],
'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530',
'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]},
'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'],
'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'],
'abstract': '<jats:p>Significant abstract data..... .</jats:p>',
'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [
{'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk',
'given': 'Ismael'}, {'affiliation': [
{'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}],
'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [
{'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'},
{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [
{'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho',
'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'} | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L901-L959 | [
"def build_url_endpoint(endpoint, context=None):\n\n endpoint = '/'.join([i for i in [context, endpoint] if i])\n\n return 'https://%s/%s' % (API, endpoint)\n"
] | class Works(Endpoint):
# NOTE(review): appears to toggle cursor-based (deep) paging in the shared
# Endpoint base class — confirm against the Endpoint implementation.
CURSOR_AS_ITER_METHOD = True
# REST API route served by this endpoint class (see build_url_endpoint calls).
ENDPOINT = 'works'
# Accepted values for the ``order`` request parameter (see order()).
ORDER_VALUES = ('asc', 'desc', '1', '-1')
# Fields the API can sort on (see sort()).
SORT_VALUES = (
'created',
'deposited',
'indexed',
'is-referenced-by-count',
'issued',
'published',
'published-online',
'published-print',
'references-count',
'relevance',
'score',
'submitted',
'updated'
)
# Field-query names accepted as keyword arguments by query().
FIELDS_QUERY = (
'affiliation',
'author',
'bibliographic',
'chair',
'container_title',
'contributor',
'editor',
'event_acronym',
'event_location',
'event_name',
'event_sponsor',
'event_theme',
'funder_name',
'publisher_location',
'publisher_name',
'title',
'translator'
)
# Metadata fields that select() allows in the ``select`` parameter.
FIELDS_SELECT = (
'abstract',
'URL',
'member',
'posted',
'score',
'created',
'degree',
'update-policy',
'short-title',
'license',
'ISSN',
'container-title',
'issued',
'update-to',
'issue',
'prefix',
'approved',
'indexed',
'article-number',
'clinical-trial-number',
'accepted',
'author',
'group-title',
'DOI',
'is-referenced-by-count',
'updated-by',
'event',
'chair',
'standards-body',
'original-title',
'funder',
'translator',
'archive',
'published-print',
'alternative-id',
'subject',
'subtitle',
'published-online',
'publisher-location',
'content-domain',
'reference',
'title',
'link',
'type',
'publisher',
'volume',
'references-count',
'ISBN',
'issn-type',
'assertion',
'deposited',
'page',
'content-created',
'short-container-title',
'relation',
'editor'
)
# Filters accepted by filter(). The value is a validator callable applied
# to the stringified filter value, or None when any string is accepted.
FILTER_VALIDATOR = {
'alternative_id': None,
'archive': validators.archive,
'article_number': None,
'assertion': None,
'assertion-group': None,
'award.funder': None,
'award.number': None,
'category-name': None,
'clinical-trial-number': None,
'container-title': None,
'content-domain': None,
'directory': validators.directory,
'doi': None,
'from-accepted-date': validators.is_date,
'from-created-date': validators.is_date,
'from-deposit-date': validators.is_date,
'from-event-end-date': validators.is_date,
'from-event-start-date': validators.is_date,
'from-index-date': validators.is_date,
'from-issued-date': validators.is_date,
'from-online-pub-date': validators.is_date,
'from-posted-date': validators.is_date,
'from-print-pub-date': validators.is_date,
'from-pub-date': validators.is_date,
'from-update-date': validators.is_date,
'full-text.application': None,
'full-text.type': None,
'full-text.version': None,
'funder': None,
'funder-doi-asserted-by': None,
'group-title': None,
'has-abstract': validators.is_bool,
'has-affiliation': validators.is_bool,
'has-archive': validators.is_bool,
'has-assertion': validators.is_bool,
'has-authenticated-orcid': validators.is_bool,
'has-award': validators.is_bool,
'has-clinical-trial-number': validators.is_bool,
'has-content-domain': validators.is_bool,
'has-domain-restriction': validators.is_bool,
'has-event': validators.is_bool,
'has-full-text': validators.is_bool,
'has-funder': validators.is_bool,
'has-funder-doi': validators.is_bool,
'has-license': validators.is_bool,
'has-orcid': validators.is_bool,
'has-references': validators.is_bool,
'has-relation': validators.is_bool,
'has-update': validators.is_bool,
'has-update-policy': validators.is_bool,
'is-update': validators.is_bool,
'isbn': None,
'issn': None,
'license.delay': validators.is_integer,
'license.url': None,
'license.version': None,
'location': None,
'member': validators.is_integer,
'orcid': None,
'prefix': None,
'relation.object': None,
'relation.object-type': None,
'relation.type': None,
'type': validators.document_type,
'type-name': None,
'until-accepted-date': validators.is_date,
'until-created-date': validators.is_date,
'until-deposit-date': validators.is_date,
'until-event-end-date': validators.is_date,
'until-event-start-date': validators.is_date,
'until-index-date': validators.is_date,
'until-issued-date': validators.is_date,
'until-online-pub-date': validators.is_date,
'until-posted-date': validators.is_date,
'until-print-pub-date': validators.is_date,
'until-pub-date': validators.is_date,
'until-update-date': validators.is_date,
'update-type': None,
'updates': None
}
# Facets accepted by facet(). A value of None means no route-specific cap;
# an integer is the maximum facet_count the API accepts for that facet.
FACET_VALUES = {
'archive': None,
'affiliation': None,
'assertion': None,
'assertion-group': None,
'category-name': None,
'container-title': 1000,
'license': None,
'funder-doi': None,
'funder-name': None,
'issn': 1000,
'orcid': 1000,
'published': None,
'publisher-name': None,
'relation-type': None,
'source': None,
'type-name': None,
'update-type': None
}
def order(self, order='asc'):
    """Set the sort direction for the result set.

    Meant to be chained with the query/filter/sort/facet methods.
    Returns a new instance of this endpoint class carrying every
    previously accumulated request parameter plus ``order``.

    order: one of ORDER_VALUES ('asc', 'desc', '1', '-1').
    raises UrlSyntaxError: when *order* is not a recognised direction.
    """
    context = str(self.context)
    if order not in self.ORDER_VALUES:
        raise UrlSyntaxError(
            'Sort order specified as %s but must be one of: %s' % (
                str(order),
                ', '.join(self.ORDER_VALUES)
            )
        )
    params = dict(self.request_params)
    params['order'] = order
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    return self.__class__(endpoint_url, params, context, self.etiquette)
def select(self, *args):
    """Restrict the metadata fields returned for each work.

    Field names may be given as comma separated strings, as multiple
    string arguments, or as lists of strings; repeated calls
    accumulate. Returns a new instance of this endpoint class with
    the merged, de-duplicated ``select`` parameter.

    args: field names drawn from FIELDS_SELECT.
    raises UrlSyntaxError: when any requested field is unknown.
    """
    context = str(self.context)
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    params = dict(self.request_params)
    requested = []
    for arg in args:
        if isinstance(arg, list):
            requested += [name.strip() for name in arg]
        if isinstance(arg, str):
            requested += [name.strip() for name in arg.split(',')]
    unknown = set(requested) - set(self.FIELDS_SELECT)
    if len(unknown) != 0:
        raise UrlSyntaxError(
            'Select field\'s specified as (%s) but must be one of: %s' % (
                ', '.join(unknown),
                ', '.join(self.FIELDS_SELECT)
            )
        )
    # Merge with any fields selected on a previous call, dropping empties
    # that appear when no prior 'select' parameter existed.
    previous = params.get('select', '').split(',')
    merged = {name for name in previous + requested if name}
    params['select'] = ','.join(sorted(merged))
    return self.__class__(endpoint_url, params, context, self.etiquette)
def sort(self, sort='score'):
    """Set the field used to sort the result set.

    Meant to be chained with the query/filter/order/facet methods.
    Returns a new instance of this endpoint class carrying every
    previously accumulated request parameter plus ``sort``.

    sort: one of SORT_VALUES (e.g. 'score', 'deposited', 'relevance').
    raises UrlSyntaxError: when *sort* is not a sortable field.
    """
    context = str(self.context)
    if sort not in self.SORT_VALUES:
        raise UrlSyntaxError(
            'Sort field specified as %s but must be one of: %s' % (
                str(sort),
                ', '.join(self.SORT_VALUES)
            )
        )
    params = dict(self.request_params)
    params['sort'] = sort
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    return self.__class__(endpoint_url, params, context, self.etiquette)
def filter(self, **kwargs):
    """Narrow the result set with API filters.

    Filter names are given as keyword arguments with ``__`` standing
    for '.' and ``_`` for '-' (e.g. ``from_pub_date``); repeated calls
    accumulate. Returns a new instance of this endpoint class with the
    combined ``filter`` parameter.

    kwargs: filters drawn from FILTER_VALIDATOR; each value is checked
        when a validator is registered for its filter.
    raises UrlSyntaxError: when a filter name is unknown or its value
        fails validation.
    """
    context = str(self.context)
    endpoint_url = build_url_endpoint(self.ENDPOINT, context)
    params = dict(self.request_params)
    for name, value in kwargs.items():
        decoded = name.replace('__', '.').replace('_', '-')
        if decoded not in self.FILTER_VALIDATOR.keys():
            raise UrlSyntaxError(
                'Filter %s specified but there is no such filter for this route. Valid filters for this route are: %s' % (
                    str(decoded),
                    ', '.join(self.FILTER_VALIDATOR.keys())
                )
            )
        validator = self.FILTER_VALIDATOR[decoded]
        if validator is not None:
            validator(str(value))
        entry = decoded + ':' + str(value)
        if 'filter' in params:
            params['filter'] += ',' + entry
        else:
            params['filter'] = entry
    return self.__class__(endpoint_url, params, context, self.etiquette)
def facet(self, facet_name, facet_count=100):
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
request_params['rows'] = 0
if facet_name not in self.FACET_VALUES.keys():
raise UrlSyntaxError('Facet %s specified but there is no such facet for this route. Valid facets for this route are: *, affiliation, funder-name, funder-doi, publisher-name, orcid, container-title, assertion, archive, update-type, issn, published, source, type-name, license, category-name, relation-type, assertion-group' %
str(facet_name),
', '.join(self.FACET_VALUES.keys())
)
facet_count = self.FACET_VALUES[facet_name] if self.FACET_VALUES[facet_name] is not None and self.FACET_VALUES[facet_name] <= facet_count else facet_count
request_params['facet'] = '%s:%s' % (facet_name, facet_count)
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
).json()
return result['message']['facets']
def query(self, *args, **kwargs):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded and recursively with query, filter,
order, sort and facet methods.
args: strings (String)
kwargs: valid FIELDS_QUERY arguments.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.query('Zika Virus')
>>> query.url
'https://api.crossref.org/works?query=Zika+Virus'
>>> for item in query:
... print(item['title'])
...
['Zika Virus']
['Zika virus disease']
['Zika Virus: Laboratory Diagnosis']
['Spread of Zika virus disease']
['Carditis in Zika Virus Infection']
['Understanding Zika virus']
['Zika Virus: History and Infectology']
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
if args:
request_params['query'] = ' '.join([str(i) for i in args])
for field, value in kwargs.items():
if field not in self.FIELDS_QUERY:
raise UrlSyntaxError(
'Field query %s specified but there is no such field query for this route. Valid field queries for this route are: %s' % (
str(field), ', '.join(self.FIELDS_QUERY)
)
)
request_params['query.%s' % field.replace('_', '-')] = value
return self.__class__(request_url, request_params, context, self.etiquette)
def sample(self, sample_size=20):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
kwargs: sample_size (Integer) between 0 and 100.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.sample(2).url
'https://api.crossref.org/works?sample=2'
>>> [i['title'] for i in works.sample(2)]
[['A study on the hemolytic properties ofPrevotella nigrescens'],
['The geometry and the radial breathing mode of carbon nanotubes: beyond the ideal behaviour']]
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
try:
if sample_size > 100:
raise UrlSyntaxError(
'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
)
except TypeError:
raise UrlSyntaxError(
'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
)
request_params['sample'] = sample_size
return self.__class__(request_url, request_params, context, self.etiquette)
def agency(self, doi, only_message=True):
"""
This method retrieve the DOI Agency metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.agency('10.1590/S0004-28032013005000001')
{'DOI': '10.1590/s0004-28032013005000001', 'agency': {'label': 'CrossRef', 'id': 'crossref'}}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi, 'agency'])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result
def doi_exists(self, doi):
"""
This method retrieve a boolean according to the existence of a crossref
DOI number. It returns False if the API results a 404 status code.
args: Crossref DOI id (String)
return: Boolean
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001')
True
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001_invalid_doi')
False
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
only_headers=True,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return False
return True
|
fabiobatalha/crossrefapi | crossref/restful.py | Works.doi_exists | python | def doi_exists(self, doi):
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
only_headers=True,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return False
return True | This method retrieve a boolean according to the existence of a crossref
DOI number. It returns False if the API results a 404 status code.
args: Crossref DOI id (String)
return: Boolean
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001')
True
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001_invalid_doi')
False | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L995-L1032 | [
"def build_url_endpoint(endpoint, context=None):\n\n endpoint = '/'.join([i for i in [context, endpoint] if i])\n\n return 'https://%s/%s' % (API, endpoint)\n"
] | class Works(Endpoint):
CURSOR_AS_ITER_METHOD = True
ENDPOINT = 'works'
ORDER_VALUES = ('asc', 'desc', '1', '-1')
SORT_VALUES = (
'created',
'deposited',
'indexed',
'is-referenced-by-count',
'issued',
'published',
'published-online',
'published-print',
'references-count',
'relevance',
'score',
'submitted',
'updated'
)
FIELDS_QUERY = (
'affiliation',
'author',
'bibliographic',
'chair',
'container_title',
'contributor',
'editor',
'event_acronym',
'event_location',
'event_name',
'event_sponsor',
'event_theme',
'funder_name',
'publisher_location',
'publisher_name',
'title',
'translator'
)
FIELDS_SELECT = (
'abstract',
'URL',
'member',
'posted',
'score',
'created',
'degree',
'update-policy',
'short-title',
'license',
'ISSN',
'container-title',
'issued',
'update-to',
'issue',
'prefix',
'approved',
'indexed',
'article-number',
'clinical-trial-number',
'accepted',
'author',
'group-title',
'DOI',
'is-referenced-by-count',
'updated-by',
'event',
'chair',
'standards-body',
'original-title',
'funder',
'translator',
'archive',
'published-print',
'alternative-id',
'subject',
'subtitle',
'published-online',
'publisher-location',
'content-domain',
'reference',
'title',
'link',
'type',
'publisher',
'volume',
'references-count',
'ISBN',
'issn-type',
'assertion',
'deposited',
'page',
'content-created',
'short-container-title',
'relation',
'editor'
)
FILTER_VALIDATOR = {
'alternative_id': None,
'archive': validators.archive,
'article_number': None,
'assertion': None,
'assertion-group': None,
'award.funder': None,
'award.number': None,
'category-name': None,
'clinical-trial-number': None,
'container-title': None,
'content-domain': None,
'directory': validators.directory,
'doi': None,
'from-accepted-date': validators.is_date,
'from-created-date': validators.is_date,
'from-deposit-date': validators.is_date,
'from-event-end-date': validators.is_date,
'from-event-start-date': validators.is_date,
'from-index-date': validators.is_date,
'from-issued-date': validators.is_date,
'from-online-pub-date': validators.is_date,
'from-posted-date': validators.is_date,
'from-print-pub-date': validators.is_date,
'from-pub-date': validators.is_date,
'from-update-date': validators.is_date,
'full-text.application': None,
'full-text.type': None,
'full-text.version': None,
'funder': None,
'funder-doi-asserted-by': None,
'group-title': None,
'has-abstract': validators.is_bool,
'has-affiliation': validators.is_bool,
'has-archive': validators.is_bool,
'has-assertion': validators.is_bool,
'has-authenticated-orcid': validators.is_bool,
'has-award': validators.is_bool,
'has-clinical-trial-number': validators.is_bool,
'has-content-domain': validators.is_bool,
'has-domain-restriction': validators.is_bool,
'has-event': validators.is_bool,
'has-full-text': validators.is_bool,
'has-funder': validators.is_bool,
'has-funder-doi': validators.is_bool,
'has-license': validators.is_bool,
'has-orcid': validators.is_bool,
'has-references': validators.is_bool,
'has-relation': validators.is_bool,
'has-update': validators.is_bool,
'has-update-policy': validators.is_bool,
'is-update': validators.is_bool,
'isbn': None,
'issn': None,
'license.delay': validators.is_integer,
'license.url': None,
'license.version': None,
'location': None,
'member': validators.is_integer,
'orcid': None,
'prefix': None,
'relation.object': None,
'relation.object-type': None,
'relation.type': None,
'type': validators.document_type,
'type-name': None,
'until-accepted-date': validators.is_date,
'until-created-date': validators.is_date,
'until-deposit-date': validators.is_date,
'until-event-end-date': validators.is_date,
'until-event-start-date': validators.is_date,
'until-index-date': validators.is_date,
'until-issued-date': validators.is_date,
'until-online-pub-date': validators.is_date,
'until-posted-date': validators.is_date,
'until-print-pub-date': validators.is_date,
'until-pub-date': validators.is_date,
'until-update-date': validators.is_date,
'update-type': None,
'updates': None
}
FACET_VALUES = {
'archive': None,
'affiliation': None,
'assertion': None,
'assertion-group': None,
'category-name': None,
'container-title': 1000,
'license': None,
'funder-doi': None,
'funder-name': None,
'issn': 1000,
'orcid': 1000,
'published': None,
'publisher-name': None,
'relation-type': None,
'source': None,
'type-name': None,
'update-type': None
}
def order(self, order='asc'):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded with query, filter,
sort and facet methods.
kwargs: valid SORT_VALUES arguments.
return: iterable object of Works metadata
Example 1:
>>> from crossref.restful import Works
>>> works.query('zika').sort('deposited').order('asc').url
'https://api.crossref.org/works?sort=deposited&query=zika&order=asc'
>>> query = works.query('zika').sort('deposited').order('asc')
>>> for item in query:
... print(item['title'], item['deposited']['date-time'])
...
['A Facile Preparation of 1-(6-Hydroxyindol-1-yl)-2,2-dimethylpropan-1-one'] 2007-02-13T20:56:13Z
['Contributions to the Flora of the Lake Champlain Valley, New York and Vermont, III'] 2007-02-13T20:56:13Z
['Pilularia americana A. Braun in Klamath County, Oregon'] 2007-02-13T20:56:13Z
...
Example 2:
>>> from crossref.restful import Works
>>> works.query('zika').sort('deposited').order('desc').url
'https://api.crossref.org/works?sort=deposited&query=zika&order=desc'
>>> query = works.query('zika').sort('deposited').order('desc')
>>> for item in query:
... print(item['title'], item['deposited']['date-time'])
...
["Planning for the unexpected: Ebola virus, Zika virus, what's next?"] 2017-05-29T12:55:53Z
['Sensitivity of RT-PCR method in samples shown to be positive for Zika virus by RT-qPCR in vector competence studies'] 2017-05-29T12:53:54Z
['Re-evaluation of routine dengue virus serology in travelers in the era of Zika virus emergence'] 2017-05-29T10:46:11Z
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
if order not in self.ORDER_VALUES:
raise UrlSyntaxError(
'Sort order specified as %s but must be one of: %s' % (
str(order),
', '.join(self.ORDER_VALUES)
)
)
request_params['order'] = order
return self.__class__(request_url, request_params, context, self.etiquette)
def select(self, *args):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded with query, filter,
sort and facet methods.
args: valid FIELDS_SELECT arguments.
return: iterable object of Works metadata
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> for i in works.filter(has_funder='true', has_license='true').sample(5).select('DOI, prefix'):
... print(i)
...
{'DOI': '10.1016/j.jdiacomp.2016.06.005', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.mssp.2015.07.076', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1002/slct.201700168', 'prefix': '10.1002', 'member': 'http://id.crossref.org/member/311'}
{'DOI': '10.1016/j.actbio.2017.01.034', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.optcom.2013.11.013', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
...
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> for i in works.filter(has_funder='true', has_license='true').sample(5).select('DOI').select('prefix'):
>>> print(i)
...
{'DOI': '10.1016/j.sajb.2016.03.010', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.jneumeth.2009.08.017', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.tetlet.2016.05.058', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1007/s00170-017-0689-z', 'prefix': '10.1007', 'member': 'http://id.crossref.org/member/297'}
{'DOI': '10.1016/j.dsr.2016.03.004', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
...
Example: 3:
>>> from crossref.restful import Works
>>> works = Works()
>>>: for i in works.filter(has_funder='true', has_license='true').sample(5).select(['DOI', 'prefix']):
>>> print(i)
...
{'DOI': '10.1111/zoj.12146', 'prefix': '10.1093', 'member': 'http://id.crossref.org/member/286'}
{'DOI': '10.1016/j.bios.2014.04.018', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.cej.2016.10.011', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.dci.2017.08.001', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.icheatmasstransfer.2016.09.012', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
...
Example: 4:
>>> from crossref.restful import Works
>>> works = Works()
>>>: for i in works.filter(has_funder='true', has_license='true').sample(5).select('DOI', 'prefix'):
>>> print(i)
...
{'DOI': '10.1111/zoj.12146', 'prefix': '10.1093', 'member': 'http://id.crossref.org/member/286'}
{'DOI': '10.1016/j.bios.2014.04.018', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.cej.2016.10.011', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.dci.2017.08.001', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
{'DOI': '10.1016/j.icheatmasstransfer.2016.09.012', 'prefix': '10.1016', 'member': 'http://id.crossref.org/member/78'}
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
select_args = []
invalid_select_args = []
for item in args:
if isinstance(item, list):
select_args += [i.strip() for i in item]
if isinstance(item, str):
select_args += [i.strip() for i in item.split(',')]
invalid_select_args = set(select_args) - set(self.FIELDS_SELECT)
if len(invalid_select_args) != 0:
raise UrlSyntaxError(
'Select field\'s specified as (%s) but must be one of: %s' % (
', '.join(invalid_select_args),
', '.join(self.FIELDS_SELECT)
)
)
request_params['select'] = ','.join(
sorted([i for i in set(request_params.get('select', '').split(',') + select_args) if i])
)
return self.__class__(request_url, request_params, context, self.etiquette)
def sort(self, sort='score'):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded with query, filter,
order and facet methods.
kwargs: valid SORT_VALUES arguments.
return: iterable object of Works metadata
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.sort('deposited')
>>> for item in query:
... print(item['title'])
...
['Integralidade e transdisciplinaridade em equipes multiprofissionais na saúde coletiva']
['Aprendizagem em grupo operativo de diabetes: uma abordagem etnográfica']
['A rotatividade de enfermeiros e médicos: um impasse na implementação da Estratégia de Saúde da Família']
...
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.sort('relevance')
>>> for item in query:
... print(item['title'])
...
['Proceedings of the American Physical Society']
['Annual Meeting of the Research Society on Alcoholism']
['Local steroid injections: Comment on the American college of rheumatology guidelines for the management of osteoarthritis of the hip and on the letter by Swezey']
['Intraventricular neurocytoma']
['Mammography accreditation']
['Temporal lobe necrosis in nasopharyngeal carcinoma: Pictorial essay']
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
if sort not in self.SORT_VALUES:
raise UrlSyntaxError(
'Sort field specified as %s but must be one of: %s' % (
str(sort),
', '.join(self.SORT_VALUES)
)
)
request_params['sort'] = sort
return self.__class__(request_url, request_params, context, self.etiquette)
def filter(self, **kwargs):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded and recursively with query, filter,
order, sort and facet methods.
kwargs: valid FILTER_VALIDATOR arguments.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.filter(has_funder='true', has_license='true')
>>> for item in query:
... print(item['title'])
...
['Design of smiling-face-shaped band-notched UWB antenna']
['Phase I clinical and pharmacokinetic study of PM01183 (a tetrahydroisoquinoline, Lurbinectedin) in combination with gemcitabine in patients with advanced solid tumors']
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
for fltr, value in kwargs.items():
decoded_fltr = fltr.replace('__', '.').replace('_', '-')
if decoded_fltr not in self.FILTER_VALIDATOR.keys():
raise UrlSyntaxError(
'Filter %s specified but there is no such filter for this route. Valid filters for this route are: %s' % (
str(decoded_fltr),
', '.join(self.FILTER_VALIDATOR.keys())
)
)
if self.FILTER_VALIDATOR[decoded_fltr] is not None:
self.FILTER_VALIDATOR[decoded_fltr](str(value))
if 'filter' not in request_params:
request_params['filter'] = decoded_fltr + ':' + str(value)
else:
request_params['filter'] += ',' + decoded_fltr + ':' + str(value)
return self.__class__(request_url, request_params, context, self.etiquette)
def facet(self, facet_name, facet_count=100):
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
request_params['rows'] = 0
if facet_name not in self.FACET_VALUES.keys():
raise UrlSyntaxError('Facet %s specified but there is no such facet for this route. Valid facets for this route are: *, affiliation, funder-name, funder-doi, publisher-name, orcid, container-title, assertion, archive, update-type, issn, published, source, type-name, license, category-name, relation-type, assertion-group' %
str(facet_name),
', '.join(self.FACET_VALUES.keys())
)
facet_count = self.FACET_VALUES[facet_name] if self.FACET_VALUES[facet_name] is not None and self.FACET_VALUES[facet_name] <= facet_count else facet_count
request_params['facet'] = '%s:%s' % (facet_name, facet_count)
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
).json()
return result['message']['facets']
def query(self, *args, **kwargs):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded and recursively with query, filter,
order, sort and facet methods.
args: strings (String)
kwargs: valid FIELDS_QUERY arguments.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> query = works.query('Zika Virus')
>>> query.url
'https://api.crossref.org/works?query=Zika+Virus'
>>> for item in query:
... print(item['title'])
...
['Zika Virus']
['Zika virus disease']
['Zika Virus: Laboratory Diagnosis']
['Spread of Zika virus disease']
['Carditis in Zika Virus Infection']
['Understanding Zika virus']
['Zika Virus: History and Infectology']
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
if args:
request_params['query'] = ' '.join([str(i) for i in args])
for field, value in kwargs.items():
if field not in self.FIELDS_QUERY:
raise UrlSyntaxError(
'Field query %s specified but there is no such field query for this route. Valid field queries for this route are: %s' % (
str(field), ', '.join(self.FIELDS_QUERY)
)
)
request_params['query.%s' % field.replace('_', '-')] = value
return self.__class__(request_url, request_params, context, self.etiquette)
def sample(self, sample_size=20):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
kwargs: sample_size (Integer) between 0 and 100.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.sample(2).url
'https://api.crossref.org/works?sample=2'
>>> [i['title'] for i in works.sample(2)]
[['A study on the hemolytic properties ofPrevotella nigrescens'],
['The geometry and the radial breathing mode of carbon nanotubes: beyond the ideal behaviour']]
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
try:
if sample_size > 100:
raise UrlSyntaxError(
'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
)
except TypeError:
raise UrlSyntaxError(
'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
)
request_params['sample'] = sample_size
return self.__class__(request_url, request_params, context, self.etiquette)
def doi(self, doi, only_message=True):
"""
This method retrieve the DOI metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi('10.1590/S0004-28032013005000001')
{'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001',
'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref',
'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [],
'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z',
'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article',
'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001',
'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z',
'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []},
'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z',
'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}],
'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'],
'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530',
'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]},
'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'],
'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'],
'abstract': '<jats:p>Significant abstract data..... .</jats:p>',
'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [
{'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk',
'given': 'Ismael'}, {'affiliation': [
{'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}],
'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [
{'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'},
{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [
{'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho',
'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result
def agency(self, doi, only_message=True):
"""
This method retrieve the DOI Agency metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.agency('10.1590/S0004-28032013005000001')
{'DOI': '10.1590/s0004-28032013005000001', 'agency': {'label': 'CrossRef', 'id': 'crossref'}}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi, 'agency'])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result
|
fabiobatalha/crossrefapi | crossref/restful.py | Funders.works | python | def works(self, funder_id):
context = '%s/%s' % (self.ENDPOINT, str(funder_id))
return Works(context=context) | This method retrieve a iterable of Works of the given funder.
args: Crossref allowed document Types (String)
return: Works() | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1199-L1208 | null | class Funders(Endpoint):
CURSOR_AS_ITER_METHOD = False
ENDPOINT = 'funders'
FILTER_VALIDATOR = {
'location': None,
}
def query(self, *args):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
args: Funder ID (Integer)
return: iterable object of Funders metadata
Example:
>>> from crossref.restful import Funders
>>> funders = Funders()
>>> funders.query('ABBEY').url
'https://api.crossref.org/funders?query=ABBEY'
>>> next(iter(funders.query('ABBEY')))
{'alt-names': ['Abbey'], 'location': 'United Kingdom', 'replaced-by': [],
'replaces': [], 'name': 'ABBEY AWARDS', 'id': '501100000314',
'tokens': ['abbey', 'awards', 'abbey'],
'uri': 'http://dx.doi.org/10.13039/501100000314'}
"""
request_url = build_url_endpoint(self.ENDPOINT)
request_params = dict(self.request_params)
if args:
request_params['query'] = ' '.join([str(i) for i in args])
return self.__class__(request_url, request_params, self.etiquette)
def filter(self, **kwargs):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded and recursively with query, filter,
order, sort and facet methods.
kwargs: valid FILTER_VALIDATOR arguments.
return: iterable object of Funders metadata
Example:
>>> from crossref.restful import Funders
>>> funders = Funders()
>>> query = funders.filter(location='Japan')
>>> for item in query:
... print(item['name'], item['location'])
...
(u'Central Research Institute, Fukuoka University', u'Japan')
(u'Tohoku University', u'Japan')
(u'Information-Technology Promotion Agency', u'Japan')
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
for fltr, value in kwargs.items():
decoded_fltr = fltr.replace('__', '.').replace('_', '-')
if decoded_fltr not in self.FILTER_VALIDATOR.keys():
raise UrlSyntaxError(
'Filter %s specified but there is no such filter for this route. Valid filters for this route are: %s' % (
str(decoded_fltr),
', '.join(self.FILTER_VALIDATOR.keys())
)
)
if self.FILTER_VALIDATOR[decoded_fltr] is not None:
self.FILTER_VALIDATOR[decoded_fltr](str(value))
if 'filter' not in request_params:
request_params['filter'] = decoded_fltr + ':' + str(value)
else:
request_params['filter'] += ',' + decoded_fltr + ':' + str(value)
return self.__class__(request_url, request_params, context, self.etiquette)
def funder(self, funder_id, only_message=True):
"""funder
This method retrive a crossref funder metadata related to the
given funder_id.
args: Funder ID (Integer)
Example:
>>> from crossref.restful import Funders
>>> funders = Funders()
>>> funders.funder('501100000314')
{'hierarchy': {'501100000314': {}}, 'alt-names': ['Abbey'],
'work-count': 3, 'replaced-by': [], 'replaces': [],
'hierarchy-names': {'501100000314': 'ABBEY AWARDS'},
'uri': 'http://dx.doi.org/10.13039/501100000314', 'location': 'United Kingdom',
'descendant-work-count': 3, 'descendants': [], 'name': 'ABBEY AWARDS',
'id': '501100000314', 'tokens': ['abbey', 'awards', 'abbey']}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, str(funder_id)])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result
def funder_exists(self, funder_id):
"""
This method retrieve a boolean according to the existence of a crossref
funder. It returns False if the API results a 404 status code.
args: Crossref Funder id (Integer)
return: Boolean
Example 1:
>>> from crossref.restful import Funders
>>> funders = Funders()
>>> funders.funder_exists('501100000314')
True
Example 2:
>>> from crossref.restful import Funders
>>> funders = Funders()
>>> funders.funder_exists('999999999999')
False
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, str(funder_id)])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
only_headers=True,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return False
return True
|
fabiobatalha/crossrefapi | crossref/restful.py | Members.query | python | def query(self, *args):
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT)
request_params = dict(self.request_params)
if args:
request_params['query'] = ' '.join([str(i) for i in args])
return self.__class__(request_url, request_params, context, self.etiquette) | This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
args: strings (String)
return: iterable object of Members metadata
Example:
>>> from crossref.restful import Members
>>> members = Members().query('Korean Association')
members.query('Korean Association').url
'https://api.crossref.org/journals?query=Public+Health+Health+Science'
>>> next(iter(members.query('Korean Association')))
{'prefix': [{'value': '10.20433', 'public-references': False,
'name': 'The New Korean Philosophical Association'}], 'counts': {'total-dois': 0, 'backfile-dois': 0,
'current-dois': 0}, 'coverage': {'references-backfile': 0, 'references-current': 0,
'abstracts-current': 0, 'update-policies-backfile': 0, 'orcids-current': 0, 'orcids-backfile': 0,
'licenses-current': 0, 'affiliations-backfile': 0, 'licenses-backfile': 0, 'update-policies-current': 0,
'resource-links-current': 0, 'resource-links-backfile': 0, 'award-numbers-backfile': 0,
'abstracts-backfile': 0, 'funders-current': 0, 'funders-backfile': 0, 'affiliations-current': 0,
'award-numbers-current': 0}, 'flags': {'deposits-orcids-backfile': False,
'deposits-references-backfile': False, 'deposits-licenses-current': False, 'deposits': False,
'deposits-abstracts-current': False, 'deposits-award-numbers-current': False, 'deposits-articles': False,
'deposits-resource-links-backfile': False, 'deposits-funders-current': False,
'deposits-award-numbers-backfile': False, 'deposits-references-current': False,
'deposits-abstracts-backfile': False, 'deposits-funders-backfile': False,
'deposits-update-policies-current': False, 'deposits-orcids-current': False,
'deposits-licenses-backfile': False, 'deposits-affiliations-backfile': False,
'deposits-update-policies-backfile': False, 'deposits-resource-links-current': False,
'deposits-affiliations-current': False}, 'names': ['The New Korean Philosophical Association'],
'breakdowns': {'dois-by-issued-year': []}, 'location': 'Dongsin Tower, 4th Floor 5, Mullae-dong 6-ga,
Mullae-dong 6-ga Seoul 150-096 South Korea', 'prefixes': ['10.20433'],
'last-status-check-time': 1496034177684, 'id': 8334, 'tokens': ['the', 'new', 'korean', 'philosophical',
'association'], 'primary-name': 'The New Korean Philosophical Association'} | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1224-L1269 | [
"def build_url_endpoint(endpoint, context=None):\n\n endpoint = '/'.join([i for i in [context, endpoint] if i])\n\n return 'https://%s/%s' % (API, endpoint)\n"
] | class Members(Endpoint):
CURSOR_AS_ITER_METHOD = False
ENDPOINT = 'members'
FILTER_VALIDATOR = {
'prefix': None,
'has-public-references': validators.is_bool,
'backfile-doi-count': validators.is_integer,
'current-doi-count': validators.is_integer
}
def filter(self, **kwargs):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded and recursively with query, filter,
order, sort and facet methods.
kwargs: valid FILTER_VALIDATOR arguments.
return: iterable object of Members metadata
Example:
>>> from crossref.restful import Members
>>> members = Members()
>>> query = members.filter(has_public_references='true')
>>> for item in query:
... print(item['prefix'])
...
[{u'public-references': False, u'name': u'Open Library of Humanities', u'value': u'10.16995'}, {u'public-references': True, u'name': u'Martin Eve', u'value': u'10.7766'}]
[{u'public-references': True, u'name': u'Institute of Business Research', u'value': u'10.24122'}]
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
for fltr, value in kwargs.items():
decoded_fltr = fltr.replace('__', '.').replace('_', '-')
if decoded_fltr not in self.FILTER_VALIDATOR.keys():
raise UrlSyntaxError(
'Filter %s specified but there is no such filter for this route. Valid filters for this route are: %s' % (
str(decoded_fltr),
', '.join(self.FILTER_VALIDATOR.keys())
)
)
if self.FILTER_VALIDATOR[decoded_fltr] is not None:
self.FILTER_VALIDATOR[decoded_fltr](str(value))
if 'filter' not in request_params:
request_params['filter'] = decoded_fltr + ':' + str(value)
else:
request_params['filter'] += ',' + decoded_fltr + ':' + str(value)
return self.__class__(request_url, request_params, context, self.etiquette)
def member(self, member_id, only_message=True):
"""
This method retrive a crossref member metadata related to the
given member_id.
args: Member ID (Integer)
Example:
>>> from crossref.restful import Members
>>> members = Members()
>>> members.member(101)
{'prefix': [{'value': '10.1024', 'public-references': False,
'name': 'Hogrefe Publishing Group'}, {'value': '10.1027', 'public-references': False,
'name': 'Hogrefe Publishing Group'}, {'value': '10.1026', 'public-references': False,
'name': 'Hogrefe Publishing Group'}], 'counts': {'total-dois': 35039, 'backfile-dois': 31430,
'current-dois': 3609}, 'coverage': {'references-backfile': 0.3601972758769989,
'references-current': 0.019118869677186012, 'abstracts-current': 0.0,
'update-policies-backfile': 0.0, 'orcids-current': 0.0, 'orcids-backfile': 0.0,
'licenses-current': 0.0, 'affiliations-backfile': 0.05685650557279587,
'licenses-backfile': 0.0, 'update-policies-current': 0.0, 'resource-links-current': 0.0,
'resource-links-backfile': 0.0, 'award-numbers-backfile': 0.0, 'abstracts-backfile': 0.0,
'funders-current': 0.0, 'funders-backfile': 0.0, 'affiliations-current': 0.15710723400115967,
'award-numbers-current': 0.0}, 'flags': {'deposits-orcids-backfile': False,
'deposits-references-backfile': True, 'deposits-licenses-current': False, 'deposits': True,
'deposits-abstracts-current': False, 'deposits-award-numbers-current': False,
'deposits-articles': True, 'deposits-resource-links-backfile': False,
'deposits-funders-current': False, 'deposits-award-numbers-backfile': False,
'deposits-references-current': True, 'deposits-abstracts-backfile': False,
'deposits-funders-backfile': False, 'deposits-update-policies-current': False,
'deposits-orcids-current': False, 'deposits-licenses-backfile': False,
'deposits-affiliations-backfile': True, 'deposits-update-policies-backfile': False,
'deposits-resource-links-current': False, 'deposits-affiliations-current': True},
'names': ['Hogrefe Publishing Group'], 'breakdowns': {'dois-by-issued-year':
[[2003, 2329], [2004, 2264], [2002, 2211], [2005, 2204], [2006, 2158], [2007, 2121], [2016, 1954],
[2008, 1884], [2015, 1838], [2012, 1827], [2013, 1805], [2014, 1796], [2009, 1760], [2010, 1718],
[2011, 1681], [2001, 1479], [2000, 1477], [1999, 1267], [2017, 767], [1997, 164], [1996, 140],
[1998, 138], [1995, 103], [1994, 11], [1993, 11], [0, 1]]},
'location': 'Langgass-Strasse 76 Berne CH-3000 Switzerland', 'prefixes': ['10.1024', '10.1027',
'10.1026'], 'last-status-check-time': 1496034132646, 'id': 101, 'tokens': ['hogrefe', 'publishing',
'group'], 'primary-name': 'Hogrefe Publishing Group'}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, str(member_id)])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result
def member_exists(self, member_id):
"""
This method retrieve a boolean according to the existence of a crossref
member. It returns False if the API results a 404 status code.
args: Crossref allowed document Type (String)
return: Boolean
Example 1:
>>> from crossref.restful import Members
>>> members = Members()
>>> members.member_exists(101)
True
Example 2:
>>> from crossref.restful import Members
>>> members = Members()
>>> members.member_exists(88888)
False
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, str(member_id)])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
only_headers=True,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return False
return True
def works(self, member_id):
"""
This method retrieve a iterable of Works of the given member.
args: Member ID (Integer)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(member_id))
return Works(context=context)
|
fabiobatalha/crossrefapi | crossref/restful.py | Members.works | python | def works(self, member_id):
context = '%s/%s' % (self.ENDPOINT, str(member_id))
return Works(context=context) | This method retrieve a iterable of Works of the given member.
args: Member ID (Integer)
return: Works() | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1418-L1427 | null | class Members(Endpoint):
CURSOR_AS_ITER_METHOD = False
ENDPOINT = 'members'
FILTER_VALIDATOR = {
'prefix': None,
'has-public-references': validators.is_bool,
'backfile-doi-count': validators.is_integer,
'current-doi-count': validators.is_integer
}
def query(self, *args):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
args: strings (String)
return: iterable object of Members metadata
Example:
>>> from crossref.restful import Members
>>> members = Members().query('Korean Association')
members.query('Korean Association').url
'https://api.crossref.org/journals?query=Public+Health+Health+Science'
>>> next(iter(members.query('Korean Association')))
{'prefix': [{'value': '10.20433', 'public-references': False,
'name': 'The New Korean Philosophical Association'}], 'counts': {'total-dois': 0, 'backfile-dois': 0,
'current-dois': 0}, 'coverage': {'references-backfile': 0, 'references-current': 0,
'abstracts-current': 0, 'update-policies-backfile': 0, 'orcids-current': 0, 'orcids-backfile': 0,
'licenses-current': 0, 'affiliations-backfile': 0, 'licenses-backfile': 0, 'update-policies-current': 0,
'resource-links-current': 0, 'resource-links-backfile': 0, 'award-numbers-backfile': 0,
'abstracts-backfile': 0, 'funders-current': 0, 'funders-backfile': 0, 'affiliations-current': 0,
'award-numbers-current': 0}, 'flags': {'deposits-orcids-backfile': False,
'deposits-references-backfile': False, 'deposits-licenses-current': False, 'deposits': False,
'deposits-abstracts-current': False, 'deposits-award-numbers-current': False, 'deposits-articles': False,
'deposits-resource-links-backfile': False, 'deposits-funders-current': False,
'deposits-award-numbers-backfile': False, 'deposits-references-current': False,
'deposits-abstracts-backfile': False, 'deposits-funders-backfile': False,
'deposits-update-policies-current': False, 'deposits-orcids-current': False,
'deposits-licenses-backfile': False, 'deposits-affiliations-backfile': False,
'deposits-update-policies-backfile': False, 'deposits-resource-links-current': False,
'deposits-affiliations-current': False}, 'names': ['The New Korean Philosophical Association'],
'breakdowns': {'dois-by-issued-year': []}, 'location': 'Dongsin Tower, 4th Floor 5, Mullae-dong 6-ga,
Mullae-dong 6-ga Seoul 150-096 South Korea', 'prefixes': ['10.20433'],
'last-status-check-time': 1496034177684, 'id': 8334, 'tokens': ['the', 'new', 'korean', 'philosophical',
'association'], 'primary-name': 'The New Korean Philosophical Association'}
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT)
request_params = dict(self.request_params)
if args:
request_params['query'] = ' '.join([str(i) for i in args])
return self.__class__(request_url, request_params, context, self.etiquette)
def filter(self, **kwargs):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
This method can be used compounded and recursively with query, filter,
order, sort and facet methods.
kwargs: valid FILTER_VALIDATOR arguments.
return: iterable object of Members metadata
Example:
>>> from crossref.restful import Members
>>> members = Members()
>>> query = members.filter(has_public_references='true')
>>> for item in query:
... print(item['prefix'])
...
[{u'public-references': False, u'name': u'Open Library of Humanities', u'value': u'10.16995'}, {u'public-references': True, u'name': u'Martin Eve', u'value': u'10.7766'}]
[{u'public-references': True, u'name': u'Institute of Business Research', u'value': u'10.24122'}]
...
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
for fltr, value in kwargs.items():
decoded_fltr = fltr.replace('__', '.').replace('_', '-')
if decoded_fltr not in self.FILTER_VALIDATOR.keys():
raise UrlSyntaxError(
'Filter %s specified but there is no such filter for this route. Valid filters for this route are: %s' % (
str(decoded_fltr),
', '.join(self.FILTER_VALIDATOR.keys())
)
)
if self.FILTER_VALIDATOR[decoded_fltr] is not None:
self.FILTER_VALIDATOR[decoded_fltr](str(value))
if 'filter' not in request_params:
request_params['filter'] = decoded_fltr + ':' + str(value)
else:
request_params['filter'] += ',' + decoded_fltr + ':' + str(value)
return self.__class__(request_url, request_params, context, self.etiquette)
def member(self, member_id, only_message=True):
"""
This method retrive a crossref member metadata related to the
given member_id.
args: Member ID (Integer)
Example:
>>> from crossref.restful import Members
>>> members = Members()
>>> members.member(101)
{'prefix': [{'value': '10.1024', 'public-references': False,
'name': 'Hogrefe Publishing Group'}, {'value': '10.1027', 'public-references': False,
'name': 'Hogrefe Publishing Group'}, {'value': '10.1026', 'public-references': False,
'name': 'Hogrefe Publishing Group'}], 'counts': {'total-dois': 35039, 'backfile-dois': 31430,
'current-dois': 3609}, 'coverage': {'references-backfile': 0.3601972758769989,
'references-current': 0.019118869677186012, 'abstracts-current': 0.0,
'update-policies-backfile': 0.0, 'orcids-current': 0.0, 'orcids-backfile': 0.0,
'licenses-current': 0.0, 'affiliations-backfile': 0.05685650557279587,
'licenses-backfile': 0.0, 'update-policies-current': 0.0, 'resource-links-current': 0.0,
'resource-links-backfile': 0.0, 'award-numbers-backfile': 0.0, 'abstracts-backfile': 0.0,
'funders-current': 0.0, 'funders-backfile': 0.0, 'affiliations-current': 0.15710723400115967,
'award-numbers-current': 0.0}, 'flags': {'deposits-orcids-backfile': False,
'deposits-references-backfile': True, 'deposits-licenses-current': False, 'deposits': True,
'deposits-abstracts-current': False, 'deposits-award-numbers-current': False,
'deposits-articles': True, 'deposits-resource-links-backfile': False,
'deposits-funders-current': False, 'deposits-award-numbers-backfile': False,
'deposits-references-current': True, 'deposits-abstracts-backfile': False,
'deposits-funders-backfile': False, 'deposits-update-policies-current': False,
'deposits-orcids-current': False, 'deposits-licenses-backfile': False,
'deposits-affiliations-backfile': True, 'deposits-update-policies-backfile': False,
'deposits-resource-links-current': False, 'deposits-affiliations-current': True},
'names': ['Hogrefe Publishing Group'], 'breakdowns': {'dois-by-issued-year':
[[2003, 2329], [2004, 2264], [2002, 2211], [2005, 2204], [2006, 2158], [2007, 2121], [2016, 1954],
[2008, 1884], [2015, 1838], [2012, 1827], [2013, 1805], [2014, 1796], [2009, 1760], [2010, 1718],
[2011, 1681], [2001, 1479], [2000, 1477], [1999, 1267], [2017, 767], [1997, 164], [1996, 140],
[1998, 138], [1995, 103], [1994, 11], [1993, 11], [0, 1]]},
'location': 'Langgass-Strasse 76 Berne CH-3000 Switzerland', 'prefixes': ['10.1024', '10.1027',
'10.1026'], 'last-status-check-time': 1496034132646, 'id': 101, 'tokens': ['hogrefe', 'publishing',
'group'], 'primary-name': 'Hogrefe Publishing Group'}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, str(member_id)])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result
def member_exists(self, member_id):
"""
This method retrieve a boolean according to the existence of a crossref
member. It returns False if the API results a 404 status code.
args: Crossref allowed document Type (String)
return: Boolean
Example 1:
>>> from crossref.restful import Members
>>> members = Members()
>>> members.member_exists(101)
True
Example 2:
>>> from crossref.restful import Members
>>> members = Members()
>>> members.member_exists(88888)
False
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, str(member_id)])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
only_headers=True,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return False
return True
|
fabiobatalha/crossrefapi | crossref/restful.py | Types.all | python | def all(self):
request_url = build_url_endpoint(self.ENDPOINT, self.context)
request_params = dict(self.request_params)
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
raise StopIteration()
result = result.json()
for item in result['message']['items']:
yield item | This method retrieve an iterator with all the available types.
return: iterator of crossref document types
Example:
>>> from crossref.restful import Types
>>> types = Types()
>>> [i for i in types.all()]
[{'label': 'Book Section', 'id': 'book-section'},
{'label': 'Monograph', 'id': 'monograph'},
{'label': 'Report', 'id': 'report'},
{'label': 'Book Track', 'id': 'book-track'},
{'label': 'Journal Article', 'id': 'journal-article'},
{'label': 'Part', 'id': 'book-part'},
...
}] | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1466-L1501 | [
"def build_url_endpoint(endpoint, context=None):\n\n endpoint = '/'.join([i for i in [context, endpoint] if i])\n\n return 'https://%s/%s' % (API, endpoint)\n"
] | class Types(Endpoint):
CURSOR_AS_ITER_METHOD = False
ENDPOINT = 'types'
def type(self, type_id, only_message=True):
"""
This method retrive a crossref document type metadata related to the
given type_id.
args: Crossref allowed document Types (String)
Example:
>>> types.type('journal-article')
{'label': 'Journal Article', 'id': 'journal-article'}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, str(type_id)])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result
def type_exists(self, type_id):
"""
This method retrieve a boolean according to the existence of a crossref
document type. It returns False if the API results a 404 status code.
args: Crossref allowed document Type (String)
return: Boolean
Example 1:
>>> from crossref.restful import Types
>>> types = Types()
>>> types.type_exists('journal-article')
True
Example 2:
>>> from crossref.restful import Types
>>> types = Types()
>>> types.type_exists('unavailable type')
False
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, str(type_id)])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
only_headers=True,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return False
return True
def works(self, type_id):
"""
This method retrieve a iterable of Works of the given type.
args: Crossref allowed document Types (String)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(type_id))
return Works(context=context)
|
fabiobatalha/crossrefapi | crossref/restful.py | Types.works | python | def works(self, type_id):
context = '%s/%s' % (self.ENDPOINT, str(type_id))
return Works(context=context) | This method retrieve a iterable of Works of the given type.
args: Crossref allowed document Types (String)
return: Works() | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1542-L1551 | null | class Types(Endpoint):
CURSOR_AS_ITER_METHOD = False
ENDPOINT = 'types'
def type(self, type_id, only_message=True):
"""
This method retrive a crossref document type metadata related to the
given type_id.
args: Crossref allowed document Types (String)
Example:
>>> types.type('journal-article')
{'label': 'Journal Article', 'id': 'journal-article'}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, str(type_id)])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result
def all(self):
"""
This method retrieve an iterator with all the available types.
return: iterator of crossref document types
Example:
>>> from crossref.restful import Types
>>> types = Types()
>>> [i for i in types.all()]
[{'label': 'Book Section', 'id': 'book-section'},
{'label': 'Monograph', 'id': 'monograph'},
{'label': 'Report', 'id': 'report'},
{'label': 'Book Track', 'id': 'book-track'},
{'label': 'Journal Article', 'id': 'journal-article'},
{'label': 'Part', 'id': 'book-part'},
...
}]
"""
request_url = build_url_endpoint(self.ENDPOINT, self.context)
request_params = dict(self.request_params)
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
raise StopIteration()
result = result.json()
for item in result['message']['items']:
yield item
def type_exists(self, type_id):
"""
This method retrieve a boolean according to the existence of a crossref
document type. It returns False if the API results a 404 status code.
args: Crossref allowed document Type (String)
return: Boolean
Example 1:
>>> from crossref.restful import Types
>>> types = Types()
>>> types.type_exists('journal-article')
True
Example 2:
>>> from crossref.restful import Types
>>> types = Types()
>>> types.type_exists('unavailable type')
False
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, str(type_id)])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
only_headers=True,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return False
return True
|
fabiobatalha/crossrefapi | crossref/restful.py | Prefixes.works | python | def works(self, prefix_id):
context = '%s/%s' % (self.ENDPOINT, str(prefix_id))
return Works(context=context) | This method retrieve a iterable of Works of the given prefix.
args: Crossref Prefix (String)
return: Works() | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1594-L1603 | null | class Prefixes(Endpoint):
CURSOR_AS_ITER_METHOD = False
ENDPOINT = 'prefixes'
def prefix(self, prefix_id, only_message=True):
"""
This method retrieve a json with the given Prefix metadata
args: Crossref Prefix (String)
return: JSON
Example:
>>> from crossref.restful import Prefixes
>>> prefixes = Prefixes()
>>> prefixes.prefix('10.1590')
{'name': 'FapUNIFESP (SciELO)', 'member': 'http://id.crossref.org/member/530',
'prefix': 'http://id.crossref.org/prefix/10.1590'}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, str(prefix_id)])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result
|
fabiobatalha/crossrefapi | crossref/restful.py | Journals.works | python | def works(self, issn):
context = '%s/%s' % (self.ENDPOINT, str(issn))
return Works(context=context) | This method retrieve a iterable of Works of the given journal.
args: Journal ISSN (String)
return: Works() | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1718-L1728 | null | class Journals(Endpoint):
CURSOR_AS_ITER_METHOD = False
ENDPOINT = 'journals'
def query(self, *args):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
args: strings (String)
return: iterable object of Journals metadata
Example:
>>> from crossref.restful import Journals
>>> journals = Journals().query('Public Health', 'Health Science')
>>> journals.url
'https://api.crossref.org/journals?query=Public+Health+Health+Science'
>>> next(iter(journals))
{'last-status-check-time': None, 'counts': None, 'coverage': None,
'publisher': 'ScopeMed International Medical Journal Managment and Indexing System',
'flags': None, 'breakdowns': None, 'ISSN': ['2320-4664', '2277-338X'],
'title': 'International Journal of Medical Science and Public Health'}
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT)
request_params = dict(self.request_params)
if args:
request_params['query'] = ' '.join([str(i) for i in args])
return self.__class__(request_url, request_params, context, self.etiquette)
def journal(self, issn, only_message=True):
"""
This method retrieve a json with the given ISSN metadata
args: Journal ISSN (String)
return: Journal JSON data
Example:
>>> from crossref.restful import Journals
>>> journals = Journals()
>>> journals.journal('2277-338X')
{'last-status-check-time': None, 'counts': None, 'coverage': None,
'publisher': 'ScopeMed International Medical Journal Managment and Indexing System',
'flags': None, 'breakdowns': None, 'ISSN': ['2320-4664', '2277-338X'],
'title': 'International Journal of Medical Science and Public Health'}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, str(issn)])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result
def journal_exists(self, issn):
"""
This method retrieve a boolean according to the existence of a journal
in the Crossref database. It returns False if the API results a 404
status code.
args: Journal ISSN (String)
return: Boolean
Example 1:
>>> from crossref.restful import Journals
>>> journals = Journals()
>>> journals.journal_exists('2277-338X')
True
Example 2:
>>> from crossref.restful import Journals
>>> journals = Journals()
>>> journals.journal_exists('9999-AAAA')
False
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, str(issn)])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
only_headers=True,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return False
return True
|
fabiobatalha/crossrefapi | crossref/restful.py | Depositor.register_doi | python | def register_doi(self, submission_id, request_xml):
endpoint = self.get_endpoint('deposit')
files = {
'mdFile': ('%s.xml' % submission_id, request_xml)
}
params = {
'operation': 'doMDUpload',
'login_id': self.api_user,
'login_passwd': self.api_key
}
result = self.do_http_request(
'post',
endpoint,
data=params,
files=files,
timeout=10,
custom_header=str(self.etiquette)
)
return result | This method registry a new DOI number in Crossref or update some DOI
metadata.
submission_id: Will be used as the submission file name. The file name
could be used in future requests to retrieve the submission status.
request_xml: The XML with the document metadata. It must be under
compliance with the Crossref Submission Schema. | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1746-L1779 | [
"def get_endpoint(self, verb):\n subdomain = 'test' if self.use_test_server else 'doi'\n return \"https://{}.crossref.org/servlet/{}\".format(subdomain, verb)\n"
] | class Depositor(object):
def __init__(self, prefix, api_user, api_key, etiquette=None,
use_test_server=False):
self.do_http_request = HTTPRequest(throttle=False).do_http_request
self.etiquette = etiquette or Etiquette()
self.prefix = prefix
self.api_user = api_user
self.api_key = api_key
self.use_test_server = use_test_server
def get_endpoint(self, verb):
subdomain = 'test' if self.use_test_server else 'doi'
return "https://{}.crossref.org/servlet/{}".format(subdomain, verb)
def request_doi_status_by_filename(self, file_name, data_type='result'):
"""
This method retrieve the DOI requests status.
file_name: Used as unique ID to identify a deposit.
data_type: [contents, result]
contents - retrieve the XML submited by the publisher
result - retrieve a JSON with the status of the submission
"""
endpoint = self.get_endpoint('submissionDownload')
params = {
'usr': self.api_user,
'pwd': self.api_key,
'file_name': file_name,
'type': data_type
}
result = self.do_http_request(
'get',
endpoint,
data=params,
timeout=10,
custom_header=str(self.etiquette)
)
return result
def request_doi_status_by_batch_id(self, doi_batch_id, data_type='result'):
"""
This method retrieve the DOI requests status.
file_name: Used as unique ID to identify a deposit.
data_type: [contents, result]
contents - retrieve the XML submited by the publisher
result - retrieve a XML with the status of the submission
"""
endpoint = self.get_endpoint('submissionDownload')
params = {
'usr': self.api_user,
'pwd': self.api_key,
'doi_batch_id': doi_batch_id,
'type': data_type
}
result = self.do_http_request(
'get',
endpoint,
data=params,
timeout=10,
custom_header=str(self.etiquette)
)
return result
|
fabiobatalha/crossrefapi | crossref/restful.py | Depositor.request_doi_status_by_filename | python | def request_doi_status_by_filename(self, file_name, data_type='result'):
endpoint = self.get_endpoint('submissionDownload')
params = {
'usr': self.api_user,
'pwd': self.api_key,
'file_name': file_name,
'type': data_type
}
result = self.do_http_request(
'get',
endpoint,
data=params,
timeout=10,
custom_header=str(self.etiquette)
)
return result | This method retrieve the DOI requests status.
file_name: Used as unique ID to identify a deposit.
data_type: [contents, result]
contents - retrieve the XML submited by the publisher
result - retrieve a JSON with the status of the submission | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1781-L1809 | [
"def get_endpoint(self, verb):\n subdomain = 'test' if self.use_test_server else 'doi'\n return \"https://{}.crossref.org/servlet/{}\".format(subdomain, verb)\n"
] | class Depositor(object):
def __init__(self, prefix, api_user, api_key, etiquette=None,
use_test_server=False):
self.do_http_request = HTTPRequest(throttle=False).do_http_request
self.etiquette = etiquette or Etiquette()
self.prefix = prefix
self.api_user = api_user
self.api_key = api_key
self.use_test_server = use_test_server
def get_endpoint(self, verb):
subdomain = 'test' if self.use_test_server else 'doi'
return "https://{}.crossref.org/servlet/{}".format(subdomain, verb)
def register_doi(self, submission_id, request_xml):
"""
This method registry a new DOI number in Crossref or update some DOI
metadata.
submission_id: Will be used as the submission file name. The file name
could be used in future requests to retrieve the submission status.
request_xml: The XML with the document metadata. It must be under
compliance with the Crossref Submission Schema.
"""
endpoint = self.get_endpoint('deposit')
files = {
'mdFile': ('%s.xml' % submission_id, request_xml)
}
params = {
'operation': 'doMDUpload',
'login_id': self.api_user,
'login_passwd': self.api_key
}
result = self.do_http_request(
'post',
endpoint,
data=params,
files=files,
timeout=10,
custom_header=str(self.etiquette)
)
return result
def request_doi_status_by_batch_id(self, doi_batch_id, data_type='result'):
"""
This method retrieve the DOI requests status.
file_name: Used as unique ID to identify a deposit.
data_type: [contents, result]
contents - retrieve the XML submited by the publisher
result - retrieve a XML with the status of the submission
"""
endpoint = self.get_endpoint('submissionDownload')
params = {
'usr': self.api_user,
'pwd': self.api_key,
'doi_batch_id': doi_batch_id,
'type': data_type
}
result = self.do_http_request(
'get',
endpoint,
data=params,
timeout=10,
custom_header=str(self.etiquette)
)
return result
|
fabiobatalha/crossrefapi | crossref/restful.py | Depositor.request_doi_status_by_batch_id | python | def request_doi_status_by_batch_id(self, doi_batch_id, data_type='result'):
endpoint = self.get_endpoint('submissionDownload')
params = {
'usr': self.api_user,
'pwd': self.api_key,
'doi_batch_id': doi_batch_id,
'type': data_type
}
result = self.do_http_request(
'get',
endpoint,
data=params,
timeout=10,
custom_header=str(self.etiquette)
)
return result | This method retrieve the DOI requests status.
file_name: Used as unique ID to identify a deposit.
data_type: [contents, result]
contents - retrieve the XML submited by the publisher
result - retrieve a XML with the status of the submission | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1811-L1839 | [
"def get_endpoint(self, verb):\n subdomain = 'test' if self.use_test_server else 'doi'\n return \"https://{}.crossref.org/servlet/{}\".format(subdomain, verb)\n"
] | class Depositor(object):
def __init__(self, prefix, api_user, api_key, etiquette=None,
use_test_server=False):
self.do_http_request = HTTPRequest(throttle=False).do_http_request
self.etiquette = etiquette or Etiquette()
self.prefix = prefix
self.api_user = api_user
self.api_key = api_key
self.use_test_server = use_test_server
def get_endpoint(self, verb):
subdomain = 'test' if self.use_test_server else 'doi'
return "https://{}.crossref.org/servlet/{}".format(subdomain, verb)
def register_doi(self, submission_id, request_xml):
"""
This method registry a new DOI number in Crossref or update some DOI
metadata.
submission_id: Will be used as the submission file name. The file name
could be used in future requests to retrieve the submission status.
request_xml: The XML with the document metadata. It must be under
compliance with the Crossref Submission Schema.
"""
endpoint = self.get_endpoint('deposit')
files = {
'mdFile': ('%s.xml' % submission_id, request_xml)
}
params = {
'operation': 'doMDUpload',
'login_id': self.api_user,
'login_passwd': self.api_key
}
result = self.do_http_request(
'post',
endpoint,
data=params,
files=files,
timeout=10,
custom_header=str(self.etiquette)
)
return result
def request_doi_status_by_filename(self, file_name, data_type='result'):
"""
This method retrieve the DOI requests status.
file_name: Used as unique ID to identify a deposit.
data_type: [contents, result]
contents - retrieve the XML submited by the publisher
result - retrieve a JSON with the status of the submission
"""
endpoint = self.get_endpoint('submissionDownload')
params = {
'usr': self.api_user,
'pwd': self.api_key,
'file_name': file_name,
'type': data_type
}
result = self.do_http_request(
'get',
endpoint,
data=params,
timeout=10,
custom_header=str(self.etiquette)
)
return result
|
fabiobatalha/crossrefapi | crossref/utils.py | asbool | python | def asbool(s):
if s is None:
return False
if isinstance(s, bool):
return s
s = str(s).strip()
return s.lower() in truthy | Return the boolean value ``True`` if the case-lowered value of string
input ``s`` is a :term:`truthy string`. If ``s`` is already one of the
boolean values ``True`` or ``False``, return it. | train | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/utils.py#L6-L15 | null | # coding: utf-8
truthy = frozenset(('t', 'true', 'y', 'yes', 'on', '1'))
|
Chyroc/WechatSogou | wechatsogou/api.py | WechatSogouAPI.__hosting_wechat_img | python | def __hosting_wechat_img(self, content_info, hosting_callback):
assert callable(hosting_callback)
content_img_list = content_info.pop("content_img_list")
content_html = content_info.pop("content_html")
for idx, img_url in enumerate(content_img_list):
hosting_img_url = hosting_callback(img_url)
if not hosting_img_url:
# todo 定义标准异常
raise Exception()
content_img_list[idx] = hosting_img_url
content_html = content_html.replace(img_url, hosting_img_url)
return dict(content_img_list=content_img_list, content_html=content_html) | 将微信明细中图片托管到云端,同时将html页面中的对应图片替换
Parameters
----------
content_info : dict 微信文章明细字典
{
'content_img_list': [], # 从微信文章解析出的原始图片列表
'content_html': '', # 从微信文章解析出文章的内容
}
hosting_callback : callable
托管回调函数,传入单个图片链接,返回托管后的图片链接
Returns
-------
dict
{
'content_img_list': '', # 托管后的图片列表
'content_html': '', # 图片链接为托管后的图片链接内容
} | train | https://github.com/Chyroc/WechatSogou/blob/2e0e9886f555fd8bcfc7ae9718ced6ce955cd24a/wechatsogou/api.py#L138-L171 | null | class WechatSogouAPI(object):
def __init__(self, captcha_break_time=1, headers=None, **kwargs):
"""初始化参数
Parameters
----------
captcha_break_time : int
验证码输入错误重试次数
proxies : dict
代理
timeout : float
超时时间
"""
assert isinstance(captcha_break_time, int) and 0 < captcha_break_time < 20
self.captcha_break_times = captcha_break_time
self.requests_kwargs = kwargs
self.headers = headers
if self.headers:
self.headers['User-Agent'] = random.choice(agents)
else:
self.headers = {'User-Agent': random.choice(agents)}
def __set_cookie(self, suv=None, snuid=None, referer=None):
suv = ws_cache.get('suv') if suv is None else suv
snuid = ws_cache.get('snuid') if snuid is None else snuid
_headers = {'Cookie': 'SUV={};SNUID={};'.format(suv, snuid)}
if referer is not None:
_headers['Referer'] = referer
return _headers
def __set_cache(self, suv, snuid):
ws_cache.set('suv', suv)
ws_cache.set('snuid', snuid)
def __get(self, url, session, headers):
h = {}
if headers:
for k, v in headers.items():
h[k] = v
if self.headers:
for k, v in self.headers.items():
h[k] = v
resp = session.get(url, headers=h, **self.requests_kwargs)
if not resp.ok:
raise WechatSogouRequestsException('WechatSogouAPI get error', resp)
return resp
def __unlock_sogou(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_sogou_callback_example
millis = int(round(time.time() * 1000))
r_captcha = session.get('http://weixin.sogou.com/antispider/util/seccode.php?tc={}'.format(millis), headers={
'Referer': url,
})
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI get img', r_captcha)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['code'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {code}, msg: {msg}'.format(code=r_unlock.get('code'),
msg=r_unlock.get('msg')))
else:
self.__set_cache(session.cookies.get('SUID'), r_unlock['id'])
def __unlock_wechat(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_weixin_callback_example
r_captcha = session.get('https://mp.weixin.qq.com/mp/verifycode?cert={}'.format(time.time() * 1000))
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI unlock_history get img', resp)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['ret'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {ret}, msg: {errmsg}, cookie_count: {cookie_count}'.format(
ret=r_unlock.get('ret'), errmsg=r_unlock.get('errmsg'), cookie_count=r_unlock.get('cookie_count')))
def __get_by_unlock(self, url, referer=None, unlock_platform=None, unlock_callback=None, identify_image_callback=None, session=None):
assert unlock_platform is None or callable(unlock_platform)
if identify_image_callback is None:
identify_image_callback = identify_image_callback_by_hand
assert unlock_callback is None or callable(unlock_callback)
assert callable(identify_image_callback)
if not session:
session = requests.session()
resp = self.__get(url, session, headers=self.__set_cookie(referer=referer))
resp.encoding = 'utf-8'
if 'antispider' in resp.url or '请输入验证码' in resp.text:
for i in range(self.captcha_break_times):
try:
unlock_platform(url=url, resp=resp, session=session, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback)
break
except WechatSogouVcodeOcrException as e:
if i == self.captcha_break_times - 1:
raise WechatSogouVcodeOcrException(e)
if '请输入验证码' in resp.text:
resp = session.get(url)
resp.encoding = 'utf-8'
else:
headers = self.__set_cookie(referer=referer)
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64)'
resp = self.__get(url, session, headers)
resp.encoding = 'utf-8'
return resp
def __format_url(self, url, referer, text, unlock_callback=None, identify_image_callback=None, session=None):
def _parse_url(url, pads):
b = math.floor(random.random() * 100) + 1
a = url.find("url=")
c = url.find("&k=")
if a != -1 and c == -1:
sum = 0
for i in list(pads) + [a, b]:
sum += int(must_str(i))
a = url[sum]
return '{}&k={}&h={}'.format(url, may_int(b), may_int(a))
if url.startswith('/link?url='):
url = 'https://weixin.sogou.com{}'.format(url)
pads = re.findall(r'href\.substr\(a\+(\d+)\+parseInt\("(\d+)"\)\+b,1\)', text)
url = _parse_url(url, pads[0] if pads else [])
resp = self.__get_by_unlock(url,
referer=referer,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
uri = ''
base_url = re.findall(r'var url = \'(.*?)\';', resp.text)
if base_url and len(base_url) > 0:
uri = base_url[0]
mp_url = re.findall(r'url \+= \'(.*?)\';', resp.text)
if mp_url:
uri = uri + ''.join(mp_url)
url = uri.replace('@', '')
return url
def get_gzh_info(self, wecgat_id_or_name, unlock_callback=None, identify_image_callback=None, decode_url=True):
"""获取公众号微信号 wechatid 的信息
因为wechatid唯一确定,所以第一个就是要搜索的公众号
Parameters
----------
wecgat_id_or_name : str or unicode
wechat_id or wechat_name
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict or None
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
"""
info = self.search_gzh(wecgat_id_or_name, 1, unlock_callback, identify_image_callback, decode_url)
try:
return next(info)
except StopIteration:
return None
def search_gzh(self, keyword, page=1, unlock_callback=None, identify_image_callback=None, decode_url=True):
"""搜索 公众号
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
Raises
------
WechatSogouRequestsException
requests error
"""
url = WechatSogouRequest.gen_search_gzh_url(keyword, page)
session = requests.session()
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
gzh_list = WechatSogouStructuring.get_gzh_by_search(resp.text)
for i in gzh_list:
if decode_url:
i['profile_url'] = self.__format_url(i['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i
def search_article(self, keyword, page=1, timesn=WechatSogouConst.search_article_time.anytime,
article_type=WechatSogouConst.search_article_type.all, ft=None, et=None,
unlock_callback=None,
identify_image_callback=None,
decode_url=True):
"""搜索 文章
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
timesn : WechatSogouConst.search_article_time
时间 anytime 没有限制 / day 一天 / week 一周 / month 一月 / year 一年 / specific 自定
the default is anytime
article_type : WechatSogouConst.search_article_type
含有内容的类型 image 有图 / video 有视频 / rich 有图和视频 / all 啥都有
ft, et : datetime.date or None
当 tsn 是 specific 时,ft 代表开始时间,如: 2017-07-01
当 tsn 是 specific 时,et 代表结束时间,如: 2017-07-15
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'article': {
'title': '', # 文章标题
'url': '', # 文章链接
'imgs': '', # 文章图片list
'abstract': '', # 文章摘要
'time': '' # 文章推送时间
},
'gzh': {
'profile_url': '', # 公众号最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'isv': '', # 是否加v
}
}
Raises
------
WechatSogouRequestsException
requests error
"""
url = WechatSogouRequest.gen_search_article_url(keyword, page, timesn, article_type, ft, et)
session = requests.session()
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
article_list = WechatSogouStructuring.get_article_by_search(resp.text)
for i in article_list:
if decode_url:
i['article']['url'] = self.__format_url(i['article']['url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
i['gzh']['profile_url'] = self.__format_url(i['gzh']['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i
def get_gzh_article_by_history(self, keyword=None, url=None,
unlock_callback_sogou=None,
identify_image_callback_sogou=None,
unlock_callback_weixin=None,
identify_image_callback_weixin=None):
"""从 公众号的最近10条群发页面 提取公众号信息 和 文章列表信息
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
公众号的id 或者name
url : str or unicode
群发页url,如果不提供url,就先去搜索一遍拿到url
unlock_callback_sogou : callable
处理出现 搜索 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_sogou : callable
处理 搜索 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
unlock_callback_weixin : callable
处理出现 历史页 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_weixin : callable
处理 历史页 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict
{
'gzh': {
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'introduction': '', # 描述
'authentication': '', # 认证
'headimage': '' # 头像
},
'article': [
{
'send_id': '', # 群发id,注意不唯一,因为同一次群发多个消息,而群发id一致
'datetime': '', # 群发datatime
'type': '', # 消息类型,均是49,表示图文
'main': 0, # 是否是一次群发的第一次消息
'title': '', # 文章标题
'abstract': '', # 摘要
'fileid': '', #
'content_url': '', # 文章链接
'source_url': '', # 阅读原文的链接
'cover': '', # 封面图
'author': '', # 作者
'copyright_stat': '', # 文章类型,例如:原创啊
},
...
]
}
Raises
------
WechatSogouRequestsException
requests error
"""
if url is None:
gzh_list = self.get_gzh_info(keyword, unlock_callback_sogou, identify_image_callback_sogou)
if gzh_list is None:
return {}
if 'profile_url' not in gzh_list:
raise Exception() # todo use ws exception
url = gzh_list['profile_url']
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback_weixin,
identify_image_callback=identify_image_callback_weixin)
return WechatSogouStructuring.get_gzh_info_and_article_by_history(resp.text)
def get_gzh_article_by_hot(self, hot_index, page=1, unlock_callback=None, identify_image_callback=None):
"""获取 首页热门文章
Parameters
----------
hot_index : WechatSogouConst.hot_index
首页热门文章的分类(常量):WechatSogouConst.hot_index.xxx
page : int
页数
Returns
-------
list[dict]
{
'gzh': {
'headimage': str, # 公众号头像
'wechat_name': str, # 公众号名称
},
'article': {
'url': str, # 文章临时链接
'title': str, # 文章标题
'abstract': str, # 文章摘要
'time': int, # 推送时间,10位时间戳
'open_id': str, # open id
'main_img': str # 封面图片
}
}
"""
assert hasattr(WechatSogouConst.hot_index, hot_index)
assert isinstance(page, int) and page > 0
url = WechatSogouRequest.gen_hot_url(hot_index, page)
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
return WechatSogouStructuring.get_gzh_article_by_hot(resp.text)
def get_article_content(self, url, del_qqmusic=True, del_mpvoice=True, unlock_callback=None,
identify_image_callback=None, hosting_callback=None, raw=False):
"""获取文章原文,避免临时链接失效
Parameters
----------
url : str or unicode
原文链接,临时链接
raw : bool
True: 返回原始html
False: 返回处理后的html
del_qqmusic: bool
True:微信原文中有插入的qq音乐,则删除
False:微信源文中有插入的qq音乐,则保留
del_mpvoice: bool
True:微信原文中有插入的语音消息,则删除
False:微信源文中有插入的语音消息,则保留
unlock_callback : callable
处理 文章明细 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback : callable
处理 文章明细 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
hosting_callback: callable
将微信采集的文章托管到7牛或者阿里云回调函数,输入微信图片源地址,返回托管后地址
Returns
-------
content_html
原文内容
content_img_list
文章中图片列表
Raises
------
WechatSogouRequestsException
"""
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
if '链接已过期' in resp.text:
raise WechatSogouException('get_article_content 链接 [{}] 已过期'.format(url))
if raw:
return resp.text
content_info = WechatSogouStructuring.get_article_detail(resp.text, del_qqmusic=del_qqmusic,
del_voice=del_mpvoice)
if hosting_callback:
content_info = self.__hosting_wechat_img(content_info, hosting_callback)
return content_info
def get_sugg(self, keyword):
"""获取微信搜狗搜索关键词联想
Parameters
----------
keyword : str or unicode
关键词
Returns
-------
list[str]
联想关键词列表
Raises
------
WechatSogouRequestsException
"""
url = 'http://w.sugg.sogou.com/sugg/ajaj_json.jsp?key={}&type=wxpub&pr=web'.format(
quote(keyword.encode('utf-8')))
r = requests.get(url)
if not r.ok:
raise WechatSogouRequestsException('get_sugg', r)
sugg = re.findall(u'\["' + keyword + '",(.*?),\["', r.text)[0]
return json.loads(sugg)
|
Chyroc/WechatSogou | wechatsogou/api.py | WechatSogouAPI.get_gzh_info | python | def get_gzh_info(self, wecgat_id_or_name, unlock_callback=None, identify_image_callback=None, decode_url=True):
info = self.search_gzh(wecgat_id_or_name, 1, unlock_callback, identify_image_callback, decode_url)
try:
return next(info)
except StopIteration:
return None | 获取公众号微信号 wechatid 的信息
因为wechatid唯一确定,所以第一个就是要搜索的公众号
Parameters
----------
wecgat_id_or_name : str or unicode
wechat_id or wechat_name
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict or None
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
} | train | https://github.com/Chyroc/WechatSogou/blob/2e0e9886f555fd8bcfc7ae9718ced6ce955cd24a/wechatsogou/api.py#L208-L241 | [
"def search_gzh(self, keyword, page=1, unlock_callback=None, identify_image_callback=None, decode_url=True):\n \"\"\"搜索 公众号\n\n 对于出现验证码的情况,可以由使用者自己提供:\n 1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程\n 2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决\n 注意:\n 函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用\n\n Parameters\n ----------\n keyword : str or unicode\n 搜索文字\n page : int, optional\n 页数 the default is 1\n unlock_callback : callable\n 处理出现验证码页面的函数,参见 unlock_callback_example\n identify_image_callback : callable\n 处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example\n decode_url : bool\n 是否解析 url\n\n Returns\n -------\n list[dict]\n {\n 'open_id': '', # 微信号唯一ID\n 'profile_url': '', # 最近10条群发页链接\n 'headimage': '', # 头像\n 'wechat_name': '', # 名称\n 'wechat_id': '', # 微信id\n 'post_perm': '', # 最近一月群发数\n 'qrcode': '', # 二维码\n 'introduction': '', # 介绍\n 'authentication': '' # 认证\n }\n\n Raises\n ------\n WechatSogouRequestsException\n requests error\n \"\"\"\n url = WechatSogouRequest.gen_search_gzh_url(keyword, page)\n session = requests.session()\n resp = self.__get_by_unlock(url,\n unlock_platform=self.__unlock_sogou,\n unlock_callback=unlock_callback,\n identify_image_callback=identify_image_callback,\n session=session)\n gzh_list = WechatSogouStructuring.get_gzh_by_search(resp.text)\n for i in gzh_list:\n if decode_url:\n i['profile_url'] = self.__format_url(i['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)\n yield i\n"
] | class WechatSogouAPI(object):
def __init__(self, captcha_break_time=1, headers=None, **kwargs):
"""初始化参数
Parameters
----------
captcha_break_time : int
验证码输入错误重试次数
proxies : dict
代理
timeout : float
超时时间
"""
assert isinstance(captcha_break_time, int) and 0 < captcha_break_time < 20
self.captcha_break_times = captcha_break_time
self.requests_kwargs = kwargs
self.headers = headers
if self.headers:
self.headers['User-Agent'] = random.choice(agents)
else:
self.headers = {'User-Agent': random.choice(agents)}
def __set_cookie(self, suv=None, snuid=None, referer=None):
suv = ws_cache.get('suv') if suv is None else suv
snuid = ws_cache.get('snuid') if snuid is None else snuid
_headers = {'Cookie': 'SUV={};SNUID={};'.format(suv, snuid)}
if referer is not None:
_headers['Referer'] = referer
return _headers
def __set_cache(self, suv, snuid):
ws_cache.set('suv', suv)
ws_cache.set('snuid', snuid)
def __get(self, url, session, headers):
h = {}
if headers:
for k, v in headers.items():
h[k] = v
if self.headers:
for k, v in self.headers.items():
h[k] = v
resp = session.get(url, headers=h, **self.requests_kwargs)
if not resp.ok:
raise WechatSogouRequestsException('WechatSogouAPI get error', resp)
return resp
def __unlock_sogou(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_sogou_callback_example
millis = int(round(time.time() * 1000))
r_captcha = session.get('http://weixin.sogou.com/antispider/util/seccode.php?tc={}'.format(millis), headers={
'Referer': url,
})
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI get img', r_captcha)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['code'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {code}, msg: {msg}'.format(code=r_unlock.get('code'),
msg=r_unlock.get('msg')))
else:
self.__set_cache(session.cookies.get('SUID'), r_unlock['id'])
def __unlock_wechat(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_weixin_callback_example
r_captcha = session.get('https://mp.weixin.qq.com/mp/verifycode?cert={}'.format(time.time() * 1000))
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI unlock_history get img', resp)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['ret'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {ret}, msg: {errmsg}, cookie_count: {cookie_count}'.format(
ret=r_unlock.get('ret'), errmsg=r_unlock.get('errmsg'), cookie_count=r_unlock.get('cookie_count')))
def __get_by_unlock(self, url, referer=None, unlock_platform=None, unlock_callback=None, identify_image_callback=None, session=None):
assert unlock_platform is None or callable(unlock_platform)
if identify_image_callback is None:
identify_image_callback = identify_image_callback_by_hand
assert unlock_callback is None or callable(unlock_callback)
assert callable(identify_image_callback)
if not session:
session = requests.session()
resp = self.__get(url, session, headers=self.__set_cookie(referer=referer))
resp.encoding = 'utf-8'
if 'antispider' in resp.url or '请输入验证码' in resp.text:
for i in range(self.captcha_break_times):
try:
unlock_platform(url=url, resp=resp, session=session, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback)
break
except WechatSogouVcodeOcrException as e:
if i == self.captcha_break_times - 1:
raise WechatSogouVcodeOcrException(e)
if '请输入验证码' in resp.text:
resp = session.get(url)
resp.encoding = 'utf-8'
else:
headers = self.__set_cookie(referer=referer)
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64)'
resp = self.__get(url, session, headers)
resp.encoding = 'utf-8'
return resp
def __hosting_wechat_img(self, content_info, hosting_callback):
"""将微信明细中图片托管到云端,同时将html页面中的对应图片替换
Parameters
----------
content_info : dict 微信文章明细字典
{
'content_img_list': [], # 从微信文章解析出的原始图片列表
'content_html': '', # 从微信文章解析出文章的内容
}
hosting_callback : callable
托管回调函数,传入单个图片链接,返回托管后的图片链接
Returns
-------
dict
{
'content_img_list': '', # 托管后的图片列表
'content_html': '', # 图片链接为托管后的图片链接内容
}
"""
assert callable(hosting_callback)
content_img_list = content_info.pop("content_img_list")
content_html = content_info.pop("content_html")
for idx, img_url in enumerate(content_img_list):
hosting_img_url = hosting_callback(img_url)
if not hosting_img_url:
# todo 定义标准异常
raise Exception()
content_img_list[idx] = hosting_img_url
content_html = content_html.replace(img_url, hosting_img_url)
return dict(content_img_list=content_img_list, content_html=content_html)
def __format_url(self, url, referer, text, unlock_callback=None, identify_image_callback=None, session=None):
def _parse_url(url, pads):
b = math.floor(random.random() * 100) + 1
a = url.find("url=")
c = url.find("&k=")
if a != -1 and c == -1:
sum = 0
for i in list(pads) + [a, b]:
sum += int(must_str(i))
a = url[sum]
return '{}&k={}&h={}'.format(url, may_int(b), may_int(a))
if url.startswith('/link?url='):
url = 'https://weixin.sogou.com{}'.format(url)
pads = re.findall(r'href\.substr\(a\+(\d+)\+parseInt\("(\d+)"\)\+b,1\)', text)
url = _parse_url(url, pads[0] if pads else [])
resp = self.__get_by_unlock(url,
referer=referer,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
uri = ''
base_url = re.findall(r'var url = \'(.*?)\';', resp.text)
if base_url and len(base_url) > 0:
uri = base_url[0]
mp_url = re.findall(r'url \+= \'(.*?)\';', resp.text)
if mp_url:
uri = uri + ''.join(mp_url)
url = uri.replace('@', '')
return url
def search_gzh(self, keyword, page=1, unlock_callback=None, identify_image_callback=None, decode_url=True):
"""搜索 公众号
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
Raises
------
WechatSogouRequestsException
requests error
"""
url = WechatSogouRequest.gen_search_gzh_url(keyword, page)
session = requests.session()
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
gzh_list = WechatSogouStructuring.get_gzh_by_search(resp.text)
for i in gzh_list:
if decode_url:
i['profile_url'] = self.__format_url(i['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i
def search_article(self, keyword, page=1, timesn=WechatSogouConst.search_article_time.anytime,
article_type=WechatSogouConst.search_article_type.all, ft=None, et=None,
unlock_callback=None,
identify_image_callback=None,
decode_url=True):
"""搜索 文章
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
timesn : WechatSogouConst.search_article_time
时间 anytime 没有限制 / day 一天 / week 一周 / month 一月 / year 一年 / specific 自定
the default is anytime
article_type : WechatSogouConst.search_article_type
含有内容的类型 image 有图 / video 有视频 / rich 有图和视频 / all 啥都有
ft, et : datetime.date or None
当 tsn 是 specific 时,ft 代表开始时间,如: 2017-07-01
当 tsn 是 specific 时,et 代表结束时间,如: 2017-07-15
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'article': {
'title': '', # 文章标题
'url': '', # 文章链接
'imgs': '', # 文章图片list
'abstract': '', # 文章摘要
'time': '' # 文章推送时间
},
'gzh': {
'profile_url': '', # 公众号最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'isv': '', # 是否加v
}
}
Raises
------
WechatSogouRequestsException
requests error
"""
url = WechatSogouRequest.gen_search_article_url(keyword, page, timesn, article_type, ft, et)
session = requests.session()
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
article_list = WechatSogouStructuring.get_article_by_search(resp.text)
for i in article_list:
if decode_url:
i['article']['url'] = self.__format_url(i['article']['url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
i['gzh']['profile_url'] = self.__format_url(i['gzh']['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i
def get_gzh_article_by_history(self, keyword=None, url=None,
unlock_callback_sogou=None,
identify_image_callback_sogou=None,
unlock_callback_weixin=None,
identify_image_callback_weixin=None):
"""从 公众号的最近10条群发页面 提取公众号信息 和 文章列表信息
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
公众号的id 或者name
url : str or unicode
群发页url,如果不提供url,就先去搜索一遍拿到url
unlock_callback_sogou : callable
处理出现 搜索 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_sogou : callable
处理 搜索 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
unlock_callback_weixin : callable
处理出现 历史页 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_weixin : callable
处理 历史页 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict
{
'gzh': {
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'introduction': '', # 描述
'authentication': '', # 认证
'headimage': '' # 头像
},
'article': [
{
'send_id': '', # 群发id,注意不唯一,因为同一次群发多个消息,而群发id一致
'datetime': '', # 群发datatime
'type': '', # 消息类型,均是49,表示图文
'main': 0, # 是否是一次群发的第一次消息
'title': '', # 文章标题
'abstract': '', # 摘要
'fileid': '', #
'content_url': '', # 文章链接
'source_url': '', # 阅读原文的链接
'cover': '', # 封面图
'author': '', # 作者
'copyright_stat': '', # 文章类型,例如:原创啊
},
...
]
}
Raises
------
WechatSogouRequestsException
requests error
"""
if url is None:
gzh_list = self.get_gzh_info(keyword, unlock_callback_sogou, identify_image_callback_sogou)
if gzh_list is None:
return {}
if 'profile_url' not in gzh_list:
raise Exception() # todo use ws exception
url = gzh_list['profile_url']
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback_weixin,
identify_image_callback=identify_image_callback_weixin)
return WechatSogouStructuring.get_gzh_info_and_article_by_history(resp.text)
def get_gzh_article_by_hot(self, hot_index, page=1, unlock_callback=None, identify_image_callback=None):
"""获取 首页热门文章
Parameters
----------
hot_index : WechatSogouConst.hot_index
首页热门文章的分类(常量):WechatSogouConst.hot_index.xxx
page : int
页数
Returns
-------
list[dict]
{
'gzh': {
'headimage': str, # 公众号头像
'wechat_name': str, # 公众号名称
},
'article': {
'url': str, # 文章临时链接
'title': str, # 文章标题
'abstract': str, # 文章摘要
'time': int, # 推送时间,10位时间戳
'open_id': str, # open id
'main_img': str # 封面图片
}
}
"""
assert hasattr(WechatSogouConst.hot_index, hot_index)
assert isinstance(page, int) and page > 0
url = WechatSogouRequest.gen_hot_url(hot_index, page)
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
return WechatSogouStructuring.get_gzh_article_by_hot(resp.text)
def get_article_content(self, url, del_qqmusic=True, del_mpvoice=True, unlock_callback=None,
identify_image_callback=None, hosting_callback=None, raw=False):
"""获取文章原文,避免临时链接失效
Parameters
----------
url : str or unicode
原文链接,临时链接
raw : bool
True: 返回原始html
False: 返回处理后的html
del_qqmusic: bool
True:微信原文中有插入的qq音乐,则删除
False:微信源文中有插入的qq音乐,则保留
del_mpvoice: bool
True:微信原文中有插入的语音消息,则删除
False:微信源文中有插入的语音消息,则保留
unlock_callback : callable
处理 文章明细 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback : callable
处理 文章明细 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
hosting_callback: callable
将微信采集的文章托管到7牛或者阿里云回调函数,输入微信图片源地址,返回托管后地址
Returns
-------
content_html
原文内容
content_img_list
文章中图片列表
Raises
------
WechatSogouRequestsException
"""
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
if '链接已过期' in resp.text:
raise WechatSogouException('get_article_content 链接 [{}] 已过期'.format(url))
if raw:
return resp.text
content_info = WechatSogouStructuring.get_article_detail(resp.text, del_qqmusic=del_qqmusic,
del_voice=del_mpvoice)
if hosting_callback:
content_info = self.__hosting_wechat_img(content_info, hosting_callback)
return content_info
def get_sugg(self, keyword):
"""获取微信搜狗搜索关键词联想
Parameters
----------
keyword : str or unicode
关键词
Returns
-------
list[str]
联想关键词列表
Raises
------
WechatSogouRequestsException
"""
url = 'http://w.sugg.sogou.com/sugg/ajaj_json.jsp?key={}&type=wxpub&pr=web'.format(
quote(keyword.encode('utf-8')))
r = requests.get(url)
if not r.ok:
raise WechatSogouRequestsException('get_sugg', r)
sugg = re.findall(u'\["' + keyword + '",(.*?),\["', r.text)[0]
return json.loads(sugg)
|
Chyroc/WechatSogou | wechatsogou/api.py | WechatSogouAPI.search_gzh | python | def search_gzh(self, keyword, page=1, unlock_callback=None, identify_image_callback=None, decode_url=True):
url = WechatSogouRequest.gen_search_gzh_url(keyword, page)
session = requests.session()
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
gzh_list = WechatSogouStructuring.get_gzh_by_search(resp.text)
for i in gzh_list:
if decode_url:
i['profile_url'] = self.__format_url(i['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i | 搜索 公众号
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
Raises
------
WechatSogouRequestsException
requests error | train | https://github.com/Chyroc/WechatSogou/blob/2e0e9886f555fd8bcfc7ae9718ced6ce955cd24a/wechatsogou/api.py#L243-L296 | [
"def __get_by_unlock(self, url, referer=None, unlock_platform=None, unlock_callback=None, identify_image_callback=None, session=None):\n assert unlock_platform is None or callable(unlock_platform)\n\n if identify_image_callback is None:\n identify_image_callback = identify_image_callback_by_hand\n assert unlock_callback is None or callable(unlock_callback)\n assert callable(identify_image_callback)\n\n if not session:\n session = requests.session()\n resp = self.__get(url, session, headers=self.__set_cookie(referer=referer))\n resp.encoding = 'utf-8'\n if 'antispider' in resp.url or '请输入验证码' in resp.text:\n for i in range(self.captcha_break_times):\n try:\n unlock_platform(url=url, resp=resp, session=session, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback)\n break\n except WechatSogouVcodeOcrException as e:\n if i == self.captcha_break_times - 1:\n raise WechatSogouVcodeOcrException(e)\n\n if '请输入验证码' in resp.text:\n resp = session.get(url)\n resp.encoding = 'utf-8'\n else:\n headers = self.__set_cookie(referer=referer)\n headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64)'\n resp = self.__get(url, session, headers)\n resp.encoding = 'utf-8'\n\n return resp\n",
"def __format_url(self, url, referer, text, unlock_callback=None, identify_image_callback=None, session=None):\n def _parse_url(url, pads):\n b = math.floor(random.random() * 100) + 1\n a = url.find(\"url=\")\n c = url.find(\"&k=\")\n if a != -1 and c == -1:\n sum = 0\n for i in list(pads) + [a, b]:\n sum += int(must_str(i))\n a = url[sum]\n\n return '{}&k={}&h={}'.format(url, may_int(b), may_int(a))\n\n if url.startswith('/link?url='):\n url = 'https://weixin.sogou.com{}'.format(url)\n\n pads = re.findall(r'href\\.substr\\(a\\+(\\d+)\\+parseInt\\(\"(\\d+)\"\\)\\+b,1\\)', text)\n url = _parse_url(url, pads[0] if pads else [])\n resp = self.__get_by_unlock(url,\n referer=referer,\n unlock_platform=self.__unlock_sogou,\n unlock_callback=unlock_callback,\n identify_image_callback=identify_image_callback,\n session=session)\n uri = ''\n base_url = re.findall(r'var url = \\'(.*?)\\';', resp.text)\n if base_url and len(base_url) > 0:\n uri = base_url[0]\n\n mp_url = re.findall(r'url \\+= \\'(.*?)\\';', resp.text)\n if mp_url:\n uri = uri + ''.join(mp_url)\n url = uri.replace('@', '')\n return url\n",
"def gen_search_gzh_url(keyword, page=1):\n \"\"\"拼接搜索 公众号 URL\n\n Parameters\n ----------\n keyword : str or unicode\n 搜索文字\n page : int, optional\n 页数 the default is 1\n\n Returns\n -------\n str\n search_gzh_url\n \"\"\"\n assert isinstance(page, int) and page > 0\n\n qs_dict = OrderedDict()\n qs_dict['type'] = _search_type_gzh\n qs_dict['page'] = page\n qs_dict['ie'] = 'utf8'\n qs_dict['query'] = keyword\n\n return 'http://weixin.sogou.com/weixin?{}'.format(urlencode(qs_dict))\n",
"def get_gzh_by_search(text):\n \"\"\"从搜索公众号获得的文本 提取公众号信息\n\n Parameters\n ----------\n text : str or unicode\n 搜索公众号获得的文本\n\n Returns\n -------\n list[dict]\n {\n 'open_id': '', # 微信号唯一ID\n 'profile_url': '', # 最近10条群发页链接\n 'headimage': '', # 头像\n 'wechat_name': '', # 名称\n 'wechat_id': '', # 微信id\n 'post_perm': '', # 最近一月群发数\n 'view_perm': '', # 最近一月阅读量\n 'qrcode': '', # 二维码\n 'introduction': '', # 介绍\n 'authentication': '' # 认证\n }\n \"\"\"\n post_view_perms = WechatSogouStructuring.__get_post_view_perm(text)\n\n page = etree.HTML(text)\n lis = page.xpath('//ul[@class=\"news-list2\"]/li')\n relist = []\n for li in lis:\n url = get_first_of_element(li, 'div/div[1]/a/@href')\n headimage = format_image_url(get_first_of_element(li, 'div/div[1]/a/img/@src'))\n wechat_name = get_elem_text(get_first_of_element(li, 'div/div[2]/p[1]'))\n info = get_elem_text(get_first_of_element(li, 'div/div[2]/p[2]'))\n qrcode = get_first_of_element(li, 'div/div[3]/span/img[1]/@src')\n introduction = get_elem_text(get_first_of_element(li, 'dl[1]/dd'))\n authentication = get_first_of_element(li, 'dl[2]/dd/text()')\n\n relist.append({\n 'open_id': headimage.split('/')[-1],\n 'profile_url': url,\n 'headimage': headimage,\n 'wechat_name': wechat_name.replace('red_beg', '').replace('red_end', ''),\n 'wechat_id': info.replace('微信号:', ''),\n 'qrcode': qrcode,\n 'introduction': introduction.replace('red_beg', '').replace('red_end', ''),\n 'authentication': authentication,\n 'post_perm': -1,\n 'view_perm': -1,\n })\n\n if post_view_perms:\n for i in relist:\n if i['open_id'] in post_view_perms:\n post_view_perm = post_view_perms[i['open_id']].split(',')\n if len(post_view_perm) == 2:\n i['post_perm'] = int(post_view_perm[0])\n i['view_perm'] = int(post_view_perm[1])\n return relist\n"
] | class WechatSogouAPI(object):
def __init__(self, captcha_break_time=1, headers=None, **kwargs):
"""初始化参数
Parameters
----------
captcha_break_time : int
验证码输入错误重试次数
proxies : dict
代理
timeout : float
超时时间
"""
assert isinstance(captcha_break_time, int) and 0 < captcha_break_time < 20
self.captcha_break_times = captcha_break_time
self.requests_kwargs = kwargs
self.headers = headers
if self.headers:
self.headers['User-Agent'] = random.choice(agents)
else:
self.headers = {'User-Agent': random.choice(agents)}
def __set_cookie(self, suv=None, snuid=None, referer=None):
suv = ws_cache.get('suv') if suv is None else suv
snuid = ws_cache.get('snuid') if snuid is None else snuid
_headers = {'Cookie': 'SUV={};SNUID={};'.format(suv, snuid)}
if referer is not None:
_headers['Referer'] = referer
return _headers
def __set_cache(self, suv, snuid):
ws_cache.set('suv', suv)
ws_cache.set('snuid', snuid)
def __get(self, url, session, headers):
h = {}
if headers:
for k, v in headers.items():
h[k] = v
if self.headers:
for k, v in self.headers.items():
h[k] = v
resp = session.get(url, headers=h, **self.requests_kwargs)
if not resp.ok:
raise WechatSogouRequestsException('WechatSogouAPI get error', resp)
return resp
def __unlock_sogou(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_sogou_callback_example
millis = int(round(time.time() * 1000))
r_captcha = session.get('http://weixin.sogou.com/antispider/util/seccode.php?tc={}'.format(millis), headers={
'Referer': url,
})
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI get img', r_captcha)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['code'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {code}, msg: {msg}'.format(code=r_unlock.get('code'),
msg=r_unlock.get('msg')))
else:
self.__set_cache(session.cookies.get('SUID'), r_unlock['id'])
def __unlock_wechat(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_weixin_callback_example
r_captcha = session.get('https://mp.weixin.qq.com/mp/verifycode?cert={}'.format(time.time() * 1000))
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI unlock_history get img', resp)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['ret'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {ret}, msg: {errmsg}, cookie_count: {cookie_count}'.format(
ret=r_unlock.get('ret'), errmsg=r_unlock.get('errmsg'), cookie_count=r_unlock.get('cookie_count')))
def __get_by_unlock(self, url, referer=None, unlock_platform=None, unlock_callback=None, identify_image_callback=None, session=None):
assert unlock_platform is None or callable(unlock_platform)
if identify_image_callback is None:
identify_image_callback = identify_image_callback_by_hand
assert unlock_callback is None or callable(unlock_callback)
assert callable(identify_image_callback)
if not session:
session = requests.session()
resp = self.__get(url, session, headers=self.__set_cookie(referer=referer))
resp.encoding = 'utf-8'
if 'antispider' in resp.url or '请输入验证码' in resp.text:
for i in range(self.captcha_break_times):
try:
unlock_platform(url=url, resp=resp, session=session, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback)
break
except WechatSogouVcodeOcrException as e:
if i == self.captcha_break_times - 1:
raise WechatSogouVcodeOcrException(e)
if '请输入验证码' in resp.text:
resp = session.get(url)
resp.encoding = 'utf-8'
else:
headers = self.__set_cookie(referer=referer)
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64)'
resp = self.__get(url, session, headers)
resp.encoding = 'utf-8'
return resp
def __hosting_wechat_img(self, content_info, hosting_callback):
"""将微信明细中图片托管到云端,同时将html页面中的对应图片替换
Parameters
----------
content_info : dict 微信文章明细字典
{
'content_img_list': [], # 从微信文章解析出的原始图片列表
'content_html': '', # 从微信文章解析出文章的内容
}
hosting_callback : callable
托管回调函数,传入单个图片链接,返回托管后的图片链接
Returns
-------
dict
{
'content_img_list': '', # 托管后的图片列表
'content_html': '', # 图片链接为托管后的图片链接内容
}
"""
assert callable(hosting_callback)
content_img_list = content_info.pop("content_img_list")
content_html = content_info.pop("content_html")
for idx, img_url in enumerate(content_img_list):
hosting_img_url = hosting_callback(img_url)
if not hosting_img_url:
# todo 定义标准异常
raise Exception()
content_img_list[idx] = hosting_img_url
content_html = content_html.replace(img_url, hosting_img_url)
return dict(content_img_list=content_img_list, content_html=content_html)
def __format_url(self, url, referer, text, unlock_callback=None, identify_image_callback=None, session=None):
def _parse_url(url, pads):
b = math.floor(random.random() * 100) + 1
a = url.find("url=")
c = url.find("&k=")
if a != -1 and c == -1:
sum = 0
for i in list(pads) + [a, b]:
sum += int(must_str(i))
a = url[sum]
return '{}&k={}&h={}'.format(url, may_int(b), may_int(a))
if url.startswith('/link?url='):
url = 'https://weixin.sogou.com{}'.format(url)
pads = re.findall(r'href\.substr\(a\+(\d+)\+parseInt\("(\d+)"\)\+b,1\)', text)
url = _parse_url(url, pads[0] if pads else [])
resp = self.__get_by_unlock(url,
referer=referer,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
uri = ''
base_url = re.findall(r'var url = \'(.*?)\';', resp.text)
if base_url and len(base_url) > 0:
uri = base_url[0]
mp_url = re.findall(r'url \+= \'(.*?)\';', resp.text)
if mp_url:
uri = uri + ''.join(mp_url)
url = uri.replace('@', '')
return url
def get_gzh_info(self, wecgat_id_or_name, unlock_callback=None, identify_image_callback=None, decode_url=True):
"""获取公众号微信号 wechatid 的信息
因为wechatid唯一确定,所以第一个就是要搜索的公众号
Parameters
----------
wecgat_id_or_name : str or unicode
wechat_id or wechat_name
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict or None
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
"""
info = self.search_gzh(wecgat_id_or_name, 1, unlock_callback, identify_image_callback, decode_url)
try:
return next(info)
except StopIteration:
return None
def search_article(self, keyword, page=1, timesn=WechatSogouConst.search_article_time.anytime,
article_type=WechatSogouConst.search_article_type.all, ft=None, et=None,
unlock_callback=None,
identify_image_callback=None,
decode_url=True):
"""搜索 文章
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
timesn : WechatSogouConst.search_article_time
时间 anytime 没有限制 / day 一天 / week 一周 / month 一月 / year 一年 / specific 自定
the default is anytime
article_type : WechatSogouConst.search_article_type
含有内容的类型 image 有图 / video 有视频 / rich 有图和视频 / all 啥都有
ft, et : datetime.date or None
当 tsn 是 specific 时,ft 代表开始时间,如: 2017-07-01
当 tsn 是 specific 时,et 代表结束时间,如: 2017-07-15
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'article': {
'title': '', # 文章标题
'url': '', # 文章链接
'imgs': '', # 文章图片list
'abstract': '', # 文章摘要
'time': '' # 文章推送时间
},
'gzh': {
'profile_url': '', # 公众号最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'isv': '', # 是否加v
}
}
Raises
------
WechatSogouRequestsException
requests error
"""
url = WechatSogouRequest.gen_search_article_url(keyword, page, timesn, article_type, ft, et)
session = requests.session()
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
article_list = WechatSogouStructuring.get_article_by_search(resp.text)
for i in article_list:
if decode_url:
i['article']['url'] = self.__format_url(i['article']['url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
i['gzh']['profile_url'] = self.__format_url(i['gzh']['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i
def get_gzh_article_by_history(self, keyword=None, url=None,
unlock_callback_sogou=None,
identify_image_callback_sogou=None,
unlock_callback_weixin=None,
identify_image_callback_weixin=None):
"""从 公众号的最近10条群发页面 提取公众号信息 和 文章列表信息
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
公众号的id 或者name
url : str or unicode
群发页url,如果不提供url,就先去搜索一遍拿到url
unlock_callback_sogou : callable
处理出现 搜索 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_sogou : callable
处理 搜索 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
unlock_callback_weixin : callable
处理出现 历史页 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_weixin : callable
处理 历史页 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict
{
'gzh': {
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'introduction': '', # 描述
'authentication': '', # 认证
'headimage': '' # 头像
},
'article': [
{
'send_id': '', # 群发id,注意不唯一,因为同一次群发多个消息,而群发id一致
'datetime': '', # 群发datatime
'type': '', # 消息类型,均是49,表示图文
'main': 0, # 是否是一次群发的第一次消息
'title': '', # 文章标题
'abstract': '', # 摘要
'fileid': '', #
'content_url': '', # 文章链接
'source_url': '', # 阅读原文的链接
'cover': '', # 封面图
'author': '', # 作者
'copyright_stat': '', # 文章类型,例如:原创啊
},
...
]
}
Raises
------
WechatSogouRequestsException
requests error
"""
if url is None:
gzh_list = self.get_gzh_info(keyword, unlock_callback_sogou, identify_image_callback_sogou)
if gzh_list is None:
return {}
if 'profile_url' not in gzh_list:
raise Exception() # todo use ws exception
url = gzh_list['profile_url']
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback_weixin,
identify_image_callback=identify_image_callback_weixin)
return WechatSogouStructuring.get_gzh_info_and_article_by_history(resp.text)
def get_gzh_article_by_hot(self, hot_index, page=1, unlock_callback=None, identify_image_callback=None):
"""获取 首页热门文章
Parameters
----------
hot_index : WechatSogouConst.hot_index
首页热门文章的分类(常量):WechatSogouConst.hot_index.xxx
page : int
页数
Returns
-------
list[dict]
{
'gzh': {
'headimage': str, # 公众号头像
'wechat_name': str, # 公众号名称
},
'article': {
'url': str, # 文章临时链接
'title': str, # 文章标题
'abstract': str, # 文章摘要
'time': int, # 推送时间,10位时间戳
'open_id': str, # open id
'main_img': str # 封面图片
}
}
"""
assert hasattr(WechatSogouConst.hot_index, hot_index)
assert isinstance(page, int) and page > 0
url = WechatSogouRequest.gen_hot_url(hot_index, page)
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
return WechatSogouStructuring.get_gzh_article_by_hot(resp.text)
def get_article_content(self, url, del_qqmusic=True, del_mpvoice=True, unlock_callback=None,
identify_image_callback=None, hosting_callback=None, raw=False):
"""获取文章原文,避免临时链接失效
Parameters
----------
url : str or unicode
原文链接,临时链接
raw : bool
True: 返回原始html
False: 返回处理后的html
del_qqmusic: bool
True:微信原文中有插入的qq音乐,则删除
False:微信源文中有插入的qq音乐,则保留
del_mpvoice: bool
True:微信原文中有插入的语音消息,则删除
False:微信源文中有插入的语音消息,则保留
unlock_callback : callable
处理 文章明细 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback : callable
处理 文章明细 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
hosting_callback: callable
将微信采集的文章托管到7牛或者阿里云回调函数,输入微信图片源地址,返回托管后地址
Returns
-------
content_html
原文内容
content_img_list
文章中图片列表
Raises
------
WechatSogouRequestsException
"""
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
if '链接已过期' in resp.text:
raise WechatSogouException('get_article_content 链接 [{}] 已过期'.format(url))
if raw:
return resp.text
content_info = WechatSogouStructuring.get_article_detail(resp.text, del_qqmusic=del_qqmusic,
del_voice=del_mpvoice)
if hosting_callback:
content_info = self.__hosting_wechat_img(content_info, hosting_callback)
return content_info
def get_sugg(self, keyword):
"""获取微信搜狗搜索关键词联想
Parameters
----------
keyword : str or unicode
关键词
Returns
-------
list[str]
联想关键词列表
Raises
------
WechatSogouRequestsException
"""
url = 'http://w.sugg.sogou.com/sugg/ajaj_json.jsp?key={}&type=wxpub&pr=web'.format(
quote(keyword.encode('utf-8')))
r = requests.get(url)
if not r.ok:
raise WechatSogouRequestsException('get_sugg', r)
sugg = re.findall(u'\["' + keyword + '",(.*?),\["', r.text)[0]
return json.loads(sugg)
|
Chyroc/WechatSogou | wechatsogou/api.py | WechatSogouAPI.search_article | python | def search_article(self, keyword, page=1, timesn=WechatSogouConst.search_article_time.anytime,
article_type=WechatSogouConst.search_article_type.all, ft=None, et=None,
unlock_callback=None,
identify_image_callback=None,
decode_url=True):
url = WechatSogouRequest.gen_search_article_url(keyword, page, timesn, article_type, ft, et)
session = requests.session()
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
article_list = WechatSogouStructuring.get_article_by_search(resp.text)
for i in article_list:
if decode_url:
i['article']['url'] = self.__format_url(i['article']['url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
i['gzh']['profile_url'] = self.__format_url(i['gzh']['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i | 搜索 文章
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
timesn : WechatSogouConst.search_article_time
时间 anytime 没有限制 / day 一天 / week 一周 / month 一月 / year 一年 / specific 自定
the default is anytime
article_type : WechatSogouConst.search_article_type
含有内容的类型 image 有图 / video 有视频 / rich 有图和视频 / all 啥都有
ft, et : datetime.date or None
当 tsn 是 specific 时,ft 代表开始时间,如: 2017-07-01
当 tsn 是 specific 时,et 代表结束时间,如: 2017-07-15
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'article': {
'title': '', # 文章标题
'url': '', # 文章链接
'imgs': '', # 文章图片list
'abstract': '', # 文章摘要
'time': '' # 文章推送时间
},
'gzh': {
'profile_url': '', # 公众号最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'isv': '', # 是否加v
}
}
Raises
------
WechatSogouRequestsException
requests error | train | https://github.com/Chyroc/WechatSogou/blob/2e0e9886f555fd8bcfc7ae9718ced6ce955cd24a/wechatsogou/api.py#L298-L369 | [
"def __get_by_unlock(self, url, referer=None, unlock_platform=None, unlock_callback=None, identify_image_callback=None, session=None):\n assert unlock_platform is None or callable(unlock_platform)\n\n if identify_image_callback is None:\n identify_image_callback = identify_image_callback_by_hand\n assert unlock_callback is None or callable(unlock_callback)\n assert callable(identify_image_callback)\n\n if not session:\n session = requests.session()\n resp = self.__get(url, session, headers=self.__set_cookie(referer=referer))\n resp.encoding = 'utf-8'\n if 'antispider' in resp.url or '请输入验证码' in resp.text:\n for i in range(self.captcha_break_times):\n try:\n unlock_platform(url=url, resp=resp, session=session, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback)\n break\n except WechatSogouVcodeOcrException as e:\n if i == self.captcha_break_times - 1:\n raise WechatSogouVcodeOcrException(e)\n\n if '请输入验证码' in resp.text:\n resp = session.get(url)\n resp.encoding = 'utf-8'\n else:\n headers = self.__set_cookie(referer=referer)\n headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64)'\n resp = self.__get(url, session, headers)\n resp.encoding = 'utf-8'\n\n return resp\n",
"def __format_url(self, url, referer, text, unlock_callback=None, identify_image_callback=None, session=None):\n def _parse_url(url, pads):\n b = math.floor(random.random() * 100) + 1\n a = url.find(\"url=\")\n c = url.find(\"&k=\")\n if a != -1 and c == -1:\n sum = 0\n for i in list(pads) + [a, b]:\n sum += int(must_str(i))\n a = url[sum]\n\n return '{}&k={}&h={}'.format(url, may_int(b), may_int(a))\n\n if url.startswith('/link?url='):\n url = 'https://weixin.sogou.com{}'.format(url)\n\n pads = re.findall(r'href\\.substr\\(a\\+(\\d+)\\+parseInt\\(\"(\\d+)\"\\)\\+b,1\\)', text)\n url = _parse_url(url, pads[0] if pads else [])\n resp = self.__get_by_unlock(url,\n referer=referer,\n unlock_platform=self.__unlock_sogou,\n unlock_callback=unlock_callback,\n identify_image_callback=identify_image_callback,\n session=session)\n uri = ''\n base_url = re.findall(r'var url = \\'(.*?)\\';', resp.text)\n if base_url and len(base_url) > 0:\n uri = base_url[0]\n\n mp_url = re.findall(r'url \\+= \\'(.*?)\\';', resp.text)\n if mp_url:\n uri = uri + ''.join(mp_url)\n url = uri.replace('@', '')\n return url\n",
"def gen_search_article_url(keyword, page=1, timesn=WechatSogouConst.search_article_time.anytime,\n article_type=WechatSogouConst.search_article_type.all, ft=None, et=None):\n \"\"\"拼接搜索 文章 URL\n\n Parameters\n ----------\n keyword : str or unicode\n 搜索文字\n page : int, optional\n 页数 the default is 1\n timesn : WechatSogouConst.search_article_time\n 时间 anytime 没有限制 / day 一天 / week 一周 / month 一月 / year 一年 / specific 自定\n 默认是 anytime\n article_type : WechatSogouConst.search_article_type\n 含有内容的类型 image 有图 / video 有视频 / rich 有图和视频 / all 啥都有\n 默认是 all\n ft, et : datetime.date\n 当 tsn 是 specific 时,ft 代表开始时间,如: 2017-07-01\n 当 tsn 是 specific 时,et 代表结束时间,如: 2017-07-15\n\n Returns\n -------\n str\n search_article_url\n \"\"\"\n assert isinstance(page, int) and page > 0\n assert timesn in [WechatSogouConst.search_article_time.anytime,\n WechatSogouConst.search_article_time.day,\n WechatSogouConst.search_article_time.week,\n WechatSogouConst.search_article_time.month,\n WechatSogouConst.search_article_time.year,\n WechatSogouConst.search_article_time.specific]\n\n if timesn == WechatSogouConst.search_article_time.specific:\n assert isinstance(ft, datetime.date)\n assert isinstance(et, datetime.date)\n assert ft <= et\n else:\n ft = ''\n et = ''\n\n interation_image = 458754\n interation_video = 458756\n if article_type == WechatSogouConst.search_article_type.rich:\n interation = '{},{}'.format(interation_image, interation_video)\n elif article_type == WechatSogouConst.search_article_type.image:\n interation = interation_image\n elif article_type == WechatSogouConst.search_article_type.video:\n interation = interation_video\n else:\n interation = ''\n\n qs_dict = OrderedDict()\n qs_dict['type'] = _search_type_article\n qs_dict['page'] = page\n qs_dict['ie'] = 'utf8'\n qs_dict['query'] = keyword\n qs_dict['interation'] = interation\n if timesn != 0:\n qs_dict['tsn'] = timesn\n qs_dict['ft'] = str(ft)\n qs_dict['et'] = str(et)\n\n # TODO 账号内搜索\n # '账号内 
http://weixin.sogou.com/weixin?type=2&ie=utf8&query=%E9%AB%98%E8%80%83&tsn=3&ft=&et=&interation=458754\n # &wxid=oIWsFt1tmWoG6vO6BcsS7St61bRE&usip=nanhangqinggong'\n # qs['wxid'] = wxid\n # qs['usip'] = usip\n\n return 'http://weixin.sogou.com/weixin?{}'.format(urlencode(qs_dict))\n",
"def get_article_by_search(text):\n \"\"\"从搜索文章获得的文本 提取章列表信息\n\n Parameters\n ----------\n text : str or unicode\n 搜索文章获得的文本\n\n Returns\n -------\n list[dict]\n {\n 'article': {\n 'title': '', # 文章标题\n 'url': '', # 文章链接\n 'imgs': '', # 文章图片list\n 'abstract': '', # 文章摘要\n 'time': '' # 文章推送时间\n },\n 'gzh': {\n 'profile_url': '', # 公众号最近10条群发页链接\n 'headimage': '', # 头像\n 'wechat_name': '', # 名称\n 'isv': '', # 是否加v\n }\n }\n \"\"\"\n page = etree.HTML(text)\n lis = page.xpath('//ul[@class=\"news-list\"]/li')\n\n articles = []\n for li in lis:\n url = get_first_of_element(li, 'div[1]/a/@href')\n if url:\n title = get_first_of_element(li, 'div[2]/h3/a')\n imgs = li.xpath('div[1]/a/img/@src')\n abstract = get_first_of_element(li, 'div[2]/p')\n time = get_first_of_element(li, 'div[2]/div/span/script/text()')\n gzh_info = li.xpath('div[2]/div/a')[0]\n else:\n url = get_first_of_element(li, 'div/h3/a/@href')\n title = get_first_of_element(li, 'div/h3/a')\n imgs = []\n spans = li.xpath('div/div[1]/a')\n for span in spans:\n img = span.xpath('span/img/@src')\n if img:\n imgs.append(img)\n abstract = get_first_of_element(li, 'div/p')\n time = get_first_of_element(li, 'div/div[2]/span/script/text()')\n gzh_info = li.xpath('div/div[2]/a')[0]\n\n if title is not None:\n title = get_elem_text(title).replace(\"red_beg\", \"\").replace(\"red_end\", \"\")\n if abstract is not None:\n abstract = get_elem_text(abstract).replace(\"red_beg\", \"\").replace(\"red_end\", \"\")\n\n time = re.findall('timeConvert\\(\\'(.*?)\\'\\)', time)\n time = list_or_empty(time, int)\n profile_url = get_first_of_element(gzh_info, '@href')\n headimage = get_first_of_element(gzh_info, '@data-headimage')\n wechat_name = get_first_of_element(gzh_info, 'text()')\n gzh_isv = get_first_of_element(gzh_info, '@data-isv', int)\n\n articles.append({\n 'article': {\n 'title': title,\n 'url': url,\n 'imgs': format_image_url(imgs),\n 'abstract': abstract,\n 'time': time\n },\n 'gzh': {\n 'profile_url': profile_url,\n 
'headimage': headimage,\n 'wechat_name': wechat_name,\n 'isv': gzh_isv,\n }\n })\n return articles\n"
] | class WechatSogouAPI(object):
def __init__(self, captcha_break_time=1, headers=None, **kwargs):
"""初始化参数
Parameters
----------
captcha_break_time : int
验证码输入错误重试次数
proxies : dict
代理
timeout : float
超时时间
"""
assert isinstance(captcha_break_time, int) and 0 < captcha_break_time < 20
self.captcha_break_times = captcha_break_time
self.requests_kwargs = kwargs
self.headers = headers
if self.headers:
self.headers['User-Agent'] = random.choice(agents)
else:
self.headers = {'User-Agent': random.choice(agents)}
def __set_cookie(self, suv=None, snuid=None, referer=None):
suv = ws_cache.get('suv') if suv is None else suv
snuid = ws_cache.get('snuid') if snuid is None else snuid
_headers = {'Cookie': 'SUV={};SNUID={};'.format(suv, snuid)}
if referer is not None:
_headers['Referer'] = referer
return _headers
def __set_cache(self, suv, snuid):
ws_cache.set('suv', suv)
ws_cache.set('snuid', snuid)
def __get(self, url, session, headers):
h = {}
if headers:
for k, v in headers.items():
h[k] = v
if self.headers:
for k, v in self.headers.items():
h[k] = v
resp = session.get(url, headers=h, **self.requests_kwargs)
if not resp.ok:
raise WechatSogouRequestsException('WechatSogouAPI get error', resp)
return resp
def __unlock_sogou(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_sogou_callback_example
millis = int(round(time.time() * 1000))
r_captcha = session.get('http://weixin.sogou.com/antispider/util/seccode.php?tc={}'.format(millis), headers={
'Referer': url,
})
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI get img', r_captcha)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['code'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {code}, msg: {msg}'.format(code=r_unlock.get('code'),
msg=r_unlock.get('msg')))
else:
self.__set_cache(session.cookies.get('SUID'), r_unlock['id'])
def __unlock_wechat(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_weixin_callback_example
r_captcha = session.get('https://mp.weixin.qq.com/mp/verifycode?cert={}'.format(time.time() * 1000))
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI unlock_history get img', resp)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['ret'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {ret}, msg: {errmsg}, cookie_count: {cookie_count}'.format(
ret=r_unlock.get('ret'), errmsg=r_unlock.get('errmsg'), cookie_count=r_unlock.get('cookie_count')))
def __get_by_unlock(self, url, referer=None, unlock_platform=None, unlock_callback=None, identify_image_callback=None, session=None):
assert unlock_platform is None or callable(unlock_platform)
if identify_image_callback is None:
identify_image_callback = identify_image_callback_by_hand
assert unlock_callback is None or callable(unlock_callback)
assert callable(identify_image_callback)
if not session:
session = requests.session()
resp = self.__get(url, session, headers=self.__set_cookie(referer=referer))
resp.encoding = 'utf-8'
if 'antispider' in resp.url or '请输入验证码' in resp.text:
for i in range(self.captcha_break_times):
try:
unlock_platform(url=url, resp=resp, session=session, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback)
break
except WechatSogouVcodeOcrException as e:
if i == self.captcha_break_times - 1:
raise WechatSogouVcodeOcrException(e)
if '请输入验证码' in resp.text:
resp = session.get(url)
resp.encoding = 'utf-8'
else:
headers = self.__set_cookie(referer=referer)
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64)'
resp = self.__get(url, session, headers)
resp.encoding = 'utf-8'
return resp
def __hosting_wechat_img(self, content_info, hosting_callback):
"""将微信明细中图片托管到云端,同时将html页面中的对应图片替换
Parameters
----------
content_info : dict 微信文章明细字典
{
'content_img_list': [], # 从微信文章解析出的原始图片列表
'content_html': '', # 从微信文章解析出文章的内容
}
hosting_callback : callable
托管回调函数,传入单个图片链接,返回托管后的图片链接
Returns
-------
dict
{
'content_img_list': '', # 托管后的图片列表
'content_html': '', # 图片链接为托管后的图片链接内容
}
"""
assert callable(hosting_callback)
content_img_list = content_info.pop("content_img_list")
content_html = content_info.pop("content_html")
for idx, img_url in enumerate(content_img_list):
hosting_img_url = hosting_callback(img_url)
if not hosting_img_url:
# todo 定义标准异常
raise Exception()
content_img_list[idx] = hosting_img_url
content_html = content_html.replace(img_url, hosting_img_url)
return dict(content_img_list=content_img_list, content_html=content_html)
def __format_url(self, url, referer, text, unlock_callback=None, identify_image_callback=None, session=None):
def _parse_url(url, pads):
b = math.floor(random.random() * 100) + 1
a = url.find("url=")
c = url.find("&k=")
if a != -1 and c == -1:
sum = 0
for i in list(pads) + [a, b]:
sum += int(must_str(i))
a = url[sum]
return '{}&k={}&h={}'.format(url, may_int(b), may_int(a))
if url.startswith('/link?url='):
url = 'https://weixin.sogou.com{}'.format(url)
pads = re.findall(r'href\.substr\(a\+(\d+)\+parseInt\("(\d+)"\)\+b,1\)', text)
url = _parse_url(url, pads[0] if pads else [])
resp = self.__get_by_unlock(url,
referer=referer,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
uri = ''
base_url = re.findall(r'var url = \'(.*?)\';', resp.text)
if base_url and len(base_url) > 0:
uri = base_url[0]
mp_url = re.findall(r'url \+= \'(.*?)\';', resp.text)
if mp_url:
uri = uri + ''.join(mp_url)
url = uri.replace('@', '')
return url
def get_gzh_info(self, wecgat_id_or_name, unlock_callback=None, identify_image_callback=None, decode_url=True):
"""获取公众号微信号 wechatid 的信息
因为wechatid唯一确定,所以第一个就是要搜索的公众号
Parameters
----------
wecgat_id_or_name : str or unicode
wechat_id or wechat_name
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict or None
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
"""
info = self.search_gzh(wecgat_id_or_name, 1, unlock_callback, identify_image_callback, decode_url)
try:
return next(info)
except StopIteration:
return None
def search_gzh(self, keyword, page=1, unlock_callback=None, identify_image_callback=None, decode_url=True):
"""搜索 公众号
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
Raises
------
WechatSogouRequestsException
requests error
"""
url = WechatSogouRequest.gen_search_gzh_url(keyword, page)
session = requests.session()
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
gzh_list = WechatSogouStructuring.get_gzh_by_search(resp.text)
for i in gzh_list:
if decode_url:
i['profile_url'] = self.__format_url(i['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i
def get_gzh_article_by_history(self, keyword=None, url=None,
unlock_callback_sogou=None,
identify_image_callback_sogou=None,
unlock_callback_weixin=None,
identify_image_callback_weixin=None):
"""从 公众号的最近10条群发页面 提取公众号信息 和 文章列表信息
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
公众号的id 或者name
url : str or unicode
群发页url,如果不提供url,就先去搜索一遍拿到url
unlock_callback_sogou : callable
处理出现 搜索 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_sogou : callable
处理 搜索 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
unlock_callback_weixin : callable
处理出现 历史页 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_weixin : callable
处理 历史页 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict
{
'gzh': {
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'introduction': '', # 描述
'authentication': '', # 认证
'headimage': '' # 头像
},
'article': [
{
'send_id': '', # 群发id,注意不唯一,因为同一次群发多个消息,而群发id一致
'datetime': '', # 群发datatime
'type': '', # 消息类型,均是49,表示图文
'main': 0, # 是否是一次群发的第一次消息
'title': '', # 文章标题
'abstract': '', # 摘要
'fileid': '', #
'content_url': '', # 文章链接
'source_url': '', # 阅读原文的链接
'cover': '', # 封面图
'author': '', # 作者
'copyright_stat': '', # 文章类型,例如:原创啊
},
...
]
}
Raises
------
WechatSogouRequestsException
requests error
"""
if url is None:
gzh_list = self.get_gzh_info(keyword, unlock_callback_sogou, identify_image_callback_sogou)
if gzh_list is None:
return {}
if 'profile_url' not in gzh_list:
raise Exception() # todo use ws exception
url = gzh_list['profile_url']
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback_weixin,
identify_image_callback=identify_image_callback_weixin)
return WechatSogouStructuring.get_gzh_info_and_article_by_history(resp.text)
def get_gzh_article_by_hot(self, hot_index, page=1, unlock_callback=None, identify_image_callback=None):
"""获取 首页热门文章
Parameters
----------
hot_index : WechatSogouConst.hot_index
首页热门文章的分类(常量):WechatSogouConst.hot_index.xxx
page : int
页数
Returns
-------
list[dict]
{
'gzh': {
'headimage': str, # 公众号头像
'wechat_name': str, # 公众号名称
},
'article': {
'url': str, # 文章临时链接
'title': str, # 文章标题
'abstract': str, # 文章摘要
'time': int, # 推送时间,10位时间戳
'open_id': str, # open id
'main_img': str # 封面图片
}
}
"""
assert hasattr(WechatSogouConst.hot_index, hot_index)
assert isinstance(page, int) and page > 0
url = WechatSogouRequest.gen_hot_url(hot_index, page)
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
return WechatSogouStructuring.get_gzh_article_by_hot(resp.text)
def get_article_content(self, url, del_qqmusic=True, del_mpvoice=True, unlock_callback=None,
identify_image_callback=None, hosting_callback=None, raw=False):
"""获取文章原文,避免临时链接失效
Parameters
----------
url : str or unicode
原文链接,临时链接
raw : bool
True: 返回原始html
False: 返回处理后的html
del_qqmusic: bool
True:微信原文中有插入的qq音乐,则删除
False:微信源文中有插入的qq音乐,则保留
del_mpvoice: bool
True:微信原文中有插入的语音消息,则删除
False:微信源文中有插入的语音消息,则保留
unlock_callback : callable
处理 文章明细 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback : callable
处理 文章明细 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
hosting_callback: callable
将微信采集的文章托管到7牛或者阿里云回调函数,输入微信图片源地址,返回托管后地址
Returns
-------
content_html
原文内容
content_img_list
文章中图片列表
Raises
------
WechatSogouRequestsException
"""
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
if '链接已过期' in resp.text:
raise WechatSogouException('get_article_content 链接 [{}] 已过期'.format(url))
if raw:
return resp.text
content_info = WechatSogouStructuring.get_article_detail(resp.text, del_qqmusic=del_qqmusic,
del_voice=del_mpvoice)
if hosting_callback:
content_info = self.__hosting_wechat_img(content_info, hosting_callback)
return content_info
def get_sugg(self, keyword):
"""获取微信搜狗搜索关键词联想
Parameters
----------
keyword : str or unicode
关键词
Returns
-------
list[str]
联想关键词列表
Raises
------
WechatSogouRequestsException
"""
url = 'http://w.sugg.sogou.com/sugg/ajaj_json.jsp?key={}&type=wxpub&pr=web'.format(
quote(keyword.encode('utf-8')))
r = requests.get(url)
if not r.ok:
raise WechatSogouRequestsException('get_sugg', r)
sugg = re.findall(u'\["' + keyword + '",(.*?),\["', r.text)[0]
return json.loads(sugg)
|
Chyroc/WechatSogou | wechatsogou/api.py | WechatSogouAPI.get_gzh_article_by_history | python | def get_gzh_article_by_history(self, keyword=None, url=None,
unlock_callback_sogou=None,
identify_image_callback_sogou=None,
unlock_callback_weixin=None,
identify_image_callback_weixin=None):
if url is None:
gzh_list = self.get_gzh_info(keyword, unlock_callback_sogou, identify_image_callback_sogou)
if gzh_list is None:
return {}
if 'profile_url' not in gzh_list:
raise Exception() # todo use ws exception
url = gzh_list['profile_url']
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback_weixin,
identify_image_callback=identify_image_callback_weixin)
return WechatSogouStructuring.get_gzh_info_and_article_by_history(resp.text) | 从 公众号的最近10条群发页面 提取公众号信息 和 文章列表信息
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
公众号的id 或者name
url : str or unicode
群发页url,如果不提供url,就先去搜索一遍拿到url
unlock_callback_sogou : callable
处理出现 搜索 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_sogou : callable
处理 搜索 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
unlock_callback_weixin : callable
处理出现 历史页 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_weixin : callable
处理 历史页 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict
{
'gzh': {
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'introduction': '', # 描述
'authentication': '', # 认证
'headimage': '' # 头像
},
'article': [
{
'send_id': '', # 群发id,注意不唯一,因为同一次群发多个消息,而群发id一致
'datetime': '', # 群发datatime
'type': '', # 消息类型,均是49,表示图文
'main': 0, # 是否是一次群发的第一次消息
'title': '', # 文章标题
'abstract': '', # 摘要
'fileid': '', #
'content_url': '', # 文章链接
'source_url': '', # 阅读原文的链接
'cover': '', # 封面图
'author': '', # 作者
'copyright_stat': '', # 文章类型,例如:原创啊
},
...
]
}
Raises
------
WechatSogouRequestsException
requests error | train | https://github.com/Chyroc/WechatSogou/blob/2e0e9886f555fd8bcfc7ae9718ced6ce955cd24a/wechatsogou/api.py#L371-L448 | [
"def __get_by_unlock(self, url, referer=None, unlock_platform=None, unlock_callback=None, identify_image_callback=None, session=None):\n assert unlock_platform is None or callable(unlock_platform)\n\n if identify_image_callback is None:\n identify_image_callback = identify_image_callback_by_hand\n assert unlock_callback is None or callable(unlock_callback)\n assert callable(identify_image_callback)\n\n if not session:\n session = requests.session()\n resp = self.__get(url, session, headers=self.__set_cookie(referer=referer))\n resp.encoding = 'utf-8'\n if 'antispider' in resp.url or '请输入验证码' in resp.text:\n for i in range(self.captcha_break_times):\n try:\n unlock_platform(url=url, resp=resp, session=session, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback)\n break\n except WechatSogouVcodeOcrException as e:\n if i == self.captcha_break_times - 1:\n raise WechatSogouVcodeOcrException(e)\n\n if '请输入验证码' in resp.text:\n resp = session.get(url)\n resp.encoding = 'utf-8'\n else:\n headers = self.__set_cookie(referer=referer)\n headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64)'\n resp = self.__get(url, session, headers)\n resp.encoding = 'utf-8'\n\n return resp\n",
"def get_gzh_info(self, wecgat_id_or_name, unlock_callback=None, identify_image_callback=None, decode_url=True):\n \"\"\"获取公众号微信号 wechatid 的信息\n\n 因为wechatid唯一确定,所以第一个就是要搜索的公众号\n\n Parameters\n ----------\n wecgat_id_or_name : str or unicode\n wechat_id or wechat_name\n unlock_callback : callable\n 处理出现验证码页面的函数,参见 unlock_callback_example\n identify_image_callback : callable\n 处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example\n\n Returns\n -------\n dict or None\n {\n 'open_id': '', # 微信号唯一ID\n 'profile_url': '', # 最近10条群发页链接\n 'headimage': '', # 头像\n 'wechat_name': '', # 名称\n 'wechat_id': '', # 微信id\n 'post_perm': '', # 最近一月群发数\n 'qrcode': '', # 二维码\n 'introduction': '', # 介绍\n 'authentication': '' # 认证\n }\n \"\"\"\n info = self.search_gzh(wecgat_id_or_name, 1, unlock_callback, identify_image_callback, decode_url)\n try:\n return next(info)\n except StopIteration:\n return None\n",
"def gen_search_article_url(keyword, page=1, timesn=WechatSogouConst.search_article_time.anytime,\n article_type=WechatSogouConst.search_article_type.all, ft=None, et=None):\n \"\"\"拼接搜索 文章 URL\n\n Parameters\n ----------\n keyword : str or unicode\n 搜索文字\n page : int, optional\n 页数 the default is 1\n timesn : WechatSogouConst.search_article_time\n 时间 anytime 没有限制 / day 一天 / week 一周 / month 一月 / year 一年 / specific 自定\n 默认是 anytime\n article_type : WechatSogouConst.search_article_type\n 含有内容的类型 image 有图 / video 有视频 / rich 有图和视频 / all 啥都有\n 默认是 all\n ft, et : datetime.date\n 当 tsn 是 specific 时,ft 代表开始时间,如: 2017-07-01\n 当 tsn 是 specific 时,et 代表结束时间,如: 2017-07-15\n\n Returns\n -------\n str\n search_article_url\n \"\"\"\n assert isinstance(page, int) and page > 0\n assert timesn in [WechatSogouConst.search_article_time.anytime,\n WechatSogouConst.search_article_time.day,\n WechatSogouConst.search_article_time.week,\n WechatSogouConst.search_article_time.month,\n WechatSogouConst.search_article_time.year,\n WechatSogouConst.search_article_time.specific]\n\n if timesn == WechatSogouConst.search_article_time.specific:\n assert isinstance(ft, datetime.date)\n assert isinstance(et, datetime.date)\n assert ft <= et\n else:\n ft = ''\n et = ''\n\n interation_image = 458754\n interation_video = 458756\n if article_type == WechatSogouConst.search_article_type.rich:\n interation = '{},{}'.format(interation_image, interation_video)\n elif article_type == WechatSogouConst.search_article_type.image:\n interation = interation_image\n elif article_type == WechatSogouConst.search_article_type.video:\n interation = interation_video\n else:\n interation = ''\n\n qs_dict = OrderedDict()\n qs_dict['type'] = _search_type_article\n qs_dict['page'] = page\n qs_dict['ie'] = 'utf8'\n qs_dict['query'] = keyword\n qs_dict['interation'] = interation\n if timesn != 0:\n qs_dict['tsn'] = timesn\n qs_dict['ft'] = str(ft)\n qs_dict['et'] = str(et)\n\n # TODO 账号内搜索\n # '账号内 
http://weixin.sogou.com/weixin?type=2&ie=utf8&query=%E9%AB%98%E8%80%83&tsn=3&ft=&et=&interation=458754\n # &wxid=oIWsFt1tmWoG6vO6BcsS7St61bRE&usip=nanhangqinggong'\n # qs['wxid'] = wxid\n # qs['usip'] = usip\n\n return 'http://weixin.sogou.com/weixin?{}'.format(urlencode(qs_dict))\n",
"def get_gzh_info_and_article_by_history(text):\n \"\"\"从 历史消息页的文本 提取公众号信息 和 文章列表信息\n\n Parameters\n ----------\n text : str or unicode\n 历史消息页的文本\n\n Returns\n -------\n dict\n {\n 'gzh': {\n 'wechat_name': '', # 名称\n 'wechat_id': '', # 微信id\n 'introduction': '', # 描述\n 'authentication': '', # 认证\n 'headimage': '' # 头像\n },\n 'article': [\n {\n 'send_id': '', # 群发id,注意不唯一,因为同一次群发多个消息,而群发id一致\n 'datetime': '', # 群发datatime\n 'type': '', # 消息类型,均是49,表示图文\n 'main': 0, # 是否是一次群发的第一次消息\n 'title': '', # 文章标题\n 'abstract': '', # 摘要\n 'fileid': '', #\n 'content_url': '', # 文章链接\n 'source_url': '', # 阅读原文的链接\n 'cover': '', # 封面图\n 'author': '', # 作者\n 'copyright_stat': '', # 文章类型,例如:原创啊\n },\n ...\n ]\n }\n \"\"\"\n return {\n 'gzh': WechatSogouStructuring.get_gzh_info_by_history(text),\n 'article': WechatSogouStructuring.get_article_by_history_json(text)\n }\n"
] | class WechatSogouAPI(object):
def __init__(self, captcha_break_time=1, headers=None, **kwargs):
"""初始化参数
Parameters
----------
captcha_break_time : int
验证码输入错误重试次数
proxies : dict
代理
timeout : float
超时时间
"""
assert isinstance(captcha_break_time, int) and 0 < captcha_break_time < 20
self.captcha_break_times = captcha_break_time
self.requests_kwargs = kwargs
self.headers = headers
if self.headers:
self.headers['User-Agent'] = random.choice(agents)
else:
self.headers = {'User-Agent': random.choice(agents)}
def __set_cookie(self, suv=None, snuid=None, referer=None):
suv = ws_cache.get('suv') if suv is None else suv
snuid = ws_cache.get('snuid') if snuid is None else snuid
_headers = {'Cookie': 'SUV={};SNUID={};'.format(suv, snuid)}
if referer is not None:
_headers['Referer'] = referer
return _headers
def __set_cache(self, suv, snuid):
ws_cache.set('suv', suv)
ws_cache.set('snuid', snuid)
def __get(self, url, session, headers):
h = {}
if headers:
for k, v in headers.items():
h[k] = v
if self.headers:
for k, v in self.headers.items():
h[k] = v
resp = session.get(url, headers=h, **self.requests_kwargs)
if not resp.ok:
raise WechatSogouRequestsException('WechatSogouAPI get error', resp)
return resp
def __unlock_sogou(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_sogou_callback_example
millis = int(round(time.time() * 1000))
r_captcha = session.get('http://weixin.sogou.com/antispider/util/seccode.php?tc={}'.format(millis), headers={
'Referer': url,
})
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI get img', r_captcha)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['code'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {code}, msg: {msg}'.format(code=r_unlock.get('code'),
msg=r_unlock.get('msg')))
else:
self.__set_cache(session.cookies.get('SUID'), r_unlock['id'])
def __unlock_wechat(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_weixin_callback_example
r_captcha = session.get('https://mp.weixin.qq.com/mp/verifycode?cert={}'.format(time.time() * 1000))
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI unlock_history get img', resp)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['ret'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {ret}, msg: {errmsg}, cookie_count: {cookie_count}'.format(
ret=r_unlock.get('ret'), errmsg=r_unlock.get('errmsg'), cookie_count=r_unlock.get('cookie_count')))
def __get_by_unlock(self, url, referer=None, unlock_platform=None, unlock_callback=None, identify_image_callback=None, session=None):
assert unlock_platform is None or callable(unlock_platform)
if identify_image_callback is None:
identify_image_callback = identify_image_callback_by_hand
assert unlock_callback is None or callable(unlock_callback)
assert callable(identify_image_callback)
if not session:
session = requests.session()
resp = self.__get(url, session, headers=self.__set_cookie(referer=referer))
resp.encoding = 'utf-8'
if 'antispider' in resp.url or '请输入验证码' in resp.text:
for i in range(self.captcha_break_times):
try:
unlock_platform(url=url, resp=resp, session=session, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback)
break
except WechatSogouVcodeOcrException as e:
if i == self.captcha_break_times - 1:
raise WechatSogouVcodeOcrException(e)
if '请输入验证码' in resp.text:
resp = session.get(url)
resp.encoding = 'utf-8'
else:
headers = self.__set_cookie(referer=referer)
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64)'
resp = self.__get(url, session, headers)
resp.encoding = 'utf-8'
return resp
def __hosting_wechat_img(self, content_info, hosting_callback):
"""将微信明细中图片托管到云端,同时将html页面中的对应图片替换
Parameters
----------
content_info : dict 微信文章明细字典
{
'content_img_list': [], # 从微信文章解析出的原始图片列表
'content_html': '', # 从微信文章解析出文章的内容
}
hosting_callback : callable
托管回调函数,传入单个图片链接,返回托管后的图片链接
Returns
-------
dict
{
'content_img_list': '', # 托管后的图片列表
'content_html': '', # 图片链接为托管后的图片链接内容
}
"""
assert callable(hosting_callback)
content_img_list = content_info.pop("content_img_list")
content_html = content_info.pop("content_html")
for idx, img_url in enumerate(content_img_list):
hosting_img_url = hosting_callback(img_url)
if not hosting_img_url:
# todo 定义标准异常
raise Exception()
content_img_list[idx] = hosting_img_url
content_html = content_html.replace(img_url, hosting_img_url)
return dict(content_img_list=content_img_list, content_html=content_html)
def __format_url(self, url, referer, text, unlock_callback=None, identify_image_callback=None, session=None):
def _parse_url(url, pads):
b = math.floor(random.random() * 100) + 1
a = url.find("url=")
c = url.find("&k=")
if a != -1 and c == -1:
sum = 0
for i in list(pads) + [a, b]:
sum += int(must_str(i))
a = url[sum]
return '{}&k={}&h={}'.format(url, may_int(b), may_int(a))
if url.startswith('/link?url='):
url = 'https://weixin.sogou.com{}'.format(url)
pads = re.findall(r'href\.substr\(a\+(\d+)\+parseInt\("(\d+)"\)\+b,1\)', text)
url = _parse_url(url, pads[0] if pads else [])
resp = self.__get_by_unlock(url,
referer=referer,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
uri = ''
base_url = re.findall(r'var url = \'(.*?)\';', resp.text)
if base_url and len(base_url) > 0:
uri = base_url[0]
mp_url = re.findall(r'url \+= \'(.*?)\';', resp.text)
if mp_url:
uri = uri + ''.join(mp_url)
url = uri.replace('@', '')
return url
def get_gzh_info(self, wecgat_id_or_name, unlock_callback=None, identify_image_callback=None, decode_url=True):
"""获取公众号微信号 wechatid 的信息
因为wechatid唯一确定,所以第一个就是要搜索的公众号
Parameters
----------
wecgat_id_or_name : str or unicode
wechat_id or wechat_name
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict or None
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
"""
info = self.search_gzh(wecgat_id_or_name, 1, unlock_callback, identify_image_callback, decode_url)
try:
return next(info)
except StopIteration:
return None
def search_gzh(self, keyword, page=1, unlock_callback=None, identify_image_callback=None, decode_url=True):
"""搜索 公众号
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
Raises
------
WechatSogouRequestsException
requests error
"""
url = WechatSogouRequest.gen_search_gzh_url(keyword, page)
session = requests.session()
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
gzh_list = WechatSogouStructuring.get_gzh_by_search(resp.text)
for i in gzh_list:
if decode_url:
i['profile_url'] = self.__format_url(i['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i
def search_article(self, keyword, page=1, timesn=WechatSogouConst.search_article_time.anytime,
article_type=WechatSogouConst.search_article_type.all, ft=None, et=None,
unlock_callback=None,
identify_image_callback=None,
decode_url=True):
"""搜索 文章
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
timesn : WechatSogouConst.search_article_time
时间 anytime 没有限制 / day 一天 / week 一周 / month 一月 / year 一年 / specific 自定
the default is anytime
article_type : WechatSogouConst.search_article_type
含有内容的类型 image 有图 / video 有视频 / rich 有图和视频 / all 啥都有
ft, et : datetime.date or None
当 tsn 是 specific 时,ft 代表开始时间,如: 2017-07-01
当 tsn 是 specific 时,et 代表结束时间,如: 2017-07-15
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'article': {
'title': '', # 文章标题
'url': '', # 文章链接
'imgs': '', # 文章图片list
'abstract': '', # 文章摘要
'time': '' # 文章推送时间
},
'gzh': {
'profile_url': '', # 公众号最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'isv': '', # 是否加v
}
}
Raises
------
WechatSogouRequestsException
requests error
"""
url = WechatSogouRequest.gen_search_article_url(keyword, page, timesn, article_type, ft, et)
session = requests.session()
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
article_list = WechatSogouStructuring.get_article_by_search(resp.text)
for i in article_list:
if decode_url:
i['article']['url'] = self.__format_url(i['article']['url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
i['gzh']['profile_url'] = self.__format_url(i['gzh']['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i
def get_gzh_article_by_hot(self, hot_index, page=1, unlock_callback=None, identify_image_callback=None):
"""获取 首页热门文章
Parameters
----------
hot_index : WechatSogouConst.hot_index
首页热门文章的分类(常量):WechatSogouConst.hot_index.xxx
page : int
页数
Returns
-------
list[dict]
{
'gzh': {
'headimage': str, # 公众号头像
'wechat_name': str, # 公众号名称
},
'article': {
'url': str, # 文章临时链接
'title': str, # 文章标题
'abstract': str, # 文章摘要
'time': int, # 推送时间,10位时间戳
'open_id': str, # open id
'main_img': str # 封面图片
}
}
"""
assert hasattr(WechatSogouConst.hot_index, hot_index)
assert isinstance(page, int) and page > 0
url = WechatSogouRequest.gen_hot_url(hot_index, page)
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
return WechatSogouStructuring.get_gzh_article_by_hot(resp.text)
def get_article_content(self, url, del_qqmusic=True, del_mpvoice=True, unlock_callback=None,
identify_image_callback=None, hosting_callback=None, raw=False):
"""获取文章原文,避免临时链接失效
Parameters
----------
url : str or unicode
原文链接,临时链接
raw : bool
True: 返回原始html
False: 返回处理后的html
del_qqmusic: bool
True:微信原文中有插入的qq音乐,则删除
False:微信源文中有插入的qq音乐,则保留
del_mpvoice: bool
True:微信原文中有插入的语音消息,则删除
False:微信源文中有插入的语音消息,则保留
unlock_callback : callable
处理 文章明细 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback : callable
处理 文章明细 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
hosting_callback: callable
将微信采集的文章托管到7牛或者阿里云回调函数,输入微信图片源地址,返回托管后地址
Returns
-------
content_html
原文内容
content_img_list
文章中图片列表
Raises
------
WechatSogouRequestsException
"""
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
if '链接已过期' in resp.text:
raise WechatSogouException('get_article_content 链接 [{}] 已过期'.format(url))
if raw:
return resp.text
content_info = WechatSogouStructuring.get_article_detail(resp.text, del_qqmusic=del_qqmusic,
del_voice=del_mpvoice)
if hosting_callback:
content_info = self.__hosting_wechat_img(content_info, hosting_callback)
return content_info
def get_sugg(self, keyword):
"""获取微信搜狗搜索关键词联想
Parameters
----------
keyword : str or unicode
关键词
Returns
-------
list[str]
联想关键词列表
Raises
------
WechatSogouRequestsException
"""
url = 'http://w.sugg.sogou.com/sugg/ajaj_json.jsp?key={}&type=wxpub&pr=web'.format(
quote(keyword.encode('utf-8')))
r = requests.get(url)
if not r.ok:
raise WechatSogouRequestsException('get_sugg', r)
sugg = re.findall(u'\["' + keyword + '",(.*?),\["', r.text)[0]
return json.loads(sugg)
|
Chyroc/WechatSogou | wechatsogou/api.py | WechatSogouAPI.get_gzh_article_by_hot | python | def get_gzh_article_by_hot(self, hot_index, page=1, unlock_callback=None, identify_image_callback=None):
assert hasattr(WechatSogouConst.hot_index, hot_index)
assert isinstance(page, int) and page > 0
url = WechatSogouRequest.gen_hot_url(hot_index, page)
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
return WechatSogouStructuring.get_gzh_article_by_hot(resp.text) | 获取 首页热门文章
Parameters
----------
hot_index : WechatSogouConst.hot_index
首页热门文章的分类(常量):WechatSogouConst.hot_index.xxx
page : int
页数
Returns
-------
list[dict]
{
'gzh': {
'headimage': str, # 公众号头像
'wechat_name': str, # 公众号名称
},
'article': {
'url': str, # 文章临时链接
'title': str, # 文章标题
'abstract': str, # 文章摘要
'time': int, # 推送时间,10位时间戳
'open_id': str, # open id
'main_img': str # 封面图片
}
} | train | https://github.com/Chyroc/WechatSogou/blob/2e0e9886f555fd8bcfc7ae9718ced6ce955cd24a/wechatsogou/api.py#L450-L489 | [
"def __get_by_unlock(self, url, referer=None, unlock_platform=None, unlock_callback=None, identify_image_callback=None, session=None):\n assert unlock_platform is None or callable(unlock_platform)\n\n if identify_image_callback is None:\n identify_image_callback = identify_image_callback_by_hand\n assert unlock_callback is None or callable(unlock_callback)\n assert callable(identify_image_callback)\n\n if not session:\n session = requests.session()\n resp = self.__get(url, session, headers=self.__set_cookie(referer=referer))\n resp.encoding = 'utf-8'\n if 'antispider' in resp.url or '请输入验证码' in resp.text:\n for i in range(self.captcha_break_times):\n try:\n unlock_platform(url=url, resp=resp, session=session, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback)\n break\n except WechatSogouVcodeOcrException as e:\n if i == self.captcha_break_times - 1:\n raise WechatSogouVcodeOcrException(e)\n\n if '请输入验证码' in resp.text:\n resp = session.get(url)\n resp.encoding = 'utf-8'\n else:\n headers = self.__set_cookie(referer=referer)\n headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64)'\n resp = self.__get(url, session, headers)\n resp.encoding = 'utf-8'\n\n return resp\n",
"def gen_hot_url(hot_index, page=1):\n \"\"\"拼接 首页热门文章 URL\n\n Parameters\n ----------\n hot_index : WechatSogouConst.hot_index\n 首页热门文章的分类(常量):WechatSogouConst.hot_index.xxx\n page : int\n 页数\n\n Returns\n -------\n str\n 热门文章分类的url\n \"\"\"\n\n assert hasattr(WechatSogouConst.hot_index, hot_index)\n assert isinstance(page, int) and page > 0\n\n index_urls = {\n WechatSogouConst.hot_index.hot: 0, # 热门\n WechatSogouConst.hot_index.gaoxiao: 1, # 搞笑\n WechatSogouConst.hot_index.health: 2, # 养生\n WechatSogouConst.hot_index.sifanghua: 3, # 私房话\n WechatSogouConst.hot_index.gossip: 4, # 八卦\n WechatSogouConst.hot_index.technology: 5, # 科技\n WechatSogouConst.hot_index.finance: 6, # 财经\n WechatSogouConst.hot_index.car: 7, # 汽车\n WechatSogouConst.hot_index.life: 8, # 生活\n WechatSogouConst.hot_index.fashion: 9, # 时尚\n WechatSogouConst.hot_index.mummy: 10, # 辣妈 / 育儿\n WechatSogouConst.hot_index.travel: 11, # 旅行\n WechatSogouConst.hot_index.job: 12, # 职场\n WechatSogouConst.hot_index.food: 13, # 美食\n WechatSogouConst.hot_index.history: 14, # 历史\n WechatSogouConst.hot_index.study: 15, # 学霸 / 教育\n WechatSogouConst.hot_index.constellation: 16, # 星座\n WechatSogouConst.hot_index.sport: 17, # 体育\n WechatSogouConst.hot_index.military: 18, # 军事\n WechatSogouConst.hot_index.game: 19, # 游戏\n WechatSogouConst.hot_index.pet: 20, # 萌宠\n\n }\n return 'http://weixin.sogou.com/wapindex/wap/0612/wap_{}/{}.html'.format(index_urls[hot_index], page - 1)\n",
"def get_gzh_article_by_hot(text):\n \"\"\"从 首页热门搜索 提取公众号信息 和 文章列表信息\n\n Parameters\n ----------\n text : str or unicode\n 首页热门搜索 页 中 某一页 的文本\n\n Returns\n -------\n list[dict]\n {\n 'gzh': {\n 'headimage': str, # 公众号头像\n 'wechat_name': str, # 公众号名称\n },\n 'article': {\n 'url': str, # 文章临时链接\n 'title': str, # 文章标题\n 'abstract': str, # 文章摘要\n 'time': int, # 推送时间,10位时间戳\n 'open_id': str, # open id\n 'main_img': str # 封面图片\n }\n }\n \"\"\"\n page = etree.HTML(text)\n lis = page.xpath('/html/body/li')\n gzh_article_list = []\n for li in lis:\n url = get_first_of_element(li, 'div[1]/h4/a/@href')\n title = get_first_of_element(li, 'div[1]/h4/a/div/text()')\n abstract = get_first_of_element(li, 'div[1]/p[1]/text()')\n xpath_time = get_first_of_element(li, 'div[1]/p[2]')\n open_id = get_first_of_element(xpath_time, 'span/@data-openid')\n headimage = get_first_of_element(xpath_time, 'span/@data-headimage')\n gzh_name = get_first_of_element(xpath_time, 'span/text()')\n send_time = xpath_time.xpath('a/span/@data-lastmodified')\n main_img = get_first_of_element(li, 'div[2]/a/img/@src')\n\n try:\n send_time = int(send_time[0])\n except ValueError:\n send_time = send_time[0]\n\n gzh_article_list.append({\n 'gzh': {\n 'headimage': headimage,\n 'wechat_name': gzh_name,\n },\n 'article': {\n 'url': url,\n 'title': title,\n 'abstract': abstract,\n 'time': send_time,\n 'open_id': open_id,\n 'main_img': main_img\n }\n })\n\n return gzh_article_list\n"
] | class WechatSogouAPI(object):
def __init__(self, captcha_break_time=1, headers=None, **kwargs):
"""初始化参数
Parameters
----------
captcha_break_time : int
验证码输入错误重试次数
proxies : dict
代理
timeout : float
超时时间
"""
assert isinstance(captcha_break_time, int) and 0 < captcha_break_time < 20
self.captcha_break_times = captcha_break_time
self.requests_kwargs = kwargs
self.headers = headers
if self.headers:
self.headers['User-Agent'] = random.choice(agents)
else:
self.headers = {'User-Agent': random.choice(agents)}
def __set_cookie(self, suv=None, snuid=None, referer=None):
suv = ws_cache.get('suv') if suv is None else suv
snuid = ws_cache.get('snuid') if snuid is None else snuid
_headers = {'Cookie': 'SUV={};SNUID={};'.format(suv, snuid)}
if referer is not None:
_headers['Referer'] = referer
return _headers
def __set_cache(self, suv, snuid):
ws_cache.set('suv', suv)
ws_cache.set('snuid', snuid)
def __get(self, url, session, headers):
h = {}
if headers:
for k, v in headers.items():
h[k] = v
if self.headers:
for k, v in self.headers.items():
h[k] = v
resp = session.get(url, headers=h, **self.requests_kwargs)
if not resp.ok:
raise WechatSogouRequestsException('WechatSogouAPI get error', resp)
return resp
def __unlock_sogou(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_sogou_callback_example
millis = int(round(time.time() * 1000))
r_captcha = session.get('http://weixin.sogou.com/antispider/util/seccode.php?tc={}'.format(millis), headers={
'Referer': url,
})
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI get img', r_captcha)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['code'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {code}, msg: {msg}'.format(code=r_unlock.get('code'),
msg=r_unlock.get('msg')))
else:
self.__set_cache(session.cookies.get('SUID'), r_unlock['id'])
def __unlock_wechat(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_weixin_callback_example
r_captcha = session.get('https://mp.weixin.qq.com/mp/verifycode?cert={}'.format(time.time() * 1000))
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI unlock_history get img', resp)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['ret'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {ret}, msg: {errmsg}, cookie_count: {cookie_count}'.format(
ret=r_unlock.get('ret'), errmsg=r_unlock.get('errmsg'), cookie_count=r_unlock.get('cookie_count')))
def __get_by_unlock(self, url, referer=None, unlock_platform=None, unlock_callback=None, identify_image_callback=None, session=None):
assert unlock_platform is None or callable(unlock_platform)
if identify_image_callback is None:
identify_image_callback = identify_image_callback_by_hand
assert unlock_callback is None or callable(unlock_callback)
assert callable(identify_image_callback)
if not session:
session = requests.session()
resp = self.__get(url, session, headers=self.__set_cookie(referer=referer))
resp.encoding = 'utf-8'
if 'antispider' in resp.url or '请输入验证码' in resp.text:
for i in range(self.captcha_break_times):
try:
unlock_platform(url=url, resp=resp, session=session, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback)
break
except WechatSogouVcodeOcrException as e:
if i == self.captcha_break_times - 1:
raise WechatSogouVcodeOcrException(e)
if '请输入验证码' in resp.text:
resp = session.get(url)
resp.encoding = 'utf-8'
else:
headers = self.__set_cookie(referer=referer)
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64)'
resp = self.__get(url, session, headers)
resp.encoding = 'utf-8'
return resp
def __hosting_wechat_img(self, content_info, hosting_callback):
"""将微信明细中图片托管到云端,同时将html页面中的对应图片替换
Parameters
----------
content_info : dict 微信文章明细字典
{
'content_img_list': [], # 从微信文章解析出的原始图片列表
'content_html': '', # 从微信文章解析出文章的内容
}
hosting_callback : callable
托管回调函数,传入单个图片链接,返回托管后的图片链接
Returns
-------
dict
{
'content_img_list': '', # 托管后的图片列表
'content_html': '', # 图片链接为托管后的图片链接内容
}
"""
assert callable(hosting_callback)
content_img_list = content_info.pop("content_img_list")
content_html = content_info.pop("content_html")
for idx, img_url in enumerate(content_img_list):
hosting_img_url = hosting_callback(img_url)
if not hosting_img_url:
# todo 定义标准异常
raise Exception()
content_img_list[idx] = hosting_img_url
content_html = content_html.replace(img_url, hosting_img_url)
return dict(content_img_list=content_img_list, content_html=content_html)
def __format_url(self, url, referer, text, unlock_callback=None, identify_image_callback=None, session=None):
def _parse_url(url, pads):
b = math.floor(random.random() * 100) + 1
a = url.find("url=")
c = url.find("&k=")
if a != -1 and c == -1:
sum = 0
for i in list(pads) + [a, b]:
sum += int(must_str(i))
a = url[sum]
return '{}&k={}&h={}'.format(url, may_int(b), may_int(a))
if url.startswith('/link?url='):
url = 'https://weixin.sogou.com{}'.format(url)
pads = re.findall(r'href\.substr\(a\+(\d+)\+parseInt\("(\d+)"\)\+b,1\)', text)
url = _parse_url(url, pads[0] if pads else [])
resp = self.__get_by_unlock(url,
referer=referer,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
uri = ''
base_url = re.findall(r'var url = \'(.*?)\';', resp.text)
if base_url and len(base_url) > 0:
uri = base_url[0]
mp_url = re.findall(r'url \+= \'(.*?)\';', resp.text)
if mp_url:
uri = uri + ''.join(mp_url)
url = uri.replace('@', '')
return url
def get_gzh_info(self, wecgat_id_or_name, unlock_callback=None, identify_image_callback=None, decode_url=True):
"""获取公众号微信号 wechatid 的信息
因为wechatid唯一确定,所以第一个就是要搜索的公众号
Parameters
----------
wecgat_id_or_name : str or unicode
wechat_id or wechat_name
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict or None
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
"""
info = self.search_gzh(wecgat_id_or_name, 1, unlock_callback, identify_image_callback, decode_url)
try:
return next(info)
except StopIteration:
return None
def search_gzh(self, keyword, page=1, unlock_callback=None, identify_image_callback=None, decode_url=True):
"""搜索 公众号
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
Raises
------
WechatSogouRequestsException
requests error
"""
url = WechatSogouRequest.gen_search_gzh_url(keyword, page)
session = requests.session()
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
gzh_list = WechatSogouStructuring.get_gzh_by_search(resp.text)
for i in gzh_list:
if decode_url:
i['profile_url'] = self.__format_url(i['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i
def search_article(self, keyword, page=1, timesn=WechatSogouConst.search_article_time.anytime,
article_type=WechatSogouConst.search_article_type.all, ft=None, et=None,
unlock_callback=None,
identify_image_callback=None,
decode_url=True):
"""搜索 文章
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
timesn : WechatSogouConst.search_article_time
时间 anytime 没有限制 / day 一天 / week 一周 / month 一月 / year 一年 / specific 自定
the default is anytime
article_type : WechatSogouConst.search_article_type
含有内容的类型 image 有图 / video 有视频 / rich 有图和视频 / all 啥都有
ft, et : datetime.date or None
当 tsn 是 specific 时,ft 代表开始时间,如: 2017-07-01
当 tsn 是 specific 时,et 代表结束时间,如: 2017-07-15
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'article': {
'title': '', # 文章标题
'url': '', # 文章链接
'imgs': '', # 文章图片list
'abstract': '', # 文章摘要
'time': '' # 文章推送时间
},
'gzh': {
'profile_url': '', # 公众号最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'isv': '', # 是否加v
}
}
Raises
------
WechatSogouRequestsException
requests error
"""
url = WechatSogouRequest.gen_search_article_url(keyword, page, timesn, article_type, ft, et)
session = requests.session()
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
article_list = WechatSogouStructuring.get_article_by_search(resp.text)
for i in article_list:
if decode_url:
i['article']['url'] = self.__format_url(i['article']['url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
i['gzh']['profile_url'] = self.__format_url(i['gzh']['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i
def get_gzh_article_by_history(self, keyword=None, url=None,
unlock_callback_sogou=None,
identify_image_callback_sogou=None,
unlock_callback_weixin=None,
identify_image_callback_weixin=None):
"""从 公众号的最近10条群发页面 提取公众号信息 和 文章列表信息
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
公众号的id 或者name
url : str or unicode
群发页url,如果不提供url,就先去搜索一遍拿到url
unlock_callback_sogou : callable
处理出现 搜索 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_sogou : callable
处理 搜索 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
unlock_callback_weixin : callable
处理出现 历史页 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_weixin : callable
处理 历史页 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict
{
'gzh': {
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'introduction': '', # 描述
'authentication': '', # 认证
'headimage': '' # 头像
},
'article': [
{
'send_id': '', # 群发id,注意不唯一,因为同一次群发多个消息,而群发id一致
'datetime': '', # 群发datatime
'type': '', # 消息类型,均是49,表示图文
'main': 0, # 是否是一次群发的第一次消息
'title': '', # 文章标题
'abstract': '', # 摘要
'fileid': '', #
'content_url': '', # 文章链接
'source_url': '', # 阅读原文的链接
'cover': '', # 封面图
'author': '', # 作者
'copyright_stat': '', # 文章类型,例如:原创啊
},
...
]
}
Raises
------
WechatSogouRequestsException
requests error
"""
if url is None:
gzh_list = self.get_gzh_info(keyword, unlock_callback_sogou, identify_image_callback_sogou)
if gzh_list is None:
return {}
if 'profile_url' not in gzh_list:
raise Exception() # todo use ws exception
url = gzh_list['profile_url']
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback_weixin,
identify_image_callback=identify_image_callback_weixin)
return WechatSogouStructuring.get_gzh_info_and_article_by_history(resp.text)
def get_article_content(self, url, del_qqmusic=True, del_mpvoice=True, unlock_callback=None,
identify_image_callback=None, hosting_callback=None, raw=False):
"""获取文章原文,避免临时链接失效
Parameters
----------
url : str or unicode
原文链接,临时链接
raw : bool
True: 返回原始html
False: 返回处理后的html
del_qqmusic: bool
True:微信原文中有插入的qq音乐,则删除
False:微信源文中有插入的qq音乐,则保留
del_mpvoice: bool
True:微信原文中有插入的语音消息,则删除
False:微信源文中有插入的语音消息,则保留
unlock_callback : callable
处理 文章明细 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback : callable
处理 文章明细 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
hosting_callback: callable
将微信采集的文章托管到7牛或者阿里云回调函数,输入微信图片源地址,返回托管后地址
Returns
-------
content_html
原文内容
content_img_list
文章中图片列表
Raises
------
WechatSogouRequestsException
"""
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
if '链接已过期' in resp.text:
raise WechatSogouException('get_article_content 链接 [{}] 已过期'.format(url))
if raw:
return resp.text
content_info = WechatSogouStructuring.get_article_detail(resp.text, del_qqmusic=del_qqmusic,
del_voice=del_mpvoice)
if hosting_callback:
content_info = self.__hosting_wechat_img(content_info, hosting_callback)
return content_info
def get_sugg(self, keyword):
"""获取微信搜狗搜索关键词联想
Parameters
----------
keyword : str or unicode
关键词
Returns
-------
list[str]
联想关键词列表
Raises
------
WechatSogouRequestsException
"""
url = 'http://w.sugg.sogou.com/sugg/ajaj_json.jsp?key={}&type=wxpub&pr=web'.format(
quote(keyword.encode('utf-8')))
r = requests.get(url)
if not r.ok:
raise WechatSogouRequestsException('get_sugg', r)
sugg = re.findall(u'\["' + keyword + '",(.*?),\["', r.text)[0]
return json.loads(sugg)
|
Chyroc/WechatSogou | wechatsogou/api.py | WechatSogouAPI.get_article_content | python | def get_article_content(self, url, del_qqmusic=True, del_mpvoice=True, unlock_callback=None,
identify_image_callback=None, hosting_callback=None, raw=False):
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
if '链接已过期' in resp.text:
raise WechatSogouException('get_article_content 链接 [{}] 已过期'.format(url))
if raw:
return resp.text
content_info = WechatSogouStructuring.get_article_detail(resp.text, del_qqmusic=del_qqmusic,
del_voice=del_mpvoice)
if hosting_callback:
content_info = self.__hosting_wechat_img(content_info, hosting_callback)
return content_info | 获取文章原文,避免临时链接失效
Parameters
----------
url : str or unicode
原文链接,临时链接
raw : bool
True: 返回原始html
False: 返回处理后的html
del_qqmusic: bool
True:微信原文中有插入的qq音乐,则删除
False:微信源文中有插入的qq音乐,则保留
del_mpvoice: bool
True:微信原文中有插入的语音消息,则删除
False:微信源文中有插入的语音消息,则保留
unlock_callback : callable
处理 文章明细 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback : callable
处理 文章明细 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
hosting_callback: callable
将微信采集的文章托管到7牛或者阿里云回调函数,输入微信图片源地址,返回托管后地址
Returns
-------
content_html
原文内容
content_img_list
文章中图片列表
Raises
------
WechatSogouRequestsException | train | https://github.com/Chyroc/WechatSogou/blob/2e0e9886f555fd8bcfc7ae9718ced6ce955cd24a/wechatsogou/api.py#L491-L541 | [
"def __get_by_unlock(self, url, referer=None, unlock_platform=None, unlock_callback=None, identify_image_callback=None, session=None):\n assert unlock_platform is None or callable(unlock_platform)\n\n if identify_image_callback is None:\n identify_image_callback = identify_image_callback_by_hand\n assert unlock_callback is None or callable(unlock_callback)\n assert callable(identify_image_callback)\n\n if not session:\n session = requests.session()\n resp = self.__get(url, session, headers=self.__set_cookie(referer=referer))\n resp.encoding = 'utf-8'\n if 'antispider' in resp.url or '请输入验证码' in resp.text:\n for i in range(self.captcha_break_times):\n try:\n unlock_platform(url=url, resp=resp, session=session, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback)\n break\n except WechatSogouVcodeOcrException as e:\n if i == self.captcha_break_times - 1:\n raise WechatSogouVcodeOcrException(e)\n\n if '请输入验证码' in resp.text:\n resp = session.get(url)\n resp.encoding = 'utf-8'\n else:\n headers = self.__set_cookie(referer=referer)\n headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64)'\n resp = self.__get(url, session, headers)\n resp.encoding = 'utf-8'\n\n return resp\n",
"def __hosting_wechat_img(self, content_info, hosting_callback):\n \"\"\"将微信明细中图片托管到云端,同时将html页面中的对应图片替换\n\n Parameters\n ----------\n content_info : dict 微信文章明细字典\n {\n 'content_img_list': [], # 从微信文章解析出的原始图片列表\n 'content_html': '', # 从微信文章解析出文章的内容\n }\n hosting_callback : callable\n 托管回调函数,传入单个图片链接,返回托管后的图片链接\n\n Returns\n -------\n dict\n {\n 'content_img_list': '', # 托管后的图片列表\n 'content_html': '', # 图片链接为托管后的图片链接内容\n }\n \"\"\"\n assert callable(hosting_callback)\n\n content_img_list = content_info.pop(\"content_img_list\")\n content_html = content_info.pop(\"content_html\")\n for idx, img_url in enumerate(content_img_list):\n hosting_img_url = hosting_callback(img_url)\n if not hosting_img_url:\n # todo 定义标准异常\n raise Exception()\n content_img_list[idx] = hosting_img_url\n content_html = content_html.replace(img_url, hosting_img_url)\n\n return dict(content_img_list=content_img_list, content_html=content_html)\n",
"def get_article_detail(text, del_qqmusic=True, del_voice=True):\n \"\"\"根据微信文章的临时链接获取明细\n\n 1. 获取文本中所有的图片链接列表\n 2. 获取微信文章的html内容页面(去除标题等信息)\n\n Parameters\n ----------\n text : str or unicode\n 一篇微信文章的文本\n del_qqmusic: bool\n 删除文章中的qq音乐\n del_voice: bool\n 删除文章中的语音内容\n\n Returns\n -------\n dict\n {\n 'content_html': str # 微信文本内容\n 'content_img_list': list[img_url1, img_url2, ...] # 微信文本中图片列表\n\n }\n \"\"\"\n # 1. 获取微信文本content\n html_obj = BeautifulSoup(text, \"lxml\")\n content_text = html_obj.find('div', {'class': 'rich_media_content', 'id': 'js_content'})\n\n # 2. 删除部分标签\n if del_qqmusic:\n qqmusic = content_text.find_all('qqmusic') or []\n for music in qqmusic:\n music.parent.decompose()\n\n if del_voice:\n # voice是一个p标签下的mpvoice标签以及class为'js_audio_frame db'的span构成,所以将父标签删除\n voices = content_text.find_all('mpvoice') or []\n for voice in voices:\n voice.parent.decompose()\n\n # 3. 获取所有的图片 [img标签,和style中的background-image]\n all_img_set = set()\n all_img_element = content_text.find_all('img') or []\n for ele in all_img_element:\n # 删除部分属性\n img_url = format_image_url(ele.attrs['data-src'])\n del ele.attrs['data-src']\n\n ele.attrs['src'] = img_url\n\n if not img_url.startswith('http'):\n raise WechatSogouException('img_url [{}] 不合法'.format(img_url))\n all_img_set.add(img_url)\n\n backgroud_image = content_text.find_all(style=re.compile(\"background-image\")) or []\n for ele in backgroud_image:\n # 删除部分属性\n if ele.attrs.get('data-src'):\n del ele.attrs['data-src']\n\n if ele.attrs.get('data-wxurl'):\n del ele.attrs['data-wxurl']\n img_url = re.findall(backgroud_image_p, str(ele))\n if not img_url:\n continue\n all_img_set.add(img_url[0])\n\n # 4. 处理iframe\n all_img_element = content_text.find_all('iframe') or []\n for ele in all_img_element:\n # 删除部分属性\n img_url = ele.attrs['data-src']\n del ele.attrs['data-src']\n ele.attrs['src'] = img_url\n\n # 5. 
返回数据\n all_img_list = list(all_img_set)\n content_html = content_text.prettify()\n # 去除div[id=js_content]\n content_html = re.findall(js_content, content_html)[0][0]\n return {\n 'content_html': content_html,\n 'content_img_list': all_img_list\n }\n"
] | class WechatSogouAPI(object):
def __init__(self, captcha_break_time=1, headers=None, **kwargs):
"""初始化参数
Parameters
----------
captcha_break_time : int
验证码输入错误重试次数
proxies : dict
代理
timeout : float
超时时间
"""
assert isinstance(captcha_break_time, int) and 0 < captcha_break_time < 20
self.captcha_break_times = captcha_break_time
self.requests_kwargs = kwargs
self.headers = headers
if self.headers:
self.headers['User-Agent'] = random.choice(agents)
else:
self.headers = {'User-Agent': random.choice(agents)}
def __set_cookie(self, suv=None, snuid=None, referer=None):
suv = ws_cache.get('suv') if suv is None else suv
snuid = ws_cache.get('snuid') if snuid is None else snuid
_headers = {'Cookie': 'SUV={};SNUID={};'.format(suv, snuid)}
if referer is not None:
_headers['Referer'] = referer
return _headers
def __set_cache(self, suv, snuid):
ws_cache.set('suv', suv)
ws_cache.set('snuid', snuid)
def __get(self, url, session, headers):
h = {}
if headers:
for k, v in headers.items():
h[k] = v
if self.headers:
for k, v in self.headers.items():
h[k] = v
resp = session.get(url, headers=h, **self.requests_kwargs)
if not resp.ok:
raise WechatSogouRequestsException('WechatSogouAPI get error', resp)
return resp
def __unlock_sogou(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_sogou_callback_example
millis = int(round(time.time() * 1000))
r_captcha = session.get('http://weixin.sogou.com/antispider/util/seccode.php?tc={}'.format(millis), headers={
'Referer': url,
})
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI get img', r_captcha)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['code'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {code}, msg: {msg}'.format(code=r_unlock.get('code'),
msg=r_unlock.get('msg')))
else:
self.__set_cache(session.cookies.get('SUID'), r_unlock['id'])
def __unlock_wechat(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_weixin_callback_example
r_captcha = session.get('https://mp.weixin.qq.com/mp/verifycode?cert={}'.format(time.time() * 1000))
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI unlock_history get img', resp)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['ret'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {ret}, msg: {errmsg}, cookie_count: {cookie_count}'.format(
ret=r_unlock.get('ret'), errmsg=r_unlock.get('errmsg'), cookie_count=r_unlock.get('cookie_count')))
def __get_by_unlock(self, url, referer=None, unlock_platform=None, unlock_callback=None, identify_image_callback=None, session=None):
assert unlock_platform is None or callable(unlock_platform)
if identify_image_callback is None:
identify_image_callback = identify_image_callback_by_hand
assert unlock_callback is None or callable(unlock_callback)
assert callable(identify_image_callback)
if not session:
session = requests.session()
resp = self.__get(url, session, headers=self.__set_cookie(referer=referer))
resp.encoding = 'utf-8'
if 'antispider' in resp.url or '请输入验证码' in resp.text:
for i in range(self.captcha_break_times):
try:
unlock_platform(url=url, resp=resp, session=session, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback)
break
except WechatSogouVcodeOcrException as e:
if i == self.captcha_break_times - 1:
raise WechatSogouVcodeOcrException(e)
if '请输入验证码' in resp.text:
resp = session.get(url)
resp.encoding = 'utf-8'
else:
headers = self.__set_cookie(referer=referer)
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64)'
resp = self.__get(url, session, headers)
resp.encoding = 'utf-8'
return resp
def __hosting_wechat_img(self, content_info, hosting_callback):
"""将微信明细中图片托管到云端,同时将html页面中的对应图片替换
Parameters
----------
content_info : dict 微信文章明细字典
{
'content_img_list': [], # 从微信文章解析出的原始图片列表
'content_html': '', # 从微信文章解析出文章的内容
}
hosting_callback : callable
托管回调函数,传入单个图片链接,返回托管后的图片链接
Returns
-------
dict
{
'content_img_list': '', # 托管后的图片列表
'content_html': '', # 图片链接为托管后的图片链接内容
}
"""
assert callable(hosting_callback)
content_img_list = content_info.pop("content_img_list")
content_html = content_info.pop("content_html")
for idx, img_url in enumerate(content_img_list):
hosting_img_url = hosting_callback(img_url)
if not hosting_img_url:
# todo 定义标准异常
raise Exception()
content_img_list[idx] = hosting_img_url
content_html = content_html.replace(img_url, hosting_img_url)
return dict(content_img_list=content_img_list, content_html=content_html)
def __format_url(self, url, referer, text, unlock_callback=None, identify_image_callback=None, session=None):
def _parse_url(url, pads):
b = math.floor(random.random() * 100) + 1
a = url.find("url=")
c = url.find("&k=")
if a != -1 and c == -1:
sum = 0
for i in list(pads) + [a, b]:
sum += int(must_str(i))
a = url[sum]
return '{}&k={}&h={}'.format(url, may_int(b), may_int(a))
if url.startswith('/link?url='):
url = 'https://weixin.sogou.com{}'.format(url)
pads = re.findall(r'href\.substr\(a\+(\d+)\+parseInt\("(\d+)"\)\+b,1\)', text)
url = _parse_url(url, pads[0] if pads else [])
resp = self.__get_by_unlock(url,
referer=referer,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
uri = ''
base_url = re.findall(r'var url = \'(.*?)\';', resp.text)
if base_url and len(base_url) > 0:
uri = base_url[0]
mp_url = re.findall(r'url \+= \'(.*?)\';', resp.text)
if mp_url:
uri = uri + ''.join(mp_url)
url = uri.replace('@', '')
return url
def get_gzh_info(self, wecgat_id_or_name, unlock_callback=None, identify_image_callback=None, decode_url=True):
"""获取公众号微信号 wechatid 的信息
因为wechatid唯一确定,所以第一个就是要搜索的公众号
Parameters
----------
wecgat_id_or_name : str or unicode
wechat_id or wechat_name
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict or None
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
"""
info = self.search_gzh(wecgat_id_or_name, 1, unlock_callback, identify_image_callback, decode_url)
try:
return next(info)
except StopIteration:
return None
def search_gzh(self, keyword, page=1, unlock_callback=None, identify_image_callback=None, decode_url=True):
"""搜索 公众号
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
Raises
------
WechatSogouRequestsException
requests error
"""
url = WechatSogouRequest.gen_search_gzh_url(keyword, page)
session = requests.session()
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
gzh_list = WechatSogouStructuring.get_gzh_by_search(resp.text)
for i in gzh_list:
if decode_url:
i['profile_url'] = self.__format_url(i['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i
def search_article(self, keyword, page=1, timesn=WechatSogouConst.search_article_time.anytime,
article_type=WechatSogouConst.search_article_type.all, ft=None, et=None,
unlock_callback=None,
identify_image_callback=None,
decode_url=True):
"""搜索 文章
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
timesn : WechatSogouConst.search_article_time
时间 anytime 没有限制 / day 一天 / week 一周 / month 一月 / year 一年 / specific 自定
the default is anytime
article_type : WechatSogouConst.search_article_type
含有内容的类型 image 有图 / video 有视频 / rich 有图和视频 / all 啥都有
ft, et : datetime.date or None
当 tsn 是 specific 时,ft 代表开始时间,如: 2017-07-01
当 tsn 是 specific 时,et 代表结束时间,如: 2017-07-15
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'article': {
'title': '', # 文章标题
'url': '', # 文章链接
'imgs': '', # 文章图片list
'abstract': '', # 文章摘要
'time': '' # 文章推送时间
},
'gzh': {
'profile_url': '', # 公众号最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'isv': '', # 是否加v
}
}
Raises
------
WechatSogouRequestsException
requests error
"""
url = WechatSogouRequest.gen_search_article_url(keyword, page, timesn, article_type, ft, et)
session = requests.session()
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
article_list = WechatSogouStructuring.get_article_by_search(resp.text)
for i in article_list:
if decode_url:
i['article']['url'] = self.__format_url(i['article']['url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
i['gzh']['profile_url'] = self.__format_url(i['gzh']['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i
def get_gzh_article_by_history(self, keyword=None, url=None,
unlock_callback_sogou=None,
identify_image_callback_sogou=None,
unlock_callback_weixin=None,
identify_image_callback_weixin=None):
"""从 公众号的最近10条群发页面 提取公众号信息 和 文章列表信息
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
公众号的id 或者name
url : str or unicode
群发页url,如果不提供url,就先去搜索一遍拿到url
unlock_callback_sogou : callable
处理出现 搜索 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_sogou : callable
处理 搜索 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
unlock_callback_weixin : callable
处理出现 历史页 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_weixin : callable
处理 历史页 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict
{
'gzh': {
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'introduction': '', # 描述
'authentication': '', # 认证
'headimage': '' # 头像
},
'article': [
{
'send_id': '', # 群发id,注意不唯一,因为同一次群发多个消息,而群发id一致
'datetime': '', # 群发datatime
'type': '', # 消息类型,均是49,表示图文
'main': 0, # 是否是一次群发的第一次消息
'title': '', # 文章标题
'abstract': '', # 摘要
'fileid': '', #
'content_url': '', # 文章链接
'source_url': '', # 阅读原文的链接
'cover': '', # 封面图
'author': '', # 作者
'copyright_stat': '', # 文章类型,例如:原创啊
},
...
]
}
Raises
------
WechatSogouRequestsException
requests error
"""
if url is None:
gzh_list = self.get_gzh_info(keyword, unlock_callback_sogou, identify_image_callback_sogou)
if gzh_list is None:
return {}
if 'profile_url' not in gzh_list:
raise Exception() # todo use ws exception
url = gzh_list['profile_url']
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback_weixin,
identify_image_callback=identify_image_callback_weixin)
return WechatSogouStructuring.get_gzh_info_and_article_by_history(resp.text)
def get_gzh_article_by_hot(self, hot_index, page=1, unlock_callback=None, identify_image_callback=None):
"""获取 首页热门文章
Parameters
----------
hot_index : WechatSogouConst.hot_index
首页热门文章的分类(常量):WechatSogouConst.hot_index.xxx
page : int
页数
Returns
-------
list[dict]
{
'gzh': {
'headimage': str, # 公众号头像
'wechat_name': str, # 公众号名称
},
'article': {
'url': str, # 文章临时链接
'title': str, # 文章标题
'abstract': str, # 文章摘要
'time': int, # 推送时间,10位时间戳
'open_id': str, # open id
'main_img': str # 封面图片
}
}
"""
assert hasattr(WechatSogouConst.hot_index, hot_index)
assert isinstance(page, int) and page > 0
url = WechatSogouRequest.gen_hot_url(hot_index, page)
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
return WechatSogouStructuring.get_gzh_article_by_hot(resp.text)
def get_sugg(self, keyword):
"""获取微信搜狗搜索关键词联想
Parameters
----------
keyword : str or unicode
关键词
Returns
-------
list[str]
联想关键词列表
Raises
------
WechatSogouRequestsException
"""
url = 'http://w.sugg.sogou.com/sugg/ajaj_json.jsp?key={}&type=wxpub&pr=web'.format(
quote(keyword.encode('utf-8')))
r = requests.get(url)
if not r.ok:
raise WechatSogouRequestsException('get_sugg', r)
sugg = re.findall(u'\["' + keyword + '",(.*?),\["', r.text)[0]
return json.loads(sugg)
|
Chyroc/WechatSogou | wechatsogou/api.py | WechatSogouAPI.get_sugg | python | def get_sugg(self, keyword):
url = 'http://w.sugg.sogou.com/sugg/ajaj_json.jsp?key={}&type=wxpub&pr=web'.format(
quote(keyword.encode('utf-8')))
r = requests.get(url)
if not r.ok:
raise WechatSogouRequestsException('get_sugg', r)
sugg = re.findall(u'\["' + keyword + '",(.*?),\["', r.text)[0]
return json.loads(sugg) | 获取微信搜狗搜索关键词联想
Parameters
----------
keyword : str or unicode
关键词
Returns
-------
list[str]
联想关键词列表
Raises
------
WechatSogouRequestsException | train | https://github.com/Chyroc/WechatSogou/blob/2e0e9886f555fd8bcfc7ae9718ced6ce955cd24a/wechatsogou/api.py#L543-L567 | null | class WechatSogouAPI(object):
def __init__(self, captcha_break_time=1, headers=None, **kwargs):
"""初始化参数
Parameters
----------
captcha_break_time : int
验证码输入错误重试次数
proxies : dict
代理
timeout : float
超时时间
"""
assert isinstance(captcha_break_time, int) and 0 < captcha_break_time < 20
self.captcha_break_times = captcha_break_time
self.requests_kwargs = kwargs
self.headers = headers
if self.headers:
self.headers['User-Agent'] = random.choice(agents)
else:
self.headers = {'User-Agent': random.choice(agents)}
def __set_cookie(self, suv=None, snuid=None, referer=None):
suv = ws_cache.get('suv') if suv is None else suv
snuid = ws_cache.get('snuid') if snuid is None else snuid
_headers = {'Cookie': 'SUV={};SNUID={};'.format(suv, snuid)}
if referer is not None:
_headers['Referer'] = referer
return _headers
def __set_cache(self, suv, snuid):
ws_cache.set('suv', suv)
ws_cache.set('snuid', snuid)
def __get(self, url, session, headers):
h = {}
if headers:
for k, v in headers.items():
h[k] = v
if self.headers:
for k, v in self.headers.items():
h[k] = v
resp = session.get(url, headers=h, **self.requests_kwargs)
if not resp.ok:
raise WechatSogouRequestsException('WechatSogouAPI get error', resp)
return resp
def __unlock_sogou(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_sogou_callback_example
millis = int(round(time.time() * 1000))
r_captcha = session.get('http://weixin.sogou.com/antispider/util/seccode.php?tc={}'.format(millis), headers={
'Referer': url,
})
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI get img', r_captcha)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['code'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {code}, msg: {msg}'.format(code=r_unlock.get('code'),
msg=r_unlock.get('msg')))
else:
self.__set_cache(session.cookies.get('SUID'), r_unlock['id'])
def __unlock_wechat(self, url, resp, session, unlock_callback=None, identify_image_callback=None):
if unlock_callback is None:
unlock_callback = unlock_weixin_callback_example
r_captcha = session.get('https://mp.weixin.qq.com/mp/verifycode?cert={}'.format(time.time() * 1000))
if not r_captcha.ok:
raise WechatSogouRequestsException('WechatSogouAPI unlock_history get img', resp)
r_unlock = unlock_callback(url, session, resp, r_captcha.content, identify_image_callback)
if r_unlock['ret'] != 0:
raise WechatSogouVcodeOcrException(
'[WechatSogouAPI identify image] code: {ret}, msg: {errmsg}, cookie_count: {cookie_count}'.format(
ret=r_unlock.get('ret'), errmsg=r_unlock.get('errmsg'), cookie_count=r_unlock.get('cookie_count')))
def __get_by_unlock(self, url, referer=None, unlock_platform=None, unlock_callback=None, identify_image_callback=None, session=None):
assert unlock_platform is None or callable(unlock_platform)
if identify_image_callback is None:
identify_image_callback = identify_image_callback_by_hand
assert unlock_callback is None or callable(unlock_callback)
assert callable(identify_image_callback)
if not session:
session = requests.session()
resp = self.__get(url, session, headers=self.__set_cookie(referer=referer))
resp.encoding = 'utf-8'
if 'antispider' in resp.url or '请输入验证码' in resp.text:
for i in range(self.captcha_break_times):
try:
unlock_platform(url=url, resp=resp, session=session, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback)
break
except WechatSogouVcodeOcrException as e:
if i == self.captcha_break_times - 1:
raise WechatSogouVcodeOcrException(e)
if '请输入验证码' in resp.text:
resp = session.get(url)
resp.encoding = 'utf-8'
else:
headers = self.__set_cookie(referer=referer)
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64)'
resp = self.__get(url, session, headers)
resp.encoding = 'utf-8'
return resp
def __hosting_wechat_img(self, content_info, hosting_callback):
"""将微信明细中图片托管到云端,同时将html页面中的对应图片替换
Parameters
----------
content_info : dict 微信文章明细字典
{
'content_img_list': [], # 从微信文章解析出的原始图片列表
'content_html': '', # 从微信文章解析出文章的内容
}
hosting_callback : callable
托管回调函数,传入单个图片链接,返回托管后的图片链接
Returns
-------
dict
{
'content_img_list': '', # 托管后的图片列表
'content_html': '', # 图片链接为托管后的图片链接内容
}
"""
assert callable(hosting_callback)
content_img_list = content_info.pop("content_img_list")
content_html = content_info.pop("content_html")
for idx, img_url in enumerate(content_img_list):
hosting_img_url = hosting_callback(img_url)
if not hosting_img_url:
# todo 定义标准异常
raise Exception()
content_img_list[idx] = hosting_img_url
content_html = content_html.replace(img_url, hosting_img_url)
return dict(content_img_list=content_img_list, content_html=content_html)
def __format_url(self, url, referer, text, unlock_callback=None, identify_image_callback=None, session=None):
def _parse_url(url, pads):
b = math.floor(random.random() * 100) + 1
a = url.find("url=")
c = url.find("&k=")
if a != -1 and c == -1:
sum = 0
for i in list(pads) + [a, b]:
sum += int(must_str(i))
a = url[sum]
return '{}&k={}&h={}'.format(url, may_int(b), may_int(a))
if url.startswith('/link?url='):
url = 'https://weixin.sogou.com{}'.format(url)
pads = re.findall(r'href\.substr\(a\+(\d+)\+parseInt\("(\d+)"\)\+b,1\)', text)
url = _parse_url(url, pads[0] if pads else [])
resp = self.__get_by_unlock(url,
referer=referer,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
uri = ''
base_url = re.findall(r'var url = \'(.*?)\';', resp.text)
if base_url and len(base_url) > 0:
uri = base_url[0]
mp_url = re.findall(r'url \+= \'(.*?)\';', resp.text)
if mp_url:
uri = uri + ''.join(mp_url)
url = uri.replace('@', '')
return url
def get_gzh_info(self, wecgat_id_or_name, unlock_callback=None, identify_image_callback=None, decode_url=True):
"""获取公众号微信号 wechatid 的信息
因为wechatid唯一确定,所以第一个就是要搜索的公众号
Parameters
----------
wecgat_id_or_name : str or unicode
wechat_id or wechat_name
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict or None
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
"""
info = self.search_gzh(wecgat_id_or_name, 1, unlock_callback, identify_image_callback, decode_url)
try:
return next(info)
except StopIteration:
return None
def search_gzh(self, keyword, page=1, unlock_callback=None, identify_image_callback=None, decode_url=True):
"""搜索 公众号
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
Raises
------
WechatSogouRequestsException
requests error
"""
url = WechatSogouRequest.gen_search_gzh_url(keyword, page)
session = requests.session()
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
gzh_list = WechatSogouStructuring.get_gzh_by_search(resp.text)
for i in gzh_list:
if decode_url:
i['profile_url'] = self.__format_url(i['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i
def search_article(self, keyword, page=1, timesn=WechatSogouConst.search_article_time.anytime,
article_type=WechatSogouConst.search_article_type.all, ft=None, et=None,
unlock_callback=None,
identify_image_callback=None,
decode_url=True):
"""搜索 文章
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
timesn : WechatSogouConst.search_article_time
时间 anytime 没有限制 / day 一天 / week 一周 / month 一月 / year 一年 / specific 自定
the default is anytime
article_type : WechatSogouConst.search_article_type
含有内容的类型 image 有图 / video 有视频 / rich 有图和视频 / all 啥都有
ft, et : datetime.date or None
当 tsn 是 specific 时,ft 代表开始时间,如: 2017-07-01
当 tsn 是 specific 时,et 代表结束时间,如: 2017-07-15
unlock_callback : callable
处理出现验证码页面的函数,参见 unlock_callback_example
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
decode_url : bool
是否解析 url
Returns
-------
list[dict]
{
'article': {
'title': '', # 文章标题
'url': '', # 文章链接
'imgs': '', # 文章图片list
'abstract': '', # 文章摘要
'time': '' # 文章推送时间
},
'gzh': {
'profile_url': '', # 公众号最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'isv': '', # 是否加v
}
}
Raises
------
WechatSogouRequestsException
requests error
"""
url = WechatSogouRequest.gen_search_article_url(keyword, page, timesn, article_type, ft, et)
session = requests.session()
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback,
session=session)
article_list = WechatSogouStructuring.get_article_by_search(resp.text)
for i in article_list:
if decode_url:
i['article']['url'] = self.__format_url(i['article']['url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
i['gzh']['profile_url'] = self.__format_url(i['gzh']['profile_url'], url, resp.text, unlock_callback=unlock_callback, identify_image_callback=identify_image_callback, session=session)
yield i
def get_gzh_article_by_history(self, keyword=None, url=None,
unlock_callback_sogou=None,
identify_image_callback_sogou=None,
unlock_callback_weixin=None,
identify_image_callback_weixin=None):
"""从 公众号的最近10条群发页面 提取公众号信息 和 文章列表信息
对于出现验证码的情况,可以由使用者自己提供:
1、函数 unlock_callback ,这个函数 handle 出现验证码到解决的整个流程
2、也可以 只提供函数 identify_image_callback,这个函数输入验证码二进制数据,输出验证码文字,剩下的由 wechatsogou 包来解决
注意:
函数 unlock_callback 和 identify_image_callback 只需要提供一个,如果都提供了,那么 identify_image_callback 不起作用
Parameters
----------
keyword : str or unicode
公众号的id 或者name
url : str or unicode
群发页url,如果不提供url,就先去搜索一遍拿到url
unlock_callback_sogou : callable
处理出现 搜索 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_sogou : callable
处理 搜索 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
unlock_callback_weixin : callable
处理出现 历史页 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback_weixin : callable
处理 历史页 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict
{
'gzh': {
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'introduction': '', # 描述
'authentication': '', # 认证
'headimage': '' # 头像
},
'article': [
{
'send_id': '', # 群发id,注意不唯一,因为同一次群发多个消息,而群发id一致
'datetime': '', # 群发datatime
'type': '', # 消息类型,均是49,表示图文
'main': 0, # 是否是一次群发的第一次消息
'title': '', # 文章标题
'abstract': '', # 摘要
'fileid': '', #
'content_url': '', # 文章链接
'source_url': '', # 阅读原文的链接
'cover': '', # 封面图
'author': '', # 作者
'copyright_stat': '', # 文章类型,例如:原创啊
},
...
]
}
Raises
------
WechatSogouRequestsException
requests error
"""
if url is None:
gzh_list = self.get_gzh_info(keyword, unlock_callback_sogou, identify_image_callback_sogou)
if gzh_list is None:
return {}
if 'profile_url' not in gzh_list:
raise Exception() # todo use ws exception
url = gzh_list['profile_url']
resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback_weixin,
identify_image_callback=identify_image_callback_weixin)
return WechatSogouStructuring.get_gzh_info_and_article_by_history(resp.text)
def get_gzh_article_by_hot(self, hot_index, page=1, unlock_callback=None, identify_image_callback=None):
"""获取 首页热门文章
Parameters
----------
hot_index : WechatSogouConst.hot_index
首页热门文章的分类(常量):WechatSogouConst.hot_index.xxx
page : int
页数
Returns
-------
list[dict]
{
'gzh': {
'headimage': str, # 公众号头像
'wechat_name': str, # 公众号名称
},
'article': {
'url': str, # 文章临时链接
'title': str, # 文章标题
'abstract': str, # 文章摘要
'time': int, # 推送时间,10位时间戳
'open_id': str, # open id
'main_img': str # 封面图片
}
}
"""
assert hasattr(WechatSogouConst.hot_index, hot_index)
assert isinstance(page, int) and page > 0
url = WechatSogouRequest.gen_hot_url(hot_index, page)
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_sogou,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
return WechatSogouStructuring.get_gzh_article_by_hot(resp.text)
def get_article_content(self, url, del_qqmusic=True, del_mpvoice=True, unlock_callback=None,
identify_image_callback=None, hosting_callback=None, raw=False):
"""获取文章原文,避免临时链接失效
Parameters
----------
url : str or unicode
原文链接,临时链接
raw : bool
True: 返回原始html
False: 返回处理后的html
del_qqmusic: bool
True:微信原文中有插入的qq音乐,则删除
False:微信源文中有插入的qq音乐,则保留
del_mpvoice: bool
True:微信原文中有插入的语音消息,则删除
False:微信源文中有插入的语音消息,则保留
unlock_callback : callable
处理 文章明细 的时候出现验证码的函数,参见 unlock_callback_example
identify_image_callback : callable
处理 文章明细 的时候处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
hosting_callback: callable
将微信采集的文章托管到7牛或者阿里云回调函数,输入微信图片源地址,返回托管后地址
Returns
-------
content_html
原文内容
content_img_list
文章中图片列表
Raises
------
WechatSogouRequestsException
"""
resp = self.__get_by_unlock(url,
unlock_platform=self.__unlock_wechat,
unlock_callback=unlock_callback,
identify_image_callback=identify_image_callback)
resp.encoding = 'utf-8'
if '链接已过期' in resp.text:
raise WechatSogouException('get_article_content 链接 [{}] 已过期'.format(url))
if raw:
return resp.text
content_info = WechatSogouStructuring.get_article_detail(resp.text, del_qqmusic=del_qqmusic,
del_voice=del_mpvoice)
if hosting_callback:
content_info = self.__hosting_wechat_img(content_info, hosting_callback)
return content_info
|
Chyroc/WechatSogou | wechatsogou/identify_image.py | unlock_sogou_callback_example | python | def unlock_sogou_callback_example(url, req, resp, img, identify_image_callback):
# no use resp
url_quote = url.split('weixin.sogou.com/')[-1]
unlock_url = 'http://weixin.sogou.com/antispider/thank.php'
data = {
'c': identify_image_callback(img),
'r': '%2F' + url_quote,
'v': 5
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': 'http://weixin.sogou.com/antispider/?from=%2f' + url_quote
}
r_unlock = req.post(unlock_url, data, headers=headers)
r_unlock.encoding = 'utf-8'
if not r_unlock.ok:
raise WechatSogouVcodeOcrException(
'unlock[{}] failed: {}'.format(unlock_url, r_unlock.text, r_unlock.status_code))
return r_unlock.json() | 手动打码解锁
Parameters
----------
url : str or unicode
验证码页面 之前的 url
req : requests.sessions.Session
requests.Session() 供调用解锁
resp : requests.models.Response
requests 访问页面返回的,已经跳转了
img : bytes
验证码图片二进制数据
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict
{
'code': '',
'msg': '',
} | train | https://github.com/Chyroc/WechatSogou/blob/2e0e9886f555fd8bcfc7ae9718ced6ce955cd24a/wechatsogou/identify_image.py#L34-L76 | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
import time
import requests
from wechatsogou.five import readimg, input
from wechatsogou.filecache import WechatCache
from wechatsogou.exceptions import WechatSogouVcodeOcrException
ws_cache = WechatCache()
def identify_image_callback_by_hand(img):
"""识别二维码
Parameters
----------
img : bytes
验证码图片二进制数据
Returns
-------
str
验证码文字
"""
im = readimg(img)
im.show()
return input("please input code: ")
def unlock_weixin_callback_example(url, req, resp, img, identify_image_callback):
"""手动打码解锁
Parameters
----------
url : str or unicode
验证码页面 之前的 url
req : requests.sessions.Session
requests.Session() 供调用解锁
resp : requests.models.Response
requests 访问页面返回的,已经跳转了
img : bytes
验证码图片二进制数据
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict
{
'ret': '',
'errmsg': '',
'cookie_count': '',
}
"""
# no use resp
unlock_url = 'https://mp.weixin.qq.com/mp/verifycode'
data = {
'cert': time.time() * 1000,
'input': identify_image_callback(img)
}
headers = {
'Host': 'mp.weixin.qq.com',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': url
}
r_unlock = req.post(unlock_url, data, headers=headers)
if not r_unlock.ok:
raise WechatSogouVcodeOcrException(
'unlock[{}] failed: {}[{}]'.format(unlock_url, r_unlock.text, r_unlock.status_code))
return r_unlock.json()
|
Chyroc/WechatSogou | wechatsogou/identify_image.py | unlock_weixin_callback_example | python | def unlock_weixin_callback_example(url, req, resp, img, identify_image_callback):
# no use resp
unlock_url = 'https://mp.weixin.qq.com/mp/verifycode'
data = {
'cert': time.time() * 1000,
'input': identify_image_callback(img)
}
headers = {
'Host': 'mp.weixin.qq.com',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': url
}
r_unlock = req.post(unlock_url, data, headers=headers)
if not r_unlock.ok:
raise WechatSogouVcodeOcrException(
'unlock[{}] failed: {}[{}]'.format(unlock_url, r_unlock.text, r_unlock.status_code))
return r_unlock.json() | 手动打码解锁
Parameters
----------
url : str or unicode
验证码页面 之前的 url
req : requests.sessions.Session
requests.Session() 供调用解锁
resp : requests.models.Response
requests 访问页面返回的,已经跳转了
img : bytes
验证码图片二进制数据
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict
{
'ret': '',
'errmsg': '',
'cookie_count': '',
} | train | https://github.com/Chyroc/WechatSogou/blob/2e0e9886f555fd8bcfc7ae9718ced6ce955cd24a/wechatsogou/identify_image.py#L79-L121 | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
import time
import requests
from wechatsogou.five import readimg, input
from wechatsogou.filecache import WechatCache
from wechatsogou.exceptions import WechatSogouVcodeOcrException
ws_cache = WechatCache()
def identify_image_callback_by_hand(img):
"""识别二维码
Parameters
----------
img : bytes
验证码图片二进制数据
Returns
-------
str
验证码文字
"""
im = readimg(img)
im.show()
return input("please input code: ")
def unlock_sogou_callback_example(url, req, resp, img, identify_image_callback):
"""手动打码解锁
Parameters
----------
url : str or unicode
验证码页面 之前的 url
req : requests.sessions.Session
requests.Session() 供调用解锁
resp : requests.models.Response
requests 访问页面返回的,已经跳转了
img : bytes
验证码图片二进制数据
identify_image_callback : callable
处理验证码函数,输入验证码二进制数据,输出文字,参见 identify_image_callback_example
Returns
-------
dict
{
'code': '',
'msg': '',
}
"""
# no use resp
url_quote = url.split('weixin.sogou.com/')[-1]
unlock_url = 'http://weixin.sogou.com/antispider/thank.php'
data = {
'c': identify_image_callback(img),
'r': '%2F' + url_quote,
'v': 5
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': 'http://weixin.sogou.com/antispider/?from=%2f' + url_quote
}
r_unlock = req.post(unlock_url, data, headers=headers)
r_unlock.encoding = 'utf-8'
if not r_unlock.ok:
raise WechatSogouVcodeOcrException(
'unlock[{}] failed: {}'.format(unlock_url, r_unlock.text, r_unlock.status_code))
return r_unlock.json()
|
Chyroc/WechatSogou | wechatsogou/request.py | WechatSogouRequest.gen_search_article_url | python | def gen_search_article_url(keyword, page=1, timesn=WechatSogouConst.search_article_time.anytime,
article_type=WechatSogouConst.search_article_type.all, ft=None, et=None):
assert isinstance(page, int) and page > 0
assert timesn in [WechatSogouConst.search_article_time.anytime,
WechatSogouConst.search_article_time.day,
WechatSogouConst.search_article_time.week,
WechatSogouConst.search_article_time.month,
WechatSogouConst.search_article_time.year,
WechatSogouConst.search_article_time.specific]
if timesn == WechatSogouConst.search_article_time.specific:
assert isinstance(ft, datetime.date)
assert isinstance(et, datetime.date)
assert ft <= et
else:
ft = ''
et = ''
interation_image = 458754
interation_video = 458756
if article_type == WechatSogouConst.search_article_type.rich:
interation = '{},{}'.format(interation_image, interation_video)
elif article_type == WechatSogouConst.search_article_type.image:
interation = interation_image
elif article_type == WechatSogouConst.search_article_type.video:
interation = interation_video
else:
interation = ''
qs_dict = OrderedDict()
qs_dict['type'] = _search_type_article
qs_dict['page'] = page
qs_dict['ie'] = 'utf8'
qs_dict['query'] = keyword
qs_dict['interation'] = interation
if timesn != 0:
qs_dict['tsn'] = timesn
qs_dict['ft'] = str(ft)
qs_dict['et'] = str(et)
# TODO 账号内搜索
# '账号内 http://weixin.sogou.com/weixin?type=2&ie=utf8&query=%E9%AB%98%E8%80%83&tsn=3&ft=&et=&interation=458754
# &wxid=oIWsFt1tmWoG6vO6BcsS7St61bRE&usip=nanhangqinggong'
# qs['wxid'] = wxid
# qs['usip'] = usip
return 'http://weixin.sogou.com/weixin?{}'.format(urlencode(qs_dict)) | 拼接搜索 文章 URL
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
timesn : WechatSogouConst.search_article_time
时间 anytime 没有限制 / day 一天 / week 一周 / month 一月 / year 一年 / specific 自定
默认是 anytime
article_type : WechatSogouConst.search_article_type
含有内容的类型 image 有图 / video 有视频 / rich 有图和视频 / all 啥都有
默认是 all
ft, et : datetime.date
当 tsn 是 specific 时,ft 代表开始时间,如: 2017-07-01
当 tsn 是 specific 时,et 代表结束时间,如: 2017-07-15
Returns
-------
str
search_article_url | train | https://github.com/Chyroc/WechatSogou/blob/2e0e9886f555fd8bcfc7ae9718ced6ce955cd24a/wechatsogou/request.py#L17-L86 | null | class WechatSogouRequest(object):
@staticmethod
@staticmethod
def gen_search_gzh_url(keyword, page=1):
"""拼接搜索 公众号 URL
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
Returns
-------
str
search_gzh_url
"""
assert isinstance(page, int) and page > 0
qs_dict = OrderedDict()
qs_dict['type'] = _search_type_gzh
qs_dict['page'] = page
qs_dict['ie'] = 'utf8'
qs_dict['query'] = keyword
return 'http://weixin.sogou.com/weixin?{}'.format(urlencode(qs_dict))
@staticmethod
def gen_hot_url(hot_index, page=1):
"""拼接 首页热门文章 URL
Parameters
----------
hot_index : WechatSogouConst.hot_index
首页热门文章的分类(常量):WechatSogouConst.hot_index.xxx
page : int
页数
Returns
-------
str
热门文章分类的url
"""
assert hasattr(WechatSogouConst.hot_index, hot_index)
assert isinstance(page, int) and page > 0
index_urls = {
WechatSogouConst.hot_index.hot: 0, # 热门
WechatSogouConst.hot_index.gaoxiao: 1, # 搞笑
WechatSogouConst.hot_index.health: 2, # 养生
WechatSogouConst.hot_index.sifanghua: 3, # 私房话
WechatSogouConst.hot_index.gossip: 4, # 八卦
WechatSogouConst.hot_index.technology: 5, # 科技
WechatSogouConst.hot_index.finance: 6, # 财经
WechatSogouConst.hot_index.car: 7, # 汽车
WechatSogouConst.hot_index.life: 8, # 生活
WechatSogouConst.hot_index.fashion: 9, # 时尚
WechatSogouConst.hot_index.mummy: 10, # 辣妈 / 育儿
WechatSogouConst.hot_index.travel: 11, # 旅行
WechatSogouConst.hot_index.job: 12, # 职场
WechatSogouConst.hot_index.food: 13, # 美食
WechatSogouConst.hot_index.history: 14, # 历史
WechatSogouConst.hot_index.study: 15, # 学霸 / 教育
WechatSogouConst.hot_index.constellation: 16, # 星座
WechatSogouConst.hot_index.sport: 17, # 体育
WechatSogouConst.hot_index.military: 18, # 军事
WechatSogouConst.hot_index.game: 19, # 游戏
WechatSogouConst.hot_index.pet: 20, # 萌宠
}
return 'http://weixin.sogou.com/wapindex/wap/0612/wap_{}/{}.html'.format(index_urls[hot_index], page - 1)
|
Chyroc/WechatSogou | wechatsogou/request.py | WechatSogouRequest.gen_search_gzh_url | python | def gen_search_gzh_url(keyword, page=1):
assert isinstance(page, int) and page > 0
qs_dict = OrderedDict()
qs_dict['type'] = _search_type_gzh
qs_dict['page'] = page
qs_dict['ie'] = 'utf8'
qs_dict['query'] = keyword
return 'http://weixin.sogou.com/weixin?{}'.format(urlencode(qs_dict)) | 拼接搜索 公众号 URL
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
Returns
-------
str
search_gzh_url | train | https://github.com/Chyroc/WechatSogou/blob/2e0e9886f555fd8bcfc7ae9718ced6ce955cd24a/wechatsogou/request.py#L89-L112 | null | class WechatSogouRequest(object):
@staticmethod
def gen_search_article_url(keyword, page=1, timesn=WechatSogouConst.search_article_time.anytime,
article_type=WechatSogouConst.search_article_type.all, ft=None, et=None):
"""拼接搜索 文章 URL
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
timesn : WechatSogouConst.search_article_time
时间 anytime 没有限制 / day 一天 / week 一周 / month 一月 / year 一年 / specific 自定
默认是 anytime
article_type : WechatSogouConst.search_article_type
含有内容的类型 image 有图 / video 有视频 / rich 有图和视频 / all 啥都有
默认是 all
ft, et : datetime.date
当 tsn 是 specific 时,ft 代表开始时间,如: 2017-07-01
当 tsn 是 specific 时,et 代表结束时间,如: 2017-07-15
Returns
-------
str
search_article_url
"""
assert isinstance(page, int) and page > 0
assert timesn in [WechatSogouConst.search_article_time.anytime,
WechatSogouConst.search_article_time.day,
WechatSogouConst.search_article_time.week,
WechatSogouConst.search_article_time.month,
WechatSogouConst.search_article_time.year,
WechatSogouConst.search_article_time.specific]
if timesn == WechatSogouConst.search_article_time.specific:
assert isinstance(ft, datetime.date)
assert isinstance(et, datetime.date)
assert ft <= et
else:
ft = ''
et = ''
interation_image = 458754
interation_video = 458756
if article_type == WechatSogouConst.search_article_type.rich:
interation = '{},{}'.format(interation_image, interation_video)
elif article_type == WechatSogouConst.search_article_type.image:
interation = interation_image
elif article_type == WechatSogouConst.search_article_type.video:
interation = interation_video
else:
interation = ''
qs_dict = OrderedDict()
qs_dict['type'] = _search_type_article
qs_dict['page'] = page
qs_dict['ie'] = 'utf8'
qs_dict['query'] = keyword
qs_dict['interation'] = interation
if timesn != 0:
qs_dict['tsn'] = timesn
qs_dict['ft'] = str(ft)
qs_dict['et'] = str(et)
# TODO 账号内搜索
# '账号内 http://weixin.sogou.com/weixin?type=2&ie=utf8&query=%E9%AB%98%E8%80%83&tsn=3&ft=&et=&interation=458754
# &wxid=oIWsFt1tmWoG6vO6BcsS7St61bRE&usip=nanhangqinggong'
# qs['wxid'] = wxid
# qs['usip'] = usip
return 'http://weixin.sogou.com/weixin?{}'.format(urlencode(qs_dict))
@staticmethod
@staticmethod
def gen_hot_url(hot_index, page=1):
"""拼接 首页热门文章 URL
Parameters
----------
hot_index : WechatSogouConst.hot_index
首页热门文章的分类(常量):WechatSogouConst.hot_index.xxx
page : int
页数
Returns
-------
str
热门文章分类的url
"""
assert hasattr(WechatSogouConst.hot_index, hot_index)
assert isinstance(page, int) and page > 0
index_urls = {
WechatSogouConst.hot_index.hot: 0, # 热门
WechatSogouConst.hot_index.gaoxiao: 1, # 搞笑
WechatSogouConst.hot_index.health: 2, # 养生
WechatSogouConst.hot_index.sifanghua: 3, # 私房话
WechatSogouConst.hot_index.gossip: 4, # 八卦
WechatSogouConst.hot_index.technology: 5, # 科技
WechatSogouConst.hot_index.finance: 6, # 财经
WechatSogouConst.hot_index.car: 7, # 汽车
WechatSogouConst.hot_index.life: 8, # 生活
WechatSogouConst.hot_index.fashion: 9, # 时尚
WechatSogouConst.hot_index.mummy: 10, # 辣妈 / 育儿
WechatSogouConst.hot_index.travel: 11, # 旅行
WechatSogouConst.hot_index.job: 12, # 职场
WechatSogouConst.hot_index.food: 13, # 美食
WechatSogouConst.hot_index.history: 14, # 历史
WechatSogouConst.hot_index.study: 15, # 学霸 / 教育
WechatSogouConst.hot_index.constellation: 16, # 星座
WechatSogouConst.hot_index.sport: 17, # 体育
WechatSogouConst.hot_index.military: 18, # 军事
WechatSogouConst.hot_index.game: 19, # 游戏
WechatSogouConst.hot_index.pet: 20, # 萌宠
}
return 'http://weixin.sogou.com/wapindex/wap/0612/wap_{}/{}.html'.format(index_urls[hot_index], page - 1)
|
Chyroc/WechatSogou | wechatsogou/request.py | WechatSogouRequest.gen_hot_url | python | def gen_hot_url(hot_index, page=1):
assert hasattr(WechatSogouConst.hot_index, hot_index)
assert isinstance(page, int) and page > 0
index_urls = {
WechatSogouConst.hot_index.hot: 0, # 热门
WechatSogouConst.hot_index.gaoxiao: 1, # 搞笑
WechatSogouConst.hot_index.health: 2, # 养生
WechatSogouConst.hot_index.sifanghua: 3, # 私房话
WechatSogouConst.hot_index.gossip: 4, # 八卦
WechatSogouConst.hot_index.technology: 5, # 科技
WechatSogouConst.hot_index.finance: 6, # 财经
WechatSogouConst.hot_index.car: 7, # 汽车
WechatSogouConst.hot_index.life: 8, # 生活
WechatSogouConst.hot_index.fashion: 9, # 时尚
WechatSogouConst.hot_index.mummy: 10, # 辣妈 / 育儿
WechatSogouConst.hot_index.travel: 11, # 旅行
WechatSogouConst.hot_index.job: 12, # 职场
WechatSogouConst.hot_index.food: 13, # 美食
WechatSogouConst.hot_index.history: 14, # 历史
WechatSogouConst.hot_index.study: 15, # 学霸 / 教育
WechatSogouConst.hot_index.constellation: 16, # 星座
WechatSogouConst.hot_index.sport: 17, # 体育
WechatSogouConst.hot_index.military: 18, # 军事
WechatSogouConst.hot_index.game: 19, # 游戏
WechatSogouConst.hot_index.pet: 20, # 萌宠
}
return 'http://weixin.sogou.com/wapindex/wap/0612/wap_{}/{}.html'.format(index_urls[hot_index], page - 1) | 拼接 首页热门文章 URL
Parameters
----------
hot_index : WechatSogouConst.hot_index
首页热门文章的分类(常量):WechatSogouConst.hot_index.xxx
page : int
页数
Returns
-------
str
热门文章分类的url | train | https://github.com/Chyroc/WechatSogou/blob/2e0e9886f555fd8bcfc7ae9718ced6ce955cd24a/wechatsogou/request.py#L115-L158 | null | class WechatSogouRequest(object):
@staticmethod
def gen_search_article_url(keyword, page=1, timesn=WechatSogouConst.search_article_time.anytime,
article_type=WechatSogouConst.search_article_type.all, ft=None, et=None):
"""拼接搜索 文章 URL
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
timesn : WechatSogouConst.search_article_time
时间 anytime 没有限制 / day 一天 / week 一周 / month 一月 / year 一年 / specific 自定
默认是 anytime
article_type : WechatSogouConst.search_article_type
含有内容的类型 image 有图 / video 有视频 / rich 有图和视频 / all 啥都有
默认是 all
ft, et : datetime.date
当 tsn 是 specific 时,ft 代表开始时间,如: 2017-07-01
当 tsn 是 specific 时,et 代表结束时间,如: 2017-07-15
Returns
-------
str
search_article_url
"""
assert isinstance(page, int) and page > 0
assert timesn in [WechatSogouConst.search_article_time.anytime,
WechatSogouConst.search_article_time.day,
WechatSogouConst.search_article_time.week,
WechatSogouConst.search_article_time.month,
WechatSogouConst.search_article_time.year,
WechatSogouConst.search_article_time.specific]
if timesn == WechatSogouConst.search_article_time.specific:
assert isinstance(ft, datetime.date)
assert isinstance(et, datetime.date)
assert ft <= et
else:
ft = ''
et = ''
interation_image = 458754
interation_video = 458756
if article_type == WechatSogouConst.search_article_type.rich:
interation = '{},{}'.format(interation_image, interation_video)
elif article_type == WechatSogouConst.search_article_type.image:
interation = interation_image
elif article_type == WechatSogouConst.search_article_type.video:
interation = interation_video
else:
interation = ''
qs_dict = OrderedDict()
qs_dict['type'] = _search_type_article
qs_dict['page'] = page
qs_dict['ie'] = 'utf8'
qs_dict['query'] = keyword
qs_dict['interation'] = interation
if timesn != 0:
qs_dict['tsn'] = timesn
qs_dict['ft'] = str(ft)
qs_dict['et'] = str(et)
# TODO 账号内搜索
# '账号内 http://weixin.sogou.com/weixin?type=2&ie=utf8&query=%E9%AB%98%E8%80%83&tsn=3&ft=&et=&interation=458754
# &wxid=oIWsFt1tmWoG6vO6BcsS7St61bRE&usip=nanhangqinggong'
# qs['wxid'] = wxid
# qs['usip'] = usip
return 'http://weixin.sogou.com/weixin?{}'.format(urlencode(qs_dict))
@staticmethod
def gen_search_gzh_url(keyword, page=1):
"""拼接搜索 公众号 URL
Parameters
----------
keyword : str or unicode
搜索文字
page : int, optional
页数 the default is 1
Returns
-------
str
search_gzh_url
"""
assert isinstance(page, int) and page > 0
qs_dict = OrderedDict()
qs_dict['type'] = _search_type_gzh
qs_dict['page'] = page
qs_dict['ie'] = 'utf8'
qs_dict['query'] = keyword
return 'http://weixin.sogou.com/weixin?{}'.format(urlencode(qs_dict))
@staticmethod
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.