repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
thumbor/thumbor | tests/filters/test_round_corner.py | 1 | 3229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from preggy import expect
from tornado.testing import gen_test
from tests.base import FilterTestCase
class RoundCornerFilterTestCase(FilterTestCase):
    """SSIM-based regression tests for thumbor's round_corner filter."""

    async def _check(self, params, fixture, mode=None):
        """Apply round_corner with *params* to source.jpg and require an
        SSIM above 0.99 against the *fixture* image."""
        extra = {} if mode is None else {"mode": mode}
        image = await self.get_filtered(
            "source.jpg",
            "thumbor.filters.round_corner",
            params,
            **extra,
        )
        expected = self.get_fixture(fixture, **extra)
        expect(self.get_ssim(image, expected)).to_be_greater_than(0.99)

    @gen_test
    async def test_round_corner_filter_with_a_radius(self):
        await self._check("round_corner(50,255,0,0)", "round_corner.jpg")

    @gen_test
    async def test_round_corner_filter_with_ab_radius(self):
        await self._check("round_corner(50|75,255,0,0)", "round_corner1.jpg")

    @gen_test
    async def test_round_corner_filter_with_blue_background(self):
        await self._check("round_corner(50,0,0,255)", "round_corner2.jpg")

    @gen_test
    async def test_round_corner_filter_without_transparent(self):
        await self._check("round_corner(50,255,0,0,0)", "round_corner3.jpg")

    @gen_test
    async def test_round_corner_filter_with_transparent(self):
        await self._check("round_corner(50,255,0,0,1)", "round_corner3.jpg")

    @gen_test
    async def test_round_corner_filter_with_transparent_by_png(self):
        await self._check(
            "round_corner(50,255,0,0,1)", "round_corner.png", mode="RGBA"
        )

    @gen_test
    async def test_round_corner_filter_with_transparent_by_webp(self):
        await self._check(
            "round_corner(50,255,0,0,1)", "round_corner.webp", mode="RGBA"
        )
| mit | 8feab3dd7f32cf74cc9de7a6a8a6fc1c | 30.656863 | 70 | 0.598947 | 3.370564 | false | true | false | false |
thumbor/thumbor | thumbor/filters/upscale.py | 1 | 1064 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from thumbor.filters import BaseFilter, filter_method
class Filter(BaseFilter):
    """Upscale filter: enlarges an image that is smaller than the
    requested size while keeping its aspect ratio."""

    @filter_method()
    async def upscale(self):
        """Resize the engine's image up to the requested dimensions.

        Does nothing unless the source is smaller than the target in
        both width and height (same guard as before: any dimension
        already at or above target aborts the upscale).
        """
        request = self.context.request
        wanted_w, wanted_h = request.width, request.height
        actual_w, actual_h = request.engine.size
        if actual_w >= wanted_w or actual_h >= wanted_h:
            return
        # Scale along the dimension that is proportionally closer to its
        # target, deriving the other from the source aspect ratio.
        width_ratio = actual_w * 1.0 / wanted_w
        height_ratio = actual_h * 1.0 / wanted_h
        if width_ratio >= height_ratio:
            size = (wanted_w, int(actual_h * wanted_w / actual_w))
        else:
            size = (int(actual_w * wanted_h / actual_h), wanted_h)
        self.engine.resize(*size)
| mit | a291df557deff32e044917a3337dc94a | 29.4 | 74 | 0.638158 | 3.77305 | false | false | false | false |
thumbor/thumbor | thumbor/app.py | 1 | 1116 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
import tornado.ioloop
import tornado.web
from libthumbor.url import Url # type: ignore
from thumbor.handlers.imaging import ImagingHandler
class ThumborServiceApp(tornado.web.Application):
    """Tornado application wiring thumbor's URL handlers to a context."""

    def __init__(self, context):
        self.context = context
        # Server debug flag is optional on the context; default off.
        self.debug = getattr(context.server, "debug", False)
        super().__init__(self.get_handlers(), debug=self.debug)

    def get_handlers(self):
        """Collect handlers from every importer handler list, then append
        the imaging handler."""
        handlers = []
        for handler_list in self.context.modules.importer.handler_lists:
            provider = getattr(handler_list, "get_handlers", None)
            if provider is None:
                continue
            handlers += provider(self.context)
        # Imaging handler (GET)
        handlers.append(
            (Url.regex(), ImagingHandler, {"context": self.context})
        )
        return handlers
| mit | 6f14d5f72c3c5d2343909ba3ead9f8ec | 27.615385 | 72 | 0.655914 | 3.783051 | false | false | false | false |
thumbor/thumbor | setup.py | 1 | 4931 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
import glob
import logging
import os
from setuptools import Extension, setup
from pathlib import Path
# PyPI long description is taken verbatim from the README next to setup.py.
this_directory = Path(__file__).parent
long_description = (this_directory / "README.mkd").read_text()
# When building from a clean source tree the package may not be importable
# yet (e.g. C extensions unbuilt); fall back to a placeholder version.
try:
    from thumbor import __version__
except ImportError:
    __version__ = "0.0.0"

# Requirements for running the test suite (extras: "tests").
TESTS_REQUIREMENTS = [
    "coverage==6.*,>=6.3.2",
    "flake8==3.*,>=3.7.9",
    "isort==4.*,>=4.3.21",
    "pre-commit==2.*,>=2.17.0",
    "preggy==1.*,>=1.4.4",
    "pylint==2.*,>=2.4.4",
    "pyssim==0.*,>=0.4.0",
    "pytest>=6.2.5",
    "pytest-asyncio==0.*,>=0.10.0",
    "pytest-cov==3.*,>=3.0.0",
    "pytest-tldr==0.*,>=0.2.1",
    "pytest-xdist==2.*,>=2.4.0",
    "redis==4.*,>=4.2.2",
    "remotecv>=2.3.0",
    "sentry-sdk==0.*,>=0.14.1",
    "yanc==0.*,>=0.3.3",
]

# Optional OpenCV-backed features (extras: "opencv").
OPENCV_REQUIREMENTS = [
    "opencv-python-headless==4.*,>=4.2.0",
    "numpy==1.*,>=1.18.1",
]

# Optional extra image-format/loader libraries.
EXTRA_LIBS_REQUIREMENTS = [
    # Going to update in a proper commit
    "cairosvg>=2.5.2",
    "pycurl==7.*,>=7.43.0",
    # BUG FIX: ">=1.*" is not a valid PEP 440 specifier (".*" is only
    # permitted with == / !=); the first clause was meant to pin major 1.
    "piexif==1.*,>=1.1.3",
    "pillow-avif-plugin==1.*,>=1.2.2",
    "pillow-heif>=0.7.0",
]

# Extras: "all" pulls in everything optional.
ALL_REQUIREMENTS = OPENCV_REQUIREMENTS + EXTRA_LIBS_REQUIREMENTS
def filter_extension_module(name, lib_objs, lib_headers):
    """Describe the C extension for one native thumbor filter.

    name: module basename under thumbor/ext/filters (without ".c").
    lib_objs/lib_headers: shared helper sources/headers compiled in and
    tracked as dependencies (plus setup.py itself) for rebuilds.
    """
    warning_flags = [
        "-Wall",
        "-Wextra",
        "-Werror",
        "-Wno-unused-parameter",
    ]
    return Extension(
        "thumbor.ext.filters.%s" % name,
        ["thumbor/ext/filters/%s.c" % name, *lib_objs],
        libraries=["m"],
        include_dirs=["thumbor/ext/filters/lib"],
        depends=["setup.py", *lib_objs, *lib_headers],
        extra_compile_args=warning_flags,
    )
def gather_filter_extensions():
    """Build an Extension for every native filter source.

    Filters are the `thumbor/ext/filters/_*.c` files; the shared lib
    sources/headers are attached to each extension.
    """
    lib_objs = glob.glob("thumbor/ext/filters/lib/*.c")
    lib_headers = glob.glob("thumbor/ext/filters/lib/*.h")
    extensions = []
    for path in glob.glob("thumbor/ext/filters/_*.c"):
        module_name = path[0:-2].split("/")[-1]
        extensions.append(
            filter_extension_module(module_name, lib_objs, lib_headers)
        )
    return extensions
def run_setup(extension_modules=None):
    """Invoke setuptools.setup() for thumbor.

    extension_modules: optional list of Extension objects for the native
    filters; omitted (None/empty) for a pure-Python install.
    """
    if extension_modules is None:
        extension_modules = []
    # Make sure CFLAGS exists so the extension build doesn't trip on an
    # unset environment variable.
    if "CFLAGS" not in os.environ:
        os.environ["CFLAGS"] = ""
    metadata = dict(
        name="thumbor",
        version=__version__,
        description="thumbor is an open-source photo thumbnail service by globo.com",
        long_description=long_description,
        long_description_content_type="text/markdown",
        keywords="imaging face detection feature thumbnail imagemagick pil opencv",
        author="globo.com",
        author_email="thumbor@googlegroups.com",
        url="https://github.com/thumbor/thumbor/wiki",
        license="MIT",
        python_requires=">=3.6",
        classifiers=[
            "Development Status :: 4 - Beta",
            "Intended Audience :: Developers",
            "License :: OSI Approved :: MIT License",
            "Natural Language :: English",
            "Operating System :: MacOS",
            "Operating System :: POSIX :: Linux",
            "Programming Language :: Python :: 3",
            "Programming Language :: Python :: 3.6",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "Programming Language :: Python :: 3 :: Only",
            "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
            "Topic :: Multimedia :: Graphics :: Presentation",
        ],
        packages=["thumbor"],
        package_dir={"thumbor": "thumbor"},
        include_package_data=True,
        package_data={"": ["*.xml"]},
        install_requires=[
            "colorama==0.*,>=0.4.3",
            "derpconf==0.*,>=0.8.3",
            "libthumbor==2.*,>=2.0.2",
            "Pillow>=9.0.0",
            "pytz>=2019.3.0",
            "statsd==3.*,>=3.3.0",
            "tornado==6.*,>=6.0.3",
            "thumbor-plugins-gifv==0.*,>=0.1.2",
            "webcolors==1.*,>=1.10.0",
        ],
        extras_require={
            "all": ALL_REQUIREMENTS,
            "opencv": OPENCV_REQUIREMENTS,
            "tests": ALL_REQUIREMENTS + TESTS_REQUIREMENTS,
        },
        entry_points={
            "console_scripts": [
                "thumbor=thumbor.server:main",
                "thumbor-url=thumbor.url_composer:main",
                "thumbor-config=thumbor.config:generate_config",
                "thumbor-doctor=thumbor.doctor:main",
            ],
        },
        ext_modules=extension_modules,
    )
    setup(**metadata)
# First try a full build including the native filter extensions; if that
# aborts (setuptools raises SystemExit when compilation fails, e.g. no C
# compiler or headers), log it and fall back to a pure-Python install.
try:
    run_setup(gather_filter_extensions())
except SystemExit as exit_error:
    print(f"\n\n{'*' * 66}")
    logging.exception(exit_error)
    print(f"\n\n{'*' * 66}")
    print(
        "Couldn't build one or more native extensions"
        ", skipping compilation.\n\n"
    )
    # Retry without any ext_modules.
    run_setup()
| mit | 96297e0df4d2b9c491dca1017cd830b5 | 28.704819 | 85 | 0.547151 | 3.259088 | false | false | false | false |
danielperna84/pyhomematic | pyhomematic/connection.py | 1 | 7194 | import logging
from pyhomematic import _hm
LOG = logging.getLogger(__name__)
class HMConnection():
    """Convenience wrapper around pyhomematic's ServerThread.

    Creates (and optionally starts) the XML-RPC server thread to which a
    CCU / Homegear delivers events, and proxies the server's RPC helper
    methods. Every proxy method is a no-op returning None while no server
    thread exists (creation failed or stop() was called).
    """
    def __init__(self,
                 local=_hm.LOCAL,
                 localport=_hm.LOCALPORT,
                 remotes=_hm.REMOTES,
                 remote=None,
                 remoteport=None,
                 devicefile=_hm.DEVICEFILE,
                 paramsetfile=_hm.PARAMSETFILE,
                 interface_id=_hm.INTERFACE_ID,
                 autostart=False,
                 eventcallback=False,
                 systemcallback=False,
                 resolvenames=None,
                 resolveparamsets=False,
                 rpcusername=None,
                 rpcpassword=None):
        """
        Helper function to quickly create the server thread to which the CCU / Homegear will emit events.
        Without specifying the remote data we'll assume we're running Homegear on localhost on the default port.
        """
        import copy  # local import: only needed to guard the shared default below

        LOG.debug("HMConnection: Creating server object")

        # Device-storage (shared module-level registries from _hm)
        self.devices = _hm.devices
        self.devices_all = _hm.devices_all
        self.devices_raw = _hm.devices_raw
        self.devices_raw_dict = _hm.devices_raw_dict
        self.paramsets = _hm.paramsets

        # No server until creation below succeeds; every proxy method
        # checks for None before delegating.
        self._server = None

        if remote and remoteport:
            # BUG FIX: the default `remotes` argument is the module-level
            # _hm.REMOTES dict. Mutating it in place leaked one connection's
            # ip/port/credentials into every later HMConnection. Work on a
            # deep copy instead.
            remotes = copy.deepcopy(remotes)
            remotes['default']['ip'] = remote
            remotes['default']['port'] = remoteport
            if resolvenames:
                remotes['default']['resolvenames'] = resolvenames
            if rpcusername:
                remotes['default']['username'] = rpcusername
            if rpcpassword:
                remotes['default']['password'] = rpcpassword

        try:
            self._server = _hm.ServerThread(local=local,
                                            localport=localport,
                                            remotes=remotes,
                                            devicefile=devicefile,
                                            paramsetfile=paramsetfile,
                                            interface_id=interface_id,
                                            eventcallback=eventcallback,
                                            systemcallback=systemcallback,
                                            resolveparamsets=resolveparamsets)

        except Exception as err:
            LOG.critical("Failed to create server %s", err)
            LOG.debug(str(err))

        if autostart:
            self.start()

    def start(self, *args, **kwargs):
        """
        Start the server thread if it wasn't created with autostart = True.
        Returns True on success, False otherwise.
        """
        if args:
            LOG.debug("args: %s" % str(args))
        if kwargs:
            LOG.debug("kwargs: %s" % str(kwargs))
        if self._server is None:
            # Server creation failed in __init__ (or stop() was called);
            # the old code raised AttributeError here.
            LOG.critical("Failed to start server: no server object")
            return False
        try:
            self._server.start()
            self._server.proxyInit()
            return True
        except Exception as err:
            LOG.critical("Failed to start server: %s", err)
            LOG.debug(str(err))
            self._server.stop()
            return False

    def stop(self, *args, **kwargs):
        """
        Stop the server thread and clear the shared device registries.
        Returns True on success, False otherwise.
        """
        if args:
            LOG.debug("args: %s" % str(args))
        if kwargs:
            LOG.debug("kwargs: %s" % str(kwargs))
        try:
            self._server.stop()
            self._server = None
            # Device-storage clear
            self.devices.clear()
            self.devices_all.clear()
            self.devices_raw.clear()
            self.devices_raw_dict.clear()
            return True
        except Exception as err:
            LOG.critical("Failed to stop server")
            LOG.debug(str(err))
            return False

    def reconnect(self):
        """Reinit all RPC proxy."""
        if self._server is not None:
            self._server.proxyInit()

    def getAllSystemVariables(self, remote):
        """Get all system variables from CCU / Homegear"""
        if self._server is not None:
            return self._server.getAllSystemVariables(remote)

    def getSystemVariable(self, remote, name):
        """Get single system variable from CCU / Homegear"""
        if self._server is not None:
            return self._server.getSystemVariable(remote, name)

    def deleteSystemVariable(self, remote, name):
        """Delete a system variable from CCU / Homegear"""
        if self._server is not None:
            return self._server.deleteSystemVariable(remote, name)

    def setSystemVariable(self, remote, name, value):
        """Set a system variable on CCU / Homegear"""
        if self._server is not None:
            return self._server.setSystemVariable(remote, name, value)

    def getServiceMessages(self, remote):
        """Get service messages from CCU / Homegear"""
        if self._server is not None:
            return self._server.getServiceMessages(remote)

    def rssiInfo(self, remote):
        """Get RSSI information for all devices from CCU / Homegear"""
        if self._server is not None:
            return self._server.rssiInfo(remote)

    def setInstallMode(self, remote, on=True, t=60, mode=1, address=None):
        """Activate or deactivate installmode on CCU / Homegear"""
        if self._server is not None:
            return self._server.setInstallMode(remote, on, t, mode, address)

    def getInstallMode(self, remote):
        """Get remaining time in seconds install mode is active from CCU / Homegear"""
        if self._server is not None:
            return self._server.getInstallMode(remote)

    def getAllMetadata(self, remote, address):
        """Get all metadata of device"""
        if self._server is not None:
            return self._server.getAllMetadata(remote, address)

    def getMetadata(self, remote, address, key):
        """Get metadata of device"""
        if self._server is not None:
            # BUG FIX: previously delegated to getAllMetadata with an extra
            # `key` argument (the mismatch was silenced with a pylint
            # disable); call the matching server method instead.
            return self._server.getMetadata(remote, address, key)

    def setMetadata(self, remote, address, key, value):
        """Set metadata of device"""
        if self._server is not None:
            # BUG FIX: previously delegated to getAllMetadata with extra
            # `key`/`value` arguments; call the matching server method.
            return self._server.setMetadata(remote, address, key, value)

    def deleteMetadata(self, remote, address, key):
        """Delete metadata of device"""
        if self._server is not None:
            # pylint: disable=E1121
            return self._server.deleteMetadata(remote, address, key)

    def listBidcosInterfaces(self, remote):
        """Return all available BidCos Interfaces"""
        if self._server is not None:
            return self._server.listBidcosInterfaces(remote)

    def ping(self, remote):
        """Send ping to CCU/Homegear to generate PONG-event"""
        if self._server is not None:
            self._server.ping(remote)

    def homegearCheckInit(self, remote):
        """Check if proxy is still initialized"""
        if self._server is not None:
            return self._server.homegearCheckInit(remote)

    def putParamset(self, remote, address, paramset, value, rx_mode=None):
        """Set paramsets manually"""
        if self._server is not None:
            return self._server.putParamset(remote, address, paramset, value, rx_mode)
| mit | 8b883e565dd8eff02159bb2865b76ddd | 35.892308 | 112 | 0.560189 | 4.550285 | false | false | false | false |
danielperna84/pyhomematic | manual_test.py | 1 | 7515 | import time
import sys
import logging
import click
from pyhomematic import HMConnection
from pyhomematic.devicetypes.actors import GenericSwitch
from pyhomematic.devicetypes.helper import HelperLowBat, HelperSabotage, HelperWorking, HelperBatteryState, \
HelperValveState, HelperInhibit
from pyhomematic.devicetypes.sensors import WeatherSensor, AreaThermostat, ShutterContact, Smoke, Motion, Remote
from pyhomematic.devicetypes.thermostats import HMThermostat, IPThermostat
def systemcallback(src, *args):
    """Print a framed dump of a system callback: the source followed by
    each positional argument, one per line."""
    lines = ["##### SYSTEMCALLBACK #######", src]
    lines.extend(args)
    lines.append("############################")
    for line in lines:
        print(line)
def eventcallback(address, interface_id, key, value):
    """Print a one-line dump of a device event (address, interface, datapoint, value)."""
    message = "## CALLBACK: %s, %s, %s, %s ##" % (address, interface_id, key, value)
    print(message)
@click.command()
@click.option("--local", "-l", default="0.0.0.0", help="Local address for server")
@click.option("--localPort", "-lp", default=0, help="Local Port for server")
@click.option("--remote", "-r", help="Remote address for CCU/homegear")
@click.option("--remotePort", "-rp", default=2001, help="Remote port for CCU/homegear")
@click.option("--address", "-a", help="Address of homematic device for tests")
@click.option("--channel", "-c", default=None, help="Homematic device channel")
@click.option("--state", "-s", default=1, help="Set STATE value for actors")
@click.option("--toggle", "-to", is_flag=True, help="Set STATE is this activated")
@click.option("--timer", "-t", default=30, help="Time in sec for waiting of events (debug)")
@click.option("--debug", "-d", is_flag=True, help="Use DEBUG instead INFO for logger")
@click.option("--user", "-u", default="Admin", help="Username")
@click.option("--password", "-p", default="", help="Password")
@click.option("--variable", "-v", default=None, help="Variable for set data")
@click.option("--data", "-vd", default=None, help="Input data for variable")
def cli(local, localport, remote, remoteport, address, channel, state, toggle,
        timer, debug, user, password, variable, data):
    """Manual smoke test: connect to a CCU/Homegear, dump system variables,
    optionally exercise one device, then wait for events."""
    # debug?
    if debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    try:
        # Connect to HM
        pyhomematic = HMConnection(interface_id="testpyhomatic",
                                   local=local,
                                   localport=localport,
                                   remote=remote,
                                   remoteport=remoteport,
                                   autostart=True,
                                   rpcusername=user,
                                   rpcpassword=password,
                                   systemcallback=systemcallback)
    except Exception:
        print("Can't init HMConnection!")
        sys.exit(1)

    # Give the server up to ~20s to populate the device registry.
    sleepcounter = 0
    while not pyhomematic.devices and sleepcounter < 20:
        print("Waiting for devices")
        sleepcounter += 1
        time.sleep(1)
    print(pyhomematic.devices)

    # read system variables
    print("******************************")
    print("Read all: %s" % str(pyhomematic.getAllSystemVariables('default')))
    if variable is not None:
        # BUG FIX: HMConnection.setSystemVariable / getSystemVariable take
        # the remote name as their first argument (cf. the
        # getAllSystemVariables('default') call above); the old calls
        # omitted it and raised TypeError.
        pyhomematic.setSystemVariable('default', variable, data)
        print("Read: %s" % str(pyhomematic.getSystemVariable('default', variable)))
    print("******************************")

    # need test a hm object?
    if address in pyhomematic.devices:
        device = pyhomematic.devices[address]

        print("******************************")
        print("* Show metadata from %s" % address)
        print("* Elements: %s / Childs: %i" % (device.ELEMENT, len(device.CHANNELS)))
        print("* Class: %s" % str(device.__class__))
        print("* Base: %s" % str(device.__class__.__bases__))
        print("* Sensor datapoint: %s" % str(device.SENSORNODE))
        print("* Binary datapoint: %s" % str(device.BINARYNODE))
        print("* Write datapoint: %s" % str(device.WRITENODE))
        print("* Attribute datapoint: %s" % str(device.ATTRIBUTENODE))
        print("* Event datapoint: %s" % str(device.EVENTNODE))
        print("* Action datapoint: %s" % str(device.ACTIONNODE))
        print("******************************")

        # WeatherSensor
        if isinstance(device, WeatherSensor):
            print(" / Temperature: %f" % device.get_temperature())
            print(" / Humidity: %i" % device.get_humidity())
            print(" / Rain Counter: %f" % device.get_rain_counter())
            print(" / Wind Speed: %f" % device.get_wind_speed())
            print(" / Wind Direction: %i" % device.get_wind_direction())
            print(" / Wind Direction Range: %i" % device.get_wind_direction_range())
            print(" / Sunshineduration: %i" % device.get_sunshineduration())
            print(" / Brightness: %i" % device.get_brightness())
            print(" / Is Raining: %s" % str(device.is_raining()))

        # AreaThermostat
        if isinstance(device, AreaThermostat):
            print(" / Temperature: %f" % device.get_temperature())
            print(" / Humidity: %i" % device.get_humidity())

        # ShutterContact
        if isinstance(device, ShutterContact):
            print(" / Contact open: %s" % str(device.is_open()))

        # Smoke
        if isinstance(device, Smoke):
            print(" / Smoke detect: %s" % str(device.is_smoke()))

        # Motion
        if isinstance(device, Motion):
            print(" / Motion detect: %s" % str(device.is_motion()))
            print(" / Brightness: %i" % device.get_brightness())

        # Remote
        if isinstance(device, Remote):
            print(" / is a Remote")
            if toggle:
                print(" / Press short/long")
                device.press_long(channel)
                device.press_short(channel)

        # Switch
        if isinstance(device, GenericSwitch):
            print(" / Switch is on: %s" % str(device.is_on(channel)))
            if toggle:
                # fixed typo in the status message ("Changee" -> "Change")
                print(" / Change state to: %s" % str(bool(state)))
                device.set_state(bool(state), channel)
                print(" / Switch is on: %s" % str(device.is_on(channel)))

        # Thermostat
        if isinstance(device, HMThermostat):
            print(" / Working mode: %i" % device.MODE)
            print(" / Target temperature: %.1f" % device.get_set_temperature())
            print(" / Actual temperature: %.1f" % device.actual_temperature())
        if isinstance(device, IPThermostat):
            print(" / Window is opened: %s" % str(bool(device.get_window_state())))

        ########### Attribute #########
        print(" / RSSI_PEER: %i" % device.get_rssi())

        if isinstance(device, HelperLowBat):
            print(" / Low batter: %s" % str(device.low_batt()))
        if isinstance(device, HelperSabotage):
            print(" / Sabotage: %s" % str(device.sabotage()))
        if isinstance(device, HelperWorking):
            print(" / Working: %s" % str(device.is_working()))
        if isinstance(device, HelperInhibit):
            print(" / Inhibit: %s" % str(device.get_inhibit()))
        if isinstance(device, HelperValveState):
            print(" / Valve state: %i" % device.valve_state())
        if isinstance(device, HelperBatteryState):
            print(" / Battery state: %f" % device.battery_state())

    # do nothing for show & debug events
    print("Now waiting for events/callback")
    time.sleep(timer)

    # end
    pyhomematic.stop()
# Entry point: click parses the options declared on cli() and runs it.
if __name__ == "__main__":
    cli()
| mit | 27d88c5e9b50ea6b3c68e7e981041cf7 | 40.519337 | 112 | 0.576447 | 3.881715 | false | false | false | false |
arq5x/gemini | gemini/annotation_provenance/gene_table/map_entrez.py | 8 | 1549 | #!/usr/bin/env python
from collections import defaultdict
# Join a gene table with Ensembl-transcript -> Entrez-id mappings and write
# the result (gene table columns + entrez_id) to 'raw_gene_table'.
files = 'raw_gene_table'
outfile = open(files, 'w')
outfile.write("\t".join(["Chromosome","HGNC_symbol","Ensembl_gene_id","Ensembl_transcript_id","Biotype","Transcript_status",
                 "CCDS_id","HGNC_id","CDS_length","Protein_length","transcript_start","transcript_end",
                 "strand","Previous_symbol","Synonymous","entrez_id"]))
outfile.write("\n")

# transcript id -> list of Entrez ids (one transcript can map to several).
entrez = defaultdict(list)
for lines in open("ensembl75_3",'r'):
    if lines.startswith("Ensembl") is False:
        seq = lines.strip().split("\t")
        (key,value) = (seq[1],seq[2])
        entrez[key].append(value)

with open("gene_table") as f:
    for each in f:
        if each.startswith("Chromosome") is False:
            field = each.strip().split("\t")
            transcript = field[3]
            if transcript in entrez:
                values = entrez[transcript]
            else:
                # return none for entrez where there is no mapping
                values = ["None"]
            # BUG FIX: the write used to sit outside this loop, so a
            # transcript with several Entrez ids emitted only the last
            # mapping; now one output row is written per Entrez id.
            for value in values:
                sequence = field[0:15] + [value]
                outfile.write("\t".join(sequence))
                outfile.write("\n")
outfile.close()
| mit | 51616c15ee04c86cadc86b883ddc74e8 | 40.864865 | 178 | 0.5694 | 3.449889 | false | false | false | false |
arq5x/gemini | gemini/gemini_query.py | 3 | 5404 | #!/usr/bin/env python
from __future__ import absolute_import
import os
from collections import defaultdict
# gemini imports
from gemini import GeminiQuery
from gemini.GeminiQuery import select_formatter
from gemini.gemini_constants import *
from gemini.gemini_region import add_region_to_query
from gemini.gemini_subjects import (get_subjects, get_subjects_in_family,
get_family_dict)
from gemini.dgidb import query_dgidb
def all_samples_predicate(args):
    """Build a predicate over query rows that applies the sample filter
    to every subject in the database (see select_subjects_predicate)."""
    all_subjects = list(get_subjects(args).values())
    return select_subjects_predicate(all_subjects, args)
def family_wise_predicate(args):
    """Build a predicate that passes a row only when at least
    args.min_kindreds families individually satisfy the per-family
    subject predicate."""
    # NOTE: removed an unused `select_formatter(args)` / GeminiQuery(args.db)
    # pair here — the GeminiQuery instance was created (opening the database)
    # and never used.
    families = get_family_dict(args)
    predicates = []
    for f in families.values():
        family_names = [x.name for x in f]
        subjects = get_subjects_in_family(args, f).values()
        predicates.append(select_subjects_predicate(subjects, args,
                                                    family_names))

    def predicate(row):
        # count how many family predicates the row satisfies
        return sum([p(row) for p in predicates]) >= args.min_kindreds
    return predicate
def select_subjects_predicate(subjects, args, subset=None):
    """Combine the carrier predicates selected by args.in_subject
    ("all"/"none"/"only"/"any", optionally negated by "not") into a
    single row predicate over the given subjects."""
    names = set(s.name for s in subjects)
    builders = (
        ("all", lambda: variant_in_all_subjects(names)),
        ("none", lambda: variant_not_in_subjects(names)),
        ("only", lambda: variant_only_in_subjects(names, subset)),
        ("any", lambda: variant_in_any_subject(names)),
    )
    predicates = [build() for keyword, build in builders
                  if keyword in args.in_subject]
    negate = "not" in args.in_subject

    def predicate(row):
        verdict = all(p(row) for p in predicates)
        return not verdict if negate else verdict
    return predicate
def variant_in_any_subject(subjects):
    """Predicate: at least one of *subjects* carries the row's variant."""
    def predicate(row):
        carriers = samples_with_variant(row)
        return len(subjects.intersection(carriers)) > 0
    return predicate
def variant_in_all_subjects(subjects):
    """Predicate: every one of *subjects* carries the row's variant."""
    def predicate(row):
        carriers = samples_with_variant(row)
        return subjects.issubset(carriers)
    return predicate
def variant_only_in_subjects(subjects, subset=None):
    """Predicate: the carriers (optionally restricted to *subset*) are
    non-empty and fall entirely inside *subjects*.

    Preserves the original truthiness contract: when the carrier
    collection is empty/falsy, that collection itself is returned
    (falsy) rather than False.
    """
    def predicate(row):
        carriers = samples_with_variant(row)
        check = set(subset).intersection(carriers) if subset else carriers
        return check and subjects.issuperset(check)
    return predicate
def variant_not_in_subjects(subjects):
    """Predicate: none of *subjects* carries the row's variant."""
    def predicate(row):
        carriers = samples_with_variant(row)
        return not subjects.intersection(carriers)
    return predicate
def samples_with_variant(row):
    """Return the samples recorded as carrying this row's variant."""
    return row["variant_samples"]
def queries_variants(query):
    """True when the query text mentions the variants table (case-insensitive)."""
    lowered = query.lower()
    return "variants" in lowered
def get_row_predicates(args):
    """Return the list of predicates a row must pass to be reported.

    family-wise filtering takes precedence over the plain sample filter
    (mirrors the original if/elif); otherwise no predicates apply.
    """
    if args.family_wise:
        return [family_wise_predicate(args)]
    if args.sample_filter:
        return [all_samples_predicate(args)]
    return []
def needs_genotypes(args):
    """Truthy when the query needs per-sample genotype information.

    Mirrors the original or-chain exactly: returns the first truthy
    option, or the last operand (args.gt_filter) when all are falsy.
    """
    flags = (args.show_variant_samples,
             args.family_wise,
             args.sample_filter,
             args.carrier_summary,
             args.show_families,
             args.gt_filter)
    for flag in flags:
        if flag:
            return flag
    return flags[-1]
def needs_gene(args):
    """Truthy when gene-level information (DGIdb lookup) is required."""
    return args.dgidb
def add_required_columns_to_query(args):
    """Augment args.query in place with columns implied by other options
    (currently only --region)."""
    if not args.region:
        return
    add_region_to_query(args)
def run_query(args):
    """Execute the user's query against the gemini database and print the
    results, appending DGIdb drug-interaction info when --dgidb is set."""
    predicates = get_row_predicates(args)
    add_required_columns_to_query(args)
    formatter = select_formatter(args)
    genotypes_needed = needs_genotypes(args)
    gene_needed = needs_gene(args)
    try:
        subjects = get_subjects(args)
    except KeyError:
        subjects = []

    # kwargs carries constructor-only options (variant_id_getter for bcolz).
    kwargs = {}
    if args.bcolz:
        from . import gemini_bcolz
        kwargs['variant_id_getter'] = gemini_bcolz.filter

    gq = GeminiQuery.GeminiQuery(args.db, out_format=formatter, **kwargs)
    gq.run(args.query, args.gt_filter, args.show_variant_samples,
           args.sample_delim, predicates, genotypes_needed,
           gene_needed, args.show_families, subjects=subjects)

    if args.use_header and gq.header:
        print(gq.header)

    if not args.dgidb:
        for row in gq:
            print(row)
    else:
        # collect a list of all the genes that need to be queried
        # from DGIdb
        genes = defaultdict()
        for row in gq:
            genes[row['gene']] = True
        # collect info from DGIdb
        dgidb_info = query_dgidb(genes)
        # rerun the query (the cursor is now consumed)
        # BUG FIX: match the first invocation above — kwargs belongs to the
        # GeminiQuery constructor, not to run(); the old code passed it to
        # run(), inconsistently with the first call.
        gq = GeminiQuery.GeminiQuery(args.db, out_format=formatter, **kwargs)
        gq.run(args.query, args.gt_filter, args.show_variant_samples,
               args.sample_delim, predicates, genotypes_needed,
               gene_needed, args.show_families, subjects=subjects)
        # report the query results with DGIdb info added at the end.
        for row in gq:
            print(str(row) + "\t" + str(dgidb_info[row['gene']]))
def query(parser, args):
    """CLI entry point: print usage when no database was given, otherwise
    run the query."""
    if args.db is None:
        parser.print_help()
        # BUG FIX: bail out after showing help — previously run_query was
        # still invoked with args.db == None.
        return
    run_query(args)
if __name__ == "__main__":
    # NOTE(review): `main` is not defined anywhere in this module, so running
    # the file directly raises NameError. The supported entry point appears
    # to be query(parser, args) via gemini's CLI — confirm before relying on
    # this guard.
    main()
| mit | 0d4d3fb277dfca525a57bd9cf1f03643 | 30.418605 | 76 | 0.649149 | 3.668703 | false | false | false | false |
arq5x/gemini | gemini/annotation_provenance/sanitize-esp.py | 3 | 1855 | from __future__ import print_function
import sys
from collections import OrderedDict
# Sanitize an ESP (Exome Sequencing Project) VCF read from stdin and write it
# to stdout. ESP stores the REF allele LAST in its allele-count INFO fields,
# while VCF Number=R semantics put REF first; headers are rewritten and the
# per-record counts rotated accordingly.
j = 0  # 1-based body-record counter, used only in assertion messages
for line in sys.stdin:
    if line[0] == "#":
        # Header lines: fix the INFO declarations for the allele-count fields
        # (Number=. String -> Number=R Integer, description order flipped) and
        # the genotype-string field (Number=. -> Number=G).
        if line.startswith(("##INFO=<ID=EA_AC,", "##INFO=<ID=AA_AC,", "##INFO=<ID=TAC,")):
            line = line.replace(",Number=.,Type=String,", ",Number=R,Type=Integer,")
            line = line.replace("AltAlleles,RefAllele", "RefAllele,AltAlleles")
            assert "Number=R" in line
        elif line.startswith("##INFO=<ID=GTS,"):
            line = line.replace(",Number=.", ",Number=G")
            assert "Number=G" in line
        print(line, end="")
        continue
    j += 1
    # in body, need to adjust GTS, TAC, AA_AC, EA_AC since ESP stores ref last
    # and Number=R means ref should be first.
    fields = line.split("\t")
    # INFO column parsed into an insertion-ordered key->value map so it can be
    # re-joined in the original order below.
    info = OrderedDict((p[0], p[1]) for p in (kv.split("=") for kv in fields[7].split(";")))
    # ignore this for now. need to fix if we use them in the db
    #order = info['GTS'].split(",")
    #A1A1,A1A2,A1A3,A1R,A2A2,A2A3,A2R,A3A3,A3R,RR
    alts = fields[4].split(",")
    # Rotate each allele-count list so the trailing REF count comes first.
    for field in ("TAC", "AA_AC", "EA_AC"):
        vals = info[field].split(",")
        assert len(vals) == len(alts) + 1, (fields, j)
        vals = vals[-1:] + vals[:-1]
        assert len(vals) == len(alts) + 1, (fields, j)
        info[field] = ",".join(vals)
    # k = total allele count (REF + ALTs); a genotype field should have
    # k*(k+1)/2 entries.
    k = len(alts) + 1
    for field in ("GTS", "EA_GTC", "AA_GTC"):
        vals = info[field].split(",")
        # NOTE(review): k*(k+1)/2 is a float under Python 3; the != comparison
        # still behaves correctly for exact triangular counts.
        if len(vals) != k * (k + 1) / 2:
            # X and Y have incorrect numbers here...
            # need to think about this if we end up using them in db
            # but the GT fields are currently not used.
            assert fields[0] in "XY"
            #info[field] = ",".join(vals[-1:] + vals[:-1])
    fields[7] = ";".join("%s=%s" % kv for kv in info.items())
    print("\t".join(fields), end="")
| mit | d146dfa043c91911d706babf3db53fa6 | 35.372549 | 92 | 0.540701 | 3.081395 | false | false | false | false |
arq5x/gemini | gemini/gemini_browser.py | 3 | 9781 | from __future__ import absolute_import
import os
import warnings
import webbrowser
from collections import namedtuple
from gemini import GeminiQuery
from gemini.gim import (AutoDom, AutoRec, DeNovo, MendelViolations, CompoundHet)
database = None
# based upon bottle example here:
# https://bitbucket.org/timtan/bottlepy-in-real-case
# -- determine where I launch python and config lib path
# base_dir = os.path.dirname(__file__)
# third_party_path = os.path.abspath(os.path.join(base_dir, 'third_party' ))
# sys.path.insert(0, third_party_path)
# -- common bottle importation
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from bottle import TEMPLATE_PATH, Bottle, run, static_file, debug, request
from bottle import jinja2_template as template
debug(True)
base_dir = os.path.dirname(__file__)
TEMPLATE_PATH.append(os.path.abspath(os.path.join(base_dir, 'views')))
# -- the instance app is important
app = Bottle()
# -- serve static files, files located in static
static_folder = 'static'
_static_folder = os.path.join(os.path.dirname(__file__), static_folder)
@app.route('/stats/region/:chrom', method='GET')
def stats_region(chrom):
# Note: chrom is give as an argument
# we then extract start and end using HTML GET
start = request.GET.get('start', '').strip()
end = request.GET.get('end', '').strip()
# construct a query
query = "SELECT start, end from variants"
query += " WHERE chrom = '" + chrom + "'"
query += " AND start >= " + start
query += " AND end <= " + end
# issue the query
gq = GeminiQuery.GeminiQuery(database)
gq._set_gemini_browser(True)
gq.run(query)
# return query results in JSON format
return{'features': [dict(row) for row in gq]}
@app.route('/static/<filepath:path>')
def server_static(filepath):
return static_file(filepath, root=_static_folder)
# -- end of static folder configuration
# -- index page routing
@app.route('/index')
@app.route('/')
def index():
return template('index.j2')
@app.route('/query_json', method='GET')
def query_json():
query = request.GET.get('query', '').strip()
gq = GeminiQuery.GeminiQuery(database)
gq._set_gemini_browser(True)
gq.run(query)
return {'gemini_results': [dict(row) for row in gq]}
@app.route('/query', method='GET')
def query():
    """Interactive query page.

    Three modes, based on which button the user pressed:
      * submit  - run the query and render the results inline
      * save    - run the query and dump the results to static/tmp.txt
      * neither - render the empty query form
    """
    def _get_fields():
        # pull the raw form fields from the GET request
        query = request.GET.get('query', '').strip()
        gt_filter = request.GET.get('gt_filter', '').strip()
        use_header = request.GET.get('use_header')
        igv_links = request.GET.get('igv_links')
        return query, gt_filter or None, use_header, igv_links
    # user clicked the "submit" button
    if request.GET.get('submit', '').strip():
        (query, gt_filter, use_header, igv_links) = _get_fields()
        # normalize the checkbox values to booleans
        if use_header: use_header = True
        if igv_links: igv_links = True
        gq = GeminiQuery.GeminiQuery(database)
        gq._set_gemini_browser(True)
        gq.run(query, gt_filter)
        if len(query) == 0:
            return template('query.j2', dbfile=database)
        # IGV links only make sense when chrom, start and end are selected
        igv_links_error = bool(igv_links) and ('chrom' not in query.lower()
                                               or 'start' not in query.lower()
                                               or 'end' not in query.lower())
        return template('query.j2', dbfile=database,
                        rows=gq,
                        igv_links=igv_links,
                        igv_links_error=igv_links_error,
                        use_header=use_header,
                        gt_filter=gt_filter,
                        query=query)
    # user clicked the "save to file" button
    elif request.GET.get('save', '').strip():
        (query, gt_filter, use_header, igv_links) = _get_fields()
        gq = GeminiQuery.GeminiQuery(database)
        gq.run(query, gt_filter)
        if len(query) == 0:
            return template('query.j2', dbfile=database)
        # dump the results to a text file. this will be
        # stored in /static and a link will be given to
        # the user.  A context manager guarantees the handle is
        # closed even if iterating the results raises.
        tmp_file = '/tmp.txt'
        with open(_static_folder + tmp_file, 'w') as tmp:
            for i, row in enumerate(gq):
                if i == 0 and use_header:
                    tmp.write('\t'.join([str(key) for key in row.keys()]) + '\n')
                tmp.write('\t'.join([str(row[key]) for key in row.keys()]) + '\n')
        # NOTE(review): igv_links_error=True is passed unconditionally here,
        # mirroring the original behavior -- verify this is intentional.
        return template('query.j2', dbfile=database,
                        tmp_file=tmp_file,
                        igv_links=igv_links,
                        igv_links_error=True,
                        use_header=use_header,
                        gt_filter=gt_filter,
                        query=query)
    # user did nothing.
    else:
        return template('query.j2', dbfile=database)
# Default column list used by the inheritance-model pages (de_novo,
# auto_rec, auto_dom) when the user does not pick columns explicitly.
default_cols = ['chrom', 'start', 'end', 'ref', 'alt',
                'polyphen_pred', 'sift_pred',
                'max_aaf_all', 'impact', 'impact_severity',
                'gene', 'biotype', 'transcript']
# turn a dictionary into something that can be accessed by attribute.
class Arguments(object):
    """Minimal stand-in for an argparse namespace used by the web tools.

    Fills in the defaults that the inheritance-model tools (DeNovo,
    AutoRec, AutoDom, ...) expect.

    >>> args = Arguments(db='some.db')
    >>> args.db
    'some.db'
    """
    _opts = ("columns", "db", "filter", "min_kindreds", "families",
             "pattern_only", "max_priority",  # only for compound_het
             "allow_unaffected", "min_gq", "lenient", "min_sample_depth")

    def __init__(self, **kwargs):
        # Defaults mirror the command-line tools' defaults.
        kwargs.setdefault('min_gq', 0)
        kwargs.setdefault('lenient', False)
        for k in ("families", "filter", "gt_phred_ll"):
            kwargs.setdefault(k, None)
        kwargs.setdefault('min_sample_depth', 0)
        for k in ("min_kindreds", "max_priority"):
            kwargs.setdefault(k, 1)
        for k in ("pattern_only", "allow_unaffected"):
            kwargs.setdefault(k, False)
        # computed lazily so default_cols is only required when needed
        if 'columns' not in kwargs:
            kwargs['columns'] = ",".join(default_cols)
        self.__dict__.update(**kwargs)
@app.route('/de_novo', method='GET')
def de_novo():
    """Run the de-novo mutation tool and render its results page."""
    submitted = request.GET.get('submit', '').strip()
    if not submitted:
        return template('de_novo.j2', dbfile=database)
    min_sample_depth = str(request.GET.get('min-depth', '10').strip())
    igv_links = request.GET.get('igv_links')
    tool_args = Arguments(db=database, min_sample_depth=min_sample_depth)
    candidates = DeNovo(tool_args).report_candidates()
    return template('de_novo.j2', dbfile=database,
                    rows=candidates,
                    igv_links=igv_links)
@app.route('/auto_rec', method='GET')
def auto_rec():
    """Run the autosomal-recessive tool and render its results page."""
    if not request.GET.get('submit', '').strip():
        return template('auto_rec.j2', dbfile=database)
    depth = str(request.GET.get('min-depth', '10').strip())
    tool_args = Arguments(db=database, min_sample_depth=depth)
    candidates = AutoRec(tool_args).report_candidates()
    return template('auto_rec.j2', dbfile=database, rows=candidates)
@app.route('/auto_dom', method='GET')
def auto_dom():
    """Run the autosomal-dominant tool and render its results page."""
    if not request.GET.get('submit', '').strip():
        return template('auto_dom.j2', dbfile=database)
    depth = str(request.GET.get('min-depth', '10').strip())
    tool_args = Arguments(db=database, min_sample_depth=depth)
    candidates = AutoDom(tool_args).report_candidates()
    return template('auto_dom.j2', dbfile=database, rows=candidates)
@app.route('/db_schema', method='GET')
def db_schema():
    """Render the static page documenting the gemini database schema."""
    schema_page = 'db_schema.j2'
    return template(schema_page)
## Switch between the different available browsers
def browser_puzzle(args):
    """Launch the external 'puzzle' browser on args.db and open it in a tab.

    Raises ImportError with an install hint when puzzle is not available.
    """
    try:
        # Puzzle browser plugin
        from puzzle.server import factory as puzzle_app
        from puzzle.plugins import GeminiPlugin
        from puzzle.server.settings import BaseConfig
    except ImportError as e:
        raise ImportError("%s\nPlease 'pip install puzzle' if you want to run it\n" % e)
    plugin = GeminiPlugin(db=args.db, vtype="sv")
    BaseConfig.PUZZLE_BACKEND = plugin
    BaseConfig.UPLOAD_DIR = os.path.join(os.path.expanduser("~/.puzzle"),
                                         'resources')
    puzzle_srv = puzzle_app.create_app(config_obj=BaseConfig)
    webbrowser.open_new_tab("http://{}:{}".format(args.host, args.port))
    run(puzzle_srv, host=args.host, port=args.port)
def browser_builtin(args):
    """Serve the built-in bottle browser and open it in a web browser."""
    url = "http://{}:{}".format(args.host, args.port)
    webbrowser.open_new_tab(url)
    run(app, host=args.host, port=args.port, reloader=True, debug=True)
def browser_main(parser, args):
    """Entry point: stash the db path globally and start the chosen browser."""
    global database
    database = args.db
    browser = args.use
    try:
        if browser == "puzzle":
            browser_puzzle(args)
        # XXX: https://github.com/dgaston/kvasir
        #if args.use == "kvasir":
        #    raise NotImplementedError
        elif browser == "builtin":
            browser_builtin(args)
        else:
            raise NotImplementedError("GEMINI-compatible Browser '{browser}' not found.".format(browser=browser))
    except ImportError as e:
        raise ImportError("{exc}\nIs {browser} correctly installed?".format(exc=e, browser=browser))
| mit | fc3ccb6b555a24d8ea5a433f9754a6db | 31.387417 | 113 | 0.587159 | 3.584097 | false | false | false | false |
arq5x/gemini | gemini/gemini_dump.py | 3 | 3806 | #!/usr/bin/env python
from __future__ import absolute_import
from . import compression as Z
from . import database
from . import gemini_utils as util
from .GeminiQuery import GeminiQuery
import sqlalchemy as sql
def get_variants(conn, metadata, args):
    """
    Report all columns in the variant table, except for the
    genotype vectors.

    Prints one row per variant to stdout joined by args.separator;
    NULL values are rendered as '.'.
    """
    query = "SELECT * FROM variants \
             ORDER BY chrom, start"
    res = conn.execute(query)
    # build a list of all the column indices that are NOT
    # gt_* columns. These will be the columns reported
    (col_names, non_gt_idxs) = \
        util.get_col_names_and_indices(metadata.tables["variants"], ignore_gt_cols=True)
    if args.use_header:
        print(args.separator.join(col for col in col_names))
    # FIX: the original referenced the name `unicode`, which does not exist
    # on Python 3 and raised a NameError there.  On Python 2, unicode values
    # must still be encoded to utf-8 bytes before printing.
    if str is bytes:  # Python 2
        def _render(value):
            if value is None:
                return '.'
            if isinstance(value, unicode):  # noqa: F821 (py2-only branch)
                return value.encode('utf-8')
            return str(value)
    else:  # Python 3: str is already text
        def _render(value):
            return '.' if value is None else str(value)
    for row in res:
        print(args.separator.join(_render(row[i]) for i in non_gt_idxs))
def get_genotypes(conn, metadata, args):
    """For each variant, report each sample's genotype
    on a separate line.

    Each output line is the (non-genotype) variant columns followed by the
    sample name and its genotype string, joined by args.separator.
    """
    idx_to_sample = util.map_indices_to_samples(metadata)
    query = "SELECT v.chrom, v.start, v.end, \
             v.ref, v.alt, \
             v.type, v.sub_type, \
             v.aaf, v.in_dbsnp, v.gene, \
             v.gts \
             FROM variants v \
             ORDER BY chrom, start"
    res = conn.execute(sql.text(query))
    # build a list of all the column indices that are NOT
    # gt_* columns. These will be the columns reported
    (col_names, non_gt_idxs) = \
        util.get_col_names_and_indices(metadata.tables["variants"], ignore_gt_cols=True)
    col_names.append('sample')
    col_names.append('genotype')
    if args.use_header:
        print(args.separator.join(col for col in col_names))
    # The gts column is a compressed blob; try zlib-based unpacking first
    # and fall back to snappy if the database was built with that codec.
    unpack = Z.unpack_genotype_blob
    import zlib
    for row in res:
        try:
            gts = unpack(row['gts'])
        except zlib.error:
            # switch codecs for this and all subsequent rows
            unpack = Z.snappy_unpack_blob
            gts = unpack(row['gts'])
        for idx, gt in enumerate(gts):
            # range(len(row)-1) to avoid printing v.gts
            a = args.separator.join(str(row[i]) for i in range(len(row)-1))
            b = args.separator.join([idx_to_sample[idx], gt])
            print(args.separator.join((a, b)))
def get_samples(conn, metadata, args):
    """
    Report all of the information about the samples in the DB,
    one sample per line, NULLs rendered as '.'.
    """
    res = conn.execute("SELECT * FROM samples")
    col_names, col_idxs = util.get_col_names_and_indices(metadata.tables["samples"])
    if args.use_header:
        print(args.separator.join(col_names))
    for row in res:
        rendered = ["." if row[i] is None else str(row[i])
                    for i in range(len(row))]
        print(args.separator.join(rendered))
def tfam(args):
    """
    Report the information about the samples in the DB in TFAM format:
    http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml
    """
    gq = GeminiQuery(args.db)
    gq.run("select family_id, name, paternal_id, maternal_id, "
           "sex, phenotype from samples")
    for row in gq:
        fields = (row['family_id'], row['name'], row['paternal_id'],
                  row['maternal_id'], row['sex'], row['phenotype'])
        print(" ".join(str(f) for f in fields))
def dump(parser, args):
    """Dispatch to the requested dump mode (variants/genotypes/samples/tfam)."""
    conn, metadata = database.get_session_metadata(args.db)
    # first matching mode wins, mirroring the original if/elif chain
    for requested, action in ((args.variants, get_variants),
                              (args.genotypes, get_genotypes),
                              (args.samples, get_samples)):
        if requested:
            action(conn, metadata, args)
            return
    if args.tfam:
        tfam(args)
| mit | afa58cabcd98ddd572ead3f1f064802d | 31.529915 | 88 | 0.581976 | 3.573709 | false | false | false | false |
arq5x/gemini | gemini/gemini_load_chunk.py | 3 | 40226 | #!/usr/bin/env python
from __future__ import print_function, absolute_import
# native Python imports
import os.path
import re
import sys
import sqlalchemy as sql
import numpy as np
import shutil
import uuid
import geneimpacts
# third-party imports
import cyvcf2 as vcf
# gemini modules
from . import version
from .ped import load_ped_file
from . import gene_table
from . import infotag
from . import database
from . import annotations
from . import popgen
from . import structural_variants as svs
from .gemini_constants import *
from .compression import pack_blob
from .config import read_gemini_config
class empty(object):
    """Null object: every attribute lookup returns None."""
    def __getattr__(self, _name):
        return None
# Replace the class with a module-level singleton instance, so code can
# read `empty.anything` and always get None (used as a default top_impact).
empty = empty()
def get_phred_lik(gt_phred_likelihoods, dtype=np.int32, empty_val=-1):
    """
    Force each sample to have 3 GL's (0/0, 0/1, 1/1).
    If no samples have GL's, then we just return None to save space.
    """
    cap = np.iinfo(dtype).max - 1
    filler = [empty_val] * 3
    rows = []
    found_any = False
    for sample_pls in gt_phred_likelihoods:
        # only trust PL rows that already have exactly 3 values
        if isinstance(sample_pls, (list, tuple)) and len(sample_pls) == 3:
            rows.append([empty_val if p is None else min(cap, int(p))
                         for p in sample_pls])
            found_any = True
        else:
            rows.append(filler)
    if not found_any:
        return None
    return np.array(rows, dtype=dtype)
def get_extra_effects_fields(args):
    """Retrieve additional effects fields contained in the VCF.
    Useful for merging VEP databases with additional fields.
    """
    loader = GeminiLoader(args, prepare_db=False)
    return [db_col for (db_col, _vcf_name) in loader._extra_effect_fields]
def load_clinvar(cpath):
    """Build a {(chrom, gene): 'disease1|disease2|...'} lookup from a
    ClinVar VCF at *cpath*.

    Disease names come from the CLNDN INFO field (non-ascii characters
    stripped), are de-duplicated, sorted and lower-cased; genes come from
    GENEINFO ('gene:id|gene:id' format).
    """
    from cyvcf2 import VCF
    lookup = {}
    for v in VCF(cpath):
        info = v.INFO
        gene = info.get('GENEINFO')
        if gene is None: continue
        diseases = [x.encode('ascii', 'ignore').decode('ascii', 'ignore') for x in info.get('CLNDN', '.').split("|") if x not in (".", "not_specified", "not_provided")]
        if diseases == []: continue
        genes = [x.split(":")[0] for x in gene.split("|")]
        for gene in genes:
            key = v.CHROM, gene
            if key in lookup:
                lookup[key].extend(diseases)
            else:
                lookup[key] = diseases
    for k in lookup:
        lookup[k] = "|".join(sorted(set(lookup[k]))).lower()
    return lookup
def fix_col_name(s, patt=re.compile(r'-|\s|\\')):
    """Sanitize *s* for use as a database column name by replacing
    dashes, whitespace and backslashes with underscores.

    The PED missing-value codes '0' and '-9' pass through unchanged.
    (Raw-string pattern is equivalent to the previous escaped literal.)
    """
    if s in ('0', '-9'): return s
    return patt.sub("_", s)
class GeminiLoader(object):
"""
Object for creating and populating a gemini
database and auxillary data files.
"""
    def __init__(self, args, buffer_size=10000, prepare_db=True):
        """Set up the loader: open the VCF, detect extra VEP CSQ fields,
        and (unless prepare_db is False) create the database schema and
        sample table.

        args        : argparse-style namespace (vcf, db, anno_type, ...)
        buffer_size : number of variant rows buffered per DB insert
        prepare_db  : when False, only attach to an existing DB (used by
                      get_extra_effects_fields)
        """
        self.args = args
        self.seen_multi = False
        # create the gemini database
        # create a reader for the VCF file
        self.vcf_reader = self._get_vcf_reader()
        # load sample information
        # CSQ sub-fields already covered by dedicated schema columns; any
        # others become extra "vep_*" columns.
        expected = "consequence,codons,amino_acids,gene,symbol,feature,exon,polyphen,sift,protein_position,biotype,warning".split(",")
        self._effect_fields = []
        self._extra_effect_fields = []
        if self.args.anno_type == "VEP":
            self._effect_fields = self._get_vep_csq(self.vcf_reader)
            # tuples of (db_column, CSQ name)
            self._extra_effect_fields = [("vep_%s" % fix_col_name(x.lower()), x) for x in self._effect_fields if not x.lower() in expected]
        elif self.args.anno_type == "all":
            # in "all" mode, only harvest CSQ fields when the header has them
            try:
                self.vcf_reader["CSQ"]
            except KeyError:
                pass
            else:
                self._effect_fields = self._get_vep_csq(self.vcf_reader)
                # tuples of (db_column, CSQ name)
                self._extra_effect_fields = [("vep_%s" % fix_col_name(x.lower()), x) for x in self._effect_fields if not x.lower() in expected]
        else:
            self._effect_fields = []
            self._extra_effect_fields = []
        if not prepare_db:
            # attach to the existing database and stop; no schema creation
            self.c, self.metadata = database.get_session_metadata(args.db)
            return
        self._create_db([x[0] for x in self._extra_effect_fields])
        # template dict of extra columns, all None, merged into each variant
        self._extra_empty = dict((x[0], None) for x in self._extra_effect_fields)
        if not self.args.no_genotypes and not self.args.no_load_genotypes:
            # load the sample info from the VCF file.
            self._prepare_samples()
            # initialize genotype counts for each sample
            self._init_sample_gt_counts()
            self.num_samples = len(self.samples)
        else:
            self.num_samples = 0
        self.clinvar_chrom_gene_lookup = load_clinvar(annotations.get_anno_files(self.args)['clinvar'])
        self.buffer_size = buffer_size
        self._get_anno_version()
def store_vcf_header(self):
"""Store the raw VCF header.
"""
database.insert_vcf_header(self.c, self.metadata, self.vcf_reader.raw_header)
def store_resources(self):
"""Create table of annotation resources used in this gemini database.
"""
database.insert_resources(self.c, self.metadata, annotations.get_resources(self.args))
def store_version(self):
"""Create table documenting which gemini version was used for this db.
"""
database.insert_version(self.c, self.metadata, version.__version__.strip())
def _get_vid(self):
if hasattr(self.args, 'offset'):
v_id = int(self.args.offset)
else:
v_id = 1
return v_id
def _multiple_alts_message(self):
self.seen_multi = 1
sys.stderr.write("\n")
sys.stderr.write("warning: variant with multiple alternate alleles found.\n")
sys.stderr.write(" in order to reduce the number of false negatives\n")
sys.stderr.write(" we recommend to split multiple alts. see: \
http://gemini.readthedocs.org/en/latest/content/preprocessing.html#preprocess\n")
    def populate_from_vcf(self):
        """Iterate the VCF and load the variants and variant_impacts
        tables, buffering self.buffer_size rows between inserts.

        Records with no ALT allele are skipped; with --passonly, records
        whose FILTER is set are skipped and counted in self.skipped.
        """
        self.v_id = self._get_vid()
        self.counter = 0
        self.var_buffer = []
        self.var_impacts_buffer = []
        self.skipped = 0
        # need to keep the objects in memory since we just borrow it in python.
        obj_buffer = []
        reader = self.vcf_reader
        # Parse the per-tool annotation sub-field names out of the header
        # Description strings (ANN/EFF for snpEff, CSQ for VEP, BCSQ for
        # bcftools csq).
        anno_keys = {}
        if self.args.anno_type in ("snpEff", "all"):
            if "ANN" in reader:
                desc = reader["ANN"]["Description"]
                parts = [x.strip("\"'") for x in re.split("\s*\|\s*", desc.split(":", 1)[1].strip('" '))]
                anno_keys["ANN"] = parts
            elif "EFF" in reader:
                parts = [x.strip(" [])'(\"") for x in re.split("\||\(", reader["EFF"]["Description"].split(":", 1)[1].strip())]
                anno_keys["EFF"] = parts
            else:
                print("snpEff header not found")
        if self.args.anno_type in ("VEP", "all"):
            if "CSQ" in reader:
                parts = [x.strip(" [])'(\"") for x in re.split("\||\(",
                    reader["CSQ"]["Description"].split(":", 1)[1].strip())]
                anno_keys["CSQ"] = parts
        if self.args.anno_type in ("BCFT", "all"):
            if "BCSQ" in reader:
                desc = reader["BCSQ"]["Description"]
                parts = desc.split(']', 1)[1].split(']')[0].replace('[','').split("|")
                anno_keys['BCSQ'] = parts
        # process and load each variant in the VCF file
        for var in self.vcf_reader:
            if not var.ALT or len(var.ALT) == 0:
                continue
            # warn only on the first multi-allelic site encountered
            if len(var.ALT) > 1 and not self.seen_multi:
                self._multiple_alts_message()
            if self.args.passonly and (var.FILTER is not None and var.FILTER != "."):
                self.skipped += 1
                continue
            (variant, variant_impacts) = self._prepare_variation(var, anno_keys)
            obj_buffer.append(var)
            # add the core variant info to the variant buffer
            self.var_buffer.append(variant)
            # add each of the impact for this variant (1 per gene/transcript)
            self.var_impacts_buffer.extend(variant_impacts)
            # buffer full - time to insert into DB
            if len(self.var_buffer) >= self.buffer_size:
                database.insert_variation(self.c, self.metadata, self.var_buffer)
                sys.stderr.write("pid " + str(os.getpid()) + ": " +
                                 str(self.counter) + " variants processed.\n")
                database.insert_variation_impacts(self.c, self.metadata,
                                                  self.var_impacts_buffer)
                # binary.genotypes.append(var_buffer)
                # reset for the next batch
                obj_buffer = []
                self.var_buffer = []
                self.var_impacts_buffer = []
            self.v_id += 1
            self.counter += 1
        # final load to the database
        self.v_id -= 1
        if self.var_buffer:
            database.insert_variation(self.c, self.metadata, self.var_buffer)
            database.insert_variation_impacts(self.c, self.metadata, self.var_impacts_buffer)
        sys.stderr.write("pid " + str(os.getpid()) + ": " +
                         str(self.counter) + " variants processed.\n")
        if self.args.passonly:
            sys.stderr.write("pid " + str(os.getpid()) + ": " +
                             str(self.skipped) + " skipped due to having the "
                             "FILTER field set.\n")
def _update_extra_headers(self, headers, cur_fields):
"""Update header information for extra fields.
"""
for field, val in cur_fields.items():
headers[field] = self._get_field_type(val, headers.get(field, "integer"))
return headers
def _get_field_type(self, val, cur_type):
start_checking = False
for name, check_fn in [("integer", int), ("float", float), ("text", str)]:
if name == cur_type:
start_checking = True
if start_checking:
try:
check_fn(val)
break
except:
continue
return name
def build_indices_and_disconnect(self):
"""
Create the db table indices and close up
db connection
"""
# index our tables for speed
database.create_indices(self.c)
# commit data and close up
database.close_and_commit(self.c)
def _get_vcf_reader(self):
return vcf.VCFReader(self.args.vcf)
    def _get_anno_version(self):
        """
        Extract the snpEff or VEP version used to annotate the VCF.

        Sets self.args.raw_version (and, for snpEff, maj_version).
        Raises KeyError when anno_type is snpEff but the header lacks the
        SnpEffVersion line.
        """
        # default to unknown version
        self.args.version = None
        if self.args.anno_type == "snpEff":
            try:
                version_string = self.vcf_reader['SnpEffVersion']['SnpEffVersion']
            except KeyError:
                error = ("\nWARNING: VCF is not annotated with snpEff, check documentation at:\n"\
                "http://gemini.readthedocs.org/en/latest/content/functional_annotation.html#stepwise-installation-and-usage-of-snpeff\n")
                raise KeyError(error)
            # e.g., "SnpEff 3.0a (build 2012-07-08), by Pablo Cingolani"
            # or "3.3c (build XXXX), by Pablo Cingolani"
            version_string = version_string.replace('"', '') # No quotes
            toks = version_string.split()
            if "SnpEff" in toks[0]:
                self.args.raw_version = toks[1] # SnpEff *version*, etc
            else:
                self.args.raw_version = toks[0] # *version*, etc
            # e.g., 3.0a -> 3
            self.args.maj_version = int(self.args.raw_version.split('.')[0])
        elif self.args.anno_type == "BCFT":
            # header line looks like:
            ##bcftools/csqVersion=1.3.1-179-gd7f6692+htslib-1.3.2-138-g4811eab
            lines = self.vcf_reader.raw_header.split("\n")
            try:
                x = next(l for l in lines if l.startswith("##bcftools/csqVersion"))
                self.args.raw_version = x[len("##bcftools/csqVersion="):]
            except StopIteration:
                # no version line present; leave raw_version unset
                pass
        elif self.args.anno_type == "VEP":
            # VEP version is not extracted from the header
            pass
def _get_vep_csq(self, reader):
"""
Test whether the VCF header meets expectations for
proper execution of VEP for use with Gemini.
"""
required = ["Consequence"]
try:
parts = reader["CSQ"]["Description"].strip().replace('"', '').split("Format: ")[-1].split("|")
all_found = True
for check in required:
if check not in parts:
all_found = False
break
if all_found:
return parts
except KeyError:
print("VEP: KeyError, did not find expected fields")
# Should not reach this point, badly formatted VEP-VCF annotation
error = "\nERROR: Check gemini docs for the recommended VCF annotation with VEP"\
"\nhttp://gemini.readthedocs.org/en/latest/content/functional_annotation.html#stepwise-installation-and-usage-of-vep"
raise ValueError(error)
def _create_db(self, effect_fields=None):
"""
private method to open a new DB
and create the gemini schema.
"""
# open up a new database
db_path = self.args.db if not hasattr(self.args, 'tmp_db') else self.args.tmp_db
if os.path.exists(db_path):
os.remove(db_path)
self.c, self.metadata = database.create_tables(db_path, effect_fields or [], not self.args.skip_pls)
session = self.c
if session.bind.name == "sqlite":
self.c.execute('PRAGMA synchronous=OFF')
self.c.execute('PRAGMA journal_mode=MEMORY')
# create the gemini database tables for the new DB
database.create_sample_table(self.c, self.metadata, self.args)
    def _prepare_variation(self, var, anno_keys):
        """Collect every database field for a single VCF record.

        Builds the row for the `variants` table plus one row per
        transcript/gene impact for the `variant_impacts` table.

        var       : a cyvcf2 variant record
        anno_keys : dict mapping INFO tag ("ANN"/"EFF"/"CSQ"/"BCSQ") to the
                    ordered sub-field names parsed from the VCF header

        Returns (variant, variant_impacts) where `variant` is a dict of
        column -> value and `variant_impacts` is a list of such dicts.
        """
        extra_fields = {}
        # these metric require that genotypes are present in the file
        call_rate = None
        hwe_p_value = None
        pi_hat = None
        inbreeding_coeff = None
        hom_ref = het = hom_alt = unknown = None
        # only compute certain metrics if genoypes are available
        if not self.args.no_genotypes and not self.args.no_load_genotypes:
            hom_ref = var.num_hom_ref
            hom_alt = var.num_hom_alt
            het = var.num_het
            unknown = var.num_unknown
            call_rate = var.call_rate
            aaf = var.aaf
            hwe_p_value, inbreeding_coeff = \
                popgen.get_hwe_likelihood(hom_ref, het, hom_alt, aaf)
            pi_hat = var.nucl_diversity
        else:
            aaf = infotag.extract_aaf(var)
            # extract_aaf may return a list for multi-allelic sites; keep max
            if not isinstance(aaf, (float, int)):
                if aaf is not None:
                    aaf = max(aaf)
        ############################################################
        # collect annotations from gemini's custom annotation files
        # but only if the size of the variant is <= 50kb
        ############################################################
        if var.end - var.POS < 50000:
            pfam_domain = annotations.get_pfamA_domains(var)
            cyto_band = annotations.get_cyto_info(var)
            rs_ids = annotations.get_dbsnp_info(var)
            clinvar_info = annotations.get_clinvar_info(var)
            in_dbsnp = 0 if rs_ids is None else 1
            rmsk_hits = annotations.get_rmsk_info(var)
            in_cpg = annotations.get_cpg_island_info(var)
            in_segdup = annotations.get_segdup_info(var)
            is_conserved = annotations.get_conservation_info(var)
            esp = annotations.get_esp_info(var)
            thousandG = annotations.get_1000G_info(var)
            recomb_rate = annotations.get_recomb_info(var)
            gms = annotations.get_gms(var)
            grc = annotations.get_grc(var)
            in_cse = annotations.get_cse(var)
            encode_tfbs = annotations.get_encode_tfbs(var)
            encode_dnaseI = annotations.get_encode_dnase_clusters(var)
            encode_cons_seg = annotations.get_encode_consensus_segs(var)
            gerp_el = annotations.get_gerp_elements(var)
            vista_enhancers = annotations.get_vista_enhancers(var)
            cosmic_ids = annotations.get_cosmic_info(var)
            fitcons = annotations.get_fitcons(var)
            Exac = annotations.get_exac_info(var)
            gnomad = annotations.get_gnomad_info(var)
            #load CADD scores by default
            if self.args.skip_cadd is False:
                (cadd_raw, cadd_scaled) = annotations.get_cadd_scores(var)
            else:
                (cadd_raw, cadd_scaled) = (None, None)
            # load the GERP score for this variant by default.
            gerp_bp = None
            if self.args.skip_gerp_bp is False:
                gerp_bp = annotations.get_gerp_bp(var)
        # the variant is too big to annotate
        else:
            pfam_domain = None
            cyto_band = None
            rs_ids = None
            clinvar_info = annotations.ClinVarInfo()
            in_dbsnp = None
            rmsk_hits = None
            in_cpg = None
            in_segdup = None
            is_conserved = None
            esp = annotations.ESPInfo(False, -1, -1, -1, 0)
            thousandG = annotations.EMPTY_1000G
            Exac = annotations.EXAC_EMPTY
            gnomad = annotations.GNOMAD_EMPTY
            recomb_rate = None
            gms = annotations.GmsTechs(None, None, None)
            grc = None
            in_cse = None
            encode_tfbs = None
            encode_dnaseI = annotations.ENCODEDnaseIClusters(None, None)
            encode_cons_seg = annotations.ENCODESegInfo(None, None, None, None, None, None)
            gerp_el = None
            vista_enhancers = None
            cosmic_ids = None
            fitcons = None
            cadd_raw = None
            cadd_scaled = None
            gerp_bp = None
        # parse the functional-impact predictions (snpEff/VEP/bcftools-csq)
        # from the INFO field; `empty` yields None for every attribute when
        # no impacts are present.
        top_impact = empty
        if anno_keys == {}:
            impacts = []
        else:
            impacts = []
            if self.args.anno_type in ("all", "snpEff"):
                try:
                    if "EFF" in anno_keys:
                        impacts += [geneimpacts.OldSnpEff(e, anno_keys["EFF"]) for e in var.INFO["EFF"].split(",")]
                    elif "ANN" in anno_keys:
                        impacts += [geneimpacts.SnpEff(e, anno_keys["ANN"]) for e in var.INFO["ANN"].split(",")]
                except KeyError:
                    pass
            if self.args.anno_type in ("all", "BCFT"):
                try:
                    impacts += [geneimpacts.BCFT(e, anno_keys["BCSQ"]) for e in var.INFO["BCSQ"].split(",")]
                except KeyError:
                    pass
            if self.args.anno_type in ("all", "VEP"):
                try:
                    impacts += [geneimpacts.VEP(e, anno_keys["CSQ"]) for e in var.INFO["CSQ"].split(",")]
                except KeyError:
                    pass
            # 1-based anno_id links each impact row back to the variant
            for i, im in enumerate(impacts, start=1):
                im.anno_id = i
        if impacts != []:
            top_impact = geneimpacts.Effect.top_severity(impacts)
            if isinstance(top_impact, list):
                top_impact = top_impact[0]
        filter = None
        if var.FILTER is not None and var.FILTER != ".":
            if isinstance(var.FILTER, list):
                filter = ";".join(var.FILTER)
            else:
                filter = var.FILTER
        vcf_id = None
        if var.ID is not None and var.ID != ".":
            vcf_id = var.ID
        chrom = var.CHROM if var.CHROM.startswith("chr") else "chr" + var.CHROM
        clinvar_gene_phenotype = None
        if top_impact.gene is not None:
            # lookup is keyed on (chrom-without-"chr"-prefix, gene)
            clinvar_gene_phenotype = self.clinvar_chrom_gene_lookup.get((chrom[3:], top_impact.gene))
        # build up numpy arrays for the genotype information.
        # these arrays will be pickled-to-binary, compressed,
        # and loaded as BLOB values (see compression.pack_blob)
        gt_phred_ll_homref = gt_phred_ll_het = gt_phred_ll_homalt = None
        if not (self.args.no_genotypes or self.args.no_load_genotypes):
            gt_bases = var.gt_bases
            gt_types = var.gt_types
            gt_phases = var.gt_phases
            gt_depths = var.gt_depths
            gt_ref_depths = var.gt_ref_depths
            gt_alt_depths = var.gt_alt_depths
            gt_alt_freqs = var.gt_alt_freqs
            gt_quals = var.gt_quals
            #gt_copy_numbers = np.array(var.gt_copy_numbers, np.float32) # 1.0 2.0 2.1 -1
            gt_copy_numbers = None
            if not self.args.skip_pls:
                gt_phred_ll_homref = var.gt_phred_ll_homref
                gt_phred_ll_het = var.gt_phred_ll_het
                gt_phred_ll_homalt = var.gt_phred_ll_homalt
            # tally the genotypes
            self._update_sample_gt_counts(gt_types)
        else:
            gt_bases = gt_types = gt_phases = gt_depths = gt_ref_depths = None
            gt_alt_freqs = gt_alt_depths = gt_quals = gt_copy_numbers = None
        if self.args.skip_info_string:
            info = None
        else:
            info = dict(var.INFO)
        assert isinstance(thousandG.aaf_AMR, (int, float))
        # were functional impacts predicted by SnpEFF or VEP?
        # if so, build up a row for each of the impacts / transcript
        # extract structural variants
        sv = svs.StructuralVariant(var)
        ci_left = sv.get_ci_left()
        ci_right = sv.get_ci_right()
        # construct the core variant record.
        # 1 row per variant to VARIANTS table
        variant = dict(chrom=chrom, start=var.start, end=var.end,
                       vcf_id=vcf_id, variant_id=self.v_id, anno_id=top_impact.anno_id,
                       ref=var.REF, alt=','.join([x or "" for x in var.ALT]),
                       qual=var.QUAL, filter=filter, type=var.var_type,
                       sub_type=var.var_subtype, gts=pack_blob(gt_bases),
                       gt_types=pack_blob(gt_types),
                       gt_phases=pack_blob(gt_phases), gt_depths=pack_blob(gt_depths),
                       gt_ref_depths=pack_blob(gt_ref_depths), gt_alt_depths=pack_blob(gt_alt_depths),
                       gt_alt_freqs=pack_blob(gt_alt_freqs),
                       gt_quals=pack_blob(gt_quals), gt_copy_numbers=pack_blob(gt_copy_numbers),
                       call_rate=call_rate, in_dbsnp=bool(in_dbsnp),
                       rs_ids=rs_ids,
                       sv_cipos_start_left=ci_left[0],
                       sv_cipos_end_left=ci_left[1],
                       sv_cipos_start_right=ci_right[0],
                       sv_cipos_end_right=ci_right[1],
                       sv_length=sv.get_length(),
                       sv_is_precise=sv.is_precise(),
                       sv_tool=sv.get_sv_tool(),
                       sv_evidence_type=sv.get_evidence_type(),
                       sv_event_id=sv.get_event_id(),
                       sv_mate_id=sv.get_mate_id(),
                       sv_strand=sv.get_strand(),
                       in_omim=bool(clinvar_info.clinvar_in_omim),
                       clinvar_sig=clinvar_info.clinvar_sig,
                       clinvar_disease_name=clinvar_info.clinvar_disease_name,
                       clinvar_origin=clinvar_info.clinvar_origin,
                       clinvar_dsdb=clinvar_info.clinvar_dsdb,
                       clinvar_dsdbid=clinvar_info.clinvar_dsdbid,
                       clinvar_gene_phenotype=clinvar_gene_phenotype,
                       geno2mp_hpo_ct=annotations.get_geno2mp_ct(var),
                       pfam_domain=pfam_domain, cyto_band=cyto_band, rmsk=rmsk_hits,
                       in_cpg_island=bool(in_cpg),
                       in_segdup=bool(in_segdup), is_conserved=bool(is_conserved),
                       gerp_bp_score=gerp_bp, gerp_element_pval=gerp_el,
                       num_hom_ref=hom_ref, num_het=het, num_hom_alt=hom_alt,
                       num_unknown=unknown,
                       aaf=aaf, hwe=hwe_p_value, inbreeding_coeff=inbreeding_coeff,
                       pi=pi_hat,
                       recomb_rate=recomb_rate, gene=top_impact.gene,
                       transcript=top_impact.transcript,
                       is_exonic=top_impact.is_exonic,
                       is_coding=top_impact.is_coding,
                       is_splicing=top_impact.is_splicing,
                       is_lof=top_impact.is_lof, exon=top_impact.exon,
                       codon_change=top_impact.codon_change, aa_change=top_impact.aa_change,
                       aa_length=top_impact.aa_length, biotype=top_impact.biotype,
                       impact=top_impact.top_consequence, impact_so=top_impact.so,
                       impact_severity=top_impact.effect_severity,
                       polyphen_pred=top_impact.polyphen_pred,
                       polyphen_score=top_impact.polyphen_score,
                       sift_pred=top_impact.sift_pred, sift_score=top_impact.sift_score,
                       anc_allele=infotag.get_ancestral_allele(var), rms_bq=infotag.get_rms_bq(var),
                       cigar=infotag.get_cigar(var),
                       depth=infotag.get_depth(var), strand_bias=infotag.get_strand_bias(var),
                       rms_map_qual=infotag.get_rms_map_qual(var), in_hom_run=infotag.get_homopol_run(var),
                       num_mapq_zero=infotag.get_map_qual_zero(var),
                       num_alleles=infotag.get_num_of_alleles(var),
                       num_reads_w_dels=infotag.get_frac_dels(var),
                       haplotype_score=infotag.get_haplotype_score(var),
                       qual_depth=infotag.get_quality_by_depth(var),
                       allele_count=infotag.get_allele_count(var), allele_bal=infotag.get_allele_bal(var),
                       # bools?
                       in_hm2=infotag.in_hm2(var), in_hm3=infotag.in_hm3(var),
                       is_somatic=infotag.is_somatic(var),
                       somatic_score=infotag.get_somatic_score(var),
                       in_esp=esp.found, aaf_esp_ea=esp.aaf_EA,
                       aaf_esp_aa=esp.aaf_AA, aaf_esp_all=esp.aaf_ALL,
                       exome_chip=bool(esp.exome_chip),
                       in_1kg=thousandG.found,
                       aaf_1kg_amr=thousandG.aaf_AMR,
                       aaf_1kg_eas=thousandG.aaf_EAS,
                       aaf_1kg_sas=thousandG.aaf_SAS,
                       aaf_1kg_afr=thousandG.aaf_AFR,
                       aaf_1kg_eur=thousandG.aaf_EUR,
                       aaf_1kg_all=thousandG.aaf_ALL,
                       grc=grc,
                       gms_illumina=gms.illumina,
                       gms_solid=gms.solid,
                       gms_iontorrent=gms.iontorrent, in_cse=in_cse,
                       encode_tfbs=encode_tfbs,
                       encode_dnaseI_cell_count=encode_dnaseI.cell_count,
                       encode_dnaseI_cell_list=encode_dnaseI.cell_list,
                       encode_consensus_gm12878=encode_cons_seg.gm12878,
                       encode_consensus_h1hesc=encode_cons_seg.h1hesc,
                       encode_consensus_helas3=encode_cons_seg.helas3,
                       encode_consensus_hepg2=encode_cons_seg.hepg2,
                       encode_consensus_huvec=encode_cons_seg.huvec,
                       encode_consensus_k562=encode_cons_seg.k562,
                       vista_enhancers=vista_enhancers,
                       cosmic_ids=cosmic_ids,
                       info=pack_blob(info),
                       cadd_raw=cadd_raw,
                       cadd_scaled=cadd_scaled,
                       fitcons=fitcons,
                       in_exac=Exac.found,
                       aaf_exac_all=Exac.aaf_ALL,
                       aaf_adj_exac_all=Exac.adj_aaf_ALL,
                       aaf_adj_exac_afr=Exac.aaf_AFR,
                       aaf_adj_exac_amr=Exac.aaf_AMR,
                       aaf_adj_exac_eas=Exac.aaf_EAS,
                       aaf_adj_exac_fin=Exac.aaf_FIN,
                       aaf_adj_exac_nfe=Exac.aaf_NFE,
                       aaf_adj_exac_oth=Exac.aaf_OTH,
                       aaf_adj_exac_sas=Exac.aaf_SAS,
                       exac_num_het=Exac.num_het,
                       exac_num_hom_alt=Exac.num_hom_alt,
                       exac_num_chroms=Exac.num_chroms,
                       aaf_gnomad_all=gnomad.aaf_ALL,
                       aaf_gnomad_afr=gnomad.aaf_AFR,
                       aaf_gnomad_amr=gnomad.aaf_AMR,
                       aaf_gnomad_asj=gnomad.aaf_ASJ,
                       aaf_gnomad_eas=gnomad.aaf_EAS,
                       aaf_gnomad_fin=gnomad.aaf_FIN,
                       aaf_gnomad_nfe=gnomad.aaf_NFE,
                       aaf_gnomad_oth=gnomad.aaf_OTH,
                       aaf_gnomad_sas=gnomad.aaf_SAS,
                       aaf_gnomad_non_neuro=gnomad.aaf_non_neuro,
                       aaf_gnomad_controls=gnomad.aaf_controls,
                       aaf_gnomad_non_cancer=gnomad.aaf_non_cancer,
                       gnomad_popmax_AF=gnomad.popmax_AF,
                       gnomad_num_het=gnomad.num_het,
                       gnomad_num_hom_alt=gnomad.num_hom_alt,
                       gnomad_num_chroms=gnomad.num_chroms,
                       )
        if not self.args.skip_pls:
            variant['gt_phred_ll_homref'] = pack_blob(gt_phred_ll_homref)
            variant['gt_phred_ll_het'] = pack_blob(gt_phred_ll_het)
            variant['gt_phred_ll_homalt'] = pack_blob(gt_phred_ll_homalt)
        # max observed allele frequency across all population catalogs
        # (-1 means "not seen in any")
        variant['max_aaf_all'] = max(-1,
                                     variant['aaf_esp_ea'],
                                     variant['aaf_esp_aa'],
                                     variant['aaf_1kg_amr'],
                                     variant['aaf_1kg_eas'],
                                     variant['aaf_1kg_sas'],
                                     variant['aaf_1kg_afr'],
                                     variant['aaf_1kg_eur'],
                                     variant['aaf_adj_exac_afr'],
                                     variant['aaf_adj_exac_amr'],
                                     variant['aaf_adj_exac_eas'],
                                     variant['aaf_adj_exac_nfe'],
                                     variant['aaf_adj_exac_sas'],
                                     variant['aaf_gnomad_afr'],
                                     variant['aaf_gnomad_amr'],
                                     variant['aaf_gnomad_eas'],
                                     variant['aaf_gnomad_nfe'],
                                     variant['aaf_gnomad_sas'],
                                     )
        # add the (initially None) extra vep_* columns
        variant.update(self._extra_empty)
        variant_impacts = []
        for idx, impact in enumerate(impacts or [], start=1):
            is_top = impact == top_impact
            # NOTE(review): 'polyphed_pred' below looks like a typo for
            # 'polyphen_pred' -- verify against the variant_impacts schema.
            var_impact = dict(variant_id=self.v_id, anno_id=idx, gene=impact.gene,
                              transcript=impact.transcript, is_exonic=impact.is_exonic,
                              is_coding=impact.is_coding, is_lof=impact.is_lof,
                              is_splicing=impact.is_splicing,
                              exon=impact.exon, codon_change=impact.codon_change,
                              aa_change=impact.aa_change, aa_length=impact.aa_length,
                              biotype=impact.biotype, impact=impact.top_consequence,
                              impact_so=impact.so, impact_severity=impact.effect_severity,
                              polyphed_pred=impact.polyphen_pred, polyphen_score=impact.polyphen_score,
                              sift_pred=impact.sift_pred,
                              sift_score=impact.sift_score)
            for dbkey, infokey in self._extra_effect_fields:
                if not infokey in impact.effects: continue
                if dbkey[-1] == "m" and dbkey.endswith("_num"):
                    try:
                        var_impact[dbkey] = float(impact.effects[infokey])
                        if is_top: variant[dbkey] = float(impact.effects[infokey])
                    except ValueError:
                        var_impact[dbkey] = None
                        if is_top: variant[dbkey] = None
                else:
                    var_impact[dbkey] = impact.effects[infokey]
                    if is_top: variant[dbkey] = impact.effects[infokey]
            variant_impacts.append(var_impact)
        return variant, variant_impacts
def _prepare_samples(self):
    """
    private method to load sample information

    Builds ``self.samples`` and ``self.sample_to_id`` (1-based database
    ids) from the VCF header, optionally loads a PED file, and inserts
    one row per sample into the samples table.
    """
    # NOTE(review): assumes self.samples is already populated elsewhere
    # when --no-genotypes is used -- confirm against the caller.
    if not self.args.no_genotypes:
        self.samples = self.vcf_reader.samples
        self.sample_to_id = {}
        for idx, sample in enumerate(self.samples):
            # sample ids are 1-based in the database
            self.sample_to_id[sample] = idx + 1
    self.ped_hash = {}
    if self.args.ped_file is not None:
        self.ped_hash = load_ped_file(self.args.ped_file)
    sample_list = []
    for sample in self.samples:
        i = self.sample_to_id[sample]
        if sample in self.ped_hash:
            # PED supplies family / parents / sex / phenotype fields
            fields = self.ped_hash[sample]
            sample_list = [i] + fields
        elif len(self.ped_hash) > 0:
            # a PED file was given, so every VCF sample must appear in it
            raise ValueError("Sample %s found in the VCF but "
                             "not in the PED file.\n" % (sample))
        else:
            # if there is no ped file given, just fill in the name and
            # sample_id and set the other required fields to None
            sample_list = [i, 0, sample, 0, 0, -9, -9]
        database.insert_sample(self.c, self.metadata, sample_list)
def _get_gene_detailed(self):
    """
    Populate the gene_detailed table from the bundled
    'detailed_gene_table_v75' annotation file, inserting rows into the
    database in batches of 1000.
    """
    # unique identifier for each entry
    i = 0
    table_contents = []
    config = read_gemini_config(args=self.args)
    path_dirname = config["annotation_dir"]
    file_handle = os.path.join(path_dirname, 'detailed_gene_table_v75')
    # context manager guarantees the annotation file is closed
    # (the original left the handle open for the process lifetime)
    with open(file_handle, 'r') as anno_fh:
        for line in anno_fh:
            field = line.strip().split("\t")
            # skip the header line
            if field[0].startswith("Chromosome"):
                continue
            i += 1
            table = gene_table.gene_detailed(field)
            detailed_list = [str(i), table.chrom, table.gene, table.is_hgnc,
                             table.ensembl_gene_id, table.ensembl_trans_id,
                             table.biotype, table.trans_status, table.ccds_id,
                             table.hgnc_id, table.entrez, table.cds_length,
                             table.protein_length,
                             table.transcript_start, table.transcript_end,
                             table.strand, table.synonym, table.rvis,
                             table.mam_phenotype]
            table_contents.append(detailed_list)
            # flush to the database in batches of 1000 rows
            if i % 1000 == 0:
                database.insert_gene_detailed(self.c, self.metadata,
                                              table_contents)
                table_contents = []
    # insert any remaining rows from the final partial batch
    database.insert_gene_detailed(self.c, self.metadata, table_contents)
def _get_gene_summary(self):
    """
    Populate the gene_summary table from the bundled
    'summary_gene_table_v75' annotation file, inserting rows into the
    database in batches of 1000.
    """
    # unique identifier for each entry
    i = 0
    contents = []
    config = read_gemini_config(args=self.args)
    path_dirname = config["annotation_dir"]
    # renamed from `file` to avoid shadowing the builtin
    summary_file = os.path.join(path_dirname, 'summary_gene_table_v75')
    # context manager guarantees the annotation file is closed
    # (the original left the handle open for the process lifetime)
    with open(summary_file, 'r') as anno_fh:
        for line in anno_fh:
            col = line.strip().split("\t")
            # skip the header line
            if col[0].startswith("Chromosome"):
                continue
            i += 1
            table = gene_table.gene_summary(col)
            # default cosmic census to False; flipped later by
            # update_gene_table()
            cosmic_census = 0
            summary_list = [str(i), table.chrom, table.gene, table.is_hgnc,
                            table.ensembl_gene_id, table.hgnc_id,
                            table.transcript_min_start,
                            table.transcript_max_end, table.strand,
                            table.synonym, table.rvis, table.mam_phenotype,
                            cosmic_census]
            contents.append(summary_list)
            # flush to the database in batches of 1000 rows
            if i % 1000 == 0:
                database.insert_gene_summary(self.c, self.metadata, contents)
                contents = []
    # insert any remaining rows from the final partial batch
    database.insert_gene_summary(self.c, self.metadata, contents)
def update_gene_table(self):
    """
    Flag genes in the gene_summary table that are members of the
    COSMIC cancer gene census.
    """
    gene_table.update_cosmic_census_genes(self.c, self.metadata, self.args)
def _init_sample_gt_counts(self):
"""
Initialize a 2D array of counts for tabulating
the count of each genotype type for eaxh sample.
The first dimension is one bucket for each sample.
The second dimension (size=4) is a count for each gt type.
Index 0 == # of hom_ref genotypes for the sample
Index 1 == # of het genotypes for the sample
Index 2 == # of missing genotypes for the sample
Index 3 == # of hom_alt genotypes for the sample
"""
self.sample_gt_counts = np.array(np.zeros((len(self.samples), 4)),
dtype='uint32')
def _update_sample_gt_counts(self, gt_types):
"""
Update the count of each gt type for each sample
"""
for idx, gt_type in enumerate(gt_types):
self.sample_gt_counts[idx][gt_type] += 1
def store_sample_gt_counts(self):
    """
    Persist the per-sample genotype-class tallies to the
    sample_genotype_counts table (one row per sample).
    """
    tbl = self.metadata.tables["sample_genotype_counts"]
    cols = database._get_cols(tbl)
    # guard against schema drift: the positional zip below relies on
    # this exact column order
    assert cols == ["sample_id", "num_hom_ref", "num_het", "num_hom_alt",
                    "num_unknown"], cols
    self.c.commit()
    # sample ids are 1-based; counts are numpy uint32, cast to int so
    # the DB driver gets plain Python integers
    self.c.execute(tbl.insert(), [dict(zip(cols, (idx + 1,
                                                  int(gtc[HOM_REF]),
                                                  int(gtc[HET]),
                                                  int(gtc[HOM_ALT]),
                                                  int(gtc[UNKNOWN]))))
                                  for idx, gtc in enumerate(self.sample_gt_counts)])
    self.c.commit()
def load(parser, args):
    """
    Entry point for ``gemini load``: validate arguments, load the
    annotation files, then build and populate a gemini database from
    the input VCF.

    Retries once with a database created in ``args.tempdir`` (moved
    into place on success) to work around SQLite locking failures on
    NFS-mounted filesystems.
    """
    if (args.db is None or args.vcf is None):
        parser.print_help()
        exit("ERROR: load needs both a VCF file and a database file\n")
    if args.anno_type not in ['snpEff', 'VEP', 'BCFT', None, "all"]:
        parser.print_help()
        exit("\nERROR: Unsupported selection for -t\n")
    # collect all of the add'l annotation files
    annotations.load_annos(args)
    # create a new gemini loader and populate
    # the gemini db and files from the VCF
    for try_count in range(2):
        try:
            if try_count > 0:
                # second attempt: build the DB under the temp dir and
                # move it into place afterwards (NFS-lock workaround)
                args.tmp_db = os.path.join(args.tempdir, "%s.db" % uuid.uuid4())
            gemini_loader = GeminiLoader(args)
            gemini_loader.store_resources()
            gemini_loader.store_version()
            gemini_loader.store_vcf_header()
            gemini_loader.populate_from_vcf()
            gemini_loader.update_gene_table()
            gemini_loader.build_indices_and_disconnect()
            if not args.no_genotypes and not args.no_load_genotypes:
                gemini_loader.store_sample_gt_counts()
            if try_count > 0:
                shutil.move(args.tmp_db, args.db)
            break
        except sql.exc.OperationalError as e:
            sys.stderr.write("sqlalchemy.OperationalError: %s\n" % e)
    else:
        # for/else: both attempts exhausted without a successful break
        raise Exception(("Attempted workaround for SQLite locking issue on NFS "
                         "drives has failed. One possible reason is that the temp directory "
                         "%s is also on an NFS drive.") % args.tempdir)
| mit | f95462b75667b8ba14ae48371ecb7087 | 42.207304 | 168 | 0.532019 | 3.659904 | false | false | false | false |
arq5x/gemini | gemini/gemini_stats.py | 3 | 9808 | #!/usr/bin/env python
from __future__ import absolute_import, print_function
import numpy as np
import collections
from collections import Counter
from . import compression as Z
import sqlalchemy as sql
from . import gemini_utils as util
from .gemini_constants import *
from . import GeminiQuery
def get_tstv(conn, metadata, args):
    """
    Report the transition / transversion ratio.

    Prints a tab-separated header ("ts", "tv", "ts/tv") followed by
    the two counts and their ratio.
    """
    # count SNVs classified as transitions
    ts_cmd = "SELECT count(1) \
              FROM variants \
              WHERE type = \'snp\' \
              AND sub_type = \'ts\'"
    # count SNVs classified as transversions
    tv_cmd = "SELECT count(1) \
              FROM variants v \
              WHERE type = \'snp\' \
              AND sub_type = \'tv\'"
    # get the number of transitions
    res = conn.execute(sql.text(ts_cmd))
    ts = res.fetchone()[0]
    # get the number of transversions
    res = conn.execute(sql.text(tv_cmd))
    tv = res.fetchone()[0]
    # report the transitions, transversions, and the ts/tv ratio
    print("ts" + '\t' + \
          "tv" + '\t' + "ts/tv")
    print(str(ts) + '\t' + \
          str(tv) + '\t' + \
          str(tstv(ts, tv)))
def get_tstv_coding(conn, metadata, args):
    """
    Report the transition / transversion ratio in coding regions.

    Prints a tab-separated header ("ts", "tv", "ts/tv") followed by
    the two counts and their ratio for SNVs with is_coding = 1.
    """
    ts_cmd = "SELECT count(1) \
              FROM variants v \
              WHERE v.type = \'snp\' \
              AND v.sub_type = \'ts\' \
              AND v.is_coding = 1"
    tv_cmd = "SELECT count(1) \
              FROM variants v \
              WHERE v.type = \'snp\' \
              AND v.sub_type = \'tv\' \
              AND v.is_coding = 1"
    # wrap raw SQL in sql.text() for consistency with get_tstv();
    # executing bare strings was deprecated and removed in SQLAlchemy 2.0
    res = conn.execute(sql.text(ts_cmd))
    ts = res.fetchone()[0]
    res = conn.execute(sql.text(tv_cmd))
    tv = res.fetchone()[0]
    # report the transitions, transversions, and the ts/tv ratio
    print("ts" + '\t' + \
          "tv" + '\t' + "ts/tv")
    print(str(ts) + '\t' + \
          str(tv) + '\t' + \
          str(tstv(ts, tv)))
def get_tstv_noncoding(conn, metadata, args):
    """
    Report the transition / transversion ratio in non-coding regions.

    Prints a tab-separated header ("ts", "tv", "ts/tv") followed by
    the two counts and their ratio for SNVs with is_coding = 0.
    """
    ts_cmd = "SELECT count(1) \
              FROM variants v \
              WHERE v.type = \'snp\' \
              AND v.sub_type = \'ts\' \
              AND v.is_coding = 0"
    tv_cmd = "SELECT count(1) \
              FROM variants v \
              WHERE v.type = \'snp\' \
              AND v.sub_type = \'tv\' \
              AND v.is_coding = 0"
    # wrap raw SQL in sql.text() for consistency with get_tstv();
    # executing bare strings was deprecated and removed in SQLAlchemy 2.0
    res = conn.execute(sql.text(ts_cmd))
    ts = res.fetchone()[0]
    res = conn.execute(sql.text(tv_cmd))
    tv = res.fetchone()[0]
    # report the transitions, transversions, and the ts/tv ratio
    print("ts" + '\t' + \
          "tv" + '\t' + "ts/tv")
    print(str(ts) + '\t' + \
          str(tv) + '\t' + \
          str(tstv(ts, tv)))
def tstv(ts, tv):
    """
    Return the ts/tv ratio rounded to 4 decimal places,
    or 0 when there are no transversions.
    """
    denominator = float(tv)
    if denominator == 0:
        # no transversions: a ratio is undefined, report 0
        return 0
    return round(float(ts) / denominator, 4)
def get_snpcounts(conn, metadata, args):
    """
    Report the count of each type of SNP, keyed by the
    ref->alt substitution.
    """
    query = "SELECT ref, alt, count(1) \
             FROM variants \
             WHERE type = \'snp\' \
             GROUP BY ref, alt"
    # get the ref and alt alleles for all snps.
    res = conn.execute(sql.text(query))
    print('\t'.join(['type', 'count']))
    for row in res:
        # the unaliased aggregate is exposed under the literal
        # column key 'count(1)'
        print('\t'.join([str(row['ref']) + "->" + str(row['alt']),
                         str(row['count(1)'])]))
def get_sfs(conn, metadata, args):
    """
    Report the site frequency spectrum: one (aaf, count) row per
    distinct alternate-allele frequency, rounded to 3 decimal places.
    """
    precision = 3
    query = "SELECT round(aaf," + str(precision) + "), count(1) \
             FROM variants \
             GROUP BY round(aaf," + str(precision) + ")"
    res = conn.execute(sql.text(query))
    print('\t'.join(['aaf', 'count']))
    for row in res:
        # row[0] == rounded aaf, row[1] == number of variants at that aaf
        print('\t'.join([str(row[0]), str(row[1])]))
def get_mds(conn, metadata, args):
    """
    Compute the pairwise genetic distance between each sample.

    For every ordered pair of samples, reports the mean squared
    difference of their numeric genotype codes over the SNP sites at
    which *both* samples have a known genotype.
    """
    idx_to_sample = {}
    res = conn.execute(sql.text("select sample_id, name from samples"))
    for row in res:
        # sample_id is 1-based in the DB; genotype arrays are 0-based
        idx_to_sample[int(row['sample_id']) - 1] = row['name']
    query = "SELECT DISTINCT v.variant_id, v.gt_types\
             FROM variants v\
             WHERE v.type = 'snp'"
    res = conn.execute(query)
    # keep a list of numeric genotype values
    # for each sample
    genotypes = collections.defaultdict(list)
    import zlib
    unpack = Z.unpack_genotype_blob
    for row in res:
        try:
            gt_types = unpack(row['gt_types'])
        except zlib.error:
            # older databases store zlib-compressed blobs, newer ones
            # use snappy; switch decoders on the first failure and
            # keep using snappy thereafter
            unpack = Z.snappy_unpack_blob
            gt_types = unpack(row['gt_types'])
        # at this point, gt_types is a numpy array
        # idx:  0  1  2  3  4  5  6 .. #samples
        # type [0  1  2  1  2  0  0 ..         ]
        for idx, gt_type in enumerate(gt_types):
            sample = idx_to_sample[idx]
            genotypes[sample].append(gt_type)
    mds = collections.defaultdict(float)
    # convert the genotype list for each sample
    # to a numpy array for performance.
    # masks stores an array of T/F indicating which genotypes are
    # known (True, [0,1,2]) and unknown (False [-1]).
    masks = {}
    for s in genotypes:
        sample = str(s)
        x = np.array(genotypes[sample])
        genotypes[sample] = x
        # NOTE(review): masked_where(cond, ...).mask is True where the
        # condition holds, i.e. where the genotype is KNOWN; if no
        # element matches, .mask can degenerate to a scalar False --
        # confirm an all-unknown sample is impossible here.
        masks[sample] = \
            np.ma.masked_where(genotypes[sample] != UNKNOWN,
                               genotypes[sample]).mask
    # compute the euclidean distance for each s1/s2 combination
    # using numpy's vectorized sum() and square() operations.
    # we use the mask arrays to identify the indices of known genotypes
    # for each sample. by doing a bitwise AND of the mask arrays for the
    # two samples, we have a mask array of variants where __both__ samples
    # were called.
    for sample1 in genotypes:
        for sample2 in genotypes:
            pair = (sample1, sample2)
            # which variants have known genotypes for both samples?
            both_mask = masks[str(sample1)] & masks[str(sample2)]
            genotype1 = genotypes[sample1]
            genotype2 = genotypes[sample2]
            # distance between s1 and s2: mean squared code difference
            # over the jointly-called sites
            eucl_dist = float(np.sum(np.square((genotype1 - genotype2)[both_mask]))) \
                / \
                float(np.sum(both_mask))
            mds[pair] = eucl_dist
    # report the pairwise MDS for each sample pair.
    print("sample1\tsample2\tdistance")
    for pair in mds:
        print("\t".join([str(pair[0]), str(pair[1]), str(round(mds[pair], 4))]))
def get_variants_by_sample(conn, metadata, args):
    """
    Report the number of variants observed for each sample
    where the sample had a non-ref genotype
    """
    idx_to_sample = util.map_indices_to_samples(metadata)
    # report.
    print('\t'.join(['sample', 'total']))
    # a sample "carries" a variant when its genotype is het or hom-alt
    query = "SELECT sample_id, \
             (num_het + num_hom_alt) as total \
             FROM sample_genotype_counts"
    res = conn.execute(sql.text(query))
    for row in res:
        # sample_id is 1-based in the DB; the index map is 0-based
        sample = idx_to_sample[row['sample_id'] - 1]
        print("\t".join(str(s) for s in [sample,
                                         row['total']]))
def get_gtcounts_by_sample(conn, metadata, args):
    """
    Report the count of each genotype class
    observed for each sample, plus a per-sample total.
    """
    idx_to_sample = util.map_indices_to_samples(metadata)
    # report.
    print('\t'.join(['sample', 'num_hom_ref', 'num_het',
                     'num_hom_alt', 'num_unknown', 'total']))
    query = "SELECT *, \
             (num_hom_ref + num_het + num_hom_alt + num_unknown) as total \
             FROM sample_genotype_counts"
    # wrap raw SQL in sql.text() for consistency with the sibling stats
    # functions; executing bare strings is removed in SQLAlchemy 2.0
    res = conn.execute(sql.text(query))
    # count the number of each genotype type obs. for each sample.
    for row in res:
        # sample_id is 1-based in the DB; the index map is 0-based
        sample = idx_to_sample[row['sample_id'] - 1]
        print("\t".join(str(s) for s in [sample,
                                         row['num_hom_ref'],
                                         row['num_het'],
                                         row['num_hom_alt'],
                                         row['num_unknown'],
                                         row['total']]))
def summarize_query_by_sample(args):
    """
    Run an arbitrary --query and print, per sample, how many of the
    returned variants that sample carries, split by genotype class.
    """
    gq = GeminiQuery.GeminiQuery(args.db)
    gq.run(args.query, show_variant_samples=True, gt_filter=args.gt_filter)
    total_counts = Counter()
    het_counts = Counter()
    hom_alt_counts = Counter()
    hom_ref_counts = Counter()
    print("\t".join(["sample", "total", "num_het", "num_hom_alt",
                     "num_hom_ref"]))
    for row in gq:
        # each *_samples field is a list of sample names for this row;
        # Counter.update tallies one hit per listed sample
        total_counts.update(row["variant_samples"])
        het_counts.update(row["het_samples"])
        hom_alt_counts.update(row["hom_alt_samples"])
        hom_ref_counts.update(row["hom_ref_samples"])
    for key in total_counts.keys():
        count_row = [key, total_counts.get(key, 0), het_counts.get(key, 0),
                     hom_alt_counts.get(key, 0), hom_ref_counts.get(key, 0)]
        print("\t".join(map(str, count_row)))
def stats(parser, args):
    """
    Entry point for ``gemini stats``: open the database and dispatch
    to whichever statistic the command-line flags selected.
    """
    from . import database
    conn, metadata = database.get_session_metadata(args.db)
    if args.tstv:
        get_tstv(conn, metadata, args)
    elif args.tstv_coding:
        get_tstv_coding(conn, metadata, args)
    elif args.tstv_noncoding:
        get_tstv_noncoding(conn, metadata, args)
    elif args.snp_counts:
        get_snpcounts(conn, metadata, args)
    elif args.sfs:
        get_sfs(conn, metadata, args)
    elif args.variants_by_sample:
        get_variants_by_sample(conn, metadata, args)
    elif args.genotypes_by_sample:
        get_gtcounts_by_sample(conn, metadata, args)
    elif args.mds:
        get_mds(conn, metadata, args)
    elif args.query:
        summarize_query_by_sample(args)
| mit | c2577046380674af97f58101816096aa | 31.157377 | 86 | 0.556688 | 3.479248 | false | false | false | false |
arq5x/gemini | gemini/tool_homozygosity_runs.py | 3 | 7165 | from __future__ import absolute_import
import os
import sys
from collections import defaultdict
from .gemini_constants import *
from . import GeminiQuery
class Site(object):
    """A single variant site: chromosome, end coordinate, and a
    genotype class that is filled in later by the caller."""

    def __init__(self, row):
        """Build a Site from a mapping with 'chrom' and 'end' keys."""
        self.chrom, self.end, self.gt_type = row['chrom'], int(row['end']), None
def _prune_run(run):
"""
Prune the current run of genotypes.
Remove genotypes from the left of the first
non-homozygous genotype, since, owing to the
same logic behind run length encoding, those
genotypes cannot be part of a longer run than
we have seen before.
For example:
breaks = * * *
run = H H H h H H H U H H H h H H H H H H
prune_1 = H H H U H H H h H H H H H H
prune_2 = H H H h H H H H H H
prune_3 = H H H H H H
"""
try:
first_het_idx = run.index('H')
except:
first_het_idx = None
try:
first_unk_idx = run.index('U')
except:
first_unk_idx = None
if first_het_idx is not None and first_unk_idx is not None:
idx_of_first_disruption = min(run.index('H'), run.index('U'))
elif first_het_idx is not None:
idx_of_first_disruption = first_het_idx
elif first_unk_idx is not None:
idx_of_first_disruption = first_unk_idx
else:
# no interuptions, return an empty list
return 0, 0, len(run), []
hets_removed = run[0:idx_of_first_disruption+1].count('H')
unks_removed = run[0:idx_of_first_disruption+1].count('U')
homs_removed = idx_of_first_disruption - (hets_removed + unks_removed) + 1
return hets_removed, unks_removed, homs_removed, run[idx_of_first_disruption+1:]
def sweep_genotypes_for_rohs(args, chrom, samples):
    """
    Sweep through the genotypes for each sample in search of ROHs.

    Note: If the genotype was homozygous, the end position
    of the variant is stored.  Otherwise 'H' for het
    and 'U' for unknown.

    Prints one BED-like line per run of homozygosity that satisfies
    args.min_snps and args.min_size, tolerating up to args.max_hets
    het calls and args.max_unknowns unknown calls per run.
    """
    hom_count = 0
    het_count = 0
    unk_count = 0
    curr_run = []
    for sample in samples:
        sites = iter(samples[sample])
        for site in sites:
            # retain the last homozygote from previous
            # run. See function docs for details
            if len(curr_run):
                hets_removed, unks_removed, homs_removed, curr_run = \
                    _prune_run(curr_run)
                # reset for next run
                hom_count -= homs_removed
                het_count -= hets_removed
                unk_count -= unks_removed
            # sweep through the active sites until we encounter
            # too many HETS or UNKNOWN genotypes.
            while het_count <= args.max_hets and unk_count <= args.max_unknowns:
                # homozygous sites are stored as int positions;
                # 'H'/'U' are the het/unknown markers
                if site != 'H' and site != 'U':
                    hom_count += 1
                    curr_run.append(site)
                elif site == 'H':
                    curr_run.append(site)
                    het_count += 1
                elif site == 'U':
                    curr_run.append(site)
                    unk_count += 1
                try:
                    site = next(sites)
                except StopIteration:
                    break
            # skip the current run unless it contains enough sites.
            if hom_count >= args.min_snps:
                # run bounds come from the stored (int) positions only
                run_start = min(c for c in curr_run if c not in ['H', 'U'])
                run_end = max(c for c in curr_run if c not in ['H', 'U'])
                run_length = run_end - run_start
                # report the run if it is long enough.
                if run_length >= args.min_size:
                    density_per_kb = float(len(curr_run) * 1000) / float(run_length)
                    print("\t".join(str(s) for s in [chrom,
                                                     run_start, run_end, sample,
                                                     hom_count, round(density_per_kb, 4),
                                                     run_length]))
            else:
                # too few homozygotes: discard and start fresh
                curr_run = []
                hom_count = 0
                het_count = 0
                unk_count = 0
def get_homozygosity_runs(args):
    """
    Query SNPs ordered by chromosomal position, encode each sample's
    genotypes as positions ('hom') / 'H' (het) / 'U' (unknown), and
    sweep each chromosome for runs of homozygosity.
    """
    gq = GeminiQuery.GeminiQuery(args.db)
    # get a mapping of sample ids to sample indices
    idx2smp = gq.index2sample
    smp2idx = gq.sample2index
    sm_index = []
    # prepare a lookup of just the samples
    # for which the user wishes to search for ROHs
    if args.samples is not None:
        sample_filter = args.samples.strip().split(",")
        for sample in sample_filter:
            try:
                idx = smp2idx[sample]
            # NOTE(review): bare except; a KeyError-specific clause
            # would avoid masking unrelated failures
            except:
                raise ValueError("Sample %s could not be found.\n" \
                                 % (sample))
            sm_index.append(smp2idx[sample])
    else:
        # no filter: use every sample in the database
        for sample in smp2idx:
            sm_index.append(smp2idx[sample])
    ###########################################################################
    # Phase 1. Retrieve the variants for each chrom/sample
    ###########################################################################
    query = "SELECT chrom, start, end, gt_types, gt_depths \
             FROM variants \
             WHERE type = 'snp' \
             AND filter is NULL \
             AND depth >= " + str(args.min_total_depth) + \
            " ORDER BY chrom, end"
    sys.stderr.write("LOG: Querying and ordering variants by chromosomal position.\n")
    gq.run(query, needs_genotypes=True)
    print("\t".join(['chrom',
                     'start', 'end', 'sample',
                     'num_of_snps', 'density_per_kb',
                     'run_length_in_bp']))
    variants_seen = 0
    samples = defaultdict(list)
    prev_chrom = None
    curr_chrom = None
    for row in gq:
        variants_seen += 1
        # periodic progress logging
        if variants_seen % 10000 == 0:
            sys.stderr.write("LOG: Loaded %d variants. Current variant on %s, position %d.\n" \
                             % (variants_seen, row['chrom'], row['end']))
        gt_types = row['gt_types']
        gt_depths = row['gt_depths']
        curr_chrom = row['chrom']
        # the chromosome has changed. search for ROHs in the previous chrom
        if curr_chrom != prev_chrom and prev_chrom is not None:
            sweep_genotypes_for_rohs(args, prev_chrom, samples)
            samples = defaultdict(list)
        # associate the genotype for the variant with each sample
        for idx in sm_index:
            sample = idx2smp[idx]
            gt_type = gt_types[idx]
            depth = gt_depths[idx]
            # the genotype must have had sufficient depth to be considered
            if depth < args.min_genotype_depth:
                continue
            # homozygous sites keep their position; het/unknown become
            # the 'H'/'U' markers consumed by the sweep
            if (gt_type == HOM_ALT or gt_type == HOM_REF):
                samples[sample].append(row['end'])
            elif gt_type == HET:
                samples[sample].append('H')
            elif gt_type == UNKNOWN:
                samples[sample].append('U')
        prev_chrom = curr_chrom
    # search for ROHs in the final chromosome
    sweep_genotypes_for_rohs(args, curr_chrom, samples)
def run(parser, args):
    """
    Entry point for the homozygosity-runs tool.

    NOTE(review): silently a no-op when args.db does not exist; an
    explicit error message might be friendlier.
    """
    if os.path.exists(args.db):
        # run the roh caller
        get_homozygosity_runs(args)
| mit | e94b44afc5471f5dc22d5f048722befa | 32.325581 | 95 | 0.527006 | 3.771053 | false | false | false | false |
arq5x/gemini | gemini/gemini_actionable_mutations.py | 3 | 2408 | #!/usr/bin/env python
from __future__ import absolute_import
from collections import defaultdict
from gemini.gemini_constants import *
from gemini import gemini_subjects
from gemini.dgidb import query_dgidb
from gemini import GeminiQuery
def get_actionable_mutations(parser, args):
    """
    Report clinically actionable somatic mutations: HIGH/MED-impact
    somatic SNVs and indels in COSMIC cancer-census genes, annotated
    with drug-gene interaction information from DGIdb.

    One output line is printed per qualifying variant per tumor/normal
    pair in the database.
    """
    t_n_pairs = gemini_subjects.get_families(args.db)
    query = "SELECT variants.chrom, start, end, ref, alt, \
                    variants.gene, impact, is_somatic, \
                    gene_summary.in_cosmic_census \
             FROM variants, gene_summary \
             WHERE variants.is_somatic = 1 \
             AND (variants.type = 'snp' \
                  OR variants.type = 'indel') \
             AND (variants.impact_severity = 'HIGH' \
                  OR variants.impact_severity = 'MED') \
             AND variants.chrom = gene_summary.chrom \
             AND variants.gene = gene_summary.gene \
             AND gene_summary.in_cosmic_census = 1"
    # first pass: collect the distinct genes hit so DGIdb is queried once
    gq = GeminiQuery.GeminiQuery(args.db)
    gq.run(query)
    # plain dict used as a set of gene names (was a factory-less
    # defaultdict, which behaves identically but reads misleadingly)
    genes = {}
    for row in gq:
        genes[row['gene']] = True
    # collect info from DGIdb
    dgidb_info = query_dgidb(genes)
    # second pass: report actionable mutations per DGIdb and COSMIC census
    gq = GeminiQuery.GeminiQuery(args.db)
    gq.run(query)
    print('\t'.join(['tum_name', 'chrom', 'start', 'end', 'ref', 'alt',
                     'gene', 'impact', 'is_somatic', 'in_cosmic_census',
                     'dgidb_info']))
    for row in gq:
        for pair in t_n_pairs:
            samples = pair.subjects
            # only proper tumor/normal pairs are reportable
            if len(samples) != 2:
                continue
            tumor = pair.subjects[0]
            normal = pair.subjects[1]
            # swap if we guessed the tumor incorrectly
            if tumor.affected is False:
                tumor, normal = normal, tumor
            print('\t'.join(str(s) for s in [tumor.name, row['chrom'],
                                             row['start'], row['end'],
                                             row['ref'], row['alt'],
                                             row['gene'], row['impact'],
                                             row['is_somatic'],
                                             row['in_cosmic_census'],
                                             str(dgidb_info[row['gene']])]))
| mit | 0a9dc38b34ba64565f403753ca7f69f9 | 37.83871 | 86 | 0.516611 | 3.858974 | false | false | false | false |
arq5x/gemini | gemini/scripts/gemini_install.py | 3 | 7421 | #!/usr/bin/env python
"""Installer for gemini: a lightweight db framework for disease and population genetics.
https://github.com/arq5x/gemini
Handles installation of:
- Required third party software
- Required Python libraries
- Gemini application
- Associated data files
Requires: Python 2.7 (or 2.6 and argparse), git, and compilers (gcc, g++)
Run gemini_install.py -h for usage.
"""
from __future__ import print_function
import argparse
import platform
import os
import shutil
import subprocess
import sys
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import urllib
remotes = {"requirements_conda": "",
"versioned_installations": "https://raw.githubusercontent.com/arq5x/gemini/master/versioning/",
"anaconda": "http://repo.continuum.io/miniconda/Miniconda2-latest-%s-x86%s.sh"}
remotes_dev = remotes.copy()
remotes_dev.update({
"requirements_conda": "https://raw.githubusercontent.com/arq5x/gemini/dev/versioning/unstable/requirements_conda.txt",
})
remotes_bp = remotes_dev
remotes_bp.update({
"requirements_conda": "https://raw.githubusercontent.com/brentp/gemini/dev/versioning/unstable/requirements_conda.txt",
})
def main(args, remotes=remotes):
    """
    Drive the full gemini installation: isolated anaconda python,
    conda packages, third-party tools, and (optionally) data files.

    Works inside a throwaway ./tmpgemini_install directory that is
    removed on success.
    """
    check_dependencies()
    clean_env_variables()
    work_dir = os.path.join(os.getcwd(), "tmpgemini_install")
    if not os.path.exists(work_dir):
        os.makedirs(work_dir)
    os.chdir(work_dir)
    if args.gemini_version in ("unstable", "bp"):
        # development installs pull requirements from the dev branches
        if args.gemini_version == "unstable":
            remotes = remotes_dev
        else:
            remotes = remotes_bp
        requirements_conda = remotes['requirements_conda']
        # NOTE(review): urllib.urlretrieve is Python-2-only; under
        # Python 3 this raises AttributeError (urllib.request.urlretrieve)
        urllib.urlretrieve(requirements_conda, filename='_conda_dev.txt')
        # quick hack to support testing installs:
        if args.gemini_version == "bp":
            # NOTE(review): this iterates the *characters* of the
            # filename string; likely meant ('_conda_dev.txt',)
            for f in '_conda_dev.txt':
                contents = open(f).read().replace('arq5x', 'brentp')
                with open(f, 'w') as fh:
                    fh.write(contents)
        remotes.update({'requirements_conda': '_conda_dev.txt'})
    elif args.gemini_version != 'latest':
        # versioned installs use a pinned requirements file from the repo
        requirements_conda = os.path.join(remotes['versioned_installations'],
                                          args.gemini_version, 'requirements_conda.txt')
        try:
            urlopen(requirements_conda)
        except:
            raise ValueError('Gemini version %s could not be found. Try the latest version.' % args.gemini_version)
        remotes.update({'requirements_conda': requirements_conda})
    print("Installing isolated base python installation")
    make_dirs(args)
    anaconda = install_anaconda_python(args, remotes)
    print("Installing base gemini package...")
    gemini = install_conda_pkgs(anaconda, remotes, args)
    install_rest(gemini, args)
    print("Finished: gemini, tools and data installed")
    if args.tooldir:
        print(" Tools installed in:\n  %s" % args.tooldir)
        print(" NOTE: be sure to add %s/bin to your PATH." % args.tooldir)
    if args.install_data:
        print(" Data installed in:\n  %s" % args.datadir)
        print(" NOTE: Install data files for GERP_bp & CADD_scores (not installed by default).\n ")
    shutil.rmtree(work_dir)
def install_conda_pkgs(anaconda, remotes, args):
    """
    Install gemini (and, for versioned installs, its pinned dependency
    file) into the isolated anaconda environment via conda.

    Returns the path to the installed ``gemini`` executable.
    """
    # urlretrieve moved to urllib.request in Python 3; resolve it
    # lazily so the installer runs under both interpreters
    # (the original `urllib.urlretrieve` is an AttributeError on py3)
    try:
        from urllib import urlretrieve  # Python 2
    except ImportError:
        from urllib.request import urlretrieve  # Python 3
    if args.gemini_version != 'latest':
        # pinned install: fetch the versioned conda requirements file
        req_file = '_conda-requirements-%s.txt' % args.gemini_version
        urlretrieve(remotes["requirements_conda"], filename=req_file)
        pkgs = ["--file", req_file]
    else:
        req_file = None
        pkgs = ["gemini"]
    channels = ["-c", "conda-forge", "-c", "bioconda"]
    print(" ".join([anaconda["conda"], "install", "--yes"] + channels + pkgs))
    subprocess.check_call([anaconda["conda"], "install", "--yes"] + channels + pkgs)
    # clean up the temporary requirements file
    if req_file and os.path.exists(req_file):
        os.remove(req_file)
    return os.path.join(anaconda["dir"], "bin", "gemini")
def install_anaconda_python(args, remotes):
    """Provide isolated installation of Anaconda python.
    http://docs.continuum.io/anaconda/index.html

    Downloads the Miniconda installer matching the current OS and
    architecture and installs it under <datadir>/anaconda, unless a
    working installation already exists.  Returns a dict with the
    'conda' binary path and the install 'dir'.
    """
    anaconda_dir = os.path.join(args.datadir, "anaconda")
    bindir = os.path.join(anaconda_dir, "bin")
    conda = os.path.join(bindir, "conda")
    # choose the installer flavor for this platform
    if platform.mac_ver()[0]:
        distribution = "macosx"
    else:
        distribution = "linux"
    if platform.architecture()[0] == "32bit":
        arch = ""
    else:
        arch = "_64"
    # (re)install only when the tree is missing or lacks the conda binary
    if not os.path.exists(anaconda_dir) or not os.path.exists(conda):
        if os.path.exists(anaconda_dir):
            shutil.rmtree(anaconda_dir)
        url = remotes["anaconda"] % ("MacOSX" if distribution == "macosx" else "Linux", arch)
        # --continue lets an interrupted download resume
        if not os.path.exists(os.path.basename(url)):
            subprocess.check_call(["wget", "--continue", "--no-check-certificate", url])
        # -b: batch (no prompts); -p: install prefix
        subprocess.check_call("bash %s -b -p %s" %
                              (os.path.basename(url), anaconda_dir), shell=True)
    return {"conda": conda,
            "dir": anaconda_dir}
def install_rest(gemini, args):
    """Install biological data and tests used by gemini.

    Invokes ``gemini update --dataonly`` against the freshly installed
    executable, pointing it at the annotation directory under datadir.
    """
    legacy_dir = os.path.join(args.datadir, "data")
    if os.path.exists(legacy_dir):
        annotation_dir = legacy_dir
    else:
        annotation_dir = os.path.join(args.datadir, "gemini_data")
    cmd = [gemini, "--annotation-dir", annotation_dir, "update", "--dataonly"]
    if not args.install_data:
        cmd.append("--nodata")
    if args.tooldir:
        cmd.extend(["--tooldir", args.tooldir])
    print(" ".join(cmd))
    subprocess.check_call(cmd)
def make_dirs(args):
    """Create the data and tool directories when they do not exist yet.

    A falsy (None/empty) directory entry is skipped.
    """
    for target in (args.datadir, args.tooldir):
        if not target:
            continue
        if not os.path.exists(target):
            os.makedirs(target)
def clean_env_variables():
    """Adjust environmental variables which can cause conflicts with installed anaconda python.
    """
    os.environ.pop("PYTHONPATH", None)
    os.environ.pop("PYTHONHOME", None)
    # https://docs.python.org/2/using/cmdline.html#envvar-PYTHONNOUSERSITE
    os.environ["PYTHONNOUSERSITE"] = "1"
def check_dependencies():
    """Ensure required tools for installation are present.

    Currently only wget is checked; an OSError is raised when it is
    missing from the PATH.
    """
    print("Checking required dependencies...")
    for cmd, url in [("wget", "http://www.gnu.org/software/wget/")]:
        try:
            retcode = subprocess.call([cmd, "--version"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except OSError:
            # executable not found on PATH; mimic the shell's 127 code
            retcode = 127
        if retcode == 127:
            raise OSError("gemini requires %s (%s)" % (cmd, url))
        else:
            print(" %s found" % cmd)
if __name__ == "__main__":
    # command-line entry point: tooldir and datadir are required
    # positional arguments, normalized to absolute paths
    parser = argparse.ArgumentParser(description="Automated installer for gemini framework.")
    parser.add_argument("tooldir", help="Directory to install 3rd party software tools",
                        type=os.path.abspath)
    parser.add_argument("datadir", help="Directory to install anaconda python and gemini data files",
                        type=os.path.abspath)
    parser.add_argument("--nodata", help="Do not install data dependencies",
                        dest="install_data", action="store_false", default=True)
    parser.add_argument("--gemini-version", dest="gemini_version", default="latest",
                        help="Install one specific gemini version with a fixed dependency chain.")
    # with no arguments, show usage instead of failing on missing args
    if len(sys.argv) == 1:
        parser.print_help()
    else:
        main(parser.parse_args())
| mit | 4c504cc8e4647cfbb7c0109e47cb5af3 | 37.853403 | 123 | 0.634012 | 3.677403 | false | false | false | false |
arq5x/gemini | gemini/gemini_region.py | 3 | 3162 | #!/usr/bin/env python
from __future__ import absolute_import
import re
import os
import sys
from . import GeminiQuery
from .GeminiQuery import select_formatter
def _report_results(args, query, gq):
    """
    Execute *query* through the GeminiQuery object and print each row,
    optionally preceded by the column header (--header).
    """
    # report the results of the region query
    gq.run(query, show_variant_samples=args.show_variant_samples)
    if args.use_header and gq.header:
        print(gq.header)
    for row in gq:
        print(row)
def get_region(args, gq):
    """
    Report all variants overlapping the chrom:start-end region given
    in args.region, honoring optional --columns and --filter.
    """
    region_regex = re.compile("(\S+):(\d+)-(\d+)")
    try:
        # findall()[0] raises IndexError when the string doesn't match
        region = region_regex.findall(args.region)[0]
    except IndexError:
        raise IndexError("Malformed region (--reg) string")
    # defensive check; findall yields 3-tuples by construction
    if len(region) != 3:
        raise IndexError("Malformed region (--reg) string")
    chrom = region[0]
    start = region[1]
    end = region[2]
    if args.columns is not None:
        query = "SELECT " + str(args.columns) + \
                " FROM variants "
    else:
        query = "SELECT * FROM variants "
    # a variant overlaps when either endpoint falls inside the region
    query += "WHERE chrom = " + "'" + chrom + "'" + \
             " AND ((start BETWEEN " + start + " AND " + end + ")" +\
             " OR (end BETWEEN " + start + " AND " + end + "))"
    if args.filter:
        query += " AND " + args.filter
    query += " ORDER BY chrom, start"
    _report_results(args, query, gq)
def get_gene(args, gq):
    """
    Report all variants annotated with the gene named in args.gene,
    honoring optional --columns and --filter.
    """
    selected = "*" if args.columns is None else str(args.columns)
    query = "SELECT " + selected + " FROM variants "
    query += "WHERE gene = " + "'" + args.gene + "' "
    if args.filter:
        query += " AND " + args.filter
    query += " ORDER BY chrom, start"
    _report_results(args, query, gq)
def add_region_to_query(args):
    """
    Constrain args.query (in place) to the chrom:start-end region
    encoded in args.region.
    """
    matches = re.compile("(\S+):(\d+)-(\d+)").findall(args.region)
    if not matches:
        raise IndexError("Malformed region (--reg) string")
    chrom, start, end = matches[0]
    if len(matches[0]) != 3:
        raise IndexError("Malformed region (--reg) string")
    where_clause = " chrom = " + "'" + chrom + "'" + \
                   " AND ((start BETWEEN " + start + " AND " + end + ")" +\
                   " OR (end BETWEEN " + start + " AND " + end + "))"
    args.query = _add_to_where_clause(args.query, where_clause)
def _add_to_where_clause(query, where_clause):
where_index = query.lower().find("where")
prefix = query[0:where_index]
suffix = query[where_index + len("where"):]
if where_index == -1:
query += " WHERE " + where_clause
else:
query = "{0} WHERE ({1}) AND ({2})".format(prefix, suffix, where_clause)
return query
def region(parser, args):
    """
    Entry point for ``gemini region``: report variants in a region
    (--reg) or in a gene (--gene), but not both.

    NOTE(review): silently a no-op when args.db does not exist.
    """
    if os.path.exists(args.db):
        formatter = select_formatter(args)
        gq = GeminiQuery.GeminiQuery(args.db, out_format=formatter)
        if args.region is not None and args.gene is not None:
            raise ValueError('Choose either --reg or --gene, not both.\n')
        elif args.region is not None:
            get_region(args, gq)
        elif args.gene is not None:
            get_gene(args, gq)
| mit | e015f0e5a6532645d8ee5736d08b49f9 | 25.35 | 80 | 0.567679 | 3.501661 | false | false | false | false |
pyannote/pyannote-audio | pyannote/audio/utils/preview.py | 1 | 10623 | # MIT License
#
# Copyright (c) 2020- CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Union
try:
from IPython.display import Audio as IPythonAudio
from IPython.display import Video as IPythonVideo
IPYTHON_INSTALLED = True
except ImportError:
IPYTHON_INSTALLED = False
import tempfile
import warnings
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
try:
from moviepy.editor import AudioClip, VideoClip
from moviepy.video.io.bindings import mplfig_to_npimage
MOVIEPY_INSTALLED = True
except ImportError:
MOVIEPY_INSTALLED = False
from typing import Mapping
import torch
from pyannote.core import (
Annotation,
Segment,
SlidingWindow,
SlidingWindowFeature,
Timeline,
notebook,
)
from pyannote.audio.core.io import Audio, AudioFile
from pyannote.audio.core.model import Model
from pyannote.audio.utils.signal import Binarize
def listen(audio_file: AudioFile, segment: Segment = None) -> None:
    """Play an audio file inside a notebook.

    Plays the whole file unless a ``Segment`` is given, in which case
    playback is cropped to that segment.

    Parameters
    ----------
    audio_file : AudioFile
        A str, Path or ProtocolFile to be loaded.
    segment : Segment, optional
        The segment to crop the playback to.
        Defaults to playback the whole file.
    """
    if not IPYTHON_INSTALLED:
        warnings.warn("You need IPython installed to use this method")
        return
    loader = Audio()
    if segment is None:
        waveform, sample_rate = loader(audio_file)
    else:
        waveform, sample_rate = loader.crop(audio_file, segment)
    return IPythonAudio(waveform.flatten(), rate=sample_rate)
def preview(
    audio_file: AudioFile,
    segment: Segment = None,
    zoom: float = 10.0,
    video_fps: int = 5,
    video_ext: str = "webm",
    display: bool = True,
    **views,
):
    """Preview

    Parameters
    ----------
    audio_file : AudioFile
        A str, Path or ProtocolFile to be previewed
    segment : Segment, optional
        The segment to crop the preview to.
        Defaults to preview the whole file.
    video_fps : int, optional
        Video frame rate. Defaults to 5. Higher frame rate leads
        to a smoother video but longer processing time.
    video_ext : str, optional
        One of {"webm", "mp4", "ogv"} according to what your
        browser supports. Defaults to "webm" as it seems to
        be supported by most browsers (see caniuse.com/webm)/
    display : bool, optional
        Wrap the video in a IPython.display.Video instance for
        visualization in notebooks (default). Set to False if
        you are only interested in saving the video preview to
        disk.
    **views : dict
        Additional views. See Usage section below.

    Returns
    -------
    * IPython.display.Video instance if `display` is True (default)
    * path to video preview file if `display` is False

    Usage
    -----
    >>> assert isinstance(annotation, pyannote.core.Annotation)
    >>> assert isinstance(scores, pyannote.core.SlidingWindowFeature)
    >>> assert isinstance(timeline, pyannote.core.Timeline)
    >>> preview("audio.wav", reference=annotation, speech_probability=scores, speech_regions=timeline)
    # will create a video with 4 views. from top to bottom:
    # "reference", "speech probability", "speech regions", and "waveform")
    """
    if not MOVIEPY_INSTALLED:
        warnings.warn("You need MoviePy installed to use this method")
        return
    if display and not IPYTHON_INSTALLED:
        warnings.warn(
            "Since IPython is not installed, this method cannot be used "
            "with default display=True option. Either run this method in "
            "a notebook or use display=False to save video preview to disk."
        )
    # derive a display name (used in the output filename) from the input
    if isinstance(audio_file, Mapping) and "uri" in audio_file:
        uri = audio_file["uri"]
    elif isinstance(audio_file, (str, Path)):
        uri = Path(audio_file).name
    else:
        raise ValueError("Unsupported 'audio_file' type.")
    # video preview is written to a fresh temporary directory
    temp_dir = tempfile.mkdtemp(prefix="pyannote-audio-preview")
    video_path = f"{temp_dir}/{uri}.{video_ext}"
    audio = Audio(sample_rate=16000, mono=True)
    if segment is None:
        duration = audio.get_duration(audio_file)
        segment = Segment(start=0.0, end=duration)
    # load waveform as SlidingWindowFeature
    data, sample_rate = audio.crop(audio_file, segment)
    data = data.numpy().T
    samples = SlidingWindow(
        start=segment.start, duration=1 / sample_rate, step=1 / sample_rate
    )
    waveform = SlidingWindowFeature(data, samples)
    ylim_waveform = np.min(data), np.max(data)

    def make_audio_frame(T: float):
        # moviepy audio callback: T is either a scalar time or an array of
        # times (in seconds, relative to segment start); return the matching
        # waveform sample(s).
        if isinstance(T, np.ndarray):
            return np.take(data, (T * sample_rate).astype(np.int64))
        return data[round(T * sample_rate)]

    audio_clip = AudioClip(make_audio_frame, duration=segment.duration, fps=sample_rate)
    # reset notebook just once so that colors are coherent between views
    notebook.reset()
    # initialize subplots with one row per view + one view for waveform
    nrows = len(views) + 1
    fig, axes = plt.subplots(
        nrows=nrows, ncols=1, figsize=(10, 2 * nrows), squeeze=False
    )
    *ax_views, ax_wav = axes[:, 0]
    # TODO: be smarter based on all SlidingWindowFeature views
    ylim = (-0.1, 1.1)

    def make_frame(T: float):
        # moviepy video callback: render all views around current time T
        # make sure all subsequent calls to notebook.plot_*
        # will only display the region center on current time
        t = T + segment.start
        notebook.crop = Segment(t - 0.5 * zoom, t + 0.5 * zoom)
        ax_wav.clear()
        notebook.plot_feature(waveform, ax=ax_wav, time=True, ylim=ylim_waveform)
        # display time cursor
        ax_wav.plot([t, t], ylim_waveform, "k--")
        # display view name
        ax_wav.axes.get_yaxis().set_visible(True)
        ax_wav.axes.get_yaxis().set_ticks([])
        ax_wav.set_ylabel("waveform")
        for (name, view), ax_view in zip(views.items(), ax_views):
            ax_view.clear()
            # dispatch on view type; unknown types get an empty axis
            if isinstance(view, Timeline):
                notebook.plot_timeline(view, ax=ax_view, time=False)
            elif isinstance(view, Annotation):
                notebook.plot_annotation(view, ax=ax_view, time=False, legend=True)
            elif isinstance(view, SlidingWindowFeature):
                # TODO: be smarter about ylim
                notebook.plot_feature(view, ax=ax_view, time=False, ylim=ylim)
            # display time cursor
            ax_view.plot([t, t], ylim, "k--")
            # display view name (underscores become spaces)
            ax_view.axes.get_yaxis().set_visible(True)
            ax_view.axes.get_yaxis().set_ticks([])
            ax_view.set_ylabel(" ".join(name.split("_")))
        return mplfig_to_npimage(fig)

    video_clip = VideoClip(make_frame, duration=segment.duration)
    video_clip = video_clip.set_audio(audio_clip)
    video_clip.write_videofile(
        video_path,
        fps=video_fps,
        audio=True,
        audio_fps=sample_rate,
        preset="ultrafast",
        logger="bar",
    )
    plt.close(fig)
    if not display:
        return video_path
    return IPythonVideo(video_path, embed=True)
def preview_training_samples(
    model: Model,
    blank: float = 1.0,
    video_fps: int = 5,
    video_ext: str = "webm",
    display: bool = True,
) -> Union[IPythonVideo, str]:
    """Preview training samples of a given model

    Parameters
    ----------
    Model : Model
        Model, already setup for training (i.e. call model.setup(stage="fit") first).
    blank : float, optional
        Add blank of that many seconds between each sample. Defaults to 1.0
    video_fps : int, optional
        Video frame rate. Defaults to 5. Higher frame rate leads
        to a smoother video but longer processing time.
    video_ext : str, optional
        One of {"webm", "mp4", "ogv"} according to what your
        browser supports. Defaults to "webm" as it seems to
        be supported by most browsers (see caniuse.com/webm)/
    display : bool, optional
        Wrap the video in a IPython.display.Video instance for
        visualization in notebooks (default). Set to False if
        you are only interested in saving the video preview to
        disk.

    Returns
    -------
    * IPython.display.Video instance if `display` is True (default)
    * path to video preview file if `display` is False
    """
    # grab one batch from the training dataloader
    batch = next(iter(model.train_dataloader()))
    batch_size, num_channels, num_samples = batch["X"].shape
    batch_size, num_frames, num_speakers = batch["y"].shape
    sample_rate = model.audio.sample_rate
    # concatenate every sample of the batch into one long waveform,
    # inserting `blank` seconds of silence between consecutive samples
    batch_num_samples = int(batch_size * (num_samples + blank * sample_rate))
    # model.introspection maps a number of audio samples to a number of
    # output frames (used to align waveform and reference)
    batch_num_frames = int(model.introspection(batch_num_samples)[0])
    waveform = torch.zeros((1, batch_num_samples))
    reference = torch.zeros((batch_num_frames, num_speakers))
    for b, (X, y) in enumerate(zip(batch["X"], batch["y"])):
        # position of the b-th sample in the concatenated waveform
        X_idx = int(b * (num_samples + blank * sample_rate))
        waveform[:, X_idx : X_idx + num_samples] = X
        # matching position in the frame-aligned reference
        y_idx, _ = model.introspection(X_idx)
        reference[y_idx : y_idx + num_frames, :] = y
    # binarize frame-level reference into an Annotation for display
    reference = Binarize()(SlidingWindowFeature(reference, model.introspection.frames))
    audio_file = {
        "waveform": waveform,
        "sample_rate": sample_rate,
        "uri": model.task.logging_prefix + "TrainingSamples",
    }
    return preview(
        audio_file,
        video_fps=video_fps,
        video_ext=video_ext,
        display=display,
        reference=reference,
    )
| mit | 1f0408f68115130c949cfa273d36d151 | 31.686154 | 102 | 0.651793 | 3.864314 | false | false | false | false |
matplotlib/basemap | packages/basemap/src/mpl_toolkits/basemap/proj.py | 1 | 18036 | from __future__ import (absolute_import, division, print_function)
import numpy as np
import pyproj
import math
try:
from inspect import cleandoc as dedent
except ImportError:
# Deprecated as of version 3.1. Not quite the same
# as textwrap.dedent.
from matplotlib.cbook import dedent
# Package version string.
__version__ = "1.3.6+dev"
# Degree <-> radian conversion factors.
_dg2rad = math.radians(1.)
_rad2dg = math.degrees(1.)
# Projections whose native x/y coordinates map trivially to lon/lat.
_cylproj = ['cyl','merc','mill','gall']
# Pseudo-cylindrical projections (straight parallels, curved meridians).
_pseudocyl = ['moll','kav7','eck4','robin','sinu','mbtfpq','vandg','hammer']
# Error messages raised when a requested map corner falls outside the
# valid domain of the projection.
_upper_right_out_of_bounds = (
    'the upper right corner of the plot is not in the map projection region')
_lower_left_out_of_bounds = (
    'the lower left corner of the plot is not in the map projection region')
class Proj(object):
    """
    peforms cartographic transformations (converts from longitude,latitude
    to native map projection x,y coordinates and vice versa) using proj
    (http://proj.maptools.org/)
    Uses a pyrex generated C-interface to libproj.
    __init__ method sets up projection information.
    __call__ method compute transformations.
    See docstrings for __init__ and __call__ for details.
    Contact: Jeff Whitaker <jeffrey.s.whitaker@noaa.gov>
    """
    def __init__(self,projparams,llcrnrlon,llcrnrlat,
                 urcrnrlon,urcrnrlat,urcrnrislatlon=True):
        """
        initialize a Proj class instance.
        Input 'projparams' is a dictionary containing proj map
        projection control parameter key/value pairs.
        See the proj documentation (http://www.remotesensing.org/proj/)
        for details.
        llcrnrlon,llcrnrlat are lon and lat (in degrees) of lower
        left hand corner of projection region.
        urcrnrlon,urcrnrlat are lon and lat (in degrees) of upper
        right hand corner of projection region if urcrnrislatlon=True
        (default). Otherwise, urcrnrlon,urcrnrlat are x,y in projection
        coordinates (units meters), assuming the lower left corner is x=0,y=0.
        """
        self.projparams = projparams
        self.projection = projparams['proj']
        # rmajor is the semi-major axis.
        # rminor is the semi-minor axis.
        # esq is eccentricity squared.
        # NOTE(review): the bare excepts below swallow *any* error, not just
        # a missing key -- presumably KeyError was intended; confirm.
        try:
            self.rmajor = projparams['a']
            self.rminor = projparams['b']
        except:
            try:
                self.rmajor = projparams['R']
            except:
                self.rmajor = projparams['bR_a']
            self.rminor = self.rmajor
        if self.rmajor == self.rminor:
            self.ellipsoid = False
        else:
            self.ellipsoid = True
        self.flattening = (self.rmajor-self.rminor)/self.rmajor
        self.esq = (self.rmajor**2 - self.rminor**2)/self.rmajor**2
        self.llcrnrlon = llcrnrlon
        self.llcrnrlat = llcrnrlat
        # compute native coordinates of the lower left corner; the logic
        # is projection-family specific.
        if self.projection == 'cyl':
            # for 'cyl', native x/y == lon/lat
            llcrnrx = llcrnrlon
            llcrnry = llcrnrlat
        elif self.projection == 'ob_tran':
            self._proj4 = pyproj.Proj(projparams)
            llcrnrx,llcrnry = self(llcrnrlon,llcrnrlat)
            # ob_tran returns radians; convert to degrees and wrap to [0,360)
            llcrnrx = _rad2dg*llcrnrx; llcrnry = _rad2dg*llcrnry
            if llcrnrx < 0: llcrnrx = llcrnrx + 360
        elif self.projection in 'ortho':
            # NOTE(review): "in 'ortho'" is a substring test on a string, so
            # e.g. 'orth' or 'rt' would also match -- presumably "== 'ortho'"
            # was intended; confirm before changing.
            if (llcrnrlon == -180 and llcrnrlat == -90 and
                urcrnrlon == 180 and urcrnrlat == 90):
                # whole-world corners request the full disk
                self._fulldisk = True
                self._proj4 = pyproj.Proj(projparams)
                llcrnrx = -self.rmajor
                llcrnry = -self.rmajor
                self._width = 0.5*(self.rmajor+self.rminor)
                self._height = 0.5*(self.rmajor+self.rminor)
                urcrnrx = -llcrnrx
                urcrnry = -llcrnry
            else:
                self._fulldisk = False
                self._proj4 = pyproj.Proj(projparams)
                llcrnrx, llcrnry = self(llcrnrlon,llcrnrlat)
                # proj returns huge values (1e30) for unmappable points
                if llcrnrx > 1.e20 or llcrnry > 1.e20:
                    raise ValueError(_lower_left_out_of_bounds)
        elif self.projection == 'aeqd' and\
             (llcrnrlon == -180 and llcrnrlat == -90 and urcrnrlon == 180 and\
             urcrnrlat == 90):
            self._fulldisk = True
            self._proj4 = pyproj.Proj(projparams)
            # raise an exception for ellipsoids - there appears to be a bug
            # in proj4 that causes the inverse transform to fail for points
            # more than 90 degrees of arc away from center point for ellipsoids
            # (works fine for spheres) - below is an example
            #from pyproj import Proj
            #p1 = Proj(proj='aeqd',a=6378137.00,b=6356752.3142,lat_0=0,lon_0=0)
            #x,y= p1(91,0)
            #lon,lat = p1(x,y,inverse=True) # lon is 89 instead of 91
            if self.ellipsoid:
                msg = dedent("""
                full disk (whole world) Azimuthal Equidistant projection can
                only be drawn for a perfect sphere""")
                raise ValueError(msg)
            llcrnrx = -np.pi*self.rmajor
            llcrnry = -np.pi*self.rmajor
            self._width = -llcrnrx
            self._height = -llcrnry
            urcrnrx = -llcrnrx
            urcrnry = -llcrnry
        elif self.projection == 'geos':
            self._proj4 = pyproj.Proj(projparams)
            # find major and minor axes of ellipse defining map proj region.
            # h is measured from surface of earth at equator.
            h = projparams['h'] + self.rmajor
            # latitude of horizon on central meridian
            lonmax = 90.-(180./np.pi)*np.arcsin(self.rmajor/h)
            # longitude of horizon on equator
            latmax = 90.-(180./np.pi)*np.arcsin(self.rminor/h)
            # truncate to nearest hundredth of a degree (to make sure
            # they aren't slightly over the horizon)
            latmax = int(100*latmax)/100.
            lonmax = int(100*lonmax)/100.
            # width and height of visible projection
            P = pyproj.Proj(proj='geos',a=self.rmajor,\
                b=self.rminor,lat_0=0,lon_0=0,h=projparams['h'])
            x1,y1 = P(0.,latmax); x2,y2 = P(lonmax,0.)
            width = x2; height = y1
            self._height = height
            self._width = width
            if (llcrnrlon == -180 and llcrnrlat == -90 and
                urcrnrlon == 180 and urcrnrlat == 90):
                self._fulldisk = True
                llcrnrx = -width
                llcrnry = -height
                urcrnrx = -llcrnrx
                urcrnry = -llcrnry
            else:
                self._fulldisk = False
                llcrnrx, llcrnry = self(llcrnrlon,llcrnrlat)
                if llcrnrx > 1.e20 or llcrnry > 1.e20:
                    raise ValueError(_lower_left_out_of_bounds)
        elif self.projection == 'nsper':
            self._proj4 = pyproj.Proj(projparams)
            # find major and minor axes of ellipse defining map proj region.
            # h is measured from surface of earth at equator.
            h = projparams['h'] + self.rmajor
            # latitude of horizon on central meridian
            lonmax = 90.-(180./np.pi)*np.arcsin(self.rmajor/h)
            # longitude of horizon on equator
            latmax = 90.-(180./np.pi)*np.arcsin(self.rmajor/h)
            # truncate to nearest hundredth of a degree (to make sure
            # they aren't slightly over the horizon)
            latmax = int(100*latmax)/100.
            lonmax = int(100*lonmax)/100.
            # width and height of visible projection
            P = pyproj.Proj(proj='nsper',a=self.rmajor,\
                b=self.rminor,lat_0=0,lon_0=0,h=projparams['h'])
            x1,y1 = P(0.,latmax); x2,y2 = P(lonmax,0.)
            width = x2; height = y1
            self._height = height
            self._width = width
            if (llcrnrlon == -180 and llcrnrlat == -90 and
                urcrnrlon == 180 and urcrnrlat == 90):
                self._fulldisk = True
                llcrnrx = -width
                llcrnry = -height
                urcrnrx = -llcrnrx
                urcrnry = -llcrnry
            else:
                self._fulldisk = False
                llcrnrx, llcrnry = self(llcrnrlon,llcrnrlat)
                if llcrnrx > 1.e20 or llcrnry > 1.e20:
                    raise ValueError(_lower_left_out_of_bounds)
        elif self.projection in _pseudocyl:
            self._proj4 = pyproj.Proj(projparams)
            # pseudo-cylindrical maps always span the whole world;
            # corners follow from the pole and the antimeridian.
            xtmp,urcrnry = self(projparams['lon_0'],90.)
            urcrnrx,xtmp = self(projparams['lon_0']+180.,0)
            llcrnrx = -urcrnrx
            llcrnry = -urcrnry
            if self.ellipsoid and self.projection in ['kav7','eck4','mbtfpq']:
                msg = "this projection can only be drawn for a perfect sphere"
                raise ValueError(msg)
        else:
            # generic case: let proj do the forward transform directly.
            self._proj4 = pyproj.Proj(projparams)
            llcrnrx, llcrnry = self(llcrnrlon,llcrnrlat)
            if self.projection == 'aeqd': self._fulldisk=False
        # compute x_0, y_0 so ll corner of domain is x=0,y=0.
        # note that for 'cyl' x,y == lon,lat
        if self.projection != 'ob_tran':
            self.projparams['x_0']=-llcrnrx
            self.projparams['y_0']=-llcrnry
        # reset with x_0, y_0.
        if self.projection not in ['cyl','ob_tran']:
            self._proj4 = pyproj.Proj(projparams)
            llcrnry = 0.
            llcrnrx = 0.
        elif self.projection != 'ob_tran':
            llcrnrx = llcrnrlon
            llcrnry = llcrnrlat
        if urcrnrislatlon:
            # upper right corner given in lon/lat: convert to native x/y.
            self.urcrnrlon = urcrnrlon
            self.urcrnrlat = urcrnrlat
            if self.projection not in ['ortho','geos','nsper','aeqd'] + _pseudocyl:
                urcrnrx,urcrnry = self(urcrnrlon,urcrnrlat)
                if self.projection == 'ob_tran':
                    urcrnrx = _rad2dg*urcrnrx; urcrnry = _rad2dg*urcrnry
                    if urcrnrx < 0: urcrnrx = urcrnrx + 360
            elif self.projection in ['ortho','geos','nsper','aeqd']:
                if self._fulldisk:
                    urcrnrx = 2.*self._width
                    urcrnry = 2.*self._height
                else:
                    urcrnrx,urcrnry = self(urcrnrlon,urcrnrlat)
                    if urcrnrx > 1.e20 or urcrnry > 1.e20:
                        raise ValueError(_upper_right_out_of_bounds)
            elif self.projection in _pseudocyl:
                xtmp,urcrnry = self(projparams['lon_0'],90.)
                urcrnrx,xtmp = self(projparams['lon_0']+180.,0)
        else:
            # upper right corner given directly in projection coordinates;
            # derive the corresponding lon/lat by inverse transform.
            urcrnrx = urcrnrlon
            urcrnry = urcrnrlat
            urcrnrlon, urcrnrlat = self(urcrnrx, urcrnry, inverse=True)
            self.urcrnrlon = urcrnrlon
            self.urcrnrlat = urcrnrlat
        # corners of domain.
        self.llcrnrx = llcrnrx
        self.llcrnry = llcrnry
        self.urcrnrx = urcrnrx
        self.urcrnry = urcrnry
        # normalized bounding box (handles projections where the upper
        # right corner has smaller native coordinates than the lower left).
        if urcrnrx > llcrnrx:
            self.xmin = llcrnrx
            self.xmax = urcrnrx
        else:
            self.xmax = llcrnrx
            self.xmin = urcrnrx
        if urcrnry > llcrnry:
            self.ymin = llcrnry
            self.ymax = urcrnry
        else:
            self.ymax = llcrnry
            self.ymin = urcrnry

    def __call__(self, *args, **kw):
        # x,y,inverse=False):
        """
        Calling a Proj class instance with the arguments lon, lat will
        convert lon/lat (in degrees) to x/y native map projection
        coordinates (in meters). If optional keyword 'inverse' is
        True (default is False), the inverse transformation from x/y
        to lon/lat is performed.
        For cylindrical equidistant projection ('cyl'), this
        does nothing (i.e. x,y == lon,lat).
        lon,lat can be either scalar floats or N arrays.
        """
        # accept either a single (N,2) array argument or separate x,y
        if len(args) == 1:
            xy = args[0]
            onearray = True
        else:
            x,y = args
            onearray = False
        if self.projection == 'cyl': # for cyl x,y == lon,lat
            if onearray:
                return xy
            else:
                return x,y
        inverse = kw.get('inverse', False)
        if onearray:
            outxy = self._proj4(xy, inverse=inverse)
        else:
            outx,outy = self._proj4(x, y, inverse=inverse)
        # for true cylindrical projections, rescale x so that it is
        # relative to llcrnrlon (proj works in metres along the equator).
        if inverse:
            if self.projection in ['merc','mill','gall']:
                if self.projection == 'merc':
                    coslat = math.cos(math.radians(self.projparams['lat_ts']))
                    sinlat = math.sin(math.radians(self.projparams['lat_ts']))
                else:
                    coslat = 1.
                    sinlat = 0.
                # radius of curvature of the ellipse perpendicular to
                # the plane of the meridian.
                rcurv = self.rmajor*coslat/math.sqrt(1.-self.esq*sinlat**2)
                if onearray:
                    outxy[:,0] = _rad2dg*(xy[:,0]/rcurv) + self.llcrnrlon
                else:
                    # NOTE(review): bare except used for scalar/sequence
                    # dispatch -- it also hides genuine arithmetic errors.
                    try: # x a scalar or an array
                        outx = _rad2dg*(x/rcurv) + self.llcrnrlon
                    except: # x a sequence
                        outx = [_rad2dg*(xi/rcurv) + self.llcrnrlon for xi in x]
        else:
            if self.projection in ['merc','mill','gall']:
                if self.projection == 'merc':
                    coslat = math.cos(math.radians(self.projparams['lat_ts']))
                    sinlat = math.sin(math.radians(self.projparams['lat_ts']))
                else:
                    coslat = 1.
                    sinlat = 0.
                # radius of curvature of the ellipse perpendicular to
                # the plane of the meridian.
                rcurv = self.rmajor*coslat/math.sqrt(1.-self.esq*sinlat**2)
                if onearray:
                    outxy[:,0] = rcurv*_dg2rad*(xy[:,0]-self.llcrnrlon)
                else:
                    try: # x is a scalar or an array
                        outx = rcurv*_dg2rad*(x-self.llcrnrlon)
                    except: # x is a sequence.
                        outx = [rcurv*_dg2rad*(xi-self.llcrnrlon) for xi in x]
        if onearray:
            return outxy
        else:
            return outx, outy

    def makegrid(self,nx,ny,returnxy=False):
        """
        return arrays of shape (ny,nx) containing lon,lat coordinates of
        an equally spaced native projection grid.
        if returnxy=True, the x,y values of the grid are returned also.
        """
        dx = (self.urcrnrx-self.llcrnrx)/(nx-1)
        dy = (self.urcrnry-self.llcrnry)/(ny-1)
        # np.indices yields row/column indices; scale them to native coords
        x = self.llcrnrx+dx*np.indices((ny,nx),np.float32)[1,:,:]
        y = self.llcrnry+dy*np.indices((ny,nx),np.float32)[0,:,:]
        lons, lats = self(x, y, inverse=True)
        if returnxy:
            return lons, lats, x, y
        else:
            return lons, lats

    def makegrid3d(self,nx,ny,returnxy=False):
        """
        return array of shape (ny,nx, 2) containing lon,lat coordinates of
        an equally spaced native projection grid.
        if returnxy=True, the x,y values of the grid are returned also.
        """
        dx = (self.urcrnrx-self.llcrnrx)/(nx-1)
        dy = (self.urcrnry-self.llcrnry)/(ny-1)
        # pack x and y into a single (ny, nx, 2) array so the transform
        # can be done with one call.
        xy = np.empty((ny,nx,2), np.float64)
        xy[...,0] = self.llcrnrx+dx*np.indices((ny,nx),np.float32)[1,:,:]
        xy[...,1] = self.llcrnry+dy*np.indices((ny,nx),np.float32)[0,:,:]
        lonlat = self(xy, inverse=True)
        if returnxy:
            return lonlat, xy
        else:
            return lonlat
if __name__ == "__main__":
    import sys
    import time

    # Self test: reconstruct AWIPS grid 221 (Lambert conformal over North
    # America) and compare its corner lon/lats against the published values.
    # AWIPS grid 221 parameters
    # (from http://www.nco.ncep.noaa.gov/pmb/docs/on388/tableb.html)
    params = {}
    params['proj'] = 'lcc'
    params['R'] = 6371200
    params['lat_1'] = 50
    params['lat_2'] = 50
    params['lon_0'] = -107
    nx = 349; ny = 277; dx = 32463.41; dy = dx
    awips221 = Proj(params, -145.5, 1.0, (nx - 1) * dx, (ny - 1) * dy,
                    urcrnrislatlon=False)
    # projection coordinates of the 4 corners of AWIPS grid 221
    # (the lower left corner is x=0, y=0 by construction).
    llcornerx = 0.; llcornery = 0.
    lrcornerx = dx * (nx - 1); lrcornery = 0.
    ulcornerx = 0.; ulcornery = dy * (ny - 1)
    urcornerx = dx * (nx - 1); urcornery = dy * (ny - 1)
    llcornerlon, llcornerlat = awips221(llcornerx, llcornery, inverse=True)
    lrcornerlon, lrcornerlat = awips221(lrcornerx, lrcornery, inverse=True)
    urcornerlon, urcornerlat = awips221(urcornerx, urcornery, inverse=True)
    ulcornerlon, ulcornerlat = awips221(ulcornerx, ulcornery, inverse=True)
    sys.stdout.write('4 corners of AWIPS grid 221:\n')
    # bug fix: the original wrote "'%s %s\n' % lon, lat" which raises
    # TypeError (format args must be a tuple, and write takes one argument).
    sys.stdout.write('%s %s\n' % (llcornerlon, llcornerlat))
    sys.stdout.write('%s %s\n' % (lrcornerlon, lrcornerlat))
    sys.stdout.write('%s %s\n' % (urcornerlon, urcornerlat))
    sys.stdout.write('%s %s\n' % (ulcornerlon, ulcornerlat))
    sys.stdout.write('from GRIB docs\n')
    sys.stdout.write('(http://www.nco.ncep.noaa.gov/pmb/docs/on388/tableb.html)\n')
    sys.stdout.write(' -145.5 1.0\n')
    sys.stdout.write(' -68.318 0.897\n')
    sys.stdout.write(' -2.566 46.352\n')
    sys.stdout.write(' 148.639 46.635\n')
    # compute lons and lats for the whole AWIPS grid 221 (349x277).
    # time.clock() was removed in Python 3.8 -- use perf_counter() instead.
    t1 = time.perf_counter()
    lons, lats = awips221.makegrid(nx, ny)
    t2 = time.perf_counter()
    sys.stdout.write('compute lats/lons for all points on AWIPS 221 grid (%sx%s)\n' % (nx, ny))
    sys.stdout.write('max/min lons\n')
    sys.stdout.write('%s %s\n' % (min(np.ravel(lons)), max(np.ravel(lons))))
    sys.stdout.write('max/min lats\n')
    sys.stdout.write('%s %s\n' % (min(np.ravel(lats)), max(np.ravel(lats))))
    # bug fix: "'took %s secs\n' % t2-t1" parsed as "(... % t2) - t1".
    sys.stdout.write('took %s secs\n' % (t2 - t1))
    sys.stdout.write('Same thing but with a single 3-D array\n')
    t1 = time.perf_counter()
    lonlat, xy = awips221.makegrid3d(nx, ny, returnxy=True)
    t2 = time.perf_counter()
    sys.stdout.write('took %s secs\n' % (t2 - t1))
    assert (lons == lonlat[..., 0]).all(), "The longitudes are different"
    assert (lats == lonlat[..., 1]).all(), "The latitudes are different"
| mit | 117e3737c829a90f849fb8fae81abd98 | 41.840855 | 93 | 0.553449 | 3.299671 | false | false | false | false |
matplotlib/basemap | examples/embedding_map_in_wx.py | 2 | 2509 | from __future__ import (absolute_import, division, print_function)
#!/usr/bin/env python
"""
An example of how to use wx or wxagg in an application with the Basemap module
"""
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
from mpl_toolkits.basemap import Basemap
from wx import *
class CanvasFrame(Frame):
    """Top-level wx frame embedding a matplotlib canvas with a Basemap plot."""

    def __init__(self):
        # 550x350 frame with a white background hosting the figure canvas.
        Frame.__init__(self,None,-1,
                       'CanvasFrame',size=(550,350))
        self.SetBackgroundColour(NamedColor("WHITE"))
        self.figure = Figure()
        self.canvas = FigureCanvas(self, -1, self.figure)
        self.ax = self.figure.add_subplot(111)
        # canvas fills the frame and grows with it.
        self.sizer = BoxSizer(VERTICAL)
        self.sizer.Add(self.canvas, 1, LEFT | TOP | GROW)
        self.SetSizer(self.sizer)
        self.Fit()
        self.add_toolbar()  # comment this out for no toolbar
        self.plot_map()

    def add_toolbar(self):
        """Attach the standard matplotlib navigation toolbar to the frame."""
        self.toolbar = NavigationToolbar2Wx(self.canvas)
        self.toolbar.Realize()
        if Platform == '__WXMAC__':
            # Mac platform (OSX 10.3, MacPython) does not seem to cope with
            # having a toolbar in a sizer. This work-around gets the buttons
            # back, but at the expense of having the toolbar at the top
            self.SetToolBar(self.toolbar)
        else:
            # On Windows platform, default window size is incorrect, so set
            # toolbar width to figure width.
            tw, th = self.toolbar.GetSizeTuple()
            fw, fh = self.canvas.GetSizeTuple()
            # By adding toolbar in sizer, we are able to put it at the bottom
            # of the frame - so appearance is closer to GTK version.
            # As noted above, doesn't work for Mac.
            self.toolbar.SetSize(Size(fw, th))
            self.sizer.Add(self.toolbar, 0, LEFT | EXPAND)
        # update the axes menu on the toolbar
        self.toolbar.update()

    def plot_map(self):
        """Draw a simple world map (coastlines, countries, filled land) on the axes."""
        map = Basemap(ax=self.ax)
        map.drawcoastlines()
        map.drawcountries()
        map.drawmapboundary()
        map.fillcontinents(color='coral', lake_color='aqua')
        map.drawmapboundary(fill_color='aqua')
        self.figure.canvas.draw()
class App(App):
    """wx application bootstrapping a single CanvasFrame.

    Note: subclasses (and then shadows) the ``App`` name pulled in by
    ``from wx import *``.
    """
    def OnInit(self):
        'Create the main window and insert the custom frame'
        frame = CanvasFrame()
        frame.Show(True)
        return True
# Instantiate the application (0 => do not redirect stdout/stderr to a wx
# window) and enter the GUI event loop; blocks until all frames are closed.
app = App(0)
app.MainLoop()
| mit | 4e82ab1ac3f2cd4591c4613fe82e034c | 31.584416 | 79 | 0.628537 | 3.81307 | false | false | false | false |
matplotlib/basemap | examples/testgdal.py | 2 | 2723 | from __future__ import (absolute_import, division, print_function)
"""
example showing how to plot data from a DEM file and an ESRI shape file using
gdal (http://pypi.python.org/pypi/GDAL).
"""
from osgeo import gdal, ogr
from mpl_toolkits.basemap import Basemap, cm
import numpy as np
import matplotlib.pyplot as plt
from numpy import ma
# read 2.5 minute U.S. DEM file using gdal.
# (http://www.prism.oregonstate.edu/docs/meta/dem_25m.htm)
gd = gdal.Open('us_25m.dem')
array = gd.ReadAsArray()
# get lat/lon coordinates from DEM file's affine geotransform:
# (x_origin, dx, 0, y_origin, 0, dy) with dy < 0 for a north-up raster.
coords = gd.GetGeoTransform()
nlons = array.shape[1]; nlats = array.shape[0]
delon = coords[1]
delat = coords[5]
lons = coords[0] + delon*np.arange(nlons)
lats = coords[3] + delat*np.arange(nlats)[::-1] # reverse lats

# setup figure.
fig = plt.figure(figsize=(11,6))
# setup basemap instance (Lambert conformal over the CONUS).
m = Basemap(llcrnrlon=-119,llcrnrlat=22,urcrnrlon=-64,urcrnrlat=49,
            projection='lcc',lat_1=33,lat_2=45,lon_0=-95)
# create masked array, reversing data in latitude direction
# (so that data is oriented in increasing latitude, as transform_scalar requires).
topoin = ma.masked_values(array[::-1,:],-999.)
# transform DEM data to a 4 km native projection grid
nx = int((m.xmax-m.xmin)/4000.)+1; ny = int((m.ymax-m.ymin)/4000.)+1
topodat = m.transform_scalar(topoin,lons,lats,nx,ny,masked=True)
# plot DEM image on map.
im = m.imshow(topodat,cmap=cm.GMT_haxby_r)
# draw meridians and parallels.
m.drawparallels(np.arange(20,71,10),labels=[1,0,0,0])
m.drawmeridians(np.arange(-120,-40,10),labels=[0,0,0,1])

def _plot_boundary(mapproj, geom):
    """Plot one (non-nested) ogr geometry as a black polyline on the map."""
    blons = [geom.GetX(i) for i in range(geom.GetPointCount())]
    blats = [geom.GetY(i) for i in range(geom.GetPointCount())]
    # convert to map projection coords and plot.
    x, y = mapproj(blons, blats)
    mapproj.plot(x, y, 'k')

# plot state boundaries from shapefile using ogr.
# bug fix: keep the datasource handle in its own variable -- the original
# reused `g` for nested geometries, so the GetName() call in the title could
# end up on a geometry instead of the datasource.
shp = ogr.Open("st99_d00.shp")
L = shp.GetLayer(0) # data is in 1st layer.
for feat in L: # iterate over features in layer
    geo = feat.GetGeometryRef()
    # iterate over (possibly nested) geometries.
    for count in range(geo.GetGeometryCount()):
        geom = geo.GetGeometryRef(count)
        if not geom.GetGeometryCount(): # just one geometry.
            _plot_boundary(m, geom)
        else: # iterate over nested geometries.
            for cnt in range(geom.GetGeometryCount()):
                _plot_boundary(m, geom.GetGeometryRef(cnt))
# draw colorbar.
m.colorbar(im)
plt.title(gd.GetDescription()+' with state boundaries from '+shp.GetName(),y=1.05)
plt.show()
| mit | a81746060b9b02d3836548ce0afd5dd9 | 40.257576 | 82 | 0.658098 | 2.966231 | false | false | false | false |
pyannote/pyannote-audio | tests/tasks/test_reproducibility.py | 1 | 1892 | import pytorch_lightning as pl
import torch
from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel
from pyannote.audio.tasks import MultiLabelSegmentation, VoiceActivityDetection
from pyannote.database import FileFinder, get_protocol
def setup_tasks(task):
    # Build the Debug speaker-diarization protocol and instantiate the given
    # pyannote task class on it with small/fast settings used by the tests.
    protocol = get_protocol(
        "Debug.SpeakerDiarization.Debug", preprocessors={"audio": FileFinder()}
    )
    vad = task(protocol, duration=0.2, batch_size=32, num_workers=4)
    return protocol, vad
def create_dl(model, task):
    # Instantiating the model with the task and calling setup("fit")
    # finalizes the task (specifications, datasets); the training
    # dataloader is then obtained from the task itself.
    m = model(task=task)
    m.setup("fit")
    return task.train_dataloader()
def get_next5(dl):
    """Return the first five items yielded by iterable *dl* as a list."""
    iterator = iter(dl)
    return [next(iterator) for _ in range(5)]
def test_seeding_ensures_data_loaders():
    "Setting a global seed for the dataloaders ensures that we get data back in the same order"
    # NOTE(review): the seed is set once before the loop; reproducibility
    # presumably relies on dataloader seeding at construction time -- confirm.
    pl.seed_everything(1)
    for task in [VoiceActivityDetection, MultiLabelSegmentation]:
        # build the task/dataloader twice under the same seed ...
        protocol, vad = setup_tasks(task)
        dl = create_dl(SimpleSegmentationModel, vad)
        last5a = get_next5(dl)
        protocol, vad = setup_tasks(task)
        dl = create_dl(SimpleSegmentationModel, vad)
        last5b = get_next5(dl)
        # ... and expect identical batches in identical order.
        for i in range(len(last5b)):
            assert torch.equal(last5a[i]["X"], last5b[i]["X"])
def test_different_seeds():
    "Changing the global seed will change the order of the data that loads"
    for task in [VoiceActivityDetection, MultiLabelSegmentation]:
        # same task built under two different global seeds ...
        protocol, vad = setup_tasks(task)
        pl.seed_everything(4)
        dl = create_dl(SimpleSegmentationModel, vad)
        last5a = get_next5(dl)
        protocol, vad = setup_tasks(task)
        pl.seed_everything(5)
        dl = create_dl(SimpleSegmentationModel, vad)
        last5b = get_next5(dl)
        # ... must yield batches in a different order.
        for i in range(5):
            assert not torch.equal(last5a[i]["X"], last5b[i]["X"])
| mit | cd4a50f02c9f77b44cc06778d030ef47 | 29.031746 | 95 | 0.667019 | 3.372549 | false | false | false | false |
pyannote/pyannote-audio | pyannote/audio/pipelines/voice_activity_detection.py | 1 | 13174 | # MIT License
#
# Copyright (c) 2018-2021 CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Voice activity detection pipelines"""
import tempfile
from copy import deepcopy
from types import MethodType
from typing import Callable, Optional, Text, Union
import numpy as np
from pyannote.core import Annotation, SlidingWindowFeature
from pyannote.database.protocol import SpeakerDiarizationProtocol
from pyannote.metrics.detection import (
DetectionErrorRate,
DetectionPrecisionRecallFMeasure,
)
from pyannote.pipeline.parameter import Categorical, Integer, LogUniform, Uniform
from pytorch_lightning import Trainer
from torch.optim import SGD
from torch_audiomentations.core.transforms_interface import BaseWaveformTransform
from pyannote.audio import Inference
from pyannote.audio.core.callback import GraduallyUnfreeze
from pyannote.audio.core.io import AudioFile
from pyannote.audio.core.pipeline import Pipeline
from pyannote.audio.pipelines.utils import (
PipelineAugmentation,
PipelineInference,
PipelineModel,
get_augmentation,
get_devices,
get_inference,
get_model,
)
from pyannote.audio.tasks import VoiceActivityDetection as VoiceActivityDetectionTask
from pyannote.audio.utils.signal import Binarize
class OracleVoiceActivityDetection(Pipeline):
    """Oracle voice activity detection pipeline"""

    @staticmethod
    def apply(file: AudioFile) -> Annotation:
        """Return groundtruth voice activity detection

        Parameter
        ---------
        file : AudioFile
            Must provide a "annotation" key.

        Returns
        -------
        hypothesis : `pyannote.core.Annotation`
            Speech regions
        """
        reference = file["annotation"]
        speech_regions = reference.get_timeline().support()
        return speech_regions.to_annotation(generator="string", modality="speech")
class VoiceActivityDetection(Pipeline):
"""Voice activity detection pipeline
Parameters
----------
segmentation : Model, str, or dict, optional
Pretrained segmentation (or voice activity detection) model.
Defaults to "pyannote/segmentation".
See pyannote.audio.pipelines.utils.get_model for supported format.
fscore : bool, optional
Optimize (precision/recall) fscore. Defaults to optimizing detection
error rate.
use_auth_token : str, optional
When loading private huggingface.co models, set `use_auth_token`
to True or to a string containing your hugginface.co authentication
token that can be obtained by running `huggingface-cli login`
inference_kwargs : dict, optional
Keywords arguments passed to Inference.
Hyper-parameters
----------------
onset, offset : float
Onset/offset detection thresholds
min_duration_on : float
Remove speech regions shorter than that many seconds.
min_duration_off : float
Fill non-speech regions shorter than that many seconds.
"""
def __init__(
self,
segmentation: PipelineModel = "pyannote/segmentation",
fscore: bool = False,
use_auth_token: Union[Text, None] = None,
**inference_kwargs,
):
super().__init__()
self.segmentation = segmentation
self.fscore = fscore
# load model and send it to GPU (when available and not already on GPU)
model = get_model(segmentation, use_auth_token=use_auth_token)
if model.device.type == "cpu":
(segmentation_device,) = get_devices(needs=1)
model.to(segmentation_device)
inference_kwargs["pre_aggregation_hook"] = lambda scores: np.max(
scores, axis=-1, keepdims=True
)
self._segmentation = Inference(model, **inference_kwargs)
# hyper-parameters used for hysteresis thresholding
self.onset = Uniform(0.0, 1.0)
self.offset = Uniform(0.0, 1.0)
# hyper-parameters used for post-processing i.e. removing short speech regions
# or filling short gaps between speech regions
self.min_duration_on = Uniform(0.0, 1.0)
self.min_duration_off = Uniform(0.0, 1.0)
def default_parameters(self):
# parameters optimized on DIHARD 3 development set
if self.segmentation == "pyannote/segmentation":
return {
"onset": 0.767,
"offset": 0.377,
"min_duration_on": 0.136,
"min_duration_off": 0.067,
}
raise NotImplementedError()
def classes(self):
return ["SPEECH"]
def initialize(self):
"""Initialize pipeline with current set of parameters"""
self._binarize = Binarize(
onset=self.onset,
offset=self.offset,
min_duration_on=self.min_duration_on,
min_duration_off=self.min_duration_off,
)
CACHED_SEGMENTATION = "cache/segmentation/inference"
def apply(self, file: AudioFile, hook: Optional[Callable] = None) -> Annotation:
"""Apply voice activity detection
Parameters
----------
file : AudioFile
Processed file.
hook : callable, optional
Hook called after each major step of the pipeline with the following
signature: hook("step_name", step_artefact, file=file)
Returns
-------
speech : Annotation
Speech regions.
"""
# setup hook (e.g. for debugging purposes)
hook = self.setup_hook(file, hook=hook)
# apply segmentation model (only if needed)
# output shape is (num_chunks, num_frames, 1)
if self.training:
if self.CACHED_SEGMENTATION in file:
segmentations = file[self.CACHED_SEGMENTATION]
else:
segmentations = self._segmentation(file)
file[self.CACHED_SEGMENTATION] = segmentations
else:
segmentations: SlidingWindowFeature = self._segmentation(file)
hook("segmentation", segmentations)
speech: Annotation = self._binarize(segmentations)
speech.uri = file["uri"]
return speech.rename_labels({label: "SPEECH" for label in speech.labels()})
def get_metric(self) -> Union[DetectionErrorRate, DetectionPrecisionRecallFMeasure]:
"""Return new instance of detection metric"""
if self.fscore:
return DetectionPrecisionRecallFMeasure(collar=0.0, skip_overlap=False)
return DetectionErrorRate(collar=0.0, skip_overlap=False)
def get_direction(self):
if self.fscore:
return "maximize"
return "minimize"
class AdaptiveVoiceActivityDetection(Pipeline):
"""Adaptive voice activity detection pipeline
Let M be a pretrained voice activity detection model.
For each file f, this pipeline starts by applying the model to obtain a first set of
speech/non-speech labels.
Those (automatic, possibly erroneous) labels are then used to fine-tune M on the very
same file f into a M_f model, in a self-supervised manner.
Finally, the fine-tuned model M_f is applied to file f to obtain the final (and
hopefully better) speech/non-speech labels.
During fine-tuning, frames where the pretrained model M is very confident are weighted
more than those with lower confidence: the intuition is that the model will use these
high confidence regions to adapt to recording conditions (e.g. background noise) and
hence will eventually be better on the parts of f where it was initially not quite
confident.
Conversely, to avoid overfitting too much to those high confidence regions, we use
data augmentation and freeze all but the final few layers of the pretrained model M.
Parameters
----------
segmentation : Model, str, or dict, optional
Pretrained segmentation model.
Defaults to "hbredin/VoiceActivityDetection-PyanNet-DIHARD".
augmentation : BaseWaveformTransform, or dict, optional
torch_audiomentations waveform transform, used during fine-tuning.
Defaults to no augmentation.
fscore : bool, optional
Optimize (precision/recall) fscore.
Defaults to optimizing detection error rate.
Hyper-parameters
----------------
num_epochs : int
Number of epochs (where one epoch = going through the file once).
batch_size : int
Batch size.
learning_rate : float
Learning rate.
See also
--------
pyannote.audio.pipelines.utils.get_inference
"""
def __init__(
self,
segmentation: PipelineInference = "hbredin/VoiceActivityDetection-PyanNet-DIHARD",
augmentation: PipelineAugmentation = None,
fscore: bool = False,
):
super().__init__()
# pretrained segmentation model
self.inference: Inference = get_inference(segmentation)
self.augmentation: BaseWaveformTransform = get_augmentation(augmentation)
self.fscore = fscore
self.num_epochs = Integer(0, 10)
self.batch_size = Categorical([1, 2, 4, 8, 16, 32])
self.learning_rate = LogUniform(1e-6, 1)
def apply(self, file: AudioFile) -> Annotation:
# create a copy of file
file = dict(file)
# get segmentation scores from pretrained segmentation model
file["seg"] = self.inference(file)
# infer voice activity detection scores
file["vad"] = np.max(file["seg"], axis=1, keepdims=True)
# apply voice activity detection pipeline with default parameters
vad_pipeline = VoiceActivityDetection("vad").instantiate(
{
"onset": 0.5,
"offset": 0.5,
"min_duration_on": 0.0,
"min_duration_off": 0.0,
}
)
file["annotation"] = vad_pipeline(file)
# do not fine tune the model if num_epochs is zero
if self.num_epochs == 0:
return file["annotation"]
# infer model confidence from segmentation scores
# TODO: scale confidence differently (e.g. via an additional binarisation threshold hyper-parameter)
file["confidence"] = np.min(
np.abs((file["seg"] - 0.5) / 0.5), axis=1, keepdims=True
)
# create a dummy train-only protocol where `file` is the only training file
class DummyProtocol(SpeakerDiarizationProtocol):
name = "DummyProtocol"
def train_iter(self):
yield file
vad_task = VoiceActivityDetectionTask(
DummyProtocol(),
duration=self.inference.duration,
weight="confidence",
batch_size=self.batch_size,
augmentation=self.augmentation,
)
vad_model = deepcopy(self.inference.model)
vad_model.task = vad_task
def configure_optimizers(model):
return SGD(model.parameters(), lr=self.learning_rate)
vad_model.configure_optimizers = MethodType(configure_optimizers, vad_model)
with tempfile.TemporaryDirectory() as default_root_dir:
trainer = Trainer(
max_epochs=self.num_epochs,
gpus=1,
callbacks=[GraduallyUnfreeze(epochs_per_stage=self.num_epochs + 1)],
enable_checkpointing=False,
default_root_dir=default_root_dir,
)
trainer.fit(vad_model)
inference = Inference(
vad_model,
device=self.inference.device,
batch_size=self.inference.batch_size,
progress_hook=self.inference.progress_hook,
)
file["vad"] = inference(file)
return vad_pipeline(file)
def get_metric(self) -> Union[DetectionErrorRate, DetectionPrecisionRecallFMeasure]:
"""Return new instance of detection metric"""
if self.fscore:
return DetectionPrecisionRecallFMeasure(collar=0.0, skip_overlap=False)
return DetectionErrorRate(collar=0.0, skip_overlap=False)
def get_direction(self):
if self.fscore:
return "maximize"
return "minimize"
| mit | 34cc3b407945a9c6f64ba6f50a078b93 | 34.41129 | 108 | 0.654521 | 4.289482 | false | false | false | false |
pyannote/pyannote-audio | pyannote/audio/models/blocks/pooling.py | 1 | 3016 | # MIT License
#
# Copyright (c) 2020 CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import warnings
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
class StatsPool(nn.Module):
"""Statistics pooling
Compute temporal mean and (unbiased) standard deviation
and returns their concatenation.
Reference
---------
https://en.wikipedia.org/wiki/Weighted_arithmetic_mean
"""
def forward(
self, sequences: torch.Tensor, weights: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""Forward pass
Parameters
----------
sequences : (batch, channel, frames) torch.Tensor
Sequences.
weights : (batch, frames) torch.Tensor, optional
When provided, compute weighted mean and standard deviation.
Returns
-------
output : (batch, 2 * channel) torch.Tensor
Concatenation of mean and (unbiased) standard deviation.
"""
if weights is None:
mean = sequences.mean(dim=2)
std = sequences.std(dim=2, unbiased=True)
else:
weights = weights.unsqueeze(dim=1)
# (batch, 1, frames)
num_frames = sequences.shape[2]
num_weights = weights.shape[2]
if num_frames != num_weights:
warnings.warn(
f"Mismatch between frames ({num_frames}) and weights ({num_weights}) numbers."
)
weights = F.interpolate(
weights, size=num_frames, mode="linear", align_corners=False
)
v1 = weights.sum(dim=2)
mean = torch.sum(sequences * weights, dim=2) / v1
dx2 = torch.square(sequences - mean.unsqueeze(2))
v2 = torch.square(weights).sum(dim=2)
var = torch.sum(dx2 * weights, dim=2) / (v1 - v2 / v1)
std = torch.sqrt(var)
return torch.cat([mean, std], dim=1)
| mit | 2b1cfe9ce34e1dedfc6351ca4bb0d359 | 33.272727 | 98 | 0.639257 | 4.308571 | false | false | false | false |
pyannote/pyannote-audio | pyannote/audio/utils/metric.py | 1 | 12433 | # The MIT License (MIT)
#
# Copyright (c) 2022- CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
try:
from functools import singledispatchmethod
except ImportError:
from singledispatchmethod import singledispatchmethod
from typing import Dict, List, Optional
import numpy as np
from pyannote.core import (
Annotation,
Segment,
SlidingWindow,
SlidingWindowFeature,
Timeline,
)
from pyannote.metrics.base import BaseMetric
from pyannote.metrics.detection import DetectionPrecisionRecallFMeasure
from pyannote.metrics.diarization import DiarizationErrorRate
from pyannote.audio.utils.permutation import permutate
def discrete_diarization_error_rate(reference: np.ndarray, hypothesis: np.ndarray):
"""Discrete diarization error rate
Parameters
----------
reference : (num_frames, num_speakers) np.ndarray
Discretized reference diarization.
reference[f, s] = 1 if sth speaker is active at frame f, 0 otherwise
hypothesis : (num_frames, num_speakers) np.ndarray
Discretized hypothesized diarization.
hypothesis[f, s] = 1 if sth speaker is active at frame f, 0 otherwise
Returns
-------
der : float
(false_alarm + missed_detection + confusion) / total
components : dict
Diarization error rate components, in number of frames.
Keys are "false alarm", "missed detection", "confusion", and "total".
"""
reference = reference.astype(np.half)
hypothesis = hypothesis.astype(np.half)
# permutate hypothesis to maximize similarity to reference
(hypothesis,), _ = permutate(reference[np.newaxis], hypothesis)
# total speech duration (in number of frames)
total = 1.0 * np.sum(reference)
# false alarm and missed detection (in number of frames)
detection_error = np.sum(hypothesis, axis=1) - np.sum(reference, axis=1)
false_alarm = np.maximum(0, detection_error)
missed_detection = np.maximum(0, -detection_error)
# speaker confusion (in number of frames)
confusion = np.sum((hypothesis != reference) * hypothesis, axis=1) - false_alarm
false_alarm = np.sum(false_alarm)
missed_detection = np.sum(missed_detection)
confusion = np.sum(confusion)
der = (false_alarm + missed_detection + confusion) / total
return (
der,
{
"false alarm": false_alarm,
"missed detection": missed_detection,
"confusion": confusion,
"total": total,
},
)
class DiscreteDiarizationErrorRate(BaseMetric):
"""Compute diarization error rate on discretized annotations"""
@classmethod
def metric_name(cls):
return "discrete diarization error rate"
@classmethod
def metric_components(cls):
return ["total", "false alarm", "missed detection", "confusion"]
def compute_components(
self,
reference,
hypothesis,
uem: Optional[Timeline] = None,
):
return self.compute_components_helper(hypothesis, reference, uem=uem)
@singledispatchmethod
def compute_components_helper(
self, hypothesis, reference, uem: Optional[Timeline] = None
):
klass = hypothesis.__class__.__name__
raise NotImplementedError(
f"Providing hypothesis as {klass} instances is not supported."
)
@compute_components_helper.register
def der_from_ndarray(
self,
hypothesis: np.ndarray,
reference: np.ndarray,
uem: Optional[Timeline] = None,
):
if reference.ndim != 2:
raise NotImplementedError(
"Only (num_frames, num_speakers)-shaped reference is supported."
)
if uem is not None:
raise ValueError("`uem` is not supported with numpy arrays.")
ref_num_frames, ref_num_speakers = reference.shape
if hypothesis.ndim != 2:
raise NotImplementedError(
"Only (num_frames, num_speakers)-shaped hypothesis is supported."
)
hyp_num_frames, hyp_num_speakers = hypothesis.shape
if ref_num_frames != hyp_num_frames:
raise ValueError(
"reference and hypothesis must have the same number of frames."
)
if hyp_num_speakers > ref_num_speakers:
reference = np.pad(
reference, ((0, 0), (0, hyp_num_speakers - ref_num_speakers))
)
elif ref_num_speakers > hyp_num_speakers:
hypothesis = np.pad(
hypothesis, ((0, 0), (0, ref_num_speakers - hyp_num_speakers))
)
return discrete_diarization_error_rate(reference, hypothesis)[1]
@compute_components_helper.register
def der_from_swf(
self,
hypothesis: SlidingWindowFeature,
reference: Annotation,
uem: Optional[Timeline] = None,
):
ndim = hypothesis.data.ndim
if ndim < 2 or ndim > 3:
raise NotImplementedError(
"Only (num_frames, num_speakers) or (num_chunks, num_frames, num_speakers)-shaped "
"hypothesis is supported."
)
# use hypothesis support and resolution when provided as (num_frames, num_speakers)
if ndim == 2:
support = hypothesis.extent
resolution = hypothesis.sliding_window
# use hypothesis support and estimate resolution when provided as (num_chunks, num_frames, num_speakers)
elif ndim == 3:
chunks = hypothesis.sliding_window
num_chunks, num_frames, _ = hypothesis.data.shape
support = Segment(chunks[0].start, chunks[num_chunks - 1].end)
resolution = chunks.duration / num_frames
# discretize reference annotation
reference = reference.discretize(support, resolution=resolution)
# if (num_frames, num_speakers)-shaped, compute just one DER for the whole file
if ndim == 2:
if uem is None:
return self.compute_components_helper(hypothesis.data, reference.data)
if not Timeline([support]).covers(uem):
raise ValueError("`uem` must fully cover hypothesis extent.")
components = self.init_components()
for segment in uem:
h = hypothesis.crop(segment)
r = reference.crop(segment)
segment_component = self.compute_components_helper(h, r)
for name in self.components_:
components[name] += segment_component[name]
return components
# if (num_chunks, num_frames, num_speakers)-shaed, compute one DER per chunk and aggregate
elif ndim == 3:
components = self.init_components()
for window, hypothesis_window in hypothesis:
# Skip any window not fully covered by a segment of the uem
if uem is not None and not uem.covers(Timeline([window])):
continue
reference_window = reference.crop(window, mode="center")
common_num_frames = min(num_frames, reference_window.shape[0])
window_components = self.compute_components_helper(
hypothesis_window[:common_num_frames],
reference_window[:common_num_frames],
)
for name in self.components_:
components[name] += window_components[name]
return components
def compute_metric(self, components):
return (
components["false alarm"]
+ components["missed detection"]
+ components["confusion"]
) / components["total"]
class SlidingDiarizationErrorRate(BaseMetric):
def __init__(self, window: float = 10.0):
super().__init__()
self.window = window
@classmethod
def metric_name(cls):
return "window diarization error rate"
@classmethod
def metric_components(cls):
return ["total", "false alarm", "missed detection", "confusion"]
def compute_components(
self,
reference,
hypothesis,
uem: Optional[Timeline] = None,
):
if uem is None:
raise ValueError(
"SlidingDiarizationErrorRate expects `uem` to be provided."
)
der = DiarizationErrorRate()
window = SlidingWindow(duration=self.window, step=0.5 * self.window)
for chunk in window(uem):
_ = der(
reference.crop(chunk), hypothesis.crop(chunk), uem=Timeline([chunk])
)
return der[:]
def compute_metric(self, components):
return (
components["false alarm"]
+ components["missed detection"]
+ components["confusion"]
) / components["total"]
class MacroAverageFMeasure(BaseMetric):
"""Compute macro-average F-measure
Parameters
----------
collar : float, optional
Duration (in seconds) of collars removed from evaluation around
boundaries of reference segments (one half before, one half after).
beta : float, optional
When beta > 1, greater importance is given to recall.
When beta < 1, greater importance is given to precision.
Defaults to 1.
See also
--------
pyannote.metrics.detection.DetectionPrecisionRecallFMeasure
"""
def metric_components(self):
return self.classes
@classmethod
def metric_name(cls):
return "Macro F-measure"
def __init__(
self,
classes: List[str], # noqa
collar: float = 0.0,
beta: float = 1.0,
**kwargs,
):
self.metric_name_ = self.metric_name()
self.classes = classes
self.components_ = set(self.metric_components())
self.collar = collar
self.beta = beta
self._sub_metrics: Dict[str, DetectionPrecisionRecallFMeasure] = {
label: DetectionPrecisionRecallFMeasure(collar=collar, beta=beta, **kwargs)
for label in self.classes
}
self.reset()
def reset(self):
super().reset()
for sub_metric in self._sub_metrics.values():
sub_metric.reset()
def compute_components(
self, reference: Annotation, hypothesis: Annotation, uem=None, **kwargs
):
details = self.init_components()
for label, sub_metric in self._sub_metrics.items():
details[label] = sub_metric(
reference=reference.subset([label]),
hypothesis=hypothesis.subset([label]),
uem=uem,
**kwargs,
)
return details
def compute_metric(self, detail: Dict[str, float]):
return np.mean(list(detail.values()))
def report(self, display=False):
df = super().report(display=False)
for label, sub_metric in self._sub_metrics.items():
df.loc["TOTAL"][label] = abs(sub_metric)
if display:
print(
df.to_string(
index=True,
sparsify=False,
justify="right",
float_format=lambda f: "{0:.2f}".format(f),
)
)
return df
def __abs__(self):
return np.mean([abs(sub_metric) for sub_metric in self._sub_metrics.values()])
| mit | 38658799bfd59e7ed15113995003754e | 31.632546 | 112 | 0.614494 | 4.28872 | false | false | false | false |
matplotlib/basemap | examples/warpimage.py | 2 | 5106 | from __future__ import (absolute_import, division, print_function)
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
# illustrate use of warpimage method to display an image background
# on the map projection region. Default background is the 'blue
# marble' image from NASA (http://visibleearth.nasa.gov).
# create new figure
fig=plt.figure()
# define orthographic projection centered on North America.
m = Basemap(projection='ortho',lat_0=40,lon_0=-100,resolution='l')
# display a non-default image.
m.warpimage(image='earth_lights_lrg.jpg')
# draw coastlines.
m.drawcoastlines(linewidth=0.5,color='0.5')
# draw lat/lon grid lines every 30 degrees.
m.drawmeridians(np.arange(0,360,30),color='0.5')
m.drawparallels(np.arange(-90,90,30),color='0.5')
plt.title("Lights at Night image warped from 'cyl' to 'ortho' projection",fontsize=12)
print('warp to orthographic map ...')
# create new figure
fig=plt.figure()
# define projection centered on North America.
m = Basemap(projection='mbtfpq',lon_0=-100,resolution='l')
m.bluemarble(scale=0.5)
# draw coastlines.
m.drawcoastlines(linewidth=0.5,color='0.5')
# draw lat/lon grid lines every 30 degrees.
m.drawmeridians(np.arange(0,360,60),color='0.5')
m.drawparallels(np.arange(-90,90,30),color='0.5')
plt.title("Blue Marble image warped from 'cyl' to 'mbtfpq' projection",fontsize=12)
print('warp to McBryde-Thomas Flat-Polar Quartic map ...')
# create new figure
fig=plt.figure()
# define projection centered on North America.
m = Basemap(projection='hammer',lon_0=-100,resolution='l')
m.bluemarble(scale=0.5)
# draw coastlines.
m.drawcoastlines(linewidth=0.5,color='0.5')
# draw lat/lon grid lines every 30 degrees.
m.drawmeridians(np.arange(0,360,60),color='0.5')
m.drawparallels(np.arange(-90,90,30),color='0.5')
plt.title("Blue Marble image warped from 'cyl' to 'hammer' projection",fontsize=12)
print('warp to Hammer map ...')
# create new figure
fig=plt.figure()
# define cylindrical equidistant projection.
m = Basemap(projection='cyl',llcrnrlon=-180,llcrnrlat=-90,urcrnrlon=180,urcrnrlat=90,resolution='l')
# plot (unwarped) rgba image.
im = m.bluemarble(scale=0.5)
# draw coastlines.
m.drawcoastlines(linewidth=0.5,color='0.5')
# draw lat/lon grid lines.
m.drawmeridians(np.arange(-180,180,60),labels=[0,0,0,1],color='0.5')
m.drawparallels(np.arange(-90,90,30),labels=[1,0,0,0],color='0.5')
plt.title("Blue Marble image - native 'cyl' projection",fontsize=12)
print('plot native cylindrical map (no warping needed) ...')
# create new figure
fig=plt.figure()
# define cylindrical equidistant projection.
m = Basemap(projection='cyl',llcrnrlon=0,llcrnrlat=-60,urcrnrlon=360,urcrnrlat=60,resolution='l')
# plot (unwarped) rgba image.
im = m.bluemarble(scale=0.5)
# draw coastlines.
m.drawcoastlines(linewidth=0.5,color='0.5')
# draw lat/lon grid lines.
m.drawmeridians(np.arange(0,360,60),labels=[0,0,0,1],color='0.5')
m.drawparallels(np.arange(-90,90,30),labels=[1,0,0,0],color='0.5')
plt.title("Blue Marble image - non-native 'cyl' projection",fontsize=12)
print('plot non-native cylindrical map (warping needed) ...')
# create new figure
fig=plt.figure()
# define orthographic projection centered on Europe.
m = Basemap(projection='ortho',lat_0=40,lon_0=40,resolution='l')
# plot a gray-scale image specified from a URL.
im = m.warpimage("http://earthobservatory.nasa.gov/Features/BlueMarble/Images/gebco_bathy.5400x2700.jpg")
# draw coastlines.
m.drawcoastlines(linewidth=0.5,color='0.5')
# draw lat/lon grid lines every 30 degrees.
m.drawmeridians(np.arange(0,360,30),color='0.5')
m.drawparallels(np.arange(-90,90,30),color='0.5')
plt.title("Blue Marble image warped from 'cyl' to 'ortho' projection",fontsize=12)
print('warp to orthographic map ...')
# create new figure
fig=plt.figure()
# define Lambert Conformal basemap for North America.
m = Basemap(llcrnrlon=-145.5,llcrnrlat=1.,urcrnrlon=-2.566,urcrnrlat=46.352,\
rsphere=(6378137.00,6356752.3142),lat_1=50.,lon_0=-107.,\
resolution='i',area_thresh=1000.,projection='lcc')
im = m.bluemarble(scale=0.5)
# draw coastlines.
m.drawcoastlines(linewidth=0.5,color='0.5')
# draw parallels and meridians.
# label on left, right and bottom of map.
parallels = np.arange(0.,80,20.)
m.drawparallels(parallels,labels=[1,1,0,1],color='0.5')
meridians = np.arange(10.,360.,30.)
m.drawmeridians(meridians,labels=[1,1,0,1],color='0.5')
plt.title("Blue Marble image warped from 'cyl' to 'lcc' projection",fontsize=12)
print('warp to lambert conformal map ...')
# create new figure
fig=plt.figure()
# define oblique mercator map.
m = Basemap(height=24000000,width=12000000,
resolution=None,projection='omerc',\
lon_0=-100,lat_0=15,lon_2=-120,lat_2=65,lon_1=-50,lat_1=-55)
# plot warped rgba image.
im = m.bluemarble(scale=0.5)
# draw lat/lon grid lines every 20 degrees.
m.drawmeridians(np.arange(0,360,20),color='0.5')
m.drawparallels(np.arange(-80,81,20),color='0.5')
plt.title("Blue Marble image warped from 'cyl' to 'omerc' projection",fontsize=12)
print('warp to oblique mercator map ...')
plt.show()
| mit | 27f2066c070606ce06674c54c7f4578a | 39.848 | 105 | 0.730709 | 2.63876 | false | false | false | false |
pypa/virtualenv | src/virtualenv/activation/via_template.py | 2 | 2260 | import os
import sys
from abc import ABCMeta, abstractmethod
from .activator import Activator
if sys.version_info >= (3, 7):
from importlib.resources import read_binary
else:
from importlib_resources import read_binary
class ViaTemplateActivator(Activator, metaclass=ABCMeta):
@abstractmethod
def templates(self):
raise NotImplementedError
def generate(self, creator):
dest_folder = creator.bin_dir
replacements = self.replacements(creator, dest_folder)
generated = self._generate(replacements, self.templates(), dest_folder, creator)
if self.flag_prompt is not None:
creator.pyenv_cfg["prompt"] = self.flag_prompt
return generated
def replacements(self, creator, dest_folder): # noqa: U100
return {
"__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt,
"__VIRTUAL_ENV__": str(creator.dest),
"__VIRTUAL_NAME__": creator.env_name,
"__BIN_NAME__": str(creator.bin_dir.relative_to(creator.dest)),
"__PATH_SEP__": os.pathsep,
}
def _generate(self, replacements, templates, to_folder, creator):
generated = []
for template in templates:
text = self.instantiate_template(replacements, template, creator)
dest = to_folder / self.as_name(template)
# use write_bytes to avoid platform specific line normalization (\n -> \r\n)
dest.write_bytes(text.encode("utf-8"))
generated.append(dest)
return generated
def as_name(self, template):
return template.name
def instantiate_template(self, replacements, template, creator):
# read content as binary to avoid platform specific line normalization (\n -> \r\n)
binary = read_binary(self.__module__, str(template))
text = binary.decode("utf-8", errors="strict")
for key, value in replacements.items():
value = self._repr_unicode(creator, value)
text = text.replace(key, value)
return text
@staticmethod
def _repr_unicode(creator, value): # noqa: U100
return value # by default, we just let it be unicode
__all__ = [
"ViaTemplateActivator",
]
| mit | eb8d50212154b5abcaf25764eceba4fa | 34.3125 | 91 | 0.633628 | 4.139194 | false | false | false | false |
pypa/virtualenv | tests/unit/discovery/py_info/test_py_info.py | 2 | 19621 | import copy
import functools
import itertools
import json
import logging
import os
import sys
import sysconfig
from collections import namedtuple
from pathlib import Path
from textwrap import dedent
import pytest
from virtualenv.discovery import cached_py_info
from virtualenv.discovery.py_info import PythonInfo, VersionInfo
from virtualenv.discovery.py_spec import PythonSpec
from virtualenv.info import IS_PYPY, fs_supports_symlink
# Interpreter information for the Python currently running the test suite;
# shared by every test below as the ground truth that specs are matched against.
CURRENT = PythonInfo.current_system()
def test_current_as_json():
    """JSON round-trip of the current interpreter info preserves the version tuple."""
    serialized = CURRENT._to_json()
    decoded = json.loads(serialized)
    fields = ("major", "minor", "micro", "releaselevel", "serial")
    assert decoded["version_info"] == dict(zip(fields, sys.version_info))
def test_bad_exe_py_info_raise(tmp_path, session_app_data):
    """Querying a path that is not an interpreter raises, and the error names that path."""
    broken_exe = str(tmp_path)
    with pytest.raises(RuntimeError) as context:
        PythonInfo.from_exe(broken_exe, session_app_data)
    message = str(context.value)
    assert "code" in message
    assert broken_exe in message
def test_bad_exe_py_info_no_raise(tmp_path, caplog, capsys, session_app_data):
    """With raise_on_error=False a broken exe yields None and reports only via logging."""
    caplog.set_level(logging.NOTSET)
    broken_exe = str(tmp_path)
    assert PythonInfo.from_exe(broken_exe, session_app_data, raise_on_error=False) is None
    out, _ = capsys.readouterr()
    assert not out  # nothing may leak to stdout
    messages = [record.message for record in caplog.records if record.name != "filelock"]
    assert len(messages) == 2
    via_cmd, error_detail = messages
    assert "get interpreter info via cmd: " in via_cmd
    assert broken_exe in error_detail
    assert "code" in error_detail
def _current_interpreter_specs():
    """Every spec string the running interpreter is expected to satisfy.

    Starts with the absolute executable path, then each combination of
    implementation alias, version prefix (major / major.minor / full), and
    optional architecture suffix.
    """
    implementations = [CURRENT.implementation]
    if CURRENT.implementation == "CPython":
        implementations.append("python")
    lowered = CURRENT.implementation.lower()
    if lowered != CURRENT.implementation:
        implementations.append(lowered)
    versions = [sys.version_info[0 : i + 1] for i in range(3)]
    architectures = ["", f"-{CURRENT.architecture}"]
    specs = [sys.executable]
    for implementation, version, architecture in itertools.product(
        implementations, versions, architectures
    ):
        dotted = ".".join(str(part) for part in version)
        specs.append(f"{implementation}{dotted}{architecture}")
    return specs


@pytest.mark.parametrize("spec", _current_interpreter_specs())
def test_satisfy_py_info(spec):
    """The current interpreter satisfies every spec derived from itself."""
    assert CURRENT.satisfies(PythonSpec.from_string_spec(spec), True) is True
def test_satisfy_not_arch():
    """A spec requesting the opposite bitness must not be satisfied."""
    other_arch = 64 if CURRENT.architecture == 32 else 32
    spec = PythonSpec.from_string_spec(f"{CURRENT.implementation}-{other_arch}")
    assert CURRENT.satisfies(spec, True) is False
def _generate_not_match_current_interpreter_version():
    """List version strings one step away from the running interpreter's version.

    For each prefix of ``sys.version_info`` (major, major.minor, major.minor.micro)
    every single component is nudged by +/-1, producing specs that must not match.
    """
    specs = []
    for length in range(1, 4):
        current = sys.version_info[:length]
        for position in range(length):
            for delta in (-1, 1):
                nudged = list(current)
                nudged[position] += delta
                specs.append(".".join(str(part) for part in nudged))
    return specs


_NON_MATCH_VER = _generate_not_match_current_interpreter_version()
@pytest.mark.parametrize("spec", _NON_MATCH_VER)
def test_satisfy_not_version(spec):
    """A version one step off from the current interpreter must not be satisfied."""
    near_miss = PythonSpec.from_string_spec(f"{CURRENT.implementation}{spec}")
    assert CURRENT.satisfies(near_miss, True) is False
def test_py_info_cached_error(mocker, tmp_path, session_app_data):
    """A failing lookup is cached: the subprocess probe runs only once per path."""
    run_subprocess = mocker.spy(cached_py_info, "_run_subprocess")
    for _ in range(2):
        with pytest.raises(RuntimeError):
            PythonInfo.from_exe(str(tmp_path), session_app_data)
    assert run_subprocess.call_count == 1
@pytest.mark.skipif(not fs_supports_symlink(), reason="symlink is not supported")
def test_py_info_cached_symlink_error(mocker, tmp_path, session_app_data):
    """Errors are cached per queried path, so a symlink alias triggers a second probe."""
    run_subprocess = mocker.spy(cached_py_info, "_run_subprocess")
    with pytest.raises(RuntimeError):
        PythonInfo.from_exe(str(tmp_path), session_app_data)
    alias = tmp_path / "a"
    alias.symlink_to(tmp_path)
    with pytest.raises(RuntimeError):
        PythonInfo.from_exe(str(alias), session_app_data)
    assert run_subprocess.call_count == 2
def test_py_info_cache_clear(mocker, session_app_data):
    """clear_cache drops the cached interpreter info, forcing the probes to rerun."""
    run_subprocess = mocker.spy(cached_py_info, "_run_subprocess")
    info = PythonInfo.from_exe(sys.executable, session_app_data)
    assert info is not None
    # one probe for the venv interpreter, plus one for its host when running inside a venv
    probes = 1 if info.executable == sys.executable else 2
    assert run_subprocess.call_count >= probes
    PythonInfo.clear_cache(session_app_data)
    assert PythonInfo.from_exe(sys.executable, session_app_data) is not None
    assert run_subprocess.call_count >= 2 * probes
@pytest.mark.skipif(not fs_supports_symlink(), reason="symlink is not supported")
def test_py_info_cached_symlink(mocker, tmp_path, session_app_data):
    """A symlinked interpreter reuses the cached host info; only the link itself is probed."""
    run_subprocess = mocker.spy(cached_py_info, "_run_subprocess")
    direct = PythonInfo.from_exe(sys.executable, session_app_data)
    assert direct is not None
    baseline = run_subprocess.call_count
    # at least one probe for the venv interpreter, one more for its host when inside a venv
    minimum = 1 if direct.executable == sys.executable else 2
    assert baseline >= minimum
    link = tmp_path / "a"
    link.symlink_to(sys.executable)
    # replicate the venv marker next to the symlink so it resolves to the same environment
    pyvenv_cfg = Path(sys.executable).parents[1] / "pyvenv.cfg"
    if pyvenv_cfg.exists():
        (tmp_path / pyvenv_cfg.name).write_text(pyvenv_cfg.read_text())
    link_path = str(link)
    via_link = PythonInfo.from_exe(link_path, session_app_data)
    assert via_link.executable == link_path
    # the host invocation is already cached - only the new symlink needs one probe
    assert run_subprocess.call_count == baseline + 1
# lightweight stand-in for PythonInfo carrying only the fields compared during discovery
PyInfoMock = namedtuple("PyInfoMock", ["implementation", "architecture", "version_info"])
@pytest.mark.parametrize(
    ("target", "position", "discovered"),
    [
        (
            # no exact version available: the same-implementation candidate (index 0) must win over PyPy
            PyInfoMock("CPython", 64, VersionInfo(3, 6, 8, "final", 0)),
            0,
            [
                PyInfoMock("CPython", 64, VersionInfo(3, 6, 9, "final", 0)),
                PyInfoMock("PyPy", 64, VersionInfo(3, 6, 8, "final", 0)),
            ],
        ),
        (
            # no exact version available: the matching 64-bit architecture (index 0) must win
            PyInfoMock("CPython", 64, VersionInfo(3, 6, 8, "final", 0)),
            0,
            [
                PyInfoMock("CPython", 64, VersionInfo(3, 6, 9, "final", 0)),
                PyInfoMock("CPython", 32, VersionInfo(3, 6, 9, "final", 0)),
            ],
        ),
        (
            # implementation outranks version: the older CPython (index 0) beats the exact-version PyPy
            PyInfoMock("CPython", 64, VersionInfo(3, 8, 1, "final", 0)),
            0,
            [
                PyInfoMock("CPython", 32, VersionInfo(2, 7, 12, "rc", 2)),
                PyInfoMock("PyPy", 64, VersionInfo(3, 8, 1, "final", 0)),
            ],
        ),
    ],
)
def test_system_executable_no_exact_match(target, discovered, position, tmp_path, mocker, caplog, session_app_data):
    """Here we should fallback to other compatible"""
    caplog.set_level(logging.DEBUG)
    def _make_py_info(of):
        # clone the running interpreter's info and overwrite the distinguishing fields
        base = copy.deepcopy(CURRENT)
        base.implementation = of.implementation
        base.version_info = of.version_info
        base.architecture = of.architecture
        return base
    discovered_with_path = {}
    names = []
    selected = None
    # lay the discovered interpreters out as files on disk so discovery can "find" them
    for pos, i in enumerate(discovered):
        path = tmp_path / str(pos)
        path.write_text("")
        py_info = _make_py_info(i)
        py_info.system_executable = CURRENT.system_executable
        py_info.executable = CURRENT.system_executable
        py_info.base_executable = str(path)
        if pos == position:
            selected = py_info  # the candidate the resolution is expected to pick
        discovered_with_path[str(path)] = py_info
        names.append(path.name)
    target_py_info = _make_py_info(target)
    mocker.patch.object(target_py_info, "_find_possible_exe_names", return_value=names)
    mocker.patch.object(target_py_info, "_find_possible_folders", return_value=[str(tmp_path)])
    # noinspection PyUnusedLocal
    def func(k, app_data, resolve_to_host, raise_on_error, env):  # noqa: U100
        # serve probe requests from the prepared mapping instead of spawning subprocesses
        return discovered_with_path[k]
    mocker.patch.object(target_py_info, "from_exe", side_effect=func)
    target_py_info.real_prefix = str(tmp_path)
    target_py_info.system_executable = None
    target_py_info.executable = str(tmp_path)
    mapped = target_py_info._resolve_to_system(session_app_data, target_py_info)
    assert mapped.system_executable == CURRENT.system_executable
    found = discovered_with_path[mapped.base_executable]
    assert found is selected
    # the log must show the discovery start, each rejected candidate, and the similarity fallback
    assert caplog.records[0].msg == "discover exe for %s in %s"
    for record in caplog.records[1:-1]:
        assert record.message.startswith("refused interpreter ")
        assert record.levelno == logging.DEBUG
    warn_similar = caplog.records[-1]
    assert warn_similar.levelno == logging.DEBUG
    assert warn_similar.msg.startswith("no exact match found, chosen most similar")
def test_py_info_ignores_distutils_config(monkeypatch, tmp_path):
    """A setup.cfg with [install] overrides in the CWD must not leak into the discovered install paths."""
    raw = f"""
    [install]
    prefix={tmp_path}{os.sep}prefix
    install_purelib={tmp_path}{os.sep}purelib
    install_platlib={tmp_path}{os.sep}platlib
    install_headers={tmp_path}{os.sep}headers
    install_scripts={tmp_path}{os.sep}scripts
    install_data={tmp_path}{os.sep}data
    """
    (tmp_path / "setup.cfg").write_text(dedent(raw))
    monkeypatch.chdir(tmp_path)  # make it the current-directory config distutils would pick up
    py_info = PythonInfo.from_exe(sys.executable)
    distutils = py_info.distutils_install
    # none of the reported install locations may point into the setup.cfg-defined prefixes
    for key, value in distutils.items():
        assert not value.startswith(str(tmp_path)), f"{key}={value}"
def test_discover_exe_on_path_non_spec_name_match(mocker):
    """An interpreter whose executable carries an ABI suffix (e.g. python3.6m) satisfies that exact name."""
    version = CURRENT.version_info
    exe_name = f"python{version.major}.{version.minor}m"
    if sys.platform == "win32":
        exe_name += Path(CURRENT.original_executable).suffix
    mocker.patch.object(CURRENT, "original_executable", str(Path(CURRENT.executable).parent / exe_name))
    spec = PythonSpec.from_string_spec(exe_name)
    assert CURRENT.satisfies(spec, impl_must_match=True) is True
def test_discover_exe_on_path_non_spec_name_not_match(mocker):
    """An executable with a different name (extra "e" prefix) must not satisfy the suffixed spec."""
    version = CURRENT.version_info
    exe_name = f"python{version.major}.{version.minor}m"
    if sys.platform == "win32":
        exe_name += Path(CURRENT.original_executable).suffix
    spec = PythonSpec.from_string_spec(exe_name)
    renamed = Path(CURRENT.executable).parent / f"e{exe_name}"
    mocker.patch.object(CURRENT, "original_executable", str(renamed))
    assert CURRENT.satisfies(spec, impl_must_match=True) is False
@pytest.mark.skipif(IS_PYPY, reason="setuptools distutils patching does not work")
def test_py_info_setuptools():
    """Importing setuptools (which monkeypatches distutils) must not break PythonInfo construction."""
    from setuptools.dist import Distribution
    # reference the import so the distutils-patching side effect definitely happened
    assert Distribution
    PythonInfo()
@pytest.mark.usefixtures("_skip_if_test_in_system")
def test_py_info_to_system_raises(session_app_data, mocker, caplog):
    """When the system interpreter cannot be resolved, the failure is logged and None is returned."""
    caplog.set_level(logging.DEBUG)
    # no candidate folders -> resolving the venv back to its host interpreter must fail
    mocker.patch.object(PythonInfo, "_find_possible_folders", return_value=[])
    result = PythonInfo.from_exe(sys.executable, app_data=session_app_data, raise_on_error=False)
    assert result is None  # raise_on_error=False converts the failure into a None result
    log = caplog.records[-1]
    assert log.levelno == logging.INFO
    expected = f"ignore {sys.executable} due cannot resolve system due to RuntimeError('failed to detect "
    assert expected in log.message
def _stringify_schemes_dict(schemes_dict):
"""
Since this file has from __future__ import unicode_literals, we manually cast all values of mocked install_schemes
to str() as the original schemes are not unicode on Python 2.
"""
return {str(n): {str(k): str(v) for k, v in s.items()} for n, s in schemes_dict.items()}
def test_custom_venv_install_scheme_is_prefered(mocker):
    """If sysconfig ships a dedicated "venv" install scheme, PythonInfo must use it over the defaults."""
    # The paths in this test are Fedora paths, but we set them for nt as well, so the test also works on Windows,
    # despite the actual values are nonsense there.
    # Values were simplified to be compatible with all the supported Python versions.
    default_scheme = {
        "stdlib": "{base}/lib/python{py_version_short}",
        "platstdlib": "{platbase}/lib/python{py_version_short}",
        "purelib": "{base}/local/lib/python{py_version_short}/site-packages",
        "platlib": "{platbase}/local/lib/python{py_version_short}/site-packages",
        "include": "{base}/include/python{py_version_short}",
        "platinclude": "{platbase}/include/python{py_version_short}",
        "scripts": "{base}/local/bin",
        "data": "{base}/local",
    }
    # the "venv" scheme drops the "local" marker from every default path
    venv_scheme = {key: path.replace("local", "") for key, path in default_scheme.items()}
    sysconfig_install_schemes = {
        "posix_prefix": default_scheme,
        "nt": default_scheme,
        "pypy": default_scheme,
        "pypy_nt": default_scheme,
        "venv": venv_scheme,
    }
    if getattr(sysconfig, "get_preferred_scheme", None):
        # define the prefix as sysconfig.get_preferred_scheme did before 3.11
        sysconfig_install_schemes["nt" if os.name == "nt" else "posix_prefix"] = default_scheme
    if sys.version_info[0] == 2:
        sysconfig_install_schemes = _stringify_schemes_dict(sysconfig_install_schemes)
    # On Python < 3.10, the distutils schemes are not derived from sysconfig schemes
    # So we mock them as well to assert the custom "venv" install scheme has priority
    distutils_scheme = {
        "purelib": "$base/local/lib/python$py_version_short/site-packages",
        "platlib": "$platbase/local/lib/python$py_version_short/site-packages",
        "headers": "$base/include/python$py_version_short/$dist_name",
        "scripts": "$base/local/bin",
        "data": "$base/local",
    }
    distutils_schemes = {
        "unix_prefix": distutils_scheme,
        "nt": distutils_scheme,
    }
    if sys.version_info[0] == 2:
        distutils_schemes = _stringify_schemes_dict(distutils_schemes)
    # We need to mock distutils first, so they don't see the mocked sysconfig,
    # if imported for the first time.
    # That can happen if the actual interpreter has the "venv" INSTALL_SCHEME
    # and hence this is the first time we are touching distutils in this process.
    # If distutils saw our mocked sysconfig INSTALL_SCHEMES, we would need
    # to define all install schemes.
    mocker.patch("distutils.command.install.INSTALL_SCHEMES", distutils_schemes)
    mocker.patch("sysconfig._INSTALL_SCHEMES", sysconfig_install_schemes)
    pyinfo = PythonInfo()
    pyver = f"{pyinfo.version_info.major}.{pyinfo.version_info.minor}"
    # the absence of "local" in the reported paths proves the "venv" scheme was picked up
    assert pyinfo.install_path("scripts") == "bin"
    assert pyinfo.install_path("purelib").replace(os.sep, "/") == f"lib/python{pyver}/site-packages"
@pytest.mark.skipif(not (os.name == "posix" and sys.version_info[:2] >= (3, 11)), reason="POSIX 3.11+ specific")
def test_fallback_existent_system_executable(mocker):
    """When sys._base_executable points at a missing binary, fall back to an existing pythonX(.Y) name."""
    current = PythonInfo()
    # Posix may execute a "python" out of a venv but try to set the base_executable
    # to "python" out of the system installation path. PEP 394 informs distributions
    # that "python" is not required and the standard `make install` does not provide one
    # Falsify some data to look like we're in a venv
    current.prefix = current.exec_prefix = "/tmp/tmp.izZNCyINRj/venv"
    current.executable = current.original_executable = os.path.join(current.prefix, "bin/python")
    # Since we don't know if the distribution we're on provides python, use a binary that should not exist
    mocker.patch.object(sys, "_base_executable", os.path.join(os.path.dirname(current.system_executable), "idontexist"))
    mocker.patch.object(sys, "executable", current.executable)
    # ensure it falls back to an alternate binary name that exists
    current._fast_get_system_executable()
    assert os.path.basename(current.system_executable) in [
        f"python{v}" for v in (current.version_info.major, f"{current.version_info.major}.{current.version_info.minor}")
    ]
    assert os.path.exists(current.system_executable)
@pytest.mark.skipif(sys.version_info[:2] != (3, 10), reason="3.10 specific")
def test_uses_posix_prefix_on_debian_3_10_without_venv(mocker):
    """On Debian/Ubuntu 3.10 (which patches in a "posix_local" scheme) the plain
    posix_prefix layout must still be produced when distutils is unavailable."""
    # this is taken from ubuntu 22.04 /usr/lib/python3.10/sysconfig.py
    sysconfig_install_schemes = {
        "posix_prefix": {
            "stdlib": "{installed_base}/{platlibdir}/python{py_version_short}",
            "platstdlib": "{platbase}/{platlibdir}/python{py_version_short}",
            "purelib": "{base}/lib/python{py_version_short}/site-packages",
            "platlib": "{platbase}/{platlibdir}/python{py_version_short}/site-packages",
            "include": "{installed_base}/include/python{py_version_short}{abiflags}",
            "platinclude": "{installed_platbase}/include/python{py_version_short}{abiflags}",
            "scripts": "{base}/bin",
            "data": "{base}",
        },
        "posix_home": {
            "stdlib": "{installed_base}/lib/python",
            "platstdlib": "{base}/lib/python",
            "purelib": "{base}/lib/python",
            "platlib": "{base}/lib/python",
            "include": "{installed_base}/include/python",
            "platinclude": "{installed_base}/include/python",
            "scripts": "{base}/bin",
            "data": "{base}",
        },
        "nt": {
            "stdlib": "{installed_base}/Lib",
            "platstdlib": "{base}/Lib",
            "purelib": "{base}/Lib/site-packages",
            "platlib": "{base}/Lib/site-packages",
            "include": "{installed_base}/Include",
            "platinclude": "{installed_base}/Include",
            "scripts": "{base}/Scripts",
            "data": "{base}",
        },
        "deb_system": {
            "stdlib": "{installed_base}/{platlibdir}/python{py_version_short}",
            "platstdlib": "{platbase}/{platlibdir}/python{py_version_short}",
            "purelib": "{base}/lib/python3/dist-packages",
            "platlib": "{platbase}/{platlibdir}/python3/dist-packages",
            "include": "{installed_base}/include/python{py_version_short}{abiflags}",
            "platinclude": "{installed_platbase}/include/python{py_version_short}{abiflags}",
            "scripts": "{base}/bin",
            "data": "{base}",
        },
        "posix_local": {
            "stdlib": "{installed_base}/{platlibdir}/python{py_version_short}",
            "platstdlib": "{platbase}/{platlibdir}/python{py_version_short}",
            "purelib": "{base}/local/lib/python{py_version_short}/dist-packages",
            "platlib": "{platbase}/local/lib/python{py_version_short}/dist-packages",
            "include": "{installed_base}/local/include/python{py_version_short}{abiflags}",
            "platinclude": "{installed_platbase}/local/include/python{py_version_short}{abiflags}",
            "scripts": "{base}/local/bin",
            "data": "{base}",
        },
    }
    # reset the default in case we're on a system which doesn't have this problem
    sysconfig_get_path = functools.partial(sysconfig.get_path, scheme="posix_local")
    # make it look like python3-distutils is not available
    mocker.patch.dict(sys.modules, {"distutils.command": None})
    mocker.patch("sysconfig._INSTALL_SCHEMES", sysconfig_install_schemes)
    mocker.patch("sysconfig.get_path", sysconfig_get_path)
    mocker.patch("sysconfig.get_default_scheme", return_value="posix_local")
    pyinfo = PythonInfo()
    pyver = f"{pyinfo.version_info.major}.{pyinfo.version_info.minor}"
    # the Debian additions ("local", "dist-packages") must not leak into the reported paths
    assert pyinfo.install_path("scripts") == "bin"
    assert pyinfo.install_path("purelib").replace(os.sep, "/") == f"lib/python{pyver}/site-packages"
| mit | 182297f51a55b5d10da9f732416db3bd | 40.658174 | 120 | 0.645482 | 3.606801 | false | true | false | false |
pypa/virtualenv | src/virtualenv/create/via_global_ref/builtin/cpython/mac_os.py | 2 | 14433 | """The Apple Framework builds require their own customization"""
import logging
import os
import struct
import subprocess
from abc import ABCMeta, abstractmethod
from pathlib import Path
from textwrap import dedent
from virtualenv.create.via_global_ref.builtin.ref import ExePathRefToDest, PathRefToDest, RefMust
from virtualenv.info import IS_MAC_ARM64
from .common import CPython, CPythonPosix, is_mac_os_framework
from .cpython2 import CPython2PosixBase
from .cpython3 import CPython3
class CPythonmacOsFramework(CPython, metaclass=ABCMeta):
    """Base creator for virtual environments built from macOS framework builds of CPython."""
    @classmethod
    def can_describe(cls, interpreter):
        # only applies when the host interpreter is a framework build
        return is_mac_os_framework(interpreter) and super().can_describe(interpreter)
    def create(self):
        super().create()
        # change the install_name of the copied python executables
        target = self.desired_mach_o_image_path()
        current = self.current_mach_o_image_path()
        for src in self._sources:
            if isinstance(src, ExePathRefToDest):
                if src.must == RefMust.COPY or not self.symlinks:
                    exes = [self.bin_dir / src.base]
                    if not self.symlinks:
                        # aliases are separate copies in this case, so they need the same fix-up
                        exes.extend(self.bin_dir / a for a in src.aliases)
                    for exe in exes:
                        fix_mach_o(str(exe), current, target, self.interpreter.max_size)
    @classmethod
    def _executables(cls, interpreter):
        for _, targets, must, when in super()._executables(interpreter):
            # Make sure we use the embedded interpreter inside the framework, even if sys.executable points to the
            # stub executable in ${sys.prefix}/bin.
            # See http://groups.google.com/group/python-virtualenv/browse_thread/thread/17cab2f85da75951
            fixed_host_exe = Path(interpreter.prefix) / "Resources" / "Python.app" / "Contents" / "MacOS" / "Python"
            yield fixed_host_exe, targets, must, when
    @abstractmethod
    def current_mach_o_image_path(self):
        """Install name of the Python image as recorded in the copied executables."""
        raise NotImplementedError
    @abstractmethod
    def desired_mach_o_image_path(self):
        """Install name the copied executables should reference inside the virtualenv."""
        raise NotImplementedError
class CPython2macOsFramework(CPythonmacOsFramework, CPython2PosixBase):
    """CPython 2 framework build creator (non-ARM hosts; the arm64 variant subclasses this)."""
    @classmethod
    def can_create(cls, interpreter):
        if not IS_MAC_ARM64 and super().can_describe(interpreter):
            return super().can_create(interpreter)
        return False
    def current_mach_o_image_path(self):
        return os.path.join(self.interpreter.prefix, "Python")
    def desired_mach_o_image_path(self):
        # the copied image lives directly inside the virtualenv root
        return "@executable_path/../Python"
    @classmethod
    def sources(cls, interpreter):
        yield from super().sources(interpreter)
        # landmark for exec_prefix
        exec_marker_file, to_path, _ = cls.from_stdlib(cls.mappings(interpreter), "lib-dynload")
        yield PathRefToDest(exec_marker_file, dest=to_path)
        # add a copy of the host python image
        exe = Path(interpreter.prefix) / "Python"
        yield PathRefToDest(exe, dest=lambda self, _: self.dest / "Python", must=RefMust.COPY)  # noqa: U101
        # add a symlink to the Resources dir
        resources = Path(interpreter.prefix) / "Resources"
        yield PathRefToDest(resources, dest=lambda self, _: self.dest / "Resources")  # noqa: U101
    @property
    def reload_code(self):
        # wrap the inherited reload code so the framework detection is disabled while it runs
        result = super().reload_code
        result = dedent(
            f"""
        # the bundled site.py always adds the global site package if we're on python framework build, escape this
        import sysconfig
        config = sysconfig.get_config_vars()
        before = config["PYTHONFRAMEWORK"]
        try:
            config["PYTHONFRAMEWORK"] = ""
            {result}
        finally:
            config["PYTHONFRAMEWORK"] = before
        """,
        )
        return result
class CPython2macOsArmFramework(CPython2macOsFramework, CPythonmacOsFramework, CPython2PosixBase):
    """CPython 2 framework build creator for Apple Silicon (arm64) hosts."""
    @classmethod
    def can_create(cls, interpreter):
        # start the super() lookup after CPythonmacOsFramework in the MRO, skipping the non-ARM guard above
        if IS_MAC_ARM64 and super(CPythonmacOsFramework, cls).can_describe(interpreter):
            return super(CPythonmacOsFramework, cls).can_create(interpreter)
        return False
    def create(self):
        # run the regular framework creation, then re-sign the rewritten executable
        super(CPython2macOsFramework, self).create()
        self.fix_signature()
    def fix_signature(self):
        """
        On Apple M1 machines (arm64 chips), rewriting the python executable invalidates its signature.
        In python2 this results in a unusable python exe which just dies.
        As a temporary workaround we can codesign the python exe during the creation process.
        """
        exe = self.exe
        try:
            logging.debug("Changing signature of copied python exe %s", exe)
            bak_dir = exe.parent / "bk"
            # Reset the signing on Darwin since the exe has been modified.
            # Note codesign fails on the original exe, it needs to be copied and moved back.
            bak_dir.mkdir(parents=True, exist_ok=True)
            subprocess.check_call(["cp", str(exe), str(bak_dir)])
            subprocess.check_call(["mv", str(bak_dir / exe.name), str(exe)])
            bak_dir.rmdir()
            metadata = "--preserve-metadata=identifier,entitlements,flags,runtime"
            cmd = ["codesign", "-s", "-", metadata, "-f", str(exe)]
            logging.debug("Changing Signature: %s", cmd)
            subprocess.check_call(cmd)
        except Exception:
            logging.fatal("Could not change MacOS code signing on copied python exe at %s", exe)
            raise
class CPython3macOsFramework(CPythonmacOsFramework, CPython3, CPythonPosix):
    """CPython 3 framework build creator."""
    def current_mach_o_image_path(self):
        return "@executable_path/../../../../Python3"
    def desired_mach_o_image_path(self):
        # references the hidden ".Python" symlink created by sources() below
        return "@executable_path/../.Python"
    @classmethod
    def sources(cls, interpreter):
        yield from super().sources(interpreter)
        # add a symlink to the host python image
        exe = Path(interpreter.prefix) / "Python3"
        yield PathRefToDest(exe, dest=lambda self, _: self.dest / ".Python", must=RefMust.SYMLINK)  # noqa: U101
    @property
    def reload_code(self):
        # wrap the inherited reload code so site.py does not re-add the global site-packages
        result = super().reload_code
        result = dedent(
            f"""
        # the bundled site.py always adds the global site package if we're on python framework build, escape this
        import sys
        before = sys._framework
        try:
            sys._framework = None
            {result}
        finally:
            sys._framework = before
        """,
        )
        return result
def fix_mach_o(exe, current, new, max_size):
    """Rewrite the dylib install name referenced by a Mach-O executable.

    Mach-O (Mach object) is the executable format used on macOS: a header
    followed by load commands and segments.  The copied interpreter still
    references the host framework's Python image through an ``LC_LOAD_DYLIB``
    load command; this rewrites that reference so the executable loads the
    image shipped inside the virtualenv instead.  The pure-Python rewriter is
    tried first; ``install_name_tool`` from Apple's developer tools is the
    fallback.

    :param exe: path of the executable to patch
    :param current: the install name to replace
    :param new: the replacement install name (must not be longer than ``current``)
    :param max_size: maximum file offset to consider (the interpreter's ``max_size``)
    """
    try:
        logging.debug("change Mach-O for %s from %s to %s", exe, current, new)
        _builtin_change_mach_o(max_size)(exe, current, new)
    except Exception as e:
        # message previously named a nonexistent "_builtin_change_mac_o" - keep it in sync with the helper
        logging.warning("Could not call _builtin_change_mach_o: %s. Trying to call install_name_tool instead.", e)
        try:
            cmd = ["install_name_tool", "-change", current, new, exe]
            subprocess.check_call(cmd)
        except Exception:
            logging.fatal("Could not call install_name_tool -- you must have Apple's development tools installed")
            raise
def _builtin_change_mach_o(maxint):
    """Build and return a pure-Python install-name rewriter.

    *maxint* is the interpreter's maximum file offset; it bounds the FileView
    window below.  The returned callable has the signature
    ``mach_o_change(at_path, what, value)``.
    """
    # Mach-O magic numbers (32/64-bit, both byte orders), the fat-binary magic,
    # endianness markers for struct, and the single load command we rewrite
    MH_MAGIC = 0xFEEDFACE  # noqa: N806
    MH_CIGAM = 0xCEFAEDFE  # noqa: N806
    MH_MAGIC_64 = 0xFEEDFACF  # noqa: N806
    MH_CIGAM_64 = 0xCFFAEDFE  # noqa: N806
    FAT_MAGIC = 0xCAFEBABE  # noqa: N806
    BIG_ENDIAN = ">"  # noqa: N806
    LITTLE_ENDIAN = "<"  # noqa: N806
    LC_LOAD_DYLIB = 0xC  # noqa: N806
    class FileView:
        """A proxy for file-like objects that exposes a given view of a file. Modified from macholib."""
        def __init__(self, file_obj, start=0, size=maxint):
            if isinstance(file_obj, FileView):
                self._file_obj = file_obj._file_obj
            else:
                self._file_obj = file_obj
            self._start = start
            self._end = start + size
            self._pos = 0
        def __repr__(self):
            return f"<fileview [{self._start:d}, {self._end:d}] {self._file_obj!r}>"
        def tell(self):
            return self._pos
        def _checkwindow(self, seek_to, op):
            # refuse any operation that would leave the [start, end] window
            if not (self._start <= seek_to <= self._end):
                msg = f"{op} to offset {seek_to:d} is outside window [{self._start:d}, {self._end:d}]"
                raise OSError(msg)
        def seek(self, offset, whence=0):
            seek_to = offset
            if whence == os.SEEK_SET:
                seek_to += self._start
            elif whence == os.SEEK_CUR:
                seek_to += self._start + self._pos
            elif whence == os.SEEK_END:
                seek_to += self._end
            else:
                raise OSError(f"Invalid whence argument to seek: {whence!r}")
            self._checkwindow(seek_to, "seek")
            self._file_obj.seek(seek_to)
            self._pos = seek_to - self._start
        def write(self, content):
            here = self._start + self._pos
            self._checkwindow(here, "write")
            self._checkwindow(here + len(content), "write")
            self._file_obj.seek(here, os.SEEK_SET)
            self._file_obj.write(content)
            self._pos += len(content)
        def read(self, size=maxint):
            assert size >= 0
            here = self._start + self._pos
            self._checkwindow(here, "read")
            size = min(size, self._end - here)
            self._file_obj.seek(here, os.SEEK_SET)
            read_bytes = self._file_obj.read(size)
            self._pos += len(read_bytes)
            return read_bytes
    def read_data(file, endian, num=1):
        """Read a given number of 32-bits unsigned integers from the given file with the given endianness."""
        res = struct.unpack(endian + "L" * num, file.read(num * 4))
        if len(res) == 1:
            return res[0]
        return res
    def mach_o_change(at_path, what, value):
        """Replace a given name (what) in any LC_LOAD_DYLIB command found in the given binary with a new name (value),
        provided it's shorter."""
        def do_macho(file, bits, endian):
            # Read Mach-O header (the magic number is assumed read by the caller)
            cpu_type, cpu_sub_type, file_type, n_commands, size_of_commands, flags = read_data(file, endian, 6)
            # 64-bits header has one more field.
            if bits == 64:
                read_data(file, endian)
            # The header is followed by n commands
            for _ in range(n_commands):
                where = file.tell()
                # Read command header
                cmd, cmd_size = read_data(file, endian, 2)
                if cmd == LC_LOAD_DYLIB:
                    # The first data field in LC_LOAD_DYLIB commands is the offset of the name, starting from the
                    # beginning of the command.
                    name_offset = read_data(file, endian)
                    file.seek(where + name_offset, os.SEEK_SET)
                    # Read the NUL terminated string
                    load = file.read(cmd_size - name_offset).decode()
                    load = load[: load.index("\0")]
                    # If the string is what is being replaced, overwrite it.
                    if load == what:
                        file.seek(where + name_offset, os.SEEK_SET)
                        file.write(value.encode() + b"\0")
                # Seek to the next command
                file.seek(where + cmd_size, os.SEEK_SET)
        def do_file(file, offset=0, size=maxint):
            file = FileView(file, offset, size)
            # Read magic number
            magic = read_data(file, BIG_ENDIAN)
            if magic == FAT_MAGIC:
                # Fat binaries contain nfat_arch Mach-O binaries
                n_fat_arch = read_data(file, BIG_ENDIAN)
                for _ in range(n_fat_arch):
                    # Read arch header
                    cpu_type, cpu_sub_type, offset, size, align = read_data(file, BIG_ENDIAN, 5)
                    do_file(file, offset, size)
            elif magic == MH_MAGIC:
                do_macho(file, 32, BIG_ENDIAN)
            elif magic == MH_CIGAM:
                do_macho(file, 32, LITTLE_ENDIAN)
            elif magic == MH_MAGIC_64:
                do_macho(file, 64, BIG_ENDIAN)
            elif magic == MH_CIGAM_64:
                do_macho(file, 64, LITTLE_ENDIAN)
        # the replacement is written in place, so it must fit within the old name's slot
        assert len(what) >= len(value)
        with open(at_path, "r+b") as f:
            do_file(f)
    return mach_o_change
# public API of this module
__all__ = [
    "CPythonmacOsFramework",
    "CPython2macOsFramework",
    "CPython3macOsFramework",
]
| mit | e6b9c04d148dfb7510b29c58bac3c868 | 41.067055 | 120 | 0.606626 | 3.978219 | false | false | false | false |
pypa/virtualenv | src/virtualenv/run/plugin/base.py | 2 | 2267 | import sys
from collections import OrderedDict
if sys.version_info >= (3, 8):
    from importlib.metadata import entry_points
    # stdlib variant in use - the backport version check below is then irrelevant
    importlib_metadata_version = ()
else:
    from importlib_metadata import entry_points, version
    # (major, minor) of the importlib_metadata backport, used to pick the right entry-points API
    importlib_metadata_version = tuple(int(i) for i in version("importlib_metadata").split(".")[:2])
class PluginLoader:
    """Shared logic for discovering virtualenv plugins via entry points."""
    _OPTIONS = None  # per-subclass cache filled by ComponentBuilder.options
    _ENTRY_POINTS = None  # process-wide cache of the entry_points() scan
    @classmethod
    def entry_points_for(cls, key):
        """Return an ordered mapping of entry point name -> loaded object for group *key*."""
        if sys.version_info >= (3, 10) or importlib_metadata_version >= (3, 6):
            # newer importlib metadata exposes select(); the mapping access is deprecated there
            return OrderedDict((e.name, e.load()) for e in cls.entry_points().select(group=key))
        else:
            return OrderedDict((e.name, e.load()) for e in cls.entry_points().get(key, {}))
    @staticmethod
    def entry_points():
        # scanning installed distribution metadata is expensive - do it only once per process
        if PluginLoader._ENTRY_POINTS is None:
            PluginLoader._ENTRY_POINTS = entry_points()
        return PluginLoader._ENTRY_POINTS
class ComponentBuilder(PluginLoader):
    """Builds one virtualenv component from the implementation the user selects on the CLI."""
    def __init__(self, interpreter, parser, name, possible):
        self.interpreter = interpreter
        self.name = name  # also the name of the CLI option carrying the selection
        self._impl_class = None  # resolved in handle_selected_arg_parse
        self.possible = possible  # mapping of selectable implementation name -> class
        self.parser = parser.add_argument_group(title=name)
        self.add_selector_arg_parse(name, list(self.possible))
    @classmethod
    def options(cls, key):
        # entry points for this component are loaded once per class and then cached
        if cls._OPTIONS is None:
            cls._OPTIONS = cls.entry_points_for(key)
        return cls._OPTIONS
    def add_selector_arg_parse(self, name, choices):  # noqa: U100
        """Add the CLI argument selecting between *choices* - implemented by subclasses."""
        raise NotImplementedError
    def handle_selected_arg_parse(self, options):
        """Resolve the user's selection to an implementation class and add its CLI arguments."""
        selected = getattr(options, self.name)
        if selected not in self.possible:
            raise RuntimeError(f"No implementation for {self.interpreter}")
        self._impl_class = self.possible[selected]
        self.populate_selected_argparse(selected, options.app_data)
        return selected
    def populate_selected_argparse(self, selected, app_data):
        # let the chosen implementation contribute its own options to our argument group
        self.parser.description = f"options for {self.name} {selected}"
        self._impl_class.add_parser_arguments(self.parser, self.interpreter, app_data)
    def create(self, options):
        """Instantiate the selected implementation."""
        return self._impl_class(options, self.interpreter)
# public API of this module
__all__ = [
    "PluginLoader",
    "ComponentBuilder",
]
| mit | e4797bd2b867eb82b99b24bb685c7096 | 31.855072 | 100 | 0.65461 | 3.977193 | false | false | false | false |
pypa/virtualenv | src/virtualenv/create/via_global_ref/builtin/cpython/common.py | 3 | 2323 | from abc import ABCMeta
from collections import OrderedDict
from pathlib import Path
from virtualenv.create.describe import PosixSupports, WindowsSupports
from virtualenv.create.via_global_ref.builtin.ref import RefMust, RefWhen
from ..via_global_self_do import ViaGlobalRefVirtualenvBuiltin
class CPython(ViaGlobalRefVirtualenvBuiltin, metaclass=ABCMeta):
    """Base creator for CPython interpreters on any platform."""
    @classmethod
    def can_describe(cls, interpreter):
        return interpreter.implementation == "CPython" and super().can_describe(interpreter)
    @classmethod
    def exe_stem(cls):
        # executables are named python / pythonX / pythonX.Y
        return "python"
class CPythonPosix(CPython, PosixSupports, metaclass=ABCMeta):
    """Create a CPython virtual environment on POSIX platforms"""
    @classmethod
    def _executables(cls, interpreter):
        host_exe = Path(interpreter.system_executable)
        major, minor = interpreter.version_info.major, interpreter.version_info.minor
        # OrderedDict keys de-duplicate aliases (host_exe.name usually equals one of the generic names)
        targets = OrderedDict((i, None) for i in ["python", f"python{major}", f"python{major}.{minor}", host_exe.name])
        # Python 2 hosts must copy the executable; on 3+ it may be symlinked
        must = RefMust.COPY if interpreter.version_info.major == 2 else RefMust.NA
        yield host_exe, list(targets.keys()), must, RefWhen.ANY
class CPythonWindows(CPython, WindowsSupports, metaclass=ABCMeta):
    """Create a CPython virtual environment on Windows."""
    @classmethod
    def _executables(cls, interpreter):
        # symlink of the python executables does not work reliably, copy always instead
        # - https://bugs.python.org/issue42013
        # - venv
        host = cls.host_python(interpreter)
        # the set collapses the two names when the host executable is itself python.exe
        for path in (host.parent / n for n in {"python.exe", host.name}):
            yield host, [path.name], RefMust.COPY, RefWhen.ANY
        # for more info on pythonw.exe see https://stackoverflow.com/a/30313091
        python_w = host.parent / "pythonw.exe"
        yield python_w, [python_w.name], RefMust.COPY, RefWhen.ANY
    @classmethod
    def host_python(cls, interpreter):
        return Path(interpreter.system_executable)
def is_mac_os_framework(interpreter):
    """Return True when *interpreter* is a macOS framework build of CPython."""
    if interpreter.platform != "darwin":
        return False
    expected = "Python3" if interpreter.version_info.major == 3 else "Python"
    return interpreter.sysconfig_vars.get("PYTHONFRAMEWORK") == expected
# public API of this module
__all__ = [
    "CPython",
    "CPythonPosix",
    "CPythonWindows",
    "is_mac_os_framework",
]
| mit | 9d2d101b428bff1df195f6d9f2bcd22b | 35.296875 | 119 | 0.697374 | 3.943973 | false | false | false | false |
pypa/virtualenv | src/virtualenv/util/lock.py | 2 | 4662 | """holds locking functionality that works across processes"""
import logging
import os
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from pathlib import Path
from threading import Lock, RLock
from filelock import FileLock, Timeout
class _CountedFileLock(FileLock):
    """A FileLock that counts acquires within this process.

    The OS-level lock is taken on the first acquire and released only when the
    matching number of releases arrived, so one instance can be shared between
    threads.
    """
    def __init__(self, lock_file):
        # best-effort creation of the folder holding the lock file
        parent = os.path.dirname(lock_file)
        if not os.path.isdir(parent):
            try:
                os.makedirs(parent)
            except OSError:
                pass
        super().__init__(lock_file)
        self.count = 0  # number of outstanding acquires
        self.thread_safe = RLock()  # guards count and the underlying file lock
    def acquire(self, timeout=None, poll_interval=0.05):
        # timeout/poll_interval are forwarded to filelock.FileLock.acquire
        with self.thread_safe:
            if self.count == 0:  # the first acquirer takes the OS-level lock
                super().acquire(timeout, poll_interval)
            self.count += 1
    def release(self, force=False):
        with self.thread_safe:
            if self.count == 1:  # the last holder releases the OS-level lock
                super().release(force=force)
            self.count = max(self.count - 1, 0)
_lock_store = {}  # lock-file path -> shared _CountedFileLock instance
_store_lock = Lock()  # guards _lock_store
class PathLockBase(metaclass=ABCMeta):
    """Common interface for folder-based locks."""
    def __init__(self, folder):
        path = Path(folder)
        # normalize to an absolute, symlink-free path only when it already exists
        self.path = path.resolve() if path.exists() else path
    def __repr__(self):
        return f"{self.__class__.__name__}({self.path})"
    def __truediv__(self, other):
        """Derive a lock of the same type for the sub-folder *other* (``lock / "name"``)."""
        return type(self)(self.path / other)
    # Python 2 era operator name, kept for callers invoking it directly
    def __div__(self, other):
        return self.__truediv__(other)
    @abstractmethod
    def __enter__(self):
        raise NotImplementedError
    @abstractmethod
    def __exit__(self, exc_type, exc_val, exc_tb):  # noqa: U100
        raise NotImplementedError
    @abstractmethod
    @contextmanager
    def lock_for_key(self, name, no_block=False):  # noqa: U100
        """Context manager holding a reentrant lock scoped to *name* within this folder."""
        raise NotImplementedError
    @abstractmethod
    @contextmanager
    def non_reentrant_lock_for_key(self, name):  # noqa: U100
        """Context manager holding a non-reentrant lock scoped to *name* within this folder."""
        raise NotImplementedError
class ReentrantFileLock(PathLockBase):
    """File-based lock that may be acquired multiple times within the same process."""
    def __init__(self, folder):
        super().__init__(folder)
        self._lock = None  # folder-level lock, created lazily by __enter__
    def _create_lock(self, name=""):
        # share one _CountedFileLock per lock file across the whole process
        lock_file = str(self.path / f"{name}.lock")
        with _store_lock:
            if lock_file not in _lock_store:
                _lock_store[lock_file] = _CountedFileLock(lock_file)
            return _lock_store[lock_file]
    @staticmethod
    def _del_lock(lock):
        # drop the shared lock object from the store once nobody holds it anymore
        if lock is not None:
            with _store_lock:
                with lock.thread_safe:
                    if lock.count == 0:
                        _lock_store.pop(lock.lock_file, None)
    def __del__(self):
        self._del_lock(self._lock)
    def __enter__(self):
        self._lock = self._create_lock()
        self._lock_file(self._lock)
    def __exit__(self, exc_type, exc_val, exc_tb):  # noqa: U100
        self._release(self._lock)
        self._del_lock(self._lock)
        self._lock = None
    def _lock_file(self, lock, no_block=False):
        # multiple processes might be trying to get a first lock... so we cannot check if this directory exist without
        # a lock, but that lock might then become expensive, and it's not clear where that lock should live.
        # Instead here we just ignore if we fail to create the directory.
        try:
            os.makedirs(str(self.path))
        except OSError:
            pass
        # try a near-instant acquire first so we can log before actually blocking
        try:
            lock.acquire(0.0001)
        except Timeout:
            if no_block:
                raise
            logging.debug("lock file %s present, will block until released", lock.lock_file)
            lock.release()  # release the acquire try from above
            lock.acquire()
    @staticmethod
    def _release(lock):
        lock.release()
    @contextmanager
    def lock_for_key(self, name, no_block=False):
        """Hold the lock for *name*; with no_block=True raise Timeout instead of waiting."""
        lock = self._create_lock(name)
        try:
            try:
                self._lock_file(lock, no_block)
                yield
            finally:
                self._release(lock)
        finally:
            self._del_lock(lock)
            lock = None
    @contextmanager
    def non_reentrant_lock_for_key(self, name):
        """Hold a dedicated (not process-shared) file lock for *name*."""
        with _CountedFileLock(str(self.path / f"{name}.lock")):
            yield
class NoOpFileLock(PathLockBase):
    """Lock implementation that performs no locking at all.

    Direct use as a context manager is intentionally unsupported; only the
    per-key helpers are provided, and they yield without taking any lock.
    """

    def __enter__(self):
        raise NotImplementedError

    def __exit__(self, exc_type, exc_val, exc_tb):  # noqa: U100
        raise NotImplementedError

    @contextmanager
    def lock_for_key(self, name, no_block=False):  # noqa: U100
        yield

    @contextmanager
    def non_reentrant_lock_for_key(self, name):  # noqa: U100
        yield
# Public API of this module.
__all__ = [
    "NoOpFileLock",
    "ReentrantFileLock",
    "Timeout",
]
| mit | 6fb4360426ec7874fb10b327d5bd2fe3 | 26.916168 | 118 | 0.575933 | 3.954198 | false | false | false | false |
zarr-developers/numcodecs | numcodecs/delta.py | 1 | 2845 | import numpy as np
from .abc import Codec
from .compat import ensure_ndarray, ndarray_copy
class Delta(Codec):
    """Codec to encode data as the difference between adjacent values.

    Parameters
    ----------
    dtype : dtype
        Data type to use for decoded data.
    astype : dtype, optional
        Data type to use for encoded data.

    Notes
    -----
    If `astype` is an integer data type, please ensure that it is
    sufficiently large to store encoded values. No checks are made and data
    may become corrupted due to integer overflow if `astype` is too small.
    Note also that the encoded data for each chunk includes the absolute
    value of the first element in the chunk, and so the encoded data type in
    general needs to be large enough to store absolute values from the array.

    Examples
    --------
    >>> import numcodecs
    >>> import numpy as np
    >>> x = np.arange(100, 120, 2, dtype='i2')
    >>> codec = numcodecs.Delta(dtype='i2', astype='i1')
    >>> y = codec.encode(x)
    >>> y
    array([100, 2, 2, 2, 2, 2, 2, 2, 2, 2], dtype=int8)
    >>> z = codec.decode(y)
    >>> z
    array([100, 102, 104, 106, 108, 110, 112, 114, 116, 118], dtype=int16)

    """

    codec_id = 'delta'

    def __init__(self, dtype, astype=None):
        self.dtype = np.dtype(dtype)
        if astype is None:
            self.astype = self.dtype
        else:
            self.astype = np.dtype(astype)
        if self.dtype == object or self.astype == object:
            raise ValueError('object arrays are not supported')

    def encode(self, buf):
        """Return *buf* delta-encoded as a flat array of ``astype``."""
        # normalise input
        arr = ensure_ndarray(buf).view(self.dtype)
        # flatten to simplify implementation
        arr = arr.reshape(-1, order='A')
        # setup encoded output
        enc = np.empty_like(arr, dtype=self.astype)
        # FIX: guard zero-length input; ``enc[0] = arr[0]`` raised IndexError
        # for empty buffers.
        if enc.size:
            # set first element (absolute value anchors the reconstruction)
            enc[0] = arr[0]
            # compute differences
            enc[1:] = np.diff(arr)
        return enc

    def decode(self, buf, out=None):
        """Reverse :meth:`encode` via a cumulative sum.

        If *out* is given, the decoded data is copied into it.
        """
        # normalise input
        enc = ensure_ndarray(buf).view(self.astype)
        # flatten to simplify implementation
        enc = enc.reshape(-1, order='A')
        # setup decoded output
        dec = np.empty_like(enc, dtype=self.dtype)
        # decode differences (cumsum of an empty array is a no-op)
        np.cumsum(enc, out=dec)
        # handle output
        out = ndarray_copy(dec, out)
        return out

    def get_config(self):
        # override to handle encoding dtypes
        return dict(
            id=self.codec_id,
            dtype=self.dtype.str,
            astype=self.astype.str
        )

    def __repr__(self):
        r = '{}(dtype={!r}'.format(type(self).__name__, self.dtype.str)
        if self.astype != self.dtype:
            r += ', astype=%r' % self.astype.str
        r += ')'
        return r
| mit | d960141b6d73e7d6ed66cf39808a2afb | 26.355769 | 77 | 0.576098 | 3.907967 | false | false | false | false |
zarr-developers/numcodecs | numcodecs/astype.py | 1 | 2237 | import numpy as np
from .abc import Codec
from .compat import ndarray_copy, ensure_ndarray
class AsType(Codec):
    """Filter to convert data between different types.

    Parameters
    ----------
    encode_dtype : dtype
        Data type to use for encoded data.
    decode_dtype : dtype, optional
        Data type to use for decoded data.

    Notes
    -----
    If `encode_dtype` is of lower precision than `decode_dtype`, please be
    aware that data loss can occur by writing data to disk using this filter.
    No checks are made to ensure the casting will work in that direction and
    data corruption will occur.

    Examples
    --------
    >>> import numcodecs
    >>> import numpy as np
    >>> x = np.arange(100, 120, 2, dtype=np.int8)
    >>> x
    array([100, 102, 104, 106, 108, 110, 112, 114, 116, 118], dtype=int8)
    >>> f = numcodecs.AsType(encode_dtype=x.dtype, decode_dtype=np.int16)
    >>> y = f.decode(x)
    >>> y
    array([100, 102, 104, 106, 108, 110, 112, 114, 116, 118], dtype=int16)
    >>> z = f.encode(y)
    >>> z
    array([100, 102, 104, 106, 108, 110, 112, 114, 116, 118], dtype=int8)

    """

    codec_id = 'astype'

    def __init__(self, encode_dtype, decode_dtype):
        self.encode_dtype = np.dtype(encode_dtype)
        self.decode_dtype = np.dtype(decode_dtype)

    def encode(self, buf):
        # View the raw buffer as the decoded dtype, then cast (copies).
        decoded_view = ensure_ndarray(buf).view(self.decode_dtype)
        return decoded_view.astype(self.encode_dtype)

    def decode(self, buf, out=None):
        # View the raw buffer as the encoded dtype, cast back, then copy
        # into the caller-provided buffer if one was given.
        encoded_view = ensure_ndarray(buf).view(self.encode_dtype)
        decoded = encoded_view.astype(self.decode_dtype)
        return ndarray_copy(decoded, out)

    def get_config(self):
        return {
            'id': self.codec_id,
            'encode_dtype': self.encode_dtype.str,
            'decode_dtype': self.decode_dtype.str,
        }

    def __repr__(self):
        return (
            f'{type(self).__name__}'
            f'(encode_dtype={self.encode_dtype.str!r}, '
            f'decode_dtype={self.decode_dtype.str!r})'
        )
| mit | d33b5a8e5e694ffe3cf7227c8105b26d | 25.630952 | 77 | 0.571301 | 3.728333 | false | false | false | false |
mar10/wsgidav | wsgidav/samples/hg_dav_provider.py | 1 | 22535 | # -*- coding: utf-8 -*-
# (c) 2009-2022 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
DAV provider that publishes a Mercurial repository.
Note: This is **not** production code!
The repository is rendered as three top level collections.
edit:
Contains the working directory, i.e. all files. This includes uncommitted
changes and untracked new files.
This folder is writable.
released:
Contains the latest committed files, also known as 'tip'.
This folder is read-only.
archive:
Contains the last 10 revisions as sub-folders.
This folder is read-only.
Sample layout::
/<share>/
edit/
server/
ext_server.py
README.txt
released/
archive/
19/
18/
...
Supported features:
#. Copying or moving files from ``/edit/..`` to the ``/edit/..`` folder will
result in a ``hg copy`` or ``hg rename``.
#. Deleting resources from ``/edit/..`` will result in a ``hg remove``.
#. Copying or moving files from ``/edit/..`` to the ``/released`` folder will
result in a ``hg commit``.
Note that the destination path is ignored, instead the source path is used.
So a user can drag a file or folder from somewhere under the ``edit/..``
directory and drop it directly on the ``released`` directory to commit
changes.
#. To commit all changes, simply drag'n'drop the ``/edit`` folder on the
``/released`` folder.
#. Creating new collections results in creation of a file called ``.directory``,
which is then ``hg add`` ed since Mercurial doesn't track directories.
#. Some attributes are published as live properties, such as ``{hg:}date``.
Known limitations:
#. This 'commit by drag-and-drop' only works, if the WebDAV clients produces
MOVE or COPY requests. Alas, some clients will send PUT, MKCOL, ... sequences
instead.
#. Adding and then removing a file without committing after the 'add' will
leave this file on disk (untracked)
   This happens for example with lock files that Open Office Write and other
applications will create.
#. Dragging the 'edit' folder onto 'released' with Windows File Explorer will
remove the folder in the explorer view, although WsgiDAV did not delete it.
This seems to be done by the client.
See:
http://mercurial.selenic.com/wiki/MercurialApi
Requirements:
``easy_install mercurial`` or install the API as non-standalone version
from here: http://mercurial.berkwood.com/
http://mercurial.berkwood.com/binaries/mercurial-1.4.win32-py2.6.exe
"""
import os
import sys
import time
from hashlib import md5
from pprint import pprint
from wsgidav import util
from wsgidav.dav_error import HTTP_FORBIDDEN, DAVError
from wsgidav.dav_provider import DAVProvider, _DAVResource
from wsgidav.samples.dav_provider_tools import VirtualCollection
try:
import mercurial.ui
from mercurial import commands, hg
from mercurial.__version__ import version as hgversion
# from mercurial import util as hgutil
except ImportError:
print(
"Could not import Mercurial API. Try 'easy_install -U mercurial'.",
file=sys.stderr,
)
raise
__docformat__ = "reStructuredText en"

_logger = util.get_module_logger(__name__)

# Chunk size (bytes) used when opening files for buffered writes.
BUFFER_SIZE = 8192
# ============================================================================
# HgResource
# ============================================================================
class HgResource(_DAVResource):
    """WebDAV resource mapped to a file or folder in a Mercurial repository.

    `rev` selects the revision (None = working directory, "tip", or a
    numeric revision id); `localHgPath` is the repo-relative path using
    '/' separators.
    """

    def __init__(self, path, is_collection, environ, rev, localHgPath):
        super().__init__(path, is_collection, environ)
        self.rev = rev
        self.localHgPath = localHgPath
        self.absFilePath = self._getFilePath()
        # NOTE(review): the second assert assumes Windows-style absolute
        # paths (backslash separators); on POSIX `absFilePath` contains "/".
        # Confirm intended platform support for this sample provider.
        assert "\\" not in self.localHgPath
        assert "/" not in self.absFilePath
        if is_collection:
            self.fctx = None
        else:
            # Change Context for the requested revision:
            # rev=None: current working dir
            # rev="tip": TIP
            # rev=<int>: Revision ID
            wdctx = self.provider.repo[self.rev]
            self.fctx = wdctx[self.localHgPath]

    def _getFilePath(self, *addParts):
        """Return the absolute filesystem path for this resource (+ parts)."""
        parts = self.localHgPath.split("/")
        if addParts:
            parts.extend(addParts)
        return os.path.join(self.provider.repo.root, *parts)

    def _commit(self, message):
        """Commit this resource's path (with addremove) as the current user."""
        user = self.environ.get("wsgidav.auth.user_name") or "Anonymous"
        commands.commit(
            self.provider.ui,
            self.provider.repo,
            self.localHgPath,
            addremove=True,
            user=user,
            message=message,
        )

    def _check_write_access(self):
        """Raise HTTP_FORBIDDEN, if resource is unwritable."""
        if self.rev is not None:
            # Only working directory may be edited
            raise DAVError(HTTP_FORBIDDEN)

    def get_content_length(self):
        if self.is_collection:
            return None
        return self.fctx.size()

    def get_content_type(self):
        if self.is_collection:
            return None
        return util.guess_mime_type(self.path)

    def get_creation_date(self):
        # Creation date is not tracked per revision here.
        return None  # TODO

    def get_display_name(self):
        if self.is_collection or self.fctx.filerev() is None:
            return self.name
        return "%s@%s" % (self.name, self.fctx.filerev())

    def get_etag(self):
        """Return an ETag derived from path, modification time, and size."""
        # FIX: hashlib.md5() requires bytes; hashing the str path raised
        # TypeError on Python 3.
        return (
            md5(self.path.encode("utf-8")).hexdigest()
            + "-"
            + util.to_str(self.get_last_modified())
            + "-"
            + str(self.get_content_length())
        )

    def get_last_modified(self):
        if self.is_collection:
            return None
        # fctx.date() is a (secs, tz-ofs) tuple; return the seconds part
        return self.fctx.date()[0]

    def support_ranges(self):
        return False

    def get_member_names(self):
        assert self.is_collection
        cache = self.environ["wsgidav.hg.cache"][util.to_str(self.rev)]
        dirinfos = cache["dirinfos"]
        if self.localHgPath not in dirinfos:
            return []
        # Sub-collections first, then files (see provider._get_repo_info)
        return dirinfos[self.localHgPath][0] + dirinfos[self.localHgPath][1]

    def get_member(self, name):
        # Rely on provider to get member instances
        assert self.is_collection
        return self.provider.get_resource_inst(
            util.join_uri(self.path, name), self.environ
        )

    def get_display_info(self):
        if self.is_collection:
            return {"type": "Directory"}
        return {"type": "File"}

    def get_property_names(self, *, is_allprop):
        """Return list of supported property names in Clark Notation.

        See DAVResource.get_property_names()
        """
        # Let base class implementation add supported live and dead properties
        propNameList = super().get_property_names(is_allprop=is_allprop)
        # Add custom live properties (report on 'allprop' and 'propnames')
        if self.fctx:
            propNameList.extend(
                [
                    "{hg:}branch",
                    "{hg:}date",
                    "{hg:}description",
                    "{hg:}filerev",
                    "{hg:}rev",
                    "{hg:}user",
                ]
            )
        return propNameList

    def get_property_value(self, name):
        """Return the value of a property.

        See get_property_value()
        """
        # Supported custom live properties
        if name == "{hg:}branch":
            return self.fctx.branch()
        elif name == "{hg:}date":
            # (secs, tz-ofs)
            return util.to_str(self.fctx.date()[0])
        elif name == "{hg:}description":
            return self.fctx.description()
        elif name == "{hg:}filerev":
            return util.to_str(self.fctx.filerev())
        elif name == "{hg:}rev":
            return util.to_str(self.fctx.rev())
        elif name == "{hg:}user":
            return util.to_str(self.fctx.user())
        # Let base class implementation report live and dead properties
        return super().get_property_value(name)

    def set_property_value(self, name, value, dry_run=False):
        """Set or remove property value (not supported: read-only properties).

        See DAVResource.set_property_value()
        """
        raise DAVError(HTTP_FORBIDDEN)

    def prevent_locking(self):
        """Return True, to prevent locking.

        See prevent_locking()
        """
        if self.rev is not None:
            # Only working directory may be locked
            return True
        return False

    def create_empty_resource(self, name):
        """Create and return an empty (length-0) resource as member of self.

        See DAVResource.create_empty_resource()
        """
        assert self.is_collection
        self._check_write_access()
        filepath = self._getFilePath(name)
        # Touch the file, then put it under version control
        with open(filepath, "w"):
            pass
        commands.add(self.provider.ui, self.provider.repo, filepath)
        # get_resource_inst() won't work, because the cached manifest is outdated
        return HgResource(
            self.path.rstrip("/") + "/" + name,
            False,
            self.environ,
            self.rev,
            self.localHgPath + "/" + name,
        )

    def create_collection(self, name):
        """Create a new collection as member of self.

        A dummy member is created, because Mercurial doesn't handle folders.
        """
        assert self.is_collection
        self._check_write_access()
        collpath = self._getFilePath(name)
        os.mkdir(collpath)
        filepath = self._getFilePath(name, ".directory")
        with open(filepath, "w") as f:
            f.write("Created by WsgiDAV.")
        commands.add(self.provider.ui, self.provider.repo, filepath)

    def get_content(self):
        """Open content as a stream for reading.

        See DAVResource.get_content()
        """
        assert not self.is_collection
        d = self.fctx.data()
        # NOTE(review): fctx.data() is bytes; assumes util.StringIO is a
        # compat helper that wraps bytes - confirm, else io.BytesIO is needed.
        return util.StringIO(d)

    def begin_write(self, *, content_type=None):
        """Open content as a stream for writing.

        See DAVResource.begin_write()
        """
        assert not self.is_collection
        self._check_write_access()
        mode = "wb"
        # GC issue 57: always store as binary
        return open(self.absFilePath, mode, BUFFER_SIZE)

    def end_write(self, *, with_errors):
        """Called when PUT has finished writing.

        See DAVResource.end_write()
        """
        if not with_errors:
            commands.add(self.provider.ui, self.provider.repo, self.localHgPath)

    def support_recursive_delete(self):
        """Return True, if delete() may be called on non-empty collections
        (see comments there)."""
        return True

    def delete(self):
        """Remove this resource (recursive)."""
        self._check_write_access()
        filepath = self._getFilePath()
        commands.remove(self.provider.ui, self.provider.repo, filepath, force=True)

    def handle_copy(self, dest_path, *, depth_infinity):
        """Handle a COPY request natively."""
        destType, destHgPath = util.pop_path(dest_path)
        destHgPath = destHgPath.strip("/")
        ui = self.provider.ui
        repo = self.provider.repo
        _logger.info("handle_copy %s -> %s" % (self.localHgPath, destHgPath))
        if self.rev is None and destType == "edit":
            # COPY /edit/a/b to /edit/c/d: turn into 'hg copy -f a/b c/d'
            commands.copy(ui, repo, self.localHgPath, destHgPath, force=True)
        elif self.rev is None and destType == "released":
            # COPY /edit/a/b to /released/c/d
            # This is interpreted as 'hg commit a/b' (ignoring the dest. path)
            self._commit("WsgiDAV commit (COPY %s -> %s)" % (self.path, dest_path))
        else:
            raise DAVError(HTTP_FORBIDDEN)
        # Return True: request was handled
        return True

    def handle_move(self, dest_path):
        """Handle a MOVE request natively."""
        destType, destHgPath = util.pop_path(dest_path)
        destHgPath = destHgPath.strip("/")
        ui = self.provider.ui
        repo = self.provider.repo
        # FIX: log message said "handle_copy" (copy-paste error)
        _logger.info("handle_move %s -> %s" % (self.localHgPath, destHgPath))
        if self.rev is None and destType == "edit":
            # MOVE /edit/a/b to /edit/c/d: turn into 'hg rename -f a/b c/d'
            commands.rename(ui, repo, self.localHgPath, destHgPath, force=True)
        elif self.rev is None and destType == "released":
            # MOVE /edit/a/b to /released/c/d
            # This is interpreted as 'hg commit a/b' (ignoring the dest. path)
            self._commit("WsgiDAV commit (MOVE %s -> %s)" % (self.path, dest_path))
        else:
            raise DAVError(HTTP_FORBIDDEN)
        # Return True: request was handled
        return True
# ============================================================================
# HgResourceProvider
# ============================================================================
class HgResourceProvider(DAVProvider):
    """
    DAV provider that serves a VirtualResource derived structure.

    Opens the Mercurial repository at `repoRoot`, verifies it, and serves
    it through HgResource instances.
    """

    def __init__(self, repoRoot):
        super().__init__()
        self.repoRoot = repoRoot
        print("Mercurial version %s" % hgversion)
        self.ui = mercurial.ui.ui()
        self.repo = hg.repository(self.ui, repoRoot)
        self.ui.status("Connected to repository %s\n" % self.repo.root)
        self.repoRoot = self.repo.root
        # Some commands (remove) seem to expect cwd set to the repo
        # TODO: try to go along without this, because it prevents serving
        #       multiple repos. Instead pass absolute paths to the commands.
        os.chdir(self.repo.root)
        # Verify integrity of the repository
        _logger.warning("Verify repository '%s' tree..." % self.repo.root)
        commands.verify(self.ui, self.repo)
        print("Status:")
        pprint(self.repo.status())
        self.repo.ui.status(
            "the default user_name to be used in commits: %s\n"
            % self.repo.ui.user_name()
        )
        self.ui.status("Expandpath: %s\n" % self.repo.ui.expandpath(repoRoot))
        # Capture command output via push/pop buffer and log a summary
        print("Working directory state summary:")
        self.ui.pushbuffer()
        commands.summary(self.ui, self.repo, remote=False)
        res = self.ui.popbuffer().strip()
        reslines = [tuple(line.split(":", 1)) for line in res.split("\n")]
        pprint(reslines)
        print("Repository state summary:")
        self.ui.pushbuffer()
        commands.identify(self.ui, self.repo, num=True, id=True, branch=True, tags=True)
        res = self.ui.popbuffer().strip()
        reslines = [tuple(line.split(":", 1)) for line in res.split("\n")]
        pprint(reslines)
        self._get_log()

    def _get_log(self, limit=None):
        """Read log entries into a list of dictionaries."""
        self.ui.pushbuffer()
        commands.log(self.ui, self.repo, limit=limit, date=None, rev=None, user=None)
        res = self.ui.popbuffer().strip()

        logList = []
        for logentry in res.split("\n\n"):
            log = {}
            logList.append(log)
            for line in logentry.split("\n"):
                k, v = line.split(":", 1)
                # NOTE(review): other hg log fields (e.g. 'branch', 'parent')
                # would trip this assert - confirm expected output format.
                assert k in ("changeset", "tag", "user", "date", "summary")
                log[k.strip()] = v.strip()
            log["parsed_date"] = util.parse_time_string(log["date"])
            # 'changeset' has the form '<local_id>:<unique hash>'
            local_id, unid = log["changeset"].split(":")
            log["local_id"] = int(local_id)
            log["unid"] = unid
        return logList

    def _get_repo_info(self, environ, rev, reload=False):
        """Return a dictionary containing all files under source control.

        dirinfos:
            Dictionary containing direct members for every collection.
            {folderpath: (collectionlist, filelist), ...}
        files:
            Sorted list of all file paths in the manifest.
        filedict:
            Dictionary containing all files under source control.
        """
        # Per-request cache, keyed by revision
        caches = environ.setdefault("wsgidav.hg.cache", {})
        if caches.get(util.to_str(rev)) is not None:
            _logger.debug("_get_repo_info(%s): cache hit." % rev)
            return caches[util.to_str(rev)]

        start_time = time.time()
        self.ui.pushbuffer()
        commands.manifest(self.ui, self.repo, rev)
        res = self.ui.popbuffer()
        files = []
        dirinfos = {}
        filedict = {}
        for file in res.split("\n"):
            if file.strip() == "":
                continue
            file = file.replace("\\", "/")
            # add all parent directories to 'dirinfos'
            parents = file.split("/")
            if len(parents) >= 1:
                p1 = ""
                for i in range(0, len(parents) - 1):
                    p2 = parents[i]
                    dir = dirinfos.setdefault(p1, ([], []))
                    if p2 not in dir[0]:
                        dir[0].append(p2)
                    if p1 == "":
                        p1 = p2
                    else:
                        p1 = "%s/%s" % (p1, p2)
                # register the leaf name as a file of its parent folder
                dirinfos.setdefault(p1, ([], []))[1].append(parents[-1])
            filedict[file] = True
        files.sort()

        cache = {"files": files, "dirinfos": dirinfos, "filedict": filedict}
        caches[util.to_str(rev)] = cache
        _logger.info("_getRepoInfo(%s) took %.3f" % (rev, time.time() - start_time))
        return cache

    def get_resource_inst(self, path, environ):
        """Return HgResource object for path.

        See DAVProvider.get_resource_inst()
        """
        self._count_get_resource_inst += 1

        # HG expects the resource paths without leading '/'
        localHgPath = path.strip("/")
        rev = None
        cmd, rest = util.pop_path(path)

        if cmd == "":
            # Root: show the three top-level pseudo collections
            return VirtualCollection(
                path, environ, "root", ["edit", "released", "archive"]
            )
        elif cmd == "edit":
            # Working directory (writable)
            localHgPath = rest.strip("/")
            rev = None
        elif cmd == "released":
            # Latest committed revision (read-only)
            localHgPath = rest.strip("/")
            rev = "tip"
        elif cmd == "archive":
            if rest == "/":
                # Browse /archive: return a list of revision folders:
                loglist = self._get_log(limit=10)
                members = [util.to_str(m["local_id"]) for m in loglist]
                return VirtualCollection(path, environ, "Revisions", members)
            revid, rest = util.pop_path(rest)
            try:
                int(revid)
            except Exception:
                # Tried to access /archive/anyname
                return None
            # Access /archive/19
            rev = revid
            localHgPath = rest.strip("/")
        else:
            return None

        # read mercurial repo into request cache
        cache = self._get_repo_info(environ, rev)
        if localHgPath in cache["filedict"]:
            # It is a version controlled file
            return HgResource(path, False, environ, rev, localHgPath)

        if localHgPath in cache["dirinfos"] or localHgPath == "":
            # It is an existing folder
            return HgResource(path, True, environ, rev, localHgPath)
        return None
| mit | cd6f9c57cc261159395febc8eb4f0509 | 35.17175 | 106 | 0.546039 | 4.004798 | false | false | false | false |
mar10/wsgidav | wsgidav/dc/simple_dc.py | 1 | 4728 | # -*- coding: utf-8 -*-
# (c) 2009-2022 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Original PyFileServer (c) 2005 Ho Chun Wei.
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
Implementation of a domain controller that uses realm/user_name/password mappings
from the configuration file and uses the share path as realm name.
user_mapping is defined as follows::
simple_dc: {
user_mapping = {
"realm1": {
"John Smith": {
"password": "YouNeverGuessMe",
},
"Dan Brown": {
"password": "DontGuessMeEither",
"roles": ["editor"]
}
},
"realm2": {
...
}
},
}
The "*" pseudo-share is used to pass a default definition::
user_mapping = {
"*": { // every share except for 'realm2'
"Dan Brown": {
"password": "DontGuessMeEither",
"roles": ["editor"]
}
},
"realm2": {
...
}
},
A share (even the "*" pseudo-share) can be set to True to allow anonymous access::
user_mapping = {
"*": {
"Dan Brown": {
"password": "DontGuessMeEither",
"roles": ["editor"]
},
},
"realm2": True
},
The SimpleDomainController fulfills the requirements of a DomainController as
used for authentication with http_authenticator.HTTPAuthenticator for the
WsgiDAV application.
Domain Controllers must provide the methods as described in
DomainControllerBase_
.. _DomainControllerBase : dc/base_dc.py
"""
from wsgidav import util
from wsgidav.dc.base_dc import BaseDomainController
__docformat__ = "reStructuredText"

# Module-level logger (named after this module).
_logger = util.get_module_logger(__name__)
class SimpleDomainController(BaseDomainController):
    """Domain controller backed by the ``simple_dc.user_mapping`` config dict.

    A realm entry is either ``True`` (anonymous access) or a dict mapping
    user names to ``{"password": ..., "roles": [...]}`` definitions; the
    pseudo-share "*" provides the default for unlisted realms.
    """

    def __init__(self, wsgidav_app, config):
        super().__init__(wsgidav_app, config)

        dc_conf = util.get_dict_value(config, "simple_dc", as_dict=True)

        self.user_map = dc_conf.get("user_mapping")
        if self.user_map is None:
            raise RuntimeError("Missing option: simple_dc.user_mapping")
        # Validate entries early: each share maps to True (anonymous) or a
        # non-empty dict of user definitions.
        for share, data in self.user_map.items():
            if type(data) not in (bool, dict) or not data:
                raise RuntimeError(
                    "Invalid option: simple_dc.user_mapping['{}']: must be True or non-empty dict.".format(
                        share
                    )
                )
        return

    def __str__(self):
        return "{}()".format(self.__class__.__name__)

    def _get_realm_entry(self, realm, user_name=None):
        """Return the matching user_map entry (falling back to default '*' if any).

        If `user_name` is given, return that user's definition dict instead
        (or None if unknown / the realm is anonymous).
        """
        realm_entry = self.user_map.get(realm)
        if realm_entry is None:
            realm_entry = self.user_map.get("*")
        if user_name is None or realm_entry is None:
            return realm_entry
        # FIX: an anonymous share is stored as `True`; looking up a user on
        # it previously raised AttributeError (`True.get(...)`).
        if realm_entry is True:
            return None
        return realm_entry.get(user_name)

    def get_domain_realm(self, path_info, environ):
        """Resolve a relative url to the appropriate realm name."""
        realm = self._calc_realm_from_path_provider(path_info, environ)
        return realm

    def require_authentication(self, realm, environ):
        """Return True if this realm requires authentication (grant anonymous access otherwise)."""
        realm_entry = self._get_realm_entry(realm)

        if realm_entry is None:
            _logger.error(
                'Missing configuration simple_dc.user_mapping["{}"] (or "*"): '
                "realm is not accessible!".format(realm)
            )
        return realm_entry is not True

    def basic_auth_user(self, realm, user_name, password, environ):
        """Returns True if this user_name/password pair is valid for the realm,
        False otherwise. Used for basic authentication."""
        user = self._get_realm_entry(realm, user_name)

        if user is not None and password == user.get("password"):
            environ["wsgidav.auth.roles"] = user.get("roles", [])
            return True
        return False

    def supports_http_digest_auth(self):
        # We have access to a plaintext password (or stored hash)
        return True

    def digest_auth_user(self, realm, user_name, environ):
        """Computes digest hash A1 part."""
        user = self._get_realm_entry(realm, user_name)
        if user is None:
            return False
        password = user.get("password")
        environ["wsgidav.auth.roles"] = user.get("roles", [])
        return self._compute_http_digest_a1(realm, user_name, password)
| mit | c021712c1d36fa447261b4f0a96d0420 | 32.771429 | 107 | 0.57868 | 4.061856 | false | false | false | false |
mar10/wsgidav | wsgidav/http_authenticator.py | 1 | 24109 | # -*- coding: utf-8 -*-
# (c) 2009-2022 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Original PyFileServer (c) 2005 Ho Chun Wei.
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
WSGI middleware for HTTP basic and digest authentication.
Usage::
from http_authenticator import HTTPAuthenticator
WSGIApp = HTTPAuthenticator(ProtectedWSGIApp, domain_controller, accept_basic,
accept_digest, default_to_digest)
where:
ProtectedWSGIApp is the application requiring authenticated access
domain_controller is a domain controller object meeting specific
requirements (below)
accept_basic is a boolean indicating whether to accept requests using
the basic authentication scheme (default = True)
accept_digest is a boolean indicating whether to accept requests using
the digest authentication scheme (default = True)
default_to_digest is a boolean. if True, an unauthenticated request will
be sent a digest authentication required response, else the unauthenticated
request will be sent a basic authentication required response
(default = True)
The HTTPAuthenticator will put the following authenticated information in the
environ dictionary::
environ["wsgidav.auth.realm"] = realm name
environ["wsgidav.auth.user_name"] = user_name
environ["wsgidav.auth.roles"] = <tuple> (optional)
environ["wsgidav.auth.permissions"] = <tuple> (optional)
**Domain Controllers**
The HTTP basic and digest authentication schemes are based on the following
concept:
Each requested relative URI can be resolved to a realm for authentication,
for example:
/fac_eng/courses/ee5903/timetable.pdf -> might resolve to realm 'Engineering General'
/fac_eng/examsolns/ee5903/thisyearssolns.pdf -> might resolve to realm 'Engineering Lecturers'
/med_sci/courses/m500/surgery.htm -> might resolve to realm 'Medical Sciences General'
and each realm would have a set of user_name and password pairs that would
allow access to the resource.
A domain controller provides this information to the HTTPAuthenticator.
This allows developers to write their own domain controllers, that might,
for example, interface with their own user database.
for simple applications, a SimpleDomainController is provided that will take
in a single realm name (for display) and a single dictionary of user_name (key)
and password (value) string pairs
Usage::
from wsgidav.dc.simple_dc import SimpleDomainController
users = dict(({'John Smith': 'YouNeverGuessMe', 'Dan Brown': 'DontGuessMeEither'})
realm = 'Sample Realm'
domain_controller = SimpleDomainController(users, realm)
Domain Controllers must provide the methods as described in
``wsgidav.interfaces.domaincontrollerinterface`` (interface_)
.. _interface : interfaces/domaincontrollerinterface.py
The environ variable here is the WSGI 'environ' dictionary. It is passed to
all methods of the domain controller as a means for developers to pass information
from previous middleware or server config (if required).
"""
import base64
import inspect
import random
import re
import time
from hashlib import md5
from textwrap import dedent
from wsgidav import util
from wsgidav.dav_error import HTTP_NOT_FOUND, DAVError
from wsgidav.dc.simple_dc import SimpleDomainController
from wsgidav.mw.base_mw import BaseMiddleware
from wsgidav.util import calc_base64, calc_hexdigest, dynamic_import_class
__docformat__ = "reStructuredText"

# Module-level logger (named after this module).
_logger = util.get_module_logger(__name__)
def make_domain_controller(wsgidav_app, config):
    """Instantiate the configured domain controller.

    ``http_authenticator.domain_controller`` may be None/True (use the
    built-in SimpleDomainController), a dotted class-name string, or a class
    object. The resolved class is instantiated with (wsgidav_app, config);
    anything else raises RuntimeError.
    """
    auth_conf = util.get_dict_value(config, "http_authenticator", as_dict=True)
    dc_setting = auth_conf.get("domain_controller")

    dc = dc_setting
    if not dc or dc is True:
        # Unset, None, or True: fall back to the simple built-in controller
        dc = SimpleDomainController
    elif util.is_basestring(dc):
        # A plain string is interpreted as a dotted class name
        dc = dynamic_import_class(dc)

    if not inspect.isclass(dc):
        raise RuntimeError(
            "Could not resolve domain controller class (got {})".format(dc_setting)
        )
    return dc(wsgidav_app, config)
# ========================================================================
# HTTPAuthenticator
# ========================================================================
class HTTPAuthenticator(BaseMiddleware):
"""WSGI Middleware for basic and digest authentication."""
error_message_401 = dedent(
"""\
<html>
<head><title>401 Access not authorized</title></head>
<body>
<h1>401 Access not authorized</h1>
</body>
</html>
"""
)
    def __init__(self, wsgidav_app, next_app, config):
        """Read authentication options from `config` and wire up the DC."""
        super().__init__(wsgidav_app, next_app, config)
        self._verbose = config.get("verbose", 3)
        self.config = config
        dc = make_domain_controller(wsgidav_app, config)
        self.domain_controller = dc
        hotfixes = util.get_dict_value(config, "hotfixes", as_dict=True)
        # HOT FIX for Windows XP (Microsoft-WebDAV-MiniRedir/5.1.2600):
        # When accessing a share '/dav/', XP sometimes sends digests for '/'.
        # With this fix turned on, we allow '/' digests, when a matching '/dav' account
        # is present.
        self.winxp_accept_root_share_login = hotfixes.get(
            "winxp_accept_root_share_login", False
        )
        # HOTFIX for Windows
        # MW 2013-12-31: DON'T set this (would cause MS Office to use anonymous
        # access always in some scenarios)
        self.win_accept_anonymous_options = hotfixes.get(
            "win_accept_anonymous_options", False
        )
        auth_conf = util.get_dict_value(config, "http_authenticator", as_dict=True)
        self.accept_basic = auth_conf.get("accept_basic", True)
        self.accept_digest = auth_conf.get("accept_digest", True)
        self.default_to_digest = auth_conf.get("default_to_digest", True)
        self.trusted_auth_header = auth_conf.get("trusted_auth_header", None)
        # Digest auth requires HA1 support from the domain controller:
        if not dc.supports_http_digest_auth() and (
            self.accept_digest or self.default_to_digest or not self.accept_basic
        ):
            raise RuntimeError(
                "{} does not support digest authentication.\n"
                "Set accept_basic=True, accept_digest=False, default_to_digest=False".format(
                    dc.__class__.__name__
                )
            )
        self._nonce_dict = dict([])
        self._header_parser = re.compile(r"([\w]+)=([^,]*),")
        # Note: extra parser to handle digest auth requests from certain
        # clients, that leave commas un-encoded to interfere with the above.
        self._header_fix_parser = re.compile(r'([\w]+)=("[^"]*,[^"]*"),')
        self._header_method = re.compile(r"^([\w]+)")
    def get_domain_controller(self):
        """Return the domain controller instance created from the config."""
        return self.domain_controller
    def allow_anonymous_access(self, share):
        """Return True if the DC does not require authentication for `share`."""
        return not self.domain_controller.require_authentication(share, None)
def __call__(self, environ, start_response):
realm = self.domain_controller.get_domain_realm(environ["PATH_INFO"], environ)
environ["wsgidav.auth.realm"] = realm
environ["wsgidav.auth.user_name"] = ""
# The domain controller MAY set those values depending on user's
# authorization:
environ["wsgidav.auth.roles"] = None
environ["wsgidav.auth.permissions"] = None
# _logger.debug(
# "HTTPAuthenticator realm({}): '{}'".format(environ["PATH_INFO"], realm)
# )
# _logger.debug("{}".format(environ))
force_logout = False
if "logout" in environ.get("QUERY_STRING", ""):
force_logout = True
_logger.warning("Force logout")
force_allow = False
if self.win_accept_anonymous_options and environ["REQUEST_METHOD"] == "OPTIONS":
_logger.warning("No authorization required for OPTIONS method")
force_allow = True
if force_allow or not self.domain_controller.require_authentication(
realm, environ
):
# No authentication needed
# _logger.debug("No authorization required for realm '{}'".format(realm))
# environ["wsgidav.auth.realm"] = realm
# environ["wsgidav.auth.user_name"] = ""
return self.next_app(environ, start_response)
if self.trusted_auth_header and environ.get(self.trusted_auth_header):
# accept a user_name that was injected by a trusted upstream server
_logger.debug(
"Accept trusted user_name {}='{}'for realm '{}'".format(
self.trusted_auth_header,
environ.get(self.trusted_auth_header),
realm,
)
)
# environ["wsgidav.auth.realm"] = realm
environ["wsgidav.auth.user_name"] = environ.get(self.trusted_auth_header)
return self.next_app(environ, start_response)
if "HTTP_AUTHORIZATION" in environ and not force_logout:
auth_header = environ["HTTP_AUTHORIZATION"]
auth_match = self._header_method.search(auth_header)
auth_method = "None"
if auth_match:
auth_method = auth_match.group(1).lower()
if auth_method == "digest" and self.accept_digest:
return self.handle_digest_auth_request(environ, start_response)
elif auth_method == "digest" and self.accept_basic:
return self.send_basic_auth_response(environ, start_response)
elif auth_method == "basic" and self.accept_basic:
return self.handle_basic_auth_request(environ, start_response)
# The requested auth method is not supported.
elif self.default_to_digest and self.accept_digest:
return self.send_digest_auth_response(environ, start_response)
elif self.accept_basic:
return self.send_basic_auth_response(environ, start_response)
_logger.warning(
"HTTPAuthenticator: respond with 400 Bad request; Auth-Method: {}".format(
auth_method
)
)
start_response(
"400 Bad Request",
[("Content-Length", "0"), ("Date", util.get_rfc1123_time())],
)
return [""]
if self.default_to_digest:
return self.send_digest_auth_response(environ, start_response)
return self.send_basic_auth_response(environ, start_response)
def send_basic_auth_response(self, environ, start_response):
realm = self.domain_controller.get_domain_realm(environ["PATH_INFO"], environ)
_logger.debug("401 Not Authorized for realm '{}' (basic)".format(realm))
wwwauthheaders = 'Basic realm="{}"'.format(realm)
body = util.to_bytes(self.error_message_401)
start_response(
"401 Not Authorized",
[
("WWW-Authenticate", wwwauthheaders),
("Content-Type", "text/html; charset=utf-8"),
("Content-Length", str(len(body))),
("Date", util.get_rfc1123_time()),
],
)
return [body]
def handle_basic_auth_request(self, environ, start_response):
realm = self.domain_controller.get_domain_realm(environ["PATH_INFO"], environ)
auth_header = environ["HTTP_AUTHORIZATION"]
auth_value = ""
try:
auth_value = auth_header[len("Basic ") :].strip()
except Exception:
auth_value = ""
auth_value = base64.decodebytes(util.to_bytes(auth_value))
auth_value = util.to_str(auth_value)
user_name, password = auth_value.split(":", 1)
if self.domain_controller.basic_auth_user(realm, user_name, password, environ):
environ["wsgidav.auth.realm"] = realm
environ["wsgidav.auth.user_name"] = user_name
return self.next_app(environ, start_response)
_logger.warning(
"Authentication (basic) failed for user '{}', realm '{}'.".format(
user_name, realm
)
)
return self.send_basic_auth_response(environ, start_response)
def send_digest_auth_response(self, environ, start_response):
realm = self.domain_controller.get_domain_realm(environ["PATH_INFO"], environ)
random.seed()
serverkey = hex(random.getrandbits(32))[2:]
etagkey = calc_hexdigest(environ["PATH_INFO"])
timekey = str(time.time())
nonce_source = timekey + calc_hexdigest(
timekey + ":" + etagkey + ":" + serverkey
)
nonce = calc_base64(nonce_source)
wwwauthheaders = (
'Digest realm="{}", nonce="{}", algorithm=MD5, qop="auth"'.format(
realm, nonce
)
)
_logger.debug(
"401 Not Authorized for realm '{}' (digest): {}".format(
realm, wwwauthheaders
)
)
body = util.to_bytes(self.error_message_401)
start_response(
"401 Not Authorized",
[
("WWW-Authenticate", wwwauthheaders),
("Content-Type", "text/html; charset=utf-8"),
("Content-Length", str(len(body))),
("Date", util.get_rfc1123_time()),
],
)
return [body]
    def handle_digest_auth_request(self, environ, start_response):
        """Verify 'Authorization: Digest ...' credentials against the DC.

        Parses and validates the digest header fields (username, realm,
        algorithm, nonce, qop, cnonce, nc, response), computes the expected
        digest via `_compute_digest_response()`, and compares it with the
        client's response hash. Any validation failure collects a reason and
        ends in a fresh 401 digest challenge.
        """
        realm = self.domain_controller.get_domain_realm(environ["PATH_INFO"], environ)
        if not realm:
            raise DAVError(
                HTTP_NOT_FOUND,
                context_info=f"Could not resolve realm for {environ['PATH_INFO']}",
            )
        is_invalid_req = False
        invalid_req_reasons = []
        auth_header_dict = {}
        # Trailing ',' makes the key=value regex match the last field, too
        auth_headers = environ["HTTP_AUTHORIZATION"] + ","
        if not auth_headers.lower().strip().startswith("digest"):
            is_invalid_req = True
            invalid_req_reasons.append(
                "HTTP_AUTHORIZATION must start with 'digest': {}".format(auth_headers)
            )
        # Hotfix for Windows file manager and OSX Finder:
        # Some clients don't urlencode paths in auth header, so uri value may
        # contain commas, which break the usual regex headerparser. Example:
        # Digest user_name="user",realm="/",uri="a,b.txt",nc=00000001, ...
        # -> [..., ('uri', '"a'), ('nc', '00000001'), ...]
        # Override any such values with carefully extracted ones.
        auth_header_list = self._header_parser.findall(auth_headers)
        auth_header_fixlist = self._header_fix_parser.findall(auth_headers)
        if auth_header_fixlist:
            _logger.info(
                "Fixing auth_header comma-parsing: extend {} with {}".format(
                    auth_header_list, auth_header_fixlist
                )
            )
            auth_header_list += auth_header_fixlist
        for auth_header in auth_header_list:
            auth_header_key = auth_header[0]
            auth_header_value = auth_header[1].strip().strip('"')
            auth_header_dict[auth_header_key] = auth_header_value
        # _logger.debug(
        #     "handle_digest_auth_request: {}".format(environ["HTTP_AUTHORIZATION"])
        # )
        # _logger.debug(" -> {}".format(auth_header_dict))
        req_username = None
        if "username" in auth_header_dict:
            req_username = auth_header_dict["username"]
            if not req_username:
                is_invalid_req = True
                invalid_req_reasons.append(
                    "`username` is empty: {!r}".format(req_username)
                )
            elif r"\\" in req_username:
                # Hotfix for Windows XP:
                #   net use W: http://127.0.0.1/dav /USER:DOMAIN\tester tester
                # will send the name with double backslashes ('DOMAIN\\tester')
                # but send the digest for the simple name ('DOMAIN\tester').
                req_username_org = req_username
                req_username = req_username.replace("\\\\", "\\")
                _logger.info(
                    "Fixing Windows name with double backslash: '{}' --> '{}'".format(
                        req_username_org, req_username
                    )
                )
            # pre_check = self.domain_controller.is_realm_user(
            #     realm, req_username, environ
            # )
            # if pre_check is False:
            #     is_invalid_req = True
            #     invalid_req_reasons.append(
            #         "Not a realm-user: '{}'/'{}'".format(realm, req_username)
            #     )
        else:
            is_invalid_req = True
            invalid_req_reasons.append("Missing 'username' in headers")
        # TODO: Chun added this comments, but code was commented out:
        # Do not do realm checking - a hotfix for WinXP using some other realm's
        # auth details for this realm - if user/password match
        if "realm" in auth_header_dict:
            if auth_header_dict["realm"].upper() != realm.upper():
                if (
                    self.winxp_accept_root_share_login
                    and auth_header_dict["realm"] == "/"
                ):
                    # Hotfix: also accept '/'
                    _logger.info("winxp_accept_root_share_login")
                else:
                    is_invalid_req = True
                    invalid_req_reasons.append("Realm mismatch: '{}'".format(realm))
        if "algorithm" in auth_header_dict:
            if auth_header_dict["algorithm"].upper() != "MD5":
                is_invalid_req = True  # only MD5 supported
                invalid_req_reasons.append("Unsupported 'algorithm' in headers")
        req_uri = auth_header_dict.get("uri")
        if "nonce" in auth_header_dict:
            req_nonce = auth_header_dict["nonce"]
        else:
            is_invalid_req = True
            invalid_req_reasons.append("Expected 'nonce' in headers")
        req_has_qop = False
        if "qop" in auth_header_dict:
            req_has_qop = True
            req_qop = auth_header_dict["qop"]
            if req_qop.lower() != "auth":
                is_invalid_req = True  # only auth supported, auth-int not supported
                invalid_req_reasons.append("Expected 'qop' == 'auth'")
        else:
            req_qop = None
        if "cnonce" in auth_header_dict:
            req_cnonce = auth_header_dict["cnonce"]
        else:
            req_cnonce = None
            if req_has_qop:
                is_invalid_req = True
                invalid_req_reasons.append(
                    "Expected 'cnonce' in headers if qop is passed"
                )
        if "nc" in auth_header_dict:  # is read but nonce-count checking not implemented
            req_nc = auth_header_dict["nc"]
        else:
            req_nc = None
            if req_has_qop:
                is_invalid_req = True
                invalid_req_reasons.append("Expected 'nc' in headers if qop is passed")
        if "response" in auth_header_dict:
            req_response = auth_header_dict["response"]
        else:
            is_invalid_req = True
            invalid_req_reasons.append("Expected 'response' in headers")
        if not is_invalid_req:
            req_method = environ["REQUEST_METHOD"]
            required_digest = self._compute_digest_response(
                realm,
                req_username,
                req_method,
                req_uri,
                req_nonce,
                req_cnonce,
                req_qop,
                req_nc,
                environ,
            )
            if not required_digest:
                # Rejected by domain controller
                is_invalid_req = True
                invalid_req_reasons.append(
                    "Rejected by DC.digest_auth_user('{}', '{}')".format(
                        realm, req_username
                    )
                )
            elif required_digest != req_response:
                warning_msg = (
                    "_compute_digest_response('{}', '{}', ...): {} != {}".format(
                        realm, req_username, required_digest, req_response
                    )
                )
                if self.winxp_accept_root_share_login and realm != "/":
                    # _logger.warning(warning_msg + " => trying '/' realm")
                    # Hotfix: also accept '/' digest
                    root_digest = self._compute_digest_response(
                        "/",
                        req_username,
                        req_method,
                        req_uri,
                        req_nonce,
                        req_cnonce,
                        req_qop,
                        req_nc,
                        environ,
                    )
                    if root_digest == req_response:
                        _logger.warning(
                            "handle_digest_auth_request: HOTFIX: accepting '/' login for '{}'.".format(
                                realm
                            )
                        )
                    else:
                        is_invalid_req = True
                        invalid_req_reasons.append(
                            warning_msg + " (also tried '/' realm)"
                        )
                else:
                    is_invalid_req = True
                    invalid_req_reasons.append(warning_msg)
            else:
                # _logger.debug("digest succeeded for realm '{}', user '{}'"
                #     .format(realm, req_username))
                pass
        if is_invalid_req:
            invalid_req_reasons.append("Headers:\n {}".format(auth_header_dict))
            if self._verbose >= 4:
                _logger.warning(
                    "Authentication (digest) failed for user '{}', realm '{}':\n {}".format(
                        req_username, realm, "\n ".join(invalid_req_reasons)
                    )
                )
            else:
                _logger.warning(
                    "Authentication (digest) failed for user '{}', realm '{}'.".format(
                        req_username, realm
                    )
                )
            return self.send_digest_auth_response(environ, start_response)
        environ["wsgidav.auth.realm"] = realm
        environ["wsgidav.auth.user_name"] = req_username
        return self.next_app(environ, start_response)
def _compute_digest_response(
self, realm, user_name, method, uri, nonce, cnonce, qop, nc, environ
):
"""Computes digest hash.
Calculation of the A1 (HA1) part is delegated to the dc interface method
`digest_auth_user()`.
Args:
realm (str):
user_name (str):
method (str): WebDAV Request Method
uri (str):
nonce (str): server generated nonce value
cnonce (str): client generated cnonce value
qop (str): quality of protection
nc (str) (number), nonce counter incremented by client
Returns:
MD5 hash string
or False if user rejected by domain controller
"""
def md5h(data):
return md5(util.to_bytes(data)).hexdigest()
def md5kd(secret, data):
return md5h(secret + ":" + data)
A1 = self.domain_controller.digest_auth_user(realm, user_name, environ)
if not A1:
return False
A2 = method + ":" + uri
if qop:
res = md5kd(
A1, nonce + ":" + nc + ":" + cnonce + ":" + qop + ":" + md5h(A2)
)
else:
res = md5kd(A1, nonce + ":" + md5h(A2))
return res
| mit | 94e03384508737906903c872cc1b84a3 | 38.137987 | 103 | 0.559874 | 4.205303 | false | false | false | false |
mar10/wsgidav | wsgidav/mw/base_mw.py | 1 | 1521 | # -*- coding: utf-8 -*-
"""
Abstract base middleware class (optional use).
"""
from abc import ABC, abstractmethod
from wsgidav.util import NO_DEFAULT, get_dict_value
__docformat__ = "reStructuredText"
class BaseMiddleware(ABC):
    """Abstract base middleware class (optional).

    Note: this is a convenience class, that *may* be used to implement WsgiDAV
    middlewares. However it is not a requirement: any object that implements
    the WSGI specification can be added to the stack.

    Derived classes in WsgiDAV include::

        wsgidav.dir_browser.WsgiDavDirBrowser
        wsgidav.mw.debug_filter.WsgiDavDebugFilter
        wsgidav.error_printer.ErrorPrinter
        wsgidav.http_authenticator.HTTPAuthenticator
        wsgidav.request_resolver.RequestResolver
    """

    def __init__(self, wsgidav_app, next_app, config):
        self.wsgidav_app = wsgidav_app
        self.next_app = next_app
        self.config = config
        self.verbose = config.get("verbose", 3)

    @abstractmethod
    def __call__(self, environ, start_response):
        raise NotImplementedError

    def __repr__(self):
        return "{}.{}".format(self.__module__, self.__class__.__name__)

    def is_disabled(self):
        """Optionally return True to skip this module on startup."""
        return False

    def get_config(self, key_path: str, default=NO_DEFAULT):
        """Return a configuration value by dotted key path (e.g. 'a.b.c').

        Raises if the key is missing and no `default` was passed.
        """
        res = get_dict_value(self.config, key_path, default)
        return res
| mit | e7e335d3929fa435cd5ae4b8ab1ed2d5 | 30.6875 | 78 | 0.666009 | 3.81203 | false | true | false | false |
mar10/wsgidav | wsgidav/dav_provider.py | 1 | 59062 | # -*- coding: utf-8 -*-
# (c) 2009-2022 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Original PyFileServer (c) 2005 Ho Chun Wei.
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
Abstract base class for DAV resource providers.
This module serves these purposes:
1. Documentation of the DAVProvider interface
2. Common base class for all DAV providers
3. Default implementation for most functionality that a resource provider must
deliver.
If no default implementation can be provided, then all write actions generate
FORBIDDEN errors. Read requests generate NOT_IMPLEMENTED errors.
**_DAVResource, DAVCollection, DAVNonCollection**
Represents an existing (i.e. mapped) WebDAV resource or collection.
A _DAVResource object is created by a call to the DAVProvider.
The resource may then be used to query different attributes like ``res.name``,
``res.is_collection``, ``res.get_content_length()``, and ``res.support_etag()``.
It also implements operations, that require an *existing* resource, like:
``get_preferred_path()``, ``create_collection()``, or ``get_property_value()``.
Usage::
res = provider.get_resource_inst(path, environ)
if res is not None:
print(res.getName())
**DAVProvider**
A DAV provider represents a shared WebDAV system.
There is only one provider instance per share, which is created during
server start-up. After that, the dispatcher (``request_resolver.RequestResolver``)
parses the request URL and adds it to the WSGI environment, so it
can be accessed like this::
provider = environ["wsgidav.provider"]
The main purpose of the provider is to create _DAVResource objects for URLs::
res = provider.get_resource_inst(path, environ)
**Supporting Objects**
The DAVProvider takes two supporting objects:
propertyManager
An object that provides storage for dead properties assigned for webDAV resources.
PropertyManagers must provide the methods as described in
``wsgidav.interfaces.propertymanagerinterface``
See prop_man.property_manager.PropertyManager for a sample implementation
using shelve.
lockManager
An object that implements locking on webDAV resources.
It contains an instance of ``LockStorageDict``
lockStorage
An object that provides storage for locks made on webDAV resources.
LockStorages must provide the methods as described in
``wsgidav.interfaces.lockmanagerinterface``
See lock_storage for a sample implementation using shelve.
See :doc:`reference_guide` for more information about the WsgiDAV architecture.
"""
import os
import sys
import time
import traceback
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Optional
from urllib.parse import quote, unquote
from wsgidav import util, xml_tools
from wsgidav.dav_error import (
HTTP_FORBIDDEN,
HTTP_NOT_FOUND,
DAVError,
PRECONDITION_CODE_ProtectedProperty,
as_DAVError,
)
from wsgidav.util import etree
__docformat__ = "reStructuredText"

_logger = util.get_module_logger(__name__)

# Standard live property names (RFC 4918) in Clark notation, returned for
# 'allprop' requests when the related getter yields a value.
_standardLivePropNames = [
    "{DAV:}creationdate",
    "{DAV:}displayname",
    "{DAV:}getcontenttype",
    "{DAV:}resourcetype",
    "{DAV:}getlastmodified",
    "{DAV:}getcontentlength",
    "{DAV:}getetag",
    "{DAV:}getcontentlanguage",
    # "{DAV:}source", # removed in rfc4918
]

# Locking-related live properties; only advertised if a lock manager is set.
_lockPropertyNames = ["{DAV:}lockdiscovery", "{DAV:}supportedlock"]
# ========================================================================
# _DAVResource
# ========================================================================
class _DAVResource(ABC):
    r"""Represents a single existing DAV resource instance.

    A resource may be a collection (aka 'folder') or a non-collection (aka
    'file').
    _DAVResource is the common base class for the specialized classes::

        _DAVResource
          +- DAVCollection
          \- DAVNonCollection

    Instances of this class are created through the DAVProvider::

        res = provider.get_resource_inst(path, environ)
        if res and res.is_collection:
            print(res.get_display_name())

    In the example above, res will be ``None``, if the path cannot be mapped to
    an existing resource.
    The following attributes and methods are considered 'cheap'::

        res.path
        res.provider
        res.name
        res.is_collection
        res.environ

    Querying other attributes is considered 'expensive' and may be delayed until
    the first access.

        get_content_length()
        get_content_type()
        get_creation_date()
        get_display_name()
        get_etag()
        get_last_modified()
        support_ranges()

        support_etag()
        support_modified()
        support_content_length()

    These functions return ``None``, if the property is not available, or
    not supported.

    See also DAVProvider.get_resource_inst().
    """

    def __init__(self, path: str, is_collection: bool, environ: dict):
        # `path` is UTF-8, unquoted, and starts with '/' (or is empty)
        assert util.is_str(path)
        assert path == "" or path.startswith("/")
        self.provider = environ["wsgidav.provider"]
        self.path = path
        self.is_collection = is_collection
        self.environ = environ
        # Last path segment (the resource's URI name)
        self.name = util.get_uri_name(self.path)

    def __repr__(self):
        return f"{self.__class__.__name__}({self.path!r})"
# def getContentLanguage(self):
# """Contains the Content-Language header returned by a GET without accept
# headers.
#
# The getcontentlanguage property MUST be defined on any DAV compliant
# resource that returns the Content-Language header on a GET.
# """
# raise NotImplementedError
    @abstractmethod
    def get_content_length(self) -> Optional[int]:
        """Contains the Content-Length header returned by a GET without accept
        headers.

        The getcontentlength property MUST be defined on any DAV compliant
        resource that returns the Content-Length header in response to a GET.

        This method MUST be implemented by non-collections only.
        """
        # Collections have no content length; non-collections must override.
        if self.is_collection:
            return None
        raise NotImplementedError
    def get_content_type(self) -> Optional[str]:
        """Contains the Content-Type header returned by a GET without accept
        headers.

        This getcontenttype property MUST be defined on any DAV compliant
        resource that returns the Content-Type header in response to a GET.
        See http://www.webdav.org/specs/rfc4918.html#PROPERTY_getcontenttype

        This method MUST be implemented by non-collections only.
        """
        # Collections have no content type; non-collections must override.
        if self.is_collection:
            return None
        raise NotImplementedError
    def get_creation_date(self) -> Optional[datetime]:
        """Records the time and date the resource was created.

        The creationdate property should be defined on all DAV compliant
        resources. If present, it contains a timestamp of the moment when the
        resource was created (i.e., the moment it had non-null state).

        This method SHOULD be implemented, especially by non-collections.
        """
        # Default: creation date is unknown / not supported.
        return None
    def get_directory_info(self):
        """Return a list of dictionaries with information for directory
        rendering.

        This default implementation return None, so the dir browser will
        traverse all members.

        This method COULD be implemented for collection resources.
        """
        # Only meaningful for collections; None means 'no shortcut available'.
        assert self.is_collection
        return None
    def get_display_name(self) -> str:
        """Provides a name for the resource that is suitable for presentation to
        a user.

        The displayname property should be defined on all DAV compliant
        resources. If present, the property contains a description of the
        resource that is suitable for presentation to a user.

        This default implementation returns `name`, which is the last path
        segment.
        """
        return self.name
def get_display_info(self):
"""Return additional info dictionary for displaying (optional).
This information is not part of the DAV specification, but meant for use
by the dir browser middleware.
This default implementation returns ``{'type': '...'}``
"""
if self.is_collection:
return {"type": "Directory"}
elif os.extsep in self.name:
ext = self.name.split(os.extsep)[-1].upper()
if len(ext) < 5:
return {"type": "{}-File".format(ext)}
return {"type": "File"}
    def get_etag(self) -> Optional[str]:
        """Return the entity tag for this resource, or None if unsupported.

        See http://www.webdav.org/specs/rfc4918.html#PROPERTY_getetag

        This method SHOULD be implemented, especially by non-collections.
        """
        # Default: ETags are not supported.
        return None
    def get_last_modified(self):
        """Contains the Last-Modified header returned by a GET method without
        accept headers.

        Return None, if this live property is not supported.

        Note that the last-modified date on a resource may reflect changes in
        any part of the state of the resource, not necessarily just a change to
        the response to the GET method. For example, a change in a property may
        cause the last-modified date to change. The getlastmodified property
        MUST be defined on any DAV compliant resource that returns the
        Last-Modified header in response to a GET.

        This method SHOULD be implemented, especially by non-collections.
        """
        # Default: modification date is unknown / not supported.
        return None
    def set_last_modified(self, dest_path, time_stamp, *, dry_run):
        """Set last modified time for destPath to timeStamp on epoch-format"""
        # Providers that support timestamp changes must override this.
        raise NotImplementedError
    def support_ranges(self) -> bool:
        """Return True, if this non-resource supports Range on GET requests.

        This method MUST be implemented by non-collections only.
        """
        raise NotImplementedError
    def support_content_length(self) -> bool:
        """Return True, if this resource supports Content-Length.

        This default implementation checks `self.get_content_length() is None`.
        """
        return self.get_content_length() is not None
    def support_etag(self) -> bool:
        """Return True, if this resource supports ETags."""
        raise NotImplementedError
        # This default implementation checks `self.get_etag() is None`.
        # return self.get_etag() is not None
    def support_modified(self) -> bool:
        """Return True, if this resource supports last modified dates.

        This default implementation checks `self.get_last_modified() is None`.
        """
        return self.get_last_modified() is not None
def get_preferred_path(self):
"""Return preferred mapping for a resource mapping.
Different URLs may map to the same resource, e.g.:
'/a/b' == '/A/b' == '/a/b/'
get_preferred_path() returns the same value for all these variants, e.g.:
'/a/b/' (assuming resource names considered case insensitive)
@param path: a UTF-8 encoded, unquoted byte string.
@return: a UTF-8 encoded, unquoted byte string.
"""
if self.path in ("", "/"):
return "/"
# Append '/' for collections
if self.is_collection and not self.path.endswith("/"):
return self.path + "/"
# TODO: handle case-sensitivity, depending on OS
# (FileSystemProvider could do this with os.path:
# (?) on unix we can assume that the path already matches exactly the case of filepath
# on windows we could use path.lower() or get the real case from the
# file system
return self.path
    def get_ref_url(self):
        """Return the quoted, absolute, unique URL of a resource, relative to appRoot.

        Byte string, UTF-8 encoded, quoted.
        Starts with a '/'. Collections also have a trailing '/'.

        This is basically the same as get_preferred_path, but deals with
        'virtual locations' as well.

        e.g. '/a/b' == '/A/b' == '/bykey/123' == '/byguid/abc532'

        get_ref_url() returns the same value for all these URLs, so it can be
        used as a key for locking and persistence storage.

        DAV providers that allow virtual-mappings must override this method.

        See also comments in DEVELOPERS.txt glossary.
        """
        # Join share path and canonical path, then quote the whole URL.
        return quote(self.provider.share_path + self.get_preferred_path())
# def getRefKey(self):
# """Return an unambigous identifier string for a resource.
#
# Since it is always unique for one resource, <refKey> is used as key for
# the lock- and property storage dictionaries.
#
# This default implementation calls get_ref_url(), and strips a possible
# trailing '/'.
# """
# refKey = self.get_ref_url(path)
# if refKey == "/":
# return refKey
# return refKey.rstrip("/")
def get_href(self):
"""Convert path to a URL that can be passed to XML responses.
Byte string, UTF-8 encoded, quoted.
See http://www.webdav.org/specs/rfc4918.html#rfc.section.8.3
We are using the path-absolute option. i.e. starting with '/'.
URI ; See section 3.2.1 of [RFC2068]
"""
# Nautilus chokes, if href encodes '(' as '%28'
# So we don't encode 'extra' and 'safe' characters (see rfc2068 3.2.1)
safe = "/" + "!*'()," + "$-_|."
return quote(
self.provider.mount_path
+ self.provider.share_path
+ self.get_preferred_path(),
safe=safe,
)
# def getParent(self):
# """Return parent _DAVResource or None.
#
# There is NO checking, if the parent is really a mapped collection.
# """
# parentpath = util.get_uri_parent(self.path)
# if not parentpath:
# return None
# return self.provider.get_resource_inst(parentpath)
    def get_member(self, name):
        """Return child resource with a given name (None, if not found).

        This method COULD be overridden by a derived class, for performance
        reasons.

        This default implementation calls self.provider.get_resource_inst().
        """
        raise NotImplementedError  # implemented by DAVCollection
def get_member_list(self):
"""Return a list of direct members (_DAVResource or derived objects).
This default implementation calls self.get_member_names() and
self.get_member() for each of them.
A provider COULD overwrite this for performance reasons.
"""
if not self.is_collection:
raise NotImplementedError
memberList = []
for name in self.get_member_names():
member = self.get_member(name)
assert member is not None
memberList.append(member)
return memberList
    def get_member_names(self):
        """Return list of (direct) collection member names (UTF-8 byte strings).

        Every provider MUST provide this method for collection resources.
        """
        raise NotImplementedError
def get_descendants(
self,
*,
collections=True,
resources=True,
depth_first=False,
depth="infinity",
add_self=False,
):
"""Return a list _DAVResource objects of a collection (children,
grand-children, ...).
This default implementation calls self.get_member_list() recursively.
This function may also be called for non-collections (with add_self=True).
:Parameters:
depth_first : bool
use <False>, to list containers before content.
(e.g. when moving / copying branches.)
Use <True>, to list content before containers.
(e.g. when deleting branches.)
depth : string
'0' | '1' | 'infinity'
"""
assert depth in ("0", "1", "infinity")
res = []
if add_self and not depth_first:
res.append(self)
if depth != "0" and self.is_collection:
for child in self.get_member_list():
if not child:
self.get_member_list()
want = (collections and child.is_collection) or (
resources and not child.is_collection
)
if want and not depth_first:
res.append(child)
if child.is_collection and depth == "infinity":
res.extend(
child.get_descendants(
collections=collections,
resources=resources,
depth_first=depth_first,
depth=depth,
add_self=False,
)
)
if want and depth_first:
res.append(child)
if add_self and depth_first:
res.append(self)
return res
# --- Properties ---------------------------------------------------------
    def get_property_names(self, *, is_allprop):
        """Return list of supported property names in Clark Notation.

        Note that 'allprop', despite its name, which remains for
        backward-compatibility, does not return every property, but only dead
        properties and the live properties defined in RFC4918.

        This default implementation returns a combination of:

        - Supported standard live properties in the {DAV:} namespace, if the
          related getter method returns not None.
        - {DAV:}lockdiscovery and {DAV:}supportedlock, if a lock manager is
          present
        - If a property manager is present, then a list of dead properties is
          appended

        A resource provider may override this method, to add a list of
        supported custom live property names.
        """
        # Live properties
        propNameList = []
        propNameList.append("{DAV:}resourcetype")
        if self.get_creation_date() is not None:
            propNameList.append("{DAV:}creationdate")
        if self.get_content_length() is not None:
            # Only non-collections may have a content length
            assert not self.is_collection
            propNameList.append("{DAV:}getcontentlength")
        if self.get_content_type() is not None:
            propNameList.append("{DAV:}getcontenttype")
        if self.get_last_modified() is not None:
            propNameList.append("{DAV:}getlastmodified")
        if self.get_display_name() is not None:
            propNameList.append("{DAV:}displayname")
        if self.get_etag() is not None:
            propNameList.append("{DAV:}getetag")
        # Locking properties
        if self.provider.lock_manager and not self.prevent_locking():
            propNameList.extend(_lockPropertyNames)
        # Dead properties
        if self.provider.prop_manager:
            refUrl = self.get_ref_url()
            propNameList.extend(
                self.provider.prop_manager.get_properties(refUrl, self.environ)
            )
        return propNameList
    def get_properties(self, mode, *, name_list=None):
        """Return properties as list of 2-tuples (name, value).

        If mode is 'name', then None is returned for the value.

        name
            the property name in Clark notation.
        value
            may have different types, depending on the status:
            - string or unicode: for standard property values.
            - etree.Element: for complex values.
            - DAVError in case of errors.
            - None: if mode == 'name'.

        @param mode: "allprop", "name", or "named"
        @param name_list: list of property names in Clark Notation (required for mode 'named')

        This default implementation basically calls self.get_property_names() to
        get the list of names, then call self.get_property_value on each of them.
        """
        assert mode in ("allprop", "name", "named")
        if mode in ("allprop", "name"):
            # TODO: 'allprop' could have nameList, when <include> option is
            # implemented
            assert name_list is None
            name_list = self.get_property_names(is_allprop=mode == "allprop")
        else:
            assert name_list is not None
        propList = []
        namesOnly = mode == "name"
        for name in name_list:
            try:
                if namesOnly:
                    propList.append((name, None))
                else:
                    value = self.get_property_value(name)
                    propList.append((name, value))
            except DAVError as e:
                # Expected errors are reported per-property
                propList.append((name, e))
            except Exception as e:
                # Unexpected errors are converted and reported per-property
                propList.append((name, as_DAVError(e)))
                if self.provider.verbose >= 2:
                    traceback.print_exc(10, sys.stdout)
        return propList
def get_property_value(self, name):
"""Return the value of a property.
name:
the property name in Clark notation.
return value:
may have different types, depending on the status:
- string or unicode: for standard property values.
- lxml.etree.Element: for complex values.
If the property is not available, a DAVError is raised.
This default implementation handles ``{DAV:}lockdiscovery`` and
``{DAV:}supportedlock`` using the associated lock manager.
All other *live* properties (i.e. name starts with ``{DAV:}``) are
delegated to the self.xxx() getters.
Finally, other properties are considered *dead*, and are handled by
the associated property manager.
"""
refUrl = self.get_ref_url()
# lock properties
lm = self.provider.lock_manager
if lm and name == "{DAV:}lockdiscovery":
# TODO: we return HTTP_NOT_FOUND if no lockmanager is present.
# Correct?
activelocklist = lm.get_url_lock_list(refUrl)
lockdiscoveryEL = etree.Element(name)
for lock in activelocklist:
activelockEL = etree.SubElement(lockdiscoveryEL, "{DAV:}activelock")
locktypeEL = etree.SubElement(activelockEL, "{DAV:}locktype")
# Note: make sure `{DAV:}` is not handled as format tag:
etree.SubElement(locktypeEL, "{}{}".format("{DAV:}", lock["type"]))
lockscopeEL = etree.SubElement(activelockEL, "{DAV:}lockscope")
# Note: make sure `{DAV:}` is not handled as format tag:
etree.SubElement(lockscopeEL, "{}{}".format("{DAV:}", lock["scope"]))
etree.SubElement(activelockEL, "{DAV:}depth").text = lock["depth"]
if lock["owner"]:
# lock["owner"] is an XML string
# owner may be empty (#64)
ownerEL = xml_tools.string_to_xml(lock["owner"])
activelockEL.append(ownerEL)
timeout = lock["timeout"]
if timeout < 0:
timeout = "Infinite"
else:
# The time remaining on the lock
expire = lock["expire"]
timeout = "Second-" + str(int(expire - time.time()))
etree.SubElement(activelockEL, "{DAV:}timeout").text = timeout
locktokenEL = etree.SubElement(activelockEL, "{DAV:}locktoken")
etree.SubElement(locktokenEL, "{DAV:}href").text = lock["token"]
# TODO: this is ugly:
# res.get_property_value("{DAV:}lockdiscovery")
#
# lockRoot = self.get_href(self.provider.ref_url_to_path(lock["root"]))
lockPath = self.provider.ref_url_to_path(lock["root"])
lockRes = self.provider.get_resource_inst(lockPath, self.environ)
# FIXME: test for None
lockHref = lockRes.get_href()
lockrootEL = etree.SubElement(activelockEL, "{DAV:}lockroot")
etree.SubElement(lockrootEL, "{DAV:}href").text = lockHref
return lockdiscoveryEL
elif lm and name == "{DAV:}supportedlock":
# TODO: we return HTTP_NOT_FOUND if no lockmanager is present. Correct?
# TODO: the lockmanager should decide about it's features
supportedlockEL = etree.Element(name)
lockentryEL = etree.SubElement(supportedlockEL, "{DAV:}lockentry")
lockscopeEL = etree.SubElement(lockentryEL, "{DAV:}lockscope")
etree.SubElement(lockscopeEL, "{DAV:}exclusive")
locktypeEL = etree.SubElement(lockentryEL, "{DAV:}locktype")
etree.SubElement(locktypeEL, "{DAV:}write")
lockentryEL = etree.SubElement(supportedlockEL, "{DAV:}lockentry")
lockscopeEL = etree.SubElement(lockentryEL, "{DAV:}lockscope")
etree.SubElement(lockscopeEL, "{DAV:}shared")
locktypeEL = etree.SubElement(lockentryEL, "{DAV:}locktype")
etree.SubElement(locktypeEL, "{DAV:}write")
return supportedlockEL
elif name.startswith("{DAV:}"):
# Standard live property (raises HTTP_NOT_FOUND if not supported)
if name == "{DAV:}creationdate" and self.get_creation_date() is not None:
# Note: uses RFC3339 format (ISO 8601)
return util.get_rfc3339_time(self.get_creation_date())
elif name == "{DAV:}getcontenttype" and self.get_content_type() is not None:
return self.get_content_type()
elif name == "{DAV:}resourcetype":
if self.is_collection:
resourcetypeEL = etree.Element(name)
etree.SubElement(resourcetypeEL, "{DAV:}collection")
return resourcetypeEL
return ""
elif (
name == "{DAV:}getlastmodified" and self.get_last_modified() is not None
):
# Note: uses RFC1123 format
return util.get_rfc1123_time(self.get_last_modified())
elif (
name == "{DAV:}getcontentlength"
and self.get_content_length() is not None
):
# Note: must be a numeric string
return str(self.get_content_length())
elif name == "{DAV:}getetag" and self.get_etag() is not None:
return self.get_etag()
elif name == "{DAV:}displayname" and self.get_display_name() is not None:
return self.get_display_name()
# Unsupported, no persistence available, or property not found
raise DAVError(HTTP_NOT_FOUND)
# Dead property
pm = self.provider.prop_manager
if pm:
value = pm.get_property(refUrl, name, self.environ)
if value is not None:
return xml_tools.string_to_xml(value)
# No persistence available, or property not found
raise DAVError(HTTP_NOT_FOUND)
def set_property_value(self, name, value, *, dry_run=False):
"""Set a property value or remove a property.
value == None means 'remove property'.
Raise HTTP_FORBIDDEN if property is read-only, or not supported.
When dry_run is True, this function should raise errors, as in a real
run, but MUST NOT change any data.
This default implementation
- raises HTTP_FORBIDDEN, if trying to modify a locking property
- raises HTTP_FORBIDDEN, if trying to modify an immutable {DAV:}
property
- handles Windows' Win32LastModifiedTime to set the getlastmodified
property, if enabled
- stores everything else as dead property, if a property manager is
present.
- raises HTTP_FORBIDDEN, else
Removing a non-existing prop is NOT an error.
Note: RFC 4918 states that {DAV:}displayname 'SHOULD NOT be protected'
A resource provider may override this method, to update supported custom
live properties.
"""
assert value is None or xml_tools.is_etree_element(value)
if name in _lockPropertyNames:
# Locking properties are always read-only
raise DAVError(
HTTP_FORBIDDEN, err_condition=PRECONDITION_CODE_ProtectedProperty
)
# Live property
config = self.environ["wsgidav.config"]
# hotfixes = util.get_dict_value(config, "hotfixes", as_dict=True)
mutableLiveProps = config.get("mutable_live_props", [])
# Accept custom live property updates on resources if configured.
if (
name.startswith("{DAV:}")
and name in _standardLivePropNames
and name in mutableLiveProps
):
# Please note that some properties should not be mutable according
# to RFC4918. This includes the 'getlastmodified' property, which
# it may still make sense to make mutable in order to support time
# stamp changes from e.g. utime calls or the touch or rsync -a
# commands.
if name in ("{DAV:}getlastmodified", "{DAV:}last_modified"):
try:
return self.set_last_modified(
self.path, value.text, dry_run=dry_run
)
except Exception:
_logger.warning(
"Provider does not support set_last_modified on {}.".format(
self.path
)
)
# Unsupported or not allowed
raise DAVError(HTTP_FORBIDDEN)
# Handle MS Windows Win32LastModifiedTime, if enabled.
# Note that the WebDAV client in Win7 and earler has issues and can't be used
# with this so we ignore older clients. Others pre-Win10 should be tested.
if name.startswith("{urn:schemas-microsoft-com:}"):
agent = self.environ.get("HTTP_USER_AGENT", "None")
hotfixes = util.get_dict_value(config, "hotfixes", as_dict=True)
win32_emu = hotfixes.get("emulate_win32_lastmod", False)
if win32_emu and "MiniRedir/6.1" not in agent:
if "Win32LastModifiedTime" in name:
return self.set_last_modified(
self.path, value.text, dry_run=dry_run
)
elif "Win32FileAttributes" in name:
return True
elif "Win32CreationTime" in name:
return True
elif "Win32LastAccessTime" in name:
return True
# Dead property
pm = self.provider.prop_manager
if pm and not name.startswith("{DAV:}"):
refUrl = self.get_ref_url()
if value is None:
return pm.remove_property(refUrl, name, dry_run, self.environ)
else:
value = etree.tostring(value)
return pm.write_property(refUrl, name, value, dry_run, self.environ)
raise DAVError(HTTP_FORBIDDEN)
def remove_all_properties(self, *, recursive):
"""Remove all associated dead properties."""
if self.provider.prop_manager:
self.provider.prop_manager.remove_properties(
self.get_ref_url(), self.environ
)
# --- Locking ------------------------------------------------------------
def prevent_locking(self):
"""Return True, to prevent locking.
This default implementation returns ``False``, so standard processing
takes place: locking (and refreshing of locks) is implemented using
the lock manager, if one is configured.
"""
return False
def is_locked(self):
"""Return True, if URI is locked."""
if self.provider.lock_manager is None:
return False
return self.provider.lock_manager.is_url_locked(self.get_ref_url())
def remove_all_locks(self, *, recursive):
if self.provider.lock_manager:
self.provider.lock_manager.remove_all_locks_from_url(
self.get_ref_url(), recursive=recursive
)
# --- Read / write -------------------------------------------------------
def create_empty_resource(self, name):
"""Create and return an empty (length-0) resource as member of self.
Called for LOCK requests on unmapped URLs.
Preconditions (to be ensured by caller):
- this must be a collection
- <self.path + name> must not exist
- there must be no conflicting locks
Returns a DAVResuource.
This method MUST be implemented by all providers that support write
access.
This default implementation simply raises HTTP_FORBIDDEN.
"""
assert self.is_collection
raise DAVError(HTTP_FORBIDDEN)
def create_collection(self, name):
"""Create a new collection as member of self.
Preconditions (to be ensured by caller):
- this must be a collection
- <self.path + name> must not exist
- there must be no conflicting locks
This method MUST be implemented by all providers that support write
access.
This default implementation raises HTTP_FORBIDDEN.
"""
assert self.is_collection
raise DAVError(HTTP_FORBIDDEN)
def get_content(self):
"""Open content as a stream for reading.
Returns a file-like object / stream containing the contents of the
resource specified.
The calling application will close() the stream.
This method MUST be implemented by all providers.
"""
assert not self.is_collection
raise NotImplementedError
def begin_write(self, *, content_type=None):
"""Open content as a stream for writing.
This method MUST be implemented by all providers that support write
access.
"""
assert not self.is_collection
raise DAVError(HTTP_FORBIDDEN)
def end_write(self, *, with_errors):
"""Called when PUT has finished writing.
This is only a notification that MAY be handled.
"""
return None
def handle_delete(self):
"""Handle a DELETE request natively.
This method is called by the DELETE handler after checking for valid
request syntax and making sure that there are no conflicting locks and
If-headers.
Depending on the return value, this provider can control further
processing:
False:
handle_delete() did not do anything. WsgiDAV will process the request
by calling delete() for every resource, bottom-up.
True:
handle_delete() has successfully performed the DELETE request.
HTTP_NO_CONTENT will be reported to the DAV client.
List of errors:
handle_delete() tried to perform the delete request, but failed
completely or partially. A list of errors is returned like
``[ (<ref-url>, <DAVError>), ... ]``
These errors will be reported to the client.
DAVError raised:
handle_delete() refuses to perform the delete request. The DAVError
will be reported to the client.
An implementation may choose to apply other semantics and return True.
For example deleting '/by_tag/cool/myres' may simply remove the 'cool'
tag from 'my_res'.
In this case, the resource might still be available by other URLs, so
locks and properties are not removed.
This default implementation returns ``False``, so standard processing
takes place.
Implementation of this method is OPTIONAL.
"""
return False
def support_recursive_delete(self):
"""Return True, if delete() may be called on non-empty collections
(see comments there).
This method MUST be implemented for collections (not called on
non-collections).
"""
assert self.is_collection
raise NotImplementedError
def delete(self):
"""Remove this resource (recursive).
Preconditions (ensured by caller):
- there are no conflicting locks or If-headers
- if support_recursive_delete() is False, and this is a collection,
all members have already been deleted.
When support_recursive_delete is True, this method must be prepared to
handle recursive deletes. This implies that child errors must be
reported as tuple list [ (<ref-url>, <DAVError>), ... ].
See http://www.webdav.org/specs/rfc4918.html#delete-collections
This function
- removes this resource
- if this is a non-empty collection, also removes all members.
Note that this may only occur, if support_recursive_delete is True.
- For recursive deletes, return a list of error tuples for all failed
resource paths.
- removes associated direct locks
- removes associated dead properties
- raises HTTP_FORBIDDEN for read-only resources
- raises HTTP_INTERNAL_ERROR on error
This method MUST be implemented by all providers that support write
access.
"""
raise NotImplementedError
def handle_copy(self, dest_path, *, depth_infinity):
"""Handle a COPY request natively.
This method is called by the COPY handler after checking for valid
request syntax and making sure that there are no conflicting locks and
If-headers.
Depending on the return value, this provider can control further
processing:
False:
handle_copy() did not do anything. WsgiDAV will process the request
by calling copy_move_single() for every resource, bottom-up.
True:
handle_copy() has successfully performed the COPY request.
HTTP_NO_CONTENT/HTTP_CREATED will be reported to the DAV client.
List of errors:
handle_copy() tried to perform the copy request, but failed
completely or partially. A list of errors is returned like
``[ (<ref-url>, <DAVError>), ... ]``
These errors will be reported to the client.
DAVError raised:
handle_copy() refuses to perform the copy request. The DAVError
will be reported to the client.
An implementation may choose to apply other semantics and return True.
For example copying '/by_tag/cool/myres' to '/by_tag/hot/myres' may
simply add a 'hot' tag.
In this case, the resource might still be available by other URLs, so
locks and properties are not removed.
This default implementation returns ``False``, so standard processing
takes place.
Implementation of this method is OPTIONAL.
"""
return False
def copy_move_single(self, dest_path, *, is_move):
"""Copy or move this resource to destPath (non-recursive).
Preconditions (ensured by caller):
- there must not be any conflicting locks on destination
- overwriting is only allowed (i.e. destPath exists), when source and
dest are of the same type ((non-)collections) and a Overwrite='T'
was passed
- destPath must not be a child path of this resource
This function
- Overwrites non-collections content, if destination exists.
- MUST NOT copy collection members.
- MUST NOT copy locks.
- SHOULD copy live properties, when appropriate.
E.g. displayname should be copied, but creationdate should be
reset if the target did not exist before.
See http://www.webdav.org/specs/rfc4918.html#dav.properties
- SHOULD copy dead properties.
- raises HTTP_FORBIDDEN for read-only providers
- raises HTTP_INTERNAL_ERROR on error
When is_move is True,
- Live properties should be moved too (e.g. creationdate)
- Non-collections must be moved, not copied
- For collections, this function behaves like in copy-mode:
detination collection must be created and properties are copied.
Members are NOT created.
The source collection MUST NOT be removed.
This method MUST be implemented by all providers that support write
access.
"""
raise NotImplementedError
def handle_move(self, dest_path):
"""Handle a MOVE request natively.
This method is called by the MOVE handler after checking for valid
request syntax and making sure that there are no conflicting locks and
If-headers.
Depending on the return value, this provider can control further
processing:
False:
handle_move() did not do anything. WsgiDAV will process the request
by calling delete() and copy_move_single() for every resource,
bottom-up.
True:
handle_move() has successfully performed the MOVE request.
HTTP_NO_CONTENT/HTTP_CREATED will be reported to the DAV client.
List of errors:
handle_move() tried to perform the move request, but failed
completely or partially. A list of errors is returned like
``[ (<ref-url>, <DAVError>), ... ]``
These errors will be reported to the client.
DAVError raised:
handle_move() refuses to perform the move request. The DAVError
will be reported to the client.
An implementation may choose to apply other semantics and return True.
For example moving '/by_tag/cool/myres' to '/by_tag/hot/myres' may
simply remove the 'cool' tag from 'my_res' and add a 'hot' tag instead.
In this case, the resource might still be available by other URLs, so
locks and properties are not removed.
This default implementation returns ``False``, so standard processing
takes place.
Implementation of this method is OPTIONAL.
"""
return False
def support_recursive_move(self, dest_path):
"""Return True, if move_recursive() is available (see comments there)."""
assert self.is_collection
raise NotImplementedError
def move_recursive(self, dest_path):
"""Move this resource and members to destPath.
This method is only called, when support_recursive_move() returns True.
MOVE is frequently used by clients to rename a file without changing its
parent collection, so it's not appropriate to reset all live properties
that are set at resource creation. For example, the DAV:creationdate
property value SHOULD remain the same after a MOVE.
Preconditions (ensured by caller):
- there must not be any conflicting locks or If-header on source
- there must not be any conflicting locks or If-header on destination
- destPath must not exist
- destPath must not be a member of this resource
This method must be prepared to handle recursive moves. This implies
that child errors must be reported as tuple list
[ (<ref-url>, <DAVError>), ... ].
See http://www.webdav.org/specs/rfc4918.html#move-collections
This function
- moves this resource and all members to destPath.
- MUST NOT move associated locks.
Instead, if the source (or children thereof) have locks, then
these locks should be removed.
- SHOULD maintain associated live properties, when applicable
See http://www.webdav.org/specs/rfc4918.html#dav.properties
- MUST maintain associated dead properties
- raises HTTP_FORBIDDEN for read-only resources
- raises HTTP_INTERNAL_ERROR on error
An implementation may choose to apply other semantics.
For example copying '/by_tag/cool/myres' to '/by_tag/new/myres' may
simply add a 'new' tag to 'my_res'.
This method is only called, when self.support_recursive_move() returns
True. Otherwise, the request server implements MOVE using delete/copy.
This method MAY be implemented in order to improve performance.
"""
raise DAVError(HTTP_FORBIDDEN)
def resolve(self, script_name, path_info):
"""Return a _DAVResource object for the path (None, if not found).
`path_info`: is a URL relative to this object.
DAVCollection.resolve() provides an implementation.
"""
raise NotImplementedError
def finalize_headers(self, environ, response_headers):
"""Perform custom operations on the response headers.
This gets called before the response is started.
It enables adding additional headers or modifying the default ones.
"""
return None
# ========================================================================
# DAVNonCollection
# ========================================================================
class DAVNonCollection(_DAVResource):
"""
A DAVNonCollection is a _DAVResource, that has content (like a 'file' on
a filesystem).
A DAVNonCollecion is able to read and write file content.
See also _DAVResource
"""
def __init__(self, path: str, environ: dict):
_DAVResource.__init__(self, path, False, environ)
@abstractmethod
def get_content_length(self):
"""Returns the byte length of the content.
MUST be implemented.
See also _DAVResource.get_content_length()
"""
raise NotImplementedError
def get_content_type(self):
"""Contains the Content-Type header returned by a GET without accept
headers.
This getcontenttype property MUST be defined on any DAV compliant
resource that returns the Content-Type header in response to a GET.
See http://www.webdav.org/specs/rfc4918.html#PROPERTY_getcontenttype
This default implementation guesses the type from the filen name.
"""
return util.guess_mime_type(self.path)
@abstractmethod
def get_content(self):
"""Open content as a stream for reading.
Returns a file-like object / stream containing the contents of the
resource specified.
The application will close() the stream.
This method MUST be implemented by all providers.
"""
raise NotImplementedError
@abstractmethod
def get_etag(self):
"""
See http://www.webdav.org/specs/rfc4918.html#PROPERTY_getetag
This method SHOULD be implemented, especially by non-collections.
Return None if not supported for this resource instance.
See also `DAVNonCollection.support_etag()` and `util.get_file_etag(path)`.
"""
@abstractmethod
def support_etag(self):
"""Return True, if this resource supports ETags.
See also `DAVNonCollection.get_etag()`.
"""
def support_ranges(self):
"""Return True, if this non-resource supports Range on GET requests.
This default implementation returns False.
"""
return False
def begin_write(self, *, content_type=None):
"""Open content as a stream for writing.
This method MUST be implemented by all providers that support write
access.
"""
raise DAVError(HTTP_FORBIDDEN)
def end_write(self, *, with_errors):
"""Called when PUT has finished writing.
This is only a notification that MAY be handled.
"""
pass
def resolve(self, script_name, path_info):
"""Return a _DAVResource object for the path (None, if not found).
Since non-collection don't have members, we return None if path is not
empty.
"""
if path_info in ("", "/"):
return self
return None
# ========================================================================
# DAVCollection
# ========================================================================
class DAVCollection(_DAVResource):
"""
A DAVCollection is a _DAVResource, that has members (like a 'folder' on
a filesystem).
A DAVCollecion 'knows' its members, and how to obtain them from the backend
storage.
There is also optional built-in support for member caching.
See also _DAVResource
"""
def __init__(self, path, environ):
_DAVResource.__init__(self, path, True, environ)
# Allow caching of members
# self.memberCache = {"enabled": False,
# "expire": 10, # Purge, if not used for n seconds
# "maxAge": 60, # Force purge, if older than n seconds
# "created": None,
# "lastUsed": None,
# "members": None,
# }
# def _cacheSet(self, members):
# if self.memberCache["enabled"]:
# if not members:
# # We cannot cache None, because _cacheGet() == None means 'not in cache'
# members = []
# self.memberCache["created"] = self.memberCache["lastUsed"] = datetime.now()
# self.memberCache["members"] = members
#
# def _cacheGet(self):
# if not self.memberCache["enabled"]:
# return None
# now = datetime.now()
# if (now - self.memberCache["lastUsed"]) > self.memberCache["expire"]:
# return None
# elif (now - self.memberCache["created"]) > self.memberCache["maxAge"]:
# return None
# self.memberCache["lastUsed"] = datetime.now()
# return self.memberCache["members"]
#
# def _cachePurge(self):
# self.memberCache["created"] = self.memberCache["lastUsed"] = None
# self.memberCache["members"] = None
# def getContentLanguage(self):
# return None
def get_content_length(self):
return None
def get_content_type(self):
return None
def create_empty_resource(self, name):
"""Create and return an empty (length-0) resource as member of self.
Called for LOCK requests on unmapped URLs.
Preconditions (to be ensured by caller):
- this must be a collection
- <self.path + name> must not exist
- there must be no conflicting locks
Returns a DAVResuource.
This method MUST be implemented by all providers that support write
access.
This default implementation simply raises HTTP_FORBIDDEN.
"""
raise DAVError(HTTP_FORBIDDEN)
def create_collection(self, name):
"""Create a new collection as member of self.
Preconditions (to be ensured by caller):
- this must be a collection
- <self.path + name> must not exist
- there must be no conflicting locks
This method MUST be implemented by all providers that support write
access.
This default implementation raises HTTP_FORBIDDEN.
"""
assert self.is_collection
raise DAVError(HTTP_FORBIDDEN)
def get_etag(self):
"""
See http://www.webdav.org/specs/rfc4918.html#PROPERTY_getetag
For non-collections we default to None, because it is harder to implement.
See also `DAVCollection.support_etag()`.
"""
return None
def get_member(self, name):
"""Return child resource with a given name (None, if not found).
This method COULD be overridden by a derived class, for performance
reasons.
This default implementation calls self.provider.get_resource_inst().
"""
assert self.is_collection
return self.provider.get_resource_inst(
util.join_uri(self.path, name), self.environ
)
@abstractmethod
def get_member_names(self):
"""Return list of (direct) collection member names (UTF-8 byte strings).
This method MUST be implemented.
"""
assert self.is_collection
raise NotImplementedError
def support_etag(self):
"""Return True, if this resource supports ETags.
For non-collections we default to False, because it is harder to implement.
See also `DAVCollection.get_etag()`.
"""
return False
def support_recursive_delete(self):
"""Return True, if delete() may be called on non-empty collections
(see comments there).
This default implementation returns False.
"""
return False
def delete(self):
"""Remove this resource (possibly recursive).
This method MUST be implemented if resource allows write access.
See _DAVResource.delete()
"""
raise DAVError(HTTP_FORBIDDEN)
def copy_move_single(self, dest_path, *, is_move):
"""Copy or move this resource to destPath (non-recursive).
This method MUST be implemented if resource allows write access.
See _DAVResource.copy_move_single()
"""
raise DAVError(HTTP_FORBIDDEN)
def support_recursive_move(self, dest_path):
"""Return True, if move_recursive() is available (see comments there)."""
return False
def move_recursive(self, dest_path):
"""Move this resource and members to destPath.
This method MAY be implemented in order to improve performance.
"""
raise DAVError(HTTP_FORBIDDEN)
def resolve(self, script_name, path_info):
"""Return a _DAVResource object for the path (None, if not found).
`path_info`: is a URL relative to this object.
"""
if path_info in ("", "/"):
return self
assert path_info.startswith("/")
name, rest = util.pop_path(path_info)
res = self.get_member(name)
if res is None or rest in ("", "/"):
return res
return res.resolve(util.join_uri(script_name, name), rest)
# ========================================================================
# DAVProvider
# ========================================================================
class DAVProvider(ABC):
"""Abstract base class for DAV resource providers.
There will be only one DAVProvider instance per share (not per request).
"""
def __init__(self):
self.mount_path = ""
self.share_path = None
self.lock_manager = None
self.prop_manager = None
self.verbose = 3
self._count_get_resource_inst = 0
self._count_get_resource_inst_init = 0
# self.caseSensitiveUrls = True
def __repr__(self):
return self.__class__.__name__
def is_readonly(self):
return False
def set_mount_path(self, mount_path):
"""Set application root for this resource provider.
This is the value of SCRIPT_NAME, when WsgiDAVApp is called.
"""
assert mount_path in ("", "/") or (
mount_path.startswith("/") and not mount_path.endswith("/")
)
self.mount_path = mount_path
def set_share_path(self, share_path):
"""Set application location for this resource provider.
@param share_path: a UTF-8 encoded, unquoted byte string.
"""
# if isinstance(share_path, unicode):
# share_path = share_path.encode("utf8")
assert share_path == "" or share_path.startswith("/")
if share_path == "/":
share_path = "" # This allows to code 'absPath = share_path + path'
assert share_path in ("", "/") or not share_path.endswith("/")
self.share_path = share_path
def set_lock_manager(self, lock_manager):
if lock_manager and not hasattr(lock_manager, "check_write_permission"):
raise ValueError(
"Must be compatible with wsgidav.lock_man.lock_manager.LockManager"
)
self.lock_manager = lock_manager
def set_prop_manager(self, prop_manager):
if prop_manager and not hasattr(prop_manager, "copy_properties"):
raise ValueError(
"Must be compatible with wsgidav.prop_man.property_manager.PropertyManager"
)
self.prop_manager = prop_manager
def ref_url_to_path(self, ref_url):
"""Convert a refUrl to a path, by stripping the share prefix.
Used to calculate the <path> from a storage key by inverting get_ref_url().
"""
return "/" + unquote(util.removeprefix(ref_url, self.share_path)).lstrip("/")
@abstractmethod
def get_resource_inst(self, path, environ):
"""Return a _DAVResource object for path.
Should be called only once per request and resource::
res = provider.get_resource_inst(path, environ)
if res and not res.is_collection:
print(res.get_content_type())
If <path> does not exist, None is returned.
<environ> may be used by the provider to implement per-request caching.
See _DAVResource for details.
This method MUST be implemented.
"""
raise NotImplementedError
def exists(self, path, environ):
"""Return True, if path maps to an existing resource.
This method should only be used, if no other information is queried
for <path>. Otherwise a _DAVResource should be created first.
This method SHOULD be overridden by a more efficient implementation.
"""
return self.get_resource_inst(path, environ) is not None
def is_collection(self, path, environ):
"""Return True, if path maps to an existing collection resource.
This method should only be used, if no other information is queried
for <path>. Otherwise a _DAVResource should be created first.
"""
res = self.get_resource_inst(path, environ)
return res and res.is_collection
def custom_request_handler(self, environ, start_response, default_handler):
"""Optionally implement custom request handling.
requestmethod = environ["REQUEST_METHOD"]
Either
- handle the request completely
- do additional processing and call default_handler(environ, start_response)
"""
return default_handler(environ, start_response)
| mit | 2c22c161868ce66f8bc623c4d8f5d6bb | 36.052698 | 102 | 0.605178 | 4.506486 | false | false | false | false |
hall-lab/svtools | scripts/del_pe_resolution.py | 1 | 2986 | #!/usr/bin/env python
from __future__ import division
import json
from collections import Counter
import argparse
def calc_insert_density(hist):
'''Transform a histogram of counts to a density'''
dens = Counter()
total = sum(hist.values())
for i in list(hist):
dens[i] = float(hist[i])/total
return dens
def overlap(dens, shift):
'''Shift a density over by "shift" bp and calculate the overlap'''
total = 0.0
for x in xrange(shift, max(dens) + 1):
total += min(dens[x], dens[x - shift])
return total
def find_overlap(dens, target):
    '''Find amount to shift the density to achieve the target overlap value.

    Walks the shift down from the maximum insert size until the overlap
    first exceeds `target`. Returns (shift, overlap) where `shift` is the
    smallest shift whose overlap was still <= target, and `overlap` is the
    overlap value observed at that shift.
    '''
    shift = max(dens) - 1
    current = overlap(dens, shift)
    # `last` trails `current` by one iteration so the returned probability
    # corresponds to the returned (shift + 1), not to the shift that
    # terminated the loop.
    last = current
    while shift >= 0 and current <= target:
        last = current
        shift -= 1
        current = overlap(dens, shift)
        # NOTE(review): when shift reaches -1 this calls overlap(dens, -1);
        # Counter's default-0 lookups keep that well-defined, but confirm it
        # is intended rather than an off-by-one.
    return (shift + 1, last)
def load_svtyper_json(json_file):
    '''Load an svtyper json'''
    # Parse directly from the open handle; the context manager closes it.
    with open(json_file) as handle:
        return json.load(handle)
def create_hist(lib):
    '''Create a histogram from svtyper json information'''
    # JSON stores both keys and values as strings; convert each to int.
    hist = Counter()
    for size, count in lib['histogram'].items():
        hist[int(size)] = int(count)
    return hist
def calculate_overlaps(doc, target):
    '''Calculate the minimum variant size with target discriminating power.

    doc is a parsed svtyper json: {sample: {'libraryArray': [lib, ...]}}.
    Returns a (sample, size, overlap_prob) tuple.
    '''
    for sample in doc:
        for lib in doc[sample]['libraryArray']:
            hist = create_hist(lib)
            dens = calc_insert_density(hist)
            (size, overlap_prob) = find_overlap(dens, target)
    # NOTE(review): this reads the loop variables after the loops finish,
    # so only the *last* sample/library is reported -- presumably each json
    # holds a single sample with a single library; confirm.
    return (sample, size, overlap_prob)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Calculate variant size resolution based on cutoff',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('json_file', nargs='+',
        help='svtyper json files to evaluate'
    )
    parser.add_argument('--overlap', type=float, metavar='FLOAT',
        default=0.05,
        help='maximum density of overlap between discordant \
            and concordant insert size distributions'
    )
    args = parser.parse_args()
    # print(...) with a single argument is valid on both Python 2 and 3.
    print('\t'.join(('Sample', 'MinimumSize', 'Overlap')))
    for f in args.json_file:
        doc = load_svtyper_json(f)
        # Bug fix: the overlap cutoff was previously hard-coded to 0.05,
        # silently ignoring the --overlap option parsed above.
        results = calculate_overlaps(doc, args.overlap)
        print('\t'.join([str(x) for x in results]))
# Here thar be tests
def test_calc_insert_density():
    '''Counts should be normalized to fractions that sum to 1.'''
    t = Counter({1: 1, 2: 2, 3: 1})
    expected = Counter({1: 0.25, 2: 0.5, 3: 0.25})
    assert(calc_insert_density(t) == expected)
def test_overlap():
    '''Overlap is total mass at shift 0, zero past the support, partial between.'''
    t = Counter({1: 0.25, 2: 0.5, 3: 0.25})
    assert(overlap(t, 0) == 1.0)
    assert(overlap(t, 3) == 0.0)
    assert(overlap(t, 1) == 0.5)
def test_find_overlap():
    '''find_overlap returns the smallest shift whose overlap stays <= target.'''
    t = Counter({1: 0.2, 2: 0.5, 3: 0.3})
    assert(find_overlap(t, 1.0) == (0, 1.0))
    assert(find_overlap(t, 0.5) == (1, 0.5))
| mit | 2aeeca5081b5b9efe87faa22217bc6c5 | 28.86 | 78 | 0.586738 | 3.584634 | false | false | false | false |
hall-lab/svtools | tests/vcfpaste_tests.py | 3 | 8789 | from unittest import TestCase, main
import os
import svtools.vcfpaste
import sys
import tempfile
import difflib
class IntegrationTest_vcfpaste(TestCase):
    """End-to-end tests for svtools.vcfpaste against on-disk fixtures.

    NOTE(review): the scenario methods are named run_* rather than test_*,
    so unittest/pytest collectors never execute them automatically --
    confirm whether that is intentional before renaming.
    """
    # FIXME We really don't need to have this stuff run with every test. Run once...

    def setUp(self):
        test_directory = os.path.dirname(os.path.abspath(__file__))
        self.test_data_dir = os.path.join(test_directory, 'test_data', 'vcfpaste')
        # glob vcfs
        vcfs = list()
        for sample in ('NA12878', 'NA12891', 'NA12892'):
            vcfs.append(os.path.join(self.test_data_dir, '{0}.vcf'.format(sample)))
        # write out list since we have the paths and have to get those right
        temp_descriptor, self.list_of_vcfs = tempfile.mkstemp()
        temp_handle = os.fdopen(temp_descriptor, 'w')
        temp_descriptor2, self.list_of_vcfs_with_truncated = tempfile.mkstemp()
        temp_handle2 = os.fdopen(temp_descriptor2, 'w')
        temp_descriptor3, self.list_of_gz_vcfs = tempfile.mkstemp()
        temp_handle3 = os.fdopen(temp_descriptor3, 'w')
        temp_descriptor4, self.list_of_vcfs_with_8col = tempfile.mkstemp()
        temp_handle4 = os.fdopen(temp_descriptor4, 'w')
        for vcf_path in vcfs:
            temp_handle.write(vcf_path + '\n')
            temp_handle2.write(vcf_path + '\n')
            temp_handle4.write(vcf_path + '\n')
            temp_handle3.write(vcf_path + '.gz\n')
        temp_handle.close()
        temp_handle3.close()
        truncated_vcf = os.path.join(self.test_data_dir, 'truncated.vcf')
        temp_handle2.write(truncated_vcf + '\n')
        temp_handle2.close()
        too_skinny_vcf = os.path.join(self.test_data_dir, '8col_NA12878.vcf')
        temp_handle4.write(too_skinny_vcf + '\n')
        temp_handle4.close()
        self.master = os.path.join(self.test_data_dir, 'master.vcf')
        self.thin_master = os.path.join(self.test_data_dir, 'thin_master.vcf')
        self.too_thin_master = os.path.join(self.test_data_dir, '6col_master.vcf')

    def tearDown(self):
        # Remove every temp list created in setUp. The original version
        # leaked self.list_of_vcfs_with_8col on every run.
        for temp_list in (self.list_of_vcfs,
                          self.list_of_vcfs_with_truncated,
                          self.list_of_gz_vcfs,
                          self.list_of_vcfs_with_8col):
            os.remove(temp_list)

    def _paste_and_compare(self, vcf_list, master, expected_result):
        """Run Vcfpaste over vcf_list and diff its output against expected_result."""
        temp_descriptor, temp_output_path = tempfile.mkstemp(suffix='.vcf')
        output_handle = os.fdopen(temp_descriptor, 'w')
        try:
            paster = svtools.vcfpaste.Vcfpaste(vcf_list, master=master, sum_quals=True)
            paster.execute(output_handle)
        finally:
            output_handle.close()
        expected_lines = open(expected_result).readlines()
        produced_lines = open(temp_output_path).readlines()
        diff = difflib.unified_diff(produced_lines, expected_lines, fromfile=temp_output_path, tofile=expected_result)
        result = '\n'.join(diff)
        if result != '':
            for line in result:
                sys.stdout.write(line)
        self.assertFalse(result)
        os.remove(temp_output_path)

    def _paste_and_expect_exit(self, vcf_list, master):
        """Run Vcfpaste over vcf_list and assert it aborts with exit status 1."""
        temp_descriptor, temp_output_path = tempfile.mkstemp(suffix='.vcf')
        output_handle = os.fdopen(temp_descriptor, 'w')
        paster = svtools.vcfpaste.Vcfpaste(vcf_list, master=master, sum_quals=True)
        with self.assertRaises(SystemExit) as cm:
            paster.execute(output_handle)
        # SystemExit stores the exit status in .code; the original asserted
        # on a nonexistent .error_code attribute, which would have raised
        # AttributeError had these methods ever been collected and run.
        self.assertEqual(cm.exception.code, 1)
        output_handle.close()
        os.remove(temp_output_path)

    def run_integration_test_without_master(self):
        expected = os.path.join(self.test_data_dir, 'expected_no_master.vcf')
        self._paste_and_compare(self.list_of_vcfs, None, expected)

    def run_integration_test_without_master_gzipped(self):
        expected = os.path.join(self.test_data_dir, 'expected_no_master.vcf')
        self._paste_and_compare(self.list_of_gz_vcfs, None, expected)

    def run_integration_test_with_master(self):
        master_file = os.path.join(self.test_data_dir, 'master.vcf')
        expected = os.path.join(self.test_data_dir, 'expected_master.vcf')
        self._paste_and_compare(self.list_of_vcfs, master_file, expected)

    def run_integration_test_with_thin_master(self):
        expected = os.path.join(self.test_data_dir, 'expected_master.vcf')
        self._paste_and_compare(self.list_of_vcfs, self.thin_master, expected)

    def run_integration_test_with_too_thin_master(self):
        self._paste_and_expect_exit(self.list_of_vcfs, self.too_thin_master)

    def run_integration_test_with_truncated_vcf(self):
        self._paste_and_expect_exit(self.list_of_vcfs_with_truncated, None)

    def run_integration_test_with_too_thin_vcf(self):
        self._paste_and_expect_exit(self.list_of_vcfs_with_8col, None)
class Test_vcfpaste(TestCase):
    """Unit tests for the Vcfpaste constructor's argument handling."""

    def test_init_w_defaults(self):
        # Only the vcf list is required; master and sum_quals default to None.
        paster = svtools.vcfpaste.Vcfpaste('a_file_o_vcf_filenames')
        self.assertEqual(paster.vcf_list, 'a_file_o_vcf_filenames')
        self.assertIsNone(paster.master)
        self.assertIsNone(paster.sum_quals)

    def test_init_w_specified(self):
        # All three constructor arguments are stored verbatim.
        paster = svtools.vcfpaste.Vcfpaste('some_file', 'master_blaster', True)
        self.assertEqual(paster.vcf_list, 'some_file')
        self.assertEqual(paster.master, 'master_blaster')
        self.assertTrue(paster.sum_quals)
class Test_vcfpaste_ui(TestCase):
    """Tests for the vcfpaste argparse command-line interface."""

    def test_add_arguments_to_parser(self):
        parser = svtools.vcfpaste.command_parser()
        # Long-form option; sum_quals and master take their defaults.
        args1 = parser.parse_args(['--vcf-list', 'some_list'])
        self.assertEqual(args1.vcf_list, 'some_list')
        self.assertFalse(args1.sum_quals)
        self.assertIsNone(args1.master)
        # Short-form options, all three supplied.
        args2 = parser.parse_args(['-f', 'some_list', '-m', 'some_master', '-q'])
        self.assertEqual(args2.vcf_list, 'some_list')
        self.assertTrue(args2.sum_quals)
        self.assertEqual(args2.master, 'some_master')
if __name__ == "__main__":
    # `main` is unittest.main, imported at the top of this file.
    main()
| mit | 6788e71f20235f22a431a41653393b2a | 43.614213 | 118 | 0.634429 | 3.498806 | false | true | false | false |
hall-lab/svtools | svtools/vcf/variant.py | 1 | 6816 | from svtools.vcf.genotype import Genotype
import sys
class Variant(object):
    '''
    Class for storing information stored in a VCF line
    '''

    def __init__(self, var_list, vcf):
        '''
        Initialize values from a tab-split VCF record.

        var_list is the split record (at least 8 columns); vcf supplies
        sample_list plus the INFO/FORMAT field definitions. Genotype
        columns, when present, are cached as a raw string and only parsed
        on demand (see _uncache_gts).
        '''
        self.chrom = var_list[0]
        self.pos = int(var_list[1])
        self.var_id = var_list[2]
        self.ref = var_list[3]
        self.alt = var_list[4]
        self.qual = var_list[5]
        self.filter = var_list[6]
        self.sample_list = vcf.sample_list
        self.info_list = vcf.info_list
        self.format_list = vcf.format_list  # NOTE - This always will contain GT because that's added in the VCF file class
        self.format_set = {i.id for i in vcf.format_list}
        self.gts = None
        if len(var_list) < 8:
            sys.stderr.write('\nError: VCF file must have at least 8 columns\n')
            sys.exit(1)  # was bare exit(1); sys.exit matches the rest of the class
        if len(var_list) == 8:
            # 8 column VCF. No genotypes or format field
            self.format_string = None
            self.format_dict = None
            self.gts_string = None
        else:
            # 9+ columns: record the FORMAT layout and cache the raw genotypes
            self.format_string = var_list[8]
            self.format_dict = {key: index for index, key in enumerate(self.format_string.split(':'))}
            self.gts_string = '\t'.join(var_list[9:])
            if 'GT' not in self.format_dict:
                self.format_dict['GT'] = len(self.format_dict)  # add GT if it doesn't exist
            self._uncache_gts()
        # Parse the INFO column into a dict; valueless keys become flags (True).
        # (A redundant earlier `self.info = dict()` assignment was removed.)
        self.info = dict()
        i_split = [a.split('=') for a in var_list[7].split(';')]  # temp list of split info column
        for i in i_split:
            if len(i) == 1:
                i.append(True)
            self.info[i[0]] = i[1]

    def _parse_genotypes(self, genotype_array):
        '''
        Parse the genotype strings into Genotype objects keyed by sample name.
        '''
        gts = dict()
        for index, sample_string in enumerate(genotype_array):
            sample_name = self.sample_list[index]
            sample_field = sample_string.split(':')
            gts[sample_name] = Genotype(self, sample_field)
        return gts

    def set_info(self, field, value):
        '''
        Set value of the specified field in the INFO section.
        The INFO field must exist already.
        '''
        # any(...) avoids materializing the id list on every call.
        if any(i.id == field for i in self.info_list):
            self.info[field] = value
        else:
            sys.stderr.write('\nError: invalid INFO field, \"' + field + '\"\n')
            sys.exit(1)  # was bare exit(1)

    def get_info(self, field):
        '''
        Get a value for the given INFO field. Raises KeyError if absent.
        '''
        return self.info[field]

    def get_info_string(self):
        '''
        Construct the INFO string for printing. Order is matched to the header.
        '''
        i_list = list()
        for info_field in self.info_list:
            if info_field.id in self.info:
                if info_field.type == 'Flag':
                    # Flags are emitted bare (no "=value") and only when truthy.
                    if self.info[info_field.id]:
                        i_list.append(info_field.id)
                else:
                    i_list.append('%s=%s' % (info_field.id, self.info[info_field.id]))
        return ';'.join(i_list)

    def get_format_string(self, use_cached_format_string=False):
        '''
        Construct the FORMAT field containing the names of the fields in the
        Genotype columns. Returns None for an 8-column VCF.
        '''
        if use_cached_format_string or self.gts is None:
            return self.format_string
        f_list = list()
        for f in self.format_list:
            if f.id in self.format_dict:
                f_list.append(f.id)
        return ':'.join(f_list)

    def get_gt_string(self, use_cached_gt_string=False):
        '''
        Construct the genotype string. Returns None for an 8-column VCF.
        '''
        if self.gts:
            if use_cached_gt_string:
                return self.gts_string
            return '\t'.join(self.genotype(s).get_gt_string() for s in self.sample_list)
        return self.gts_string

    def _uncache_gts(self):
        '''
        Parse genotypes if they are requested.
        This is a no-op if gts are already uncached or if 8-column VCF.
        '''
        if self.gts is None and self.gts_string is not None:
            self.gts = self._parse_genotypes(self.gts_string.split('\t'))

    def genotypes(self):
        '''
        Return a list of all genotype data in the Variant line.
        Returns an empty list if 8-column VCF.
        '''
        self._uncache_gts()
        if self.gts is not None:
            return self.gts.values()
        return list()

    def genotype(self, sample_name):
        '''
        Return the Genotype object for the requested sample.
        Exits with an error for an unknown sample name.
        '''
        self._uncache_gts()
        try:
            return self.gts[sample_name]
        except KeyError:
            sys.stderr.write('\nError: invalid sample name, \"' + sample_name + '\"\n')
            sys.exit(1)

    def set_genotype(self, sample_name, new_genotype):
        '''
        Set the Genotype object for the given sample. Programmer needs to be
        very careful about what gets added here as there is no error checking.
        This will exit with an error if 8-column VCF (no genotypes to update).
        '''
        self._uncache_gts()
        try:
            self.gts[sample_name] = new_genotype
        except KeyError:
            sys.stderr.write('\nError: invalid sample name, \"' + sample_name + '\"\n')
            sys.exit(1)

    def get_var_string(self, use_cached_gt_string=False):
        '''
        Return the String representation for this line.
        '''
        fields = [
            self.chrom,
            self.pos,
            self.var_id,
            self.ref,
            self.alt,
            self.qual,
            self.filter,
            self.get_info_string()
        ]
        if self.format_dict:
            gts_string = self.get_gt_string(use_cached_gt_string)
            if gts_string is None:
                sys.stderr.write("Unable to construct or retrieve genotype string\n")
                sys.exit(1)
            else:
                fields += [
                    self.get_format_string(use_cached_gt_string),
                    gts_string
                ]
        return '\t'.join(map(str, fields))
| mit | 99664f13a9dcdab2477eea8f0ab60172 | 33.953846 | 123 | 0.533891 | 3.908257 | false | false | false | false |
hall-lab/svtools | svtools/breakpoint.py | 1 | 4187 | import sys
import l_bp
from exceptions import MissingProbabilitiesException
class BreakpointInterval(object):
    '''
    Class for storing the range and probability distribution
    of a breakpoint
    '''

    # Probability mass assigned to every padded (slop) position.
    SLOP_PROB = 1e-100

    def __init__(self, chrom, start, end, p):
        self.chrom = chrom
        self.start = start
        self.end = end
        self.p = p

    def pad_slop(self, percent_slop, fixed_slop):
        '''
        Widen the interval by max(percent of width, fixed) positions on
        each side, then re-normalize the probability vector.
        '''
        width = self.end - self.start + 1
        pad = int(max(percent_slop * width, fixed_slop))
        self.start -= pad
        self.end += pad
        padding = [BreakpointInterval.SLOP_PROB] * pad
        self.p = padding + self.p + padding
        self._trim()
        self._normalize()

    def _trim(self):
        '''
        Drop any part of the range that extends before the chromosome start.
        '''
        if self.start < 0:
            self.p = self.p[-self.start:]
            self.start = 0

    def _normalize(self):
        '''
        Rescale the probability vector so it sums to 1.
        '''
        total = sum(self.p)
        self.p = [float(mass) / total for mass in self.p]

    def common_range(self, other):
        '''Return the (start, end) of the overlap between two intervals.'''
        return max(self.start, other.start), min(self.end, other.end)

    def overlap_prob(self, other, c_start, c_len):
        '''Sum of min-probabilities across the common window of length c_len.'''
        offset_self = c_start - self.start
        offset_other = c_start - other.start
        total = 0
        for i in range(c_len):
            total += min(self.p[i + offset_self], other.p[i + offset_other])
        return total
class Breakpoint(object):
    '''
    Class for storing information about Breakpoints for merging
    '''

    def __init__(self, line, percent_slop=0, fixed_slop=0):
        '''
        Initialize with slop for probabilities.

        line is a VCF record; l_bp.split_v is expected to return the SV
        type, both chromosomes, strands, both coordinate ranges and the
        INFO dict (containing PRPOS/PREND probability vectors) -- see
        l_bp for the exact contract.
        '''
        self.l = line
        (self.sv_type,
         chr_l,
         chr_r,
         self.strands,
         start_l,
         end_l,
         start_r,
         end_r,
         m) = l_bp.split_v(line)
        try:
            self.left = BreakpointInterval(chr_l, start_l, end_l, self.floats_from_tag(m, 'PRPOS'))
            self.right = BreakpointInterval(chr_r, start_r, end_r, self.floats_from_tag(m, 'PREND'))
        except RuntimeError as e:
            # Missing PRPOS/PREND tags surface as a domain-specific exception.
            raise MissingProbabilitiesException(str(e))
        if ((percent_slop > 0) or (fixed_slop > 0)):
            self.left.pad_slop(percent_slop, fixed_slop)
            self.right.pad_slop(percent_slop, fixed_slop)

    def __str__(self):
        '''
        Convert back to a string
        '''
        return '\t'.join([str(x) for x in [self.left.chrom,
                                           self.left.start,
                                           self.left.end,
                                           self.right.chrom,
                                           self.right.start,
                                           self.right.end,
                                           self.sv_type,
                                           self.strands,
                                           self.left.p,
                                           self.right.p]])

    def ovl(self, b):
        '''
        Calculate overlapping cumulative probability value as weight?
        0 if not overlapping.
        '''
        # Breakpoints on different chromosomes or of different SV types
        # can never overlap.
        if ((self.left.chrom != b.left.chrom) or
                (self.right.chrom != b.right.chrom) or
                (self.sv_type != b.sv_type)):
            return 0
        # get common intervals
        c_start_l, c_end_l = self.left.common_range(b.left)
        c_start_r, c_end_r = self.right.common_range(b.right)
        c_l_len = c_end_l - c_start_l + 1
        c_r_len = c_end_r - c_start_r + 1
        if (c_l_len < 1) or (c_r_len < 1):
            return 0
        # Joint overlap is the product of the per-side overlaps.
        ovl_l = self.left.overlap_prob(b.left, c_start_l, c_l_len)
        ovl_r = self.right.overlap_prob(b.right, c_start_r, c_r_len)
        return ovl_l * ovl_r

    @staticmethod
    def floats_from_tag(info_dict, tag):
        '''Parse a comma-separated INFO tag into a list of floats.'''
        if tag in info_dict:
            return [float(x) for x in info_dict[tag].split(',')]
        else:
            raise RuntimeError('Required tag {0} not found.'.format(tag))
| mit | 7e1899d0424dc7427aafd18c039a62a1 | 30.481203 | 103 | 0.504418 | 3.666375 | false | false | false | false |
hall-lab/svtools | setup.py | 1 | 1436 | from setuptools import setup, find_packages
import versioneer
'''
This package provides tools for combining, genotyping and refining structural variant calls from LUMPY in a highly scalable way. The current package efficiently scales to process thousands of individuals.
'''
setup(
name='svtools',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Tools for processing and analyzing structural variants',
long_description=__doc__,
url='https://github.com/hall-lab/svtools',
author='Ira Hall lab',
author_email='dlarson@genome.wustl.edu',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
keywords='genomics structural variants sv bioinformatics',
packages=find_packages(exclude=['tests']),
include_package_data=True,
install_requires=['svtyper==0.7.1', 'numpy', 'scipy', 'statsmodels', 'pandas==0.19.2', 'setuptools',
'google-auth',
'google-cloud-storage',
'google-compute-engine',
'crcmod',
'logzero'],
scripts=['scripts/create_coordinates'],
entry_points={
'console_scripts': [
'svtools=svtools.cli:main',
]
},
)
| mit | 81b7d5d948567f7e9e2f48d3ecba9506 | 29.553191 | 204 | 0.648329 | 3.902174 | false | false | false | false |
hall-lab/svtools | svtools/vcfsort.py | 1 | 1158 | import sys
import argparse
from svtools.external_cmd import ExternalCmd
class VcfSort(ExternalCmd):
    """Wrapper that delegates VCF sorting to the bundled bin/vcfsort script."""

    def __init__(self):
        # ExternalCmd presumably takes (command name, relative script path)
        # -- confirm against svtools.external_cmd.
        super(VcfSort, self).__init__('vcfsort', 'bin/vcfsort')
def description():
    """Return the one-line description shown in the CLI help."""
    return 'sort a VCF file'
def epilog():
    """Return the usage epilog shown at the end of the CLI help."""
    return 'To read in stdin and output to a file, use /dev/stdin or - as the first positional argument.'
def add_arguments_to_parser(parser):
    """Register vcfsort's optional positional arguments and entry point."""
    parser.add_argument('input', metavar='<VCF>', nargs='?', help='VCF file to sort (default: stdin)')
    parser.add_argument('output', metavar='<VCF>', nargs='?', help='output file to write to (default: stdout)')
    # run_from_args is invoked by the top-level svtools CLI dispatcher.
    parser.set_defaults(entry_point=run_from_args)
def command_parser():
    """Build a stand-alone ArgumentParser for the vcfsort subcommand."""
    parser = argparse.ArgumentParser(description=description())
    add_arguments_to_parser(parser)
    return parser
def run_from_args(args):
    """Translate parsed CLI arguments into positional options and run vcfsort."""
    # Only forward the arguments that were actually supplied.
    opts = [value for value in (args.input, args.output) if value]
    VcfSort().run_cmd_with_options(opts)
if __name__ == "__main__":
    # Allow running this module directly: parse args and dispatch to
    # the registered entry point (run_from_args).
    parser = command_parser()
    args = parser.parse_args()
    sys.exit(args.entry_point(args))
| mit | 6955285734f16ca7082063b3f83417eb | 29.473684 | 111 | 0.670121 | 3.509091 | false | false | false | false |
venth/aws-adfs | test/test_account_aliases_fetcher.py | 1 | 15770 | from aws_adfs import account_aliases_fetcher
def _aws_account(account_alias, account_no):
return u'<div class="saml-account-name">Account: {} ({})</div>'.format(account_alias, account_no)
def _aws_account_without_alias(account_no):
return u'<div class="saml-account-name">Account: {}</div>'.format(account_no)
def _account_page_response_text(response_text):
response = type('', (), {})()
response.request = type('', (), {})()
response.request.body = ""
response.request.headers = {}
response.request.url = {}
response.status_code = 'irrelevant'
response.headers = {}
response.text = response_text
return response
def _account_page_response(accounts):
    """Wrap the given account <div> strings in the AWS role-selection page."""
    page = u'''
    <html>
        <body>
            <div>
                <form>
                    <fieldset>
                        {}
                    </fieldset>
                </form>
            </div>
        </body>
    </html>
    '''.format('\n'.join(accounts))
    return _account_page_response_text(page)
def _failed_account_page_response():
    """Response whose body lacks the expected form, simulating a failed fetch."""
    return _account_page_response_text(u'<html></html>')
class TestAccountAliasesFetcher:
    """Tests for account_aliases_fetcher.account_aliases against canned pages."""

    def _fetch_aliases(self):
        """Invoke the fetcher with the fixture session and credentials."""
        return account_aliases_fetcher.account_aliases(self.authenticated_session,
                                                       self.irrelevant_username,
                                                       self.irrelevant_password,
                                                       self.irrelevant_auth_method,
                                                       self.authenticated_saml_response,
                                                       self.irrelevant_config)

    def test_returns_empty_account_dictionary_when_no_account_are_named(self):
        # given user with no aws accounts
        self.authenticated_session.post = lambda *args, **kwargs: _account_page_response([])
        # when gets account aliases via fetcher
        accounts = self._fetch_aliases()
        # then returns no accounts
        assert accounts == {}

    def test_returns_one_account_when_one_account_is_listed(self):
        # given user with one aws account
        account_no = '123'
        account_alias = 'single'
        self.authenticated_session.post = lambda *args, **kwargs: _account_page_response([_aws_account(account_alias, account_no)])
        # when gets account aliases via fetcher
        accounts = self._fetch_aliases()
        # then returns that single account
        assert accounts == {account_no: account_alias}

    def test_returns_two_accounts_when_two_accounts_are_listed(self):
        # given user with two aws accounts
        account_no = '1'
        account_alias = 'single'
        second_account_no = '2'
        second_account_alias = 'bingle'
        self.authenticated_session.post = lambda *args, **kwargs: _account_page_response([
            _aws_account(account_alias, account_no),
            _aws_account(second_account_alias, second_account_no),
        ])
        # when gets account aliases via fetcher
        accounts = self._fetch_aliases()
        # then returns both accounts
        assert accounts == {account_no: account_alias, second_account_no: second_account_alias}

    def test_returns_accounts_expected_for_real_case_response(self):
        # given response with accounts
        response_text, expected_accounts = self._response_with_expected_aliases()
        self.authenticated_session.post = lambda *args, **kwargs: _account_page_response_text(response_text)
        # when gets account aliases via fetcher
        accounts = self._fetch_aliases()
        # then account numbers match. Compare as sets: dict.keys() returns
        # order-sensitive lists under Python 2, which made the original
        # `accounts.keys() == expected_accounts.keys()` comparison flaky.
        assert set(accounts.keys()) == set(expected_accounts.keys())

    def test_returns_two_accounts_expected_for_real_case_response(self):
        # given response with accounts
        response_text, expected_accounts = self._response_with_two_expected_aliases()
        self.authenticated_session.post = lambda *args, **kwargs: _account_page_response_text(response_text)
        # when gets account aliases via fetcher
        accounts = self._fetch_aliases()
        # then account numbers match (set comparison; see note above)
        assert set(accounts.keys()) == set(expected_accounts.keys())

    def test_returns_no_aliases_when_the_call_for_aliases_failed(self):
        # given failed response
        self.authenticated_session.post = lambda *args, **kwargs: _failed_account_page_response()
        # when gets account aliases via fetcher
        accounts = self._fetch_aliases()
        # then there are no aliases
        assert accounts == {}

    def test_returns_full_saml_name_account_when_no_account_alias_is_provided(self):
        # given aws account without alias
        account_no = '123123'
        full_account_name = 'Account: {}'.format(account_no)
        self.authenticated_session.post = lambda *args, **kwargs: _account_page_response([
            _aws_account_without_alias(account_no)
        ])
        # when gets account aliases via fetcher
        accounts = self._fetch_aliases()
        # then uses full account name as the alias
        assert accounts == {account_no: full_account_name}

    def _response_with_expected_aliases(self):
        """Four-account role-selection page plus the expected alias mapping."""
        return u'''
<html>
<body>
<div id="container">
<h1 class="background">Amazon Web Services Login</h1>
<div id="content">
<div id="main_error"></div>
<form id="saml_form" name="saml_form" action="/saml" method="post">
<input type="hidden" name="RelayState" value="" />
<input type="hidden" name="SAMLResponse" value="valueofSAMLRESPONSE" />
<input type="hidden" name="name" value="" />
<input type="hidden" name="portal" value="" />
<p style="font-size: 16px; padding-left: 20px;">Select a role:</p>
<fieldset>
<div class="saml-account"> <div onClick="expandCollapse(0);">
<img id="image0" src="/static/image/down.png" valign="middle"></img>
<div class="saml-account-name">Account: mydomain-account3 (123456789012)</div>
</div>
<hr style="border: 1px solid #ddd;">
<div id="0" class="saml-account" >
<div class="saml-role" onClick="checkRadio(this);">
<input type="radio" name="roleIndex" value="arn:aws:iam::123456789012:role/ADFS-CloudSearchManager" class="saml-radio" id="arn:aws:iam::123456789012:role/ADFS-CloudSearchManager" />
<label for="arn:aws:iam::123456789012:role/ADFS-CloudSearchManager" class="saml-role-description">ADFS-CloudSearchManager</label>
<span style="clear: both;"></span>
</div>
</div></div><div class="saml-account"> <div onClick="expandCollapse(1);">
<img id="image1" src="/static/image/down.png" valign="middle"></img>
<div class="saml-account-name">Account: mydomain-account2 (223456789012)</div>
</div>
<hr style="border: 1px solid #ddd;">
<div id="1" class="saml-account" >
<div class="saml-role" onClick="checkRadio(this);">
<input type="radio" name="roleIndex" value="arn:aws:iam::223456789012:role/ADFS-CloudSearchManager" class="saml-radio" id="arn:aws:iam::223456789012:role/ADFS-CloudSearchManager" />
<label for="arn:aws:iam::223456789012:role/ADFS-CloudSearchManager" class="saml-role-description">ADFS-CloudSearchManager</label>
<span style="clear: both;"></span>
</div>
</div></div><div class="saml-account"> <div onClick="expandCollapse(2);">
<img id="image2" src="/static/image/down.png" valign="middle"></img>
<div class="saml-account-name">Account: mydomain-account1 (323456789012)</div>
</div>
<hr style="border: 1px solid #ddd;">
<div id="2" class="saml-account" >
<div class="saml-role" onClick="checkRadio(this);">
<input type="radio" name="roleIndex" value="arn:aws:iam::323456789012:role/ADFS-Administrator" class="saml-radio" id="arn:aws:iam::323456789012:role/ADFS-Administrator" />
<label for="arn:aws:iam::323456789012:role/ADFS-Administrator" class="saml-role-description">ADFS-Administrator</label>
<span style="clear: both;"></span>
</div>
<div class="saml-role" onClick="checkRadio(this);">
<input type="radio" name="roleIndex" value="arn:aws:iam::323456789012:role/ADFS-EMRManager" class="saml-radio" id="arn:aws:iam::323456789012:role/ADFS-EMRManager" />
<label for="arn:aws:iam::323456789012:role/ADFS-EMRManager" class="saml-role-description">ADFS-EMRManager</label>
<span style="clear: both;"></span>
</div>
</div></div><div class="saml-account"> <div onClick="expandCollapse(3);">
<img id="image3" src="/static/image/down.png" valign="middle"></img>
<div class="saml-account-name">Account: 423456789012</div>
</div>
<hr style="border: 1px solid #ddd;">
<div id="3" class="saml-account" >
<div class="saml-role" onClick="checkRadio(this);">
<input type="radio" name="roleIndex" value="arn:aws:iam::423456789012:role/ADFS-Administrator" class="saml-radio" id="arn:aws:iam::423456789012:role/ADFS-Administrator" />
<label for="arn:aws:iam::423456789012:role/ADFS-Administrator" class="saml-role-description">ADFS-Administrator</label>
<span style="clear: both;"></span>
</div>
</div></div>
</fieldset>
<br>
<div class="buttoninput" id="input_signin_button">
<a id="signin_button" class="css3button" href="#" alt="Continue" value="Continue">Sign In</a>
</div>
</form>
</div>
</body>
</html>
''', {
            '123456789012': 'mydomain-account3',
            '223456789012': 'mydomain-account2',
            '323456789012': 'mydomain-account1',
            '423456789012': '423456789012',
        }

    def _response_with_two_expected_aliases(self):
        """Single-account, two-role page plus the expected alias mapping."""
        return u'''
<html>
<body>
<div id="container">
<h1 class="background">Amazon Web Services Login</h1>
<div id="content">
<div id="main_error"></div>
<form id="saml_form" name="saml_form" action="/saml" method="post">
<input type="hidden" name="RelayState" value="" />
<input type="hidden" name="SAMLResponse" value="valueofSAMLRESPONSE" />
<input type="hidden" name="name" value="" />
<input type="hidden" name="portal" value="" />
<p style="font-size: 16px; padding-left: 20px;">Select a role:</p>
<fieldset>
<div class="saml-account"> <div onClick="expandCollapse(0);">
<img id="image0" src="/static/image/down.png" valign="middle"></img>
<div class="saml-account-name">Account: zefr (123456789012)</div>
</div>
<hr style="border: 1px solid #ddd;">
<div id="0" class="saml-account" >
<div class="saml-role" onClick="checkRadio(this);">
<input type="radio" name="roleIndex" value="arn:aws:iam::123456789012:role/CORP-ROLE1" class="saml-radio" id="arn:aws:iam::123456789012:role/CORP-ROLE1" />
<label for="arn:aws:iam::123456789012:role/CORP-ROLE1" class="saml-role-description">CORP-ROLE1</label>
<span style="clear: both;"></span>
</div>
<div class="saml-role" onClick="checkRadio(this);">
<input type="radio" name="roleIndex" value="arn:aws:iam::123456789012:role/CORP-ROLE2" class="saml-radio" id="arn:aws:iam::123456789012:role/CORP-ROLE2" />
<label for="arn:aws:iam::123456789012:role/CORP-ROLE2" class="saml-role-description">CORP-ROLE2</label>
<span style="clear: both;"></span>
</div>
</div></div>
</fieldset>
<br>
<div class="buttoninput" id="input_signin_button">
<a id="signin_button" class="css3button" href="#" alt="Continue" value="Continue">Sign In</a>
</div>
</form>
</div>
</body>
</html>
''', {
            '123456789012': 'zefr',
        }

    def setup_method(self, method):
        # Fresh anonymous fixture objects for every test; each test stubs
        # `post` on the session to return its canned page.
        self.authenticated_session = type('', (), {})()
        self.irrelevant_auth_method = {}
        self.irrelevant_username = 'irrelevant username'
        self.irrelevant_password = 'irrelevant password'
        self.authenticated_saml_response = 'irrelevant saml response'
        self.irrelevant_config = type('', (), {})()
        self.irrelevant_config.ssl_verification = True
| mit | cedc71d8a6c45c61f839688959a60d33 | 49.707395 | 201 | 0.537159 | 4.302865 | false | false | false | false |
venth/aws-adfs | aws_adfs/authenticator.py | 1 | 7315 | import logging
import lxml.etree as ET
from . import account_aliases_fetcher
from . import _duo_authenticator as duo_auth
from . import _duo_universal_prompt_authenticator as duo_universal_prompt_auth
from . import _rsa_authenticator as rsa_auth
from . import _symantec_vip_access as symantec_vip_access
from . import _azure_mfa_authenticator as azure_mfa_auth
from . import _azure_cloud_mfa_authenticator as azure_cloud_mfa_auth
from . import html_roles_fetcher
from . import roles_assertion_extractor
from .helpers import trace_http_request
def authenticate(config, username=None, password=None, assertfile=None):
    """Authenticate against ADFS and collect the available AWS roles.

    Returns a 3-tuple ``(aggregated_principal_roles, assertion,
    aws_session_duration)``; every element is ``None`` when the
    corresponding step failed.
    """
    response, session = html_roles_fetcher.fetch_html_encoded_roles(
        adfs_host=config.adfs_host,
        adfs_cookie_location=config.adfs_cookie_location,
        ssl_verification_enabled=config.ssl_verification,
        adfs_ca_bundle=config.adfs_ca_bundle,
        provider_id=config.provider_id,
        username=username,
        password=password,
        sspi=config.sspi,
    )
    trace_http_request(response)

    assertion = None
    aws_session_duration = None
    aggregated_principal_roles = None

    if response.status_code != 200:
        logging.error(u'Cannot extract roles from response')
    else:
        # Pick the extraction strategy (plain, from-file, or one of the
        # MFA flavours) and run it against the authenticated response.
        extractor = _strategy(response, config, session, assertfile)
        principal_roles, assertion, aws_session_duration = extractor()
        if assertion is None:
            logging.error("Cannot extract saml assertion from request's response. Re-authentication needed?")
        else:
            aggregated_principal_roles = _aggregate_roles_by_account_alias(
                session,
                config,
                username,
                password,
                assertion,
                principal_roles)

    logging.debug(u'Roles along with principals found after authentication: {}'.format(aggregated_principal_roles))
    return aggregated_principal_roles, assertion, aws_session_duration
def _aggregate_roles_by_account_alias(session,
                                      config,
                                      username,
                                      password,
                                      assertion,
                                      principal_roles):
    """Group (principal, role) pairs by account alias.

    Returns ``{alias: {role_arn: {'name': ..., 'principal_arn': ...}}}``;
    accounts without a fetched alias fall back to the raw account number.
    """
    aliases = account_aliases_fetcher.account_aliases(session, username, password, config.provider_id, assertion, config)

    by_account = {}
    for principal_arn, role_arn in principal_roles:
        # role_arn looks like arn:aws:iam::<account_no>:role/<role_name>
        account_no = role_arn.split(':')[4]
        alias = aliases.setdefault(account_no, account_no)
        by_account.setdefault(alias, {})[role_arn] = {
            'name': role_arn.split(':role/')[1],
            'principal_arn': principal_arn,
        }
    return by_account
def _strategy(response, config, session, assertfile=None):
    """Select the saml-assertion extraction callable for this response.

    Returns a zero-argument callable producing
    ``(principal_roles, assertion, aws_session_duration)``.  The first MFA
    flavour whose detector matches the page wins; otherwise the plain
    extractor (or the from-file extractor when *assertfile* is given) is
    used.
    """
    html_response = ET.fromstring(response.text, ET.HTMLParser())

    def _plain():
        return roles_assertion_extractor.extract(html_response)

    def _from_file():
        return roles_assertion_extractor.extract_file(assertfile)

    def _duo():
        return duo_auth.extract(
            html_response, config.ssl_verification, session, config.duo_factor, config.duo_device
        )

    def _duo_universal():
        return duo_universal_prompt_auth.extract(
            html_response, config.ssl_verification, session, config.duo_factor, config.duo_device
        )

    def _symantec():
        return symantec_vip_access.extract(html_response, config.ssl_verification, session)

    def _rsa():
        return rsa_auth.extract(html_response, config.ssl_verification, session)

    def _azure_mfa():
        return azure_mfa_auth.extract(html_response, config.ssl_verification, session)

    def _azure_cloud_mfa():
        return azure_cloud_mfa_auth.extract(html_response, config.ssl_verification, session)

    # Order matters: it mirrors the priority of the MFA detectors.
    detectors = (
        (_is_duo_authentication, _duo),
        (_is_duo_universal_prompt_authentication, _duo_universal),
        (_is_symantec_vip_authentication, _symantec),
        (_is_rsa_authentication, _rsa),
        (_is_azure_mfa_authentication, _azure_mfa),
        (_is_azure_cloud_mfa_authentication, _azure_cloud_mfa),
    )
    for matches, extractor in detectors:
        if matches(html_response):
            return extractor
    return _plain if assertfile is None else _from_file
def _is_duo_authentication(html_response):
    """True when the page advertises the classic Duo ADFS adapter."""
    element = html_response.find('.//input[@id="authMethod"]')
    return element is not None and element.get("value") == "DuoAdfsAdapter"
def _is_duo_universal_prompt_authentication(html_response):
    """True when the page carries the Duo Universal Prompt form marker."""
    element = html_response.find('.//form[@id="adfs_form"]/input[@name="adfs_auth_method"]')
    if element is None:
        return False
    return element.get("value") == "DuoAdfsAdapter"
def _is_symantec_vip_authentication(html_response):
    """Return True when the page advertises one of the Symantec VIP adapters.

    Fix: the original fell off the end of the function and implicitly
    returned None in the non-matching case, unlike every sibling detector
    which returns a bool.  Truthiness (and therefore caller behavior in
    _strategy's elif chain) is unchanged; the duplicated
    ``element is not None`` check is also collapsed into one membership test.
    """
    element = html_response.find('.//input[@id="authMethod"]')
    return element is not None and element.get('value') in (
        'SymantecVipAdapter',
        'VIPAuthenticationProviderWindowsAccountName',
    )
def _is_rsa_authentication(html_response):
    """True when the page advertises the RSA SecurID adapter."""
    element = html_response.find('.//input[@id="authMethod"]')
    if element is None:
        return False
    return element.get('value') == 'SecurIDAuthentication'
def _is_azure_mfa_authentication(html_response):
    """True when the page advertises the on-premises Azure MFA Server adapter."""
    element = html_response.find('.//input[@id="authMethod"]')
    method = element.get('value') if element is not None else None
    return method == 'AzureMfaServerAuthentication'
def _is_azure_cloud_mfa_authentication(html_response):
    """True when the page advertises the cloud Azure MFA adapter."""
    found = html_response.find('.//input[@id="authMethod"]')
    if found is not None and found.get('value') == 'AzureMfaAuthentication':
        return True
    return False
| mit | e959b7b87a99d8ee4f4259d6c7825ad9 | 36.13198 | 129 | 0.624197 | 4.121127 | false | true | false | false |
missionpinball/mpf-docs | MpfDocsTestCase.py | 1 | 2921 | """Contains the MpfDocsTestCase class"""
import os
import shutil
import tempfile
import uuid
from mpf.tests.MpfTestCase import MpfTestCase
class MpfDocsTestCase(MpfTestCase):
    """Allows unittests to extract configs from .rst files in mpf-docs and
    load them and run tests against them.

    To use it, add comments in the rst file which specify the begin/end of
    the intended code block in the RST file which will be the config.  The
    begin marker also carries the path and filename of the config file.

    For example, from an .rst file::

        .. begin_mpfdoctest:config/config.yaml

        ::

            #config_version=5
            switches:
                s_left_flipper:
                    number: 1

        .. end_mpfdoctest

    Fixes over the original:
    * the config buffer is now reset at every ``begin_mpfdoctest`` marker,
      so an .rst file with several config blocks no longer gets the earlier
      blocks' content prepended to every later config file;
    * the inner file handle no longer shadows the .rst file handle.
    """

    # Path (relative to the repo root) of the .rst file to extract configs
    # from.  Subclasses set this; a '.rst' suffix is appended automatically.
    rst_target = ''

    def __init__(self, methodName='runTest'):
        super().__init__(methodName)
        self._temp_machine_folder = None

    def setUp(self):
        # Every test run gets its own throwaway machine folder so extracted
        # configs never collide between tests.  Configs must exist before
        # the MPF machine boots, hence the extraction before super().setUp().
        self._temp_machine_folder = os.path.join(tempfile.gettempdir(),
                                                 str(uuid.uuid4()))
        self.create_temp_config_files()
        super().setUp()

    def get_options(self):
        options = super().get_options()
        # Configs are generated fresh for each run, so the YAML config cache
        # must be neither read nor written.
        options['no_load_cache'] = True
        options['create_config_cache'] = False
        return options

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(self.get_machine_path())
        self._temp_machine_folder = None

    def get_config_file(self):
        return 'config.yaml'

    def get_machine_path(self):
        return self._temp_machine_folder

    def create_temp_config_files(self):
        """Extract every marked config block from self.rst_target into the
        temporary machine folder."""
        if not self.rst_target.endswith('.rst'):
            self.rst_target += '.rst'

        if self.rst_target[0] == '/':
            self.rst_target = self.rst_target[1:]

        with open(os.path.abspath(self.rst_target), 'r') as rst_file:
            in_config = False
            config = ''
            file_path = None
            for line in rst_file:
                if '.. begin_mpfdoctest' in line:
                    in_config = True
                    # Reset the buffer so configs from earlier blocks in the
                    # same .rst file do not leak into this one.
                    config = ''
                    file_path = line.split(':')[1]
                    file_path = file_path[:-1]  # strip off the \n
                    file_path = os.path.join(self._temp_machine_folder,
                                             file_path)
                # NOTE(review): this also drops blank lines and any line
                # containing '::' from the config body (the '::' literal-block
                # marker in the .rst) -- preserved original behavior.
                elif in_config and '::' not in line and line != '\n':
                    if '.. end_mpfdoctest' in line:
                        os.makedirs(os.path.dirname(file_path), exist_ok=True)
                        with open(os.path.abspath(file_path), 'w') as config_file:
                            config_file.write(config)
                        in_config = False
                    else:
                        config += line
| mit | 7a151b9a084eb75ef324476f90a28fa5 | 26.046296 | 78 | 0.516604 | 4.399096 | false | true | false | false |
missionpinball/mpf-docs | _doc_tools/mpf_lexer.py | 1 | 12287 | import re
from pygments.lexer import include, bygroups
from pygments.token import Comment, String, Text, Punctuation, Number, Keyword, Name, Literal, Other, STANDARD_TYPES, \
Token
from pygments.lexers.data import YamlLexer
from sphinx.util import docutils
from docutils import nodes
# Custom pygments token types used when rendering the "tested config" panels.
# NOTE(review): TestHeader is bound to Token.TestCode (not Token.TestHeader)
# -- the name mismatch may be intentional but is worth confirming.
TestHeader = Token.Token.TestCode
TestConfig = Token.Token.TestConfig
# Register CSS class names for the custom tokens so pygments' HTML
# formatter knows how to emit them.
STANDARD_TYPES[TestHeader] = 'test_header'
STANDARD_TYPES[TestConfig] = 'test_config'
class ExampleSliderVisitor(docutils.nodes.NodeVisitor):
    """Docutils visitor that appends a collapsible "tested config" panel
    after every ``mpf-config`` / ``mpf-mc-config`` literal block."""

    def __init__(self, document, app):
        super().__init__(document)
        self.app = app
        self.parent = None

    def _add_test_toggle_to_node(self, node, config_type):
        position = node.parent.children.index(node)
        config = node.rawsource

        message = "This example is tested to be valid {} config. ".format(config_type)
        if "##! test" in config:
            message += "Additionally, our integration test passed. Click to show the test and full config."
        else:
            message += "However, it is not integration tested."

        if "\n#! " in config:
            message += " Some parts are hidden. Click to show full config."

        # Un-hide the '#! ' lines for the expanded view.
        config = re.sub(r'^#! ([^\n]+)', '\\1', config, flags=re.MULTILINE)

        toggle = nodes.container(
            '',
            nodes.container('', nodes.paragraph(text=message), classes=["header"]),
            nodes.literal_block(language="test", rawsource=config, text=config),
            classes=["toggle"],
        )
        # Splice the panel in directly after the example block.
        siblings = node.parent.children
        node.parent.children = siblings[:position + 1] + [toggle] + siblings[position + 1:]

    def visit_literal_block(self, node):
        if node.attributes.get("language") == "mpf-config":
            self._add_test_toggle_to_node(node, "MPF")
        elif node.attributes.get("language") == "mpf-mc-config":
            self._add_test_toggle_to_node(node, "MPF and MPF-MC")

    def unknown_visit(self, node: docutils.nodes.Node) -> None:
        """Called for all other node types."""
        pass
class MpfLexer(YamlLexer):
    """Pygments lexer for MPF config examples.

    Subclasses the stock YAML lexer; the deviations from it are the
    MPF-specific "doc ignore" comment rules in 'root' (lines starting with
    '#!' or most '##!' lines are swallowed without producing output) and the
    implicit-key rule in 'block-nodes'.  Everything else is the regex state
    machine inherited from pygments' YamlLexer, reproduced here so the rule
    order can be controlled.
    """

    name = 'mpf-config'

    # NOTE: deliberately defined in the class body *without* self -- it is
    # called below while the ``tokens`` table is being built, i.e. during
    # class creation, never on instances.
    def nothing(token_class):
        """Do not produce empty tokens."""
        def callback(lexer, match, context):
            yield match.start(), token_class, ""
            context.pos = match.end()
        return callback

    tokens = {
        # the root rules
        'root': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Text),
            # line breaks
            (r'\n+', Text),
            # doc ignore comment (MPF-specific: consumed without output)
            (r'#![^\n]*\n', nothing(Text)),
            (r'##! (?!mode:|show:)[^\n]*\n', nothing(Text)),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # the '%YAML' directive
            (r'^%YAML(?=[ ]|$)', YamlLexer.reset_indent(Name.Tag), 'yaml-directive'),
            # the %TAG directive
            (r'^%TAG(?=[ ]|$)', YamlLexer.reset_indent(Name.Tag), 'tag-directive'),
            # document start and document end indicators
            (r'^(?:---|\.\.\.)(?=[ ]|$)', YamlLexer.reset_indent(Name.Namespace),
             'block-line'),
            # indentation spaces
            (r'[ ]*(?!\s|$)', YamlLexer.save_indent(Text, start=True),
             ('block-line', 'indentation')),
        ],

        # trailing whitespaces after directives or a block scalar indicator
        'ignored-line': [
            # NOTE(review): this first rule consumes comments as plain Text,
            # which makes the Comment.Single rule two entries below
            # unreachable -- worth confirming whether that is intended.
            (r'#[^\n]*', YamlLexer.something(Text)),
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # line break
            (r'\n', Text, '#pop:2'),
        ],

        # the %YAML directive
        'yaml-directive': [
            # the version number
            (r'([ ]+)([0-9]+\.[0-9]+)',
             bygroups(Text, Number), 'ignored-line'),
        ],

        # the %TAG directive
        'tag-directive': [
            # a tag handle and the corresponding prefix
            (r'([ ]+)(!|![\w-]*!)'
             r'([ ]+)(!|!?[\w;/?:@&=+$,.!~*\'()\[\]%-]+)',
             bygroups(Text, Keyword.Type, Text, Keyword.Type),
             'ignored-line'),
        ],

        # block scalar indicators and indentation spaces
        'indentation': [
            # trailing whitespaces are ignored
            (r'[ ]*$', YamlLexer.something(Text), '#pop:2'),
            # whitespaces preceeding block collection indicators
            (r'[ ]+(?=[?:-](?:[ ]|$))', YamlLexer.save_indent(Text)),
            # block collection indicators
            (r'[?:-](?=[ ]|$)', YamlLexer.set_indent(Punctuation.Indicator)),
            # the beginning a block line
            (r'[ ]*', YamlLexer.save_indent(Text), '#pop'),
        ],

        # an indented line in the block context
        'block-line': [
            # the line end
            (r'[ ]*(?=#|$)', YamlLexer.something(Text), '#pop'),
            # whitespaces separating tokens
            (r'[ ]+', Text),
            # tags, anchors and aliases,
            include('descriptors'),
            # block collections and scalars
            include('block-nodes'),
            # flow collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`-]|[?:-]\S)',
             YamlLexer.something(Name.Variable),
             'plain-scalar-in-block-context'),
        ],

        # tags, anchors, aliases
        'descriptors': [
            # a full-form tag
            (r'!<[\w#;/?:@&=+$,.!~*\'()\[\]%-]+>', Keyword.Type),
            # a tag in the form '!', '!suffix' or '!handle!suffix'
            (r'!(?:[\w-]+!)?'
             r'[\w#;/?:@&=+$,.!~*\'()\[\]%-]+', Keyword.Type),
            # an anchor
            (r'&[\w-]+', Name.Label),
            # an alias
            (r'\*[\w-]+', Name.Variable),
        ],

        # block collections and scalars
        'block-nodes': [
            # implicit key (MPF-specific: key indentation tracking)
            (r':(?=[ ]|$)', YamlLexer.set_indent(Punctuation.Indicator, implicit=True)),
            # literal and folded scalars
            (r'[|>]', Punctuation.Indicator,
             ('block-scalar-content', 'block-scalar-header')),
        ],

        # flow collections and quoted scalars
        'flow-nodes': [
            # a flow sequence
            (r'\[', Punctuation.Indicator, 'flow-sequence'),
            # a flow mapping
            (r'\{', Punctuation.Indicator, 'flow-mapping'),
            # a single-quoted scalar
            (r'\'', String, 'single-quoted-scalar'),
            # a double-quoted scalar
            (r'\"', String, 'double-quoted-scalar'),
        ],

        # the content of a flow collection
        'flow-collection': [
            # whitespaces
            (r'[ ]+', Text),
            # line breaks
            (r'\n+', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # simple indicators
            (r'[?:,]', Punctuation.Indicator),
            # tags, anchors and aliases
            include('descriptors'),
            # nested collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`])',
             YamlLexer.something(Name.Variable),
             'plain-scalar-in-flow-context'),
        ],

        # a flow sequence indicated by '[' and ']'
        'flow-sequence': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\]', Punctuation.Indicator, '#pop'),
        ],

        # a flow mapping indicated by '{' and '}'
        'flow-mapping': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\}', Punctuation.Indicator, '#pop'),
        ],

        # block scalar lines
        'block-scalar-content': [
            # line break
            (r'\n', Text),
            # empty line
            (r'^[ ]+$',
             YamlLexer.parse_block_scalar_empty_line(Text, Name.Constant)),
            # indentation spaces (we may leave the state here)
            (r'^[ ]*', YamlLexer.parse_block_scalar_indent(Text)),
            # line content
            (r'[\S\t ]+', Name.Constant),
        ],

        # the content of a literal or folded scalar
        'block-scalar-header': [
            # indentation indicator followed by chomping flag
            (r'([1-9])?[+-]?(?=[ ]|$)',
             YamlLexer.set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
            # chomping flag followed by indentation indicator
            (r'[+-]?([1-9])?(?=[ ]|$)',
             YamlLexer.set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
        ],

        # ignored and regular whitespaces in quoted scalars
        'quoted-scalar-whitespaces': [
            # leading and trailing whitespaces are ignored
            (r'^[ ]+', Text),
            (r'[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
        ],

        # single-quoted scalars
        'single-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of the quote character
            (r'\'\'', String.Escape),
            # regular non-whitespace characters
            (r'[^\s\']+', String),
            # the closing quote
            (r'\'', String, '#pop'),
        ],

        # double-quoted scalars
        'double-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of special characters
            (r'\\[0abt\tn\nvfre "\\N_LP]', String),
            # escape codes
            (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
             String.Escape),
            # regular non-whitespace characters
            (r'[^\s"\\]+', String),
            # the closing quote
            (r'"', String, '#pop'),
        ],

        # the beginning of a new line while scanning a plain scalar
        'plain-scalar-in-block-context-new-line': [
            # empty lines
            (r'^[ ]+$', Text),
            # line breaks
            (r'\n+', Text),
            # document start and document end indicators
            (r'^(?=---|\.\.\.)', YamlLexer.something(Name.Namespace), '#pop:3'),
            # indentation spaces (we may leave the block line state here)
            (r'^[ ]*', YamlLexer.parse_plain_scalar_indent(Text), '#pop'),
        ],

        # a plain scalar in the block context
        'plain-scalar-in-block-context': [
            # the scalar ends with the ':' indicator
            (r'[ ]*(?=:[ ]|:$)', YamlLexer.something(Text), '#pop'),
            # the scalar ends with whitespaces followed by a comment
            (r'[ ]+(?=#)', Text, '#pop'),
            # trailing whitespaces are ignored
            (r'[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
            # other whitespaces are a part of the value
            (r'[ ]+', Literal.Scalar.Plain),
            # regular non-whitespace characters
            (r'(?::(?!\s)|[^\s:])+', Literal.Scalar.Plain),
        ],

        # a plain scalar is the flow context
        'plain-scalar-in-flow-context': [
            # the scalar ends with an indicator character
            (r'[ ]*(?=[,:?\[\]{}])', YamlLexer.something(Text), '#pop'),
            # the scalar ends with a comment
            (r'[ ]+(?=#)', Text, '#pop'),
            # leading and trailing whitespaces are ignored
            (r'^[ ]+', Text),
            (r'[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
            # regular non-whitespace characters
            (r'[^\s,:?\[\]{}]+', Name.Variable),
        ],
    }

    # this could be used to show some test output
    # def get_tokens_unprocessed(self, text=None, context=None):
    #     value = super().get_tokens_unprocessed(text, context)
    #     return value
| mit | c81f7b0f1cb712bcf5ed0e7c5cb4d1c2 | 35.787425 | 119 | 0.494995 | 4.087492 | false | true | false | false |
electron/libchromiumcontent | script/package-clang.py | 2 | 16846 | #!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script will check out llvm and clang, and then package the results up
to a tgz file."""
import argparse
import fnmatch
import itertools
import os
import shutil
import subprocess
import sys
import tarfile
from lib.config import IS_ARM64_HOST, IS_ARMV7_HOST
# Path constants.  All derived from this script's own location so the script
# works from any checkout.  In this layout, '..'/'src' is the Chromium
# source root.
THIS_DIR = os.path.dirname(__file__)
CHROMIUM_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', 'src'))
THIRD_PARTY_DIR = os.path.join(CHROMIUM_DIR, 'third_party')
LLVM_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm')
LLVM_BOOTSTRAP_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-bootstrap')
LLVM_BOOTSTRAP_INSTALL_DIR = os.path.join(THIRD_PARTY_DIR,
                                          'llvm-bootstrap-install')
LLVM_BUILD_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-build')
LLVM_RELEASE_DIR = os.path.join(LLVM_BUILD_DIR, 'Release+Asserts')
LLVM_LTO_LLD_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-lto-lld')
# Stamp file read in main() after the build step runs; presumably written by
# build-clang.py when a build completes -- confirm against that script.
STAMP_FILE = os.path.join(LLVM_BUILD_DIR, 'cr_build_revision')
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# Vendored boto checkout, used by s3put() for the S3 upload path.
BOTO_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'boto')
def Tee(output, logfile):
  """Write *output* to logfile and echo it to stdout.

  The trailing comma on the print statement suppresses the extra newline
  (Python 2), since *output* already ends with one when it is a log line.
  """
  logfile.write(output)
  print output,
def TeeCmd(cmd, logfile, fail_hard=True):
  """Runs cmd and writes the output to both stdout and logfile.

  Args:
    cmd: command argv list to run.
    logfile: open file object that receives a copy of every output line.
    fail_hard: when True (default), exit the whole script if cmd fails.
  """
  # Reading from PIPE can deadlock if one buffer is full but we wait on a
  # different one. To work around this, pipe the subprocess's stderr to
  # its stdout buffer and don't give it a stdin.
  # shell=True is required in cmd.exe since depot_tools has an svn.bat, and
  # bat files only work with shell=True set.
  # NOTE(review): stdin=open(os.devnull) never closes that handle -- minor
  # fd leak per call; harmless for this script's usage but worth confirming.
  proc = subprocess.Popen(cmd, bufsize=1, shell=sys.platform == 'win32',
                          stdin=open(os.devnull), stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
  # Stream output line by line until the process exits.
  for line in iter(proc.stdout.readline,''):
    Tee(line, logfile)
    if proc.poll() is not None:
      break
  exit_code = proc.wait()
  if exit_code != 0 and fail_hard:
    print 'Failed:', cmd
    sys.exit(1)
def PrintTarProgress(tarinfo):
  """tarfile ``filter=`` hook: log each member as it is added, unmodified."""
  print 'Adding', tarinfo.name
  return tarinfo
def GetExpectedStamp(args):
  """Return the stamp '<clang_revision>-<clang_sub_revision>' that a
  successful build is expected to record in the stamp file."""
  return '{0}-{1}'.format(args.clang_revision, args.clang_sub_revision)
def GetGsutilPath():
  """Locate depot_tools' gsutil.py, importing find_depot_tools on demand.

  Exits the script with an instructional message if depot_tools is not on
  PATH.  The found path is returned as a string.
  """
  if not 'find_depot_tools' in sys.modules:
    # find_depot_tools lives in chromium's build/ directory.
    sys.path.insert(0, os.path.join(CHROMIUM_DIR, 'build'))
    global find_depot_tools
    import find_depot_tools
  depot_path = find_depot_tools.add_depot_tools_to_path()
  if depot_path is None:
    print ('depot_tools are not found in PATH. '
           'Follow the instructions in this document '
           'http://dev.chromium.org/developers/how-tos/install-depot-tools'
           ' to install depot_tools and then try again.')
    sys.exit(1)
  gsutil_path = os.path.join(depot_path, 'gsutil.py')
  return gsutil_path
def RunGsutil(args):
  """Invoke depot_tools' gsutil.py with *args*; return its exit code."""
  cmd = [sys.executable, GetGsutilPath()]
  cmd.extend(args)
  return subprocess.call(cmd)
def GsutilArchiveExists(archive_name, platform):
  """Return True when <archive_name>.tgz is already staged for *platform*."""
  target = ('gs://chromium-browser-clang-staging/%s/%s.tgz' %
            (platform, archive_name))
  # 'gsutil stat' exits 0 only when the object exists.
  return RunGsutil(['-q', 'stat', target]) == 0
def MaybeUpload(args, archive_name, platform):
  """Upload <archive_name>.tgz to S3 when --upload was given; otherwise
  print the equivalent gsutil command for a manual upload."""
  # We don't want to rewrite the file, if it already exists on the server,
  # so -n option to gsutil is used. It will warn, if the upload was aborted.
  gsutil_args = ['cp', '-n', '-a', 'public-read',
                  '%s.tgz' % archive_name,
                  'gs://chromium-browser-clang-staging/%s/%s.tgz' %
                  (platform, archive_name)]
  if args.upload:
    print 'Uploading %s to S3...' % archive_name
    # Credentials come from the environment; fail below if the upload fails.
    bucket = os.environ.get('ELECTRON_S3_BUCKET')
    access_key = os.environ.get('ELECTRON_S3_ACCESS_KEY')
    secret_key = os.environ.get('ELECTRON_S3_SECRET_KEY')
    exit_code = s3put(bucket, access_key, secret_key,
                      'clang/{0}/'.format(platform),
                      ['%s.tgz' % archive_name])
    if exit_code != 0:
      print "s3 upload failed, exit_code: %s" % exit_code
      sys.exit(exit_code)
  else:
    print 'To upload, run:'
    print ('gsutil %s' % ' '.join(gsutil_args))
def boto_path_dirs():
  """Return the vendored-boto build directories for the current host arch,
  suitable for appending to PYTHONPATH."""
  if IS_ARM64_HOST:
    return [os.path.join(BOTO_DIR, 'build', 'lib.linux-aarch64-2.7')]
  if IS_ARMV7_HOST:
    return [os.path.join(BOTO_DIR, 'build', 'lib.linux-armv7l-2.7')]
  # Default (x86-64) host.
  return [
      os.path.join(BOTO_DIR, 'build', 'lib'),
      os.path.join(BOTO_DIR, 'build', 'lib.linux-x86_64-2.7'),
  ]
def s3put(bucket, access_key, secret_key, path, file):
args = [
'--bucket', bucket,
'--multipart',
'--path', path,
'--grant', 'public-read'
] + file
env = os.environ.copy()
env['AWS_ACCESS_KEY_ID'] = access_key
env['AWS_SECRET_ACCESS_KEY'] = secret_key
env['PYTHONPATH'] = os.path.pathsep.join(
[env.get('PYTHONPATH', '')] + boto_path_dirs())
boto = os.path.join(BOTO_DIR, 'bin', 's3put')
try:
output = subprocess.check_call([sys.executable, boto] + list(args), env=env)
return output
except subprocess.CalledProcessError as e:
print 'Error calling boto: ' + e.output
return e.returncode
def UploadPDBToSymbolServer():
  """Compress clang-cl/lld-link binaries and PDBs and upload them to the
  chromium-browser-symsrv symbol server bucket (Windows only)."""
  assert sys.platform == 'win32'
  # Upload PDB and binary to the symbol server on Windows. Put them into the
  # chromium-browser-symsrv bucket, since chrome devs have that in their
  # _NT_SYMBOL_PATH already. Executable and PDB must be at paths following a
  # certain pattern for the Microsoft debuggers to be able to load them.
  # Executable:
  #  chromium-browser-symsrv/clang-cl.exe/ABCDEFAB01234/clang-cl.ex_
  #    ABCDEFAB is the executable's timestamp in %08X format, 01234 is the
  #    executable's image size in %x format. tools/symsrc/img_fingerprint.py
  #    can compute this ABCDEFAB01234 string for us, so use that.
  #    The .ex_ instead of .exe at the end means that the file is compressed.
  # PDB:
  # gs://chromium-browser-symsrv/clang-cl.exe.pdb/AABBCCDD/clang-cl.dll.pd_
  #   AABBCCDD here is computed from the output of
  #      dumpbin /all mybinary.exe | find "Format: RSDS"
  #   but tools/symsrc/pdb_fingerprint_from_img.py can compute it already, so
  #   again just use that.
  sys.path.insert(0, os.path.join(CHROMIUM_DIR, 'tools', 'symsrc'))
  import img_fingerprint, pdb_fingerprint_from_img

  binaries = [ 'bin/clang-cl.exe', 'bin/lld-link.exe' ]
  for binary_path in binaries:
    binary_path = os.path.join(LLVM_RELEASE_DIR, binary_path)
    binary_id = img_fingerprint.GetImgFingerprint(binary_path)
    (pdb_id, pdb_path) = pdb_fingerprint_from_img.GetPDBInfoFromImg(binary_path)

    # The build process builds clang.exe and then copies it to clang-cl.exe
    # (both are the same binary and they behave differently on what their
    # filename is).  Hence, the pdb is at clang.pdb, not at clang-cl.pdb.
    # Likewise, lld-link.exe's PDB file is called lld.pdb.

    # Compress and upload.  makecab produces '<name>.ex_' / '<name>.pd_'
    # next to the input file; its output is suppressed.
    for f, f_id in ((binary_path, binary_id), (pdb_path, pdb_id)):
      subprocess.check_call(
          ['makecab', '/D', 'CompressionType=LZX', '/D', 'CompressionMemory=21',
           f, '/L', os.path.dirname(f)], stdout=open(os.devnull, 'w'))
      f_cab = f[:-1] + '_'

      dest = '%s/%s/%s' % (os.path.basename(f), f_id, os.path.basename(f_cab))
      print 'Uploading %s to Google Cloud Storage...' % dest
      gsutil_args = ['cp', '-n', '-a', 'public-read', f_cab,
                     'gs://chromium-browser-symsrv/' + dest]
      exit_code = RunGsutil(gsutil_args)
      if exit_code != 0:
        print "gsutil failed, exit_code: %s" % exit_code
        sys.exit(exit_code)
def main():
  """Build clang from scratch, package the results, and optionally upload.

  Produces clang-<stamp>.tgz plus separate archives for the code-coverage
  tools, llvm-objdump and translation_unit.  Returns 1 on failure, falls off
  the end (None -> exit code 0) on success.
  """
  parser = argparse.ArgumentParser(description='build and package clang')
  parser.add_argument('--upload', action='store_true',
                      help='Upload the target archive to Google Cloud Storage.')
  parser.add_argument('--build-args',
                      help='Additional args to pass to build script.',
                      default='')
  parser.add_argument('--clang-revision', help='Clang revision to build',
                      required=True)
  parser.add_argument('--clang-sub-revision',
                      help='Clang sub revision to build', required=True)
  parser.add_argument('--platform',
                      help='Platform to build')
  args = parser.parse_args()

  # Check that the script is not going to upload a toolchain built from HEAD.
  use_head_revision = 'LLVM_FORCE_HEAD_REVISION' in os.environ
  if args.upload and use_head_revision:
    print ("--upload and LLVM_FORCE_HEAD_REVISION could not be used "
           "at the same time.")
    return 1

  expected_stamp = GetExpectedStamp(args)
  pdir = 'clang-' + expected_stamp
  print pdir

  # Destination platform directory name: explicit flag wins, otherwise
  # derived from the host platform.
  if args.platform:
    platform = args.platform
  elif sys.platform == 'darwin':
    platform = 'Mac'
  elif sys.platform == 'win32':
    platform = 'Win'
  else:
    platform = 'Linux_x64'

  with open('buildlog.txt', 'w') as log:
    # Record any local modifications to the llvm checkout in the build log.
    Tee('Diff in llvm:\n', log)
    TeeCmd(['svn', 'stat', LLVM_DIR], log, fail_hard=False)
    TeeCmd(['svn', 'diff', LLVM_DIR], log, fail_hard=False)
    Tee('Diff in llvm/tools/clang:\n', log)
    TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'tools', 'clang')],
           log, fail_hard=False)
    TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'tools', 'clang')],
           log, fail_hard=False)
    # TODO(thakis): compiler-rt is in projects/compiler-rt on Windows but
    # llvm/compiler-rt elsewhere. So this diff call is currently only right on
    # Windows.
    Tee('Diff in llvm/compiler-rt:\n', log)
    TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'projects', 'compiler-rt')],
           log, fail_hard=False)
    TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'projects', 'compiler-rt')],
           log, fail_hard=False)
    Tee('Diff in llvm/projects/libcxx:\n', log)
    TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'projects', 'libcxx')],
           log, fail_hard=False)
    TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'projects', 'libcxx')],
           log, fail_hard=False)

    Tee('Starting build\n', log)

    # Do a clobber build.
    shutil.rmtree(LLVM_BOOTSTRAP_DIR, ignore_errors=True)
    shutil.rmtree(LLVM_BOOTSTRAP_INSTALL_DIR, ignore_errors=True)
    shutil.rmtree(LLVM_BUILD_DIR, ignore_errors=True)

    opt_flags = args.build_args.split(' ')
    if sys.platform.startswith('linux'):
      opt_flags += ['--lto-lld']
    build_cmd = [sys.executable, os.path.join(THIS_DIR, 'build-clang.py'),
                 '--bootstrap', '--force-local-build', '--clang-revision',
                 args.clang_revision, '--clang-sub-revision',
                 args.clang_sub_revision] + opt_flags
    TeeCmd(build_cmd, log)

  # Verify that the build wrote the revision stamp we expect.
  stamp = open(STAMP_FILE).read().rstrip()
  if stamp != expected_stamp:
    print 'Actual stamp (%s) != expected stamp (%s).' % (stamp, expected_stamp)
    return 1

  shutil.rmtree(pdir, ignore_errors=True)

  # Copy a whitelist of files to the directory we're going to tar up.
  # This supports the same patterns that the fnmatch module understands.
  exe_ext = '.exe' if sys.platform == 'win32' else ''
  want = ['bin/llvm-symbolizer' + exe_ext,
          'bin/sancov' + exe_ext,
          'lib/clang/*/asan_blacklist.txt',
          'lib/clang/*/cfi_blacklist.txt',
          # Copy built-in headers (lib/clang/3.x.y/include).
          'lib/clang/*/include/*',
          ]
  if sys.platform == 'win32':
    want.append('bin/clang-cl.exe')
    want.append('bin/lld-link.exe')
  else:
    so_ext = 'dylib' if sys.platform == 'darwin' else 'so'
    want.extend(['bin/clang',
                 'lib/libFindBadConstructs.' + so_ext,
                 'lib/libBlinkGCPlugin.' + so_ext,
                 ])
  if sys.platform == 'darwin':
    want.extend([# Copy only the OSX and iossim (ASan and profile) runtime
                 # libraries:
                 'lib/clang/*/lib/darwin/*asan_osx*',
                 'lib/clang/*/lib/darwin/*asan_iossim*',
                 'lib/clang/*/lib/darwin/*profile_osx*',
                 'lib/clang/*/lib/darwin/*profile_iossim*',
                 # And the OSX and ios builtin libraries (iossim is lipo'd into
                 # ios) for the _IsOSVersionAtLeast runtime function.
                 'lib/clang/*/lib/darwin/*.ios.a',
                 'lib/clang/*/lib/darwin/*.osx.a',
                 ])
  elif sys.platform.startswith('linux'):
    # Add llvm-ar and lld for LTO.
    want.append('bin/llvm-ar')
    want.append('bin/lld')
    # Copy only
    # lib/clang/*/lib/linux/libclang_rt.{[atm]san,san,ubsan,profile}-*.a ,
    # but not dfsan.
    want.extend(['lib/clang/*/lib/linux/*[atm]san*',
                 'lib/clang/*/lib/linux/*ubsan*',
                 'lib/clang/*/lib/linux/*libclang_rt.san*',
                 'lib/clang/*/lib/linux/*profile*',
                 'lib/clang/*/msan_blacklist.txt',
                 ])
  elif sys.platform == 'win32':
    want.extend(['lib/clang/*/lib/windows/clang_rt.asan*.dll',
                 'lib/clang/*/lib/windows/clang_rt.asan*.lib',
                 ])

  for root, dirs, files in os.walk(LLVM_RELEASE_DIR):
    # root: third_party/llvm-build/Release+Asserts/lib/..., rel_root: lib/...
    rel_root = root[len(LLVM_RELEASE_DIR)+1:]
    rel_files = [os.path.join(rel_root, f) for f in files]
    wanted_files = list(set(itertools.chain.from_iterable(
        fnmatch.filter(rel_files, p) for p in want)))
    if wanted_files:
      # Guaranteed to not yet exist at this point:
      os.makedirs(os.path.join(pdir, rel_root))
    for f in wanted_files:
      src = os.path.join(LLVM_RELEASE_DIR, f)
      dest = os.path.join(pdir, f)
      shutil.copy(src, dest)
      # Strip libraries.
      if sys.platform == 'darwin' and f.endswith('.dylib'):
        subprocess.call(['strip', '-x', dest])
      elif (sys.platform.startswith('linux') and
            os.path.splitext(f)[1] in ['.so', '.a']):
        subprocess.call(['strip', '-g', dest])

  # Set up symlinks.
  if sys.platform != 'win32':
    os.symlink('clang', os.path.join(pdir, 'bin', 'clang++'))
    os.symlink('clang', os.path.join(pdir, 'bin', 'clang-cl'))

  if sys.platform.startswith('linux'):
    os.symlink('lld', os.path.join(pdir, 'bin', 'ld.lld'))
    os.symlink('lld', os.path.join(pdir, 'bin', 'lld-link'))

  # Copy libc++ headers.
  if sys.platform == 'darwin':
    shutil.copytree(os.path.join(LLVM_BOOTSTRAP_INSTALL_DIR, 'include', 'c++'),
                    os.path.join(pdir, 'include', 'c++'))

  # Copy buildlog over.
  shutil.copy('buildlog.txt', pdir)

  # Create archive.
  tar_entries = ['bin', 'lib', 'buildlog.txt']
  if sys.platform == 'darwin':
    tar_entries += ['include']
  with tarfile.open(pdir + '.tgz', 'w:gz') as tar:
    for entry in tar_entries:
      tar.add(os.path.join(pdir, entry), arcname=entry, filter=PrintTarProgress)

  MaybeUpload(args, pdir, platform)

  # Zip up llvm-code-coverage for code coverage.
  code_coverage_dir = 'llvm-code-coverage-' + stamp
  shutil.rmtree(code_coverage_dir, ignore_errors=True)
  os.makedirs(os.path.join(code_coverage_dir, 'bin'))
  for filename in ['llvm-cov', 'llvm-profdata']:
    shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', filename + exe_ext),
                os.path.join(code_coverage_dir, 'bin'))
  with tarfile.open(code_coverage_dir + '.tgz', 'w:gz') as tar:
    tar.add(os.path.join(code_coverage_dir, 'bin'), arcname='bin',
            filter=PrintTarProgress)
  MaybeUpload(args, code_coverage_dir, platform)

  # Zip up llvm-objdump for sanitizer coverage.
  objdumpdir = 'llvmobjdump-' + stamp
  shutil.rmtree(objdumpdir, ignore_errors=True)
  os.makedirs(os.path.join(objdumpdir, 'bin'))
  shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', 'llvm-objdump' + exe_ext),
              os.path.join(objdumpdir, 'bin'))
  with tarfile.open(objdumpdir + '.tgz', 'w:gz') as tar:
    tar.add(os.path.join(objdumpdir, 'bin'), arcname='bin',
            filter=PrintTarProgress)
  MaybeUpload(args, objdumpdir, platform)

  # Zip up the translation_unit tool.
  translation_unit_dir = 'translation_unit-' + stamp
  shutil.rmtree(translation_unit_dir, ignore_errors=True)
  os.makedirs(os.path.join(translation_unit_dir, 'bin'))
  shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', 'translation_unit' +
                           exe_ext),
              os.path.join(translation_unit_dir, 'bin'))
  with tarfile.open(translation_unit_dir + '.tgz', 'w:gz') as tar:
    tar.add(os.path.join(translation_unit_dir, 'bin'), arcname='bin',
            filter=PrintTarProgress)
  MaybeUpload(args, translation_unit_dir, platform)

  if sys.platform == 'win32' and args.upload:
    UploadPDBToSymbolServer()

  # FIXME: Warn if the file already exists on the server.
# Script entry point: propagate main()'s return value as the process's exit
# status (None -> 0 on success, 1 on failure).
if __name__ == '__main__':
  sys.exit(main())
| mit | 79f1b6d9fcc12fd94e5436abe209ae44 | 38.544601 | 80 | 0.628933 | 3.240862 | false | false | false | false |
jhao104/proxy_pool | util/six.py | 1 | 1258 | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: six
Description :
Author : JHao
date: 2020/6/22
-------------------------------------------------
Change Activity:
2020/6/22:
-------------------------------------------------
"""
__author__ = 'JHao'

import sys

# Exactly one of these is True; used below to pick the right spelling of
# each Python 2 / Python 3 compatibility shim.
PY2 = sys.version_info[0] == 2

PY3 = sys.version_info[0] == 3

if PY3:
    # Python 3: dict.items() is already a lazy view; wrap it in iter() so
    # callers get the same iterator semantics as Python 2's iteritems().
    def iteritems(d, **kw):
        return iter(d.items(**kw))
else:
    # Python 2: delegate to the native iterator method.
    def iteritems(d, **kw):
        return d.iteritems(**kw)

# urlparse moved into urllib.parse in Python 3.
if PY3:
    from urllib.parse import urlparse
else:
    from urlparse import urlparse

# reload() is a builtin on Python 2 but lives in a module on Python 3.
# NOTE(review): the ``imp`` module is deprecated since 3.4 (removed in
# 3.12); importlib.reload is the long-term spelling -- confirm before
# changing, as callers import the ``reload_six`` name.
if PY3:
    from imp import reload as reload_six
else:
    reload_six = reload

# The Queue module was renamed to queue in Python 3.
if PY3:
    from queue import Empty, Queue
else:
    from Queue import Empty, Queue
def withMetaclass(meta, *bases):
    """Create a base class with a metaclass.

    Returns a throwaway class whose metaclass intercepts the first subclass
    creation and builds the real class with ``meta(name, bases, d)`` instead,
    so the subclass ends up with *meta* as its metaclass and *bases* as its
    base classes on both Python 2 and Python 3.
    """
    class _Shim(meta):
        # Creating the real class here discards the shim entirely: the
        # temporary base never appears in the resulting class's bases.
        def __new__(mcls, name, _shim_bases, namespace):
            return meta(name, bases, namespace)
    return type.__new__(_Shim, 'temporary_class', (), {})
| mit | 3d5a8ce8acb5230f38e3c8a5f2cf081f | 22.222222 | 78 | 0.53429 | 3.931034 | false | false | false | false |
electron/libchromiumcontent | tools/licenses.py | 3 | 18194 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility for checking and processing licensing information in third_party
directories.
Usage: licenses.py <command>
Commands:
scan scan third_party directories, verifying that we have licensing info
credits generate about:credits on stdout
(You can also import this as a module.)
"""
import argparse
import cgi
import os
import sys
# Paths from the root of the tree to directories to skip.
PRUNE_PATHS = set([
# Same module occurs in crypto/third_party/nss and net/third_party/nss, so
# skip this one.
os.path.join('third_party','nss'),
# Placeholder directory only, not third-party code.
os.path.join('third_party','adobe'),
# Apache 2.0 license. See crbug.com/140478
os.path.join('third_party','bidichecker'),
# Build files only, not third-party code.
os.path.join('third_party','widevine'),
# Only binaries, used during development.
os.path.join('third_party','valgrind'),
# Used for development and test, not in the shipping product.
os.path.join('build','secondary'),
os.path.join('third_party','bison'),
os.path.join('third_party','blanketjs'),
os.path.join('third_party','cygwin'),
os.path.join('third_party','gles2_conform'),
os.path.join('third_party','gnu_binutils'),
os.path.join('third_party','gold'),
os.path.join('third_party','gperf'),
os.path.join('third_party','lighttpd'),
os.path.join('third_party','llvm'),
os.path.join('third_party','llvm-build'),
os.path.join('third_party','mingw-w64'),
os.path.join('third_party','nacl_sdk_binaries'),
os.path.join('third_party','pefile'),
os.path.join('third_party','perl'),
os.path.join('third_party','psyco_win32'),
os.path.join('third_party','pylib'),
os.path.join('third_party','pywebsocket'),
os.path.join('third_party','qunit'),
os.path.join('third_party','sinonjs'),
os.path.join('third_party','syzygy'),
os.path.join('tools', 'profile_chrome', 'third_party'),
# Chromium code in third_party.
os.path.join('third_party','fuzzymatch'),
os.path.join('tools', 'swarming_client'),
# Stuff pulled in from chrome-internal for official builds/tools.
os.path.join('third_party', 'clear_cache'),
os.path.join('third_party', 'gnu'),
os.path.join('third_party', 'googlemac'),
os.path.join('third_party', 'pcre'),
os.path.join('third_party', 'psutils'),
os.path.join('third_party', 'sawbuck'),
# See crbug.com/350472
os.path.join('chrome', 'browser', 'resources', 'chromeos', 'quickoffice'),
# Chrome for Android proprietary code.
os.path.join('clank'),
# Redistribution does not require attribution in documentation.
os.path.join('third_party','directxsdk'),
os.path.join('third_party','platformsdk_win2008_6_1'),
os.path.join('third_party','platformsdk_win7'),
# For testing only, presents on some bots.
os.path.join('isolate_deps_dir'),
])
# Directories we don't scan through.
VCS_METADATA_DIRS = ('.svn', '.git')
PRUNE_DIRS = (VCS_METADATA_DIRS +
('out', 'Debug', 'Release', # build files
'layout_tests')) # lots of subdirs
ADDITIONAL_PATHS = (
os.path.join('breakpad'),
os.path.join('chrome', 'common', 'extensions', 'docs', 'examples'),
os.path.join('chrome', 'test', 'chromeos', 'autotest'),
os.path.join('chrome', 'test', 'data'),
os.path.join('native_client'),
os.path.join('net', 'tools', 'spdyshark'),
os.path.join('sdch', 'open-vcdiff'),
os.path.join('testing', 'gmock'),
os.path.join('testing', 'gtest'),
os.path.join('tools', 'grit'),
os.path.join('tools', 'gyp'),
os.path.join('tools', 'page_cycler', 'acid3'),
os.path.join('url', 'third_party', 'mozilla'),
os.path.join('v8'),
# Fake directories to include the strongtalk and fdlibm licenses.
os.path.join('v8', 'strongtalk'),
os.path.join('v8', 'fdlibm'),
)
# Directories where we check out directly from upstream, and therefore
# can't provide a README.chromium. Please prefer a README.chromium
# wherever possible.
SPECIAL_CASES = {
os.path.join('native_client'): {
"Name": "native client",
"URL": "http://code.google.com/p/nativeclient",
"License": "BSD",
},
os.path.join('sdch', 'open-vcdiff'): {
"Name": "open-vcdiff",
"URL": "http://code.google.com/p/open-vcdiff",
"License": "Apache 2.0, MIT, GPL v2 and custom licenses",
"License Android Compatible": "yes",
},
os.path.join('testing', 'gmock'): {
"Name": "gmock",
"URL": "http://code.google.com/p/googlemock",
"License": "BSD",
"License File": "NOT_SHIPPED",
},
os.path.join('testing', 'gtest'): {
"Name": "gtest",
"URL": "http://code.google.com/p/googletest",
"License": "BSD",
"License File": "NOT_SHIPPED",
},
os.path.join('third_party', 'angle'): {
"Name": "Almost Native Graphics Layer Engine",
"URL": "http://code.google.com/p/angleproject/",
"License": "BSD",
},
os.path.join('third_party', 'cros_system_api'): {
"Name": "Chromium OS system API",
"URL": "http://www.chromium.org/chromium-os",
"License": "BSD",
# Absolute path here is resolved as relative to the source root.
"License File": "/LICENSE.chromium_os",
},
os.path.join('third_party', 'lss'): {
"Name": "linux-syscall-support",
"URL": "http://code.google.com/p/linux-syscall-support/",
"License": "BSD",
"License File": "/LICENSE",
},
os.path.join('third_party', 'ots'): {
"Name": "OTS (OpenType Sanitizer)",
"URL": "http://code.google.com/p/ots/",
"License": "BSD",
},
os.path.join('third_party', 'pdfium'): {
"Name": "PDFium",
"URL": "http://code.google.com/p/pdfium/",
"License": "BSD",
},
os.path.join('third_party', 'pdfsqueeze'): {
"Name": "pdfsqueeze",
"URL": "http://code.google.com/p/pdfsqueeze/",
"License": "Apache 2.0",
"License File": "COPYING",
},
os.path.join('third_party', 'ppapi'): {
"Name": "ppapi",
"URL": "http://code.google.com/p/ppapi/",
},
os.path.join('third_party', 'scons-2.0.1'): {
"Name": "scons-2.0.1",
"URL": "http://www.scons.org",
"License": "MIT",
"License File": "NOT_SHIPPED",
},
os.path.join('third_party', 'trace-viewer'): {
"Name": "trace-viewer",
"URL": "http://code.google.com/p/trace-viewer",
"License": "BSD",
"License File": "NOT_SHIPPED",
},
os.path.join('third_party', 'v8-i18n'): {
"Name": "Internationalization Library for v8",
"URL": "http://code.google.com/p/v8-i18n/",
"License": "Apache 2.0",
},
os.path.join('third_party', 'WebKit'): {
"Name": "WebKit",
"URL": "http://webkit.org/",
"License": "BSD and GPL v2",
# Absolute path here is resolved as relative to the source root.
"License File": "/third_party/WebKit/LICENSE_FOR_ABOUT_CREDITS",
},
os.path.join('third_party', 'webpagereplay'): {
"Name": "webpagereplay",
"URL": "http://code.google.com/p/web-page-replay",
"License": "Apache 2.0",
"License File": "NOT_SHIPPED",
},
os.path.join('tools', 'grit'): {
"Name": "grit",
"URL": "http://code.google.com/p/grit-i18n",
"License": "BSD",
"License File": "NOT_SHIPPED",
},
os.path.join('tools', 'gyp'): {
"Name": "gyp",
"URL": "http://code.google.com/p/gyp",
"License": "BSD",
"License File": "NOT_SHIPPED",
},
os.path.join('v8'): {
"Name": "V8 JavaScript Engine",
"URL": "http://code.google.com/p/v8",
"License": "BSD",
},
os.path.join('v8', 'strongtalk'): {
"Name": "Strongtalk",
"URL": "http://www.strongtalk.org/",
"License": "BSD",
# Absolute path here is resolved as relative to the source root.
"License File": "/v8/LICENSE.strongtalk",
},
os.path.join('v8', 'fdlibm'): {
"Name": "fdlibm",
"URL": "http://www.netlib.org/fdlibm/",
"License": "Freely Distributable",
# Absolute path here is resolved as relative to the source root.
"License File" : "/v8/src/third_party/fdlibm/LICENSE",
"License Android Compatible" : "yes",
},
os.path.join('third_party', 'khronos_glcts'): {
# These sources are not shipped, are not public, and it isn't
# clear why they're tripping the license check.
"Name": "khronos_glcts",
"URL": "http://no-public-url",
"License": "Khronos",
"License File": "NOT_SHIPPED",
},
os.path.join('tools', 'telemetry', 'third_party', 'gsutil'): {
"Name": "gsutil",
"URL": "https://cloud.google.com/storage/docs/gsutil",
"License": "Apache 2.0",
"License File": "NOT_SHIPPED",
},
}
# Special value for 'License File' field used to indicate that the license file
# should not be used in about:credits.
NOT_SHIPPED = "NOT_SHIPPED"
class LicenseError(Exception):
  """Raised when a third_party directory's licensing information is
  missing or incomplete."""
def AbsolutePath(path, filename, root):
  """Resolve a license filename from README.chromium to an absolute path.

  A leading '/' means *filename* is relative to the source *root* itself
  (the directory we're run from); otherwise it is relative to the
  component directory *path*.  Returns the absolute path when the file
  exists, or None when it does not.
  """
  if filename.startswith('/'):
    candidate = os.path.join(root, filename[1:])
  else:
    candidate = os.path.join(root, path, filename)
  return candidate if os.path.exists(candidate) else None
def ParseDir(path, root, require_license_file=True, optional_keys=None):
  """Examine a third_party/foo component and extract its metadata.

  Args:
    path: component path relative to *root*.
    root: absolute path of the source tree root.
    require_license_file: when True (default), raise LicenseError if no
        license file can be located for a shipped component.
    optional_keys: extra README.chromium field names to capture when
        present; unlike the standard fields, they may be missing.

  Returns:
    Dict of metadata fields.  "License File" is rewritten to an absolute
    path (or left as NOT_SHIPPED for components not in the product).

  Raises:
    LicenseError: if required metadata or the license file is missing.
  """
  # Parse metadata fields out of README.chromium.
  # We examine "LICENSE" for the license file by default.
  metadata = {
      "License File": "LICENSE", # Relative path to license text.
      "Name": None, # Short name (for header on about:credits).
      "URL": None, # Project home page.
      "License": None, # Software license.
  }

  if optional_keys is None:
    optional_keys = []

  if path in SPECIAL_CASES:
    # Hand-maintained metadata for directories without a README.chromium.
    metadata.update(SPECIAL_CASES[path])
  else:
    # Try to find README.chromium.
    readme_path = os.path.join(root, path, 'README.chromium')
    if not os.path.exists(readme_path):
      raise LicenseError("missing README.chromium or licenses.py "
                         "SPECIAL_CASES entry")

    for line in open(readme_path):
      line = line.strip()
      if not line:
        # Metadata lives in the header paragraph; stop at the first blank.
        break
      # NOTE: keys() + list concatenation relies on Python 2, where
      # dict.keys() returns a list (this tool runs under Python 2).
      for key in metadata.keys() + optional_keys:
        field = key + ": "
        if line.startswith(field):
          metadata[key] = line[len(field):]

  # Check that all expected metadata is present.
  for key, value in metadata.iteritems():
    if not value:
      # NOTE(review): "licences.py" below is a typo for "licenses.py".
      raise LicenseError("couldn't find '" + key + "' line "
                         "in README.chromium or licences.py "
                         "SPECIAL_CASES")

  # Special-case modules that aren't in the shipping product, so don't need
  # their license in about:credits.
  if metadata["License File"] != NOT_SHIPPED:
    # Check that the license file exists: try the declared name first,
    # then fall back to upstream's conventional COPYING.
    for filename in (metadata["License File"], "COPYING"):
      license_path = AbsolutePath(path, filename, root)
      if license_path is not None:
        break

    if require_license_file and not license_path:
      raise LicenseError("License file not found. "
                         "Either add a file named LICENSE, "
                         "import upstream's COPYING if available, "
                         "or add a 'License File:' line to "
                         "README.chromium with the appropriate path.")
    metadata["License File"] = license_path

  return metadata
def ContainsFiles(path, root):
  """Return True if any file exists under *path* (relative to *root*).

  Version-control metadata directories are pruned from the walk, so files
  living only inside them do not count.
  """
  for _, subdirs, filenames in os.walk(os.path.join(root, path)):
    if filenames:
      return True
    # In-place slice assignment tells os.walk not to descend into these.
    subdirs[:] = [d for d in subdirs if d not in VCS_METADATA_DIRS]
  return False
def FilterDirsWithFiles(dirs_list, root):
  """Return the subset of *dirs_list* that actually contain files.

  A directory with no files underneath it is assumed to be a DEPS-only
  checkout location for a project unused in the current configuration.
  """
  kept = []
  for candidate in dirs_list:
    if ContainsFiles(candidate, root):
      kept.append(candidate)
  return kept
def FindThirdPartyDirs(prune_paths, root):
  """Find all third_party directories underneath the source root.

  Returns a set of paths relative to *root*.  Directories listed in
  *prune_paths* are excluded and not descended into, as are the
  build/VCS directories named in PRUNE_DIRS.  Every entry from
  ADDITIONAL_PATHS is also included (unless pruned) without scanning.
  """
  found = set()
  for walk_root, subdirs, _ in os.walk(root):
    rel_path = walk_root[len(root) + 1:]  # Pretty up the path.
    if rel_path in prune_paths:
      subdirs[:] = []
      continue

    # Drop directories we never want to descend into.  Filtering into a
    # fresh list avoids mutating the list while iterating over it.
    subdirs[:] = [d for d in subdirs if d not in PRUNE_DIRS]

    if os.path.basename(rel_path) == 'third_party':
      # Every immediate, non-pruned child of a third_party directory is
      # itself a third-party component.
      for child in subdirs:
        child_path = os.path.join(rel_path, child)
        if child_path not in prune_paths:
          found.add(child_path)
      # Don't recurse into any subdirs from here.
      subdirs[:] = []
      continue

    # Don't recurse into paths in ADDITIONAL_PATHS, like we do with
    # regular third_party/foo paths.
    if rel_path in ADDITIONAL_PATHS:
      subdirs[:] = []

  for extra in ADDITIONAL_PATHS:
    if extra not in prune_paths:
      found.add(extra)

  return found
def FindThirdPartyDirsWithFiles(root):
  """Find third_party directories under *root* that contain actual files."""
  return FilterDirsWithFiles(FindThirdPartyDirs(PRUNE_PATHS, root), root)
def ScanThirdPartyDirs(root=None):
  """Scan a list of directories and report on any problems we find.

  Prints one "path: error" line per problem and returns True only when
  every third_party directory has complete licensing metadata.
  """
  if root is None:
    root = os.getcwd()
  third_party_dirs = FindThirdPartyDirsWithFiles(root)

  errors = []
  for path in sorted(third_party_dirs):
    try:
      # ParseDir() raises on problems; its return value is unused here.
      metadata = ParseDir(path, root)
    except LicenseError, e:  # Python 2 syntax; this tool runs under Python 2.
      errors.append((path, e.args[0]))
      continue

  for path, error in sorted(errors):
    print path + ": " + error

  return len(errors) == 0
def GenerateCredits(file_template_file, entry_template_file, output_file):
  """Generate about:credits.

  Args:
    file_template_file: path of the page-level HTML template; falls back
        to chrome/browser/resources/about_credits.tmpl when falsy.
    entry_template_file: path of the per-component HTML template; falls
        back to about_credits_entry.tmpl when falsy.
    output_file: where to write the page; printed to stdout when falsy.

  Returns:
    True (components whose metadata fails to parse are skipped).
  """

  def EvaluateTemplate(template, env, escape=True):
    """Expand a template with variables like {{foo}} using a
    dictionary of expansions."""
    for key, val in env.items():
      if escape:
        # HTML-escape substituted values (license text, names, URLs).
        val = cgi.escape(val)
      template = template.replace('{{%s}}' % key, val)
    return template

  # The source checkout lives in ../src relative to this script.
  root = os.path.join(os.path.dirname(__file__), '..', 'src')
  third_party_dirs = FindThirdPartyDirs(PRUNE_PATHS, root)

  if not file_template_file:
    file_template_file = os.path.join(root, 'chrome', 'browser',
                                      'resources', 'about_credits.tmpl')
  if not entry_template_file:
    entry_template_file = os.path.join(root, 'chrome', 'browser',
                                       'resources',
                                       'about_credits_entry.tmpl')

  entry_template = open(entry_template_file).read()
  entries = []
  for path in third_party_dirs:
    try:
      metadata = ParseDir(path, root)
    except LicenseError:
      # TODO(phajdan.jr): Convert to fatal error (http://crbug.com/39240).
      continue
    if metadata['License File'] == NOT_SHIPPED:
      # Not in the shipping product; no attribution required.
      continue
    env = {
        'name': metadata['Name'],
        'url': metadata['URL'],
        'license': open(metadata['License File'], 'rb').read(),
    }
    entry = {
        'name': metadata['Name'],
        'content': EvaluateTemplate(entry_template, env),
    }
    entries.append(entry)

  # Sort for a stable, alphabetized credits page.
  entries.sort(key=lambda entry: (entry['name'], entry['content']))
  entries_contents = '\n'.join([entry['content'] for entry in entries])

  file_template = open(file_template_file).read()
  template_contents = "<!-- Generated by licenses.py; do not edit. -->"
  template_contents += EvaluateTemplate(file_template,
                                        {'entries': entries_contents},
                                        escape=False)

  if output_file:
    with open(output_file, 'w') as output:
      output.write(template_contents)
  else:
    print template_contents

  return True
def main():
  """Parse command-line arguments and dispatch to scan/credits.

  Returns 1 on failure; falls through (implicitly returning None, i.e.
  exit status 0) on success.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--file-template',
                      help='Template HTML to use for the license page.')
  parser.add_argument('--entry-template',
                      help='Template HTML to use for each license.')
  parser.add_argument('command', choices=['help', 'scan', 'credits'])
  parser.add_argument('output_file', nargs='?')
  args = parser.parse_args()

  if args.command == 'scan':
    if not ScanThirdPartyDirs():
      return 1
  elif args.command == 'credits':
    if not GenerateCredits(args.file_template, args.entry_template,
                           args.output_file):
      return 1
  else:
    # 'help' (or anything unexpected): show the module docstring usage.
    print __doc__
    return 1


if __name__ == '__main__':
  sys.exit(main())
| mit | 1273dc6c0b7eafd3b2e0461e42bd6c0f | 34.67451 | 80 | 0.580136 | 3.691964 | false | false | false | false |
jhao104/proxy_pool | db/dbClient.py | 1 | 3665 | # -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
File Name: DbClient.py
Description : DB工厂类
Author : JHao
date: 2016/12/2
-------------------------------------------------
Change Activity:
2016/12/02: DB工厂类
2020/07/03: 取消raw_proxy储存
-------------------------------------------------
"""
__author__ = 'JHao'
import os
import sys
from util.six import urlparse, withMetaclass
from util.singleton import Singleton
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class DbClient(withMetaclass(Singleton)):
    """
    DbClient: DB factory class exposing get/put/update/pop/delete/exists/
    getAll/clean/getCount/changeTable methods.

    Abstract interface (implemented by the concrete client classes):
        get(): return one random proxy;
        put(proxy): store a proxy;
        pop(): return and remove one proxy, in order;
        update(proxy): update the given proxy's record;
        delete(proxy): delete the given proxy;
        exists(proxy): check whether the given proxy exists;
        getAll(): return all proxies;
        clean(): remove all proxy records;
        getCount(): return proxy count statistics;
        changeTable(name): switch the active table/collection.

    Concrete implementations:
        ssdb: ssdbClient.py
        redis: redisClient.py
        mongodb: mongodbClient.py
    """

    def __init__(self, db_conn):
        """
        Parse the connection URL and instantiate the concrete DB client.

        :param db_conn: connection URL, e.g. ``redis://user:pwd@host:port/0``
        """
        self.parseDbConn(db_conn)
        self.__initDbClient()

    @classmethod
    def parseDbConn(cls, db_conn):
        # Split the connection URL into class-level connection attributes.
        # (Stored on the class, not the instance; DbClient is a Singleton.)
        db_conf = urlparse(db_conn)
        cls.db_type = db_conf.scheme.upper().strip()
        cls.db_host = db_conf.hostname
        cls.db_port = db_conf.port
        cls.db_user = db_conf.username
        cls.db_pwd = db_conf.password
        cls.db_name = db_conf.path[1:]
        return cls

    def __initDbClient(self):
        """
        Import and construct the concrete client matching ``db_type``.

        :raises AssertionError: when the DB type is not SSDB or REDIS.
        """
        __type = None
        if "SSDB" == self.db_type:
            __type = "ssdbClient"
        elif "REDIS" == self.db_type:
            __type = "redisClient"
        else:
            pass
        assert __type, 'type error, Not support DB type: {}'.format(self.db_type)
        # e.g. module "redisClient" -> class "RedisClient".
        self.client = getattr(__import__(__type), "%sClient" % self.db_type.title())(host=self.db_host,
                                                                                     port=self.db_port,
                                                                                     username=self.db_user,
                                                                                     password=self.db_pwd,
                                                                                     db=self.db_name)

    def get(self, https, **kwargs):
        # Return one random proxy (HTTPS-verified only when *https* is true).
        return self.client.get(https, **kwargs)

    def put(self, key, **kwargs):
        # Store one proxy record.
        return self.client.put(key, **kwargs)

    def update(self, key, value, **kwargs):
        # Update the record stored under *key*.
        return self.client.update(key, value, **kwargs)

    def delete(self, key, **kwargs):
        # Delete the record stored under *key*.
        return self.client.delete(key, **kwargs)

    def exists(self, key, **kwargs):
        # Check whether *key* is present.
        return self.client.exists(key, **kwargs)

    def pop(self, https, **kwargs):
        # Return and remove one proxy.
        return self.client.pop(https, **kwargs)

    def getAll(self, https):
        # Return all stored proxies.
        return self.client.getAll(https)

    def clear(self):
        # Remove every record in the active table.
        return self.client.clear()

    def changeTable(self, name):
        # Switch the active table/collection.
        self.client.changeTable(name)

    def getCount(self):
        # Return proxy count statistics.
        return self.client.getCount()

    def test(self):
        # Probe the underlying DB connection.
        return self.client.test()
| mit | 1978ab1b457db5e746ffbe4f6814b97a | 27.841667 | 107 | 0.492343 | 3.528033 | false | false | false | false |
jhao104/proxy_pool | handler/proxyHandler.py | 1 | 2079 | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: ProxyHandler.py
Description :
Author : JHao
date: 2016/12/3
-------------------------------------------------
Change Activity:
2016/12/03:
2020/05/26: 区分http和https
-------------------------------------------------
"""
__author__ = 'JHao'
from helper.proxy import Proxy
from db.dbClient import DbClient
from handler.configHandler import ConfigHandler
class ProxyHandler(object):
    """CRUD operations on the proxy pool."""

    def __init__(self):
        self.conf = ConfigHandler()
        self.db = DbClient(self.conf.dbConn)
        self.db.changeTable(self.conf.tableName)

    def get(self, https=False):
        """Fetch one proxy from the pool, or None when the pool is empty.

        :param https: when True, restrict to HTTPS-verified proxies
        """
        raw = self.db.get(https)
        return Proxy.createFromJson(raw) if raw else None

    def pop(self, https):
        """Fetch one proxy and remove it from the pool; None when empty."""
        raw = self.db.pop(https)
        return Proxy.createFromJson(raw) if raw else None

    def put(self, proxy):
        """Store *proxy* in the pool."""
        self.db.put(proxy)

    def delete(self, proxy):
        """Remove *proxy* from the pool."""
        return self.db.delete(proxy.proxy)

    def getAll(self, https=False):
        """Return every pooled proxy as a list of Proxy objects."""
        return [Proxy.createFromJson(item) for item in self.db.getAll(https)]

    def exists(self, proxy):
        """Return whether *proxy* is already in the pool."""
        return self.db.exists(proxy.proxy)

    def getCount(self):
        """Return pool statistics as ``{'count': <number of proxies>}``."""
        return {'count': self.db.getCount()}
| mit | c7c4ba25af29f422603d82f75d5aa04a | 23.05814 | 61 | 0.491058 | 4.257202 | false | false | false | false |
python-openxml/python-docx | docx/image/constants.py | 18 | 3471 | # encoding: utf-8
"""
Constants specific the the image sub-package
"""
class JPEG_MARKER_CODE(object):
    """
    JPEG marker codes (the byte that follows the 0xFF marker prefix).
    """
    TEM = b'\x01'
    DHT = b'\xC4'
    DAC = b'\xCC'
    JPG = b'\xC8'

    # SOFn ("start of frame") marker codes; the SOF segment carries the
    # image dimensions.
    SOF0 = b'\xC0'
    SOF1 = b'\xC1'
    SOF2 = b'\xC2'
    SOF3 = b'\xC3'
    SOF5 = b'\xC5'
    SOF6 = b'\xC6'
    SOF7 = b'\xC7'
    SOF9 = b'\xC9'
    SOFA = b'\xCA'
    SOFB = b'\xCB'
    SOFD = b'\xCD'
    SOFE = b'\xCE'
    SOFF = b'\xCF'

    # RSTn restart markers (standalone, no segment payload).
    RST0 = b'\xD0'
    RST1 = b'\xD1'
    RST2 = b'\xD2'
    RST3 = b'\xD3'
    RST4 = b'\xD4'
    RST5 = b'\xD5'
    RST6 = b'\xD6'
    RST7 = b'\xD7'

    SOI = b'\xD8'
    EOI = b'\xD9'
    SOS = b'\xDA'
    DQT = b'\xDB'  # Define Quantization Table(s)
    DNL = b'\xDC'
    DRI = b'\xDD'
    DHP = b'\xDE'
    EXP = b'\xDF'

    # APPn application segment markers (APP0 = JFIF, APP1 = Exif, etc.).
    APP0 = b'\xE0'
    APP1 = b'\xE1'
    APP2 = b'\xE2'
    APP3 = b'\xE3'
    APP4 = b'\xE4'
    APP5 = b'\xE5'
    APP6 = b'\xE6'
    APP7 = b'\xE7'
    APP8 = b'\xE8'
    APP9 = b'\xE9'
    APPA = b'\xEA'
    APPB = b'\xEB'
    APPC = b'\xEC'
    APPD = b'\xED'
    APPE = b'\xEE'
    APPF = b'\xEF'

    # Markers that carry no length field or segment payload.
    STANDALONE_MARKERS = (
        TEM, SOI, EOI, RST0, RST1, RST2, RST3, RST4, RST5, RST6, RST7
    )

    SOF_MARKER_CODES = (
        SOF0, SOF1, SOF2, SOF3, SOF5, SOF6, SOF7, SOF9, SOFA, SOFB, SOFD,
        SOFE, SOFF
    )

    # Human-readable names for the marker codes this package cares about.
    marker_names = {
        b'\x00': 'UNKNOWN',
        b'\xC0': 'SOF0',
        b'\xC2': 'SOF2',
        b'\xC4': 'DHT',
        b'\xDA': 'SOS',   # start of scan
        b'\xD8': 'SOI',   # start of image
        b'\xD9': 'EOI',   # end of image
        b'\xDB': 'DQT',
        b'\xE0': 'APP0',
        b'\xE1': 'APP1',
        b'\xE2': 'APP2',
        b'\xED': 'APP13',
        b'\xEE': 'APP14',
    }

    @classmethod
    def is_standalone(cls, marker_code):
        """Return True if *marker_code* has no segment payload."""
        return marker_code in cls.STANDALONE_MARKERS
class MIME_TYPE(object):
    """
    MIME (content type) strings for the image formats this package handles.
    """
    BMP = 'image/bmp'
    GIF = 'image/gif'
    JPEG = 'image/jpeg'
    PNG = 'image/png'
    TIFF = 'image/tiff'
class PNG_CHUNK_TYPE(object):
    """
    PNG chunk type names
    """
    IHDR = 'IHDR'  # image header chunk (dimensions, bit depth)
    pHYs = 'pHYs'  # physical pixel dimensions chunk (resolution)
    IEND = 'IEND'  # image trailer chunk
class TIFF_FLD_TYPE(object):
    """
    Field-value *type* codes used in TIFF Image File Directory (IFD)
    entries (not to be confused with the tag codes in TIFF_TAG).
    """
    BYTE = 1
    ASCII = 2
    SHORT = 3
    LONG = 4
    RATIONAL = 5

    # Human-readable names for the type codes above.
    field_type_names = {
        1: 'BYTE', 2: 'ASCII char', 3: 'SHORT', 4: 'LONG',
        5: 'RATIONAL'
    }


# Short alias for the class above.
TIFF_FLD = TIFF_FLD_TYPE
class TIFF_TAG(object):
    """
    Tag codes for TIFF Image File Directory (IFD) entries.
    """
    # The named constants are the tags this package reads (dimensions and
    # resolution); tag_names below also covers tags it merely recognizes.
    IMAGE_WIDTH = 0x0100
    IMAGE_LENGTH = 0x0101
    X_RESOLUTION = 0x011A
    Y_RESOLUTION = 0x011B
    RESOLUTION_UNIT = 0x0128

    # Human-readable names for IFD tag codes.
    tag_names = {
        0x00FE: 'NewSubfileType',
        0x0100: 'ImageWidth',
        0x0101: 'ImageLength',
        0x0102: 'BitsPerSample',
        0x0103: 'Compression',
        0x0106: 'PhotometricInterpretation',
        0x010E: 'ImageDescription',
        0x010F: 'Make',
        0x0110: 'Model',
        0x0111: 'StripOffsets',
        0x0112: 'Orientation',
        0x0115: 'SamplesPerPixel',
        0x0117: 'StripByteCounts',
        0x011A: 'XResolution',
        0x011B: 'YResolution',
        0x011C: 'PlanarConfiguration',
        0x0128: 'ResolutionUnit',
        0x0131: 'Software',
        0x0132: 'DateTime',
        0x0213: 'YCbCrPositioning',
        0x8769: 'ExifTag',
        0x8825: 'GPS IFD',
        0xC4A5: 'PrintImageMatching',
    }
| mit | 773d75109ca2e9bd6de11661d4628eec | 19.538462 | 73 | 0.501008 | 2.56541 | false | false | false | false |
python-openxml/python-docx | docx/oxml/__init__.py | 1 | 9027 | # encoding: utf-8
"""
Initializes oxml sub-package, including registering custom element classes
corresponding to Open XML elements.
"""
from __future__ import absolute_import
from lxml import etree
from .ns import NamespacePrefixedTag, nsmap
# configure XML parser
element_class_lookup = etree.ElementNamespaceClassLookup()
oxml_parser = etree.XMLParser(remove_blank_text=True, resolve_entities=False)
oxml_parser.set_element_class_lookup(element_class_lookup)
def parse_xml(xml):
    """
    Return the root lxml element of the XML document in *xml*.

    *xml* may be a Python 2.x string or unicode.  Parsing uses the
    customized oxml parser, so elements with a registered custom element
    class are returned as instances of that class.
    """
    return etree.fromstring(xml, oxml_parser)
def register_element_cls(tag, cls):
    """
    Cause the oxml parser to construct *cls* for elements matching *tag*.

    *tag* is a namespace-prefixed tag string of the form ``nspfx:tagroot``,
    e.g. ``'w:document'``.
    """
    prefix, local_name = tag.split(':')
    element_class_lookup.get_namespace(nsmap[prefix])[local_name] = cls
def OxmlElement(nsptag_str, attrs=None, nsdecls=None):
    """
    Return a 'loose' lxml element having the tag specified by *nsptag_str*.

    *nsptag_str* must contain the standard namespace prefix, e.g. 'a:tbl'.
    The resulting element is an instance of the custom element class for
    this tag name if one is defined.  A dictionary of attribute values may
    be provided as *attrs*; they are set if present.  All namespaces in the
    dict *nsdecls* are declared in the element using the key as the prefix
    and the value as the namespace name.  When *nsdecls* is not provided, a
    single namespace declaration is added based on the prefix of
    *nsptag_str*.
    """
    nsptag = NamespacePrefixedTag(nsptag_str)
    namespaces = nsptag.nsmap if nsdecls is None else nsdecls
    return oxml_parser.makeelement(nsptag.clark_name, attrib=attrs,
                                   nsmap=namespaces)
# ===========================================================================
# custom element class mappings
# ===========================================================================
from .shared import CT_DecimalNumber, CT_OnOff, CT_String # noqa
register_element_cls("w:evenAndOddHeaders", CT_OnOff)
register_element_cls("w:titlePg", CT_OnOff)
from .coreprops import CT_CoreProperties # noqa
register_element_cls('cp:coreProperties', CT_CoreProperties)
from .document import CT_Body, CT_Document # noqa
register_element_cls('w:body', CT_Body)
register_element_cls('w:document', CT_Document)
from .numbering import CT_Num, CT_Numbering, CT_NumLvl, CT_NumPr # noqa
register_element_cls('w:abstractNumId', CT_DecimalNumber)
register_element_cls('w:ilvl', CT_DecimalNumber)
register_element_cls('w:lvlOverride', CT_NumLvl)
register_element_cls('w:num', CT_Num)
register_element_cls('w:numId', CT_DecimalNumber)
register_element_cls('w:numPr', CT_NumPr)
register_element_cls('w:numbering', CT_Numbering)
register_element_cls('w:startOverride', CT_DecimalNumber)
from .section import ( # noqa
CT_HdrFtr,
CT_HdrFtrRef,
CT_PageMar,
CT_PageSz,
CT_SectPr,
CT_SectType,
)
register_element_cls("w:footerReference", CT_HdrFtrRef)
register_element_cls("w:ftr", CT_HdrFtr)
register_element_cls("w:hdr", CT_HdrFtr)
register_element_cls("w:headerReference", CT_HdrFtrRef)
register_element_cls("w:pgMar", CT_PageMar)
register_element_cls("w:pgSz", CT_PageSz)
register_element_cls("w:sectPr", CT_SectPr)
register_element_cls("w:type", CT_SectType)
from .settings import CT_Settings # noqa
register_element_cls("w:settings", CT_Settings)
from .shape import ( # noqa
CT_Blip,
CT_BlipFillProperties,
CT_GraphicalObject,
CT_GraphicalObjectData,
CT_Inline,
CT_NonVisualDrawingProps,
CT_Picture,
CT_PictureNonVisual,
CT_Point2D,
CT_PositiveSize2D,
CT_ShapeProperties,
CT_Transform2D,
)
register_element_cls('a:blip', CT_Blip)
register_element_cls('a:ext', CT_PositiveSize2D)
register_element_cls('a:graphic', CT_GraphicalObject)
register_element_cls('a:graphicData', CT_GraphicalObjectData)
register_element_cls('a:off', CT_Point2D)
register_element_cls('a:xfrm', CT_Transform2D)
register_element_cls('pic:blipFill', CT_BlipFillProperties)
register_element_cls('pic:cNvPr', CT_NonVisualDrawingProps)
register_element_cls('pic:nvPicPr', CT_PictureNonVisual)
register_element_cls('pic:pic', CT_Picture)
register_element_cls('pic:spPr', CT_ShapeProperties)
register_element_cls('wp:docPr', CT_NonVisualDrawingProps)
register_element_cls('wp:extent', CT_PositiveSize2D)
register_element_cls('wp:inline', CT_Inline)
from .styles import CT_LatentStyles, CT_LsdException, CT_Style, CT_Styles # noqa
register_element_cls('w:basedOn', CT_String)
register_element_cls('w:latentStyles', CT_LatentStyles)
register_element_cls('w:locked', CT_OnOff)
register_element_cls('w:lsdException', CT_LsdException)
register_element_cls('w:name', CT_String)
register_element_cls('w:next', CT_String)
register_element_cls('w:qFormat', CT_OnOff)
register_element_cls('w:semiHidden', CT_OnOff)
register_element_cls('w:style', CT_Style)
register_element_cls('w:styles', CT_Styles)
register_element_cls('w:uiPriority', CT_DecimalNumber)
register_element_cls('w:unhideWhenUsed', CT_OnOff)
from .table import ( # noqa
CT_Height,
CT_Row,
CT_Tbl,
CT_TblGrid,
CT_TblGridCol,
CT_TblLayoutType,
CT_TblPr,
CT_TblWidth,
CT_Tc,
CT_TcPr,
CT_TrPr,
CT_VMerge,
CT_VerticalJc,
)
register_element_cls('w:bidiVisual', CT_OnOff)
register_element_cls('w:gridCol', CT_TblGridCol)
register_element_cls('w:gridSpan', CT_DecimalNumber)
register_element_cls('w:tbl', CT_Tbl)
register_element_cls('w:tblGrid', CT_TblGrid)
register_element_cls('w:tblLayout', CT_TblLayoutType)
register_element_cls('w:tblPr', CT_TblPr)
register_element_cls('w:tblStyle', CT_String)
register_element_cls('w:tc', CT_Tc)
register_element_cls('w:tcPr', CT_TcPr)
register_element_cls('w:tcW', CT_TblWidth)
register_element_cls('w:tr', CT_Row)
register_element_cls('w:trHeight', CT_Height)
register_element_cls('w:trPr', CT_TrPr)
register_element_cls('w:vAlign', CT_VerticalJc)
register_element_cls('w:vMerge', CT_VMerge)
from .text.font import ( # noqa
CT_Color,
CT_Fonts,
CT_Highlight,
CT_HpsMeasure,
CT_RPr,
CT_Underline,
CT_VerticalAlignRun,
)
register_element_cls('w:b', CT_OnOff)
register_element_cls('w:bCs', CT_OnOff)
register_element_cls('w:caps', CT_OnOff)
register_element_cls('w:color', CT_Color)
register_element_cls('w:cs', CT_OnOff)
register_element_cls('w:dstrike', CT_OnOff)
register_element_cls('w:emboss', CT_OnOff)
register_element_cls('w:highlight', CT_Highlight)
register_element_cls('w:i', CT_OnOff)
register_element_cls('w:iCs', CT_OnOff)
register_element_cls('w:imprint', CT_OnOff)
register_element_cls('w:noProof', CT_OnOff)
register_element_cls('w:oMath', CT_OnOff)
register_element_cls('w:outline', CT_OnOff)
register_element_cls('w:rFonts', CT_Fonts)
register_element_cls('w:rPr', CT_RPr)
register_element_cls('w:rStyle', CT_String)
register_element_cls('w:rtl', CT_OnOff)
register_element_cls('w:shadow', CT_OnOff)
register_element_cls('w:smallCaps', CT_OnOff)
register_element_cls('w:snapToGrid', CT_OnOff)
register_element_cls('w:specVanish', CT_OnOff)
register_element_cls('w:strike', CT_OnOff)
register_element_cls('w:sz', CT_HpsMeasure)
register_element_cls('w:u', CT_Underline)
register_element_cls('w:vanish', CT_OnOff)
register_element_cls('w:vertAlign', CT_VerticalAlignRun)
register_element_cls('w:webHidden', CT_OnOff)
from .text.paragraph import CT_P # noqa
register_element_cls('w:p', CT_P)
from .text.parfmt import ( # noqa
CT_Ind,
CT_Jc,
CT_PPr,
CT_Spacing,
CT_TabStop,
CT_TabStops,
)
register_element_cls('w:ind', CT_Ind)
register_element_cls('w:jc', CT_Jc)
register_element_cls('w:keepLines', CT_OnOff)
register_element_cls('w:keepNext', CT_OnOff)
register_element_cls('w:pageBreakBefore', CT_OnOff)
register_element_cls('w:pPr', CT_PPr)
register_element_cls('w:pStyle', CT_String)
register_element_cls('w:spacing', CT_Spacing)
register_element_cls('w:tab', CT_TabStop)
register_element_cls('w:tabs', CT_TabStops)
register_element_cls('w:widowControl', CT_OnOff)
from .text.run import CT_Br, CT_R, CT_Text # noqa
register_element_cls('w:br', CT_Br)
register_element_cls('w:r', CT_R)
register_element_cls('w:t', CT_Text)
| mit | feea0a036474a98561ff732053d0a594 | 35.399194 | 81 | 0.678298 | 3.016037 | false | false | false | false |
python-openxml/python-docx | docx/parts/story.py | 1 | 3238 | # encoding: utf-8
"""|BaseStoryPart| and related objects"""
from __future__ import absolute_import, division, print_function, unicode_literals
from docx.opc.constants import RELATIONSHIP_TYPE as RT
from docx.opc.part import XmlPart
from docx.oxml.shape import CT_Inline
from docx.shared import lazyproperty
class BaseStoryPart(XmlPart):
    """Base class for story parts.

    A story part is one that can contain textual content, such as the document-part
    and the header or footer parts. These all share content behaviors like
    `.paragraphs`, `.add_paragraph()`, `.add_table()` etc.
    """

    def get_or_add_image(self, image_descriptor):
        """Return (rId, image) pair for image identified by *image_descriptor*.

        *rId* is the str key (often like "rId7") of the relationship between this
        story part and the image part, reused when such a relationship already
        exists and newly created when not. *image* is an |Image| instance giving
        access to properties of the image, such as dimensions and image type.
        """
        image_part = self._package.get_or_add_image_part(image_descriptor)
        relationship_id = self.relate_to(image_part, RT.IMAGE)
        return relationship_id, image_part.image

    def get_style(self, style_id, style_type):
        """Return the style in this document matching *style_id*.

        Resolution is delegated to the document part; the default style for
        *style_type* is returned when *style_id* is |None| or does not match a
        defined style of *style_type*.
        """
        return self._document_part.get_style(style_id, style_type)

    def get_style_id(self, style_or_name, style_type):
        """Return str style_id for *style_or_name* of *style_type*.

        Returns |None| when *style_or_name* is itself |None| or when it resolves
        to the default style for *style_type*. Raises when *style_or_name* is a
        style of the wrong type or names a style not present in the document.
        """
        return self._document_part.get_style_id(style_or_name, style_type)

    def new_pic_inline(self, image_descriptor, width, height):
        """Return a newly-created `w:inline` element.

        The element contains the image specified by *image_descriptor* and is
        scaled based on the values of *width* and *height*.
        """
        rId, image = self.get_or_add_image(image_descriptor)
        cx, cy = image.scaled_dimensions(width, height)
        return CT_Inline.new_pic_inline(self.next_id, rId, image.filename, cx, cy)

    @property
    def next_id(self):
        """Next available positive integer id value in this story XML document.

        Determined by incrementing the maximum existing id value; gaps in the
        existing id sequence are not filled. The id attribute value is unique in
        the document, without regard to the element type it appears on.
        """
        numeric_ids = [
            int(id_str)
            for id_str in self._element.xpath('//@id')
            if id_str.isdigit()
        ]
        return max(numeric_ids) + 1 if numeric_ids else 1

    @lazyproperty
    def _document_part(self):
        """|DocumentPart| object for this package."""
        return self.package.main_document_part
| mit | ac2526774eb52cf0237062bc85491168 | 40.512821 | 88 | 0.661519 | 3.822904 | false | false | false | false |
python-openxml/python-docx | tests/opc/test_packuri.py | 13 | 3203 | # encoding: utf-8
"""
Test suite for the docx.opc.packuri module
"""
import pytest
from docx.opc.packuri import PackURI
class DescribePackURI(object):
    """Unit-test suite for the |PackURI| value type."""

    def cases(self, expected_values):
        """Return (pack_uri, expected_value) pairs for the fixed URI cases.

        Pairs are zipped from a fixed list of uri_str cases and
        *expected_values*. Raises when the two lengths differ.
        """
        uri_strs = [
            '/',
            '/ppt/presentation.xml',
            '/ppt/slides/slide1.xml',
        ]
        if len(expected_values) != len(uri_strs):
            raise AssertionError(
                "len(expected_values) differs from len(uri_str_cases)"
            )
        return zip([PackURI(uri_str) for uri_str in uri_strs], expected_values)

    def it_can_construct_from_relative_ref(self):
        pack_uri = PackURI.from_rel_ref(
            '/ppt/slides', '../slideLayouts/slideLayout1.xml'
        )
        assert pack_uri == '/ppt/slideLayouts/slideLayout1.xml'

    def it_should_raise_on_construct_with_bad_pack_uri_str(self):
        with pytest.raises(ValueError):
            PackURI('foobar')

    def it_can_calculate_baseURI(self):
        for pack_uri, expected_baseURI in self.cases(('/', '/ppt', '/ppt/slides')):
            assert pack_uri.baseURI == expected_baseURI

    def it_can_calculate_extension(self):
        for pack_uri, expected_ext in self.cases(('', 'xml', 'xml')):
            assert pack_uri.ext == expected_ext

    def it_can_calculate_filename(self):
        for pack_uri, expected_filename in self.cases(
            ('', 'presentation.xml', 'slide1.xml')
        ):
            assert pack_uri.filename == expected_filename

    def it_knows_the_filename_index(self):
        for pack_uri, expected_idx in self.cases((None, None, 1)):
            assert pack_uri.idx == expected_idx

    def it_can_calculate_membername(self):
        expected_membernames = (
            '',
            'ppt/presentation.xml',
            'ppt/slides/slide1.xml',
        )
        for pack_uri, expected_membername in self.cases(expected_membernames):
            assert pack_uri.membername == expected_membername

    def it_can_calculate_relative_ref_value(self):
        for baseURI, uri_str, expected_relative_ref in (
            ('/', '/ppt/presentation.xml', 'ppt/presentation.xml'),
            ('/ppt', '/ppt/slideMasters/slideMaster1.xml',
             'slideMasters/slideMaster1.xml'),
            ('/ppt/slides', '/ppt/slideLayouts/slideLayout1.xml',
             '../slideLayouts/slideLayout1.xml'),
        ):
            assert PackURI(uri_str).relative_ref(baseURI) == expected_relative_ref

    def it_can_calculate_rels_uri(self):
        expected_rels_uris = (
            '/_rels/.rels',
            '/ppt/_rels/presentation.xml.rels',
            '/ppt/slides/_rels/slide1.xml.rels',
        )
        for pack_uri, expected_rels_uri in self.cases(expected_rels_uris):
            assert pack_uri.rels_uri == expected_rels_uri
| mit | 725334f906d342e8376d88d6a56a412b | 35.397727 | 74 | 0.600999 | 3.578771 | false | false | false | false |
python-openxml/python-docx | tests/oxml/test__init__.py | 11 | 4237 | # encoding: utf-8
"""
Test suite for pptx.oxml.__init__.py module, primarily XML parser-related.
"""
from __future__ import print_function, unicode_literals
import pytest
from lxml import etree
from docx.oxml import (
OxmlElement, oxml_parser, parse_xml, register_element_cls
)
from docx.oxml.ns import qn
from docx.oxml.shared import BaseOxmlElement
class DescribeOxmlElement(object):
    """Unit-test suite for the `OxmlElement()` element factory function."""

    def it_returns_an_lxml_element_with_matching_tag_name(self):
        foo = OxmlElement('a:foo')
        assert isinstance(foo, etree._Element)
        assert foo.tag == (
            '{http://schemas.openxmlformats.org/drawingml/2006/main}foo'
        )

    def it_adds_supplied_attributes(self):
        foo = OxmlElement('a:foo', {'a': 'b', 'c': 'd'})
        expected_xml = (
            '<a:foo xmlns:a="http://schemas.openxmlformats.org/drawingml/200'
            '6/main" a="b" c="d"/>'
        ).encode('utf-8')
        assert etree.tostring(foo) == expected_xml

    def it_adds_additional_namespace_declarations_when_supplied(self):
        main_ns = 'http://schemas.openxmlformats.org/drawingml/2006/main'
        other_ns = 'other'
        foo = OxmlElement('a:foo', nsdecls={'a': main_ns, 'x': other_ns})
        assert len(foo.nsmap.items()) == 2
        assert foo.nsmap['a'] == main_ns
        assert foo.nsmap['x'] == other_ns
class DescribeOxmlParser(object):
    """Unit-test suite for the shared lxml parser instance `oxml_parser`."""

    def it_strips_whitespace_between_elements(self, whitespace_fixture):
        # -- behavior under test: parsing pretty-printed XML with oxml_parser
        # -- and serializing it back yields XML without inter-element
        # -- whitespace
        pretty_xml_text, stripped_xml_text = whitespace_fixture
        element = etree.fromstring(pretty_xml_text, oxml_parser)
        xml_text = etree.tostring(element, encoding='unicode')
        assert xml_text == stripped_xml_text

    # fixtures -------------------------------------------------------

    @pytest.fixture
    def whitespace_fixture(self):
        # -- element names deliberately contain non-ASCII characters to
        # -- exercise Unicode handling as well
        pretty_xml_text = (
            '<foø>\n'
            ' <bår>text</bår>\n'
            '</foø>\n'
        )
        stripped_xml_text = '<foø><bår>text</bår></foø>'
        return pretty_xml_text, stripped_xml_text
class DescribeParseXml(object):
    """Unit-test suite for the `parse_xml()` module function."""

    def it_accepts_bytes_and_assumes_utf8_encoding(self, xml_bytes):
        # -- no assertion needed; not raising is the behavior under test
        parse_xml(xml_bytes)

    def it_accepts_unicode_providing_there_is_no_encoding_declaration(self):
        non_enc_decl = '<?xml version="1.0" standalone="yes"?>'
        enc_decl = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
        xml_body = '<foo><bar>føøbår</bar></foo>'
        # unicode body by itself doesn't raise
        parse_xml(xml_body)
        # adding XML decl without encoding attr doesn't raise either
        xml_text = '%s\n%s' % (non_enc_decl, xml_body)
        parse_xml(xml_text)
        # but adding encoding in the declaration raises ValueError
        xml_text = '%s\n%s' % (enc_decl, xml_body)
        with pytest.raises(ValueError):
            parse_xml(xml_text)

    def it_uses_registered_element_classes(self, xml_bytes):
        # NOTE(review): registration is module-global, so CustElmCls stays
        # registered for 'a:foo' after this test runs
        register_element_cls('a:foo', CustElmCls)
        element = parse_xml(xml_bytes)
        assert isinstance(element, CustElmCls)

    # fixture components ---------------------------------------------

    @pytest.fixture
    def xml_bytes(self):
        return (
            '<a:foo xmlns:a="http://schemas.openxmlformats.org/drawingml/200'
            '6/main">\n'
            ' <a:bar>foøbår</a:bar>\n'
            '</a:foo>\n'
        ).encode('utf-8')
class DescribeRegisterElementCls(object):
    """Unit-test suite for the `register_element_cls()` module function."""

    def it_determines_class_used_for_elements_with_matching_tagname(
            self, xml_text):
        register_element_cls('a:foo', CustElmCls)
        foo = parse_xml(xml_text)
        # -- only the registered tag gets the custom class; an unregistered
        # -- child element gets the default lxml element class
        assert type(foo) is CustElmCls
        assert type(foo.find(qn('a:bar'))) is etree._Element

    # fixture components ---------------------------------------------

    @pytest.fixture
    def xml_text(self):
        return (
            '<a:foo xmlns:a="http://schemas.openxmlformats.org/drawingml/200'
            '6/main">\n'
            ' <a:bar>foøbår</a:bar>\n'
            '</a:foo>\n'
        )
# ===========================================================================
# static fixture
# ===========================================================================
class CustElmCls(BaseOxmlElement):
    """Stand-in custom element class registered for 'a:foo' in tests above."""
    pass
| mit | 3413065291e29a975848bd4f13626ae4 | 31.984375 | 77 | 0.56703 | 3.503734 | false | false | false | false |
python-openxml/python-docx | docx/oxml/section.py | 1 | 11114 | # encoding: utf-8
"""Section-related custom element classes"""
from __future__ import absolute_import, division, print_function, unicode_literals
from copy import deepcopy
from docx.enum.section import WD_HEADER_FOOTER, WD_ORIENTATION, WD_SECTION_START
from docx.oxml.simpletypes import ST_SignedTwipsMeasure, ST_TwipsMeasure, XsdString
from docx.oxml.xmlchemy import (
BaseOxmlElement,
OptionalAttribute,
RequiredAttribute,
ZeroOrMore,
ZeroOrOne,
)
class CT_HdrFtr(BaseOxmlElement):
    """`w:hdr` and `w:ftr`, the root element for header and footer part respectively"""

    # -- block-level content: a header/footer holds paragraphs and tables --
    p = ZeroOrMore('w:p', successors=())
    tbl = ZeroOrMore('w:tbl', successors=())
class CT_HdrFtrRef(BaseOxmlElement):
    """`w:headerReference` and `w:footerReference` elements"""

    # -- which header/footer slot this reference fills, a WD_HEADER_FOOTER
    # -- member --
    type_ = RequiredAttribute('w:type', WD_HEADER_FOOTER)
    # -- rId of the relationship to the header/footer part --
    rId = RequiredAttribute('r:id', XsdString)
class CT_PageMar(BaseOxmlElement):
    """
    ``<w:pgMar>`` element, defining page margins.
    """
    # -- all attributes are length values; `w:top` and `w:bottom` use the
    # -- *signed* twips type while the others use the unsigned type --
    top = OptionalAttribute('w:top', ST_SignedTwipsMeasure)
    right = OptionalAttribute('w:right', ST_TwipsMeasure)
    bottom = OptionalAttribute('w:bottom', ST_SignedTwipsMeasure)
    left = OptionalAttribute('w:left', ST_TwipsMeasure)
    header = OptionalAttribute('w:header', ST_TwipsMeasure)
    footer = OptionalAttribute('w:footer', ST_TwipsMeasure)
    gutter = OptionalAttribute('w:gutter', ST_TwipsMeasure)
class CT_PageSz(BaseOxmlElement):
    """
    ``<w:pgSz>`` element, defining page dimensions and orientation.
    """
    w = OptionalAttribute('w:w', ST_TwipsMeasure)  # page width
    h = OptionalAttribute('w:h', ST_TwipsMeasure)  # page height
    # -- orientation reads as portrait when the attribute is omitted --
    orient = OptionalAttribute(
        'w:orient', WD_ORIENTATION, default=WD_ORIENTATION.PORTRAIT
    )
class CT_SectPr(BaseOxmlElement):
    """`w:sectPr` element, the container element for section properties"""

    # -- schema-mandated order of `w:sectPr` child elements; each descriptor
    # -- below receives a tail-slice of this sequence as its `successors`
    # -- argument so a newly-inserted child lands in its proper position --
    _tag_seq = (
        'w:footnotePr', 'w:endnotePr', 'w:type', 'w:pgSz', 'w:pgMar', 'w:paperSrc',
        'w:pgBorders', 'w:lnNumType', 'w:pgNumType', 'w:cols', 'w:formProt', 'w:vAlign',
        'w:noEndnote', 'w:titlePg', 'w:textDirection', 'w:bidi', 'w:rtlGutter',
        'w:docGrid', 'w:printerSettings', 'w:sectPrChange',
    )
    headerReference = ZeroOrMore("w:headerReference", successors=_tag_seq)
    footerReference = ZeroOrMore("w:footerReference", successors=_tag_seq)
    type = ZeroOrOne("w:type", successors=_tag_seq[3:])
    pgSz = ZeroOrOne("w:pgSz", successors=_tag_seq[4:])
    pgMar = ZeroOrOne("w:pgMar", successors=_tag_seq[5:])
    titlePg = ZeroOrOne("w:titlePg", successors=_tag_seq[14:])
    # -- class-definition working value only; not part of the class API --
    del _tag_seq

    def add_footerReference(self, type_, rId):
        """Return newly added CT_HdrFtrRef element of *type_* with *rId*.

        The element tag is `w:footerReference`.
        """
        footerReference = self._add_footerReference()
        footerReference.type_ = type_
        footerReference.rId = rId
        return footerReference

    def add_headerReference(self, type_, rId):
        """Return newly added CT_HdrFtrRef element of *type_* with *rId*.

        The element tag is `w:headerReference`.
        """
        headerReference = self._add_headerReference()
        headerReference.type_ = type_
        headerReference.rId = rId
        return headerReference

    @property
    def bottom_margin(self):
        """
        The value of the ``w:bottom`` attribute in the ``<w:pgMar>`` child
        element, as a |Length| object, or |None| if either the element or the
        attribute is not present.
        """
        pgMar = self.pgMar
        if pgMar is None:
            return None
        return pgMar.bottom

    @bottom_margin.setter
    def bottom_margin(self, value):
        pgMar = self.get_or_add_pgMar()
        pgMar.bottom = value

    def clone(self):
        """
        Return an exact duplicate of this ``<w:sectPr>`` element tree
        suitable for use in adding a section break. All attributes are
        removed from the root ``<w:sectPr>`` element (in practice these are
        the rsid* revision-tracking attributes).
        """
        clone_sectPr = deepcopy(self)
        # -- note this clears *all* root-element attributes, not just rsid* --
        clone_sectPr.attrib.clear()
        return clone_sectPr

    @property
    def footer(self):
        """
        The value of the ``w:footer`` attribute in the ``<w:pgMar>`` child
        element, as a |Length| object, or |None| if either the element or the
        attribute is not present.
        """
        pgMar = self.pgMar
        if pgMar is None:
            return None
        return pgMar.footer

    @footer.setter
    def footer(self, value):
        pgMar = self.get_or_add_pgMar()
        pgMar.footer = value

    def get_footerReference(self, type_):
        """Return footerReference element of *type_* or None if not present."""
        path = "./w:footerReference[@w:type='%s']" % WD_HEADER_FOOTER.to_xml(type_)
        footerReferences = self.xpath(path)
        if not footerReferences:
            return None
        return footerReferences[0]

    def get_headerReference(self, type_):
        """Return headerReference element of *type_* or None if not present."""
        # -- restyled to match get_footerReference() above --
        path = "./w:headerReference[@w:type='%s']" % WD_HEADER_FOOTER.to_xml(type_)
        headerReferences = self.xpath(path)
        if not headerReferences:
            return None
        return headerReferences[0]

    @property
    def gutter(self):
        """
        The value of the ``w:gutter`` attribute in the ``<w:pgMar>`` child
        element, as a |Length| object, or |None| if either the element or the
        attribute is not present.
        """
        pgMar = self.pgMar
        if pgMar is None:
            return None
        return pgMar.gutter

    @gutter.setter
    def gutter(self, value):
        pgMar = self.get_or_add_pgMar()
        pgMar.gutter = value

    @property
    def header(self):
        """
        The value of the ``w:header`` attribute in the ``<w:pgMar>`` child
        element, as a |Length| object, or |None| if either the element or the
        attribute is not present.
        """
        pgMar = self.pgMar
        if pgMar is None:
            return None
        return pgMar.header

    @header.setter
    def header(self, value):
        pgMar = self.get_or_add_pgMar()
        pgMar.header = value

    @property
    def left_margin(self):
        """
        The value of the ``w:left`` attribute in the ``<w:pgMar>`` child
        element, as a |Length| object, or |None| if either the element or the
        attribute is not present.
        """
        pgMar = self.pgMar
        if pgMar is None:
            return None
        return pgMar.left

    @left_margin.setter
    def left_margin(self, value):
        pgMar = self.get_or_add_pgMar()
        pgMar.left = value

    @property
    def orientation(self):
        """
        The member of the ``WD_ORIENTATION`` enumeration corresponding to the
        value of the ``orient`` attribute of the ``<w:pgSz>`` child element,
        or ``WD_ORIENTATION.PORTRAIT`` if not present.
        """
        pgSz = self.pgSz
        if pgSz is None:
            return WD_ORIENTATION.PORTRAIT
        return pgSz.orient

    @orientation.setter
    def orientation(self, value):
        pgSz = self.get_or_add_pgSz()
        pgSz.orient = value

    @property
    def page_height(self):
        """
        Value in EMU of the ``h`` attribute of the ``<w:pgSz>`` child
        element, or |None| if not present.
        """
        pgSz = self.pgSz
        if pgSz is None:
            return None
        return pgSz.h

    @page_height.setter
    def page_height(self, value):
        pgSz = self.get_or_add_pgSz()
        pgSz.h = value

    @property
    def page_width(self):
        """
        Value in EMU of the ``w`` attribute of the ``<w:pgSz>`` child
        element, or |None| if not present.
        """
        pgSz = self.pgSz
        if pgSz is None:
            return None
        return pgSz.w

    @page_width.setter
    def page_width(self, value):
        pgSz = self.get_or_add_pgSz()
        pgSz.w = value

    @property
    def preceding_sectPr(self):
        """sectPr immediately preceding this one or None if this is the first."""
        # ---[1] predicate returns list of zero or one value---
        preceding_sectPrs = self.xpath("./preceding::w:sectPr[1]")
        return preceding_sectPrs[0] if len(preceding_sectPrs) > 0 else None

    def remove_footerReference(self, type_):
        """Return rId of w:footerReference child of *type_* after removing it.

        Raises |AttributeError| when no footerReference of *type_* is present
        (get_footerReference() returns |None| in that case).
        """
        footerReference = self.get_footerReference(type_)
        rId = footerReference.rId
        self.remove(footerReference)
        return rId

    def remove_headerReference(self, type_):
        """Return rId of w:headerReference child of *type_* after removing it.

        Raises |AttributeError| when no headerReference of *type_* is present
        (get_headerReference() returns |None| in that case).
        """
        headerReference = self.get_headerReference(type_)
        rId = headerReference.rId
        self.remove(headerReference)
        return rId

    @property
    def right_margin(self):
        """
        The value of the ``w:right`` attribute in the ``<w:pgMar>`` child
        element, as a |Length| object, or |None| if either the element or the
        attribute is not present.
        """
        pgMar = self.pgMar
        if pgMar is None:
            return None
        return pgMar.right

    @right_margin.setter
    def right_margin(self, value):
        pgMar = self.get_or_add_pgMar()
        pgMar.right = value

    @property
    def start_type(self):
        """
        The member of the ``WD_SECTION_START`` enumeration corresponding to
        the value of the ``val`` attribute of the ``<w:type>`` child element,
        or ``WD_SECTION_START.NEW_PAGE`` if not present.
        """
        type = self.type
        if type is None or type.val is None:
            return WD_SECTION_START.NEW_PAGE
        return type.val

    @start_type.setter
    def start_type(self, value):
        # -- NEW_PAGE is the default, so it is expressed by removing the
        # -- `w:type` child rather than writing it explicitly --
        if value is None or value is WD_SECTION_START.NEW_PAGE:
            self._remove_type()
            return
        type = self.get_or_add_type()
        type.val = value

    @property
    def titlePg_val(self):
        """Value of `w:titlePg/@val`, or |False| if the element is not present."""
        titlePg = self.titlePg
        if titlePg is None:
            return False
        return titlePg.val

    @titlePg_val.setter
    def titlePg_val(self, value):
        if value in [None, False]:
            self._remove_titlePg()
        else:
            self.get_or_add_titlePg().val = value

    @property
    def top_margin(self):
        """
        The value of the ``w:top`` attribute in the ``<w:pgMar>`` child
        element, as a |Length| object, or |None| if either the element or the
        attribute is not present.
        """
        pgMar = self.pgMar
        if pgMar is None:
            return None
        return pgMar.top

    @top_margin.setter
    def top_margin(self, value):
        pgMar = self.get_or_add_pgMar()
        pgMar.top = value
class CT_SectType(BaseOxmlElement):
    """
    ``<w:type>`` child of ``<w:sectPr>``, defining the section start type.

    Class name notwithstanding, the element tag is ``w:type`` (see the
    ``type`` descriptor on |CT_SectPr|).
    """
    val = OptionalAttribute('w:val', WD_SECTION_START)
| mit | 58b81f9cd021b59b27a5833a3563349b | 30.663818 | 88 | 0.602573 | 3.686235 | false | false | false | false |
python-openxml/python-docx | features/steps/document.py | 1 | 8937 | # encoding: utf-8
"""
Step implementations for document-related features
"""
from __future__ import absolute_import, print_function, unicode_literals
from behave import given, then, when
from docx import Document
from docx.enum.section import WD_ORIENT, WD_SECTION
from docx.shape import InlineShapes
from docx.shared import Inches
from docx.section import Sections
from docx.styles.styles import Styles
from docx.table import Table
from docx.text.paragraph import Paragraph
from helpers import test_docx, test_file
# given ===================================================
# -- each step loads a fixture .docx (via test_docx()) or a default document
# -- and stashes it on the behave context for later steps --

@given('a blank document')
def given_a_blank_document(context):
    context.document = Document(test_docx('doc-word-default-blank'))


@given('a document having built-in styles')
def given_a_document_having_builtin_styles(context):
    context.document = Document()


@given('a document having inline shapes')
def given_a_document_having_inline_shapes(context):
    context.document = Document(test_docx('shp-inline-shape-access'))


@given('a document having sections')
def given_a_document_having_sections(context):
    context.document = Document(test_docx('doc-access-sections'))


@given('a document having styles')
def given_a_document_having_styles(context):
    context.document = Document(test_docx('sty-having-styles-part'))


@given('a document having three tables')
def given_a_document_having_three_tables(context):
    context.document = Document(test_docx('tbl-having-tables'))


@given('a single-section document having portrait layout')
def given_a_single_section_document_having_portrait_layout(context):
    context.document = Document(test_docx('doc-add-section'))
    section = context.document.sections[-1]
    # -- stash (width, height) so "then" steps can verify an orientation flip --
    context.original_dimensions = (section.page_width, section.page_height)


@given("a single-section Document object with headers and footers as document")
def given_a_single_section_Document_object_with_headers_and_footers(context):
    context.document = Document(test_docx("doc-add-section"))
# when ====================================================
# -- each step exercises one Document API call; results that later steps need
# -- to inspect are stashed on the behave context --

@when('I add a 2 x 2 table specifying only row and column count')
def when_add_2x2_table_specifying_only_row_and_col_count(context):
    document = context.document
    document.add_table(rows=2, cols=2)


@when('I add a 2 x 2 table specifying style \'{style_name}\'')
def when_add_2x2_table_specifying_style_name(context, style_name):
    document = context.document
    document.add_table(rows=2, cols=2, style=style_name)


@when('I add a heading specifying level={level}')
def when_add_heading_specifying_level(context, level):
    # -- behave passes step parameters as str; convert to int for the API --
    context.document.add_heading(level=int(level))


@when('I add a heading specifying only its text')
def when_add_heading_specifying_only_its_text(context):
    document = context.document
    context.heading_text = text = 'Spam vs. Eggs'
    document.add_heading(text)


@when('I add a page break to the document')
def when_add_page_break_to_document(context):
    document = context.document
    document.add_page_break()


@when('I add a paragraph specifying its style as a {kind}')
def when_I_add_a_paragraph_specifying_its_style_as_a(context, kind):
    document = context.document
    style = context.style = document.styles['Heading 1']
    # -- same underlying style, specified either as object or by name --
    style_spec = {
        'style object': style,
        'style name':   'Heading 1',
    }[kind]
    document.add_paragraph(style=style_spec)


@when('I add a paragraph specifying its text')
def when_add_paragraph_specifying_text(context):
    document = context.document
    context.paragraph_text = 'foobar'
    document.add_paragraph(context.paragraph_text)


@when('I add a paragraph without specifying text or style')
def when_add_paragraph_without_specifying_text_or_style(context):
    document = context.document
    document.add_paragraph()


@when('I add a picture specifying 1.75" width and 2.5" height')
def when_add_picture_specifying_width_and_height(context):
    document = context.document
    context.picture = document.add_picture(
        test_file('monty-truth.png'),
        width=Inches(1.75), height=Inches(2.5)
    )


@when('I add a picture specifying a height of 1.5 inches')
def when_add_picture_specifying_height(context):
    document = context.document
    context.picture = document.add_picture(
        test_file('monty-truth.png'), height=Inches(1.5)
    )


@when('I add a picture specifying a width of 1.5 inches')
def when_add_picture_specifying_width(context):
    document = context.document
    context.picture = document.add_picture(
        test_file('monty-truth.png'), width=Inches(1.5)
    )


@when('I add a picture specifying only the image file')
def when_add_picture_specifying_only_image_file(context):
    document = context.document
    context.picture = document.add_picture(test_file('monty-truth.png'))


@when('I add an even-page section to the document')
def when_I_add_an_even_page_section_to_the_document(context):
    context.section = context.document.add_section(WD_SECTION.EVEN_PAGE)


@when('I change the new section layout to landscape')
def when_I_change_the_new_section_layout_to_landscape(context):
    # -- swapped unpack: landscape dimensions are the stashed portrait
    # -- (width, height) transposed --
    new_height, new_width = context.original_dimensions
    section = context.section
    section.orientation = WD_ORIENT.LANDSCAPE
    section.page_width = new_width
    section.page_height = new_height


@when("I execute section = document.add_section()")
def when_I_execute_section_eq_document_add_section(context):
    context.section = context.document.add_section()
# then ====================================================
# -- each step asserts on state stashed on the behave context by the
# -- given/when steps above --

@then('document.inline_shapes is an InlineShapes object')
def then_document_inline_shapes_is_an_InlineShapes_object(context):
    document = context.document
    inline_shapes = document.inline_shapes
    assert isinstance(inline_shapes, InlineShapes)


@then('document.paragraphs is a list containing three paragraphs')
def then_document_paragraphs_is_a_list_containing_three_paragraphs(context):
    document = context.document
    paragraphs = document.paragraphs
    assert isinstance(paragraphs, list)
    assert len(paragraphs) == 3
    for paragraph in paragraphs:
        assert isinstance(paragraph, Paragraph)


@then('document.sections is a Sections object')
def then_document_sections_is_a_Sections_object(context):
    sections = context.document.sections
    msg = 'document.sections not instance of Sections'
    assert isinstance(sections, Sections), msg


@then('document.styles is a Styles object')
def then_document_styles_is_a_Styles_object(context):
    styles = context.document.styles
    assert isinstance(styles, Styles)


@then('document.tables is a list containing three tables')
def then_document_tables_is_a_list_containing_three_tables(context):
    document = context.document
    tables = document.tables
    assert isinstance(tables, list)
    assert len(tables) == 3
    for table in tables:
        assert isinstance(table, Table)


@then('the document contains a 2 x 2 table')
def then_the_document_contains_a_2x2_table(context):
    table = context.document.tables[-1]
    assert isinstance(table, Table)
    assert len(table.rows) == 2
    assert len(table.columns) == 2
    # -- stash for steps that inspect the added table further --
    context.table_ = table


@then('the document has two sections')
def then_the_document_has_two_sections(context):
    assert len(context.document.sections) == 2


@then('the first section is portrait')
def then_the_first_section_is_portrait(context):
    first_section = context.document.sections[0]
    expected_width, expected_height = context.original_dimensions
    assert first_section.orientation == WD_ORIENT.PORTRAIT
    assert first_section.page_width == expected_width
    assert first_section.page_height == expected_height


@then('the last paragraph contains only a page break')
def then_last_paragraph_contains_only_a_page_break(context):
    document = context.document
    paragraph = document.paragraphs[-1]
    # -- exactly one run holding exactly one `w:br` of type "page" --
    assert len(paragraph.runs) == 1
    assert len(paragraph.runs[0]._r) == 1
    assert paragraph.runs[0]._r[0].type == 'page'


@then('the last paragraph contains the heading text')
def then_last_p_contains_heading_text(context):
    document = context.document
    text = context.heading_text
    paragraph = document.paragraphs[-1]
    assert paragraph.text == text


@then('the second section is landscape')
def then_the_second_section_is_landscape(context):
    new_section = context.document.sections[-1]
    # -- swapped unpack mirrors the "when" step that transposed dimensions --
    expected_height, expected_width = context.original_dimensions
    assert new_section.orientation == WD_ORIENT.LANDSCAPE
    assert new_section.page_width == expected_width
    assert new_section.page_height == expected_height


@then('the style of the last paragraph is \'{style_name}\'')
def then_the_style_of_the_last_paragraph_is_style(context, style_name):
    document = context.document
    paragraph = document.paragraphs[-1]
    assert paragraph.style.name == style_name, (
        'got %s' % paragraph.style.name
    )
| mit | 78390812abf62f25ab62217c4d2d7569 | 32.1 | 79 | 0.719481 | 3.602177 | false | false | false | false |
python-openxml/python-docx | tests/text/test_font.py | 1 | 17045 | # encoding: utf-8
"""
Test suite for the docx.text.run module
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from docx.dml.color import ColorFormat
from docx.enum.text import WD_COLOR, WD_UNDERLINE
from docx.shared import Pt
from docx.text.font import Font
import pytest
from ..unitutil.cxml import element, xml
from ..unitutil.mock import class_mock, instance_mock
class DescribeFont(object):
def it_provides_access_to_its_color_object(self, color_fixture):
font, color_, ColorFormat_ = color_fixture
color = font.color
ColorFormat_.assert_called_once_with(font.element)
assert color is color_
def it_knows_its_typeface_name(self, name_get_fixture):
font, expected_value = name_get_fixture
assert font.name == expected_value
def it_can_change_its_typeface_name(self, name_set_fixture):
font, value, expected_xml = name_set_fixture
font.name = value
assert font._element.xml == expected_xml
def it_knows_its_size(self, size_get_fixture):
font, expected_value = size_get_fixture
assert font.size == expected_value
def it_can_change_its_size(self, size_set_fixture):
font, value, expected_xml = size_set_fixture
font.size = value
assert font._element.xml == expected_xml
def it_knows_its_bool_prop_states(self, bool_prop_get_fixture):
font, prop_name, expected_state = bool_prop_get_fixture
assert getattr(font, prop_name) == expected_state
def it_can_change_its_bool_prop_settings(self, bool_prop_set_fixture):
font, prop_name, value, expected_xml = bool_prop_set_fixture
setattr(font, prop_name, value)
assert font._element.xml == expected_xml
def it_knows_whether_it_is_subscript(self, subscript_get_fixture):
font, expected_value = subscript_get_fixture
assert font.subscript == expected_value
def it_can_change_whether_it_is_subscript(self, subscript_set_fixture):
font, value, expected_xml = subscript_set_fixture
font.subscript = value
assert font._element.xml == expected_xml
def it_knows_whether_it_is_superscript(self, superscript_get_fixture):
font, expected_value = superscript_get_fixture
assert font.superscript == expected_value
def it_can_change_whether_it_is_superscript(self, superscript_set_fixture):
font, value, expected_xml = superscript_set_fixture
font.superscript = value
assert font._element.xml == expected_xml
def it_knows_its_underline_type(self, underline_get_fixture):
font, expected_value = underline_get_fixture
assert font.underline is expected_value
def it_can_change_its_underline_type(self, underline_set_fixture):
font, underline, expected_xml = underline_set_fixture
font.underline = underline
assert font._element.xml == expected_xml
def it_knows_its_highlight_color(self, highlight_get_fixture):
font, expected_value = highlight_get_fixture
assert font.highlight_color is expected_value
def it_can_change_its_highlight_color(self, highlight_set_fixture):
font, highlight_color, expected_xml = highlight_set_fixture
font.highlight_color = highlight_color
assert font._element.xml == expected_xml
# fixtures -------------------------------------------------------
@pytest.fixture(params=[
('w:r/w:rPr', 'all_caps', None),
('w:r/w:rPr/w:caps', 'all_caps', True),
('w:r/w:rPr/w:caps{w:val=on}', 'all_caps', True),
('w:r/w:rPr/w:caps{w:val=off}', 'all_caps', False),
('w:r/w:rPr/w:b{w:val=1}', 'bold', True),
('w:r/w:rPr/w:i{w:val=0}', 'italic', False),
('w:r/w:rPr/w:cs{w:val=true}', 'complex_script', True),
('w:r/w:rPr/w:bCs{w:val=false}', 'cs_bold', False),
('w:r/w:rPr/w:iCs{w:val=on}', 'cs_italic', True),
('w:r/w:rPr/w:dstrike{w:val=off}', 'double_strike', False),
('w:r/w:rPr/w:emboss{w:val=1}', 'emboss', True),
('w:r/w:rPr/w:vanish{w:val=0}', 'hidden', False),
('w:r/w:rPr/w:i{w:val=true}', 'italic', True),
('w:r/w:rPr/w:imprint{w:val=false}', 'imprint', False),
('w:r/w:rPr/w:oMath{w:val=on}', 'math', True),
('w:r/w:rPr/w:noProof{w:val=off}', 'no_proof', False),
('w:r/w:rPr/w:outline{w:val=1}', 'outline', True),
('w:r/w:rPr/w:rtl{w:val=0}', 'rtl', False),
('w:r/w:rPr/w:shadow{w:val=true}', 'shadow', True),
('w:r/w:rPr/w:smallCaps{w:val=false}', 'small_caps', False),
('w:r/w:rPr/w:snapToGrid{w:val=on}', 'snap_to_grid', True),
('w:r/w:rPr/w:specVanish{w:val=off}', 'spec_vanish', False),
('w:r/w:rPr/w:strike{w:val=1}', 'strike', True),
('w:r/w:rPr/w:webHidden{w:val=0}', 'web_hidden', False),
])
def bool_prop_get_fixture(self, request):
r_cxml, bool_prop_name, expected_value = request.param
font = Font(element(r_cxml))
return font, bool_prop_name, expected_value
@pytest.fixture(params=[
# nothing to True, False, and None ---------------------------
('w:r', 'all_caps', True,
'w:r/w:rPr/w:caps'),
('w:r', 'bold', False,
'w:r/w:rPr/w:b{w:val=0}'),
('w:r', 'italic', None,
'w:r/w:rPr'),
# default to True, False, and None ---------------------------
('w:r/w:rPr/w:cs', 'complex_script', True,
'w:r/w:rPr/w:cs'),
('w:r/w:rPr/w:bCs', 'cs_bold', False,
'w:r/w:rPr/w:bCs{w:val=0}'),
('w:r/w:rPr/w:iCs', 'cs_italic', None,
'w:r/w:rPr'),
# True to True, False, and None ------------------------------
('w:r/w:rPr/w:dstrike{w:val=1}', 'double_strike', True,
'w:r/w:rPr/w:dstrike'),
('w:r/w:rPr/w:emboss{w:val=on}', 'emboss', False,
'w:r/w:rPr/w:emboss{w:val=0}'),
('w:r/w:rPr/w:vanish{w:val=1}', 'hidden', None,
'w:r/w:rPr'),
# False to True, False, and None -----------------------------
('w:r/w:rPr/w:i{w:val=false}', 'italic', True,
'w:r/w:rPr/w:i'),
('w:r/w:rPr/w:imprint{w:val=0}', 'imprint', False,
'w:r/w:rPr/w:imprint{w:val=0}'),
('w:r/w:rPr/w:oMath{w:val=off}', 'math', None,
'w:r/w:rPr'),
# random mix -------------------------------------------------
('w:r/w:rPr/w:noProof{w:val=1}', 'no_proof', False,
'w:r/w:rPr/w:noProof{w:val=0}'),
('w:r/w:rPr', 'outline', True,
'w:r/w:rPr/w:outline'),
('w:r/w:rPr/w:rtl{w:val=true}', 'rtl', False,
'w:r/w:rPr/w:rtl{w:val=0}'),
('w:r/w:rPr/w:shadow{w:val=on}', 'shadow', True,
'w:r/w:rPr/w:shadow'),
('w:r/w:rPr/w:smallCaps', 'small_caps', False,
'w:r/w:rPr/w:smallCaps{w:val=0}'),
('w:r/w:rPr/w:snapToGrid', 'snap_to_grid', True,
'w:r/w:rPr/w:snapToGrid'),
('w:r/w:rPr/w:specVanish', 'spec_vanish', None,
'w:r/w:rPr'),
('w:r/w:rPr/w:strike{w:val=foo}', 'strike', True,
'w:r/w:rPr/w:strike'),
('w:r/w:rPr/w:webHidden', 'web_hidden', False,
'w:r/w:rPr/w:webHidden{w:val=0}'),
])
def bool_prop_set_fixture(self, request):
r_cxml, prop_name, value, expected_cxml = request.param
font = Font(element(r_cxml))
expected_xml = xml(expected_cxml)
return font, prop_name, value, expected_xml
@pytest.fixture
def color_fixture(self, ColorFormat_, color_):
font = Font(element('w:r'))
return font, color_, ColorFormat_
@pytest.fixture(params=[
('w:r', None),
('w:r/w:rPr', None),
('w:r/w:rPr/w:highlight{w:val=default}', WD_COLOR.AUTO),
('w:r/w:rPr/w:highlight{w:val=blue}', WD_COLOR.BLUE),
])
def highlight_get_fixture(self, request):
r_cxml, expected_value = request.param
font = Font(element(r_cxml), None)
return font, expected_value
@pytest.fixture(params=[
('w:r', WD_COLOR.AUTO,
'w:r/w:rPr/w:highlight{w:val=default}'),
('w:r/w:rPr', WD_COLOR.BRIGHT_GREEN,
'w:r/w:rPr/w:highlight{w:val=green}'),
('w:r/w:rPr/w:highlight{w:val=green}', WD_COLOR.YELLOW,
'w:r/w:rPr/w:highlight{w:val=yellow}'),
('w:r/w:rPr/w:highlight{w:val=yellow}', None,
'w:r/w:rPr'),
('w:r/w:rPr', None,
'w:r/w:rPr'),
('w:r', None,
'w:r/w:rPr'),
])
def highlight_set_fixture(self, request):
r_cxml, value, expected_cxml = request.param
font = Font(element(r_cxml), None)
expected_xml = xml(expected_cxml)
return font, value, expected_xml
@pytest.fixture(params=[
('w:r', None),
('w:r/w:rPr', None),
('w:r/w:rPr/w:rFonts', None),
('w:r/w:rPr/w:rFonts{w:ascii=Arial}', 'Arial'),
])
def name_get_fixture(self, request):
r_cxml, expected_value = request.param
font = Font(element(r_cxml))
return font, expected_value
@pytest.fixture(params=[
('w:r', 'Foo',
'w:r/w:rPr/w:rFonts{w:ascii=Foo,w:hAnsi=Foo}'),
('w:r/w:rPr', 'Foo',
'w:r/w:rPr/w:rFonts{w:ascii=Foo,w:hAnsi=Foo}'),
('w:r/w:rPr/w:rFonts{w:hAnsi=Foo}', 'Bar',
'w:r/w:rPr/w:rFonts{w:ascii=Bar,w:hAnsi=Bar}'),
('w:r/w:rPr/w:rFonts{w:ascii=Foo,w:hAnsi=Foo}', 'Bar',
'w:r/w:rPr/w:rFonts{w:ascii=Bar,w:hAnsi=Bar}'),
])
def name_set_fixture(self, request):
r_cxml, value, expected_r_cxml = request.param
font = Font(element(r_cxml))
expected_xml = xml(expected_r_cxml)
return font, value, expected_xml
@pytest.fixture(params=[
('w:r', None),
('w:r/w:rPr', None),
('w:r/w:rPr/w:sz{w:val=28}', Pt(14)),
])
def size_get_fixture(self, request):
r_cxml, expected_value = request.param
font = Font(element(r_cxml))
return font, expected_value
@pytest.fixture(params=[
('w:r', Pt(12), 'w:r/w:rPr/w:sz{w:val=24}'),
('w:r/w:rPr', Pt(12), 'w:r/w:rPr/w:sz{w:val=24}'),
('w:r/w:rPr/w:sz{w:val=24}', Pt(18), 'w:r/w:rPr/w:sz{w:val=36}'),
('w:r/w:rPr/w:sz{w:val=36}', None, 'w:r/w:rPr'),
])
def size_set_fixture(self, request):
r_cxml, value, expected_r_cxml = request.param
font = Font(element(r_cxml))
expected_xml = xml(expected_r_cxml)
return font, value, expected_xml
@pytest.fixture(params=[
('w:r', None),
('w:r/w:rPr', None),
('w:r/w:rPr/w:vertAlign{w:val=baseline}', False),
('w:r/w:rPr/w:vertAlign{w:val=subscript}', True),
('w:r/w:rPr/w:vertAlign{w:val=superscript}', False),
])
def subscript_get_fixture(self, request):
r_cxml, expected_value = request.param
font = Font(element(r_cxml))
return font, expected_value
@pytest.fixture(params=[
('w:r', True,
'w:r/w:rPr/w:vertAlign{w:val=subscript}'),
('w:r', False,
'w:r/w:rPr'),
('w:r', None,
'w:r/w:rPr'),
('w:r/w:rPr/w:vertAlign{w:val=subscript}', True,
'w:r/w:rPr/w:vertAlign{w:val=subscript}'),
('w:r/w:rPr/w:vertAlign{w:val=subscript}', False,
'w:r/w:rPr'),
('w:r/w:rPr/w:vertAlign{w:val=subscript}', None,
'w:r/w:rPr'),
('w:r/w:rPr/w:vertAlign{w:val=superscript}', True,
'w:r/w:rPr/w:vertAlign{w:val=subscript}'),
('w:r/w:rPr/w:vertAlign{w:val=superscript}', False,
'w:r/w:rPr/w:vertAlign{w:val=superscript}'),
('w:r/w:rPr/w:vertAlign{w:val=superscript}', None,
'w:r/w:rPr'),
('w:r/w:rPr/w:vertAlign{w:val=baseline}', True,
'w:r/w:rPr/w:vertAlign{w:val=subscript}'),
])
def subscript_set_fixture(self, request):
r_cxml, value, expected_r_cxml = request.param
font = Font(element(r_cxml))
expected_xml = xml(expected_r_cxml)
return font, value, expected_xml
@pytest.fixture(params=[
('w:r', None),
('w:r/w:rPr', None),
('w:r/w:rPr/w:vertAlign{w:val=baseline}', False),
('w:r/w:rPr/w:vertAlign{w:val=subscript}', False),
('w:r/w:rPr/w:vertAlign{w:val=superscript}', True),
])
def superscript_get_fixture(self, request):
r_cxml, expected_value = request.param
font = Font(element(r_cxml))
return font, expected_value
@pytest.fixture(params=[
('w:r', True,
'w:r/w:rPr/w:vertAlign{w:val=superscript}'),
('w:r', False,
'w:r/w:rPr'),
('w:r', None,
'w:r/w:rPr'),
('w:r/w:rPr/w:vertAlign{w:val=superscript}', True,
'w:r/w:rPr/w:vertAlign{w:val=superscript}'),
('w:r/w:rPr/w:vertAlign{w:val=superscript}', False,
'w:r/w:rPr'),
('w:r/w:rPr/w:vertAlign{w:val=superscript}', None,
'w:r/w:rPr'),
('w:r/w:rPr/w:vertAlign{w:val=subscript}', True,
'w:r/w:rPr/w:vertAlign{w:val=superscript}'),
('w:r/w:rPr/w:vertAlign{w:val=subscript}', False,
'w:r/w:rPr/w:vertAlign{w:val=subscript}'),
('w:r/w:rPr/w:vertAlign{w:val=subscript}', None,
'w:r/w:rPr'),
('w:r/w:rPr/w:vertAlign{w:val=baseline}', True,
'w:r/w:rPr/w:vertAlign{w:val=superscript}'),
])
def superscript_set_fixture(self, request):
r_cxml, value, expected_r_cxml = request.param
font = Font(element(r_cxml))
expected_xml = xml(expected_r_cxml)
return font, value, expected_xml
@pytest.fixture(params=[
('w:r', None),
('w:r/w:rPr/w:u', None),
('w:r/w:rPr/w:u{w:val=single}', True),
('w:r/w:rPr/w:u{w:val=none}', False),
('w:r/w:rPr/w:u{w:val=double}', WD_UNDERLINE.DOUBLE),
('w:r/w:rPr/w:u{w:val=wave}', WD_UNDERLINE.WAVY),
])
def underline_get_fixture(self, request):
r_cxml, expected_value = request.param
run = Font(element(r_cxml), None)
return run, expected_value
@pytest.fixture(params=[
('w:r', True, 'w:r/w:rPr/w:u{w:val=single}'),
('w:r', False, 'w:r/w:rPr/w:u{w:val=none}'),
('w:r', None, 'w:r/w:rPr'),
('w:r', WD_UNDERLINE.SINGLE, 'w:r/w:rPr/w:u{w:val=single}'),
('w:r', WD_UNDERLINE.THICK, 'w:r/w:rPr/w:u{w:val=thick}'),
('w:r/w:rPr/w:u{w:val=single}', True,
'w:r/w:rPr/w:u{w:val=single}'),
('w:r/w:rPr/w:u{w:val=single}', False,
'w:r/w:rPr/w:u{w:val=none}'),
('w:r/w:rPr/w:u{w:val=single}', None,
'w:r/w:rPr'),
('w:r/w:rPr/w:u{w:val=single}', WD_UNDERLINE.SINGLE,
'w:r/w:rPr/w:u{w:val=single}'),
('w:r/w:rPr/w:u{w:val=single}', WD_UNDERLINE.DOTTED,
'w:r/w:rPr/w:u{w:val=dotted}'),
])
def underline_set_fixture(self, request):
initial_r_cxml, value, expected_cxml = request.param
run = Font(element(initial_r_cxml), None)
expected_xml = xml(expected_cxml)
return run, value, expected_xml
# fixture components ---------------------------------------------
@pytest.fixture
def color_(self, request):
return instance_mock(request, ColorFormat)
@pytest.fixture
def ColorFormat_(self, request, color_):
return class_mock(
request, 'docx.text.font.ColorFormat', return_value=color_
)
| mit | 6eda9524869f9a212d9702bf2dd45323 | 42.482143 | 79 | 0.5022 | 2.902265 | false | false | false | false |
browniebroke/deezer-python | src/deezer/pagination.py | 1 | 3745 | from __future__ import annotations
from typing import Generator, Generic, TypeVar, overload
from urllib.parse import parse_qs, urlparse
import deezer
ResourceType = TypeVar("ResourceType")
REPR_OUTPUT_SIZE = 5
class PaginatedList(Generic[ResourceType]):
"""Abstract paginated response from the API and make them more Pythonic."""
# Lifted and adapted from PyGithub:
# https://github.com/PyGithub/PyGithub/blob/master/github/PaginatedList.py
def __init__(
self,
client: deezer.Client,
base_path: str,
parent: deezer.Resource | None = None,
**params,
):
self.__elements: list[ResourceType] = []
self.__client = client
self.__base_path = base_path
self.__base_params = params
self.__next_path: str | None = base_path
self.__next_params = params
self.__parent = parent
self.__total = None
self.__iter = iter(self)
def __repr__(self) -> str:
repr_size = 5
data: list[ResourceType | str] = list(self[: repr_size + 1])
if len(data) > repr_size:
data[-1] = "..."
return f"<{self.__class__.__name__} {data!r}>"
@overload
def __getitem__(self, index: int) -> ResourceType:
...
@overload
def __getitem__(self, index: slice) -> list[ResourceType]:
...
def __getitem__(
self,
index: int | slice,
) -> ResourceType | list[ResourceType]:
if isinstance(index, int):
self._fetch_to_index(index)
return self.__elements[index]
if index.stop is not None:
self._fetch_to_index(index.stop)
else:
while self._could_grow():
self._grow()
return self.__elements[index]
def __iter__(self) -> Generator[ResourceType, None, None]:
yield from self.__elements
while self._could_grow():
yield from self._grow()
def __next__(self) -> ResourceType:
return next(self.__iter)
def __len__(self) -> int:
return self.total
def _could_grow(self) -> bool:
return self.__next_path is not None
def _grow(self) -> list[ResourceType]:
new_elements = self._fetch_next_page()
self.__elements.extend(new_elements)
return new_elements
def _fetch_next_page(self) -> list[ResourceType]:
assert self.__next_path is not None # nosec B101
response_payload = self.__client.request(
"GET",
self.__next_path,
parent=self.__parent,
paginate_list=True,
**self.__next_params,
)
self.__next_path = None
self.__total = response_payload.get("total")
next_url = response_payload.get("next", None)
if next_url:
url_bits = urlparse(next_url)
self.__next_path = url_bits.path.lstrip("/")
self.__next_params = parse_qs(url_bits.query)
return response_payload["data"]
def _fetch_to_index(self, index: int):
while len(self.__elements) <= index and self._could_grow():
self._grow()
@property
def total(self) -> int:
"""The total number of items in the list, mirroring what Deezer returns."""
if self.__total is None:
params = self.__base_params.copy()
params["limit"] = 1
response_payload = self.__client.request(
"GET",
self.__base_path,
parent=self.__parent,
paginate_list=True,
**params,
)
self.__total = response_payload["total"]
assert self.__total is not None # nosec B101
return self.__total
| mit | 4fceddde63fac2aa2459ef488156555c | 30.208333 | 83 | 0.551669 | 4.009636 | false | false | false | false |
python-openxml/python-docx | docx/opc/packuri.py | 19 | 3880 | # encoding: utf-8
"""
Provides the PackURI value type along with some useful known pack URI strings
such as PACKAGE_URI.
"""
import posixpath
import re
class PackURI(str):
"""
Provides access to pack URI components such as the baseURI and the
filename slice. Behaves as |str| otherwise.
"""
_filename_re = re.compile('([a-zA-Z]+)([1-9][0-9]*)?')
def __new__(cls, pack_uri_str):
if not pack_uri_str[0] == '/':
tmpl = "PackURI must begin with slash, got '%s'"
raise ValueError(tmpl % pack_uri_str)
return str.__new__(cls, pack_uri_str)
@staticmethod
def from_rel_ref(baseURI, relative_ref):
"""
Return a |PackURI| instance containing the absolute pack URI formed by
translating *relative_ref* onto *baseURI*.
"""
joined_uri = posixpath.join(baseURI, relative_ref)
abs_uri = posixpath.abspath(joined_uri)
return PackURI(abs_uri)
@property
def baseURI(self):
"""
The base URI of this pack URI, the directory portion, roughly
speaking. E.g. ``'/ppt/slides'`` for ``'/ppt/slides/slide1.xml'``.
For the package pseudo-partname '/', baseURI is '/'.
"""
return posixpath.split(self)[0]
@property
def ext(self):
"""
The extension portion of this pack URI, e.g. ``'xml'`` for
``'/word/document.xml'``. Note the period is not included.
"""
# raw_ext is either empty string or starts with period, e.g. '.xml'
raw_ext = posixpath.splitext(self)[1]
return raw_ext[1:] if raw_ext.startswith('.') else raw_ext
@property
def filename(self):
"""
The "filename" portion of this pack URI, e.g. ``'slide1.xml'`` for
``'/ppt/slides/slide1.xml'``. For the package pseudo-partname '/',
filename is ''.
"""
return posixpath.split(self)[1]
@property
def idx(self):
"""
Return partname index as integer for tuple partname or None for
singleton partname, e.g. ``21`` for ``'/ppt/slides/slide21.xml'`` and
|None| for ``'/ppt/presentation.xml'``.
"""
filename = self.filename
if not filename:
return None
name_part = posixpath.splitext(filename)[0] # filename w/ext removed
match = self._filename_re.match(name_part)
if match is None:
return None
if match.group(2):
return int(match.group(2))
return None
@property
def membername(self):
"""
The pack URI with the leading slash stripped off, the form used as
the Zip file membername for the package item. Returns '' for the
package pseudo-partname '/'.
"""
return self[1:]
def relative_ref(self, baseURI):
"""
Return string containing relative reference to package item from
*baseURI*. E.g. PackURI('/ppt/slideLayouts/slideLayout1.xml') would
return '../slideLayouts/slideLayout1.xml' for baseURI '/ppt/slides'.
"""
# workaround for posixpath bug in 2.6, doesn't generate correct
# relative path when *start* (second) parameter is root ('/')
if baseURI == '/':
relpath = self[1:]
else:
relpath = posixpath.relpath(self, baseURI)
return relpath
@property
def rels_uri(self):
"""
The pack URI of the .rels part corresponding to the current pack URI.
Only produces sensible output if the pack URI is a partname or the
package pseudo-partname '/'.
"""
rels_filename = '%s.rels' % self.filename
rels_uri_str = posixpath.join(self.baseURI, '_rels', rels_filename)
return PackURI(rels_uri_str)
PACKAGE_URI = PackURI('/')
CONTENT_TYPES_URI = PackURI('/[Content_Types].xml')
| mit | ad56d07671eae010d898f7adaaf9caaf | 32.162393 | 78 | 0.587887 | 3.907351 | false | false | false | false |
python-openxml/python-docx | docx/image/helpers.py | 19 | 3286 | # encoding: utf-8
from __future__ import absolute_import, division, print_function
from struct import Struct
from .exceptions import UnexpectedEndOfFileError
BIG_ENDIAN = '>'
LITTLE_ENDIAN = '<'
class StreamReader(object):
"""
Wraps a file-like object to provide access to structured data from a
binary file. Byte-order is configurable. *base_offset* is added to any
base value provided to calculate actual location for reads.
"""
def __init__(self, stream, byte_order, base_offset=0):
super(StreamReader, self).__init__()
self._stream = stream
self._byte_order = (
LITTLE_ENDIAN if byte_order == LITTLE_ENDIAN else BIG_ENDIAN
)
self._base_offset = base_offset
def read(self, count):
"""
Allow pass-through read() call
"""
return self._stream.read(count)
def read_byte(self, base, offset=0):
"""
Return the int value of the byte at the file position defined by
self._base_offset + *base* + *offset*. If *base* is None, the byte is
read from the current position in the stream.
"""
fmt = 'B'
return self._read_int(fmt, base, offset)
def read_long(self, base, offset=0):
"""
Return the int value of the four bytes at the file position defined by
self._base_offset + *base* + *offset*. If *base* is None, the long is
read from the current position in the stream. The endian setting of
this instance is used to interpret the byte layout of the long.
"""
fmt = '<L' if self._byte_order is LITTLE_ENDIAN else '>L'
return self._read_int(fmt, base, offset)
def read_short(self, base, offset=0):
"""
Return the int value of the two bytes at the file position determined
by *base* and *offset*, similarly to ``read_long()`` above.
"""
fmt = b'<H' if self._byte_order is LITTLE_ENDIAN else b'>H'
return self._read_int(fmt, base, offset)
def read_str(self, char_count, base, offset=0):
"""
Return a string containing the *char_count* bytes at the file
position determined by self._base_offset + *base* + *offset*.
"""
def str_struct(char_count):
format_ = '%ds' % char_count
return Struct(format_)
struct = str_struct(char_count)
chars = self._unpack_item(struct, base, offset)
unicode_str = chars.decode('UTF-8')
return unicode_str
def seek(self, base, offset=0):
location = self._base_offset + base + offset
self._stream.seek(location)
def tell(self):
"""
Allow pass-through tell() call
"""
return self._stream.tell()
def _read_bytes(self, byte_count, base, offset):
self.seek(base, offset)
bytes_ = self._stream.read(byte_count)
if len(bytes_) < byte_count:
raise UnexpectedEndOfFileError
return bytes_
def _read_int(self, fmt, base, offset):
struct = Struct(fmt)
return self._unpack_item(struct, base, offset)
def _unpack_item(self, struct, base, offset):
bytes_ = self._read_bytes(struct.size, base, offset)
return struct.unpack(bytes_)[0]
| mit | 345f0f441fb81b5489d1a4916da0b072 | 32.876289 | 78 | 0.601948 | 3.954272 | false | false | false | false |
meine-stadt-transparent/meine-stadt-transparent | importer/tests/test_import_anything.py | 1 | 2398 | import json
import responses
from django.core.management import call_command
from django.test import TestCase
from importer.loader import BaseLoader
from importer.management.commands import import_anything
from mainapp.models import File
class TestImportAnything(TestCase):
fixtures = ["import-anything"]
def test_import_anything(self):
with open("testdata/oparl/File.json") as fp:
file = json.load(fp)
with open("testdata/oparl/System.json") as fp:
system = json.load(fp)
# Import a previously not existing object
file["name"] = "Old"
with responses.RequestsMock() as requests_mock:
requests_mock.add(requests_mock.GET, system["id"], json=system)
requests_mock.add(requests_mock.GET, file["id"], json=file)
call_command(import_anything.Command(), file["id"])
self.assertEqual(File.objects.get(oparl_id=file["id"]).name, "Old")
# Update an existing object
file["name"] = "New"
with responses.RequestsMock() as requests_mock:
requests_mock.add(requests_mock.GET, system["id"], json=system)
requests_mock.add(requests_mock.GET, file["id"], json=file)
call_command(import_anything.Command(), file["id"])
self.assertEqual(File.objects.get(oparl_id=file["id"]).name, "New")
def test_warn_import_mismatch_url_id(self):
with open("testdata/oparl/File.json") as fp:
file = json.load(fp)
with open("testdata/oparl/System.json") as fp:
system = json.load(fp)
alias_url = "https://ris.krefeld.de/webservice/oparl/v1/body/1/file/2-6766"
# Typeshed is wrong for assertLogs
# noinspection PyTypeChecker
with self.assertLogs(BaseLoader.__module__, level="WARNING") as cm:
with responses.RequestsMock() as requests_mock:
requests_mock.add(requests_mock.GET, system["id"], json=system)
requests_mock.add(requests_mock.GET, alias_url, json=file)
call_command(import_anything.Command(), alias_url)
self.assertEqual(
cm.output,
[
"WARNING:{}:Mismatch between url and id. url: {} id: {}".format(
BaseLoader.__module__, alias_url, file["id"]
)
],
)
| mit | a978fdff44d781e8eb54046f3a83bd86 | 36.46875 | 84 | 0.605922 | 3.899187 | false | true | false | false |
meine-stadt-transparent/meine-stadt-transparent | customization_examples/juelich_transparent/import_hooks.py | 1 | 1692 | """
Using this file, you can attach sanitize-callbacks to the importer. The following functions can be used:
- sanitize_file
- sanitize_person
- sanitize_consultation
- sanitize_meeting
- sanitize_agenda_item
- sanitize_paper
- sanitize_organization
To activate these callbacks, you need to register this file as described in the readme.
"""
import re
def sanitize_file(file):
"""
This hook can be used to clean up some data from the API.
In this example, we strip the "Sitzungsvorlage (...)" from the name of the file.
:param file: mainapp.models.file
:return: mainapp.models.file
"""
file.name = re.sub(
r"Sitzungsvorlage \((?P<name>.*)\)", r"\g<name>", file.name, flags=re.DOTALL
)
return file
def sanitize_person(person):
"""
This hook can be used to clean up some data from the API.
In this example, we strip salutations like "Herr" or "Frau" from the name.
:param person: mainapp.models.person
:return: mainapp.models.person
"""
for prefix in ["Frau", "Herr", "Herrn"]:
person.name = re.sub(r"^" + re.escape(prefix) + " ", "", person.name)
return person
def sanitize_organization(orga):
"""
This hook can be used to clean up some data from the API.
In this example, we shorten some party names to prevent line-breaks on the person list.
:param orga: mainapp.models.organization
:return: mainapp.models.organization
"""
if orga.name == "Unabhängige Wählergemeinschaft - Jülichs überparteiliche Liste":
orga.short_name = "UWG Jülich"
if orga.name == "Bündnis 90 / Die Grünen":
orga.short_name = "B.90 / Die Grünen"
return orga
| mit | 51b248c74cb2fe299dd65fa865eebb6a | 26.16129 | 104 | 0.669834 | 3.308448 | false | false | false | false |
meine-stadt-transparent/meine-stadt-transparent | mainapp/management/commands/test_search.py | 1 | 2867 | import time
from typing import Dict, List
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django_elasticsearch_dsl import Index
from mainapp.documents.index import autocomplete_analyzer, text_analyzer
from mainapp.functions.search import search_string_to_params, MainappSearch, parse_hit
class Command(BaseCommand):
help = "Search for some predefined terms to check how the search is working"
def add_arguments(self, parser):
parser.add_argument("--rebuild", action="store_true")
def analyze(self, text: str) -> Dict[str, List[Dict]]:
"""Shows what elasticsearch does with the tokens"""
elastic_index_file = Index(settings.ELASTICSEARCH_PREFIX + "-file")
elastic_index_file.analyzer(autocomplete_analyzer)
elastic_index_file.analyzer(text_analyzer)
return elastic_index_file.analyze(
body={"analyzer": "text_analyzer", "text": text}
)
def handle(self, *args, **options):
"""
The checks:
* "rese" should match "research", but currently doesn't
* "contain(|sng|ing)" should match "containing" by stemming, preserving the original and fuzzy
* "here" matches "here's" due to language analysis
* "Knutt" should prefer "Knutt" over "Knuth", but currently prefers frequency
* "Schulhaus" is for big german dataset performance
"""
if options.get("rebuild"):
start = time.perf_counter()
call_command(
"search_index", action="rebuild", force=True, models=["mainapp.Person"]
)
end = time.perf_counter()
self.stdout.write("Total: {}\n".format(end - start))
words = ["containing", "here's"]
for word in words:
self.stdout.write(
"{} {}\n".format(
word, [token["token"] for token in self.analyze(word)["tokens"]]
)
)
queries = [
"rese",
"contain",
"containsng",
"containing",
"here",
"Knutt",
"Schulhaus",
]
for query in queries:
params = search_string_to_params(query)
main_search = MainappSearch(params)
executed = main_search.execute()
self.stdout.write(
"# {}: {} | {}\n".format(query, len(executed.hits), executed.took)
)
for hit in executed.hits:
hit = parse_hit(hit)
highlight = (
str(hit.get("highlight"))
.replace("\n", " ")
.replace("\r", " ")[:100]
)
self.stdout.write(" - {}, {}\n".format(hit["name"][:30], highlight))
| mit | 7ba025efb4ac702bcf02bb2ee2199bf6 | 35.75641 | 103 | 0.560865 | 4.241124 | false | false | false | false |
meine-stadt-transparent/meine-stadt-transparent | mainapp/migrations/0001_squashed_0055_file_mentioned_persons.py | 1 | 46642 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-22 14:18
from __future__ import unicode_literals
import django.db.models.deletion
import django.utils.timezone
import djgeojson.fields
from django.conf import settings
from django.db import migrations, models
# Functions from the following migrations need manual copying.
# Move them and any dependencies into this file, then update the
# RunPython operations to refer to the local versions:
# mainapp.migrations.0012_site
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('sites', '0002_alter_domain_unique'),
]
operations = [
migrations.CreateModel(
name='AgendaItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=20)),
('position', models.IntegerField()),
('public', models.NullBooleanField()),
],
options={
'ordering': ['position'],
},
),
migrations.CreateModel(
name='Body',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=200)),
('short_name', models.CharField(max_length=50)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Committee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=200)),
('short_name', models.CharField(max_length=50)),
('body', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Body')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CommitteeMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('start', models.DateField()),
('end', models.DateField()),
('role', models.CharField(max_length=200)),
('committee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Committee')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=200)),
('short_name', models.CharField(max_length=50)),
('body', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Body')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('storage_filename', models.CharField(max_length=256)),
('displayed_filename', models.CharField(max_length=256)),
('legal_date', models.DateField()),
('filesize', models.IntegerField()),
('parsed_text', models.TextField(blank=True, null=True)),
('license', models.CharField(blank=True, max_length=200, null=True)),
('description', models.TextField(blank=True, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='LegislativeTerm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=200)),
('start_date', models.DateField()),
('end_date', models.DateField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('displayed_name', models.CharField(max_length=1000)),
('description', models.TextField(blank=True, null=True)),
('is_official', models.BooleanField()),
('osm_id', models.BigIntegerField(blank=True, null=True)),
('geometry', djgeojson.fields.GeometryField(default=None)),
('bodies', models.ManyToManyField(blank=True, to='mainapp.Body')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Meeting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=1000)),
('cancelled', models.BooleanField()),
('start', models.DateTimeField()),
('end', models.DateTimeField()),
('public', models.IntegerField(blank=True, choices=[(0, 'unknown'), (1, 'public'), (2, 'not public'),
(3, 'splitted')], default=0)),
('auxiliary_files',
models.ManyToManyField(blank=True, related_name='meeting_auxiliary_files', to='mainapp.File')),
('committees', models.ManyToManyField(blank=True, to='mainapp.Committee')),
('invitation', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='meeting_invitation', to='mainapp.File')),
('locations', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.Location')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MeetingSeries',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=1000)),
('description', models.TextField(blank=True, null=True)),
('is_regular', models.BooleanField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Paper',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('reference_number', models.CharField(max_length=50)),
('name', models.CharField(max_length=200)),
('short_name', models.CharField(max_length=50)),
('description', models.TextField(blank=True, null=True)),
('is_change_request_of',
models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.Paper')),
('submitter_committee', models.ManyToManyField(to='mainapp.Committee')),
('submitter_department', models.ManyToManyField(to='mainapp.Department')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ParliamentaryGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=200)),
('start', models.DateField()),
('end', models.DateField()),
('short_name', models.CharField(max_length=20)),
('body', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Body')),
('legislative_terms', models.ManyToManyField(blank=True, to='mainapp.LegislativeTerm')),
('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.Location')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ParliamentaryGroupMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('start', models.DateField()),
('end', models.DateField()),
('role', models.CharField(max_length=200)),
('parliamentary_group',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.ParliamentaryGroup')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=100)),
('given_name', models.CharField(max_length=50)),
('family_name', models.CharField(max_length=50)),
('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.Location')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SearchPoi',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('displayed_name', models.CharField(max_length=1000)),
('osm_id', models.BigIntegerField(blank=True, null=True)),
('osm_amenity', models.CharField(max_length=1000, null=True)),
('geometry', djgeojson.fields.GeometryField(null=True)),
('exclude_from_search', models.BooleanField(default=False)),
('bodies', models.ManyToManyField(blank=True, to='mainapp.Body')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SearchStreet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('displayed_name', models.CharField(max_length=1000)),
('osm_id', models.BigIntegerField(blank=True, null=True)),
('exclude_from_search', models.BooleanField(default=False)),
('bodies', models.ManyToManyField(blank=True, to='mainapp.Body')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='parliamentarygroupmembership',
name='person',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Person'),
),
migrations.AddField(
model_name='paper',
name='submitter_parliamentary_groups',
field=models.ManyToManyField(blank=True, to='mainapp.ParliamentaryGroup'),
),
migrations.AddField(
model_name='paper',
name='submitter_persons',
field=models.ManyToManyField(blank=True, to='mainapp.Person'),
),
migrations.AddField(
model_name='meeting',
name='meeting_series',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.MeetingSeries'),
),
migrations.AddField(
model_name='meeting',
name='persons',
field=models.ManyToManyField(blank=True, to='mainapp.Person'),
),
migrations.AddField(
model_name='meeting',
name='results_protocol',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='meeting_results_protocol', to='mainapp.File'),
),
migrations.AddField(
model_name='meeting',
name='verbatim_protocol',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='meeting_verbatim_protocol', to='mainapp.File'),
),
migrations.AddField(
model_name='file',
name='locations',
field=models.ManyToManyField(blank=True, to='mainapp.Location'),
),
migrations.AddField(
model_name='file',
name='paper',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.Paper'),
),
migrations.AddField(
model_name='department',
name='location',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.Location'),
),
migrations.AddField(
model_name='committeemembership',
name='person',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Person'),
),
migrations.AddField(
model_name='committee',
name='legislative_terms',
field=models.ManyToManyField(blank=True, to='mainapp.LegislativeTerm'),
),
migrations.AddField(
model_name='committee',
name='location',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.Location'),
),
migrations.AddField(
model_name='body',
name='center',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='body_center', to='mainapp.Location'),
),
migrations.AddField(
model_name='body',
name='legislative_terms',
field=models.ManyToManyField(blank=True, to='mainapp.LegislativeTerm'),
),
migrations.AddField(
model_name='body',
name='outline',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='body_outline', to='mainapp.Location'),
),
migrations.AddField(
model_name='agendaitem',
name='meeting',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Meeting'),
),
migrations.AddField(
model_name='agendaitem',
name='paper',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.Paper'),
),
migrations.AddField(
model_name='agendaitem',
name='title',
field=models.CharField(default='', max_length=1000),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='agendaitem',
unique_together=set([('meeting', 'position')]),
),
migrations.RemoveField(
model_name='location',
name='displayed_name',
),
migrations.AddField(
model_name='legislativeterm',
name='short_name',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='location',
name='name',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='location',
name='short_name',
field=models.CharField(default='', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='meeting',
name='short_name',
field=models.CharField(default='', max_length=50),
preserve_default=False,
),
migrations.AlterField(
model_name='meeting',
name='name',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='parliamentarygroup',
name='short_name',
field=models.CharField(max_length=50),
),
migrations.CreateModel(
name='DepartmentMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('start', models.DateField()),
('end', models.DateField()),
('role', models.CharField(max_length=200)),
('department', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Department')),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Person')),
],
options={
'abstract': False,
},
),
migrations.RenameField(
model_name='paper',
old_name='submitter_committee',
new_name='submitter_committees',
),
migrations.AlterField(
model_name='paper',
name='submitter_committees',
field=models.ManyToManyField(blank=True, to='mainapp.Committee'),
),
migrations.RenameField(
model_name='paper',
old_name='submitter_department',
new_name='submitter_departments',
),
migrations.AlterField(
model_name='paper',
name='submitter_departments',
field=models.ManyToManyField(blank=True, to='mainapp.Department'),
),
migrations.AlterModelOptions(
name='agendaitem',
options={'ordering': ['meeting', 'position']},
),
migrations.RenameField(
model_name='paper',
old_name='is_change_request_of',
new_name='change_request_of',
),
migrations.RenameField(
model_name='legislativeterm',
old_name='end_date',
new_name='end',
),
migrations.RenameField(
model_name='legislativeterm',
old_name='start_date',
new_name='start',
),
migrations.AddField(
model_name='committee',
name='end',
field=models.DateField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='committee',
name='start',
field=models.DateField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.RenameField(
model_name='meeting',
old_name='locations',
new_name='location',
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone', models.PositiveIntegerField(blank=True, null=True)),
('completion_level',
models.PositiveSmallIntegerField(default=0, verbose_name='Profile completion percentage')),
('email_is_verified', models.BooleanField(default=False, verbose_name='Email is verified')),
('personal_info_is_completed',
models.BooleanField(default=False, verbose_name='Personal info completed')),
('user',
models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='profile',
to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name_plural': 'User profiles',
'verbose_name': 'User profile',
},
),
migrations.AddField(
model_name='meetingseries',
name='short_name',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AddField(
model_name='file',
name='mime_type',
field=models.CharField(default='FIXME', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='file',
name='name',
field=models.CharField(default='FIXME', max_length=200),
preserve_default=False,
),
migrations.AlterField(
model_name='file',
name='displayed_filename',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='file',
name='storage_filename',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='committee',
name='end',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='parliamentarygroup',
name='end',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='committee',
name='start',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='parliamentarygroup',
name='start',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='meeting',
name='end',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='file',
name='legal_date',
field=models.DateField(blank=True, null=True),
),
migrations.RemoveField(
model_name='meeting',
name='meeting_series',
),
migrations.AlterField(
model_name='body',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='committee',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='committeemembership',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='department',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='departmentmembership',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='file',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='legislativeterm',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='location',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='meeting',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='paper',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.RemoveField(
model_name='parliamentarygroup',
name='body',
),
migrations.AlterField(
model_name='parliamentarygroup',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='parliamentarygroupmembership',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='person',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='searchpoi',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='searchstreet',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.DeleteModel(
name='MeetingSeries',
),
migrations.AlterField(
model_name='departmentmembership',
name='end',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='departmentmembership',
name='start',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='parliamentarygroupmembership',
name='end',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='committeemembership',
name='end',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='paper',
name='legal_date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='paper',
name='main_file',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='paper_main_file', to='mainapp.File'),
),
migrations.AlterField(
model_name='paper',
name='name',
field=models.CharField(max_length=300),
),
migrations.RemoveField(
model_name='file',
name='paper',
),
migrations.AddField(
model_name='agendaitem',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='paper',
name='files',
field=models.ManyToManyField(blank=True, to='mainapp.File'),
),
migrations.AlterField(
model_name='committeemembership',
name='start',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='parliamentarygroupmembership',
name='start',
field=models.DateField(blank=True, null=True),
),
migrations.CreateModel(
name='UserAlert',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('last_match', models.DateTimeField(null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('search_string', models.TextField(default='')),
],
),
migrations.RemoveField(
model_name='userprofile',
name='completion_level',
),
migrations.RemoveField(
model_name='userprofile',
name='personal_info_is_completed',
),
migrations.RemoveField(
model_name='userprofile',
name='phone',
),
migrations.AlterField(
model_name='agendaitem',
name='title',
field=models.CharField(max_length=2000),
),
migrations.AlterField(
model_name='agendaitem',
name='title',
field=models.TextField(),
),
migrations.CreateModel(
name='PaperType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('paper_type', models.CharField(max_length=200, unique=True)),
],
),
migrations.AddField(
model_name='paper',
name='paper_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.PaperType'),
),
migrations.AlterField(
model_name='paper',
name='reference_number',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name='agendaitem',
name='deleted',
field=models.BooleanField(default=False),
),
migrations.AlterUniqueTogether(
name='agendaitem',
unique_together=set([]),
),
migrations.AlterField(
model_name='paper',
name='reference_number',
field=models.CharField(default='[Deleted]', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='agendaitem',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='agendaitem',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.CreateModel(
name='Consultation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=255, null=True, unique=True)),
('title', models.TextField(blank=True, null=True)),
('authoritative', models.NullBooleanField()),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('meeting', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.Meeting')),
('paper', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.Paper')),
('role', models.CharField(blank=True, max_length=200, null=True)),
],
),
migrations.RemoveField(
model_name='agendaitem',
name='paper',
),
migrations.AddField(
model_name='agendaitem',
name='auxiliary_file',
field=models.ManyToManyField(blank=True, related_name='auxiliary_file', to='mainapp.File'),
),
migrations.AddField(
model_name='agendaitem',
name='end',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='agendaitem',
name='resolution_file',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='resolution_file', to='mainapp.File'),
),
migrations.AddField(
model_name='agendaitem',
name='resolution_text',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='agendaitem',
name='result',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AddField(
model_name='agendaitem',
name='start',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='agendaitem',
name='consultation',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.Consultation'),
),
migrations.RemoveField(
model_name='userprofile',
name='email_is_verified',
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=255, null=True, unique=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=200)),
('short_name', models.CharField(max_length=50)),
('start', models.DateField(blank=True, null=True)),
('end', models.DateField(blank=True, null=True)),
('color', models.CharField(blank=True, max_length=6, null=True)),
('logo', models.CharField(blank=True, max_length=255, null=True)),
('body', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Body')),
('legislative_terms', models.ManyToManyField(blank=True, to='mainapp.LegislativeTerm')),
('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.Location')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='OrganizationMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=255, null=True, unique=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('start', models.DateField(blank=True, null=True)),
('end', models.DateField(blank=True, null=True)),
('role', models.CharField(blank=True, max_length=200, null=True)),
('organization',
models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='mainapp.Organization')),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Person')),
],
options={
'abstract': False,
},
),
migrations.RemoveField(
model_name='committee',
name='body',
),
migrations.RemoveField(
model_name='committee',
name='legislative_terms',
),
migrations.RemoveField(
model_name='committee',
name='location',
),
migrations.RemoveField(
model_name='committeemembership',
name='committee',
),
migrations.RemoveField(
model_name='committeemembership',
name='person',
),
migrations.RemoveField(
model_name='department',
name='body',
),
migrations.RemoveField(
model_name='department',
name='location',
),
migrations.RemoveField(
model_name='departmentmembership',
name='department',
),
migrations.RemoveField(
model_name='departmentmembership',
name='person',
),
migrations.RemoveField(
model_name='parliamentarygroup',
name='legislative_terms',
),
migrations.RemoveField(
model_name='parliamentarygroup',
name='location',
),
migrations.RemoveField(
model_name='parliamentarygroupmembership',
name='parliamentary_group',
),
migrations.RemoveField(
model_name='parliamentarygroupmembership',
name='person',
),
migrations.RenameField(
model_name='paper',
old_name='submitter_persons',
new_name='persons',
),
migrations.RemoveField(
model_name='meeting',
name='committees',
),
migrations.RemoveField(
model_name='paper',
name='submitter_committees',
),
migrations.RemoveField(
model_name='paper',
name='submitter_departments',
),
migrations.RemoveField(
model_name='paper',
name='submitter_parliamentary_groups',
),
migrations.DeleteModel(
name='Committee',
),
migrations.DeleteModel(
name='CommitteeMembership',
),
migrations.DeleteModel(
name='Department',
),
migrations.DeleteModel(
name='DepartmentMembership',
),
migrations.DeleteModel(
name='ParliamentaryGroup',
),
migrations.DeleteModel(
name='ParliamentaryGroupMembership',
),
migrations.AddField(
model_name='meeting',
name='organizations',
field=models.ManyToManyField(blank=True, to='mainapp.Organization'),
),
migrations.AddField(
model_name='paper',
name='organizations',
field=models.ManyToManyField(blank=True, to='mainapp.Organization'),
),
migrations.CreateModel(
name='OrganizationType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, null=True)),
],
),
migrations.AddField(
model_name='organization',
name='organization_type',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE,
to='mainapp.OrganizationType'),
preserve_default=False,
),
migrations.AddField(
model_name='consultation',
name='organizations',
field=models.ManyToManyField(blank=True, to='mainapp.Organization'),
),
migrations.AddField(
model_name='file',
name='page_count',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='agendaitem',
name='key',
field=models.CharField(blank=True, max_length=20, null=True),
),
migrations.AlterField(
model_name='body',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='legislativeterm',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='legislativeterm',
name='short_name',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='location',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='meeting',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='organization',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='paper',
name='name',
field=models.TextField(),
),
migrations.AddField(
model_name='file',
name='mentioned_persons',
field=models.ManyToManyField(blank=True, to='mainapp.Person'),
),
]
| mit | 723702b00d1fe4049fb124a1d1ae811c | 41.133695 | 120 | 0.535097 | 4.551327 | false | false | false | false |
tox-dev/tox | src/tox/config/parallel.py | 2 | 2062 | from __future__ import absolute_import, unicode_literals
from argparse import ArgumentTypeError
# Environment variable names used to mark a tox process that is running a
# single environment as part of a parallel invocation.
# NOTE(review): the exact producer/consumer sites live elsewhere in tox -- confirm there.
ENV_VAR_KEY_PUBLIC = "TOX_PARALLEL_ENV"
# internal/private counterpart of the variable above
ENV_VAR_KEY_PRIVATE = "_TOX_PARALLEL_ENV"
# a parallel value of 0 turns parallel mode off (see the --parallel help text)
OFF_VALUE = 0
# parallel mode is off unless explicitly requested via -p/--parallel
DEFAULT_PARALLEL = OFF_VALUE
def auto_detect_cpus():
    """Return the number of CPUs usable by this process (always at least 1).

    Prefers the scheduler affinity mask (``os.sched_getaffinity``, Python 3
    on supporting platforms), then falls back to ``os.cpu_count`` and
    finally ``multiprocessing.cpu_count`` for older interpreters.
    """
    try:
        from os import sched_getaffinity  # python 3 only
    except ImportError:
        sched_getaffinity = None

    if sched_getaffinity is not None:

        def cpu_count():
            # count only the CPUs this process may actually run on
            return len(sched_getaffinity(0))

    else:
        # python 2 options
        try:
            from os import cpu_count
        except ImportError:
            from multiprocessing import cpu_count

    try:
        count = cpu_count()
    except NotImplementedError:  # pragma: no cov
        count = None  # pragma: no cov
    # cpu_count() may legitimately return None -- never report less than 1
    return 1 if not count else count
def parse_num_processes(s):
    """argparse ``type`` converter for the ``--parallel`` option.

    :param s: ``"all"`` (no limit, returns ``None``), ``"auto"`` (detected
        CPU count), or a non-negative integer string (``0`` disables
        parallel mode).
    :raises ArgumentTypeError: if the number is negative.  A non-numeric
        value raises ``ValueError`` from ``int`` which argparse reports as
        an invalid value.
    """
    if s == "all":
        return None
    if s == "auto":
        return auto_detect_cpus()
    value = int(s)
    if value < 0:
        # 0 is a valid value (it turns parallel mode off), so only
        # negative numbers are rejected -- the old message claimed the
        # value had to be strictly positive, which was inaccurate
        raise ArgumentTypeError("value must be positive or zero")
    return value
def add_parallel_flags(parser):
    """Register the parallel-run command line flags on *parser*."""
    # -p/--parallel: optional value, bare flag means "auto" (CPU count);
    # the string is converted through parse_num_processes
    parser.add_argument(
        "-p",
        "--parallel",
        action="store",
        dest="parallel",
        metavar="VAL",
        default=DEFAULT_PARALLEL,
        nargs="?",
        const="auto",
        type=parse_num_processes,
        help="run tox environments in parallel, the argument controls limit: all,"
        " auto or missing argument - cpu count, some positive number, 0 to turn off",
    )
    # -o/--parallel-live: simple boolean toggle
    parser.add_argument(
        "-o",
        "--parallel-live",
        dest="parallel_live",
        action="store_true",
        help="connect to stdout while running environments",
    )
def add_parallel_config(parser):
    """Register the per-testenv settings consumed by the parallel runner."""
    testenv_attributes = (
        ("depends", dict(
            type="env-list",
            help="tox environments that this environment depends on (must be run after those)",
        )),
        ("parallel_show_output", dict(
            type="bool",
            default=False,
            help="if set to True the content of the output will always be shown "
            "when running in parallel mode",
        )),
    )
    for attr_name, kwargs in testenv_attributes:
        parser.add_testenv_attribute(attr_name, **kwargs)
| mit | d1a44328a784f6823dc421de43f58320 | 24.775 | 91 | 0.593598 | 4.051081 | false | false | false | false |
bbangert/velruse | velruse/providers/bitbucket.py | 3 | 6916 | """Bitbucket Authentication Views
http://confluence.atlassian.com/display/BITBUCKET/OAuth+on+Bitbucket
"""
from pyramid.httpexceptions import HTTPFound
from pyramid.security import NO_PERMISSION_REQUIRED
import requests
from requests_oauthlib import OAuth1
from ..api import (
AuthenticationComplete,
AuthenticationDenied,
register_provider,
)
from ..compat import parse_qsl
from ..exceptions import ThirdPartyFailure
from ..settings import ProviderSettings
from ..utils import flat_url
# Bitbucket API v1.0 OAuth 1.0a endpoints plus the user/email resources
REQUEST_URL = 'https://bitbucket.org/api/1.0/oauth/request_token/'
AUTH_URL = 'https://bitbucket.org/api/1.0/oauth/authenticate/'
ACCESS_URL = 'https://bitbucket.org/api/1.0/oauth/access_token/'
USER_URL = 'https://bitbucket.org/api/1.0/user'
# str.format() template -- expects a ``username`` keyword argument
EMAIL_URL = 'https://bitbucket.org/api/1.0/users/{username}/emails'
class BitbucketAuthenticationComplete(AuthenticationComplete):
    """Marker result for a successful Bitbucket authentication.

    Returned from :meth:`BitbucketProvider.callback` carrying the normalized
    ``profile`` and OAuth ``credentials``; it adds no behavior of its own.
    """
def includeme(config):
    """Activate the Bitbucket provider's configuration directives."""
    for directive_name, directive in (
        ('add_bitbucket_login', add_bitbucket_login),
        ('add_bitbucket_login_from_settings', add_bitbucket_login_from_settings),
    ):
        config.add_directive(directive_name, directive)
def add_bitbucket_login_from_settings(config, prefix='velruse.bitbucket.'):
    """Configure the Bitbucket provider from deployment settings.

    Reads ``<prefix>consumer_key`` etc. from the registry settings and
    forwards them to :func:`add_bitbucket_login`.
    """
    provider_settings = ProviderSettings(config.registry.settings, prefix)
    # the OAuth consumer pair is mandatory, the paths are optional
    for required_key in ('consumer_key', 'consumer_secret'):
        provider_settings.update(required_key, required=True)
    for optional_key in ('login_path', 'callback_path'):
        provider_settings.update(optional_key)
    config.add_bitbucket_login(**provider_settings.kwargs)
def add_bitbucket_login(config,
                        consumer_key,
                        consumer_secret,
                        login_path='/bitbucket/login',
                        callback_path='/bitbucket/login/callback',
                        name='bitbucket'):
    """
    Add a Bitbucket login provider to the application.
    """
    provider = BitbucketProvider(name, consumer_key, consumer_secret)
    # login endpoint: kicks off the OAuth dance
    login_route = provider.login_route
    config.add_route(login_route, login_path)
    config.add_view(provider,
                    attr='login',
                    route_name=login_route,
                    permission=NO_PERMISSION_REQUIRED)
    # callback endpoint: bitbucket redirects back here; provider.callback
    # acts as the route factory and the global views render its result
    config.add_route(provider.callback_route,
                     callback_path,
                     use_global_views=True,
                     factory=provider.callback)
    register_provider(config, name, provider)
class BitbucketProvider(object):
    """Velruse provider implementing the Bitbucket OAuth 1.0a flow.

    :meth:`login` starts the dance by fetching a request token and
    redirecting the user to Bitbucket; :meth:`callback` finishes it by
    exchanging the verifier for an access token and building a normalized
    profile from the Bitbucket user/email APIs.
    """

    def __init__(self, name, consumer_key, consumer_secret):
        # registration name (usually 'bitbucket') plus the OAuth consumer pair
        self.name = name
        self.type = 'bitbucket'
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        # route names are derived from the provider name so multiple
        # bitbucket providers can be registered under different names
        self.login_route = 'velruse.%s-login' % name
        self.callback_route = 'velruse.%s-callback' % name

    def login(self, request):
        """Initiate a bitbucket login.

        Obtains an OAuth request token, stores it in the session for the
        callback step, and redirects the user to Bitbucket's authorize page.

        :raises ThirdPartyFailure: if the request-token call fails.
        """
        # grab the initial request token
        oauth = OAuth1(
            self.consumer_key,
            client_secret=self.consumer_secret,
            callback_uri=request.route_url(self.callback_route))
        resp = requests.post(REQUEST_URL, auth=oauth)
        if resp.status_code != 200:
            raise ThirdPartyFailure("Status %s: %s" % (
                resp.status_code, resp.content))
        # the response body is url-encoded (oauth_token=...&oauth_token_secret=...)
        request_token = dict(parse_qsl(resp.text))
        # store the token for later
        request.session['velruse.token'] = request_token
        # redirect the user to authorize the app
        auth_url = flat_url(AUTH_URL, oauth_token=request_token['oauth_token'])
        return HTTPFound(location=auth_url)

    def callback(self, request):
        """Process the bitbucket redirect.

        Exchanges the OAuth verifier for an access token, fetches the user
        profile (and, best-effort, the email list) and returns either a
        :class:`BitbucketAuthenticationComplete` or an
        ``AuthenticationDenied`` result.

        :raises ThirdPartyFailure: if no verifier was returned or any of
            the token/profile API calls fail.
        """
        if 'denied' in request.GET:
            return AuthenticationDenied("User denied authentication",
                                        provider_name=self.name,
                                        provider_type=self.type)
        verifier = request.GET.get('oauth_verifier')
        if not verifier:
            raise ThirdPartyFailure("No oauth_verifier returned")
        # the request token was stashed in the session by login()
        request_token = request.session.pop('velruse.token')
        # turn our request token into an access token
        oauth = OAuth1(
            self.consumer_key,
            client_secret=self.consumer_secret,
            resource_owner_key=request_token['oauth_token'],
            resource_owner_secret=request_token['oauth_token_secret'],
            verifier=verifier)
        resp = requests.post(ACCESS_URL, auth=oauth)
        if resp.status_code != 200:
            raise ThirdPartyFailure("Status %s: %s" % (
                resp.status_code, resp.content))
        access_token = dict(parse_qsl(resp.text))
        creds = {
            'oauthAccessToken': access_token['oauth_token'],
            'oauthAccessTokenSecret': access_token['oauth_token_secret'],
        }
        # setup oauth for general api calls
        oauth = OAuth1(
            self.consumer_key,
            client_secret=self.consumer_secret,
            resource_owner_key=creds['oauthAccessToken'],
            resource_owner_secret=creds['oauthAccessTokenSecret'])
        # request user profile
        resp = requests.get(USER_URL, auth=oauth)
        if resp.status_code != 200:
            raise ThirdPartyFailure("Status %s: %s" % (
                resp.status_code, resp.content))
        user_data = resp.json()
        data = user_data['user']
        username = data['username']
        # Setup the normalized contact info
        profile = {}
        profile['accounts'] = [{
            'domain': 'bitbucket.com',
            'username': username,
        }]
        profile['preferredUsername'] = username
        name = {}
        first_name = data.get('first_name', '')
        last_name = data.get('last_name', '')
        if first_name or last_name:
            name['formatted'] = u'{0} {1}'.format(first_name, last_name).strip()
        if first_name:
            name['givenName'] = first_name
        if last_name:
            name['familyName'] = last_name
        if name:
            profile['name'] = name
        # prefer the assembled full name, fall back to bitbucket's display name
        display_name = name.get('formatted')
        if not display_name:
            display_name = data.get('display_name')
        profile['displayName'] = display_name
        # request user emails (best effort: a non-200 response is ignored)
        resp = requests.get(EMAIL_URL.format(username=username), auth=oauth)
        if resp.status_code == 200:
            # NOTE: 'data' is re-bound here to the email payload
            data = resp.json()
            emails = []
            for item in data:
                email = {'value': item['email']}
                if item.get('primary'):
                    email['primary'] = True
                emails.append(email)
                # an 'active' email is treated as verified; if several are
                # active the last one wins
                if item.get('active'):
                    profile['verifiedEmail'] = item['email']
            profile['emails'] = emails
        return BitbucketAuthenticationComplete(profile=profile,
                                               credentials=creds,
                                               provider_name=self.name,
                                               provider_type=self.type)
| mit | 776436873d4b0e0248adb2d62247d0ea | 35.592593 | 80 | 0.594563 | 4.242945 | false | true | false | false |
bbangert/velruse | velruse/providers/openid.py | 2 | 13917 | from __future__ import absolute_import
import datetime
import re
from openid.consumer import consumer
from openid.extensions import ax
from openid.extensions import sreg
from pyramid.request import Response
from pyramid.httpexceptions import HTTPFound
from pyramid.security import NO_PERMISSION_REQUIRED
from ..api import (
AuthenticationComplete,
AuthenticationDenied,
register_provider,
)
from ..exceptions import (
MissingParameter,
ThirdPartyFailure,
)
log = __import__('logging').getLogger(__name__)
# Setup our attribute objects that we'll be requesting
ax_attributes = dict(
nickname='http://axschema.org/namePerson/friendly',
email='http://axschema.org/contact/email',
full_name='http://axschema.org/namePerson',
birthday='http://axschema.org/birthDate',
gender='http://axschema.org/person/gender',
postal_code='http://axschema.org/contact/postalCode/home',
country='http://axschema.org/contact/country/home',
timezone='http://axschema.org/pref/timezone',
language='http://axschema.org/pref/language',
name_prefix='http://axschema.org/namePerson/prefix',
first_name='http://axschema.org/namePerson/first',
last_name='http://axschema.org/namePerson/last',
middle_name='http://axschema.org/namePerson/middle',
name_suffix='http://axschema.org/namePerson/suffix',
web='http://axschema.org/contact/web/default',
thumbnail='http://axschema.org/media/image/default',
)
#Change names later to make things a little bit clearer
alternate_ax_attributes = dict(
nickname='http://schema.openid.net/namePerson/friendly',
email='http://schema.openid.net/contact/email',
full_name='http://schema.openid.net/namePerson',
birthday='http://schema.openid.net/birthDate',
gender='http://schema.openid.net/person/gender',
postal_code='http://schema.openid.net/contact/postalCode/home',
country='http://schema.openid.net/contact/country/home',
timezone='http://schema.openid.net/pref/timezone',
language='http://schema.openid.net/pref/language',
name_prefix='http://schema.openid.net/namePerson/prefix',
first_name='http://schema.openid.net/namePerson/first',
last_name='http://schema.openid.net/namePerson/last',
middle_name='http://schema.openid.net/namePerson/middle',
name_suffix='http://schema.openid.net/namePerson/suffix',
web='http://schema.openid.net/contact/web/default',
)
# Translation dict for AX attrib names to sreg equiv
trans_dict = dict(
full_name='fullname',
birthday='dob',
postal_code='postcode',
)
attributes = ax_attributes
class OpenIDAuthenticationComplete(AuthenticationComplete):
"""OpenID auth complete"""
def includeme(config):
config.add_directive('add_openid_login', add_openid_login)
def add_openid_login(config,
realm=None,
storage=None,
login_path='/login/openid',
callback_path='/login/openid/callback',
name='openid'):
"""
Add an OpenID login provider to the application.
`storage` should be an object conforming to the
`openid.store.interface.OpenIDStore` protocol. If left as `None` then
the provider will run in a stateless mode.
"""
provider = OpenIDConsumer(name, 'openid', realm=realm, storage=storage)
config.add_route(provider.login_route, login_path)
config.add_view(provider, attr='login', route_name=provider.login_route,
permission=NO_PERMISSION_REQUIRED)
config.add_route(provider.callback_route, callback_path,
use_global_views=True,
factory=provider.callback)
register_provider(config, name, provider)
class OpenIDConsumer(object):
"""OpenID Consumer base class
Providors using specialized OpenID based authentication subclass this.
"""
def __init__(self,
name,
_type,
realm=None,
storage=None,
context=OpenIDAuthenticationComplete):
self.openid_store = storage
self.name = name
self.type = _type
self.context = context
self.realm_override = realm
self.login_route = 'velruse.%s-url' % name
self.callback_route = 'velruse.%s-callback' % name
def _get_realm(self, request):
if self.realm_override is not None:
return self.realm_override
return request.host_url
def _lookup_identifier(self, request, identifier):
"""Extension point for inherited classes that want to change or set
a default identifier"""
return identifier
def _update_authrequest(self, request, authrequest):
"""Update the authrequest with the default extensions and attributes
we ask for
This method doesn't need to return anything, since the extensions
should be added to the authrequest object itself.
"""
# Add on the Attribute Exchange for those that support that
ax_request = ax.FetchRequest()
for attrib in attributes.values():
ax_request.add(ax.AttrInfo(attrib))
authrequest.addExtension(ax_request)
# Form the Simple Reg request
sreg_request = sreg.SRegRequest(
optional=['nickname', 'email', 'fullname', 'dob', 'gender',
'postcode', 'country', 'language', 'timezone'],
)
authrequest.addExtension(sreg_request)
def _get_access_token(self, request_token):
"""Called to exchange a request token for the access token
This method doesn't by default return anything, other OpenID+Oauth
consumers should override it to do the appropriate lookup for the
access token, and return the access token.
"""
def login(self, request):
log.debug('Handling OpenID login')
# Load default parameters that all Auth Responders take
openid_url = request.params.get('openid_identifier')
# Let inherited consumers alter the openid identifier if desired
openid_url = self._lookup_identifier(request, openid_url)
if not openid_url:
log.error('Velruse: no openid_url')
raise MissingParameter('No openid_identifier was found')
openid_session = {}
oidconsumer = consumer.Consumer(openid_session, self.openid_store)
try:
log.debug('About to try OpenID begin')
authrequest = oidconsumer.begin(openid_url)
except consumer.DiscoveryFailure:
log.debug('OpenID begin DiscoveryFailure')
raise
if authrequest is None:
log.debug('OpenID begin returned empty')
raise ThirdPartyFailure("OpenID begin returned nothing")
log.debug('Updating authrequest')
# Update the authrequest
self._update_authrequest(request, authrequest)
realm = self._get_realm(request)
# TODO: add a csrf check to the return_to URL
return_to = request.route_url(self.callback_route)
request.session['velruse.openid_session'] = openid_session
# OpenID 2.0 lets Providers request POST instead of redirect, this
# checks for such a request.
if authrequest.shouldSendRedirect():
log.debug('About to initiate OpenID redirect')
redirect_url = authrequest.redirectURL(
realm=realm,
return_to=return_to,
immediate=False)
return HTTPFound(location=redirect_url)
else:
log.debug('About to initiate OpenID POST')
html = authrequest.htmlMarkup(
realm=realm,
return_to=return_to,
immediate=False)
return Response(body=html)
def _update_profile_data(self, request, user_data, credentials):
"""Update the profile data using an OAuth request to fetch more data"""
def callback(self, request):
"""Handle incoming redirect from OpenID Provider"""
log.debug('Handling processing of response from server')
openid_session = request.session.pop('velruse.openid_session', None)
if not openid_session:
raise ThirdPartyFailure("No OpenID Session has begun.")
# Setup the consumer and parse the information coming back
oidconsumer = consumer.Consumer(openid_session, self.openid_store)
return_to = request.route_url(self.callback_route)
info = oidconsumer.complete(request.params, return_to)
if info.status in [consumer.FAILURE, consumer.CANCEL]:
return AuthenticationDenied("OpenID failure",
provider_name=self.name,
provider_type=self.type)
elif info.status == consumer.SUCCESS:
openid_identity = info.identity_url
if info.endpoint.canonicalID:
# If it's an i-name, use the canonicalID as its secure even if
# the old one is compromised
openid_identity = info.endpoint.canonicalID
user_data = extract_openid_data(
identifier=openid_identity,
sreg_resp=sreg.SRegResponse.fromSuccessResponse(info),
ax_resp=ax.FetchResponse.fromSuccessResponse(info)
)
# Did we get any OAuth info?
oauth = info.extensionResponse(
'http://specs.openid.net/extensions/oauth/1.0', False
)
cred = {}
if oauth and 'request_token' in oauth:
access_token = self._get_access_token(oauth['request_token'])
if access_token:
cred.update(access_token)
# See if we need to update our profile data with an OAuth call
self._update_profile_data(request, user_data, cred)
return self.context(profile=user_data,
credentials=cred,
provider_name=self.name,
provider_type=self.type)
else:
raise ThirdPartyFailure("OpenID failed.")
class AttribAccess(object):
"""Uniform attribute accessor for Simple Reg and Attribute Exchange
values"""
def __init__(self, sreg_resp, ax_resp):
self.sreg_resp = sreg_resp or {}
self.ax_resp = ax_resp or ax.AXKeyValueMessage()
def get(self, key, ax_only=False):
"""Get a value from either Simple Reg or AX"""
# First attempt to fetch it from AX
v = self.ax_resp.getSingle(attributes[key])
if v:
return v
if ax_only:
return None
# Translate the key if needed
if key in trans_dict:
key = trans_dict[key]
# Don't attempt to fetch keys that aren't valid sreg fields
if key not in sreg.data_fields:
return None
return self.sreg_resp.get(key)
def extract_openid_data(identifier, sreg_resp, ax_resp):
"""Extract the OpenID Data from Simple Reg and AX data
This normalizes the data to the appropriate format.
"""
attribs = AttribAccess(sreg_resp, ax_resp)
account = {}
accounts = [account]
ud = {'accounts': accounts}
if 'google.com' in identifier:
account['domain'] = 'google.com'
elif 'yahoo.com' in identifier:
account['domain'] = 'yahoo.com'
elif 'aol.com' in identifier:
account['domain'] = 'aol.com'
else:
account['domain'] = 'openid.net'
account['username'] = identifier
# Sort out the display name and preferred username
if account['domain'] == 'google.com':
# Extract the first bit as the username since Google doesn't return
# any usable nickname info
email = attribs.get('email')
if email:
ud['preferredUsername'] = re.match('(^.*?)@', email).groups()[0]
else:
ud['preferredUsername'] = attribs.get('nickname')
# We trust that Google and Yahoo both verify their email addresses
if account['domain'] in ['google.com', 'yahoo.com']:
ud['verifiedEmail'] = attribs.get('email', ax_only=True)
ud['emails'] = [attribs.get('email')]
# Parse through the name parts, assign the properly if present
name = {}
name_keys = ['name_prefix', 'first_name', 'middle_name', 'last_name',
'name_suffix']
pcard_map = {'first_name': 'givenName', 'middle_name': 'middleName',
'last_name': 'familyName',
'name_prefix': 'honorificPrefix',
'name_suffix': 'honorificSuffix'}
full_name_vals = []
for part in name_keys:
val = attribs.get(part)
if val:
full_name_vals.append(val)
name[pcard_map[part]] = val
full_name = ' '.join(full_name_vals).strip()
if not full_name:
full_name = attribs.get('full_name')
name['formatted'] = full_name
ud['name'] = name
ud['displayName'] = full_name or ud.get('preferredUsername')
urls = attribs.get('web')
if urls:
ud['urls'] = [urls]
gender = attribs.get('gender')
if gender:
ud['gender'] = {'M': 'male', 'F': 'female'}.get(gender)
birthday = attribs.get('birthday')
if birthday:
try:
# confirm that the date is valid
date = datetime.datetime.strptime(birthday, '%Y-%m-%d').date()
ud['birthday'] = date.strftime('%Y-%m-%d')
except ValueError:
pass
thumbnail = attribs.get('thumbnail')
if thumbnail:
ud['photos'] = [{'type': 'thumbnail', 'value': thumbnail}]
ud['thumbnailUrl'] = thumbnail
# Now strip out empty values
for k, v in ud.items():
if not v or (isinstance(v, list) and not v[0]):
del ud[k]
return ud
| mit | e13c1df81064b21476f9cd02baae572a | 34.59335 | 79 | 0.620105 | 4.173013 | false | false | false | false |
bbangert/velruse | tests/selenium/testapp/__init__.py | 2 | 4507 | import json
from pyramid.config import Configurator
from pyramid.session import UnencryptedCookieSessionFactoryConfig
from pyramid.view import view_config
from velruse import login_url
@view_config(
name='login',
request_method='GET',
renderer='{}:templates/login.mako'.format(__name__),
)
def login_view(request):
return {
'login_url': lambda name: login_url(request, name),
'providers': request.registry.settings['login_providers'],
}
@view_config(
context='velruse.AuthenticationComplete',
renderer='{}:templates/result.mako'.format(__name__),
)
def login_complete_view(request):
context = request.context
result = {
'profile': context.profile,
'credentials': context.credentials,
}
return {
'result': json.dumps(result, indent=4),
}
@view_config(
context='velruse.AuthenticationDenied',
renderer='{}:templates/result.mako'.format(__name__),
)
def login_denied_view(request):
return {
'result': 'denied',
}
def main(global_conf, **settings):
session_factory = UnencryptedCookieSessionFactoryConfig('seekrit')
providers = settings.get('login_providers', '')
providers = filter(None, [p.strip()
for line in providers.splitlines()
for p in line.split(', ')])
settings['login_providers'] = providers
config = Configurator(settings=settings)
config.set_session_factory(session_factory)
if 'facebook' in providers:
config.include('velruse.providers.facebook')
config.add_facebook_login(
settings['velruse.facebook.app_id'],
settings['velruse.facebook.app_secret'],
)
if 'github' in providers:
config.include('velruse.providers.github')
config.add_github_login(
settings['velruse.github.app_id'],
settings['velruse.github.app_secret'],
)
if 'twitter' in providers:
config.include('velruse.providers.twitter')
config.add_twitter_login(
settings['velruse.twitter.consumer_key'],
settings['velruse.twitter.consumer_secret'],
)
if 'live' in providers:
config.include('velruse.providers.live')
config.add_live_login(
settings['velruse.live.client_id'],
settings['velruse.live.client_secret'],
)
if 'bitbucket' in providers:
config.include('velruse.providers.bitbucket')
config.add_bitbucket_login(
settings['velruse.bitbucket.consumer_key'],
settings['velruse.bitbucket.consumer_secret'],
)
if 'google_hybrid' in providers:
config.include('velruse.providers.google_hybrid')
config.add_google_hybrid_login(
realm=settings['velruse.google_hybrid.realm'],
consumer_key=settings['velruse.google_hybrid.consumer_key'],
consumer_secret=settings['velruse.google_hybrid.consumer_secret'],
scope=settings.get('velruse.google_hybrid.scope'),
login_path='/login/google_hybrid',
callback_path='/login/google_hybrid/callback',
name='google_hybrid',
)
if 'google_oauth2' in providers:
config.include('velruse.providers.google_oauth2')
config.add_google_oauth2_login(
consumer_key=settings['velruse.google_oauth2.consumer_key'],
consumer_secret=settings['velruse.google_oauth2.consumer_secret'],
scope=settings.get('velruse.google_oauth2.scope'),
login_path='/login/google_oauth2',
callback_path='/login/google_oauth2/callback',
name='google_oauth2',
)
if 'openid' in providers:
config.include('velruse.providers.openid')
config.add_openid_login(
realm=settings['velruse.openid.realm'],
)
if 'yahoo' in providers:
config.include('velruse.providers.yahoo')
config.add_yahoo_login(
realm=settings['velruse.yahoo.realm'],
consumer_key=settings['velruse.yahoo.consumer_key'],
consumer_secret=settings['velruse.yahoo.consumer_secret'],
)
if 'linkedin' in providers:
config.include('velruse.providers.linkedin')
config.add_linkedin_login(
settings['velruse.linkedin.consumer_key'],
settings['velruse.linkedin.consumer_secret'],
)
config.scan(__name__)
return config.make_wsgi_app()
| mit | 83f5e1740db2651fc4375ad7b83355d4 | 31.42446 | 78 | 0.626137 | 3.960457 | false | true | false | false |
coleifer/peewee | playhouse/pool.py | 1 | 11476 | """
Lightweight connection pooling for peewee.
In a multi-threaded application, up to `max_connections` will be opened. Each
thread (or, if using gevent, greenlet) will have it's own connection.
In a single-threaded application, only one connection will be created. It will
be continually recycled until either it exceeds the stale timeout or is closed
explicitly (using `.manual_close()`).
By default, all your application needs to do is ensure that connections are
closed when you are finished with them, and they will be returned to the pool.
For web applications, this typically means that at the beginning of a request,
you will open a connection, and when you return a response, you will close the
connection.
Simple Postgres pool example code:
# Use the special postgresql extensions.
from playhouse.pool import PooledPostgresqlExtDatabase
db = PooledPostgresqlExtDatabase(
'my_app',
max_connections=32,
stale_timeout=300, # 5 minutes.
user='postgres')
class BaseModel(Model):
class Meta:
database = db
That's it!
"""
import heapq
import logging
import random
import time
from collections import namedtuple
from itertools import chain
try:
from psycopg2.extensions import TRANSACTION_STATUS_IDLE
from psycopg2.extensions import TRANSACTION_STATUS_INERROR
from psycopg2.extensions import TRANSACTION_STATUS_UNKNOWN
except ImportError:
TRANSACTION_STATUS_IDLE = \
TRANSACTION_STATUS_INERROR = \
TRANSACTION_STATUS_UNKNOWN = None
from peewee import MySQLDatabase
from peewee import PostgresqlDatabase
from peewee import SqliteDatabase
logger = logging.getLogger('peewee.pool')
def make_int(val):
if val is not None and not isinstance(val, (int, float)):
return int(val)
return val
class MaxConnectionsExceeded(ValueError): pass
PoolConnection = namedtuple('PoolConnection', ('timestamp', 'connection',
'checked_out'))
class PooledDatabase(object):
def __init__(self, database, max_connections=20, stale_timeout=None,
timeout=None, **kwargs):
self._max_connections = make_int(max_connections)
self._stale_timeout = make_int(stale_timeout)
self._wait_timeout = make_int(timeout)
if self._wait_timeout == 0:
self._wait_timeout = float('inf')
# Available / idle connections stored in a heap, sorted oldest first.
self._connections = []
# Mapping of connection id to PoolConnection. Ordinarily we would want
# to use something like a WeakKeyDictionary, but Python typically won't
# allow us to create weak references to connection objects.
self._in_use = {}
# Use the memory address of the connection as the key in the event the
# connection object is not hashable. Connections will not get
# garbage-collected, however, because a reference to them will persist
# in "_in_use" as long as the conn has not been closed.
self.conn_key = id
super(PooledDatabase, self).__init__(database, **kwargs)
def init(self, database, max_connections=None, stale_timeout=None,
timeout=None, **connect_kwargs):
super(PooledDatabase, self).init(database, **connect_kwargs)
if max_connections is not None:
self._max_connections = make_int(max_connections)
if stale_timeout is not None:
self._stale_timeout = make_int(stale_timeout)
if timeout is not None:
self._wait_timeout = make_int(timeout)
if self._wait_timeout == 0:
self._wait_timeout = float('inf')
def connect(self, reuse_if_open=False):
if not self._wait_timeout:
return super(PooledDatabase, self).connect(reuse_if_open)
expires = time.time() + self._wait_timeout
while expires > time.time():
try:
ret = super(PooledDatabase, self).connect(reuse_if_open)
except MaxConnectionsExceeded:
time.sleep(0.1)
else:
return ret
raise MaxConnectionsExceeded('Max connections exceeded, timed out '
'attempting to connect.')
def _connect(self):
while True:
try:
# Remove the oldest connection from the heap.
ts, conn = heapq.heappop(self._connections)
key = self.conn_key(conn)
except IndexError:
ts = conn = None
logger.debug('No connection available in pool.')
break
else:
if self._is_closed(conn):
# This connecton was closed, but since it was not stale
# it got added back to the queue of available conns. We
# then closed it and marked it as explicitly closed, so
# it's safe to throw it away now.
# (Because Database.close() calls Database._close()).
logger.debug('Connection %s was closed.', key)
ts = conn = None
elif self._stale_timeout and self._is_stale(ts):
# If we are attempting to check out a stale connection,
# then close it. We don't need to mark it in the "closed"
# set, because it is not in the list of available conns
# anymore.
logger.debug('Connection %s was stale, closing.', key)
self._close(conn, True)
ts = conn = None
else:
break
if conn is None:
if self._max_connections and (
len(self._in_use) >= self._max_connections):
raise MaxConnectionsExceeded('Exceeded maximum connections.')
conn = super(PooledDatabase, self)._connect()
ts = time.time() - random.random() / 1000
key = self.conn_key(conn)
logger.debug('Created new connection %s.', key)
self._in_use[key] = PoolConnection(ts, conn, time.time())
return conn
def _is_stale(self, timestamp):
# Called on check-out and check-in to ensure the connection has
# not outlived the stale timeout.
return (time.time() - timestamp) > self._stale_timeout
def _is_closed(self, conn):
return False
def _can_reuse(self, conn):
# Called on check-in to make sure the connection can be re-used.
return True
def _close(self, conn, close_conn=False):
key = self.conn_key(conn)
if close_conn:
super(PooledDatabase, self)._close(conn)
elif key in self._in_use:
pool_conn = self._in_use.pop(key)
if self._stale_timeout and self._is_stale(pool_conn.timestamp):
logger.debug('Closing stale connection %s.', key)
super(PooledDatabase, self)._close(conn)
elif self._can_reuse(conn):
logger.debug('Returning %s to pool.', key)
heapq.heappush(self._connections, (pool_conn.timestamp, conn))
else:
logger.debug('Closed %s.', key)
def manual_close(self):
"""
Close the underlying connection without returning it to the pool.
"""
if self.is_closed():
return False
# Obtain reference to the connection in-use by the calling thread.
conn = self.connection()
# A connection will only be re-added to the available list if it is
# marked as "in use" at the time it is closed. We will explicitly
# remove it from the "in use" list, call "close()" for the
# side-effects, and then explicitly close the connection.
self._in_use.pop(self.conn_key(conn), None)
self.close()
self._close(conn, close_conn=True)
def close_idle(self):
# Close any open connections that are not currently in-use.
with self._lock:
for _, conn in self._connections:
self._close(conn, close_conn=True)
self._connections = []
def close_stale(self, age=600):
# Close any connections that are in-use but were checked out quite some
# time ago and can be considered stale.
with self._lock:
in_use = {}
cutoff = time.time() - age
n = 0
for key, pool_conn in self._in_use.items():
if pool_conn.checked_out < cutoff:
self._close(pool_conn.connection, close_conn=True)
n += 1
else:
in_use[key] = pool_conn
self._in_use = in_use
return n
def close_all(self):
# Close all connections -- available and in-use. Warning: may break any
# active connections used by other threads.
self.close()
with self._lock:
for _, conn in self._connections:
self._close(conn, close_conn=True)
for pool_conn in self._in_use.values():
self._close(pool_conn.connection, close_conn=True)
self._connections = []
self._in_use = {}
class PooledMySQLDatabase(PooledDatabase, MySQLDatabase):
def _is_closed(self, conn):
try:
conn.ping(False)
except:
return True
else:
return False
class _PooledPostgresqlDatabase(PooledDatabase):
def _is_closed(self, conn):
if conn.closed:
return True
txn_status = conn.get_transaction_status()
if txn_status == TRANSACTION_STATUS_UNKNOWN:
return True
elif txn_status != TRANSACTION_STATUS_IDLE:
conn.rollback()
return False
def _can_reuse(self, conn):
txn_status = conn.get_transaction_status()
# Do not return connection in an error state, as subsequent queries
# will all fail. If the status is unknown then we lost the connection
# to the server and the connection should not be re-used.
if txn_status == TRANSACTION_STATUS_UNKNOWN:
return False
elif txn_status == TRANSACTION_STATUS_INERROR:
conn.reset()
elif txn_status != TRANSACTION_STATUS_IDLE:
conn.rollback()
return True
class PooledPostgresqlDatabase(_PooledPostgresqlDatabase, PostgresqlDatabase):
pass
try:
from playhouse.postgres_ext import PostgresqlExtDatabase
class PooledPostgresqlExtDatabase(_PooledPostgresqlDatabase, PostgresqlExtDatabase):
pass
except ImportError:
PooledPostgresqlExtDatabase = None
class _PooledSqliteDatabase(PooledDatabase):
def _is_closed(self, conn):
try:
conn.total_changes
except:
return True
else:
return False
class PooledSqliteDatabase(_PooledSqliteDatabase, SqliteDatabase):
pass
try:
from playhouse.sqlite_ext import SqliteExtDatabase
class PooledSqliteExtDatabase(_PooledSqliteDatabase, SqliteExtDatabase):
pass
except ImportError:
PooledSqliteExtDatabase = None
try:
from playhouse.sqlite_ext import CSqliteExtDatabase
class PooledCSqliteExtDatabase(_PooledSqliteDatabase, CSqliteExtDatabase):
pass
except ImportError:
PooledCSqliteExtDatabase = None
| mit | 18bd1c29dc2bcc812dd3b5b56c257119 | 35.08805 | 88 | 0.609184 | 4.283688 | false | false | false | false |
coleifer/peewee | examples/sqlite_fts_compression.py | 1 | 1712 | #
# Small example demonstrating the use of zlib compression with the Sqlite
# full-text search extension.
#
import zlib
from peewee import *
from playhouse.sqlite_ext import *
db = SqliteExtDatabase(':memory:')
class SearchIndex(FTSModel):
content = SearchField()
class Meta:
database = db
@db.func('zlib_compress')
def _zlib_compress(data):
if data is not None:
if isinstance(data, str):
data = data.encode('utf8')
return zlib.compress(data, 9)
@db.func('zlib_decompress')
def _zlib_decompress(data):
if data is not None:
return zlib.decompress(data)
SearchIndex.create_table(
tokenize='porter',
compress='zlib_compress',
uncompress='zlib_decompress')
phrases = [
'A faith is a necessity to a man. Woe to him who believes in nothing.',
('All who call on God in true faith, earnestly from the heart, will '
'certainly be heard, and will receive what they have asked and desired.'),
('Be faithful in small things because it is in them that your strength '
'lies.'),
('Faith consists in believing when it is beyond the power of reason to '
'believe.'),
('Faith has to do with things that are not seen and hope with things that '
'are not at hand.')]
for phrase in phrases:
SearchIndex.create(content=phrase)
# Use the simple ranking algorithm.
query = SearchIndex.search('faith things', with_score=True)
for row in query:
print(round(row.score, 2), row.content.decode('utf8'))
print('---')
# Use the Okapi-BM25 ranking algorithm.
query = SearchIndex.search_bm25('believe', with_score=True)
for row in query:
print(round(row.score, 2), row.content.decode('utf8'))
db.close()
| mit | 235a8fc15a3aff052a29f55762217622 | 25.75 | 79 | 0.683411 | 3.444668 | false | false | false | false |
coleifer/peewee | examples/diary.py | 1 | 2450 | #!/usr/bin/env python
from collections import OrderedDict
import datetime
from getpass import getpass
import sys
from peewee import *
from playhouse.sqlcipher_ext import SqlCipherDatabase
# Defer initialization of the database until the script is executed from the
# command-line.
db = SqlCipherDatabase(None)
class Entry(Model):
content = TextField()
timestamp = DateTimeField(default=datetime.datetime.now)
class Meta:
database = db
def initialize(passphrase):
db.init('diary.db', passphrase=passphrase)
db.create_tables([Entry])
def menu_loop():
choice = None
while choice != 'q':
for key, value in menu.items():
print('%s) %s' % (key, value.__doc__))
choice = input('Action: ').lower().strip()
if choice in menu:
menu[choice]()
def add_entry():
"""Add entry"""
print('Enter your entry. Press ctrl+d when finished.')
data = sys.stdin.read().strip()
if data and input('Save entry? [Yn] ') != 'n':
Entry.create(content=data)
print('Saved successfully.')
def view_entries(search_query=None):
"""View previous entries"""
query = Entry.select().order_by(Entry.timestamp.desc())
if search_query:
query = query.where(Entry.content.contains(search_query))
for entry in query:
timestamp = entry.timestamp.strftime('%A %B %d, %Y %I:%M%p')
print(timestamp)
print('=' * len(timestamp))
print(entry.content)
print('n) next entry')
print('d) delete entry')
print('q) return to main menu')
action = input('Choice? (Ndq) ').lower().strip()
if action == 'q':
break
elif action == 'd':
entry.delete_instance()
break
def search_entries():
"""Search entries"""
view_entries(input('Search query: '))
menu = OrderedDict([
('a', add_entry),
('v', view_entries),
('s', search_entries),
])
if __name__ == '__main__':
# Collect the passphrase using a secure method.
passphrase = getpass('Enter password: ')
if not passphrase:
sys.stderr.write('Passphrase required to access diary.\n')
sys.stderr.flush()
sys.exit(1)
elif len(passphrase) < 8:
sys.stderr.write('Passphrase must be at least 8 characters.\n')
sys.stderr.flush()
sys.exit(1)
# Initialize the database.
initialize(passphrase)
menu_loop()
| mit | 7601dcadc683b6f63e667ceaafd31ac1 | 24.520833 | 76 | 0.608163 | 3.864353 | false | false | false | false |
coleifer/peewee | playhouse/reflection.py | 1 | 30928 | try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
from collections import namedtuple
from inspect import isclass
import re
import warnings
from peewee import *
from peewee import _StringField
from peewee import _query_val_transform
from peewee import CommaNodeList
from peewee import SCOPE_VALUES
from peewee import make_snake_case
from peewee import text_type
try:
from pymysql.constants import FIELD_TYPE
except ImportError:
try:
from MySQLdb.constants import FIELD_TYPE
except ImportError:
FIELD_TYPE = None
try:
from playhouse import postgres_ext
except ImportError:
postgres_ext = None
try:
from playhouse.cockroachdb import CockroachDatabase
except ImportError:
CockroachDatabase = None
# Identifiers that generated model attributes must avoid. Derived from the
# running interpreter's keyword list so Python-3-only keywords ('nonlocal',
# 'async', 'await', 'True', 'False', 'None', ...) are covered too; the old
# hand-maintained list stopped at the Python 2 keyword set. 'exec' and
# 'print' (keywords only in Python 2) are kept so the set remains a strict
# superset of the original for any caller relying on them.
RESERVED_WORDS = set(keyword.kwlist) | {'exec', 'print'}
class UnknownField(object):
    """Marker standing in for a column type that could not be resolved.

    Column.get_field() renders columns whose field_class is this marker
    with a trailing comment showing the raw database column type.
    """
class Column(object):
    """Metadata describing one introspected database column.

    An instance records everything needed to render the column as a line
    of peewee field-definition source code (see :meth:`get_field`):
    field class, nullability, indexes, default, and (optionally) the
    foreign-key linkage installed by :meth:`set_foreign_key`.
    """
    # NOTE(review): not referenced inside this class; presumably consumed
    # elsewhere in the module -- do not remove.
    primary_key_types = (IntegerField, AutoField)

    def __init__(self, name, field_class, raw_column_type, nullable,
                 primary_key=False, column_name=None, index=False,
                 unique=False, default=None, extra_parameters=None):
        self.name = name
        self.field_class = field_class
        self.raw_column_type = raw_column_type
        self.nullable = nullable
        self.primary_key = primary_key
        self.column_name = column_name
        self.index = index
        self.unique = unique
        self.default = default
        self.extra_parameters = extra_parameters

        # Foreign-key metadata; populated later by set_foreign_key().
        self.rel_model = None
        self.related_name = None
        self.to_field = None

    def __repr__(self):
        shown = ('field_class', 'raw_column_type', 'nullable',
                 'primary_key', 'column_name')
        details = ', '.join(f'{attr}={getattr(self, attr)}'
                            for attr in shown)
        return f'Column({self.name}, {details})'

    def get_field_parameters(self):
        """Build the keyword-parameter dict for the rendered field.

        Values are already formatted as source-code fragments (strings),
        ready to be joined by get_field().
        """
        params = {}
        if self.extra_parameters is not None:
            params.update(self.extra_parameters)

        # Default attributes shared by every field type.
        if self.nullable:
            params['null'] = True
        if self.field_class is ForeignKeyField or self.name != self.column_name:
            params['column_name'] = f"'{self.column_name}'"
        if self.primary_key and not issubclass(self.field_class, AutoField):
            params['primary_key'] = True
        if self.default is not None:
            params['constraints'] = f'[SQL("DEFAULT {self.default}")]'

        # ForeignKeyField-specific attributes.
        if self.is_foreign_key():
            params['model'] = self.rel_model
            if self.to_field:
                params['field'] = f"'{self.to_field}'"
            if self.related_name:
                params['backref'] = f"'{self.related_name}'"

        # Index flags (primary keys carry an implicit index already).
        if not self.is_primary_key():
            if self.unique:
                params['unique'] = 'True'
            elif self.index and not self.is_foreign_key():
                params['index'] = 'True'
        return params

    def is_primary_key(self):
        """Whether this column serves as the table's primary key."""
        return self.field_class is AutoField or self.primary_key

    def is_foreign_key(self):
        """Whether this column has been marked as a foreign key."""
        return self.field_class is ForeignKeyField

    def is_self_referential_fk(self):
        """Whether this is a foreign key pointing back at its own table."""
        return (self.field_class is ForeignKeyField and
                self.rel_model == "'self'")

    def set_foreign_key(self, foreign_key, model_names, dest=None,
                        related_name=None):
        """Mark this column as a foreign key to another (or the same) table.

        :param foreign_key: introspected FK metadata (source/dest tables).
        :param model_names: mapping of table name -> generated model name.
        :param dest: optional destination column (sets ``field``).
        :param related_name: optional backref name.
        """
        self.foreign_key = foreign_key
        self.field_class = ForeignKeyField
        if foreign_key.dest_table == foreign_key.table:
            self.rel_model = "'self'"  # Self-referential FK.
        else:
            self.rel_model = model_names[foreign_key.dest_table]
        self.to_field = dest and dest.name or None
        self.related_name = related_name or None

    def get_field(self):
        """Render this column as a line of field-definition source code."""
        rendered = {
            key: (value.__name__
                  if isclass(value) and issubclass(value, Field)
                  else value)
            for key, value in self.get_field_parameters().items()}

        arg_list = ', '.join(f'{k}={v}'
                             for k, v in sorted(rendered.items()))
        field = f'{self.name} = {self.field_class.__name__}({arg_list})'
        if self.field_class is UnknownField:
            # Leave a breadcrumb showing the unmapped raw column type.
            field = f'{field} # {self.raw_column_type}'
        return field
class Metadata(object):
column_map = {}
extension_import = ''
def __init__(self, database):
self.database = database
self.requires_extension = False
def execute(self, sql, *params):
return self.database.execute_sql(sql, params)
def get_columns(self, table, schema=None):
metadata = OrderedDict(
(metadata.name, metadata)
for metadata in self.database.get_columns(table, schema))
# Look up the actual column type for each column.
column_types, extra_params = self.get_column_types(table, schema)
# Look up the primary keys.
pk_names = self.get_primary_keys(table, schema)
if len(pk_names) == 1:
pk = pk_names[0]
if column_types[pk] is IntegerField:
column_types[pk] = AutoField
elif column_types[pk] is BigIntegerField:
column_types[pk] = BigAutoField
columns = OrderedDict()
for name, column_data in metadata.items():
field_class = column_types[name]
default = self._clean_default(field_class, column_data.default)
columns[name] = Column(
name,
field_class=field_class,
raw_column_type=column_data.data_type,
nullable=column_data.null,
primary_key=column_data.primary_key,
column_name=name,
default=default,
extra_parameters=extra_params.get(name))
return columns
def get_column_types(self, table, schema=None):
raise NotImplementedError
def _clean_default(self, field_class, default):
if default is None or field_class in (AutoField, BigAutoField) or \
default.lower() == 'null':
return
if issubclass(field_class, _StringField) and \
isinstance(default, text_type) and not default.startswith("'"):
default = "'%s'" % default
return default or "''"
def get_foreign_keys(self, table, schema=None):
return self.database.get_foreign_keys(table, schema)
def get_primary_keys(self, table, schema=None):
return self.database.get_primary_keys(table, schema)
def get_indexes(self, table, schema=None):
return self.database.get_indexes(table, schema)
class PostgresqlMetadata(Metadata):
column_map = {
16: BooleanField,
17: BlobField,
20: BigIntegerField,
21: SmallIntegerField,
23: IntegerField,
25: TextField,
700: FloatField,
701: DoubleField,
1042: CharField, # blank-padded CHAR
1043: CharField,
1082: DateField,
1114: DateTimeField,
1184: DateTimeField,
1083: TimeField,
1266: TimeField,
1700: DecimalField,
2950: UUIDField, # UUID
}
array_types = {
1000: BooleanField,
1001: BlobField,
1005: SmallIntegerField,
1007: IntegerField,
1009: TextField,
1014: CharField,
1015: CharField,
1016: BigIntegerField,
1115: DateTimeField,
1182: DateField,
1183: TimeField,
2951: UUIDField,
}
extension_import = 'from playhouse.postgres_ext import *'
def __init__(self, database):
super(PostgresqlMetadata, self).__init__(database)
if postgres_ext is not None:
# Attempt to add types like HStore and JSON.
cursor = self.execute('select oid, typname, format_type(oid, NULL)'
' from pg_type;')
results = cursor.fetchall()
for oid, typname, formatted_type in results:
if typname == 'json':
self.column_map[oid] = postgres_ext.JSONField
elif typname == 'jsonb':
self.column_map[oid] = postgres_ext.BinaryJSONField
elif typname == 'hstore':
self.column_map[oid] = postgres_ext.HStoreField
elif typname == 'tsvector':
self.column_map[oid] = postgres_ext.TSVectorField
for oid in self.array_types:
self.column_map[oid] = postgres_ext.ArrayField
def get_column_types(self, table, schema):
column_types = {}
extra_params = {}
extension_types = set((
postgres_ext.ArrayField,
postgres_ext.BinaryJSONField,
postgres_ext.JSONField,
postgres_ext.TSVectorField,
postgres_ext.HStoreField)) if postgres_ext is not None else set()
# Look up the actual column type for each column.
identifier = '%s."%s"' % (schema, table)
cursor = self.execute(
'SELECT attname, atttypid FROM pg_catalog.pg_attribute '
'WHERE attrelid = %s::regclass AND attnum > %s', identifier, 0)
# Store column metadata in dictionary keyed by column name.
for name, oid in cursor.fetchall():
column_types[name] = self.column_map.get(oid, UnknownField)
if column_types[name] in extension_types:
self.requires_extension = True
if oid in self.array_types:
extra_params[name] = {'field_class': self.array_types[oid]}
return column_types, extra_params
def get_columns(self, table, schema=None):
schema = schema or 'public'
return super(PostgresqlMetadata, self).get_columns(table, schema)
def get_foreign_keys(self, table, schema=None):
schema = schema or 'public'
return super(PostgresqlMetadata, self).get_foreign_keys(table, schema)
def get_primary_keys(self, table, schema=None):
schema = schema or 'public'
return super(PostgresqlMetadata, self).get_primary_keys(table, schema)
def get_indexes(self, table, schema=None):
schema = schema or 'public'
return super(PostgresqlMetadata, self).get_indexes(table, schema)
class CockroachDBMetadata(PostgresqlMetadata):
# CRDB treats INT the same as BIGINT, so we just map bigint type OIDs to
# regular IntegerField.
column_map = PostgresqlMetadata.column_map.copy()
column_map[20] = IntegerField
array_types = PostgresqlMetadata.array_types.copy()
array_types[1016] = IntegerField
extension_import = 'from playhouse.cockroachdb import *'
def __init__(self, database):
Metadata.__init__(self, database)
self.requires_extension = True
if postgres_ext is not None:
# Attempt to add JSON types.
cursor = self.execute('select oid, typname, format_type(oid, NULL)'
' from pg_type;')
results = cursor.fetchall()
for oid, typname, formatted_type in results:
if typname == 'jsonb':
self.column_map[oid] = postgres_ext.BinaryJSONField
for oid in self.array_types:
self.column_map[oid] = postgres_ext.ArrayField
class MySQLMetadata(Metadata):
if FIELD_TYPE is None:
column_map = {}
else:
column_map = {
FIELD_TYPE.BLOB: TextField,
FIELD_TYPE.CHAR: CharField,
FIELD_TYPE.DATE: DateField,
FIELD_TYPE.DATETIME: DateTimeField,
FIELD_TYPE.DECIMAL: DecimalField,
FIELD_TYPE.DOUBLE: FloatField,
FIELD_TYPE.FLOAT: FloatField,
FIELD_TYPE.INT24: IntegerField,
FIELD_TYPE.LONG_BLOB: TextField,
FIELD_TYPE.LONG: IntegerField,
FIELD_TYPE.LONGLONG: BigIntegerField,
FIELD_TYPE.MEDIUM_BLOB: TextField,
FIELD_TYPE.NEWDECIMAL: DecimalField,
FIELD_TYPE.SHORT: IntegerField,
FIELD_TYPE.STRING: CharField,
FIELD_TYPE.TIMESTAMP: DateTimeField,
FIELD_TYPE.TIME: TimeField,
FIELD_TYPE.TINY_BLOB: TextField,
FIELD_TYPE.TINY: IntegerField,
FIELD_TYPE.VAR_STRING: CharField,
}
def __init__(self, database, **kwargs):
if 'password' in kwargs:
kwargs['passwd'] = kwargs.pop('password')
super(MySQLMetadata, self).__init__(database, **kwargs)
def get_column_types(self, table, schema=None):
column_types = {}
# Look up the actual column type for each column.
cursor = self.execute('SELECT * FROM `%s` LIMIT 1' % table)
# Store column metadata in dictionary keyed by column name.
for column_description in cursor.description:
name, type_code = column_description[:2]
column_types[name] = self.column_map.get(type_code, UnknownField)
return column_types, {}
class SqliteMetadata(Metadata):
column_map = {
'bigint': BigIntegerField,
'blob': BlobField,
'bool': BooleanField,
'boolean': BooleanField,
'char': CharField,
'date': DateField,
'datetime': DateTimeField,
'decimal': DecimalField,
'float': FloatField,
'integer': IntegerField,
'integer unsigned': IntegerField,
'int': IntegerField,
'long': BigIntegerField,
'numeric': DecimalField,
'real': FloatField,
'smallinteger': IntegerField,
'smallint': IntegerField,
'smallint unsigned': IntegerField,
'text': TextField,
'time': TimeField,
'varchar': CharField,
}
begin = '(?:["\[\(]+)?'
end = '(?:["\]\)]+)?'
re_foreign_key = (
'(?:FOREIGN KEY\s*)?'
'{begin}(.+?){end}\s+(?:.+\s+)?'
'references\s+{begin}(.+?){end}'
'\s*\(["|\[]?(.+?)["|\]]?\)').format(begin=begin, end=end)
re_varchar = r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$'
def _map_col(self, column_type):
raw_column_type = column_type.lower()
if raw_column_type in self.column_map:
field_class = self.column_map[raw_column_type]
elif re.search(self.re_varchar, raw_column_type):
field_class = CharField
else:
column_type = re.sub('\(.+\)', '', raw_column_type)
if column_type == '':
field_class = BareField
else:
field_class = self.column_map.get(column_type, UnknownField)
return field_class
def get_column_types(self, table, schema=None):
column_types = {}
columns = self.database.get_columns(table)
for column in columns:
column_types[column.name] = self._map_col(column.data_type)
return column_types, {}
_DatabaseMetadata = namedtuple('_DatabaseMetadata', (
'columns',
'primary_keys',
'foreign_keys',
'model_names',
'indexes'))
class DatabaseMetadata(_DatabaseMetadata):
def multi_column_indexes(self, table):
accum = []
for index in self.indexes[table]:
if len(index.columns) > 1:
field_names = [self.columns[table][column].name
for column in index.columns
if column in self.columns[table]]
accum.append((field_names, index.unique))
return accum
def column_indexes(self, table):
accum = {}
for index in self.indexes[table]:
if len(index.columns) == 1:
accum[index.columns[0]] = index.unique
return accum
class Introspector(object):
pk_classes = [AutoField, IntegerField]
def __init__(self, metadata, schema=None):
self.metadata = metadata
self.schema = schema
def __repr__(self):
return '<Introspector: %s>' % self.metadata.database
@classmethod
def from_database(cls, database, schema=None):
if isinstance(database, Proxy):
if database.obj is None:
raise ValueError('Cannot introspect an uninitialized Proxy.')
database = database.obj # Reference the proxied db obj.
if CockroachDatabase and isinstance(database, CockroachDatabase):
metadata = CockroachDBMetadata(database)
elif isinstance(database, PostgresqlDatabase):
metadata = PostgresqlMetadata(database)
elif isinstance(database, MySQLDatabase):
metadata = MySQLMetadata(database)
elif isinstance(database, SqliteDatabase):
metadata = SqliteMetadata(database)
else:
raise ValueError('Introspection not supported for %r' % database)
return cls(metadata, schema=schema)
def get_database_class(self):
return type(self.metadata.database)
def get_database_name(self):
return self.metadata.database.database
def get_database_kwargs(self):
return self.metadata.database.connect_params
def get_additional_imports(self):
if self.metadata.requires_extension:
return '\n' + self.metadata.extension_import
return ''
def make_model_name(self, table, snake_case=True):
if snake_case:
table = make_snake_case(table)
model = re.sub(r'[^\w]+', '', table)
model_name = ''.join(sub.title() for sub in model.split('_'))
if not model_name[0].isalpha():
model_name = 'T' + model_name
return model_name
def make_column_name(self, column, is_foreign_key=False, snake_case=True):
column = column.strip()
if snake_case:
column = make_snake_case(column)
column = column.lower()
if is_foreign_key:
# Strip "_id" from foreign keys, unless the foreign-key happens to
# be named "_id", in which case the name is retained.
column = re.sub('_id$', '', column) or column
# Remove characters that are invalid for Python identifiers.
column = re.sub(r'[^\w]+', '_', column)
if column in RESERVED_WORDS:
column += '_'
if len(column) and column[0].isdigit():
column = '_' + column
return column
def introspect(self, table_names=None, literal_column_names=False,
include_views=False, snake_case=True):
# Retrieve all the tables in the database.
tables = self.metadata.database.get_tables(schema=self.schema)
if include_views:
views = self.metadata.database.get_views(schema=self.schema)
tables.extend([view.name for view in views])
if table_names is not None:
tables = [table for table in tables if table in table_names]
table_set = set(tables)
# Store a mapping of table name -> dictionary of columns.
columns = {}
# Store a mapping of table name -> set of primary key columns.
primary_keys = {}
# Store a mapping of table -> foreign keys.
foreign_keys = {}
# Store a mapping of table name -> model name.
model_names = {}
# Store a mapping of table name -> indexes.
indexes = {}
# Gather the columns for each table.
for table in tables:
table_indexes = self.metadata.get_indexes(table, self.schema)
table_columns = self.metadata.get_columns(table, self.schema)
try:
foreign_keys[table] = self.metadata.get_foreign_keys(
table, self.schema)
except ValueError as exc:
err(*exc.args)
foreign_keys[table] = []
else:
# If there is a possibility we could exclude a dependent table,
# ensure that we introspect it so FKs will work.
if table_names is not None:
for foreign_key in foreign_keys[table]:
if foreign_key.dest_table not in table_set:
tables.append(foreign_key.dest_table)
table_set.add(foreign_key.dest_table)
model_names[table] = self.make_model_name(table, snake_case)
# Collect sets of all the column names as well as all the
# foreign-key column names.
lower_col_names = set(column_name.lower()
for column_name in table_columns)
fks = set(fk_col.column for fk_col in foreign_keys[table])
for col_name, column in table_columns.items():
if literal_column_names:
new_name = re.sub(r'[^\w]+', '_', col_name)
else:
new_name = self.make_column_name(col_name, col_name in fks,
snake_case)
# If we have two columns, "parent" and "parent_id", ensure
# that when we don't introduce naming conflicts.
lower_name = col_name.lower()
if lower_name.endswith('_id') and new_name in lower_col_names:
new_name = col_name.lower()
column.name = new_name
for index in table_indexes:
if len(index.columns) == 1:
column = index.columns[0]
if column in table_columns:
table_columns[column].unique = index.unique
table_columns[column].index = True
primary_keys[table] = self.metadata.get_primary_keys(
table, self.schema)
columns[table] = table_columns
indexes[table] = table_indexes
# Gather all instances where we might have a `related_name` conflict,
# either due to multiple FKs on a table pointing to the same table,
# or a related_name that would conflict with an existing field.
related_names = {}
sort_fn = lambda foreign_key: foreign_key.column
for table in tables:
models_referenced = set()
for foreign_key in sorted(foreign_keys[table], key=sort_fn):
try:
column = columns[table][foreign_key.column]
except KeyError:
continue
dest_table = foreign_key.dest_table
if dest_table in models_referenced:
related_names[column] = '%s_%s_set' % (
dest_table,
column.name)
else:
models_referenced.add(dest_table)
# On the second pass convert all foreign keys.
for table in tables:
for foreign_key in foreign_keys[table]:
src = columns[foreign_key.table][foreign_key.column]
try:
dest = columns[foreign_key.dest_table][
foreign_key.dest_column]
except KeyError:
dest = None
src.set_foreign_key(
foreign_key=foreign_key,
model_names=model_names,
dest=dest,
related_name=related_names.get(src))
return DatabaseMetadata(
columns,
primary_keys,
foreign_keys,
model_names,
indexes)
def generate_models(self, skip_invalid=False, table_names=None,
literal_column_names=False, bare_fields=False,
include_views=False):
database = self.introspect(table_names, literal_column_names,
include_views)
models = {}
class BaseModel(Model):
class Meta:
database = self.metadata.database
schema = self.schema
pending = set()
def _create_model(table, models):
pending.add(table)
for foreign_key in database.foreign_keys[table]:
dest = foreign_key.dest_table
if dest not in models and dest != table:
if dest in pending:
warnings.warn('Possible reference cycle found between '
'%s and %s' % (table, dest))
else:
_create_model(dest, models)
primary_keys = []
columns = database.columns[table]
for column_name, column in columns.items():
if column.primary_key:
primary_keys.append(column.name)
multi_column_indexes = database.multi_column_indexes(table)
column_indexes = database.column_indexes(table)
class Meta:
indexes = multi_column_indexes
table_name = table
# Fix models with multi-column primary keys.
composite_key = False
if len(primary_keys) == 0:
primary_keys = columns.keys()
if len(primary_keys) > 1:
Meta.primary_key = CompositeKey(*[
field.name for col, field in columns.items()
if col in primary_keys])
composite_key = True
attrs = {'Meta': Meta}
for column_name, column in columns.items():
FieldClass = column.field_class
if FieldClass is not ForeignKeyField and bare_fields:
FieldClass = BareField
elif FieldClass is UnknownField:
FieldClass = BareField
params = {
'column_name': column_name,
'null': column.nullable}
if column.primary_key and composite_key:
if FieldClass is AutoField:
FieldClass = IntegerField
params['primary_key'] = False
elif column.primary_key and FieldClass is not AutoField:
params['primary_key'] = True
if column.is_foreign_key():
if column.is_self_referential_fk():
params['model'] = 'self'
else:
dest_table = column.foreign_key.dest_table
if dest_table in models:
params['model'] = models[dest_table]
else:
FieldClass = DeferredForeignKey
params['rel_model_name'] = dest_table
if column.to_field:
params['field'] = column.to_field
# Generate a unique related name.
params['backref'] = '%s_%s_rel' % (table, column_name)
if column.default is not None:
constraint = SQL('DEFAULT %s' % column.default)
params['constraints'] = [constraint]
if column_name in column_indexes and not \
column.is_primary_key():
if column_indexes[column_name]:
params['unique'] = True
elif not column.is_foreign_key():
params['index'] = True
attrs[column.name] = FieldClass(**params)
try:
models[table] = type(str(table), (BaseModel,), attrs)
except ValueError:
if not skip_invalid:
raise
finally:
if table in pending:
pending.remove(table)
# Actually generate Model classes.
for table, model in sorted(database.model_names.items()):
if table not in models:
_create_model(table, models)
return models
def introspect(database, schema=None):
introspector = Introspector.from_database(database, schema=schema)
return introspector.introspect()
def generate_models(database, schema=None, **options):
introspector = Introspector.from_database(database, schema=schema)
return introspector.generate_models(**options)
def print_model(model, indexes=True, inline_indexes=False):
print(model._meta.name)
for field in model._meta.sorted_fields:
parts = [' %s %s' % (field.name, field.field_type)]
if field.primary_key:
parts.append(' PK')
elif inline_indexes:
if field.unique:
parts.append(' UNIQUE')
elif field.index:
parts.append(' INDEX')
if isinstance(field, ForeignKeyField):
parts.append(' FK: %s.%s' % (field.rel_model.__name__,
field.rel_field.name))
print(''.join(parts))
if indexes:
index_list = model._meta.fields_to_index()
if not index_list:
return
print('\nindex(es)')
for index in index_list:
parts = [' ']
ctx = model._meta.database.get_sql_context()
with ctx.scope_values(param='%s', quote='""'):
ctx.sql(CommaNodeList(index._expressions))
if index._where:
ctx.literal(' WHERE ')
ctx.sql(index._where)
sql, params = ctx.query()
clean = sql % tuple(map(_query_val_transform, params))
parts.append(clean.replace('"', ''))
if index._unique:
parts.append(' UNIQUE')
print(''.join(parts))
def get_table_sql(model):
sql, params = model._schema._create_table().query()
if model._meta.database.param != '%s':
sql = sql.replace(model._meta.database.param, '%s')
# Format and indent the table declaration, simplest possible approach.
match_obj = re.match('^(.+?\()(.+)(\).*)', sql)
create, columns, extra = match_obj.groups()
indented = ',\n'.join(' %s' % column for column in columns.split(', '))
clean = '\n'.join((create, indented, extra)).strip()
return clean % tuple(map(_query_val_transform, params))
def print_table_sql(model):
print(get_table_sql(model))
| mit | f520f24c86250ba5040d4ba18611acf3 | 35.300469 | 80 | 0.557003 | 4.286625 | false | false | false | false |
coleifer/peewee | tests/transactions.py | 1 | 10934 | from peewee import *
from .base import DatabaseTestCase
from .base import IS_CRDB
from .base import IS_CRDB_NESTED_TX
from .base import IS_SQLITE
from .base import ModelTestCase
from .base import db
from .base import new_connection
from .base import skip_if
from .base import skip_unless
from .base_models import Register
class BaseTransactionTestCase(ModelTestCase):
requires = [Register]
def assertRegister(self, vals):
query = Register.select().order_by(Register.value)
self.assertEqual([register.value for register in query], vals)
def _save(self, *vals):
Register.insert([{Register.value: val} for val in vals]).execute()
def requires_nested(fn):
return skip_if(IS_CRDB and not IS_CRDB_NESTED_TX,
'nested transaction support is required')(fn)
class TestTransaction(BaseTransactionTestCase):
def test_simple(self):
self.assertFalse(db.in_transaction())
with db.atomic():
self.assertTrue(db.in_transaction())
self._save(1)
self.assertFalse(db.in_transaction())
self.assertRegister([1])
# Explicit rollback, implicit commit.
with db.atomic() as txn:
self._save(2)
txn.rollback()
self.assertTrue(db.in_transaction())
self._save(3)
self.assertFalse(db.in_transaction())
self.assertRegister([1, 3])
# Explicit rollbacks.
with db.atomic() as txn:
self._save(4)
txn.rollback()
self._save(5)
txn.rollback()
self.assertRegister([1, 3])
@requires_nested
def test_transactions(self):
self.assertFalse(db.in_transaction())
with db.atomic():
self.assertTrue(db.in_transaction())
self._save(1)
self.assertRegister([1])
with db.atomic() as txn:
self._save(2)
txn.rollback()
self._save(3)
with db.atomic() as sp1:
self._save(4)
with db.atomic() as sp2:
self._save(5)
sp2.rollback()
with db.atomic() as sp3:
self._save(6)
with db.atomic() as sp4:
self._save(7)
with db.atomic() as sp5:
self._save(8)
self.assertRegister([1, 3, 4, 6, 7, 8])
sp4.rollback()
self.assertRegister([1, 3, 4, 6])
self.assertRegister([1, 3, 4, 6])
def test_commit_rollback(self):
with db.atomic() as txn:
self._save(1)
txn.commit()
self._save(2)
txn.rollback()
self.assertRegister([1])
with db.atomic() as txn:
self._save(3)
txn.rollback()
self._save(4)
self.assertRegister([1, 4])
@requires_nested
def test_commit_rollback_nested(self):
with db.atomic() as txn:
self.test_commit_rollback()
txn.rollback()
self.assertRegister([])
with db.atomic():
self.test_commit_rollback()
self.assertRegister([1, 4])
def test_nesting_transaction_obj(self):
self.assertRegister([])
with db.transaction() as txn:
self._save(1)
with db.transaction() as txn2:
self._save(2)
txn2.rollback() # Actually issues a rollback.
self.assertRegister([])
self._save(3)
self.assertRegister([3])
with db.transaction() as txn:
self._save(4)
with db.transaction() as txn2:
with db.transaction() as txn3:
self._save(5)
txn3.commit() # Actually commits.
self._save(6)
txn2.rollback()
self.assertRegister([3, 4, 5])
@requires_nested
def test_savepoint_commit(self):
with db.atomic() as txn:
self._save(1)
txn.rollback()
self._save(2)
txn.commit()
with db.atomic() as sp:
self._save(3)
sp.rollback()
self._save(4)
sp.commit()
self.assertRegister([2, 4])
def test_atomic_decorator(self):
@db.atomic()
def save(i):
self._save(i)
save(1)
self.assertRegister([1])
def text_atomic_exception(self):
def will_fail(self):
with db.atomic():
self._save(1)
self._save(None)
self.assertRaises(IntegrityError, will_fail)
self.assertRegister([])
def user_error(self):
with db.atomic():
self._save(2)
raise ValueError
self.assertRaises(ValueError, user_error)
self.assertRegister([])
def test_manual_commit(self):
with db.manual_commit():
db.begin()
self._save(1)
db.rollback()
db.begin()
self._save(2)
db.commit()
with db.manual_commit():
db.begin()
self._save(3)
db.rollback()
db.begin()
self._save(4)
db.commit()
self.assertRegister([2, 4])
def test_mixing_manual_atomic(self):
@db.manual_commit()
def will_fail():
pass
@db.atomic()
def also_fails():
pass
with db.atomic():
self.assertRaises(ValueError, will_fail)
with db.manual_commit():
self.assertRaises(ValueError, also_fails)
with db.manual_commit():
with self.assertRaises(ValueError):
with db.atomic(): pass
with db.atomic():
with self.assertRaises(ValueError):
with db.manual_commit(): pass
def test_closing_db_in_transaction(self):
with db.atomic():
self.assertRaises(OperationalError, db.close)
@requires_nested
def test_db_context_manager(self):
db.close()
self.assertTrue(db.is_closed())
with db:
self.assertFalse(db.is_closed())
self._save(1)
with db:
self._save(2)
try:
with db:
self._save(3)
raise ValueError('xxx')
except ValueError:
pass
self._save(4)
try:
with db:
self._save(5)
with db:
self._save(6)
raise ValueError('yyy')
except ValueError:
pass
self.assertFalse(db.is_closed())
self.assertTrue(db.is_closed())
self.assertRegister([1, 2, 4])
@requires_nested
class TestSession(BaseTransactionTestCase):
def test_session(self):
self.assertTrue(db.session_start())
self.assertTrue(db.session_start())
self.assertEqual(db.transaction_depth(), 2)
self._save(1)
self.assertTrue(db.session_commit())
self.assertEqual(db.transaction_depth(), 1)
self._save(2) # Now we're in autocommit mode.
self.assertTrue(db.session_rollback())
self.assertEqual(db.transaction_depth(), 0)
self.assertTrue(db.session_start())
self._save(3)
self.assertTrue(db.session_rollback())
self.assertRegister([1])
def test_session_with_closed_db(self):
db.close()
self.assertTrue(db.session_start())
self.assertFalse(db.is_closed())
self.assertRaises(OperationalError, db.close)
self._save(1)
self.assertTrue(db.session_rollback())
self.assertRegister([])
def test_session_inside_context_manager(self):
with db.atomic():
self.assertTrue(db.session_start())
self._save(1)
self.assertTrue(db.session_commit())
self._save(2)
self.assertTrue(db.session_rollback())
db.session_start()
self._save(3)
self.assertRegister([1, 3])
def test_commit_rollback_mix(self):
db.session_start()
with db.atomic() as txn: # Will be a savepoint.
self._save(1)
with db.atomic() as t2:
self._save(2)
with db.atomic() as t3:
self._save(3)
t2.rollback()
txn.commit()
self._save(4)
txn.rollback()
self.assertTrue(db.session_commit())
self.assertRegister([1])
def test_session_rollback(self):
db.session_start()
self._save(1)
with db.atomic() as txn:
self._save(2)
with db.atomic() as t2:
self._save(3)
self.assertRegister([1, 2, 3])
self.assertTrue(db.session_rollback())
self.assertRegister([])
db.session_start()
self._save(1)
with db.transaction() as txn:
self._save(2)
with db.transaction() as t2:
self._save(3)
t2.rollback() # Rolls back everything, starts new txn.
db.session_commit()
self.assertRegister([])
def test_session_commit(self):
db.session_start()
self._save(1)
with db.transaction() as txn:
self._save(2)
with db.transaction() as t2:
self._save(3)
t2.commit() # Saves everything, starts new txn.
txn.rollback()
self.assertTrue(db.session_rollback())
self.assertRegister([1, 2, 3])
@skip_unless(IS_SQLITE, 'requires sqlite for transaction lock type')
class TestTransactionLockType(BaseTransactionTestCase):
def test_lock_type(self):
db2 = new_connection(timeout=0.001)
db2.connect()
with self.database.atomic(lock_type='EXCLUSIVE') as txn:
with self.assertRaises(OperationalError):
with db2.atomic(lock_type='IMMEDIATE') as t2:
self._save(1)
self._save(2)
self.assertRegister([2])
with self.database.atomic('IMMEDIATE') as txn:
with self.assertRaises(OperationalError):
with db2.atomic('EXCLUSIVE') as t2:
self._save(3)
self._save(4)
self.assertRegister([2, 4])
with self.database.transaction(lock_type='DEFERRED') as txn:
self._save(5) # Deferred -> Exclusive after our write.
with self.assertRaises(OperationalError):
with db2.transaction(lock_type='IMMEDIATE') as t2:
self._save(6)
self.assertRegister([2, 4, 5])
| mit | fca81a516a2ed1cef4198f83e66e86a6 | 27.107969 | 74 | 0.519938 | 4.189272 | false | true | false | false |
wechatpy/wechatpy | wechatpy/work/client/api/kf_message.py | 1 | 4405 | # -*- coding: utf-8 -*-
from optionaldict import optionaldict
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatKFMessage(BaseWeChatAPI):
"""
发送微信客服消息
https://work.weixin.qq.com/api/doc/90000/90135/94677
支持:
* 文本消息
* 图片消息
* 语音消息
* 视频消息
* 文件消息
* 图文链接
* 小程序
* 菜单消息
* 地理位置
"""
def send(self, user_id, open_kfid, msgid="", msg=None):
"""
当微信客户处于“新接入待处理”或“由智能助手接待”状态下,可调用该接口给用户发送消息。
注意仅当微信客户在主动发送消息给客服后的48小时内,企业可发送消息给客户,最多可发送5条消息;若用户继续发送消息,企业可再次下发消息。
支持发送消息类型:文本、图片、语音、视频、文件、图文、小程序、菜单消息、地理位置。
:param user_id: 指定接收消息的客户UserID
:param open_kfid: 指定发送消息的客服帐号ID
:param msgid: 指定消息ID
:param tag_ids: 标签ID列表。
:param msg: 发送消息的 dict 对象
:type msg: dict | None
:return: 接口调用结果
"""
msg = msg or {}
data = {
"touser": user_id,
"open_kfid": open_kfid,
}
if msgid:
data["msgid"] = msgid
data.update(msg)
return self._post("kf/send_msg", data=data)
def send_text(self, user_id, open_kfid, content, msgid=""):
return self.send(
user_id,
open_kfid,
msgid,
msg={"msgtype": "text", "text": {"content": content}},
)
def send_image(self, user_id, open_kfid, media_id, msgid=""):
return self.send(
user_id,
open_kfid,
msgid,
msg={"msgtype": "image", "image": {"media_id": media_id}},
)
def send_voice(self, user_id, open_kfid, media_id, msgid=""):
return self.send(
user_id,
open_kfid,
msgid,
msg={"msgtype": "voice", "voice": {"media_id": media_id}},
)
def send_video(self, user_id, open_kfid, media_id, msgid=""):
video_data = optionaldict()
video_data["media_id"] = media_id
return self.send(
user_id,
open_kfid,
msgid,
msg={"msgtype": "video", "video": dict(video_data)},
)
def send_file(self, user_id, open_kfid, media_id, msgid=""):
return self.send(
user_id,
open_kfid,
msgid,
msg={"msgtype": "file", "file": {"media_id": media_id}},
)
def send_articles_link(self, user_id, open_kfid, article, msgid=""):
articles_data = {
"title": article["title"],
"desc": article["desc"],
"url": article["url"],
"thumb_media_id": article["thumb_media_id"],
}
return self.send(
user_id,
open_kfid,
msgid,
msg={"msgtype": "news", "link": {"link": articles_data}},
)
def send_msgmenu(self, user_id, open_kfid, head_content, menu_list, tail_content, msgid=""):
return self.send(
user_id,
open_kfid,
msgid,
msg={
"msgtype": "msgmenu",
"msgmenu": {"head_content": head_content, "list": menu_list, "tail_content": tail_content},
},
)
def send_location(self, user_id, open_kfid, name, address, latitude, longitude, msgid=""):
return self.send(
user_id,
open_kfid,
msgid,
msg={
"msgtype": "location",
"msgmenu": {"name": name, "address": address, "latitude": latitude, "longitude": longitude},
},
)
def send_miniprogram(self, user_id, open_kfid, appid, title, thumb_media_id, pagepath, msgid=""):
return self.send(
user_id,
open_kfid,
msgid,
msg={
"msgtype": "miniprogram",
"msgmenu": {"appid": appid, "title": title, "thumb_media_id": thumb_media_id, "pagepath": pagepath},
},
)
| mit | 600b0f0f256477164b6bc7a1d77ee19e | 27.294964 | 116 | 0.495296 | 2.858285 | false | false | false | false |
ceph/teuthology | teuthology/task/proc_thrasher.py | 4 | 2404 | """
Process thrasher
"""
import logging
import gevent
import random
import time
from teuthology.orchestra import run
log = logging.getLogger(__name__)
class ProcThrasher:
    """Kills and restarts some number of the specified process on the specified
    remote.

    Config keys (all optional):
      * ``num_procs``   -- number of concurrent processes to keep alive (default 5)
      * ``rest_period`` -- seconds to sleep between thrash cycles (default 100)
      * ``run_time``    -- total seconds to keep thrashing (default 1000)
    """
    def __init__(self, config, remote, *proc_args, **proc_kwargs):
        # proc_args/proc_kwargs are passed straight through to remote.run()
        # for every spawned process.  NOTE(review): a caller-supplied
        # "logger" kwarg is both consumed here and forwarded to
        # remote.run() -- presumably intentional, but worth confirming.
        self.proc_kwargs = proc_kwargs
        self.proc_args = proc_args
        self.config = config
        self.greenlet = None
        self.logger = proc_kwargs.get("logger", log.getChild('proc_thrasher'))
        self.remote = remote

        # config:
        self.num_procs = self.config.get("num_procs", 5)
        self.rest_period = self.config.get("rest_period", 100) # seconds
        self.run_time = self.config.get("run_time", 1000) # seconds

    def log(self, msg):
        """Log *msg* at INFO level through this thrasher's logger."""
        self.logger.info(msg)

    def start(self):
        """Start thrashing in a background greenlet.

        Idempotent: calling start() again while a greenlet already exists
        is a no-op.
        """
        if self.greenlet is not None:
            return
        self.greenlet = gevent.Greenlet(self.loop)
        self.greenlet.start()

    def join(self):
        """Block until the thrashing greenlet finishes."""
        self.greenlet.join()

    def loop(self):
        """Thrashing loop -- runs for ``run_time`` seconds.

        Each cycle kills one randomly chosen process (by closing its
        stdin and waiting for it to exit), tops the pool back up to
        ``num_procs`` processes, then sleeps for ``rest_period`` seconds.
        On exit, waits for all remaining processes to finish.
        """
        time_started = time.time()
        procs = []
        self.log("Starting")
        while time_started + self.run_time > time.time():
            if len(procs) > 0:
                self.log("Killing proc")
                proc = random.choice(procs)
                procs.remove(proc)
                # Closing stdin signals the remote process to exit;
                # run.wait() then reaps it.
                proc.stdin.close()

                self.log("About to wait")
                run.wait([proc])
                self.log("Killed proc")

            # Refill the pool up to the configured size.
            while len(procs) < self.num_procs:
                self.log("Creating proc " + str(len(procs) + 1))
                self.log("args are " + str(self.proc_args) + " kwargs: " + str(self.proc_kwargs))
                procs.append(self.remote.run(
                    *self.proc_args,
                    ** self.proc_kwargs))
            self.log("About to sleep")
            time.sleep(self.rest_period)
            self.log("Just woke")
        run.wait(procs)
| mit | 08b6251f6c912cd1703eb6c5e9bb9d49 | 29.05 | 97 | 0.534526 | 3.960461 | false | true | false | false |
wechatpy/wechatpy | tests/test_client.py | 1 | 35461 | # -*- coding: utf-8 -*-
import io
import json
import os
import inspect
import time
import unittest
from datetime import datetime
from httmock import HTTMock, response, urlmatch
from wechatpy import WeChatClient
from wechatpy.exceptions import WeChatClientException
from wechatpy.schemes import JsApiCardExt
_TESTS_PATH = os.path.abspath(os.path.dirname(__file__))
_FIXTURE_PATH = os.path.join(_TESTS_PATH, "fixtures")
@urlmatch(netloc=r"(.*\.)?api\.weixin\.qq\.com$")
def wechat_api_mock(url, request):
    """Serve a canned JSON fixture for any api.weixin.qq.com request.

    The request path is mapped to a file under the fixtures directory by
    stripping the ``/cgi-bin/`` prefix and turning path separators into
    underscores.  When the fixture is missing or not valid JSON, an error
    payload (errcode 99999) describing the failure is returned instead.
    """
    fixture_key = url.path.replace("/cgi-bin/", "").replace("/", "_")
    if fixture_key.startswith("_"):
        fixture_key = fixture_key[1:]
    res_file = os.path.join(_FIXTURE_PATH, f"{fixture_key}.json")

    headers = {"Content-Type": "application/json"}
    try:
        with open(res_file, "rb") as fp:
            payload = json.loads(fp.read().decode("utf-8"))
    except (IOError, ValueError) as e:
        payload = {
            "errcode": 99999,
            "errmsg": f"Loads fixture {res_file} failed, error: {e}",
        }
    return response(200, payload, headers, request=request)
class WeChatClientTestCase(unittest.TestCase):
app_id = "123456"
secret = "123456"
def setUp(self):
self.client = WeChatClient(self.app_id, self.secret)
def test_two_client_not_equal(self):
client2 = WeChatClient("654321", "654321", "987654321")
self.assertNotEqual(self.client, client2)
self.assertNotEqual(self.client.user, client2.user)
self.assertNotEqual(id(self.client.menu), id(client2.menu))
with HTTMock(wechat_api_mock):
self.client.fetch_access_token()
self.assertNotEqual(self.client.access_token, client2.access_token)
def test_subclass_client_ok(self):
class TestClient(WeChatClient):
pass
client = TestClient("12345", "123456", "123456789")
self.assertEqual(client, client.user._client)
def test_fetch_access_token_is_method(self):
self.assertTrue(inspect.ismethod(self.client.fetch_access_token))
class TestClient(WeChatClient):
@property
def fetch_access_token(self):
pass
client = TestClient("12345", "123456", "123456789")
self.assertFalse(inspect.ismethod(client.fetch_access_token))
def test_fetch_access_token(self):
with HTTMock(wechat_api_mock):
token = self.client.fetch_access_token()
self.assertEqual("1234567890", token["access_token"])
self.assertEqual(7200, token["expires_in"])
self.assertEqual("1234567890", self.client.access_token)
def test_upload_media(self):
media_file = io.StringIO("nothing")
with HTTMock(wechat_api_mock):
media = self.client.media.upload("image", media_file)
self.assertEqual("image", media["type"])
self.assertEqual("12345678", media["media_id"])
def test_user_get_group_id(self):
with HTTMock(wechat_api_mock):
group_id = self.client.user.get_group_id("123456")
self.assertEqual(102, group_id)
def test_create_group(self):
with HTTMock(wechat_api_mock):
group = self.client.group.create("test")
self.assertEqual(1, group["group"]["id"])
self.assertEqual("test", group["group"]["name"])
def test_group_get(self):
with HTTMock(wechat_api_mock):
groups = self.client.group.get()
self.assertEqual(5, len(groups))
def test_group_getid(self):
with HTTMock(wechat_api_mock):
group = self.client.group.get("123456")
self.assertEqual(102, group)
def test_group_update(self):
with HTTMock(wechat_api_mock):
result = self.client.group.update(102, "test")
self.assertEqual(0, result["errcode"])
def test_group_move_user(self):
with HTTMock(wechat_api_mock):
result = self.client.group.move_user("test", 102)
self.assertEqual(0, result["errcode"])
def test_group_delete(self):
with HTTMock(wechat_api_mock):
result = self.client.group.delete(123456)
self.assertEqual(0, result["errcode"])
def test_send_text_message(self):
with HTTMock(wechat_api_mock):
result = self.client.message.send_text(1, "test", account="test")
self.assertEqual(0, result["errcode"])
def test_send_image_message(self):
with HTTMock(wechat_api_mock):
result = self.client.message.send_image(1, "123456")
self.assertEqual(0, result["errcode"])
def test_send_voice_message(self):
with HTTMock(wechat_api_mock):
result = self.client.message.send_voice(1, "123456")
self.assertEqual(0, result["errcode"])
def test_send_video_message(self):
with HTTMock(wechat_api_mock):
result = self.client.message.send_video(1, "123456", "test", "test")
self.assertEqual(0, result["errcode"])
def test_send_music_message(self):
with HTTMock(wechat_api_mock):
result = self.client.message.send_music(
1, "http://www.qq.com", "http://www.qq.com", "123456", "test", "test"
)
self.assertEqual(0, result["errcode"])
def test_send_articles_message(self):
with HTTMock(wechat_api_mock):
articles = [
{"title": "test", "description": "test", "url": "http://www.qq.com", "image": "http://www.qq.com"}
]
result = self.client.message.send_articles(1, articles)
self.assertEqual(0, result["errcode"])
def test_send_card_message(self):
with HTTMock(wechat_api_mock):
result = self.client.message.send_card(1, "123456")
self.assertEqual(0, result["errcode"])
def test_send_mini_program_page(self):
with HTTMock(wechat_api_mock):
result = self.client.message.send_mini_program_page(1, {})
self.assertEqual(0, result["errcode"])
def test_send_mass_text_message(self):
with HTTMock(wechat_api_mock):
result = self.client.message.send_mass_text("test", [1])
self.assertEqual(0, result["errcode"])
def test_send_mass_image_message(self):
with HTTMock(wechat_api_mock):
result = self.client.message.send_mass_image("123456", [1])
self.assertEqual(0, result["errcode"])
def test_send_mass_voice_message(self):
with HTTMock(wechat_api_mock):
result = self.client.message.send_mass_voice("test", [1])
self.assertEqual(0, result["errcode"])
def test_send_mass_video_message(self):
with HTTMock(wechat_api_mock):
result = self.client.message.send_mass_video("test", [1], title="title", description="desc")
self.assertEqual(0, result["errcode"])
def test_send_mass_article_message(self):
with HTTMock(wechat_api_mock):
result = self.client.message.send_mass_article("test", [1])
self.assertEqual(0, result["errcode"])
def test_send_mass_card_message(self):
with HTTMock(wechat_api_mock):
result = self.client.message.send_mass_card("test", [1])
self.assertEqual(0, result["errcode"])
def test_get_mass_message(self):
with HTTMock(wechat_api_mock):
result = self.client.message.get_mass(201053012)
self.assertEqual("SEND_SUCCESS", result["msg_status"])
def test_create_menu(self):
with HTTMock(wechat_api_mock):
result = self.client.menu.create({"button": [{"type": "click", "name": "test", "key": "test"}]})
self.assertEqual(0, result["errcode"])
def test_get_menu(self):
with HTTMock(wechat_api_mock):
menu = self.client.menu.get()
self.assertTrue("menu" in menu)
def test_delete_menu(self):
with HTTMock(wechat_api_mock):
result = self.client.menu.delete()
self.assertEqual(0, result["errcode"])
def test_update_menu(self):
with HTTMock(wechat_api_mock):
result = self.client.menu.update({"button": [{"type": "click", "name": "test", "key": "test"}]})
self.assertEqual(0, result["errcode"])
def test_short_url(self):
with HTTMock(wechat_api_mock):
result = self.client.misc.short_url("http://www.qq.com")
self.assertEqual("http://qq.com", result["short_url"])
def test_get_wechat_ips(self):
with HTTMock(wechat_api_mock):
result = self.client.misc.get_wechat_ips()
self.assertEqual(["127.0.0.1"], result)
def test_check_network(self):
with HTTMock(wechat_api_mock):
result = self.client.misc.check_network()
dns = result["dns"]
self.assertListEqual(
dns,
[
{"ip": "111.161.64.40", "real_operator": "UNICOM"},
{"ip": "111.161.64.48", "real_operator": "UNICOM"},
],
)
def test_get_user_info(self):
with HTTMock(wechat_api_mock):
openid = "o6_bmjrPTlm6_2sgVt7hMZOPfL2M"
user = self.client.user.get(openid)
self.assertEqual("Band", user["nickname"])
def test_get_followers(self):
with HTTMock(wechat_api_mock):
result = self.client.user.get_followers()
self.assertEqual(2, result["total"])
self.assertEqual(2, result["count"])
def test_iter_followers(self):
@urlmatch(netloc=r"(.*\.)?api\.weixin\.qq\.com$", query=r".*next_openid=[^&]+")
def next_openid_mock(url, request):
"""伪造第二页的请求"""
content = {"total": 2, "count": 0, "next_openid": ""}
headers = {"Content-Type": "application/json"}
return response(200, content, headers, request=request)
with HTTMock(next_openid_mock, wechat_api_mock):
users = list(self.client.user.iter_followers())
self.assertEqual(2, len(users))
self.assertIn("OPENID1", users)
self.assertIn("OPENID2", users)
def test_update_user_remark(self):
with HTTMock(wechat_api_mock):
openid = "openid"
remark = "test"
result = self.client.user.update_remark(openid, remark)
self.assertEqual(0, result["errcode"])
def test_get_user_info_batch(self):
user_list = [
{"openid": "otvxTs4dckWG7imySrJd6jSi0CWE", "lang": "zh-CN"},
{"openid": "otvxTs_JZ6SEiP0imdhpi50fuSZg", "lang": "zh-CN"},
]
with HTTMock(wechat_api_mock):
result = self.client.user.get_batch(user_list)
self.assertEqual(user_list[0]["openid"], result[0]["openid"])
self.assertEqual("iWithery", result[0]["nickname"])
self.assertEqual(user_list[1]["openid"], result[1]["openid"])
def test_get_user_info_batch_openid_list(self):
user_list = ["otvxTs4dckWG7imySrJd6jSi0CWE", "otvxTs_JZ6SEiP0imdhpi50fuSZg"]
with HTTMock(wechat_api_mock):
result = self.client.user.get_batch(user_list)
self.assertEqual(user_list[0], result[0]["openid"])
self.assertEqual("iWithery", result[0]["nickname"])
self.assertEqual(user_list[1], result[1]["openid"])
def test_get_tag_users(self):
with HTTMock(wechat_api_mock):
result = self.client.tag.get_tag_users(101)
self.assertEqual(2, result["count"])
def test_iter_tag_users(self):
@urlmatch(netloc=r"(.*\.)?api\.weixin\.qq\.com$", path=r".*user/tag/get")
def next_openid_mock(url, request):
"""伪造第二页的请求"""
data = json.loads(request.body.decode())
if not data.get("next_openid"):
return wechat_api_mock(url, request)
# 根据拿到的第二页请求响应 是没有data和next_openid的
content = {"count": 0}
headers = {"Content-Type": "application/json"}
return response(200, content, headers, request=request)
with HTTMock(next_openid_mock, wechat_api_mock):
users = list(self.client.tag.iter_tag_users(101))
self.assertEqual(2, len(users))
self.assertIn("OPENID1", users)
self.assertIn("OPENID2", users)
def test_create_qrcode(self):
data = {
"expire_seconds": 1800,
"action_name": "QR_SCENE",
"action_info": {"scene": {"scene_id": 123}},
}
with HTTMock(wechat_api_mock):
result = self.client.qrcode.create(data)
self.assertEqual(1800, result["expire_seconds"])
def test_get_qrcode_url_with_str_ticket(self):
ticket = "123"
url = self.client.qrcode.get_url(ticket)
self.assertEqual("https://mp.weixin.qq.com/cgi-bin/showqrcode?ticket=123", url)
def test_get_qrcode_url_with_dict_ticket(self):
ticket = {
"ticket": "123",
}
url = self.client.qrcode.get_url(ticket)
self.assertEqual("https://mp.weixin.qq.com/cgi-bin/showqrcode?ticket=123", url)
def test_customservice_add_account(self):
with HTTMock(wechat_api_mock):
result = self.client.customservice.add_account("test1@test", "test1", "test1")
self.assertEqual(0, result["errcode"])
def test_customservice_update_account(self):
with HTTMock(wechat_api_mock):
result = self.client.customservice.update_account("test1@test", "test1", "test1")
self.assertEqual(0, result["errcode"])
def test_customservice_delete_account(self):
with HTTMock(wechat_api_mock):
result = self.client.customservice.delete_account(
"test1@test",
)
self.assertEqual(0, result["errcode"])
def test_customservice_upload_headimg(self):
media_file = io.StringIO("nothing")
with HTTMock(wechat_api_mock):
result = self.client.customservice.upload_headimg("test1@test", media_file)
self.assertEqual(0, result["errcode"])
def test_customservice_get_accounts(self):
with HTTMock(wechat_api_mock):
result = self.client.customservice.get_accounts()
self.assertEqual(2, len(result))
def test_customservice_get_online_accounts(self):
with HTTMock(wechat_api_mock):
result = self.client.customservice.get_online_accounts()
self.assertEqual(2, len(result))
def test_customservice_create_session(self):
with HTTMock(wechat_api_mock):
result = self.client.customservice.create_session("openid", "test1@test")
self.assertEqual(0, result["errcode"])
def test_customservice_close_session(self):
with HTTMock(wechat_api_mock):
result = self.client.customservice.close_session("openid", "test1@test")
self.assertEqual(0, result["errcode"])
def test_customservice_get_session(self):
with HTTMock(wechat_api_mock):
result = self.client.customservice.get_session("openid")
self.assertEqual("test1@test", result["kf_account"])
def test_customservice_get_session_list(self):
with HTTMock(wechat_api_mock):
result = self.client.customservice.get_session_list("test1@test")
self.assertEqual(2, len(result))
def test_customservice_get_wait_case(self):
with HTTMock(wechat_api_mock):
result = self.client.customservice.get_wait_case()
self.assertEqual(150, result["count"])
def test_customservice_get_records(self):
with HTTMock(wechat_api_mock):
result = self.client.customservice.get_records(123456789, 987654321, 1)
self.assertEqual(2, len(result["recordlist"]))
def test_datacube_get_user_summary(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_user_summary("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_datacube_get_user_cumulate(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_user_cumulate(datetime(2014, 12, 6), datetime(2014, 12, 7))
self.assertEqual(1, len(result))
def test_datacube_get_interface_summary(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_interface_summary("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_datacube_get_interface_summary_hour(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_interface_summary_hour("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_datacube_get_article_summary(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_article_summary("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_datacube_get_article_total(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_article_total("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_datacube_get_user_read(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_user_read("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_datacube_get_user_read_hour(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_user_read_hour("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_datacube_get_user_share(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_user_share("2014-12-06", "2014-12-07")
self.assertEqual(2, len(result))
def test_datacube_get_user_share_hour(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_user_share_hour("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_datacube_get_upstream_msg(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_upstream_msg("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_datacube_get_upstream_msg_hour(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_upstream_msg_hour("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_datacube_get_upstream_msg_week(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_upstream_msg_week("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_datacube_get_upstream_msg_month(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_upstream_msg_month("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_datacube_get_upstream_msg_dist(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_upstream_msg_dist("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_datacube_get_upstream_msg_dist_week(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_upstream_msg_dist_week("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_datacube_get_upstream_msg_dist_month(self):
with HTTMock(wechat_api_mock):
result = self.client.datacube.get_upstream_msg_dist_month("2014-12-06", "2014-12-07")
self.assertEqual(1, len(result))
def test_device_get_qrcode_url(self):
with HTTMock(wechat_api_mock):
qrcode_url = self.client.device.get_qrcode_url(123)
self.assertEqual("https://we.qq.com/d/123", qrcode_url)
qrcode_url = self.client.device.get_qrcode_url(123, {"a": "a"})
self.assertEqual("https://we.qq.com/d/123#YT1h", qrcode_url)
def test_jsapi_get_ticket_response(self):
with HTTMock(wechat_api_mock):
result = self.client.jsapi.get_ticket()
self.assertEqual(
"bxLdikRXVbTPdHSM05e5u5sUoXNKd8-41ZO3MhKoyN5OfkWITDGgnr2fwJ0m9E8NYzWKVZvdVtaUgWvsdshFKA", # NOQA
result["ticket"],
)
self.assertEqual(7200, result["expires_in"])
def test_jsapi_get_jsapi_signature(self):
noncestr = "Wm3WZYTPz0wzccnW"
ticket = "sM4AOVdWfPE4DxkXGEs8VMCPGGVi4C3VM0P37wVUCFvkVAy_90u5h9nbSlYy3-Sl-HhTdfl2fzFy1AOcHKP7qg" # NOQA
timestamp = 1414587457
url = "http://mp.weixin.qq.com?params=value"
signature = self.client.jsapi.get_jsapi_signature(noncestr, ticket, timestamp, url)
self.assertEqual("0f9de62fce790f9a083d5c99e95740ceb90c27ed", signature)
def test_jsapi_get_jsapi_card_ticket(self):
"""card_ticket 与 jsapi_ticket 的 api 都相同,除了请求参数 type 为 wx_card
所以这里使用与 `test_jsapi_get_ticket` 相同的测试文件"""
with HTTMock(wechat_api_mock):
ticket = self.client.jsapi.get_jsapi_card_ticket()
self.assertEqual(
"bxLdikRXVbTPdHSM05e5u5sUoXNKd8-41ZO3MhKoyN5OfkWITDGgnr2fwJ0m9E8NYzWKVZvdVtaUgWvsdshFKA", # NOQA
ticket,
)
self.assertTrue(7200 < self.client.session.get(f"{self.client.appid}_jsapi_card_ticket_expires_at"))
self.assertEqual(
self.client.session.get(f"{self.client.appid}_jsapi_card_ticket"),
"bxLdikRXVbTPdHSM05e5u5sUoXNKd8-41ZO3MhKoyN5OfkWITDGgnr2fwJ0m9E8NYzWKVZvdVtaUgWvsdshFKA",
)
def test_jsapi_card_ext(self):
card_ext = json.loads(JsApiCardExt("asdf", openid="2").to_json())
self.assertNotIn("outer_str", card_ext)
self.assertNotIn("code", card_ext)
card_ext = json.loads(JsApiCardExt("asdf", code="4", openid="2").to_json())
self.assertIn("code", card_ext)
def test_jsapi_get_jsapi_add_card_params(self):
"""微信签名测试工具:http://mp.weixin.qq.com/debug/cgi-bin/sandbox?t=cardsign"""
nonce_str = "Wm3WZYTPz0wzccnW"
card_ticket = "sM4AOVdWfPE4DxkXGEs8VMCPGGVi4C3VM0P37wVUCFvkVAy_90u5h9nbSlYy3-Sl-HhTdfl2fzFy1AOcHKP7qg"
timestamp = "1414587457"
card_id = "random_card_id"
code = "random_code"
openid = "random_openid"
# 测试最少填写
card_params = self.client.jsapi.get_jsapi_add_card_params(
card_ticket=card_ticket, timestamp=timestamp, card_id=card_id, nonce_str=nonce_str
)
self.assertEqual(
JsApiCardExt(
signature="22dce6bad4db532d4a2ef82ca2ca7bbe1e10ef28",
nonce_str=nonce_str,
timestamp=timestamp,
),
card_params,
)
# 测试自定义code
card_params = self.client.jsapi.get_jsapi_add_card_params(
card_ticket=card_ticket, timestamp=timestamp, card_id=card_id, nonce_str=nonce_str, code=code
)
self.assertEqual(
JsApiCardExt(
nonce_str=nonce_str,
timestamp=timestamp,
code=code,
signature="2e9c6d12952246e071717d7baeab20c30420b5cd",
),
card_params,
)
# 测试指定用户领取
card_params = self.client.jsapi.get_jsapi_add_card_params(
card_ticket=card_ticket, timestamp=timestamp, card_id=card_id, nonce_str=nonce_str, openid=openid
)
self.assertEqual(
JsApiCardExt(
nonce_str=nonce_str,
timestamp=timestamp,
openid=openid,
signature="ded860a5dd4467312764bd86e544ad0579cbfad0",
),
card_params,
)
# 测试指定用户领取且自定义code
card_params = self.client.jsapi.get_jsapi_add_card_params(
card_ticket=card_ticket, timestamp=timestamp, card_id=card_id, nonce_str=nonce_str, openid=openid, code=code
)
self.assertEqual(
JsApiCardExt(
nonce_str=nonce_str,
timestamp=timestamp,
openid=openid,
code=code,
signature="950dc1842852457ea573d4d6af34879c1ec093c8",
),
card_params,
)
def test_menu_get_menu_info(self):
with HTTMock(wechat_api_mock):
menu_info = self.client.menu.get_menu_info()
self.assertEqual(1, menu_info["is_menu_open"])
def test_message_get_autoreply_info(self):
with HTTMock(wechat_api_mock):
autoreply = self.client.message.get_autoreply_info()
self.assertEqual(1, autoreply["is_autoreply_open"])
def test_shakearound_apply_device_id(self):
with HTTMock(wechat_api_mock):
res = self.client.shakearound.apply_device_id(1, "test")
self.assertEqual(123, res["apply_id"])
def test_shakearound_update_device(self):
with HTTMock(wechat_api_mock):
res = self.client.shakearound.update_device("1234", comment="test")
self.assertEqual(0, res["errcode"])
def test_shakearound_bind_device_location(self):
with HTTMock(wechat_api_mock):
res = self.client.shakearound.bind_device_location(123, 1234)
self.assertEqual(0, res["errcode"])
def test_shakearound_search_device(self):
with HTTMock(wechat_api_mock):
res = self.client.shakearound.search_device(apply_id=123)
self.assertEqual(151, res["total_count"])
self.assertEqual(2, len(res["devices"]))
def test_shakearound_add_page(self):
with HTTMock(wechat_api_mock):
res = self.client.shakearound.add_page("test", "test", "http://www.qq.com", "http://www.qq.com")
self.assertEqual(28840, res["page_id"])
def test_shakearound_update_page(self):
with HTTMock(wechat_api_mock):
res = self.client.shakearound.update_page(123, "test", "test", "http://www.qq.com", "http://www.qq.com")
self.assertEqual(28840, res["page_id"])
def test_shakearound_delete_page(self):
with HTTMock(wechat_api_mock):
res = self.client.shakearound.delete_page(123)
self.assertEqual(0, res["errcode"])
def test_shakearound_search_page(self):
with HTTMock(wechat_api_mock):
res = self.client.shakearound.search_pages(123)
self.assertEqual(2, res["total_count"])
self.assertEqual(2, len(res["pages"]))
def test_shakearound_add_material(self):
with HTTMock(wechat_api_mock):
media_file = io.StringIO("nothing")
res = self.client.shakearound.add_material(media_file, "icon")
self.assertEqual(
"http://shp.qpic.cn/wechat_shakearound_pic/0/1428377032e9dd2797018cad79186e03e8c5aec8dc/120", # NOQA
res["pic_url"],
)
def test_shakearound_bind_device_pages(self):
with HTTMock(wechat_api_mock):
res = self.client.shakearound.bind_device_pages(123, 1, 1, 1234)
self.assertEqual(0, res["errcode"])
def test_shakearound_get_shake_info(self):
with HTTMock(wechat_api_mock):
res = self.client.shakearound.get_shake_info("123456")
self.assertEqual(14211, res["page_id"])
self.assertEqual("oVDmXjp7y8aG2AlBuRpMZTb1-cmA", res["openid"])
def test_shakearound_get_device_statistics(self):
with HTTMock(wechat_api_mock):
res = self.client.shakearound.get_device_statistics("2015-04-01 00:00:00", "2015-04-17 00:00:00", 1234)
self.assertEqual(2, len(res))
def test_shakearound_get_page_statistics(self):
with HTTMock(wechat_api_mock):
res = self.client.shakearound.get_page_statistics("2015-04-01 00:00:00", "2015-04-17 00:00:00", 1234)
self.assertEqual(2, len(res))
def test_material_get_count(self):
with HTTMock(wechat_api_mock):
res = self.client.material.get_count()
self.assertEqual(1, res["voice_count"])
self.assertEqual(2, res["video_count"])
self.assertEqual(3, res["image_count"])
self.assertEqual(4, res["news_count"])
def test_shakearound_get_apply_status(self):
with HTTMock(wechat_api_mock):
res = self.client.shakearound.get_apply_status(1234)
self.assertEqual(4, len(res))
def test_reraise_requests_exception(self):
    """A non-2xx HTTP response must surface as WeChatClientException.

    The previous version only caught the exception inside try/except: if
    the client ever stopped raising, the test passed vacuously.  Using
    assertRaises makes "an exception must be raised" an explicit
    expectation.
    """

    @urlmatch(netloc=r"(.*\.)?api\.weixin\.qq\.com$")
    def _wechat_api_mock(url, request):
        return {"status_code": 404, "content": "404 not found"}

    with HTTMock(_wechat_api_mock):
        with self.assertRaises(WeChatClientException) as cm:
            self.client.material.get_count()
    # The original requests response is attached so callers can inspect it.
    self.assertEqual(404, cm.exception.response.status_code)
def test_wifi_list_shops(self):
with HTTMock(wechat_api_mock):
res = self.client.wifi.list_shops()
self.assertEqual(16, res["totalcount"])
self.assertEqual(1, res["pageindex"])
def test_wifi_get_shop(self):
with HTTMock(wechat_api_mock):
res = self.client.wifi.get_shop(1)
self.assertEqual(1, res["bar_type"])
self.assertEqual(2, res["ap_count"])
def test_wifi_add_device(self):
with HTTMock(wechat_api_mock):
res = self.client.wifi.add_device(123, "WX-test", "12345678", "00:1f:7a:ad:5c:a8")
self.assertEqual(0, res["errcode"])
def test_wifi_list_devices(self):
with HTTMock(wechat_api_mock):
res = self.client.wifi.list_devices()
self.assertEqual(2, res["totalcount"])
self.assertEqual(1, res["pageindex"])
def test_wifi_delete_device(self):
with HTTMock(wechat_api_mock):
res = self.client.wifi.delete_device("00:1f:7a:ad:5c:a8")
self.assertEqual(0, res["errcode"])
def test_wifi_get_qrcode_url(self):
with HTTMock(wechat_api_mock):
qrcode_url = self.client.wifi.get_qrcode_url(123, 0)
self.assertEqual("http://www.qq.com", qrcode_url)
def test_wifi_set_homepage(self):
with HTTMock(wechat_api_mock):
res = self.client.wifi.set_homepage(123, 0)
self.assertEqual(0, res["errcode"])
def test_wifi_get_homepage(self):
with HTTMock(wechat_api_mock):
res = self.client.wifi.get_homepage(429620)
self.assertEqual(1, res["template_id"])
self.assertEqual("http://wifi.weixin.qq.com/", res["url"])
def test_wifi_list_statistics(self):
with HTTMock(wechat_api_mock):
res = self.client.wifi.list_statistics("2015-05-01", "2015-05-02")
self.assertEqual(2, len(res))
def test_upload_mass_image(self):
media_file = io.StringIO("nothing")
with HTTMock(wechat_api_mock):
res = self.client.media.upload_mass_image(media_file)
self.assertEqual(
"http://mmbiz.qpic.cn/mmbiz/gLO17UPS6FS2xsypf378iaNhWacZ1G1UplZYWEYfwvuU6Ont96b1roYs CNFwaRrSaKTPCUdBK9DgEHicsKwWCBRQ/0", # NOQA
res,
)
def test_scan_get_merchant_info(self):
with HTTMock(wechat_api_mock):
res = self.client.scan.get_merchant_info()
self.assertEqual(8888, res["verified_firm_code_list"][0])
def test_scan_create_product(self):
with HTTMock(wechat_api_mock):
res = self.client.scan.create_product(
{
"keystandard": "ean13",
"keystr": "6900000000000",
}
)
self.assertEqual("5g0B4A90aqc", res["pid"])
def test_scan_publish_product(self):
with HTTMock(wechat_api_mock):
res = self.client.scan.publish_product("ean13", "6900873042720")
self.assertEqual(0, res["errcode"])
def test_scan_unpublish_product(self):
with HTTMock(wechat_api_mock):
res = self.client.scan.unpublish_product("ean13", "6900873042720")
self.assertEqual(0, res["errcode"])
def test_scan_set_test_whitelist(self):
with HTTMock(wechat_api_mock):
res = self.client.scan.set_test_whitelist(["openid1"], ["messense"])
self.assertEqual(0, res["errcode"])
def test_scan_get_product(self):
with HTTMock(wechat_api_mock):
res = self.client.scan.get_product("ean13", "6900873042720")
self.assertIn("brand_info", res)
def test_scan_list_product(self):
with HTTMock(wechat_api_mock):
res = self.client.scan.list_product()
self.assertEqual(2, res["total"])
def test_scan_update_product(self):
with HTTMock(wechat_api_mock):
res = self.client.scan.update_product(
{
"keystandard": "ean13",
"keystr": "6900000000000",
}
)
self.assertEqual("5g0B4A90aqc", res["pid"])
def test_scan_clear_product(self):
with HTTMock(wechat_api_mock):
res = self.client.scan.clear_product("ean13", "6900873042720")
self.assertEqual(0, res["errcode"])
def test_scan_check_ticket(self):
with HTTMock(wechat_api_mock):
res = self.client.scan.check_ticket("Ym1haDlvNXJqY3Ru1")
self.assertEqual("otAzGjrS4AYCmeJM1GhEOcHXXTAo", res["openid"])
def test_change_openid(self):
with HTTMock(wechat_api_mock):
res = self.client.user.change_openid(
"xxxxx",
["oEmYbwN-n24jxvk4Sox81qedINkQ", "oEmYbwH9uVd4RKJk7ZZg6SzL6tTo"],
)
self.assertEqual(2, len(res))
self.assertEqual("o2FwqwI9xCsVadFah_HtpPfaR-X4", res[0]["new_openid"])
self.assertEqual("ori_openid error", res[1]["err_msg"])
def test_code_to_session(self):
with HTTMock(wechat_api_mock):
res = self.client.wxa.code_to_session("023dUeGW1oeGOZ0JXvHW1SDVFW1dUeGu")
self.assertIn("session_key", res)
self.assertEqual("D1ZWEygStjuLCnZ9IN2l4Q==", res["session_key"])
self.assertEqual("o16wA0b4AZKzgVJR3MBwoUdTfU_E", res["openid"])
self.assertEqual("or4zX05h_Ykt4ju0TUfx3CQsvfTo", res["unionid"])
def test_get_phone_number(self):
with HTTMock(wechat_api_mock):
res = self.client.wxa.get_phone_number("code")
self.assertEqual("13123456789", res["phone_info"]["purePhoneNumber"])
def test_client_expires_at_consistency(self):
    """Two clients sharing one Redis-backed session see the same expiry.

    Unlike every other test in this module, this one needs a real local
    Redis server.  Previously the test errored out with a connection
    failure when none was running; now it is skipped instead, so the
    suite stays green in environments without Redis.
    """
    from redis import Redis

    from wechatpy.session.redisstorage import RedisStorage

    redis = Redis()
    try:
        redis.ping()
    except Exception:  # pragma: no cover - depends on local environment
        self.skipTest("local Redis server is not available")
    session = RedisStorage(redis)
    client1 = WeChatClient(self.app_id, self.secret, session=session)
    client2 = WeChatClient(self.app_id, self.secret, session=session)
    assert client1.expires_at == client2.expires_at
    expires_at = time.time() + 7200
    client1.expires_at = expires_at
    assert client1.expires_at == client2.expires_at == expires_at
| mit | 2bfbd205a09b1a3f31f1af72715044a2 | 40.096737 | 141 | 0.607328 | 3.301592 | false | true | false | false |
wechatpy/wechatpy | wechatpy/client/api/tag.py | 1 | 6535 | # -*- coding: utf-8 -*-
from operator import itemgetter
from wechatpy.utils import to_text
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatTag(BaseWeChatAPI):
def create(self, name):
"""
创建标签
详情请参考
https://developers.weixin.qq.com/doc/offiaccount/User_Management/User_Tag_Management.html
:param name: 标签名(30个字符以内)
:return: 返回的 JSON 数据包
"""
name = to_text(name)
return self._post(
"tags/create",
data={"tag": {"name": name}},
result_processor=itemgetter("tag"),
)
def get(self):
"""
获取公众号已创建的标签
详情请参考
https://developers.weixin.qq.com/doc/offiaccount/User_Management/User_Tag_Management.html
:return: 所有标签列表
"""
return self._get("tags/get", result_processor=itemgetter("tags"))
def update(self, tag_id, name):
"""
编辑标签
详情请参考
https://developers.weixin.qq.com/doc/offiaccount/User_Management/User_Tag_Management.html
:param tag_id: 标签id,由微信分配
:param name: 标签名字(30个字符以内)
:return: 返回的 JSON 数据包
"""
name = to_text(name)
return self._post("tags/update", data={"tag": {"id": int(tag_id), "name": name}})
def delete(self, tag_id):
"""
删除标签
详情请参考
https://developers.weixin.qq.com/doc/offiaccount/User_Management/User_Tag_Management.html
:param tag_id: 标签 ID
:return: 返回的 JSON 数据包
"""
return self._post("tags/delete", data={"tag": {"id": tag_id}})
def tag_user(self, tag_id, user_id):
"""
批量为用户打标签
详情请参考
https://developers.weixin.qq.com/doc/offiaccount/User_Management/User_Tag_Management.html
:param tag_id: 标签 ID
:param user_id: 用户 ID, 可以是单个或者列表
:return: 返回的 JSON 数据包
"""
data = {"tagid": tag_id}
if isinstance(user_id, (tuple, list)):
data["openid_list"] = user_id
else:
data["openid_list"] = [
user_id,
]
return self._post("tags/members/batchtagging", data=data)
def untag_user(self, tag_id, user_id):
"""
批量为用户取消标签
详情请参考
https://developers.weixin.qq.com/doc/offiaccount/User_Management/User_Tag_Management.html
:param tag_id: 标签 ID
:param user_id: 用户 ID, 可以是单个或者列表
:return: 返回的 JSON 数据包
"""
data = {"tagid": tag_id}
if isinstance(user_id, (tuple, list)):
data["openid_list"] = user_id
else:
data["openid_list"] = [
user_id,
]
return self._post("tags/members/batchuntagging", data=data)
def get_user_tag(self, user_id):
"""
获取用户身上的标签列表
详情请参考
https://developers.weixin.qq.com/doc/offiaccount/User_Management/User_Tag_Management.html
:param user_id: 用户 ID, 可以是单个或者列表
:return: 返回的 JSON 数据包
"""
return self._post(
"tags/getidlist",
data={"openid": user_id},
result_processor=itemgetter("tagid_list"),
)
def get_tag_users(self, tag_id, first_user_id=None):
"""
获取标签下粉丝列表
详情请参考
https://developers.weixin.qq.com/doc/offiaccount/User_Management/User_Tag_Management.html
:param tag_id: 标签 ID
:param first_user_id: 可选。第一个拉取的 OPENID,不填默认从头开始拉取
:return: 返回的 JSON 数据包
"""
data = {
"tagid": tag_id,
}
if first_user_id:
data["next_openid"] = first_user_id
return self._post("user/tag/get", data=data)
    def iter_tag_users(self, tag_id, first_user_id=None):
        """Generator over the openids of every follower under a tag.

        Transparently walks all pages returned by ``get_tag_users``.

        :param tag_id: tag id
        :param first_user_id: optional openid to start pulling from
        :return: an iterator yielding openids; usable with ``for``

        Usage::

            >>> from wechatpy import WeChatClient
            >>>
            >>> client = WeChatClient('appid', 'secret')
            >>> for openid in client.tag.iter_tag_users(0):
            >>>     print(openid)
        """
        while True:
            follower_data = self.get_tag_users(tag_id, first_user_id)
            # No "data" key means the tag has no (more) followers.
            if "data" not in follower_data:
                return
            for openid in follower_data["data"]["openid"]:
                yield openid
            # Continue from the cursor returned by the API; an empty
            # cursor marks the last page.
            first_user_id = follower_data.get("next_openid")
            if not first_user_id:
                return
def get_black_list(self, begin_openid=None):
"""
获取公众号的黑名单列表
详情请参考
https://developers.weixin.qq.com/doc/offiaccount/User_Management/Manage_blacklist.html
:param begin_openid: 起始的 OpenID,传空则默认从头开始拉取
:return: 返回的 JSON 数据包
:rtype: dict
"""
data = {}
if begin_openid:
data["begin_openid"] = begin_openid
return self._post(
"tags/members/getblacklist",
data=data,
)
def batch_black_list(self, openid_list):
"""
批量拉黑用户
详情请参考
https://developers.weixin.qq.com/doc/offiaccount/User_Management/Manage_blacklist.html
:param openid_list: 批量拉黑用户的 OpenID list, 最多20个
:type openid_list: list
"""
return self._post(
"tags/members/batchblacklist",
data={
"openid_list": openid_list,
},
)
def batch_unblack_list(self, openid_list):
"""
批量取消拉黑
详情请参考
https://developers.weixin.qq.com/doc/offiaccount/User_Management/Manage_blacklist.html
:param openid_list: 批量取消拉黑的 OpenID list, 最多20个
:type openid_list: list
"""
return self._post(
"tags/members/batchunblacklist",
data={
"openid_list": openid_list,
},
)
| mit | b20d993a44d3bf1ad66d3b3417630146 | 25.402715 | 97 | 0.538303 | 2.952935 | false | false | false | false |
ceph/teuthology | teuthology/provision/cloud/base.py | 4 | 2475 | import logging
from copy import deepcopy
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider as lc_Provider
import teuthology.orchestra.remote
import teuthology.provision.cloud
from teuthology.misc import canonicalize_hostname, decanonicalize_hostname
log = logging.getLogger(__name__)
class Provider(object):
    """Thin wrapper around a libcloud compute driver.

    Subclasses set ``_driver_posargs`` to name the config keys that must
    be passed positionally to the driver constructor; everything else in
    ``driver_args`` is passed as keyword arguments.
    """
    _driver_posargs = list()

    def __init__(self, name, conf):
        self.name = name
        self.conf = conf
        self.driver_name = self.conf['driver']

    def _get_driver(self):
        # Resolve the libcloud driver class by provider constant, then
        # split the configured args into positional and keyword parts.
        klass = get_driver(
            getattr(lc_Provider, self.driver_name.upper())
        )
        kwargs = self._get_driver_args()
        posargs = [kwargs.pop(key) for key in self._driver_posargs]
        return klass(*posargs, **kwargs)

    driver = property(fget=_get_driver)

    def _get_driver_args(self):
        # Deep-copy so callers may mutate the result without touching
        # the stored configuration.
        return deepcopy(self.conf['driver_args'])
class Provisioner(object):
    """Base class for cloud node provisioners.

    Subclasses implement ``_create`` and ``_destroy``; the public
    ``create``/``destroy`` wrappers swallow any exception, log it, and
    return False so callers can treat failures uniformly.
    """
    def __init__(
        self, provider, name, os_type=None, os_version=None,
        conf=None, user='ubuntu',
    ):
        # Accept either a Provider instance or its configured name.
        if isinstance(provider, str):
            provider = teuthology.provision.cloud.get_provider(provider)
        self.provider = provider
        # Short hostname for the cloud API, full canonical form for ssh.
        self.name = decanonicalize_hostname(name)
        self.hostname = canonicalize_hostname(name, user=None)
        self.os_type = os_type
        self.os_version = os_version
        self.user = user

    def create(self):
        """Create the node; returns the subclass result or False on error."""
        try:
            return self._create()
        except Exception:
            log.exception("Failed to create %s", self.name)
            return False

    def _create(self):
        # Override in subclasses.
        pass

    def destroy(self):
        """Destroy the node; returns the subclass result or False on error."""
        try:
            return self._destroy()
        except Exception:
            log.exception("Failed to destroy %s", self.name)
            return False

    def _destroy(self):
        # Override in subclasses.
        pass

    @property
    def remote(self):
        """Lazily-built Remote for ``user@name``; cached per instance."""
        if not hasattr(self, '_remote'):
            self._remote = teuthology.orchestra.remote.Remote(
                "%s@%s" % (self.user, self.name),
            )
        return self._remote

    def __repr__(self):
        template = "%s(provider='%s', name='%s', os_type='%s', " \
            "os_version='%s')"
        return template % (
            self.__class__.__name__,
            self.provider.name,
            self.name,
            self.os_type,
            self.os_version,
        )
| mit | 20bf47a19d09594890223f11ea8b4751 | 26.808989 | 78 | 0.575354 | 4.017857 | false | false | false | false |
wechatpy/wechatpy | wechatpy/client/api/merchant/group.py | 2 | 1165 | # -*- coding: utf-8 -*-
from wechatpy.client.api.base import BaseWeChatAPI
class MerchantGroup(BaseWeChatAPI):
    """Product-group management APIs for the WeChat merchant platform."""

    API_BASE_URL = "https://api.weixin.qq.com/"

    def add(self, name, product_list):
        """Create a product group containing the given products."""
        group_detail = {"group_name": name, "product_list": product_list}
        return self._post("merchant/group/add", data={"group_detail": group_detail})

    def delete(self, group_id):
        """Delete a product group by id."""
        return self._post("merchant/group/del", data={"group_id": group_id})

    def update(self, group_id, name):
        """Rename a product group."""
        payload = {"group_id": group_id, "group_name": name}
        return self._post("merchant/group/propertymod", data=payload)

    def update_product(self, group_id, product):
        """Modify the products belonging to a group."""
        payload = {"group_id": group_id, "product": product}
        return self._post("merchant/group/productmod", data=payload)

    def get_all(self):
        """Return the details of every product group."""
        return self._get(
            "merchant/group/getall",
            result_processor=lambda x: x["groups_detail"],
        )

    def get(self, group_id):
        """Return the detail of one product group."""
        return self._post(
            "merchant/group/getbyid",
            data={"group_id": group_id},
            result_processor=lambda x: x["group_detail"],
        )
| mit | dd870e67533f9eecb787e172ced1adeb | 29.657895 | 103 | 0.581974 | 3.530303 | false | false | false | false |
wechatpy/wechatpy | wechatpy/client/api/marketing.py | 1 | 4851 | # -*- coding: utf-8 -*-
import json
import datetime
from optionaldict import optionaldict
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatMarketing(BaseWeChatAPI):
    """WeChat marketing / ad APIs (user action sets, actions, reports, leads)."""

    API_BASE_URL = "https://api.weixin.qq.com/marketing/"

    def add_user_action_sets(self, _type, name, description, version="v1.0"):
        """
        Create a user-action data source.
        https://wximg.qq.com/wxp/pdftool/get.html?id=rkalQXDBM&pa=39

        :param _type: type of the user action source
        :param name: name of the user action source (required)
        :param description: description, 1 to 128 bytes long
        :param version: API version, defaults to "v1.0"
        :return: the unique id of the created data source
        """
        return self._post(
            "user_action_sets/add",
            params={"version": version},
            json=optionaldict(type=_type, name=name, description=description, version=version),
            result_processor=lambda x: x["data"]["user_action_set_id"],
        )

    def get_user_action_sets(self, user_action_set_id, version="v1.0"):
        """
        Fetch information about a data source.

        :param user_action_set_id: unique id of the data source
        :param version: API version, defaults to "v1.0"
        """
        return self._get(
            "user_action_sets/get",
            params={"version": version, "user_action_set_id": user_action_set_id},
            result_processor=lambda x: x["data"]["list"],
        )

    def add_user_actions(self, actions=(), version="v1.0"):
        """
        Report (upload) user actions to a data source.
        https://wximg.qq.com/wxp/pdftool/get.html?id=rkalQXDBM&pa=39

        :param actions: sequence of user action records
        :param version: API version, defaults to "v1.0"
        """
        return self._post("user_actions/add", params={"version": version}, json={"actions": actions})

    def get_user_action_set_reports(
        self,
        user_action_set_id,
        start_date=None,
        end_date=None,
        time_granularity=None,
        aggregation=None,
        version="v1.0",
    ):
        """
        Query the report of a data source.

        :param user_action_set_id: unique id of the data source
        :param start_date: start date, defaults to today
        :param end_date: end date, defaults to today
        :param time_granularity: one of {'DAILY' (per day), 'HOURLY' (per hour)}
        :param aggregation: aggregation dimension, one of {'DOMAIN', 'ACTION_TYPE'}
        :param version: API version, defaults to "v1.0"
        :return: report data
        """
        today = datetime.date.today()
        if start_date is None:
            start_date = today
        if end_date is None:
            end_date = today
        # Accept either date objects or pre-formatted strings.
        if isinstance(start_date, datetime.date):
            start_date = start_date.strftime("%Y-%m-%d")
        if isinstance(end_date, datetime.date):
            end_date = end_date.strftime("%Y-%m-%d")
        # Silently drop values outside the accepted enums.
        if time_granularity not in ("DAILY", "HOURLY"):
            time_granularity = None
        if aggregation not in ("DOMAIN", "ACTION_TYPE"):
            aggregation = None
        return self._get(
            "user_action_set_reports/get",
            params=optionaldict(
                user_action_set_id=user_action_set_id,
                date_range=json.dumps({"start_date": start_date, "end_date": end_date}),
                time_granularity=time_granularity,
                aggregation=aggregation,
                version=version,
            ),
            result_processor=lambda x: x["data"],
        )

    def get_ad_leads(
        self,
        start_date=None,
        end_date=None,
        filtering=(),
        page=1,
        page_size=100,
        version="v1.0",
    ):
        """
        Fetch sales-lead data for Moments ads.

        :param start_date: start date, defaults to today
        :param end_date: end date, defaults to today
        :param filtering: filter conditions as
            [{field: name, operator: op, values: values}]
        :param page: page number to fetch
        :param page_size: records per page (1-100)
        :param version: API version, defaults to "v1.0"
        """
        today = datetime.date.today()
        if start_date is None:
            start_date = today
        if end_date is None:
            end_date = today
        if isinstance(start_date, datetime.date):
            start_date = start_date.strftime("%Y-%m-%d")
        if isinstance(end_date, datetime.date):
            end_date = end_date.strftime("%Y-%m-%d")
        return self._get(
            "wechat_ad_leads/get",
            params=optionaldict(
                date_range=json.dumps({"start_date": start_date, "end_date": end_date}),
                filtering=json.dumps(filtering) if filtering else None,
                page=page,
                page_size=page_size,
                version=version,
            ),
            result_processor=lambda x: x["data"],
        )
| mit | cf813db29c15b878a68ce99c964be238 | 29.923611 | 101 | 0.544577 | 3.023082 | false | false | false | false |
ceph/teuthology | teuthology/provision/__init__.py | 4 | 4441 | import logging
import teuthology.lock.query
from teuthology.misc import decanonicalize_hostname, get_distro, get_distro_version
from teuthology.provision import cloud
from teuthology.provision import downburst
from teuthology.provision import fog
from teuthology.provision import openstack
from teuthology.provision import pelagos
import os
log = logging.getLogger(__name__)
def _logfile(ctx, shortname):
if hasattr(ctx, 'config') and ctx.config.get('archive_path'):
return os.path.join(ctx.config['archive_path'],
shortname + '.downburst.log')
def get_reimage_types():
    """Return the machine types handled by the bare-metal reimaging
    backends (Pelagos and FOG) combined."""
    return pelagos.get_types() + fog.get_types()
def reimage(ctx, machine_name, machine_type):
    """Reimage a bare-metal machine with the distro requested by *ctx*.

    Dispatches to whichever provisioner (Pelagos or FOG) is configured
    for *machine_type*; a type configured for both (or neither) is an
    error.

    :param ctx: the teuthology run context (provides os type/version)
    :param machine_name: name of the machine to reimage
    :param machine_type: machine type used to select the provisioner
    :return: the provisioner's ``create()`` result
    :raises Exception: if the type maps to both or to no provisioner
    """
    os_type = get_distro(ctx)
    os_version = get_distro_version(ctx)
    pelagos_types = pelagos.get_types()
    fog_types = fog.get_types()
    if machine_type in pelagos_types and machine_type in fog_types:
        raise Exception('machine_type can be used with one provisioner only')
    elif machine_type in pelagos_types:
        obj = pelagos.Pelagos(machine_name, os_type, os_version)
    elif machine_type in fog_types:
        obj = fog.FOG(machine_name, os_type, os_version)
    else:
        raise Exception("The machine_type '%s' is not known to any "
                        "of configured provisioners" % machine_type)
    return obj.create()
def create_if_vm(ctx, machine_name, _downburst=None):
    """
    Create a virtual machine for *machine_name* if its lock status says
    it is a VM; cloud machine types use their cloud provisioner, all
    others use downburst.

    :param ctx: the teuthology run context
    :param machine_name: canonical name of the machine
    :param _downburst: Only used for unit testing.
    :return: the provisioner's ``create()`` result, or False when the
        machine is not a VM
    """
    if _downburst:
        status_info = _downburst.status
    else:
        status_info = teuthology.lock.query.get_status(machine_name)
    shortname = decanonicalize_hostname(machine_name)
    machine_type = status_info['machine_type']
    os_type = get_distro(ctx)
    os_version = get_distro_version(ctx)
    if not teuthology.lock.query.is_vm(status=status_info):
        return False
    # Cloud-backed types (e.g. OpenStack via libcloud) take precedence
    # over downburst.
    if machine_type in cloud.get_types():
        return cloud.get_provisioner(
            machine_type,
            shortname,
            os_type,
            os_version,
            conf=getattr(ctx, 'config', dict()),
        ).create()
    has_config = hasattr(ctx, 'config') and ctx.config is not None
    if has_config and 'downburst' in ctx.config:
        log.warning(
            'Usage of a custom downburst config has been deprecated.'
        )
    dbrst = _downburst or \
        downburst.Downburst(name=machine_name, os_type=os_type,
                            os_version=os_version, status=status_info,
                            logfile=_logfile(ctx, shortname))
    return dbrst.create()
def destroy_if_vm(ctx, machine_name, user=None, description=None,
                  _downburst=None):
    """
    Destroy the virtual machine backing *machine_name*, if any.

    Refuses to act (returns False) when *user* or *description* is given
    and does not match the machine's lock record; returns True when the
    machine is not a VM at all.

    :param ctx: the teuthology run context
    :param machine_name: canonical name of the machine
    :param user: optional; must match the lock owner if provided
    :param description: optional; must match the lock description if
        provided
    :param _downburst: Only used for unit testing.
    :return: False only on vm destruction failures (or ownership /
        description mismatch); True otherwise
    """
    if _downburst:
        status_info = _downburst.status
    else:
        status_info = teuthology.lock.query.get_status(machine_name)
    if not status_info or not teuthology.lock.query.is_vm(status=status_info):
        return True
    if user is not None and user != status_info['locked_by']:
        msg = "Tried to destroy {node} as {as_user} but it is locked " + \
            "by {locked_by}"
        log.error(msg.format(node=machine_name, as_user=user,
                             locked_by=status_info['locked_by']))
        return False
    if (description is not None and description !=
            status_info['description']):
        msg = "Tried to destroy {node} with description {desc_arg} " + \
            "but it is locked with description {desc_lock}"
        log.error(msg.format(node=machine_name, desc_arg=description,
                             desc_lock=status_info['description']))
        return False
    machine_type = status_info.get('machine_type')
    shortname = decanonicalize_hostname(machine_name)
    # Cloud-backed types are torn down through their own provisioners;
    # everything else falls through to downburst.
    if machine_type == 'openstack':
        return openstack.ProvisionOpenStack().destroy(shortname)
    elif machine_type in cloud.get_types():
        return cloud.get_provisioner(
            machine_type, shortname, None, None).destroy()
    dbrst = _downburst or \
        downburst.Downburst(name=machine_name, os_type=None,
                            os_version=None, status=status_info,
                            logfile=_logfile(ctx, shortname))
    return dbrst.destroy()
| mit | 66e0424aaf99ffcb41303353e9a98404 | 35.702479 | 83 | 0.634767 | 3.744519 | false | true | false | false |
wechatpy/wechatpy | wechatpy/work/client/api/external_contact_group_chat.py | 2 | 5022 | # -*- coding: utf-8 -*-
from typing import List
from optionaldict import optionaldict
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatExternalContactGroupChat(BaseWeChatAPI):
    """
    Customer group-chat APIs for WeChat Work external contacts.

    https://work.weixin.qq.com/api/doc#90000/90135/90221
    """

    def list(
        self,
        status_filter: int = 0,
        owner_userid_list: List = None,
        cursor: str = None,
        limit: int = 100,
    ):
        """
        List the customer group chats configured for management.

        https://work.weixin.qq.com/api/doc/90000/90135/92120

        :param status_filter: follow-up status filter:
            0 - all (no filtering, default)
            1 - owner left, waiting to be inherited
            2 - inheritance in progress
            3 - inheritance finished
        :param owner_userid_list: filter by group-owner ids; when omitted,
            all owners visible to the app are included (note: more than
            1000 visible users triggers API error 81017)
        :param cursor: pagination cursor returned by the previous call;
            omit on the first call
        :param limit: page size, 1 to 1000, defaults to 100
        :return: JSON response from the API
        """
        data = optionaldict(
            status_filter=status_filter,
            cursor=cursor,
            limit=limit,
        )
        if owner_userid_list:
            data["owner_filter"] = {"userid_list": owner_userid_list}
        return self._post("externalcontact/groupchat/list", data=data)

    def list_all(self, status_filter: int = 0, owner_userid_list: List = None, limit: int = 100) -> List:
        """
        Like :meth:`list`, but follows every pagination cursor and
        returns the combined list of group chats.
        """
        chat_list = []
        cursor = None
        while True:
            result = self.list(status_filter, owner_userid_list, cursor, limit)
            if result["errcode"] == 0:
                chat_list.extend(result["group_chat_list"])
            if result.get("next_cursor"):
                cursor = result["next_cursor"]
            else:
                break
        return chat_list

    def get(self, chat_id: str):
        """
        Fetch the detail of a customer group chat by id: name, member
        list, join time and join scene. Changes to a group may be
        reported asynchronously, so this call can briefly lag behind the
        change events.

        Note: despite the "get" name, the endpoint requires a POST.

        https://work.weixin.qq.com/api/doc/90000/90135/92122

        :param chat_id: customer group-chat id
        :return: JSON response from the API
        """
        return self._post("externalcontact/groupchat/get", data={"chat_id": chat_id})

    def statistic(
        self,
        day_begin_time: int,
        day_end_time: int = None,
        owner_userid_list: List = None,
        order_by: int = 1,
        order_asc: int = 0,
        offset: int = 0,
        limit: int = 500,
    ):
        """
        Query group-chat statistics for a date range. WeChat Work only
        keeps 180 days of data.

        :param day_begin_time: start-day timestamp at 00:00:00 (other
            times are truncated to midnight by the server); must lie
            within the last 180 days
        :param day_end_time: end-day timestamp at 00:00:00, same range;
            defaults to ``day_begin_time`` (i.e. a single day)
        :param owner_userid_list: filter by group owners; all owners
            when omitted
        :param order_by: sort key, defaults to 1:
            1 - number of new groups
            2 - total number of groups
            3 - number of new members
            4 - total number of members
        :param order_asc: 1 for ascending, 0 (default) for descending
        :param offset: pagination offset, defaults to 0
        :param limit: page size, 1 to 1000, defaults to 500
        :return: JSON response from the API
        """
        if not day_end_time:
            day_end_time = day_begin_time
        data = optionaldict(
            day_begin_time=day_begin_time,
            day_end_time=day_end_time,
            order_by=order_by,
            order_asc=order_asc,
            offset=offset,
            limit=limit,
        )
        if owner_userid_list:
            data["owner_filter"] = {"userid_list": owner_userid_list}
        return self._post("externalcontact/groupchat/statistic", data=data)
| mit | 78db5729260b5339c741c11b12ec3d53 | 34.018182 | 112 | 0.532451 | 2.182436 | false | false | false | false |
wechatpy/wechatpy | wechatpy/work/client/api/batch.py | 2 | 4677 | # -*- coding: utf-8 -*-
from optionaldict import optionaldict
from wechatpy.client.api.base import BaseWeChatAPI
from wechatpy.utils import to_text
class WeChatBatch(BaseWeChatAPI):
    """
    Asynchronous batch APIs for WeChat Work.

    https://work.weixin.qq.com/api/doc#90000/90135/90979

    These endpoints submit large jobs and return immediately; WeChat Work
    continues the task in the background and notifies the app via an
    (encrypted) task-completion event once finished. Currently only
    contact-directory updates are exposed as batch operations.
    """

    def sync_user(self, url, token, encoding_aes_key, media_id, to_invite=True):
        """
        Incrementally update members from an uploaded CSV file.

        https://work.weixin.qq.com/api/doc#90000/90135/90980

        :param url: callback URL (http or https) that receives the
            task-completion event
        :param token: token used to sign the callback
        :param encoding_aes_key: Base64-encoded AES key used to encrypt
            the callback body
        :param media_id: media id of the uploaded CSV file
        :param to_invite: whether newly created members are invited to
            join WeChat Work (notification/SMS/email, re-sent daily for
            up to 3 working days); defaults to True
        :return: JSON response from the API
        """
        return self._post(
            "batch/syncuser",
            data={
                "media_id": media_id,
                "to_invite": to_invite,
                "callback": {
                    "url": url,
                    "token": token,
                    "encodingaeskey": encoding_aes_key,
                },
            },
        )

    def replace_user(self, url, token, encoding_aes_key, media_id, to_invite=True):
        """
        Fully replace the member list from an uploaded CSV file.

        https://work.weixin.qq.com/api/doc#90000/90135/90981

        :param url: callback URL (http or https) that receives the
            task-completion event
        :param token: token used to sign the callback
        :param encoding_aes_key: Base64-encoded AES key used to encrypt
            the callback body
        :param media_id: media id of the uploaded CSV file
        :param to_invite: whether newly created members are invited to
            join WeChat Work; defaults to True
        :return: JSON response from the API
        """
        return self._post(
            "batch/replaceuser",
            data={
                "media_id": media_id,
                "to_invite": to_invite,
                "callback": {
                    "url": url,
                    "token": token,
                    "encodingaeskey": encoding_aes_key,
                },
            },
        )

    def replace_party(self, url, token, encoding_aes_key, media_id):
        """
        Fully replace the department tree from an uploaded CSV file.

        https://work.weixin.qq.com/api/doc#90000/90135/90982

        :param url: callback URL (http or https) that receives the
            task-completion event
        :param token: token used to sign the callback
        :param encoding_aes_key: Base64-encoded AES key used to encrypt
            the callback body
        :param media_id: media id of the uploaded CSV file
        :return: JSON response from the API
        """
        return self._post(
            "batch/replaceparty",
            data={
                "media_id": media_id,
                "callback": {
                    "url": url,
                    "token": token,
                    "encodingaeskey": encoding_aes_key,
                },
            },
        )

    def get_result(self, job_id):
        """
        Fetch the result of an asynchronous batch job.

        https://work.weixin.qq.com/api/doc#90000/90135/90983

        :param job_id: job id, at most 64 characters
        :return: JSON response from the API
        """
        return self._get("batch/getresult", params={"jobid": job_id})

    def invite(self, user=None, party=None, tag=None):
        """
        Batch-invite members to use WeChat Work; invitees are notified
        by SMS or email.

        https://work.weixin.qq.com/api/doc#90000/90135/90975

        :param user: list of member ids, at most 1000
        :param party: list of department ids, at most 100
        :param tag: list of tag ids, at most 100
        :return: JSON response from the API
        """
        data = optionaldict(user=user, party=party, tag=tag)
        return self._post("batch/invite", data=data)
| mit | f22fe3256755ab368429fdd2bef9db4b | 28.725 | 91 | 0.550042 | 2.152686 | false | false | false | false |
ceph/teuthology | teuthology/exceptions.py | 2 | 4909 | class BranchNotFoundError(ValueError):
def __init__(self, branch, repo=None):
self.branch = branch
self.repo = repo
def __str__(self):
if self.repo:
repo_str = " in repo: %s" % self.repo
else:
repo_str = ""
return "Branch '{branch}' not found{repo_str}!".format(
branch=self.branch, repo_str=repo_str)
class BranchMismatchError(ValueError):
    """Raised when a branch cannot be used together with a given repo."""

    def __init__(self, branch, repo, reason=None):
        self.branch = branch
        self.repo = repo
        self.reason = reason

    def __str__(self):
        base = f"Cannot use branch {self.branch} with repo {self.repo}"
        if not self.reason:
            return base
        return f"{base} because {self.reason}"
class CommitNotFoundError(ValueError):
    """Raised when a commit cannot be resolved, optionally in a repo."""

    def __init__(self, commit, repo=None):
        self.commit = commit
        self.repo = repo

    def __str__(self):
        where = " in repo: %s" % self.repo if self.repo else ""
        return "'%s' not found%s!" % (self.commit, where)
class GitError(RuntimeError):
    # Generic failure while running a git operation.
    pass


class BootstrapError(RuntimeError):
    # Failure while bootstrapping the environment.
    pass


class ConfigError(RuntimeError):
    """
    Meant to be used when an invalid config entry is found.
    """
    pass


class ParseError(Exception):
    # Failure while parsing input.
    pass
class CommandFailedError(Exception):
    """
    Exception thrown on command failure
    """
    def __init__(self, command, exitstatus, node=None, label=None):
        self.command = command
        self.exitstatus = exitstatus
        self.node = node
        self.label = label

    def __str__(self):
        parts = ["Command failed"]
        if self.label:
            parts.append("(%s)" % self.label)
        if self.node:
            parts.append("on %s" % self.node)
        return "%s with status %s: %r" % (
            " ".join(parts), self.exitstatus, self.command)

    def fingerprint(self):
        """
        Returns a list of strings to group failures with.
        Used by sentry instead of grouping by backtrace.
        """
        return [
            self.label or self.command,
            'exit status {}'.format(self.exitstatus),
            '{{ type }}',
        ]
class AnsibleFailedError(Exception):
    """
    Exception thrown when an ansible playbook fails
    """
    def __init__(self, failures):
        self.failures = failures

    def __str__(self):
        return "{}".format(self.failures)
class CommandCrashedError(Exception):
    """
    Exception thrown on crash
    """
    def __init__(self, command):
        self.command = command

    def __str__(self):
        return "Command crashed: %r" % (self.command,)
class ConnectionLostError(Exception):
    """
    Exception thrown when the connection is lost
    """
    def __init__(self, command, node=None):
        self.command = command
        self.node = node

    def __str__(self):
        where = 'to %s ' % self.node if self.node else ''
        return "SSH connection %swas lost: %r" % (where, self.command)
class ScheduleFailError(RuntimeError):
    """Raised when scheduling a run fails.

    :param message: reason for the failure
    :param name: name of the run being scheduled, if known
    """
    def __init__(self, message, name=None):
        self.message = message
        self.name = name

    def __str__(self):
        # Omit the name entirely when it is unknown instead of
        # rendering the literal string "None"; collapse the resulting
        # double space so the message reads naturally either way.
        return "Scheduling {name} failed: {msg}".format(
            name=self.name or '',
            msg=self.message,
        ).replace('  ', ' ')
class VersionNotFoundError(Exception):
    """Raised when the package version cannot be fetched from a URL."""

    def __init__(self, url):
        self.url = url

    def __str__(self):
        return "Failed to fetch package version from %s" % self.url
class UnsupportedPackageTypeError(Exception):
    """Raised for a node whose OS package type is not supported."""
    def __init__(self, node):
        # ``node`` is expected to expose ``os.package_type`` (a Remote);
        # TODO confirm against callers.
        self.node = node

    def __str__(self):
        return "os.package_type {pkg_type!r} on {node}".format(
            node=self.node, pkg_type=self.node.os.package_type)
class SELinuxError(Exception):
    """Raised when new SELinux denials are found on a node."""

    def __init__(self, node, denials):
        self.node = node
        self.denials = denials

    def __str__(self):
        return "SELinux denials found on %s: %s" % (self.node, self.denials)
class QuotaExceededError(Exception):
    """Raised with a provider-supplied message when a quota is exceeded."""
    def __init__(self, message):
        self.message = message

    def __str__(self):
        return self.message


class SkipJob(Exception):
    """
    Used by teuthology.worker when it notices that a job is broken and should
    be skipped.
    """
    pass


class MaxWhileTries(Exception):
    # NOTE(review): presumably raised when a retry/wait loop exhausts its
    # attempts — confirm against the helper that raises it.
    pass


class ConsoleError(Exception):
    # Failure interacting with a machine console.
    pass


class NoRemoteError(Exception):
    # Fixed message; __str__ below simply returns it.
    message = "This operation requires a remote"

    def __str__(self):
        return self.message
| mit | 7bcb529a375ca3c10befbe3b3b4bf6b6 | 22.714976 | 77 | 0.574862 | 4.020475 | false | false | false | false |
wechatpy/wechatpy | wechatpy/client/api/merchant/shelf.py | 2 | 1043 | # -*- coding: utf-8 -*-
from wechatpy.client.api.base import BaseWeChatAPI
class MerchantShelf(BaseWeChatAPI):
    """Shelf management APIs for the WeChat merchant platform."""

    API_BASE_URL = "https://api.weixin.qq.com/"

    def add(self, name, banner, shelf_data):
        """Create a shelf with the given name, banner and layout data."""
        return self._post(
            "merchant/shelf/add",
            data={"shelf_name": name, "shelf_banner": banner, "shelf_data": shelf_data},
        )

    def delete(self, shelf_id):
        """Delete a shelf by id."""
        return self._post("merchant/shelf/del", data={"shelf_id": shelf_id})

    def update(self, shelf_id, name, banner, shelf_data):
        """Modify an existing shelf.

        Bug fix: this previously posted to the ``merchant/shelf/add``
        endpoint, which would create a brand-new shelf instead of
        updating ``shelf_id``; the correct endpoint is
        ``merchant/shelf/mod``.
        """
        return self._post(
            "merchant/shelf/mod",
            data={
                "shelf_id": shelf_id,
                "shelf_name": name,
                "shelf_banner": banner,
                "shelf_data": shelf_data,
            },
        )

    def get_all(self):
        """Return information about every shelf."""
        res = self._get("merchant/shelf/getall", result_processor=lambda x: x["shelves"])
        return res

    def get(self, shelf_id):
        """Fetch one shelf by id."""
        return self._post("merchant/shelf/getbyid", data={"shelf_id": shelf_id})
| mit | 083e6b2d4f8f5e27bbb19b6acd51d6a1 | 28.8 | 89 | 0.554171 | 3.249221 | false | false | false | false |
ceph/teuthology | teuthology/task/selinux.py | 2 | 7847 | import logging
import os
from io import StringIO
from teuthology.exceptions import SELinuxError
from teuthology.misc import get_archive_dir
from teuthology.orchestra.cluster import Cluster
from teuthology.orchestra import run
from teuthology.task import Task
log = logging.getLogger(__name__)
class SELinux(Task):
    """
    A task to set the SELinux mode during test execution. Note that SELinux
    must first be enabled and the filesystem must have been labeled.

    On teardown, also checks the audit log for any denials.
    By default selinux will ignore few known denials(listed below). The test
    will fail for any other denials seen in audit.log. For the test not to
    fail for other denials one can add the overrides with appropriate escapes

    overrides:
      selinux:
        allowlist:
        - 'name="cephtest"'
        - 'dmidecode'
        - 'comm="logrotate"'
        - 'comm="idontcare"'

    Known denials which are ignored:
      comm="dmidecode"
      chronyd.service
      name="cephtest"

    Automatically skips hosts running non-RPM-based OSes.
    """
    def __init__(self, ctx, config):
        super(SELinux, self).__init__(ctx, config)
        self.log = log
        self.mode = self.config.get('mode', 'permissive')

    def filter_hosts(self):
        """
        Exclude any non-RPM-based hosts, and any downburst VMs
        """
        super(SELinux, self).filter_hosts()
        new_cluster = Cluster()
        for (remote, roles) in self.cluster.remotes.items():
            if remote.is_vm:
                msg = "Excluding {host}: VMs are not yet supported"
                log.info(msg.format(host=remote.shortname))
            elif remote.is_container:
                msg = "Excluding {host}: containers are not yet supported"
                log.info(msg.format(host=remote.shortname))
            elif remote.os.name in ['opensuse', 'sle']:
                msg = "Excluding {host}: " \
                      "SELinux is not supported for '{os}' os_type yet"
                log.info(msg.format(host=remote.shortname, os=remote.os.name))
            elif remote.os.package_type == 'rpm':
                new_cluster.add(remote, roles)
            else:
                msg = "Excluding {host}: OS '{os}' does not support SELinux"
                log.debug(msg.format(host=remote.shortname, os=remote.os.name))
        self.cluster = new_cluster
        return self.cluster

    def setup(self):
        super(SELinux, self).setup()
        self.rotate_log()
        self.old_modes = self.get_modes()
        self.old_denials = self.get_denials()
        self.set_mode()

    def rotate_log(self):
        """Rotate auditd logs so we start the run with a fresh audit.log."""
        self.cluster.run(args="sudo service auditd rotate")

    def get_modes(self):
        """
        Get the current SELinux mode from each host so that we can restore
        during teardown
        """
        log.debug("Getting current SELinux state")
        modes = dict()
        for remote in self.cluster.remotes.keys():
            result = remote.run(
                args=['/usr/sbin/getenforce'],
                stdout=StringIO(),
            )
            modes[remote.name] = result.stdout.getvalue().strip().lower()
        log.debug("Existing SELinux modes: %s", modes)
        return modes

    def set_mode(self):
        """
        Set the requested SELinux mode
        """
        log.info("Putting SELinux into %s mode", self.mode)
        for remote in self.cluster.remotes.keys():
            mode = self.old_modes[remote.name]
            # Modes from get_modes() are lowercased; the capitalized
            # comparison is kept for safety only.
            if mode == "Disabled" or mode == "disabled":
                continue
            remote.run(
                args=['sudo', '/usr/sbin/setenforce', self.mode],
            )

    def get_denials(self):
        """
        Look for denials in the audit log
        """
        all_denials = dict()
        # dmidecode issue:
        #   https://bugzilla.redhat.com/show_bug.cgi?id=1289274
        # tracker for chronyd/cephtest issue:
        #   http://tracker.ceph.com/issues/14244
        known_denials = [
            'comm="dmidecode"',
            'chronyd.service',
            'name="cephtest"',
            'scontext=system_u:system_r:nrpe_t:s0',
            'scontext=system_u:system_r:pcp_pmlogger_t',
            'scontext=system_u:system_r:pcp_pmcd_t:s0',
            'comm="rhsmd"',
            'scontext=system_u:system_r:syslogd_t:s0',
            'tcontext=system_u:system_r:nrpe_t:s0',
            'comm="updatedb"',
            'comm="smartd"',
            'comm="rhsmcertd-worke"',
            'comm="setroubleshootd"',
            'comm="rpm"',
            'tcontext=system_u:object_r:container_runtime_exec_t:s0',
            'comm="ksmtuned"',
            'comm="sssd"',
            'comm="sss_cache"',
            'context=system_u:system_r:NetworkManager_dispatcher_t:s0',
        ]
        se_allowlist = self.config.get('allowlist', [])
        if se_allowlist:
            known_denials.extend(se_allowlist)
        # Build a grep -v alternation: '\(a\|b\|...\)'. Raw strings are
        # used so the backslashes are not (invalid) Python escape
        # sequences; the resulting value is unchanged.
        ignore_known_denials = r"'\(" + r"\|".join(known_denials) + r"\)'"
        for remote in self.cluster.remotes.keys():
            proc = remote.run(
                args=['sudo', 'grep', '-a', 'avc: .*denied',
                      '/var/log/audit/audit.log', run.Raw('|'), 'grep', '-av',
                      run.Raw(ignore_known_denials)],
                stdout=StringIO(),
                check_status=False,
            )
            output = proc.stdout.getvalue()
            if output:
                denials = output.strip().split('\n')
                log.debug("%s has %s denials", remote.name, len(denials))
            else:
                denials = []
            all_denials[remote.name] = denials
        return all_denials

    def teardown(self):
        self.restore_modes()
        self.archive_log()
        self.get_new_denials()

    def restore_modes(self):
        """
        If necessary, restore previous SELinux modes
        """
        # If there's nothing to do, skip this
        if not set(self.old_modes.values()).difference(set([self.mode])):
            return
        log.info("Restoring old SELinux modes")
        for remote in self.cluster.remotes.keys():
            mode = self.old_modes[remote.name]
            if mode == "Disabled" or mode == "disabled":
                continue
            if mode != self.mode:
                remote.run(
                    args=['sudo', '/usr/sbin/setenforce', mode],
                )

    def archive_log(self):
        """Copy each node's audit.log into the run's archive directory."""
        if not hasattr(self.ctx, 'archive') or not self.ctx.archive:
            return
        archive_dir = get_archive_dir(self.ctx)
        audit_archive = os.path.join(archive_dir, 'audit')
        mkdir_cmd = "mkdir {audit_archive}"
        cp_cmd = "sudo cp /var/log/audit/audit.log {audit_archive}"
        chown_cmd = "sudo chown $USER {audit_archive}/audit.log"
        gzip_cmd = "gzip {audit_archive}/audit.log"
        full_cmd = " && ".join((mkdir_cmd, cp_cmd, chown_cmd, gzip_cmd))
        self.cluster.run(
            args=full_cmd.format(audit_archive=audit_archive)
        )

    def get_new_denials(self):
        """
        Determine if there are any new denials in the audit log
        """
        all_denials = self.get_denials()
        new_denials = dict()
        for remote in self.cluster.remotes.keys():
            old_host_denials = self.old_denials[remote.name]
            all_host_denials = all_denials[remote.name]
            new_host_denials = set(all_host_denials).difference(
                set(old_host_denials)
            )
            new_denials[remote.name] = list(new_host_denials)

        for remote in self.cluster.remotes.keys():
            if len(new_denials[remote.name]):
                raise SELinuxError(node=remote,
                                   denials=new_denials[remote.name])
task = SELinux
| mit | 615179078019c3516cb589a7efb4697f | 35.16129 | 79 | 0.555244 | 3.736667 | false | false | false | false |
ceph/teuthology | teuthology/orchestra/daemon/group.py | 4 | 7625 | from teuthology import misc
from teuthology.orchestra.daemon.state import DaemonState
from teuthology.orchestra.daemon.systemd import SystemDState
from teuthology.orchestra.daemon.cephadmunit import CephadmUnit
class DaemonGroup(object):
"""
Collection of daemon state instances
"""
    def __init__(self, use_systemd=False, use_cephadm=None):
        """
        self.daemons is a dictionary indexed by role. Each entry is a
        dictionary of DaemonState values indexed by an id parameter.

        :param use_systemd: Whether or not to use systemd when appropriate
                            (default: False) Note: This option may be removed
                            in the future.
        :param use_cephadm: When set, daemons are managed as cephadm units.
        """
        self.daemons = {}
        self.use_systemd = use_systemd
        self.use_cephadm = use_cephadm
    def add_daemon(self, remote, type_, id_, *args, **kwargs):
        """
        Add a daemon. If there already is a daemon for this id_ and role, stop
        that daemon.  (Re)start the daemon once the new value is set.

        :param remote: Remote site
        :param type_: type of daemon (osd, mds, mon, rgw,  for example)
        :param id_: Id (index into role dictionary)
        :param args: Daemonstate positional parameters
        :param kwargs: Daemonstate keyword parameters
        """
        # for backwards compatibility with older ceph-qa-suite branches,
        # we can only get optional args from unused kwargs entries
        self.register_daemon(remote, type_, id_, *args, **kwargs)
        # register_daemon leaves 'cluster' in kwargs; pop it here to build
        # the same role key it used.
        cluster = kwargs.pop('cluster', 'ceph')
        role = cluster + '.' + type_
        self.daemons[role][id_].restart()
    def register_daemon(self, remote, type_, id_, *args, **kwargs):
        """
        Add a daemon. If there already is a daemon for this id_ and role, stop
        that daemon.

        :param remote: Remote site
        :param type_: type of daemon (osd, mds, mon, rgw,  for example)
        :param id_: Id (index into role dictionary)
        :param args: Daemonstate positional parameters
        :param kwargs: Daemonstate keyword parameters
        """
        # for backwards compatibility with older ceph-qa-suite branches,
        # we can only get optional args from unused kwargs entries
        cluster = kwargs.pop('cluster', 'ceph')
        role = cluster + '.' + type_
        if role not in self.daemons:
            self.daemons[role] = {}
        if id_ in self.daemons[role]:
            self.daemons[role][id_].stop()
            self.daemons[role][id_] = None

        # Pick the daemon-state backend: cephadm unit, systemd unit, or
        # plain process management.
        klass = DaemonState
        if self.use_cephadm:
            klass = CephadmUnit
            kwargs['use_cephadm'] = self.use_cephadm
        elif self.use_systemd and \
                not any(i == 'valgrind' for i in args) and \
                remote.init_system == 'systemd':
            # We currently cannot use systemd and valgrind together because
            # it would require rewriting the unit files
            klass = SystemDState
        self.daemons[role][id_] = klass(
            remote, role, id_, *args, **kwargs)
def get_daemon(self, type_, id_, cluster='ceph'):
    """
    Return the daemon registered for this id_ and role, or None.

    :param type_: type of daemon (osd, mds, mon, rgw, for example)
    :param id_: Id (index into role dictionary); looked up as str(id_)
    :param cluster: cluster name the role belongs to
    """
    role = cluster + '.' + type_
    return self.daemons.get(role, {}).get(str(id_), None)
def iter_daemons_of_role(self, type_, cluster='ceph'):
    """
    Return all daemon instances registered for this role.

    :param type_: type of daemon (osd, mds, mon, rgw, for example)
    :param cluster: cluster name the role belongs to
    :return: a view of daemon values (empty when the role is unknown)
    """
    return self.daemons.get(cluster + '.' + type_, {}).values()
def resolve_role_list(self, roles, types, cluster_aware=False):
    """
    Resolve a configuration setting that may be None or contain wildcards
    into a list of roles (where a role is e.g. 'mds.a' or 'osd.0').  This
    is useful for tasks that take user input specifying a flexible subset
    of the available roles.

    The task calling this must specify what kinds of roles it can
    handle using the ``types`` argument, where a role type is 'osd' or
    'mds' for example.  When selecting roles this is used as a filter, or
    when an explicit list of roles is passed, an exception is raised if
    any are not of a suitable type.

    Examples:

    ::

        # Passing None (i.e. user left config blank) defaults to all roles
        # (filtered by ``types``)
        None, types=['osd', 'mds', 'mon'] ->
          ['osd.0', 'osd.1', 'osd.2', 'mds.a', 'mds.b', 'mon.a']

        # Wildcards are expanded
        roles=['mds.*', 'osd.0'], types=['osd', 'mds', 'mon'] ->
          ['mds.a', 'mds.b', 'osd.0']

        # Boring lists are unaltered
        roles=['osd.0', 'mds.a'], types=['osd', 'mds', 'mon'] ->
          ['osd.0', 'mds.a']

        # Entries in role list that don't match types result in an
        # exception
        roles=['osd.0', 'mds.a'], types=['osd'] -> RuntimeError

    :param roles: List (of roles or wildcards) or None (select all suitable
                  roles)
    :param types: List of acceptable role types, for example
                  ['osd', 'mds'].
    :param cluster_aware: bool to determine whether to consider include
                          cluster in the returned roles - just for
                          backwards compatibility with pre-jewel versions
                          of ceph-qa-suite
    :return: List of strings like ["mds.0", "osd.2"]
    """
    assert (isinstance(roles, list) or roles is None)

    resolved = []
    if roles is None:
        # Handle default: all roles available (filtered by ``types``)
        for type_ in types:
            for role, daemons in self.daemons.items():
                if not role.endswith('.' + type_):
                    continue
                for daemon in daemons.values():
                    prefix = type_
                    if cluster_aware:
                        prefix = daemon.role
                    resolved.append(prefix + '.' + daemon.id_)
    else:
        # Handle explicit list of roles or wildcards
        for raw_role in roles:
            try:
                cluster, role_type, role_id = misc.split_role(raw_role)
            except ValueError:
                msg = ("Invalid role '{0}', roles must be of format "
                       "[<cluster>.]<type>.<id>").format(raw_role)
                raise RuntimeError(msg)

            if role_type not in types:
                msg = "Invalid role type '{0}' in role '{1}'".format(
                    role_type, raw_role)
                raise RuntimeError(msg)

            if role_id == "*":
                # Handle wildcard: expand to all registered roles of the type
                for daemon in self.iter_daemons_of_role(role_type,
                                                        cluster=cluster):
                    prefix = role_type
                    if cluster_aware:
                        prefix = daemon.role
                    resolved.append(prefix + '.' + daemon.id_)
            else:
                # Handle explicit role: passed through unaltered
                resolved.append(raw_role)

    return resolved
| mit | a46aa1a1b94373e89895b1f46ad9da9d | 41.361111 | 79 | 0.543082 | 4.344729 | false | false | false | false |
wechatpy/wechatpy | wechatpy/client/api/freepublish.py | 1 | 2847 | from wechatpy.client.api.base import BaseWeChatAPI
class WeChatFreePublish(BaseWeChatAPI):
    """
    WeChat Official Account publishing ("freepublish") APIs.

    Publish drafts from the draft box, poll publishing status, and manage
    already-published articles.
    """

    def submit(self, media_id: str) -> dict:
        """
        Publish a draft.

        See:
        https://developers.weixin.qq.com/doc/offiaccount/Publish/Publish.html

        :param media_id: the ``media_id`` of the draft to publish
        :return: decoded JSON response
        """
        return self._post("freepublish/submit", data={"media_id": media_id})

    def get(self, publish_id: str) -> dict:
        """
        Poll the status of a publish task.

        See:
        https://developers.weixin.qq.com/doc/offiaccount/Publish/Get_status.html

        :param publish_id: id of the publish task
        :return: decoded JSON response
        """
        return self._post("freepublish/get", data={"publish_id": publish_id})

    def delete(self, article_id: str, index: int = 0) -> dict:
        """
        Delete a published item.  This operation cannot be undone.

        See:
        https://developers.weixin.qq.com/doc/offiaccount/Publish/Delete_posts.html

        :param article_id: the ``article_id`` returned on successful publish
        :param index: 1-based position of the article within the news item;
            0 (or omitted) deletes all articles
        :return: decoded JSON response
        """
        return self._post("freepublish/delete", data={"article_id": article_id, "index": index})

    def getarticle(self, article_id: str) -> dict:
        """
        Fetch a published article by ``article_id``.

        See:
        https://developers.weixin.qq.com/doc/offiaccount/Publish/Get_article_from_id.html

        :param article_id: the ``article_id`` of the published article
        :return: decoded JSON response
        """
        return self._post("freepublish/getarticle", data={"article_id": article_id})

    def batchget(self, offset: int, count: int, no_content: int = 0) -> dict:
        """
        List successfully published items.

        See:
        https://developers.weixin.qq.com/doc/offiaccount/Publish/Get_publication_records.html

        :param offset: offset into the full list; 0 returns from the first item
        :param count: number of items to return, between 1 and 20
        :param no_content: 1 to omit the ``content`` field; 0 (default) to
            include it
        :return: decoded JSON response
        """
        return self._post("freepublish/batchget", data={"offset": offset, "count": count, "no_content": no_content})
| mit | fed1b3d5623f637f76db439f8f92fb62 | 30.842857 | 116 | 0.613728 | 2.224551 | false | false | false | false |
wechatpy/wechatpy | wechatpy/work/client/api/kf.py | 1 | 8169 | # -*- coding: utf-8 -*-
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatKF(BaseWeChatAPI):
    """
    WeChat Work customer-service ("kf") APIs.

    https://work.weixin.qq.com/api/doc/90000/90135/94670
    """

    def sync_msg(self, token, cursor="", limit=1000):
        """
        Pull customer-service messages and events.

        Returns messages sent by WeChat customers, replies sent by agents
        from WeChat Work, send-failure events (e.g. message rejected by the
        user), and replies to menu-message clicks.  Messages sent through
        the send-message API itself cannot be read back.
        Supported message types: text, image, voice, video, file, location,
        link, business card, mini-program, event.

        :param token: token carried by the callback event; valid for 10
            minutes.  May be omitted, but the API is then strictly
            rate-limited.  At most 128 bytes.
        :param cursor: ``next_cursor`` returned by the previous call; may be
            empty for the first pull.  At most 64 bytes.
        :param limit: expected number of records; default and maximum are
            both 1000.  Note: fewer than ``limit`` records may be returned;
            check the ``has_more`` field of the response to decide whether
            to keep pulling.
        :return: decoded JSON response
        """
        data = {
            "token": token,
            "cursor": cursor,
            "limit": limit,
        }
        return self._post("kf/sync_msg", data=data)

    def get_service_state(self, open_kfid, external_userid):
        """
        Get the state of a customer-service session.

        ID  state             meaning
        0   untreated         new session; options: 1. reply directly via
                              the API, 2. put it into the waiting pool,
                              3. assign a specific agent
        1   bot handling      the smart assistant is serving; API replies
                              allowed; may be moved to the waiting pool or
                              to a specific agent
        2   queued in pool    waiting in the pool for an agent; may be
                              reassigned to a specific agent
        3   human handling    an agent is serving; the session may be ended
        4   ended             session over; state may not change until the
                              customer starts a new enquiry

        :param open_kfid: customer-service account id
        :param external_userid: the WeChat customer's external_userid
        :return: decoded JSON response
        """
        data = {
            "open_kfid": open_kfid,
            "external_userid": external_userid,
        }
        return self._post("kf/service_state/get", data=data)

    def trans_service_state(self, open_kfid, external_userid, service_state, servicer_userid=""):
        """
        Change the state of a customer-service session.

        :param open_kfid: customer-service account id
        :param external_userid: the WeChat customer's external_userid
        :param service_state: target session state; see the state table in
            :meth:`get_service_state`
        :param servicer_userid: userid of the receiving agent; only sent
            when non-empty
        :return: decoded JSON response
        """
        data = {
            "open_kfid": open_kfid,
            "external_userid": external_userid,
            "service_state": service_state,
        }
        if servicer_userid:
            data["servicer_userid"] = servicer_userid
        return self._post("kf/service_state/trans", data=data)

    def get_servicer_list(self, open_kfid):
        """
        List the agents of a customer-service account.

        :param open_kfid: customer-service account id
        :return: decoded JSON response
        """
        data = {
            "open_kfid": open_kfid,
        }
        return self._get("kf/servicer/list", params=data)

    def add_servicer(self, open_kfid, userid_list):
        """
        Add agents to a customer-service account.

        :param open_kfid: customer-service account id
        :param userid_list: agent userid list; a single userid is accepted
            and wrapped into a list
        :return: decoded JSON response
        """
        if not isinstance(userid_list, list):
            userid_list = [userid_list]
        data = {
            "open_kfid": open_kfid,
            "userid_list": userid_list,
        }
        return self._post("kf/servicer/add", data=data)

    def del_servicer(self, open_kfid, userid_list):
        """
        Remove agents from a customer-service account.

        :param open_kfid: customer-service account id
        :param userid_list: agent userid list; a single userid is accepted
            and wrapped into a list
        :return: decoded JSON response
        """
        if not isinstance(userid_list, list):
            userid_list = [userid_list]
        data = {
            "open_kfid": open_kfid,
            "userid_list": userid_list,
        }
        return self._post("kf/servicer/del", data=data)

    def batchget_customer(self, external_userid_list):
        """
        Fetch basic customer information in batch.

        :param external_userid_list: list of external_userid values; a
            single value is accepted and wrapped into a list
        :return: decoded JSON response
        """
        if not isinstance(external_userid_list, list):
            external_userid_list = [external_userid_list]
        data = {
            "external_userid_list": external_userid_list,
        }
        return self._post("kf/customer/batchget", data=data)

    def get_account_list(self):
        """
        List customer-service accounts.

        :return: decoded JSON response
        """
        return self._get("kf/account/list")

    def add_contact_way(self, open_kfid, scene):
        """
        Get a contact-me link for a customer-service account.

        :param open_kfid: customer-service account id
        :param scene: caller-defined scene value; at most 32 bytes, and must
            match the regex [0-9a-zA-Z_-]*
        :return: decoded JSON response
        """
        data = {"open_kfid": open_kfid, "scene": scene}
        return self._post("kf/add_contact_way", data=data)

    def get_upgrade_service_config(self):
        """
        Get the configured specialists and customer groups available for
        upgraded service.

        :return: decoded JSON response
        """
        return self._get("kf/customer/get_upgrade_service_config")

    def upgrade_service(self, open_kfid, external_userid, service_type, member=None, groupchat=None):
        """
        Upgrade a customer to specialist or customer-group service.

        :param open_kfid: customer-service account id
        :param external_userid: the WeChat customer's external_userid
        :param service_type: 1 = specialist service, 2 = customer-group
            service
        :param member: recommended specialist; used when service_type == 1
        :param groupchat: recommended customer group; used when
            service_type == 2
        :return: decoded JSON response
        """
        data = {
            "open_kfid": open_kfid,
            "external_userid": external_userid,
            "type": service_type,
        }
        if service_type == 1:
            data["member"] = member
        else:
            data["groupchat"] = groupchat
        return self._post("kf/customer/upgrade_service", data=data)

    def cancel_upgrade_service(self, open_kfid, external_userid):
        """
        Cancel an upgraded-service recommendation for a customer.

        :param open_kfid: customer-service account id
        :param external_userid: the WeChat customer's external_userid
        :return: decoded JSON response
        """
        data = {"open_kfid": open_kfid, "external_userid": external_userid}
        return self._post("kf/customer/cancel_upgrade_service", data=data)

    def send_msg_on_event(self, code, msgtype, msg_content, msgid=None):
        """
        Send an event-scoped message, such as a welcome message.

        When a specific callback event carries a ``code`` field, that code
        can be used once as a credential to send the user a message for the
        corresponding event scene.  Supported message types: text and menu
        messages.

        :param code: event code delivered by the callback; single use only
        :param msgtype: message type; the payload structure differs per
            type, see the official message-type documentation
        :param msg_content: dict with the type-specific payload (text or
            menu); merged into the request body
        :param msgid: message id; echoed back if given, otherwise generated
            by the server.  At most 32 bytes, matching the regex
            [0-9a-zA-Z_-]*
        :return: decoded JSON response
        """
        data = {"code": code, "msgtype": msgtype}
        if msgid:
            data["msgid"] = msgid
        data.update(msg_content)
        return self._post("kf/send_msg_on_event", data=data)
| mit | a0f3d18b35e9323c62c0ec485ebcfb0c | 27.832558 | 101 | 0.570092 | 2.120766 | false | false | false | false |
wechatpy/wechatpy | docs/conf.py | 1 | 8420 | # -*- coding: utf-8 -*-
#
# wechatpy documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 25 14:26:14 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath("_themes"))

# Imported after the sys.path tweak; only used to read the version string.
import wechatpy

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix of source filenames.
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "wechatpy"
copyright = "2014 - 2020, Messense Lv"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = wechatpy.__version__
# The full version, including alpha/beta/rc tags.
release = wechatpy.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = "zh_CN"

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = "furo"

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['_themes']

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/images/logo.png"

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

html_search_language = "zh"

# Output file base name for HTML help builder.
htmlhelp_basename = "wechatpydoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ("index", "wechatpy.tex", "wechatpy Documentation", "messense", "manual"),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "wechatpy", "wechatpy Documentation", ["messense"], 1)]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        "index",
        "wechatpy",
        "wechatpy Documentation",
        "messense",
        "wechatpy",
        "One line description of project.",
        "Miscellaneous",
    ),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
ceph/teuthology | teuthology/lock/ops.py | 2 | 17840 | import logging
import json
import os
import random
import time
import yaml
import requests
import teuthology.orchestra.remote
import teuthology.parallel
import teuthology.provision
from teuthology import misc
from teuthology import report
from teuthology.config import config
from teuthology.contextutil import safe_while
from teuthology.task import console_log
from teuthology.misc import canonicalize_hostname
from teuthology.job_status import set_status
from teuthology.lock import util, query
log = logging.getLogger(__name__)
def update_nodes(nodes, reset_os=False):
    """Push each node's inventory info to the lock server.

    With reset_os=True, blank out the recorded OS type/version instead of
    reporting the remote's current values.
    """
    for node in nodes:
        remote = teuthology.orchestra.remote.Remote(
            canonicalize_hostname(node))
        if reset_os:
            log.info("Updating [%s]: reset os type and version on server", node)
            inventory_info = {
                'os_type': '',
                'os_version': '',
                'name': remote.hostname,
            }
        else:
            log.info("Updating [%s]: set os type and version on server", node)
            inventory_info = remote.inventory_info
        update_inventory(inventory_info)
def lock_many_openstack(ctx, num, machine_type, user=None, description=None,
                        arch=None):
    """Provision and lock `num` OpenStack machines.

    Returns a dict mapping machine name to None (ssh host keys are not
    collected yet for OpenStack nodes).
    """
    os_type = teuthology.provision.get_distro(ctx)
    os_version = teuthology.provision.get_distro_version(ctx)
    resources_hint = (
        ctx.config.get('openstack') if hasattr(ctx, 'config') else None
    )
    machines = teuthology.provision.openstack.ProvisionOpenStack().create(
        num, os_type, os_version, arch, resources_hint)
    result = {}
    for machine in machines:
        lock_one(machine, user, description)
        result[machine] = None  # we do not collect ssh host keys yet
    return result
def lock_many(ctx, num, machine_type, user=None, description=None,
              os_type=None, os_version=None, arch=None, reimage=True):
    """
    Lock ``num`` machines of ``machine_type`` on the lock server.

    :param ctx: parsed task context (ctx.machine_type is read)
    :param num: how many machines to lock
    :param machine_type: one machine type, or several joined by ',' or '|'
    :param user: lock owner; defaults to the current user
    :param description: free-form description stored with the lock
    :param os_type: requested OS type (bare metal only)
    :param os_version: requested OS version (bare metal only)
    :param arch: requested architecture
    :param reimage: if True, reimage machine types that support it
    :return: dict mapping canonical hostname -> ssh public key on success;
             [] when no machine type could be locked; None on an invalid
             os-type/version combination
    """
    if user is None:
        user = misc.get_user()

    if not util.vps_version_or_type_valid(
        ctx.machine_type,
        os_type,
        os_version
    ):
        log.error('Invalid os-type or version detected -- lock failed')
        return

    # In the for loop below we can safely query for all bare-metal machine_type
    # values at once. So, if we're being asked for 'plana,mira,burnupi', do it
    # all in one shot. If we are passed 'plana,mira,burnupi,vps', do one query
    # for 'plana,mira,burnupi' and one for 'vps'
    machine_types_list = misc.get_multi_machine_types(machine_type)
    if machine_types_list == ['vps']:
        machine_types = machine_types_list
    elif machine_types_list == ['openstack']:
        return lock_many_openstack(ctx, num, machine_type,
                                   user=user,
                                   description=description,
                                   arch=arch)
    elif 'vps' in machine_types_list:
        machine_types_non_vps = list(machine_types_list)
        machine_types_non_vps.remove('vps')
        machine_types_non_vps = '|'.join(machine_types_non_vps)
        machine_types = [machine_types_non_vps, 'vps']
    else:
        machine_types_str = '|'.join(machine_types_list)
        machine_types = [machine_types_str, ]

    for machine_type in machine_types:
        uri = os.path.join(config.lock_server, 'nodes', 'lock_many', '')
        data = dict(
            locked_by=user,
            count=num,
            machine_type=machine_type,
            description=description,
        )
        # Only query for os_type/os_version if non-vps and non-libcloud, since
        # in that case we just create them.
        vm_types = ['vps'] + teuthology.provision.cloud.get_types()
        reimage_types = teuthology.provision.get_reimage_types()
        if machine_type not in vm_types + reimage_types:
            if os_type:
                data['os_type'] = os_type
            if os_version:
                data['os_version'] = os_version
        if arch:
            data['arch'] = arch
        log.debug("lock_many request: %s", repr(data))
        response = requests.post(
            uri,
            data=json.dumps(data),
            headers={'content-type': 'application/json'},
        )
        if response.ok:
            machines = dict()
            for machine in response.json():
                key = misc.canonicalize_hostname(
                    machine['name'],
                    user=machine.get('user'),
                )
                machines[key] = machine['ssh_pub_key']
            log.debug('locked {machines}'.format(
                machines=', '.join(machines.keys())))
            if machine_type in vm_types:
                ok_machs = {}
                update_nodes(machines, True)
                for machine in machines:
                    if teuthology.provision.create_if_vm(ctx, machine):
                        ok_machs[machine] = machines[machine]
                    else:
                        log.error('Unable to create virtual machine: %s',
                                  machine)
                        # Release the lock for the VM we could not create.
                        unlock_one(ctx, machine, user)
                ok_machs = do_update_keys(list(ok_machs.keys()))[1]
                update_nodes(ok_machs)
                return ok_machs
            elif reimage and machine_type in reimage_types:
                return reimage_machines(ctx, machines, machine_type)
            return machines
        elif response.status_code == 503:
            log.error('Insufficient nodes available to lock %d %s nodes.',
                      num, machine_type)
            log.error(response.text)
        else:
            log.error('Could not lock %d %s nodes, reason: unknown.',
                      num, machine_type)
    return []
def lock_one(name, user=None, description=None):
    """Lock a single node on the lock server; return the HTTP response."""
    name = misc.canonicalize_hostname(name, user=None)
    if user is None:
        user = misc.get_user()
    request = dict(name=name, locked=True, locked_by=user,
                   description=description)
    uri = os.path.join(config.lock_server, 'nodes', name, 'lock', '')
    response = requests.put(uri, json.dumps(request))
    if response.ok:
        log.debug('locked %s as %s', name, user)
        return response
    # Extract a human-readable failure reason if the server sent JSON.
    try:
        reason = response.json().get('message')
    except ValueError:
        reason = str(response.status_code)
    log.error('failed to lock {node}. reason: {reason}'.format(
        node=name, reason=reason))
    return response
def unlock_many(names, user):
    """
    Unlock several nodes at once on the lock server.

    :param names: iterable of hostnames (canonicalized here)
    :param user: the owner the locks were taken under
    :return: True on success, False on failure
    """
    fixed_names = [misc.canonicalize_hostname(name, user=None) for name in
                   names]
    names = fixed_names
    uri = os.path.join(config.lock_server, 'nodes', 'unlock_many', '')
    data = dict(
        locked_by=user,
        names=names,
    )
    # Retry with backoff until the server accepts the request.
    # NOTE(review): safe_while's behavior on exhausted retries (raise vs.
    # fall through to the False return below) comes from
    # teuthology.contextutil -- confirm before relying on the return path.
    with safe_while(
            sleep=1, increment=0.5, action=f'unlock_many {names}') as proceed:
        while proceed():
            response = requests.post(
                uri,
                data=json.dumps(data),
                headers={'content-type': 'application/json'},
            )
            if response.ok:
                log.debug("Unlocked: %s", ', '.join(names))
                return True
    log.error("Failed to unlock: %s", ', '.join(names))
    return False
def unlock_one(ctx, name, user, description=None):
    """
    Unlock a single node, destroying it first when it is a VM.

    :param ctx: parsed task context (passed to destroy_if_vm)
    :param name: hostname (canonicalized here)
    :param user: the owner the lock was taken under
    :param description: description to record with the unlock
    :return: True on success, False on failure
    """
    name = misc.canonicalize_hostname(name, user=None)
    if not teuthology.provision.destroy_if_vm(ctx, name, user, description):
        log.error('destroy failed for %s', name)
        return False
    request = dict(name=name, locked=False, locked_by=user,
                   description=description)
    uri = os.path.join(config.lock_server, 'nodes', name, 'lock', '')
    with safe_while(
            sleep=1, increment=0.5, action="unlock %s" % name) as proceed:
        while proceed():
            try:
                response = requests.put(uri, json.dumps(request))
                if response.ok:
                    log.info('unlocked: %s', name)
                    return response.ok
            # Work around https://github.com/kennethreitz/requests/issues/2364
            except requests.ConnectionError as e:
                log.warning("Saw %s while unlocking; retrying...", str(e))
    # NOTE(review): if every attempt raised ConnectionError, `response`
    # below is unbound and this raises NameError instead of logging --
    # worth confirming/fixing upstream.
    try:
        reason = response.json().get('message')
    except ValueError:
        reason = str(response.status_code)
    log.error('failed to unlock {node}. reason: {reason}'.format(
        node=name, reason=reason))
    return False
def update_lock(name, description=None, status=None, ssh_pub_key=None):
    """
    Update one node's lock-server record.

    Only the fields passed as non-None are sent; if nothing is to be
    updated the function returns True without contacting the server.

    :param name: hostname (canonicalized here)
    :param description: new description, if any
    :param status: 'up' marks the node up; any other value marks it down
    :param ssh_pub_key: new ssh public key, if any
    :return: True on success (or no-op), otherwise the last response's ok
    """
    name = misc.canonicalize_hostname(name, user=None)
    updated = {}
    if description is not None:
        updated['description'] = description
    if status is not None:
        updated['up'] = (status == 'up')
    if ssh_pub_key is not None:
        updated['ssh_pub_key'] = ssh_pub_key

    if updated:
        uri = os.path.join(config.lock_server, 'nodes', name, '')
        # Random increment spreads out retries from concurrent callers.
        inc = random.uniform(0, 1)
        with safe_while(
                sleep=1, increment=inc, action=f'update lock {name}') as proceed:
            while proceed():
                response = requests.put(
                    uri,
                    json.dumps(updated))
                if response.ok:
                    return True
        return response.ok
    return True
def update_inventory(node_dict):
    """
    Like update_lock(), but takes a dict and doesn't try to do anything smart
    by itself

    :param node_dict: node properties; must contain a 'name' key
    :raises ValueError: if 'name' is missing or empty
    """
    name = node_dict.get('name')
    if not name:
        raise ValueError("must specify name")
    if not config.lock_server:
        # No lock server configured: silently do nothing.
        return
    uri = os.path.join(config.lock_server, 'nodes', name, '')
    log.info("Updating %s on lock server", name)
    # Random increment spreads out retries from concurrent callers.
    inc = random.uniform(0, 1)
    with safe_while(
            sleep=1, increment=inc, action=f'update inventory {name}') as proceed:
        while proceed():
            response = requests.put(
                uri,
                json.dumps(node_dict),
                headers={'content-type': 'application/json'},
            )
            if response.status_code == 404:
                # Node is not known to the lock server yet: create it.
                log.info("Creating new node %s on lock server", name)
                uri = os.path.join(config.lock_server, 'nodes', '')
                response = requests.post(
                    uri,
                    json.dumps(node_dict),
                    headers={'content-type': 'application/json'},
                )
            if response.ok:
                return
def do_update_keys(machines, all_=False, _raise=True):
    """Re-scan ssh host keys and push changed ones to the lock server.

    :return: tuple of (push_new_keys status, hostname -> scanned key dict)
    """
    reference = query.list_locks(keyed_by_name=True)
    targets = reference.keys() if all_ else machines
    keys_dict = misc.ssh_keyscan(targets, _raise=_raise)
    return push_new_keys(keys_dict, reference), keys_dict
def push_new_keys(keys_dict, reference):
    """Push any changed ssh keys to the lock server; return 0 on success."""
    ret = 0
    for hostname, pubkey in keys_dict.items():
        log.info('Checking %s', hostname)
        if reference[hostname]['ssh_pub_key'] == pubkey:
            continue  # key unchanged, nothing to push
        log.info('New key found. Updating...')
        if not update_lock(hostname, ssh_pub_key=pubkey):
            log.error('failed to update %s!', hostname)
            ret = 1
    return ret
def reimage_machines(ctx, machines, machine_type):
    """
    Reimage the given machines in parallel and refresh their ssh keys.

    :param ctx: parsed task context
    :param machines: dict mapping hostname -> ssh public key
    :param machine_type: machine type; if it does not support reimaging,
        the input dict is returned unchanged
    :return: dict mapping hostname -> (re-scanned) ssh public key
    """
    reimage_types = teuthology.provision.get_reimage_types()
    if machine_type not in reimage_types:
        log.info(f"Skipping reimage of {machines.keys()} because {machine_type} is not in {reimage_types}")
        return machines
    # Setup log file, reimage machines and update their keys
    reimaged = dict()
    console_log_conf = dict(
        logfile_name='{shortname}_reimage.log',
        remotes=[teuthology.orchestra.remote.Remote(machine)
                 for machine in machines],
    )
    with console_log.task(ctx, console_log_conf):
        with teuthology.parallel.parallel() as p:
            for machine in machines:
                log.info("Start node '%s' reimaging", machine)
                # Blank the recorded OS info before the reimage runs.
                update_nodes([machine], True)
                p.spawn(teuthology.provision.reimage, ctx,
                        machine, machine_type)
                reimaged[machine] = machines[machine]
                log.info("Node '%s' reimaging is complete", machine)
    # Reimaging changes host keys; re-scan and push them.
    reimaged = do_update_keys(list(reimaged.keys()))[1]
    update_nodes(reimaged)
    return reimaged
def block_and_lock_machines(ctx, total_requested, machine_type, reimage=True):
    """
    Lock ``total_requested`` machines, optionally blocking until enough
    are free.

    Pushes job status 'waiting' while locking and back to 'running' once
    all targets are locked; the locked targets end up in
    ``ctx.config['targets']``.

    :param ctx: parsed task context (reads ctx.config, ctx.block,
        ctx.owner, ctx.archive; writes ctx.config['targets'])
    :param total_requested: number of machines needed
    :param machine_type: machine type to lock
    :param reimage: passed through to lock_many()
    """
    # It's OK for os_type and os_version to be None here.  If we're trying
    # to lock a bare metal machine, we'll take whatever is available.  If
    # we want a vps, defaults will be provided by misc.get_distro and
    # misc.get_distro_version in provision.create_if_vm
    os_type = ctx.config.get("os_type")
    os_version = ctx.config.get("os_version")
    arch = ctx.config.get('arch')
    reserved = config.reserve_machines
    assert isinstance(reserved, int), 'reserve_machines must be integer'
    assert (reserved >= 0), 'reserve_machines should >= 0'

    log.info('Locking machines...')
    # change the status during the locking process
    report.try_push_job_info(ctx.config, dict(status='waiting'))

    all_locked = dict()
    requested = total_requested
    while True:
        # get a candidate list of machines
        machines = query.list_locks(machine_type=machine_type, up=True,
                                    locked=False, count=requested + reserved)
        if machines is None:
            if ctx.block:
                log.error('Error listing machines, trying again')
                time.sleep(20)
                continue
            else:
                raise RuntimeError('Error listing machines')

        # make sure there are machines for non-automated jobs to run
        if len(machines) < reserved + requested \
                and ctx.owner.startswith('scheduled'):
            if ctx.block:
                log.info(
                    'waiting for more %s machines to be free (need %s + %s, have %s)...',
                    machine_type,
                    reserved,
                    requested,
                    len(machines),
                )
                time.sleep(10)
                continue
            else:
                assert 0, ('not enough machines free; need %s + %s, have %s' %
                           (reserved, requested, len(machines)))

        try:
            newly_locked = lock_many(ctx, requested, machine_type,
                                     ctx.owner, ctx.archive, os_type,
                                     os_version, arch, reimage=reimage)
        except Exception:
            # Lock failures should map to the 'dead' status instead of 'fail'
            if 'summary' in ctx:
                set_status(ctx.summary, 'dead')
            raise
        all_locked.update(newly_locked)
        log.info(
            '{newly_locked} {mtype} machines locked this try, '
            '{total_locked}/{total_requested} locked so far'.format(
                newly_locked=len(newly_locked),
                mtype=machine_type,
                total_locked=len(all_locked),
                total_requested=total_requested,
            )
        )
        if len(all_locked) == total_requested:
            vmlist = []
            for lmach in all_locked:
                if teuthology.lock.query.is_vm(lmach):
                    vmlist.append(lmach)
            if vmlist:
                # VMs may take a while to boot; poll until every one of
                # them answers an ssh key scan.
                log.info('Waiting for virtual machines to come up')
                keys_dict = dict()
                loopcount = 0
                while len(keys_dict) != len(vmlist):
                    loopcount += 1
                    time.sleep(10)
                    keys_dict = misc.ssh_keyscan(vmlist)
                    log.info('virtual machine is still unavailable')
                    if loopcount == 40:
                        # After ~40 polls, recreate the VMs that still
                        # have not answered, then keep waiting.
                        loopcount = 0
                        log.info('virtual machine(s) still not up, ' +
                                 'recreating unresponsive ones.')
                        for guest in vmlist:
                            if guest not in keys_dict.keys():
                                log.info('recreating: ' + guest)
                                full_name = misc.canonicalize_hostname(guest)
                                teuthology.provision.destroy_if_vm(ctx, full_name)
                                teuthology.provision.create_if_vm(ctx, full_name)
                if teuthology.lock.ops.do_update_keys(keys_dict)[0]:
                    log.info("Error in virtual machine keys")
                newscandict = {}
                for dkey in all_locked.keys():
                    stats = teuthology.lock.query.get_status(dkey)
                    newscandict[dkey] = stats['ssh_pub_key']
                ctx.config['targets'] = newscandict
            else:
                ctx.config['targets'] = all_locked
            locked_targets = yaml.safe_dump(
                ctx.config['targets'],
                default_flow_style=False
            ).splitlines()
            log.info('\n '.join(['Locked targets:', ] + locked_targets))
            # successfully locked machines, change status back to running
            report.try_push_job_info(ctx.config, dict(status='running'))
            break
        elif not ctx.block:
            assert 0, 'not enough machines are available'
        else:
            requested = requested - len(newly_locked)
            assert requested > 0, "lock_machines: requested counter went" \
                "negative, this shouldn't happen"
            log.info(
                "{total} machines locked ({new} new); need {more} more".format(
                    total=len(all_locked), new=len(newly_locked), more=requested)
            )
            log.warning('Could not lock enough machines, waiting...')
            time.sleep(10)
| mit | 1920758bb7b6c6695e9ed6758b3cf87a | 38.732739 | 107 | 0.558184 | 4.12963 | false | false | false | false |
wechatpy/wechatpy | wechatpy/crypto/__init__.py | 2 | 4759 | # -*- coding: utf-8 -*-
"""
wechatpy.crypto
~~~~~~~~~~~~~~~~
This module provides some crypto tools for WeChat and WeChat work
:copyright: (c) 2014 by messense.
:license: MIT, see LICENSE for more details.
"""
import json
import time
import base64
import hashlib
from wechatpy.utils import to_text, to_binary, WeChatSigner
from wechatpy.exceptions import (
InvalidAppIdException,
InvalidMchIdException,
InvalidSignatureException,
)
from wechatpy.crypto.base import BasePrpCrypto, WeChatCipher, BaseRefundCrypto
from wechatpy.crypto.pkcs7 import PKCS7Encoder
def _get_signature(token, timestamp, nonce, encrypt):
signer = WeChatSigner()
signer.add_data(token, timestamp, nonce, encrypt)
return signer.signature
class PrpCrypto(BasePrpCrypto):
def encrypt(self, text, app_id):
return self._encrypt(text, app_id)
def decrypt(self, text, app_id):
return self._decrypt(text, app_id, InvalidAppIdException)
class BaseWeChatCrypto:
def __init__(self, token, encoding_aes_key, _id):
encoding_aes_key = to_binary(encoding_aes_key + "=")
self.key = base64.b64decode(encoding_aes_key)
assert len(self.key) == 32
self.token = token
self._id = _id
def _check_signature(self, signature, timestamp, nonce, echo_str, crypto_class=None):
_signature = _get_signature(self.token, timestamp, nonce, echo_str)
if _signature != signature:
raise InvalidSignatureException()
pc = crypto_class(self.key)
return pc.decrypt(echo_str, self._id)
def _encrypt_message(self, msg, nonce, timestamp=None, crypto_class=None):
from wechatpy.replies import BaseReply
xml = """<xml>
<Encrypt><![CDATA[{encrypt}]]></Encrypt>
<MsgSignature><![CDATA[{signature}]]></MsgSignature>
<TimeStamp>{timestamp}</TimeStamp>
<Nonce><![CDATA[{nonce}]]></Nonce>
</xml>"""
if isinstance(msg, BaseReply):
msg = msg.render()
timestamp = timestamp or to_text(int(time.time()))
pc = crypto_class(self.key)
encrypt = to_text(pc.encrypt(msg, self._id))
signature = _get_signature(self.token, timestamp, nonce, encrypt)
return to_text(xml.format(encrypt=encrypt, signature=signature, timestamp=timestamp, nonce=nonce))
def _decrypt_message(self, msg, signature, timestamp, nonce, crypto_class=None):
if not isinstance(msg, dict):
import xmltodict
msg = xmltodict.parse(to_text(msg))["xml"]
encrypt = msg["Encrypt"]
_signature = _get_signature(self.token, timestamp, nonce, encrypt)
if _signature != signature:
raise InvalidSignatureException()
pc = crypto_class(self.key)
return pc.decrypt(encrypt, self._id)
class WeChatCrypto(BaseWeChatCrypto):
def __init__(self, token, encoding_aes_key, app_id):
super().__init__(token, encoding_aes_key, app_id)
self.app_id = app_id
def encrypt_message(self, msg, nonce, timestamp=None):
return self._encrypt_message(msg, nonce, timestamp, PrpCrypto)
def decrypt_message(self, msg, signature, timestamp, nonce):
return self._decrypt_message(msg, signature, timestamp, nonce, PrpCrypto)
class WeChatWxaCrypto:
def __init__(self, key, iv, app_id):
self.cipher = WeChatCipher(base64.b64decode(key), base64.b64decode(iv))
self.app_id = app_id
def decrypt_message(self, msg):
raw_data = base64.b64decode(msg)
decrypted = self.cipher.decrypt(raw_data)
plaintext = PKCS7Encoder.decode(decrypted)
decrypted_msg = json.loads(to_text(plaintext))
if decrypted_msg["watermark"]["appid"] != self.app_id:
raise InvalidAppIdException()
return decrypted_msg
class RefundCrypto(BaseRefundCrypto):
def encrypt(self, text):
return self._encrypt(text)
def decrypt(self, text):
return self._decrypt(text)
class WeChatRefundCrypto:
def __init__(self, key):
self.key = to_binary(hashlib.md5(to_binary(key)).hexdigest())
assert len(self.key) == 32
def _decrypt_message(self, msg, appid, mch_id, crypto_class=None):
import xmltodict
if not isinstance(msg, dict):
msg = xmltodict.parse(to_text(msg))["xml"]
req_info = msg["req_info"]
if msg["appid"] != appid:
raise InvalidAppIdException()
if msg["mch_id"] != mch_id:
raise InvalidMchIdException()
pc = crypto_class(self.key)
ret = pc.decrypt(req_info)
return xmltodict.parse(to_text(ret))["root"]
def decrypt_message(self, msg, appid, mch_id):
return self._decrypt_message(msg, appid, mch_id, RefundCrypto)
| mit | aaf7543a5ee6ffd8a49b58be1aec6bdd | 32.27972 | 106 | 0.647615 | 3.578195 | false | false | false | false |
ceph/teuthology | teuthology/orchestra/remote.py | 1 | 24969 | """
Support for paramiko remote objects.
"""
import teuthology.lock.query
import teuthology.lock.util
from teuthology.orchestra import run
from teuthology.orchestra import connection
from teuthology.orchestra import console
from teuthology.orchestra.opsys import OS
import teuthology.provision
from teuthology import misc
from teuthology.exceptions import CommandFailedError
from teuthology.misc import host_shortname
import errno
import time
import re
import logging
from io import BytesIO
from io import StringIO
import os
import pwd
import tempfile
import netaddr
log = logging.getLogger(__name__)
class RemoteShell(object):
"""
Contains methods to run miscellaneous shell commands on remote machines.
These methods were originally part of orchestra.remote.Remote. The reason
for moving these methods from Remote is that applications that use
teuthology for testing usually have programs that can run tests locally on
a single node machine for development work (for example, vstart_runner.py
in case of Ceph). These programs can import and reuse these methods
without having to deal SSH stuff. In short, this class serves a shared
interface.
To use these methods, inherit the class here and implement "run()" method in
the subclass.
"""
def remove(self, path):
self.run(args=['rm', '-fr', path])
def mkdtemp(self, suffix=None, parentdir=None):
"""
Create a temporary directory on remote machine and return it's path.
"""
args = ['mktemp', '-d']
if suffix:
args.append('--suffix=%s' % suffix)
if parentdir:
args.append('--tmpdir=%s' % parentdir)
return self.sh(args).strip()
def mktemp(self, suffix=None, parentdir=None, data=None):
"""
Make a remote temporary file.
:param suffix: suffix for the temporary file
:param parentdir: parent dir where temp file should be created
:param data: write data to the file if provided
Returns: the path of the temp file created.
"""
args = ['mktemp']
if suffix:
args.append('--suffix=%s' % suffix)
if parentdir:
args.append('--tmpdir=%s' % parentdir)
path = self.sh(args).strip()
if data:
self.write_file(path=path, data=data)
return path
def sh(self, script, **kwargs):
"""
Shortcut for run method.
Usage:
my_name = remote.sh('whoami')
remote_date = remote.sh('date')
"""
if 'stdout' not in kwargs:
kwargs['stdout'] = BytesIO()
if 'args' not in kwargs:
kwargs['args'] = script
proc = self.run(**kwargs)
out = proc.stdout.getvalue()
if isinstance(out, bytes):
return out.decode()
else:
return out
def sh_file(self, script, label="script", sudo=False, **kwargs):
"""
Run shell script after copying its contents to a remote file
:param script: string with script text, or file object
:param sudo: run command with sudo if True,
run as user name if string value (defaults to False)
:param label: string value which will be part of file name
Returns: stdout
"""
ftempl = '/tmp/teuthology-remote-$(date +%Y%m%d%H%M%S)-{}-XXXX'\
.format(label)
script_file = self.sh("mktemp %s" % ftempl).strip()
self.sh("cat - | tee {script} ; chmod a+rx {script}"\
.format(script=script_file), stdin=script)
if sudo:
if isinstance(sudo, str):
command="sudo -u %s %s" % (sudo, script_file)
else:
command="sudo %s" % script_file
else:
command="%s" % script_file
return self.sh(command, **kwargs)
def chmod(self, file_path, permissions):
"""
As super-user, set permissions on the remote file specified.
"""
args = [
'sudo',
'chmod',
permissions,
file_path,
]
self.run(
args=args,
)
def chcon(self, file_path, context):
"""
Set the SELinux context of a given file.
VMs and non-RPM-based hosts will skip this operation because ours
currently have SELinux disabled.
:param file_path: The path to the file
:param context: The SELinux context to be used
"""
if self.os.package_type != 'rpm' or \
self.os.name in ['opensuse', 'sle']:
return
if teuthology.lock.query.is_vm(self.shortname):
return
self.run(args="sudo chcon {con} {path}".format(
con=context, path=file_path))
def copy_file(self, src, dst, sudo=False, mode=None, owner=None,
mkdir=False, append=False):
"""
Copy data to remote file
:param src: source file path on remote host
:param dst: destination file path on remote host
:param sudo: use sudo to write file, defaults False
:param mode: set file mode bits if provided
:param owner: set file owner if provided
:param mkdir: ensure the destination directory exists, defaults
False
:param append: append data to the file, defaults False
"""
dd = 'sudo dd' if sudo else 'dd'
args = dd + ' if=' + src + ' of=' + dst
if append:
args += ' conv=notrunc oflag=append'
if mkdir:
mkdirp = 'sudo mkdir -p' if sudo else 'mkdir -p'
dirpath = os.path.dirname(dst)
if dirpath:
args = mkdirp + ' ' + dirpath + '\n' + args
if mode:
chmod = 'sudo chmod' if sudo else 'chmod'
args += '\n' + chmod + ' ' + mode + ' ' + dst
if owner:
chown = 'sudo chown' if sudo else 'chown'
args += '\n' + chown + ' ' + owner + ' ' + dst
args = 'set -ex' + '\n' + args
self.run(args=args)
def move_file(self, src, dst, sudo=False, mode=None, owner=None,
mkdir=False):
"""
Move data to remote file
:param src: source file path on remote host
:param dst: destination file path on remote host
:param sudo: use sudo to write file, defaults False
:param mode: set file mode bits if provided
:param owner: set file owner if provided
:param mkdir: ensure the destination directory exists, defaults
False
"""
mv = 'sudo mv' if sudo else 'mv'
args = mv + ' ' + src + ' ' + dst
if mkdir:
mkdirp = 'sudo mkdir -p' if sudo else 'mkdir -p'
dirpath = os.path.dirname(dst)
if dirpath:
args = mkdirp + ' ' + dirpath + '\n' + args
if mode:
chmod = 'sudo chmod' if sudo else 'chmod'
args += ' && ' + chmod + ' ' + mode + ' ' + dst
if owner:
chown = 'sudo chown' if sudo else 'chown'
args += ' && ' + chown + ' ' + owner + ' ' + dst
self.run(args=args)
def read_file(self, path, sudo=False, stdout=None,
offset=0, length=0):
"""
Read data from remote file
:param path: file path on remote host
:param sudo: use sudo to read the file, defaults False
:param stdout: output object, defaults to io.BytesIO()
:param offset: number of bytes to skip from the file
:param length: number of bytes to read from the file
:raises: :class:`FileNotFoundError`: there is no such file by the path
:raises: :class:`RuntimeError`: unexpected error occurred
:returns: the file contents in bytes, if stdout is `io.BytesIO`, by
default
:returns: the file contents in str, if stdout is `io.StringIO`
"""
dd = 'sudo dd' if sudo else 'dd'
args = dd + ' if=' + path + ' of=/dev/stdout'
iflags=[]
# we have to set defaults here instead of the method's signature,
# because python is reusing the object from call to call
stdout = stdout or BytesIO()
if offset:
args += ' skip=' + str(offset)
iflags += 'skip_bytes'
if length:
args += ' count=' + str(length)
iflags += 'count_bytes'
if iflags:
args += ' iflag=' + ','.join(iflags)
args = 'set -ex' + '\n' + args
proc = self.run(args=args, stdout=stdout, stderr=StringIO(),
check_status=False, quiet=True)
if proc.returncode:
if 'No such file or directory' in proc.stderr.getvalue():
raise FileNotFoundError(errno.ENOENT,
f"Cannot find file on the remote '{self.name}'", path)
else:
raise RuntimeError("Unexpected error occurred while trying to "
f"read '{path}' file on the remote '{self.name}'")
return proc.stdout.getvalue()
def write_file(self, path, data, sudo=False, mode=None, owner=None,
mkdir=False, append=False):
"""
Write data to remote file
:param path: file path on remote host
:param data: str, binary or fileobj to be written
:param sudo: use sudo to write file, defaults False
:param mode: set file mode bits if provided
:param owner: set file owner if provided
:param mkdir: preliminary create the file directory, defaults False
:param append: append data to the file, defaults False
"""
dd = 'sudo dd' if sudo else 'dd'
args = dd + ' of=' + path
if append:
args += ' conv=notrunc oflag=append'
if mkdir:
mkdirp = 'sudo mkdir -p' if sudo else 'mkdir -p'
dirpath = os.path.dirname(path)
if dirpath:
args = mkdirp + ' ' + dirpath + '\n' + args
if mode:
chmod = 'sudo chmod' if sudo else 'chmod'
args += '\n' + chmod + ' ' + mode + ' ' + path
if owner:
chown = 'sudo chown' if sudo else 'chown'
args += '\n' + chown + ' ' + owner + ' ' + path
args = 'set -ex' + '\n' + args
self.run(args=args, stdin=data, quiet=True)
def sudo_write_file(self, path, data, **kwargs):
"""
Write data to remote file with sudo, for more info see `write_file()`.
"""
self.write_file(path, data, sudo=True, **kwargs)
def is_mounted(self, path):
"""
Check if the given path is mounted on the remote machine.
This method checks the contents of "/proc/self/mounts" instead of
using "mount" or "findmnt" command since these commands hang when a
CephFS client is blocked and its mount point on the remote machine
is left unhandled/unmounted.
:param path: path on remote host
"""
# XXX: matching newline too is crucial so that "/mnt" does not match
# "/mnt/cephfs" if it's present in the output.
return f'{path}\n' in self.sh("cat /proc/self/mounts | awk '{print $2}'")
@property
def os(self):
if not hasattr(self, '_os'):
try:
os_release = self.sh('cat /etc/os-release').strip()
self._os = OS.from_os_release(os_release)
return self._os
except CommandFailedError:
pass
lsb_release = self.sh('lsb_release -a').strip()
self._os = OS.from_lsb_release(lsb_release)
return self._os
@property
def arch(self):
if not hasattr(self, '_arch'):
self._arch = self.sh('uname -m').strip()
return self._arch
class Remote(RemoteShell):
"""
A connection to a remote host.
This is a higher-level wrapper around Paramiko's `SSHClient`.
"""
# for unit tests to hook into
_runner = staticmethod(run.run)
_reimage_types = None
def __init__(self, name, ssh=None, shortname=None, console=None,
host_key=None, keep_alive=True):
self.name = name
if '@' in name:
(self.user, hostname) = name.split('@')
# Temporary workaround for 'hostname --fqdn' not working on some
# machines
self._hostname = hostname
else:
# os.getlogin() doesn't work on non-login shells. The following
# should work on any unix system
self.user = pwd.getpwuid(os.getuid()).pw_name
hostname = name
self._shortname = shortname or host_shortname(hostname)
self._host_key = host_key
self.keep_alive = keep_alive
self._console = console
self.ssh = ssh
if self._reimage_types is None:
Remote._reimage_types = teuthology.provision.get_reimage_types()
def connect(self, timeout=None, create_key=None, context='connect'):
args = dict(user_at_host=self.name, host_key=self._host_key,
keep_alive=self.keep_alive, _create_key=create_key)
if context == 'reconnect':
# The reason for the 'context' workaround is not very
# clear from the technical side.
# I'll get "[Errno 98] Address already in use" altough
# there are no open tcp(ssh) connections.
# When connecting without keepalive, host_key and _create_key
# set, it will proceed.
args = dict(user_at_host=self.name, _create_key=False, host_key=None)
if timeout:
args['timeout'] = timeout
self.ssh = connection.connect(**args)
return self.ssh
def reconnect(self, timeout=None, socket_timeout=None, sleep_time=30):
"""
Attempts to re-establish connection. Returns True for success; False
for failure.
"""
if self.ssh is not None:
self.ssh.close()
if not timeout:
return self._reconnect(timeout=socket_timeout)
start_time = time.time()
elapsed_time = lambda: time.time() - start_time
while elapsed_time() < timeout:
success = self._reconnect(timeout=socket_timeout)
if success:
log.info(f"Successfully reconnected to host '{self.name}'")
break
# Don't let time_remaining be < 0
time_remaining = max(0, timeout - elapsed_time())
sleep_val = min(time_remaining, sleep_time)
time.sleep(sleep_val)
return success
def _reconnect(self, timeout=None):
log.info(f"Trying to reconnect to host '{self.name}'")
try:
self.connect(timeout=timeout, context='reconnect')
return self.is_online
except Exception as e:
log.debug(e)
return False
@property
def ip_address(self):
return self.ssh.get_transport().getpeername()[0]
@property
def interface(self):
"""
The interface used by the current SSH connection
"""
if not hasattr(self, '_interface'):
self._set_iface_and_cidr()
return self._interface
@property
def cidr(self):
"""
The network (in CIDR notation) used by the remote's SSH connection
"""
if not hasattr(self, '_cidr'):
self._set_iface_and_cidr()
return self._cidr
def _set_iface_and_cidr(self):
ip_addr_show = self.sh('PATH=/sbin:/usr/sbin ip addr show')
regexp = 'inet.? %s' % self.ip_address
for line in ip_addr_show.split('\n'):
line = line.strip()
if re.match(regexp, line):
items = line.split()
self._interface = items[-1]
self._cidr = str(netaddr.IPNetwork(items[1]).cidr)
return
raise RuntimeError("Could not determine interface/CIDR!")
@property
def hostname(self):
if not hasattr(self, '_hostname'):
self._hostname = self.sh('hostname --fqdn').strip()
return self._hostname
@property
def machine_type(self):
if not getattr(self, '_machine_type', None):
remote_info = teuthology.lock.query.get_status(self.hostname)
if not remote_info:
return None
self._machine_type = remote_info.get("machine_type", None)
return self._machine_type
@property
def is_reimageable(self):
return self.machine_type in self._reimage_types
@property
def shortname(self):
if self._shortname is None:
self._shortname = host_shortname(self.hostname)
return self._shortname
@property
def is_online(self):
if self.ssh is None:
return False
if self.ssh.get_transport() is None:
return False
try:
self.run(args="true")
except Exception:
return False
return self.ssh.get_transport().is_active()
def ensure_online(self):
if self.is_online:
return
self.connect()
if not self.is_online:
raise Exception('unable to connect')
@property
def system_type(self):
"""
System type decorator
"""
return misc.get_system_type(self)
def __str__(self):
return self.name
def __repr__(self):
return '{classname}(name={name!r})'.format(
classname=self.__class__.__name__,
name=self.name,
)
def run(self, **kwargs):
"""
This calls `orchestra.run.run` with our SSH client.
TODO refactor to move run.run here?
"""
if not self.ssh or \
not self.ssh.get_transport() or \
not self.ssh.get_transport().is_active():
if not self.reconnect():
raise Exception(f'Cannot connect to remote host {self.shortname}')
r = self._runner(client=self.ssh, name=self.shortname, **kwargs)
r.remote = self
return r
def _sftp_put_file(self, local_path, remote_path):
"""
Use the paramiko.SFTPClient to put a file. Returns the remote filename.
"""
sftp = self.ssh.open_sftp()
sftp.put(local_path, remote_path)
return
def _sftp_get_file(self, remote_path, local_path):
"""
Use the paramiko.SFTPClient to get a file. Returns the local filename.
"""
file_size = self._format_size(
self._sftp_get_size(remote_path)
).strip()
log.debug("{}:{} is {}".format(self.shortname, remote_path, file_size))
sftp = self.ssh.open_sftp()
sftp.get(remote_path, local_path)
return local_path
def _sftp_open_file(self, remote_path):
"""
Use the paramiko.SFTPClient to open a file. Returns a
paramiko.SFTPFile object.
"""
sftp = self.ssh.open_sftp()
return sftp.open(remote_path)
def _sftp_get_size(self, remote_path):
"""
Via _sftp_open_file, return the filesize in bytes
"""
with self._sftp_open_file(remote_path) as f:
return f.stat().st_size
@staticmethod
def _format_size(file_size):
"""
Given a file_size in bytes, returns a human-readable representation.
"""
for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
if abs(file_size) < 1024.0:
break
file_size = file_size / 1024.0
return "{:3.0f}{}".format(file_size, unit)
def put_file(self, path, dest_path, sudo=False):
"""
Copy a local filename to a remote file
"""
if sudo:
raise NotImplementedError("sudo not supported")
self._sftp_put_file(path, dest_path)
return
def get_file(self, path, sudo=False, dest_dir='/tmp'):
"""
Fetch a remote file, and return its local filename.
:param sudo: Use sudo on the remote end to read a file that
requires it. Defaults to False.
:param dest_dir: Store the file in this directory. If it is /tmp,
generate a unique filename; if not, use the original
filename.
:returns: The path to the local file
"""
if not os.path.isdir(dest_dir):
raise IOError("{dir} is not a directory".format(dir=dest_dir))
if sudo:
orig_path = path
path = self.mktemp()
args = [
'sudo',
'cp',
orig_path,
path,
]
self.run(args=args)
self.chmod(path, '0666')
if dest_dir == '/tmp':
# If we're storing in /tmp, generate a unique filename
(fd, local_path) = tempfile.mkstemp(dir=dest_dir)
os.close(fd)
else:
# If we are storing somewhere other than /tmp, use the original
# filename
local_path = os.path.join(dest_dir, path.split(os.path.sep)[-1])
self._sftp_get_file(path, local_path)
if sudo:
self.remove(path)
return local_path
def get_tar(self, path, to_path, sudo=False):
"""
Tar a remote directory and copy it locally
"""
remote_temp_path = self.mktemp()
args = []
if sudo:
args.append('sudo')
args.extend([
'tar',
'cz',
'-f', '-',
'-C', path,
'--',
'.',
run.Raw('>'), remote_temp_path
])
self.run(args=args)
if sudo:
self.chmod(remote_temp_path, '0666')
self._sftp_get_file(remote_temp_path, to_path)
self.remove(remote_temp_path)
def get_tar_stream(self, path, sudo=False):
"""
Tar-compress a remote directory and return the RemoteProcess
for streaming
"""
args = []
if sudo:
args.append('sudo')
args.extend([
'tar',
'cz',
'-f', '-',
'-C', path,
'--',
'.',
])
return self.run(args=args, wait=False, stdout=run.PIPE)
@property
def host_key(self):
if not self._host_key:
trans = self.ssh.get_transport()
key = trans.get_remote_server_key()
self._host_key = ' '.join((key.get_name(), key.get_base64()))
return self._host_key
@property
def inventory_info(self):
node = dict()
node['name'] = self.hostname
node['user'] = self.user
node['arch'] = self.arch
node['os_type'] = self.os.name
node['os_version'] = '.'.join(self.os.version.split('.')[:2])
node['ssh_pub_key'] = self.host_key
node['up'] = True
return node
@property
def console(self):
if not self._console:
self._console = getRemoteConsole(self.name)
return self._console
@property
def is_vm(self):
if not hasattr(self, '_is_vm'):
self._is_vm = teuthology.lock.query.is_vm(self.name)
return self._is_vm
@property
def is_container(self):
if not hasattr(self, '_is_container'):
self._is_container = not bool(self.run(
args="test -f /run/.containerenv -o -f /.dockerenv",
check_status=False,
).returncode)
return self._is_container
@property
def init_system(self):
"""
Which init system does the remote use?
:returns: 'systemd' or None
"""
if not hasattr(self, '_init_system'):
self._init_system = None
proc = self.run(
args=['which', 'systemctl'],
check_status=False,
)
if proc.returncode == 0:
self._init_system = 'systemd'
return self._init_system
def __del__(self):
if self.ssh is not None:
self.ssh.close()
def getRemoteConsole(name, ipmiuser=None, ipmipass=None, ipmidomain=None,
logfile=None, timeout=60):
"""
Return either VirtualConsole or PhysicalConsole depending on name.
"""
if teuthology.lock.query.is_vm(name):
try:
return console.VirtualConsole(name)
except Exception:
return None
return console.PhysicalConsole(
name, ipmiuser, ipmipass, ipmidomain, logfile, timeout)
| mit | 385ebe934331fe573121e11b4ce045e9 | 32.741892 | 82 | 0.545156 | 4.102695 | false | false | false | false |
wechatpy/wechatpy | wechatpy/client/api/shakearound.py | 2 | 12341 | # -*- coding: utf-8 -*-
import time
from datetime import datetime
from optionaldict import optionaldict
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatShakeAround(BaseWeChatAPI):
API_BASE_URL = "https://api.weixin.qq.com/"
@classmethod
def _to_timestamp(cls, date):
if isinstance(date, str):
date = datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
if isinstance(date, datetime):
timestamp = int(time.mktime(date.timetuple()))
return timestamp
return int(date)
def apply_device_id(self, quantity, reason, poi_id=None, comment=None):
"""
申请设备ID
详情请参考
http://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
:param quantity: 申请的设备ID的数量,单次新增设备超过500个,需走人工审核流程
:param reason: 申请理由,不超过100个字
:param poi_id: 可选,设备关联的门店ID
:param comment: 可选,备注,不超过15个汉字或30个英文字母
:return: 申请的设备信息
"""
data = optionaldict()
data["quantity"] = quantity
data["apply_reason"] = reason
data["poi_id"] = poi_id
data["comment"] = comment
res = self._post(
"shakearound/device/applyid",
data=data,
result_processor=lambda x: x["data"],
)
return res
def update_device(self, device_id=None, uuid=None, major=None, minor=None, comment=None):
"""
更新设备信息
详情请参考
http://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
:param device_id: 设备编号,若填了UUID、major、minor,则可不填设备编号,若二者都填,则以设备编号为优先
:param uuid: UUID
:param major: major
:param minor: minor
:param comment: 设备的备注信息,不超过15个汉字或30个英文字母。
:return: 返回的 JSON 数据包
"""
data = optionaldict()
data["comment"] = comment
data["device_identifier"] = {
"device_id": device_id,
"uuid": uuid,
"major": major,
"minor": minor,
}
return self._post("shakearound/device/update", data=data)
def bind_device_location(self, poi_id, device_id=None, uuid=None, major=None, minor=None):
"""
配置设备与门店的关联关系
详情请参考
http://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
:param poi_id: 待关联的门店ID
:param device_id: 设备编号,若填了UUID、major、minor,则可不填设备编号,若二者都填,则以设备编号为优先
:param uuid: UUID
:param major: major
:param minor: minor
:return: 返回的 JSON 数据包
"""
data = optionaldict()
data["poi_id"] = poi_id
data["device_identifier"] = {
"device_id": device_id,
"uuid": uuid,
"major": major,
"minor": minor,
}
return self._post("shakearound/device/bindlocation", data=data)
def search_device(self, identifiers=None, apply_id=None, begin=0, count=10):
"""
查询设备列表
详情请参考
http://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
:param identifiers: 设备 ID 信息列表
:param apply_id: 批次ID,申请设备ID超出500个时所返回批次ID
:param begin: 设备列表的起始索引值
:param count: 待查询的设备个数
:return: 设备列表
"""
data = optionaldict()
data["begin"] = begin
data["count"] = count
data["apply_id"] = apply_id
if identifiers:
data["device_identifiers"] = identifiers
res = self._post("shakearound/device/search", data=data, result_processor=lambda x: x["data"])
return res
def add_page(self, title, description, icon_url, page_url, comment=None):
"""
新增页面
详情请参考
http://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html
:param title: 在摇一摇页面展示的主标题,不超过6个字
:param description: 在摇一摇页面展示的副标题,不超过7个字
:param icon_url: 在摇一摇页面展示的图片。图片需先上传至微信侧服务器,
用“素材管理-上传图片素材”接口上传图片,返回的图片URL再配置在此处
:param page_url: 跳转链接
:param comment: 可选,页面的备注信息,不超过15个字
:return: 页面信息
"""
data = optionaldict()
data["title"] = title
data["description"] = description
data["icon_url"] = icon_url
data["page_url"] = page_url
data["comment"] = comment
res = self._post("shakearound/page/add", data=data, result_processor=lambda x: x["data"])
return res
def update_page(self, page_id, title, description, icon_url, page_url, comment=None):
"""
编辑页面信息
详情请参考
http://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html
:param page_id: 摇周边页面唯一ID
:param title: 在摇一摇页面展示的主标题,不超过6个字
:param description: 在摇一摇页面展示的副标题,不超过7个字
:param icon_url: 在摇一摇页面展示的图片。图片需先上传至微信侧服务器,
用“素材管理-上传图片素材”接口上传图片,返回的图片URL再配置在此处
:param page_url: 跳转链接
:param comment: 可选,页面的备注信息,不超过15个字
:return: 页面信息
"""
data = optionaldict()
data["page_id"] = page_id
data["title"] = title
data["description"] = description
data["icon_url"] = icon_url
data["page_url"] = page_url
data["comment"] = comment
res = self._post("shakearound/page/update", data=data, result_processor=lambda x: x["data"])
return res
def search_pages(self, page_ids=None, begin=0, count=10):
"""
查询页面列表
详情请参考
http://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html
:param page_ids: 指定页面的id列表
:param begin: 页面列表的起始索引值
:param count: 待查询的页面个数
:return: 页面查询结果信息
"""
if not page_ids:
data = {"type": 2, "begin": begin, "count": count}
else:
if not isinstance(page_ids, (tuple, list)):
page_ids = [page_ids]
data = {"type": 1, "page_ids": page_ids}
res = self._post("shakearound/page/search", data=data, result_processor=lambda x: x["data"])
return res
def delete_page(self, page_id):
"""
删除页面
详情请参考
http://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html
:param page_id: 指定页面的id列表
:return: 返回的 JSON 数据包
"""
return self._post("shakearound/page/delete", data={"page_id": page_id})
def add_material(self, media_file, media_type="icon"):
"""
上传图片素材
详情请参考
http://mp.weixin.qq.com/wiki/5/e997428269ff189d8f9a4b9e177be2d9.html
:param media_file: 要上传的文件,一个 File-object
:param media_type: 摇一摇素材类型, 取值为 icon或者 license, 默认 icon.
:return: 上传的素材信息
"""
res = self._post(
"shakearound/material/add",
files={"media": media_file},
params={"type": media_type},
result_processor=lambda x: x["data"],
)
return res
def bind_device_pages(self, page_ids, bind, append, device_id=None, uuid=None, major=None, minor=None):
"""
配置设备与页面的关联关系
详情请参考
http://mp.weixin.qq.com/wiki/12/c8120214ec0ba08af5dfcc0da1a11400.html
:param page_ids: 待关联的页面列表
:param bind: 关联操作标志位, 0为解除关联关系,1为建立关联关系
:param append: 新增操作标志位, 0为覆盖,1为新增
:param device_id: 设备编号,若填了UUID、major、minor,则可不填设备编号,若二者都填,则以设备编号为优先
:param uuid: UUID
:param major: major
:param minor: minor
:return: 返回的 JSON 数据包
"""
if not isinstance(page_ids, (tuple, list)):
page_ids = [page_ids]
data = {
"page_ids": page_ids,
"bind": int(bind),
"append": int(append),
"device_identifier": {
"device_id": device_id,
"uuid": uuid,
"major": major,
"minor": minor,
},
}
return self._post("shakearound/device/bindpage", data=data)
def get_shake_info(self, ticket):
"""
获取摇周边的设备及用户信息
详情请参考
http://mp.weixin.qq.com/wiki/3/34904a5db3d0ec7bb5306335b8da1faf.html
:param ticket: 摇周边业务的ticket,可在摇到的URL中得到,ticket生效时间为30分钟
:return: 设备及用户信息
"""
res = self._post(
"shakearound/user/getshakeinfo",
data={"ticket": ticket},
result_processor=lambda x: x["data"],
)
return res
def get_device_statistics(self, begin_date, end_date, device_id=None, uuid=None, major=None, minor=None):
"""
以设备为维度的数据统计接口
http://mp.weixin.qq.com/wiki/0/8a24bcacad40fe7ee98d1573cb8a6764.html
:param begin_date: 起始时间,最长时间跨度为30天
:param end_date: 结束时间,最长时间跨度为30天
:param device_id: 设备编号,若填了UUID、major、minor,则可不填设备编号,若二者都填,则以设备编号为优先
:param uuid: UUID
:param major: major
:param minor: minor
"""
data = {
"device_identifier": {
"device_id": device_id,
"uuid": uuid,
"major": major,
"minor": minor,
},
"begin_date": self._to_timestamp(begin_date),
"end_date": self._to_timestamp(end_date),
}
res = self._post(
"shakearound/statistics/device",
data=data,
result_processor=lambda x: x["data"],
)
return res
def get_page_statistics(self, page_id, begin_date, end_date):
"""
以页面为维度的数据统计接口
详情请参考
http://mp.weixin.qq.com/wiki/0/8a24bcacad40fe7ee98d1573cb8a6764.html
:param page_id: 页面 ID
:param begin_date: 起始时间,最长时间跨度为30天
:param end_date: 结束时间,最长时间跨度为30天
:return: 统计数据
"""
res = self._post(
"shakearound/statistics/page",
data={
"page_id": page_id,
"begin_date": self._to_timestamp(begin_date),
"end_date": self._to_timestamp(end_date),
},
result_processor=lambda x: x["data"],
)
return res
def get_apply_status(self, apply_id):
"""
查询设备ID申请审核状态
详情请参考
http://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
:param apply_id: 批次ID,申请设备ID时所返回的批次ID
:return: 批次状态信息
"""
res = self._post(
"shakearound/device/applystatus",
data={
"apply_id": apply_id,
},
result_processor=lambda x: x["data"],
)
return res
| mit | fd8dbb6db25714e5f191b2f0bd539442 | 31.015337 | 109 | 0.558781 | 2.524063 | false | false | false | false |
marcelm/cutadapt | src/cutadapt/predicates.py | 1 | 4006 | """
Filtering criteria (predicates)
"""
from abc import ABC, abstractmethod
from .qualtrim import expected_errors
from .modifiers import ModificationInfo
class Predicate(ABC):
@abstractmethod
def test(self, read, info: ModificationInfo) -> bool:
"""
Return True if the filtering criterion matches.
"""
@classmethod
def descriptive_identifier(cls) -> str:
"""
Return a short name for this predicate based on the class name such as "too_long",
"too_many_expected_errors".
This is used as identifier in the JSON report.
"""
return "".join(
("_" + ch.lower() if ch.isupper() else ch) for ch in cls.__name__
)[1:]
class TooShort(Predicate):
    """Select reads whose length is below the configured minimum."""

    def __init__(self, minimum_length: int):
        self.minimum_length = minimum_length

    def __repr__(self):
        return f"TooShort(minimum_length={self.minimum_length})"

    def test(self, read, info: ModificationInfo):
        # Matches (read is selected) when the read is strictly shorter
        # than the minimum length.
        return self.minimum_length > len(read)
class TooLong(Predicate):
    """Select reads whose length exceeds the configured maximum."""

    def __init__(self, maximum_length: int):
        self.maximum_length = maximum_length

    def __repr__(self):
        return f"TooLong(maximum_length={self.maximum_length})"

    def test(self, read, info: ModificationInfo):
        # Matches (read is selected) when the read is strictly longer
        # than the maximum length.
        return self.maximum_length < len(read)
class TooManyExpectedErrors(Predicate):
    """
    Select reads whose expected number of errors, according to the quality
    values, exceeds a threshold.

    The idea comes from usearch's -fastq_maxee parameter
    (http://drive5.com/usearch/).
    """

    def __init__(self, max_errors: float):
        self.max_errors = max_errors

    def __repr__(self):
        return f"TooManyExpectedErrors(max_errors={self.max_errors})"

    def test(self, read, info: ModificationInfo):
        # expected_errors() sums the error probabilities implied by the
        # read's Phred quality string.
        return expected_errors(read.qualities) > self.max_errors
class TooManyN(Predicate):
    """
    Select reads that contain too many 'N' bases.

    The threshold can be either an absolute count or a proportion of the
    read length.
    """

    def __init__(self, count: float):
        """
        count -- values below 1.0 are interpreted as a proportion of the read
        length; values of 1 and above are an absolute number of N bases above
        which a read is selected (discarded).
        """
        assert count >= 0
        # A threshold below 1.0 cannot be a meaningful absolute count,
        # so it is treated as a fraction of the read length.
        self.is_proportion = count < 1.0
        self.cutoff = count

    def __repr__(self):
        return f"TooManyN(cutoff={self.cutoff}, is_proportion={self.is_proportion})"

    def test(self, read, info: ModificationInfo):
        n_count = read.sequence.lower().count("n")
        if self.is_proportion:
            length = len(read)
            # An empty read cannot exceed any proportion (and avoids
            # division by zero).
            if length == 0:
                return False
            return n_count / length > self.cutoff
        return n_count > self.cutoff
class CasavaFiltered(Predicate):
    """
    Select reads that failed the CASAVA filter according to the read header.

    Such headers look like ``xxxx x:Y:x:x`` (with a ``Y``); reads that pass
    the filter carry an ``N`` in that position instead.

    Reads with unrecognized headers are not selected.
    """

    def __repr__(self):
        return "CasavaFiltered()"

    def test(self, read, info: ModificationInfo):
        # The CASAVA fields follow the first space in the header; the
        # characters at offsets 1..3 are ":Y:" exactly when the read was
        # flagged as filtered.
        _, _, casava_fields = read.name.partition(" ")
        return casava_fields[1:4] == ":Y:"
class DiscardUntrimmed(Predicate):
    """
    Select reads for which no adapter match was found
    """

    def __repr__(self):
        return "DiscardUntrimmed()"

    def test(self, read, info: ModificationInfo):
        # info.matches collects the adapter matches found for this read;
        # an empty collection means nothing was trimmed.
        return len(info.matches) == 0
class DiscardTrimmed(Predicate):
    """
    Select reads for which at least one adapter match was found
    """

    def __repr__(self):
        return "DiscardTrimmed()"

    def test(self, read, info: ModificationInfo):
        # At least one recorded adapter match means the read was trimmed.
        return len(info.matches) > 0
| mit | bb9a00243d8c90c5f0f4814a74c80f0a | 27.411348 | 95 | 0.627808 | 3.99004 | false | true | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.