hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
941d6dd3633da037ca8a317502b87d93b1342dd7 | 3,722 | py | Python | test/models/test_raw_logs.py | Stranger6667/py-offers | 73ef00e7b02ddfa5adc6df282009aea7670e465f | [
"MIT"
] | 17 | 2016-08-26T10:50:44.000Z | 2021-07-08T17:39:34.000Z | test/models/test_raw_logs.py | Stranger6667/py-offers | 73ef00e7b02ddfa5adc6df282009aea7670e465f | [
"MIT"
] | 80 | 2016-08-26T08:45:20.000Z | 2020-08-31T15:47:10.000Z | test/models/test_raw_logs.py | Stranger6667/py-offers | 73ef00e7b02ddfa5adc6df282009aea7670e465f | [
"MIT"
] | 7 | 2017-04-03T08:19:06.000Z | 2020-10-16T19:02:21.000Z | from pyoffers.models.raw_log import DateDir, LogFile, LogRecord
# Name of the recorded-HTTP cassette replayed by this module's fixtures.
CASSETTE_NAME = "raw_log"
def test_list_date_dirs(date_dirs):
    """The fixture lists exactly three DateDir entries with the expected repr."""
    assert len(date_dirs) == 3
    for entry in date_dirs:
        assert isinstance(entry, DateDir)
    assert str(date_dirs[0]) == "DateDir: Sep 9, 2016 (20160909)"
def assert_log_files(log_files):
    """Shared check: a listing of three LogFile objects with the known first repr."""
    assert len(log_files) == 3
    for entry in log_files:
        assert isinstance(entry, LogFile)
    assert str(log_files[0]) == "LogFile: Sep 9, 2016 - 09:00 am (20160909/clicks-1473411600-SlY9UO.zip)"
def test_manager_list_logs(api):
    """Listing logs through the manager API yields the expected files."""
    listing = api.raw_logs.clicks.list_logs("20160909")
    assert_log_files(listing)
def test_model_list_logs(date_dirs):
    """Listing logs from a DateDir model yields the same files as the manager."""
    first_dir = date_dirs[0]
    assert_log_files(first_dir.list_logs())
def test_download_link(log_file):
    """The model exposes the raw S3 download URL verbatim."""
    expected = "https://s3.amazonaws.com/ho-adserverlogs-prod/qproc/raw-logs/clicks/blabla"
    assert log_file.download_link == expected
def test_equality(date_dirs):
    """DateDir compares equal to itself and unequal to a different date."""
    first, second = date_dirs[0], date_dirs[1]
    assert first == first
    assert first != second
def test_content(log_file):
    # The downloaded payload is raw CSV: one header row followed by two
    # click records.  The expected value is pinned byte-for-byte, including
    # the CSV quoting and trailing newlines, so any change to download or
    # decompression handling will fail here.
    assert (
        log_file.content == b"transaction_id,affiliate_id,affiliate_manager_id,advertiser_id,advertiser_manager_id,"
        b"offer_id,creative_url_id,affiliate_source,affiliate_sub1,affiliate_sub2,"
        b"affiliate_sub3,affiliate_sub4,affiliate_sub5,datetime,revenue_cents,payout_cents,"
        b"refer,url,pixel_refer,ip,user_agent,country_code,browser_id,is_click_unique,"
        b"ad_campaign_id,ad_campaign_creative_id,offer_file_id,status,offer_file_size,"
        b"currency,payout_type,revenue_type,device_brand,device_model,device_os,"
        b"device_os_version,device_id,device_id_md5,device_id_sha1,android_id,"
        b"android_id_md5,android_id_sha1,mac_address,mac_address_md5,mac_address_sha1,"
        b"odin,open_udid,ios_ifa,ios_ifa_md5,ios_ifa_sha1,ios_ifv,user_id,unknown_id,"
        b"payout_group_id,revenue_group_id,req_connection_speed,google_aid\n"
        b'"1020f1afc9b6af45c4efe622938512",3,12,44,12,13,"","NEX","UNUSE","PD","",'
        b'"d3ba452c-6abb-487f-a9f7-10a513765f36","","2016-09-09 14:00:28","","","",'
        b'"http://example.com/aff_c?offer_id=13&aff_id=3&source=NEX&aff_sub=UNUSE&aff_sub2='
        b'PD&aff_sub4=d3ba452c-6abb-487f-a9f7-10a513765f36","","127.0.0.1",'
        b'"Mozilla/5.0 (Linux; Android 4.1.2; GT-S6310N Build/JZO54K) AppleWebKit/537.36 '
        b'(KHTML, Like Gecko) Chrome/50.0.2661.89 Mobile Safari/537.36","CZ",8,0,"","","","",'
        b'"","CZK","cpa_flat","cpa_flat","Samsung","GT-S6310N","Android","4.1","","","","","",'
        b'"","","","","","","","","","","","",0,0,"mobile",""\n'
        b'"102f149014c5fae4576c577c26347c",10,4,18,4,84,"","NEX","THM","AD","",'
        b'"94be64d8-5c3e-4bad-8d6a-ad4e9a9a01cb","","2016-09-09 14:00:44","250","","",'
        b'"http://example.com/aff_c?offer_id=84&aff_id=10&source=NEX&aff_sub=THM&aff_sub2=AD'
        b'&aff_sub4=94be64d8-5c3e-4bad-8d6a-ad4e9a9a01cb","","127.0.0.1","Mozilla/5.0 '
        b"(Windows NT 10.0) AppleWebKit/537.36 (KHTML, Like Gecko) Chrome/46.0.2486.0 "
        b'Safari/537.36 Edge/13.10586","ES",6,0,"","","","","","EUR","cpa_flat","cpc",'
        b'"Microsoft","Edge","Desktop","","","","","","","","","","","","","","","","","",'
        b'"",0,0,"broadband",""\n'
    )
def test_log_records(log_file):
    """Parsing the CSV body yields one LogRecord per data row."""
    parsed = log_file.records
    assert len(parsed) == 2
    for record in parsed:
        assert isinstance(record, LogRecord)
    assert str(parsed[0]) == "LogRecord: 13 (1020f1afc9b6af45c4efe622938512)"
def test_find_all(api):
    """find_all flattens every record of every log file for the given day."""
    found = api.raw_logs.clicks.find_all("20160912")
    assert len(found) == 4
    for record in found:
        assert isinstance(record, LogRecord)
| 46.525 | 116 | 0.679205 |
cd17c3f371cb590737e0dd8f36920f541dd4287c | 1,438 | py | Python | python/anyascii/_data/_0c1.py | casept/anyascii | d4f426b91751254b68eaa84c6cd23099edd668e6 | [
"ISC"
] | null | null | null | python/anyascii/_data/_0c1.py | casept/anyascii | d4f426b91751254b68eaa84c6cd23099edd668e6 | [
"ISC"
] | null | null | null | python/anyascii/_data/_0c1.py | casept/anyascii | d4f426b91751254b68eaa84c6cd23099edd668e6 | [
"ISC"
] | null | null | null | b='Syae Syaeg Syaekk Syaegs Syaen Syaenj Syaenh Syaed Syael Syaelg Syaelm Syaelb Syaels Syaelt Syaelp Syaelh Syaem Syaeb Syaebs Syaes Syaess Syaeng Syaej Syaech Syaek Syaet Syaep Syaeh Seo Seog Seokk Seogs Seon Seonj Seonh Seod Seol Seolg Seolm Seolb Seols Seolt Seolp Seolh Seom Seob Seobs Seos Seoss Seong Seoj Seoch Seok Seot Seop Seoh Se Seg Sekk Segs Sen Senj Senh Sed Sel Selg Selm Selb Sels Selt Selp Selh Sem Seb Sebs Ses Sess Seng Sej Sech Sek Set Sep Seh Syeo Syeog Syeokk Syeogs Syeon Syeonj Syeonh Syeod Syeol Syeolg Syeolm Syeolb Syeols Syeolt Syeolp Syeolh Syeom Syeob Syeobs Syeos Syeoss Syeong Syeoj Syeoch Syeok Syeot Syeop Syeoh Sye Syeg Syekk Syegs Syen Syenj Syenh Syed Syel Syelg Syelm Syelb Syels Syelt Syelp Syelh Syem Syeb Syebs Syes Syess Syeng Syej Syech Syek Syet Syep Syeh So Sog Sokk Sogs Son Sonj Sonh Sod Sol Solg Solm Solb Sols Solt Solp Solh Som Sob Sobs Sos Soss Song Soj Soch Sok Sot Sop Soh Swa Swag Swakk Swags Swan Swanj Swanh Swad Swal Swalg Swalm Swalb Swals Swalt Swalp Swalh Swam Swab Swabs Swas Swass Swang Swaj Swach Swak Swat Swap Swah Swae Swaeg Swaekk Swaegs Swaen Swaenj Swaenh Swaed Swael Swaelg Swaelm Swaelb Swaels Swaelt Swaelp Swaelh Swaem Swaeb Swaebs Swaes Swaess Swaeng Swaej Swaech Swaek Swaet Swaep Swaeh Soe Soeg Soekk Soegs Soen Soenj Soenh Soed Soel Soelg Soelm Soelb Soels Soelt Soelp Soelh Soem Soeb Soebs Soes Soess Soeng Soej Soech Soek Soet Soep Soeh Syo Syog Syokk Syogs' | 1,438 | 1,438 | 0.820584 |
a611793e897e32113f80fea9891bd9cd8e239f0b | 3,614 | py | Python | flypy/support/numpy_support.py | filmackay/flypy | d64e70959c5c8af9e914dcc3ce1068fb99859c3a | [
"BSD-2-Clause"
] | null | null | null | flypy/support/numpy_support.py | filmackay/flypy | d64e70959c5c8af9e914dcc3ce1068fb99859c3a | [
"BSD-2-Clause"
] | null | null | null | flypy/support/numpy_support.py | filmackay/flypy | d64e70959c5c8af9e914dcc3ce1068fb99859c3a | [
"BSD-2-Clause"
] | 1 | 2020-01-01T00:43:24.000Z | 2020-01-01T00:43:24.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import math
import struct
import numpy as np
from flypy.types import *
# Detect the native byte order ('<' little-endian, '>' big-endian).
# Compare a one-byte *slice* so this works on both Python 2 (str) and
# Python 3 (bytes): on Python 3 indexing bytes yields an int, which made
# the original `pack('i', 1)[0] == '\1'` test always false there, so nbo
# was unconditionally '>' on little-endian machines.
if struct.pack('i', 1)[:1] == b'\x01':
    nbo = '<'  # little endian
else:
    nbo = '>'  # big endian
def from_dtype(dtype):
    """
    Map a NumPy dtype to the corresponding flypy (mini)type.

    Supported kinds: signed/unsigned ints, float32/float64, bool,
    complex64/complex128, structured dtypes ('V') and object ('O').
    Raises TypeError for non-native byte order and for 16-byte float /
    32-byte complex ("long double").  NOTE(review): unsupported kinds
    (e.g. half floats, datetime) fall through and return None implicitly
    -- confirm callers handle that.
    """
    # Imported locally, presumably to avoid a circular import with flypy.types.
    from flypy.types import (int8, int16, int32, int64,
                             uint8, uint16, uint32, uint64,
                             float32, float64, complex64, complex128,
                             struct_, object_)

    # 'in' on the string 'iufbc' is a substring/char membership test here.
    if dtype.byteorder not in ('=', nbo, '|') and dtype.kind in ('iufbc'):
        raise TypeError("Only native byteorder is supported", dtype)

    # log2(itemsize) indexes the width-ordered type lists below
    # (1,2,4,8 bytes -> index 0..3).
    item_idx = int(math.log(dtype.itemsize, 2))
    if dtype.kind == 'i':
        return [int8, int16, int32, int64][item_idx]
    elif dtype.kind == 'u':
        return [uint8, uint16, uint32, uint64][item_idx]
    elif dtype.kind == 'f':
        if dtype.itemsize == 2:
            pass # half floats not supported yet
        elif dtype.itemsize == 4:
            return float32
        elif dtype.itemsize == 8:
            return float64
        elif dtype.itemsize == 16:
            raise TypeError("long double is not support")
    elif dtype.kind == 'b':
        # Booleans are represented as int8.
        return int8
    elif dtype.kind == 'c':
        if dtype.itemsize == 8:
            return complex64
        elif dtype.itemsize == 16:
            return complex128
        elif dtype.itemsize == 32:
            raise TypeError("long double is not support")
            # return complex256
    elif dtype.kind == 'V':
        # Structured dtype: map every field recursively, preserving order.
        fields = [(name, from_dtype(dtype.fields[name][0]))
                  for name in dtype.names]
        # Fall back to alignment-based detection on old NumPy versions
        # that lack dtype.isalignedstruct.
        is_aligned = dtype.alignment != 1
        return struct_(fields, packed=not getattr(dtype, 'isalignedstruct',
                                                  is_aligned))
    elif dtype.kind == 'O':
        return object_
    # TODO:
    #elif dtype.kind == 'M':
    #    # Get datetime units from 2nd to last character in dtype string
    #    # Example dtype string: '<M8[D]', where D is datetime units
    #    return datetime(units=dtype.str[-2])
    #elif dtype.kind == 'm':
    #    # Get timedelta units from 2nd to last character in dtype string
    #    # Example dtype string: '<m8[D]', where D is timedelta units
    #    return timedelta(units=dtype.str[-2])
def to_dtype(type):
    """Map a flypy type to the corresponding NumPy dtype.

    Raises KeyError for types with no NumPy equivalent.  The commented-out
    entries (longdouble, complex256) are intentionally unsupported,
    mirroring from_dtype.
    """
    from flypy.types import object_
    # Lookup table from flypy types (plus C-style aliases) to NumPy dtypes.
    typemap = {
        int8     : np.int8,
        int16    : np.int16,
        int32    : np.int32,
        int64    : np.int64,
        uint8    : np.uint8,
        uint16   : np.uint16,
        uint32   : np.uint32,
        uint64   : np.uint64,
        float_   : np.float32,
        double   : np.float64,
        # longdouble: np.longdouble,
        short    : np.dtype('h'),
        int_     : np.dtype('i'),
        long_    : np.dtype('l'),
        longlong : np.longlong,
        ushort   : np.dtype('H'),
        uint     : np.dtype('I'),
        ulong    : np.dtype('L'),
        ulonglong: np.ulonglong,
        complex64: np.complex64,
        complex128: np.complex128,
        # complex256: getattr(np, 'complex256', None),
        bool_    : np.bool,
        object_  : np.object,
    }
    # TODO: records, datetime
    result = typemap[type]
    return np.dtype(result)
26e843e5ef89cf8193b43398e97ce6d85d4f507f | 395 | py | Python | plugin/fiftyone/conf/configs.py | fenrir-z/ymir | ec6047323d3b914efb56252295589711973169e1 | [
"Apache-2.0"
] | null | null | null | plugin/fiftyone/conf/configs.py | fenrir-z/ymir | ec6047323d3b914efb56252295589711973169e1 | [
"Apache-2.0"
] | null | null | null | plugin/fiftyone/conf/configs.py | fenrir-z/ymir | ec6047323d3b914efb56252295589711973169e1 | [
"Apache-2.0"
] | null | null | null | from typing import List
from environs import Env
# Single Env reader; values come from the process environment (environs
# also supports .env files, if one is present).
env = Env()
class Config:
    """Runtime configuration for the plugin, resolved at import time."""
    # MongoDB connection string backing FiftyOne's dataset store.
    mongo_uri: str = env.str("FIFTYONE_DATABASE_URI", "mongodb://localhost:27017/")
    # Redis connection parameters.
    redis_host: str = env.str("FIFTYONE_REDIS_HOST", "localhost")
    redis_port: int = env.int("FIFTYONE_REDIS_PORT", 6379)
    redis_db: int = env.int("FIFTYONE_REDIS_DB", 0)
    # Hosts allowed to reach the service; "*" accepts any host.
    allowed_hosts: List[str] = ["*"]
# Module-level singleton used by the rest of the package.
conf = Config()
dc59acb703cdcc54983c4aa3ee04fc75ad50fb31 | 367 | py | Python | Greedy/1333.py | kjh9267/BOJ_Python | b4d2ae09c252cc9280df93ccecbd07880947827e | [
"Apache-2.0"
] | null | null | null | Greedy/1333.py | kjh9267/BOJ_Python | b4d2ae09c252cc9280df93ccecbd07880947827e | [
"Apache-2.0"
] | null | null | null | Greedy/1333.py | kjh9267/BOJ_Python | b4d2ae09c252cc9280df93ccecbd07880947827e | [
"Apache-2.0"
] | null | null | null | import sys
# Appears to be BOJ problem 1333: N songs of length L play back-to-back with
# a 5-unit gap after each; an alarm rings at every multiple of D.  Find the
# earliest alarm time that does not fall during a song -- TODO confirm against
# the problem statement.
N, L, D = map(int,sys.stdin.readline().split())
# i iterates over song END times; [i, i+5) is the gap after that song.
for i in range(L,(L+5)*N,L+5):
    key = False
    # Scan alarm times (multiples of D) for one landing inside this gap.
    for j in range(0,(L+5)*N + D,D):
        if i <= j < i + 5:
            print(j)
            key = True
            break
    if key:
        break
else:
    # No alarm rang during any gap: take the first alarm at or after the
    # moment all songs have finished, (L+5)*N.
    for i in range(0,(L+5)*N*1001,D):
        if i >= (L+5)*N:
            print(i)
            break
1cfc8d5d02b67bc3c801a4fb74f4558d0092b967 | 7,020 | py | Python | python/hk_utils/hk_print.py | HeekangPark/hk_utils | 271c95c77df72ca8c70ab10ae5077e223c630145 | [
"MIT"
] | null | null | null | python/hk_utils/hk_print.py | HeekangPark/hk_utils | 271c95c77df72ca8c70ab10ae5077e223c630145 | [
"MIT"
] | null | null | null | python/hk_utils/hk_print.py | HeekangPark/hk_utils | 271c95c77df72ca8c70ab10ae5077e223c630145 | [
"MIT"
] | null | null | null | import builtins
_decoration = {
'bold': '1',
'underline': '4',
'blink': '5',
'reverse': '7',
}
_color = {
'black': '30',
'red': '31',
'green': '32',
'yellow': '33',
'blue': '34',
'magenta': '35',
'cyan': '36',
'white': '37',
}
_background = {
'black': '40',
'red': '41',
'green': '42',
'yellow': '43',
'blue': '44',
'magenta': '45',
'cyan': '46',
'white': '47',
}
_intense = {
'black': '90',
'red': '91',
'green': '92',
'yellow': '93',
'blue': '94',
'magenta': '95',
'cyan': '96',
'white': '97'
}
_background_intense = {
'black': '100',
'red': '101',
'green': '102',
'yellow': '103',
'blue': '104',
'magenta': '105',
'cyan': '106',
'white': '107'
}
_reset = '0'
def _setStyle(color=None, background=None, intense=False, background_intense=False, bold=False, underline=False, blink=False, reverse=False):
styles = []
if bold:
styles.append(_decoration['bold'])
if underline:
styles.append(_decoration['underline'])
if blink:
styles.append(_decoration['blink'])
if reverse:
styles.append(_decoration['reverse'])
if color is not None:
color = color.lower()
if color not in _color.keys():
raise ValueError(f"Invalid color: {color}")
if intense:
styles.append(_intense[color])
else:
styles.append(_color[color])
if background is not None:
background = background.lower()
if background not in _background.keys():
raise ValueError(f"Invalid background: {background}")
if background_intense:
styles.append(_background_intense[background])
else:
styles.append(_background[background])
if len(styles) == 0:
styles.append(_reset)
return f"\033[{';'.join(styles)}m"
def _resetStyle():
return f"\033[{_reset}m"
class HKPrintTheme:
    """Registry of named print styles for HKPrint.

    A style is a dict of the keyword arguments accepted by _setStyle
    ('color', 'background', 'intense', 'background_intense', 'bold',
    'underline', 'blink', 'reverse').  Built-in styles: success (green),
    error (red), warning (yellow), info (cyan) and debug (uncolored).
    """

    # Style keys whose values must be booleans; validated in setStyle.
    _BOOL_KEYS = ("intense", "background_intense", "bold", "underline",
                  "blink", "reverse")

    @staticmethod
    def _new_style(color=None):
        """Return a fresh, unshared style dict with all flags off."""
        return {
            "color": color,
            "background": None,
            "intense": False,
            "background_intense": False,
            "bold": False,
            "underline": False,
            "blink": False,
            "reverse": False,
        }

    def __init__(self):
        self.theme = {
            "success": self._new_style("green"),
            "error": self._new_style("red"),
            "warning": self._new_style("yellow"),
            "info": self._new_style("cyan"),
            "debug": self._new_style(),
        }
        # Template for styles registered under a new name via setStyle().
        # Kept as an attribute for backward compatibility.  setStyle now
        # *copies* it: the original code aliased this dict, so defining one
        # new style permanently mutated the defaults (and shared the very
        # same dict object) for every style created afterwards.
        self._default_style = self._new_style()

    def setStyle(self, styleName, styleDict):
        """Create or update the style registered under styleName.

        styleDict may supply any subset of the style keys; keys that are
        absent keep their current (or default) values.  Invalid values
        raise AssertionError, matching the original validation behavior.
        """
        assert isinstance(styleDict, dict), f"Invalid param:styleDict - {styleDict}"

        # Work on a copy so neither the stored theme entry nor the shared
        # default template is mutated in place.
        if styleName in self.theme:
            style = dict(self.theme[styleName])
        else:
            style = dict(self._default_style)

        if "color" in styleDict:
            assert styleDict["color"].lower() in _color.keys(), f"Invalid value:styleDict.color - {styleDict['color']}"
            style["color"] = styleDict["color"].lower()
        if "background" in styleDict:
            assert styleDict["background"].lower() in _background.keys(), f"Invalid value:styleDict.background - {styleDict['background']}"
            style["background"] = styleDict["background"].lower()
        for key in self._BOOL_KEYS:
            if key in styleDict:
                assert isinstance(styleDict[key], bool), f"Invalid value:styleDict.{key} - {styleDict[key]}"
                style[key] = styleDict[key]

        self.theme[styleName] = style
def _print_template(*args, style, **kwargs):
    """Print *args wrapped in the given ANSI style, then reset.

    The style escape is prepended to the first positional argument and the
    reset escape appended to the last; with no positional arguments this
    degrades to a plain print call.
    """
    if args:
        pieces = list(args)
        pieces[0] = _setStyle(**style) + str(pieces[0])
        pieces[-1] = str(pieces[-1]) + _resetStyle()
        args = tuple(pieces)
    builtins.print(*args, **kwargs)
class HKPrint:
    """Styled print facade: exposes one method per theme entry
    (success, error, warning, info, debug, ...) and plain printing when
    the instance itself is called."""

    def __init__(self, theme=None):
        # theme: optional HKPrintTheme; falls back to the default theme.
        assert theme is None or isinstance(theme, HKPrintTheme), f"Invalid param:theme - {theme}"
        if theme is None:
            self.theme = HKPrintTheme().theme
        else:
            self.theme = theme.theme
        # NOTE(review): the per-style methods are installed on the *class*,
        # so the most recently constructed instance's theme wins for every
        # instance -- confirm this sharing is intended.  The
        # `style=self.theme[method_name]` default argument deliberately
        # binds each style dict at definition time (avoiding the classic
        # late-binding closure bug).  reversed() over a dict requires
        # Python 3.8+.
        for method_name in reversed(self.theme):
            setattr(self.__class__, method_name, lambda cls, *args, style=self.theme[method_name], **kwargs: _print_template(*args, style=style, **kwargs))

    def __call__(self, *args, **kwargs):
        # Plain, unstyled print.
        builtins.print(*args, **kwargs)
544bfc1577f76e964586209240931398ef179d1c | 550 | py | Python | blockwise_descent.py | rtavenar/SparseGroupLasso | e9c6855d92891173e78791837e0431f342ade90c | [
"BSD-2-Clause"
] | 37 | 2016-11-23T14:47:01.000Z | 2021-09-27T07:21:33.000Z | blockwise_descent.py | candleinwindsteve/SparseGroupLasso | dc7ae8687236b12d53aa7779afb6505fbaf060ff | [
"BSD-2-Clause"
] | 5 | 2018-01-18T19:41:18.000Z | 2019-08-26T13:16:01.000Z | blockwise_descent.py | candleinwindsteve/SparseGroupLasso | dc7ae8687236b12d53aa7779afb6505fbaf060ff | [
"BSD-2-Clause"
] | 17 | 2017-11-28T21:50:00.000Z | 2021-09-27T07:21:35.000Z | import numpy
import blockwise_descent_semisparse
__author__ = 'Romain Tavenard romain.tavenard[at]univ-rennes2.fr'
class SGL(blockwise_descent_semisparse.SGL):
    """Sparse-Group-Lasso where every coefficient carries the l1 penalty.

    Presumably a convenience subclass of the semi-sparse solver with
    ind_sparse fixed to all ones -- confirm against the parent class.
    """

    def __init__(self, groups, alpha, lbda, max_iter_outer=10000, max_iter_inner=100, rtol=1e-6):
        # Every feature is subject to the sparse (l1) penalty.
        self.ind_sparse = numpy.ones((len(groups),))
        self.groups = numpy.array(groups)
        # Mixing parameter and regularization strength.
        self.alpha = alpha
        self.lbda = lbda
        # Iteration budgets and relative tolerance for the solver loops.
        self.max_iter_outer = max_iter_outer
        self.max_iter_inner = max_iter_inner
        self.rtol = rtol
        # Fitted coefficients; populated by the parent's fit machinery.
        self.coef_ = None
e86c2a00355f83c122fec78f23219f90534b4351 | 11,284 | py | Python | integration_tests/src/main/python/asserts.py | petro-rudenko/spark-rapids | 0d49b5818db0add9b9ce54de07a67ae9b89c5f74 | [
"Apache-2.0"
] | 1 | 2020-08-26T22:47:34.000Z | 2020-08-26T22:47:34.000Z | integration_tests/src/main/python/asserts.py | petro-rudenko/spark-rapids | 0d49b5818db0add9b9ce54de07a67ae9b89c5f74 | [
"Apache-2.0"
] | null | null | null | integration_tests/src/main/python/asserts.py | petro-rudenko/spark-rapids | 0d49b5818db0add9b9ce54de07a67ae9b89c5f74 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from conftest import is_incompat, should_sort_on_spark, should_sort_locally, get_float_check, get_limit, spark_jvm
from datetime import date, datetime
import math
from pyspark.sql import Row
import pytest
from spark_session import with_cpu_session, with_gpu_session
import time
import types as pytypes
def _assert_equal(cpu, gpu, float_check, path):
    """Recursively assert that a CPU and a GPU result are structurally equal.

    path is the list of field names / indices leading to the current value,
    used only for error messages.  float_check is an injected predicate for
    approximate float comparison.  The dispatch order below is significant:
    bools are only caught by the isinstance check *after* the `t is int`
    branch (type(True) is bool, not int, so the int branch never fires for
    them).
    """
    t = type(cpu)
    if (t is Row):
        assert len(cpu) == len(gpu), "CPU and GPU row have different lengths at {}".format(path)
        # Prefer field-wise comparison when both rows expose named fields.
        if hasattr(cpu, "__fields__") and hasattr(gpu, "__fields__"):
            for field in cpu.__fields__:
                _assert_equal(cpu[field], gpu[field], float_check, path + [field])
        else:
            for index in range(len(cpu)):
                _assert_equal(cpu[index], gpu[index], float_check, path + [index])
    elif (t is list):
        assert len(cpu) == len(gpu), "CPU and GPU list have different lengths at {}".format(path)
        for index in range(len(cpu)):
            _assert_equal(cpu[index], gpu[index], float_check, path + [index])
    elif (t is pytypes.GeneratorType):
        index = 0
        # generator has no zip :( so we have to do this the hard way
        done = False
        while not done:
            sub_cpu = None
            sub_gpu = None
            try:
                sub_cpu = next(cpu)
            except StopIteration:
                done = True

            try:
                sub_gpu = next(gpu)
            except StopIteration:
                done = True

            # Both must be exhausted at the same step, otherwise one side
            # produced more rows than the other.
            if done:
                assert sub_cpu == sub_gpu and sub_cpu == None, "CPU and GPU generators have different lengths at {}".format(path)
            else:
                _assert_equal(sub_cpu, sub_gpu, float_check, path + [index])
            index = index + 1
    elif (t is int):
        assert cpu == gpu, "GPU and CPU int values are different at {}".format(path)
    elif (t is float):
        # NaN is treated as equal to NaN; everything else goes through the
        # injected approximate comparison.
        if (math.isnan(cpu)):
            assert math.isnan(gpu), "GPU and CPU float values are different at {}".format(path)
        else:
            assert float_check(cpu, gpu), "GPU and CPU float values are different {}".format(path)
    elif isinstance(cpu, str):
        assert cpu == gpu, "GPU and CPU string values are different at {}".format(path)
    elif isinstance(cpu, datetime):
        assert cpu == gpu, "GPU and CPU timestamp values are different at {}".format(path)
    elif isinstance(cpu, date):
        assert cpu == gpu, "GPU and CPU date values are different at {}".format(path)
    elif isinstance(cpu, bool):
        assert cpu == gpu, "GPU and CPU boolean values are different at {}".format(path)
    elif (cpu == None):
        # Null on the CPU side must be null on the GPU side too.
        assert cpu == gpu, "GPU and CPU are not both null at {}".format(path)
    else:
        assert False, "Found unexpected type {} at {}".format(t, path)
def assert_equal(cpu, gpu):
    """Verify that the result from the CPU and the GPU are equal"""
    checker = get_float_check()
    _assert_equal(cpu, gpu, float_check=checker, path=[])
def _has_incompat_conf(conf):
return ('spark.rapids.sql.incompatibleOps.enabled' in conf and
conf['spark.rapids.sql.incompatibleOps.enabled'].lower() == 'true')
class _RowCmp(object):
    """Ordering wrapper giving result rows a deterministic total order.

    Ordering: None sorts before everything, NaN sorts next, then values
    compare by their natural ordering.  Rows compare element-wise (each
    element wrapped recursively).
    """

    def __init__(self, wrapped):
        #TODO will need others for maps, etc
        if isinstance(wrapped, Row):
            self.wrapped = [_RowCmp(child) for child in wrapped]
        else:
            self.wrapped = wrapped

        self.is_nan = isinstance(wrapped, float) and math.isnan(wrapped)

    def cmp(self, other):
        """Three-way comparison: negative, zero or positive."""
        mine, theirs = self.wrapped, other.wrapped
        try:
            # None sorts first.
            if mine is None or theirs is None:
                if mine is None and theirs is None:
                    return 0
                return -1 if mine is None else 1
            # NaN sorts after None but before real values.
            if self.is_nan or other.is_nan:
                if self.is_nan and other.is_nan:
                    return 0
                return -1 if self.is_nan else 1
            if mine == theirs:
                return 0
            return -1 if mine < theirs else 1
        except TypeError as te:
            print("ERROR TRYING TO COMPARE {} to {} {}".format(mine, theirs, te))
            raise te

    def __lt__(self, other):
        return self.cmp(other) < 0

    def __gt__(self, other):
        return self.cmp(other) > 0

    def __eq__(self, other):
        return self.cmp(other) == 0

    def __le__(self, other):
        return self.cmp(other) <= 0

    def __ge__(self, other):
        return self.cmp(other) >= 0

    def __ne__(self, other):
        return self.cmp(other) != 0
def _prep_func_for_compare(func, should_collect):
    """Wrap a dataframe-producing func with the configured sort and limit,
    plus a collector that brings the result back to the driver.

    Returns (bring_back, collect_type) where collect_type is 'COLLECT' or
    'ITERATOR'.  Raises RuntimeError when local sorting is requested
    together with the iterator path, which cannot be sorted locally.
    """
    sort_locally = should_sort_locally()

    if should_sort_on_spark():
        def sorted_func(spark):
            df = func(spark)
            return df.sort(df.columns)
    else:
        sorted_func = func

    limit_val = get_limit()
    if limit_val > 0:
        def limit_func(spark):
            return sorted_func(spark).limit(limit_val)
    else:
        limit_func = sorted_func

    if should_collect:
        collect_type = 'COLLECT'
        bring_back = lambda spark: limit_func(spark).collect()
    else:
        collect_type = 'ITERATOR'
        bring_back = lambda spark: limit_func(spark).toLocalIterator()
        if sort_locally:
            raise RuntimeError('Local Sort is only supported on a collect')
    return (bring_back, collect_type)
def _prep_incompat_conf(conf):
    """Return conf with incompatible ops enabled when the incompat fixture
    is active; reject confs that try to set the flag themselves."""
    if is_incompat():
        updated = dict(conf)  # copy before changing anything
        updated['spark.rapids.sql.incompatibleOps.enabled'] = 'true'
        return updated
    if _has_incompat_conf(conf):
        raise AssertionError("incompat must be enabled by the incompat fixture")
    return conf
def _assert_gpu_and_cpu_writes_are_equal(
        write_func,
        read_func,
        base_path,
        should_collect,
        conf={}):
    """Run write_func under CPU and GPU sessions into sibling directories,
    then read both outputs back (always on the CPU) and assert equality."""
    conf = _prep_incompat_conf(conf)

    cpu_path = base_path + '/CPU'
    gpu_path = base_path + '/GPU'

    print('### CPU RUN ###')
    cpu_start = time.time()
    with_cpu_session(lambda spark: write_func(spark, cpu_path), conf=conf)
    cpu_elapsed = time.time() - cpu_start

    print('### GPU RUN ###')
    gpu_start = time.time()
    with_gpu_session(lambda spark: write_func(spark, gpu_path), conf=conf)
    gpu_elapsed = time.time() - gpu_start
    print('### WRITE: GPU TOOK {} CPU TOOK {} ###'.format(
        gpu_elapsed, cpu_elapsed))

    (cpu_bring_back, _) = _prep_func_for_compare(
        lambda spark: read_func(spark, cpu_path), should_collect)
    (gpu_bring_back, _) = _prep_func_for_compare(
        lambda spark: read_func(spark, gpu_path), should_collect)

    # Both outputs are read back with a CPU session on purpose: only the
    # *write* path is under test here.
    from_cpu = with_cpu_session(cpu_bring_back, conf=conf)
    from_gpu = with_cpu_session(gpu_bring_back, conf=conf)
    if should_sort_locally():
        from_cpu.sort(key=_RowCmp)
        from_gpu.sort(key=_RowCmp)

    assert_equal(from_cpu, from_gpu)
def assert_gpu_and_cpu_writes_are_equal_collect(write_func, read_func, base_path, conf={}):
    """Run write_func on CPU and GPU, then verify (reading on the CPU via
    read_func) that both outputs match.

    Results are collected to the driver for comparison, so keep the amount
    of data returned small.
    """
    _assert_gpu_and_cpu_writes_are_equal(write_func, read_func, base_path, True, conf=conf)
def assert_gpu_and_cpu_writes_are_equal_iterator(write_func, read_func, base_path, conf={}):
    """Run write_func on CPU and GPU, then verify (reading on the CPU via
    read_func) that both outputs match.

    Results are streamed back to the driver in chunks, so any amount of
    data works; just be mindful of how long the comparison might take.
    """
    _assert_gpu_and_cpu_writes_are_equal(write_func, read_func, base_path, False, conf=conf)
def assert_gpu_fallback_collect(func,
        cpu_fallback_class_name,
        conf={}):
    """Run func on CPU and GPU, assert the GPU plan fell back to the named
    CPU exec class, and that both results match.

    The JVM-side plan capture must be started before the GPU run and
    checked right after it; do not reorder these calls.
    """
    (bring_back, collect_type) = _prep_func_for_compare(func, True)
    conf = _prep_incompat_conf(conf)

    print('### CPU RUN ###')
    cpu_start = time.time()
    from_cpu = with_cpu_session(bring_back, conf=conf)
    cpu_end = time.time()
    print('### GPU RUN ###')
    jvm = spark_jvm()
    # Arm plan capture so the executed GPU plan can be inspected afterwards.
    jvm.com.nvidia.spark.rapids.ExecutionPlanCaptureCallback.startCapture()
    gpu_start = time.time()
    from_gpu = with_gpu_session(bring_back,
            conf=conf)
    gpu_end = time.time()
    # Waits up to 2000 ms for the captured plan and asserts the fallback.
    jvm.com.nvidia.spark.rapids.ExecutionPlanCaptureCallback.assertCapturedAndGpuFellBack(cpu_fallback_class_name, 2000)
    print('### {}: GPU TOOK {} CPU TOOK {} ###'.format(collect_type,
        gpu_end - gpu_start, cpu_end - cpu_start))
    if should_sort_locally():
        from_cpu.sort(key=_RowCmp)
        from_gpu.sort(key=_RowCmp)

    assert_equal(from_cpu, from_gpu)
def _assert_gpu_and_cpu_are_equal(func,
        should_collect,
        conf={}):
    """Run func under both a CPU and a GPU session and assert the results
    are equal, printing per-run timings along the way."""
    (bring_back, collect_type) = _prep_func_for_compare(func, should_collect)
    conf = _prep_incompat_conf(conf)

    print('### CPU RUN ###')
    cpu_start = time.time()
    from_cpu = with_cpu_session(bring_back, conf=conf)
    cpu_elapsed = time.time() - cpu_start

    print('### GPU RUN ###')
    gpu_start = time.time()
    from_gpu = with_gpu_session(bring_back, conf=conf)
    gpu_elapsed = time.time() - gpu_start
    print('### {}: GPU TOOK {} CPU TOOK {} ###'.format(collect_type,
        gpu_elapsed, cpu_elapsed))

    if should_sort_locally():
        from_cpu.sort(key=_RowCmp)
        from_gpu.sort(key=_RowCmp)

    assert_equal(from_cpu, from_gpu)
def assert_gpu_and_cpu_are_equal_collect(func, conf={}):
    """Assert that func produces identical results on CPU and GPU.

    Results are collected to the driver for comparison, so keep the amount
    of data returned small.
    """
    _assert_gpu_and_cpu_are_equal(func, True, conf=conf)
def assert_gpu_and_cpu_are_equal_iterator(func, conf={}):
    """Assert that func produces identical results on CPU and GPU.

    Results are streamed back to the driver in chunks, so any amount of
    data works; just be mindful of how long the comparison might take.
    """
    _assert_gpu_and_cpu_are_equal(func, False, conf=conf)
| 37.364238 | 129 | 0.6433 |
789d875211aee54b7ced00fe09778535c6b58ef1 | 1,824 | py | Python | domains/explore/problems/training/problem116_EE.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | 1 | 2021-09-28T12:56:56.000Z | 2021-09-28T12:56:56.000Z | domains/explore/problems/training/problem116_EE.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | null | null | null | domains/explore/problems/training/problem116_EE.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | 1 | 2022-03-31T16:30:39.000Z | 2022-03-31T16:30:39.000Z | __author__ = 'patras'
from domain_exploreEnv import *
from timer import DURATION
from state import state, rv
# Per-action durations for the RAE simulator -- TIME and COUNTER appear to
# feed two different clock modes (wall-time vs. step counting); values are
# identical here.  TODO confirm against the timer module.
DURATION.TIME = {
    'survey': 5,
    'monitor': 5,
    'screen': 5,
    'sample': 5,
    'process': 5,
    'fly': 3,
    'deposit': 1,
    'transferData': 1,
    'take': 2,
    'put': 2,
    'move': 10,
    'charge': 5,
    'negotiate': 5,
    'handleAlien': 5,
}

DURATION.COUNTER = {
    'survey': 5,
    'monitor': 5,
    'screen': 5,
    'sample': 5,
    'process': 5,
    'fly': 3,
    'deposit': 1,
    'transferData': 1,
    'take': 2,
    'put': 2,
    'move': 10,
    'charge': 5,
    'negotiate': 5,
    'handleAlien': 5,
}

# Equipment registry: id -> capability it provides, and the inverse map.
rv.TYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.EQUIPMENT = {'survey': 'e1', 'monitor': 'e2', 'screen': 'e3', 'sample': 'e4', 'process': 'e5'}
rv.EQUIPMENTTYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
# Map of the environment: named locations and travel costs between them.
rv.LOCATIONS = ['base', 'z1', 'z2', 'z3', 'z4', 'z5', 'z6', 'z7']
rv.EDGES = {'base': {'z1': 15, 'z4': 15, 'z5': 35, 'z6': 35, 'z7': 35}, 'z1': {'base': 15, 'z2': 30}, 'z2': {'z1': 30, 'z3': 30}, 'z3': {'z2': 30, 'z4': 30}, 'z4': {'z3': 30, 'base': 15}, 'z5': {'base': 35}, 'z6': {'base': 35}, 'z7': {'base': 35}}
def ResetState():
    """Reset the global `state` to this problem instance's initial setup."""
    # All agents start at base.
    state.loc = {'r1': 'base', 'r2': 'base', 'UAV': 'base'}
    # Battery levels and on-board data counts per agent.
    state.charge = { 'UAV': 50, 'r1': 80, 'r2': 50}
    state.data = { 'UAV': 1, 'r1': 1, 'r2': 1}
    # Object/equipment positions; o1 starts loaded on the UAV.
    state.pos = {'c1': 'base', 'e1': 'base', 'e2': 'base', 'e3': 'base', 'e4': 'base', 'e5': 'base', 'o1': 'UAV'}
    # Current payload of each agent (NIL -- presumably the domain's
    # empty-hands sentinel -- for the ground robots).
    state.load = {'r1': NIL, 'r2': NIL, 'UAV': 'o1'}
    state.storm = {'active': False}
# Task arrivals keyed by time step: at t=2 the UAV surveys z3, z5 and z7;
# at t=4 robot r2 handles an emergency at z1.
tasks = {
    2: [['doActivities', 'UAV', [['survey', 'z3'], ['survey', 'z5'], ['survey', 'z7']]]],
    4: [['handleEmergency', 'r2', 'z1']],
}

# Exogenous events keyed by time step: an alien is spotted at z2 at t=4.
eventsEnv = {
    4: [alienSpotted, ['z2']]
}
1ef9af35f4505473472bff9ee7020ba4448f1f9b | 10,178 | py | Python | prompt_toolkit/input/vt100.py | scoennz/python-prompt-toolkit | 27e4a79ee13f4473a7a29a3b12036576606755b0 | [
"BSD-3-Clause"
] | 1 | 2021-09-07T05:26:21.000Z | 2021-09-07T05:26:21.000Z | prompt_toolkit/input/vt100.py | scoennz/python-prompt-toolkit | 27e4a79ee13f4473a7a29a3b12036576606755b0 | [
"BSD-3-Clause"
] | 1 | 2020-08-11T19:53:13.000Z | 2020-08-11T19:53:13.000Z | prompt_toolkit/input/vt100.py | scoennz/python-prompt-toolkit | 27e4a79ee13f4473a7a29a3b12036576606755b0 | [
"BSD-3-Clause"
] | 1 | 2016-10-20T11:54:20.000Z | 2016-10-20T11:54:20.000Z | import contextlib
import io
import os
import sys
import termios
import tty
from asyncio import AbstractEventLoop, get_event_loop
from typing import (
Callable,
ContextManager,
Dict,
Generator,
List,
Optional,
Set,
TextIO,
Tuple,
Union,
)
from prompt_toolkit.utils import is_dumb_terminal
from ..key_binding import KeyPress
from .base import Input
from .posix_utils import PosixStdinReader
from .vt100_parser import Vt100Parser
__all__ = [
"Vt100Input",
"raw_mode",
"cooked_mode",
]
class Vt100Input(Input):
"""
Vt100 input for Posix systems.
(This uses a posix file descriptor that can be registered in the event loop.)
"""
# For the error messages. Only display "Input is not a terminal" once per
# file descriptor.
_fds_not_a_terminal: Set[int] = set()
def __init__(self, stdin: TextIO) -> None:
# Test whether the given input object has a file descriptor.
# (Idle reports stdin to be a TTY, but fileno() is not implemented.)
try:
# This should not raise, but can return 0.
stdin.fileno()
except io.UnsupportedOperation as e:
if "idlelib.run" in sys.modules:
raise io.UnsupportedOperation(
"Stdin is not a terminal. Running from Idle is not supported."
) from e
else:
raise io.UnsupportedOperation("Stdin is not a terminal.") from e
# Even when we have a file descriptor, it doesn't mean it's a TTY.
# Normally, this requires a real TTY device, but people instantiate
# this class often during unit tests as well. They use for instance
# pexpect to pipe data into an application. For convenience, we print
# an error message and go on.
isatty = stdin.isatty()
fd = stdin.fileno()
if not isatty and fd not in Vt100Input._fds_not_a_terminal:
msg = "Warning: Input is not a terminal (fd=%r).\n"
sys.stderr.write(msg % fd)
sys.stderr.flush()
Vt100Input._fds_not_a_terminal.add(fd)
#
self.stdin = stdin
# Create a backup of the fileno(). We want this to work even if the
# underlying file is closed, so that `typeahead_hash()` keeps working.
self._fileno = stdin.fileno()
self._buffer: List[KeyPress] = [] # Buffer to collect the Key objects.
self.stdin_reader = PosixStdinReader(self._fileno, encoding=stdin.encoding)
self.vt100_parser = Vt100Parser(
lambda key_press: self._buffer.append(key_press)
)
@property
def responds_to_cpr(self) -> bool:
# When the input is a tty, we assume that CPR is supported.
# It's not when the input is piped from Pexpect.
if os.environ.get("PROMPT_TOOLKIT_NO_CPR", "") == "1":
return False
if is_dumb_terminal():
return False
try:
return self.stdin.isatty()
except ValueError:
return False # ValueError: I/O operation on closed file
def attach(self, input_ready_callback: Callable[[], None]) -> ContextManager[None]:
"""
Return a context manager that makes this input active in the current
event loop.
"""
return _attached_input(self, input_ready_callback)
def detach(self) -> ContextManager[None]:
"""
Return a context manager that makes sure that this input is not active
in the current event loop.
"""
return _detached_input(self)
def read_keys(self) -> List[KeyPress]:
" Read list of KeyPress. "
# Read text from stdin.
data = self.stdin_reader.read()
# Pass it through our vt100 parser.
self.vt100_parser.feed(data)
# Return result.
result = self._buffer
self._buffer = []
return result
def flush_keys(self) -> List[KeyPress]:
"""
Flush pending keys and return them.
(Used for flushing the 'escape' key.)
"""
# Flush all pending keys. (This is most important to flush the vt100
# 'Escape' key early when nothing else follows.)
self.vt100_parser.flush()
# Return result.
result = self._buffer
self._buffer = []
return result
@property
def closed(self) -> bool:
return self.stdin_reader.closed
def raw_mode(self) -> ContextManager[None]:
return raw_mode(self.stdin.fileno())
def cooked_mode(self) -> ContextManager[None]:
return cooked_mode(self.stdin.fileno())
def fileno(self) -> int:
return self.stdin.fileno()
def typeahead_hash(self) -> str:
return "fd-%s" % (self._fileno,)
_current_callbacks: Dict[
Tuple[AbstractEventLoop, int], Optional[Callable[[], None]]
] = {} # (loop, fd) -> current callback
@contextlib.contextmanager
def _attached_input(
input: Vt100Input, callback: Callable[[], None]
) -> Generator[None, None, None]:
"""
Context manager that makes this input active in the current event loop.
:param input: :class:`~prompt_toolkit.input.Input` object.
:param callback: Called when the input is ready to read.
"""
loop = get_event_loop()
fd = input.fileno()
previous = _current_callbacks.get((loop, fd))
loop.add_reader(fd, callback)
_current_callbacks[loop, fd] = callback
try:
yield
finally:
loop.remove_reader(fd)
if previous:
loop.add_reader(fd, previous)
_current_callbacks[loop, fd] = previous
else:
del _current_callbacks[loop, fd]
@contextlib.contextmanager
def _detached_input(input: Vt100Input) -> Generator[None, None, None]:
loop = get_event_loop()
fd = input.fileno()
previous = _current_callbacks.get((loop, fd))
if previous:
loop.remove_reader(fd)
_current_callbacks[loop, fd] = None
try:
yield
finally:
if previous:
loop.add_reader(fd, previous)
_current_callbacks[loop, fd] = previous
class raw_mode:
"""
::
with raw_mode(stdin):
''' the pseudo-terminal stdin is now used in raw mode '''
We ignore errors when executing `tcgetattr` fails.
"""
# There are several reasons for ignoring errors:
# 1. To avoid the "Inappropriate ioctl for device" crash if somebody would
# execute this code (In a Python REPL, for instance):
#
# import os; f = open(os.devnull); os.dup2(f.fileno(), 0)
#
# The result is that the eventloop will stop correctly, because it has
# to logic to quit when stdin is closed. However, we should not fail at
# this point. See:
# https://github.com/jonathanslenders/python-prompt-toolkit/pull/393
# https://github.com/jonathanslenders/python-prompt-toolkit/issues/392
# 2. Related, when stdin is an SSH pipe, and no full terminal was allocated.
# See: https://github.com/jonathanslenders/python-prompt-toolkit/pull/165
def __init__(self, fileno: int) -> None:
self.fileno = fileno
self.attrs_before: Optional[List[Union[int, List[bytes]]]]
try:
self.attrs_before = termios.tcgetattr(fileno)
except termios.error:
# Ignore attribute errors.
self.attrs_before = None
def __enter__(self) -> None:
# NOTE: On os X systems, using pty.setraw() fails. Therefor we are using this:
try:
newattr = termios.tcgetattr(self.fileno)
except termios.error:
pass
else:
newattr[tty.LFLAG] = self._patch_lflag(newattr[tty.LFLAG])
newattr[tty.IFLAG] = self._patch_iflag(newattr[tty.IFLAG])
# VMIN defines the number of characters read at a time in
# non-canonical mode. It seems to default to 1 on Linux, but on
# Solaris and derived operating systems it defaults to 4. (This is
# because the VMIN slot is the same as the VEOF slot, which
# defaults to ASCII EOT = Ctrl-D = 4.)
newattr[tty.CC][termios.VMIN] = 1 # type: ignore
termios.tcsetattr(self.fileno, termios.TCSANOW, newattr)
# Put the terminal in cursor mode. (Instead of application mode.)
os.write(self.fileno, b"\x1b[?1l")
@classmethod
def _patch_lflag(cls, attrs):
return attrs & ~(termios.ECHO | termios.ICANON | termios.IEXTEN | termios.ISIG)
@classmethod
def _patch_iflag(cls, attrs):
return attrs & ~(
# Disable XON/XOFF flow control on output and input.
# (Don't capture Ctrl-S and Ctrl-Q.)
# Like executing: "stty -ixon."
termios.IXON
| termios.IXOFF
|
# Don't translate carriage return into newline on input.
termios.ICRNL
| termios.INLCR
| termios.IGNCR
)
def __exit__(self, *a: object) -> None:
if self.attrs_before is not None:
try:
termios.tcsetattr(self.fileno, termios.TCSANOW, self.attrs_before)
except termios.error:
pass
# # Put the terminal in application mode.
# self._stdout.write('\x1b[?1h')
class cooked_mode(raw_mode):
"""
The opposite of ``raw_mode``, used when we need cooked mode inside a
`raw_mode` block. Used in `Application.run_in_terminal`.::
with cooked_mode(stdin):
''' the pseudo-terminal stdin is now used in cooked mode. '''
"""
@classmethod
def _patch_lflag(cls, attrs):
return attrs | (termios.ECHO | termios.ICANON | termios.IEXTEN | termios.ISIG)
@classmethod
def _patch_iflag(cls, attrs):
# Turn the ICRNL flag back on. (Without this, calling `input()` in
# run_in_terminal doesn't work and displays ^M instead. Ptpython
# evaluates commands using `run_in_terminal`, so it's important that
# they translate ^M back into ^J.)
return attrs | termios.ICRNL
| 32.414013 | 87 | 0.618098 |
c22326c3549be9e6f47a2b3e46d096ebe6b86357 | 6,010 | py | Python | BeesEtAl/BA_Patch.py | FJFranklin/BeesEtAl | 3fd21d044e77b4a1df56ac2f405e2084bebd54e1 | [
"MIT"
] | 1 | 2020-08-04T00:13:54.000Z | 2020-08-04T00:13:54.000Z | BeesEtAl/BA_Patch.py | FJFranklin/BeesEtAl | 3fd21d044e77b4a1df56ac2f405e2084bebd54e1 | [
"MIT"
] | null | null | null | BeesEtAl/BA_Patch.py | FJFranklin/BeesEtAl | 3fd21d044e77b4a1df56ac2f405e2084bebd54e1 | [
"MIT"
] | null | null | null | import numpy as np
class BA_Patch(object):
def __init__(self, garden, id_no):
self.G = garden # the BA_Garden object
self.id_no = id_no # a reference number to identify this patch
self.Nfails = 0 # total number of local failures to find better position
self.radius = 0 # current neighbourhood radius
self.try_X = None # a suggestion for where to try next; may be empty
self.old_X = None # the current local best position
self.old_cost = 0 # cost at old_X
self.sequence = 0 # number identifying each search sequence
self.history = None
def X_from_MESO(self):
X = np.copy(self.old_X)
indices = []
for i in range(0, len(self.try_X)):
if self.old_X[i] != self.try_X[i]:
indices.append(i)
Ni = len(indices)
Nc = np.random.binomial(Ni, (1 + self.G.Nfails - self.Nfails) / (2 + self.G.Nfails))
# equivalent to rolling the dice for each index - which, in retrospect, would be simpler to implement
if Nc == 0:
if self.G.costfn.verbose:
print('MESO: difference in indices: {i} -> (none)'.format(i=indices))
else:
changes = np.random.permutation(indices)
if self.G.costfn.verbose:
print('MESO: difference in indices: {i} -> {c}'.format(i=indices, c=changes[0:Nc]))
for i in range(0, Nc):
X[changes[i]] = self.try_X[changes[i]]
return X
def new_local_search(self, prior):
self.history = [self.history]
if self.G.costfn.verbose:
print('==== Patch {p}: #bees={b}, #fails={f}, cost={c}, radius={r}'.format(p=self.id_no, b=prior, f=self.Nfails, c=self.old_cost, r=self.radius))
bPatch = True # whether we should try to plot the patch
bFirst = True
bFailed = True
bSkippy = False # if True, we've started skipping - require neighborhood adjustment
Neval = 0 # number of evaluations of the cost function
for p in range(0, prior): # prior is the number of bees attracted to this patch
if bPatch:
if self.G.plotter:
self.G.plotter.patch(self.old_X, self.radius)
bPatch = False
if self.try_X is not None:
if bFirst and not bSkippy:
X = self.X_from_MESO()
else:
X = self.G.new_position_in_neighbourhood(self.X_from_MESO(), self.radius)
else:
X = self.G.new_position_in_neighbourhood(self.old_X, self.radius)
if self.G.costfn.calculate_cost(X) is not None:
cost = self.G.costfn.cost
XA = self.G.costfn.XA
XM = self.G.costfn.XM
if self.G.plotter:
self.G.plotter.bee(XA)
else:
if self.G.costfn.verbose:
print('(skip - bank 1 scout)')
self.G.scout.schedule(1)
bSkippy = True
continue
Neval = Neval + 1
if self.G.compare(XA, self.old_X):
bFailed = False
if self.G.dynamic:
if self.G.compare(XA, self.old_X):
if self.G.costfn.verbose:
print('(updating patch)')
self.old_X = XA
self.old_cost = cost
if np.array_equal(XA, XM):
self.try_X = None
else:
self.try_X = XM
bPatch = True
bFirst = True
else:
bFirst = False
else:
if bFirst:
best_X = X
best_XA = XA
best_XM = XM
best_cost = cost
bFirst = False
elif self.G.compare(XA, best_X):
best_X = X
best_XA = XA
best_XM = XM
best_cost = cost
self.history.append(cost)
# ... and we're done
if bFailed: # shrink the neighbourhood
self.radius = self.radius * self.G.cooling
self.Nfails = self.Nfails + 1
elif not self.G.dynamic:
self.old_X = best_XA
self.old_cost = best_cost
if np.array_equal(best_XA, best_XM):
self.try_X = None
else:
self.try_X = best_XM
return self.old_X # return the local best solution, even if old
def new_global_search(self, seq_id, seq_term): # function cost = new_global_search(sequence_number > 0, termination cause)
if self.history is not None:
self.G.report(self.sequence, seq_term, self.history)
self.sequence = seq_id
cost, XA, XM = self.G.scout.pop()
while cost is None: # shouldn't happen, but could (if solution space is small), so just in case...
print('* * * No scouts banked! * * *')
self.G.scout.schedule(1)
self.G.scout.evaluate(1)
cost, XA, XM = self.G.scout.pop() # although, if we exhaust all of space, this will go infinite
self.old_X = XA
self.old_cost = cost
if np.array_equal(XA, XM):
self.try_X = None
else:
self.try_X = XM
self.radius = self.G.radius
self.Nfails = 0
self.history = [self.old_cost]
return self.old_X
def flush_history(self):
if self.history is not None:
self.G.report(self.sequence, 'incomplete', self.history)
| 35.77381 | 158 | 0.49817 |
6a3ceff35e6660db262c9e5c22e105bacb86c4b0 | 1,472 | py | Python | tests/distributed/testFilter_distributed.py | noahyonack/Parallelogram | eb5e3d0f587434ba51116f65595ef97b6e8df19a | [
"Apache-2.0"
] | null | null | null | tests/distributed/testFilter_distributed.py | noahyonack/Parallelogram | eb5e3d0f587434ba51116f65595ef97b6e8df19a | [
"Apache-2.0"
] | null | null | null | tests/distributed/testFilter_distributed.py | noahyonack/Parallelogram | eb5e3d0f587434ba51116f65595ef97b6e8df19a | [
"Apache-2.0"
] | null | null | null | '''
Ensures correctness for p_filter() using the PyUnit (unittest) package
'''
import unittest # our test package
from parallelogram.config import PORT # PORT on which the server should listen
from parallelogram import parallelogram # library methods
from parallelogram.parallelogram_server import Server # server api
class TestFilter_Distributed(unittest.TestCase):
def test_filter_1(self):
'''
Test a basic filtering case by filtering out odd numbers from a small list
'''
def foo_1(elt, index):
'''
Filters out odd numbers.
'''
return elt % 2 == 0
# ensure correct output when filtering out odd numbers from small list
output = parallelogram.p_filter(foo_1, range(6), PORT, 10)
self.assertEqual(output, range(0, 6, 2))
def test_filter_2(self):
'''
Test a basic filtering case by filtering out odd numbers from a big list
'''
def foo_1(elt, index):
'''
Filters out odd numbers.
'''
return elt % 2 == 0
# ensure correct output when filtering out odd numbers from big list
output = parallelogram.p_filter(foo_1, range(1000), PORT, 10)
self.assertEqual(output, range(0, 1000, 2))
def test_filter_3(self):
'''
Ensure that filter operates correctly on empty lists
'''
def foo_1(elt, index):
'''
Filters out odd numbers.
'''
return elt % 2 == 0
# ensure correct output when filtering over empty lists
output = parallelogram.p_filter(foo_1, [], PORT, 10)
self.assertEqual(output, [])
| 26.285714 | 78 | 0.705842 |
d22bae1f454917d45855fed939421877eedd9d2e | 918 | py | Python | isi_sdk_8_1_0/test/test_namespace_metadata.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_1_0/test/test_namespace_metadata.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_1_0/test/test_namespace_metadata.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_0
from isi_sdk_8_1_0.models.namespace_metadata import NamespaceMetadata # noqa: E501
from isi_sdk_8_1_0.rest import ApiException
class TestNamespaceMetadata(unittest.TestCase):
"""NamespaceMetadata unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNamespaceMetadata(self):
"""Test NamespaceMetadata"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_0.models.namespace_metadata.NamespaceMetadata() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.390244 | 91 | 0.71024 |
dbad50797b3f945740f40027e3833cceff96200b | 5,023 | py | Python | tests/pools/test_wallet_pool_store.py | konarshankar07/bytecash-blockchain | 1c1fab19664e7777b75be1a60facb8a454341443 | [
"Apache-2.0"
] | null | null | null | tests/pools/test_wallet_pool_store.py | konarshankar07/bytecash-blockchain | 1c1fab19664e7777b75be1a60facb8a454341443 | [
"Apache-2.0"
] | null | null | null | tests/pools/test_wallet_pool_store.py | konarshankar07/bytecash-blockchain | 1c1fab19664e7777b75be1a60facb8a454341443 | [
"Apache-2.0"
] | null | null | null | import asyncio
from pathlib import Path
from secrets import token_bytes
from typing import Optional
import aiosqlite
import pytest
from clvm_tools import binutils
from bytecash.types.blockchain_format.coin import Coin
from bytecash.types.blockchain_format.program import Program, SerializedProgram
from bytecash.types.blockchain_format.sized_bytes import bytes32
from bytecash.types.coin_spend import CoinSpend
from bytecash.util.db_wrapper import DBWrapper
from bytecash.util.ints import uint64
from bytecash.wallet.wallet_pool_store import WalletPoolStore
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
def make_child_solution(coin_spend: CoinSpend, new_coin: Optional[Coin] = None) -> CoinSpend:
# TODO: address hint error and remove ignore
# error: Incompatible types in assignment (expression has type "bytes", variable has type "bytes32")
# [assignment]
new_puzzle_hash: bytes32 = token_bytes(32) # type: ignore[assignment]
solution = "()"
puzzle = f"(q . ((51 0x{new_puzzle_hash.hex()} 1)))"
puzzle_prog = Program.to(binutils.assemble(puzzle))
solution_prog = Program.to(binutils.assemble(solution))
if new_coin is None:
new_coin = coin_spend.additions()[0]
sol: CoinSpend = CoinSpend(
new_coin,
SerializedProgram.from_program(puzzle_prog),
SerializedProgram.from_program(solution_prog),
)
return sol
class TestWalletPoolStore:
@pytest.mark.asyncio
async def test_store(self):
db_filename = Path("wallet_store_test.db")
if db_filename.exists():
db_filename.unlink()
db_connection = await aiosqlite.connect(db_filename)
db_wrapper = DBWrapper(db_connection)
store = await WalletPoolStore.create(db_wrapper)
try:
await db_wrapper.begin_transaction()
coin_0 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
coin_0_alt = Coin(token_bytes(32), token_bytes(32), uint64(12312))
solution_0: CoinSpend = make_child_solution(None, coin_0)
solution_0_alt: CoinSpend = make_child_solution(None, coin_0_alt)
solution_1: CoinSpend = make_child_solution(solution_0)
assert store.get_spends_for_wallet(0) == []
assert store.get_spends_for_wallet(1) == []
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
# Idempotent
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
with pytest.raises(ValueError):
await store.add_spend(1, solution_1, 101)
# Rebuild cache, no longer present
await db_wrapper.rollback_transaction()
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == []
await store.rebuild_cache()
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
solution_1_alt: CoinSpend = make_child_solution(solution_0_alt)
with pytest.raises(ValueError):
await store.add_spend(1, solution_1_alt, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
solution_2: CoinSpend = make_child_solution(solution_1)
await store.add_spend(1, solution_2, 100)
await store.rebuild_cache()
solution_3: CoinSpend = make_child_solution(solution_2)
await store.add_spend(1, solution_3, 100)
solution_4: CoinSpend = make_child_solution(solution_3)
with pytest.raises(ValueError):
await store.add_spend(1, solution_4, 99)
await store.rebuild_cache()
await store.add_spend(1, solution_4, 101)
await store.rebuild_cache()
await store.rollback(101, 1)
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == [
(100, solution_1),
(100, solution_2),
(100, solution_3),
(101, solution_4),
]
await store.rebuild_cache()
await store.rollback(100, 1)
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == [
(100, solution_1),
(100, solution_2),
(100, solution_3),
]
with pytest.raises(ValueError):
await store.add_spend(1, solution_1, 105)
await store.add_spend(1, solution_4, 105)
solution_5: CoinSpend = make_child_solution(solution_4)
await store.add_spend(1, solution_5, 105)
await store.rollback(99, 1)
assert store.get_spends_for_wallet(1) == []
finally:
await db_connection.close()
db_filename.unlink()
| 37.485075 | 110 | 0.643241 |
d4083cd09a43f35986a1b9acc4afc072f2b6d005 | 5,786 | py | Python | L01/code/helper.py | dumpmemory/stat453-deep-learning-ss21 | 2202699c5fd38af398e2682f289a0868b1b91f0e | [
"MIT"
] | 175 | 2021-01-28T03:46:43.000Z | 2022-03-29T14:22:14.000Z | L01/code/helper.py | dumpmemory/stat453-deep-learning-ss21 | 2202699c5fd38af398e2682f289a0868b1b91f0e | [
"MIT"
] | 3 | 2021-04-25T16:06:50.000Z | 2021-12-27T17:31:19.000Z | L01/code/helper.py | dumpmemory/stat453-deep-learning-ss21 | 2202699c5fd38af398e2682f289a0868b1b91f0e | [
"MIT"
] | 83 | 2021-02-02T23:39:21.000Z | 2022-03-30T02:16:26.000Z | # imports from installed libraries
import os
import matplotlib.pyplot as plt
import numpy as np
import random
import torch
from torch.utils.data import sampler
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
def set_all_seeds(seed):
os.environ["PL_GLOBAL_SEED"] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def set_deterministic():
if torch.cuda.is_available():
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.set_deterministic(True)
def plot_training_loss(minibatch_loss_list, num_epochs, iter_per_epoch,
averaging_iterations=100):
plt.figure()
ax1 = plt.subplot(1, 1, 1)
ax1.plot(range(len(minibatch_loss_list)),
(minibatch_loss_list), label='Minibatch Loss')
if len(minibatch_loss_list) > 1000:
ax1.set_ylim([
0, np.max(minibatch_loss_list[1000:])*1.5
])
ax1.set_xlabel('Iterations')
ax1.set_ylabel('Loss')
ax1.plot(np.convolve(minibatch_loss_list,
np.ones(averaging_iterations,)/averaging_iterations,
mode='valid'),
label='Running Average')
ax1.legend()
###################
# Set scond x-axis
ax2 = ax1.twiny()
newlabel = list(range(num_epochs+1))
newpos = [e*iter_per_epoch for e in newlabel]
ax2.set_xticks(newpos[::10])
ax2.set_xticklabels(newlabel[::10])
ax2.xaxis.set_ticks_position('bottom')
ax2.xaxis.set_label_position('bottom')
ax2.spines['bottom'].set_position(('outward', 45))
ax2.set_xlabel('Epochs')
ax2.set_xlim(ax1.get_xlim())
###################
plt.tight_layout()
plt.savefig(os.path.join('plot_training_loss.pdf'))
plt.clf()
def plot_accuracy(train_acc_list, valid_acc_list):
num_epochs = len(train_acc_list)
plt.plot(np.arange(1, num_epochs+1),
train_acc_list, label='Training')
plt.plot(np.arange(1, num_epochs+1),
valid_acc_list, label='Validation')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.tight_layout()
plt.savefig('plot_acc_training_validation.pdf')
plt.clf()
class ChunkSampler(sampler.Sampler):
"""Samples elements sequentially from some offset.
Arguments:
num_samples: # of desired datapoints
start: offset where we should start selecting from
"""
def __init__(self, num_samples, start=0):
self.num_samples = num_samples
self.start = start
def __iter__(self):
return iter(range(self.start, self.start + self.num_samples))
def __len__(self):
return self.num_samples
def get_dataloaders_mnist(batch_size, num_workers=0,
validation_fraction=None,
train_transforms=None,
test_transforms=None):
if train_transforms is None:
train_transforms = transforms.ToTensor()
if test_transforms is None:
test_transforms = transforms.ToTensor()
train_dataset = datasets.MNIST(root='data',
train=True,
transform=train_transforms,
download=True)
valid_dataset = datasets.MNIST(root='data',
train=True,
transform=test_transforms)
test_dataset = datasets.MNIST(root='data',
train=False,
transform=test_transforms)
if validation_fraction is not None:
num = int(validation_fraction * 60000)
train_indices = torch.arange(0, 60000 - num)
valid_indices = torch.arange(60000 - num, 60000)
# train_sampler = SubsetRandomSampler(train_indices)
# valid_sampler = SubsetRandomSampler(valid_indices)
train_sampler = ChunkSampler(train_indices.shape[0], 0)
valid_sampler = ChunkSampler(valid_indices.shape[0],
train_indices.shape[0])
valid_loader = DataLoader(dataset=valid_dataset,
batch_size=batch_size,
num_workers=num_workers,
sampler=valid_sampler)
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
num_workers=num_workers,
sampler=train_sampler)
else:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=True)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=False)
if validation_fraction is None:
return train_loader, test_loader
else:
return train_loader, valid_loader, test_loader
def compute_accuracy(model, data_loader, device):
with torch.no_grad():
correct_pred, num_examples = 0, 0
for i, (features, targets) in enumerate(data_loader):
features = features.to(device)
targets = targets.float().to(device)
logits, probas = model(features)
_, predicted_labels = torch.max(probas, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float()/num_examples * 100
| 30.941176 | 77 | 0.595576 |
baf81f73c108ce4eee28234676426750f4ba7c47 | 1,271 | py | Python | tests/hls_kernel.py | sharm294/shoal | db7dd08a70882585fb9740a39b57b4b7a48b3081 | [
"MIT"
] | 1 | 2021-04-12T06:41:33.000Z | 2021-04-12T06:41:33.000Z | tests/hls_kernel.py | UofT-HPRC/shoal | db7dd08a70882585fb9740a39b57b4b7a48b3081 | [
"MIT"
] | null | null | null | tests/hls_kernel.py | UofT-HPRC/shoal | db7dd08a70882585fb9740a39b57b4b7a48b3081 | [
"MIT"
] | null | null | null | import os
from sonar.testbench import Testbench, Module, TestVector, Thread
from sonar.interfaces import AXIS, SAXILite
hls_kernel = Testbench.default('hls_kernel')
filepath = os.path.join(os.path.dirname(__file__), 'build/hls_kernel/')
dut = Module.default("DUT")
dut.add_clock_port('ap_clk', '20ns')
dut.add_reset_port('ap_rst_n')
dut.add_port('id_0', 'input', 16)
dut.add_port('id_1', 'input', 16)
hls_kernel.add_module(dut)
################################################################################
# Test Vectors
################################################################################
# Initialization thread (added to each test vector to reset everything)
initT = Thread()
initT.init_signals()
initT.wait_negedge('ap_clk')
initT.add_delay('40ns')
initT.set_signal('ap_rst_n', 1)
initT.set_signal('id_0', 0)
initT.set_signal('id_1', 1)
#-------------------------------------------------------------------------------
# case_0
#
#-------------------------------------------------------------------------------
case_0 = TestVector()
case_0.add_thread(initT)
smA_t1 = case_0.add_thread()
smA_t1.add_delay('100ns')
smA_t1.print_elapsed_time("case_0")
smA_t1.end_vector()
hls_kernel.add_test_vector(case_0)
hls_kernel.generateTB(filepath, 'sv')
| 27.630435 | 80 | 0.568057 |
d352772ab5422d4c3566d9e4a2975fc6ddc6ca8d | 508 | py | Python | meiduo_mall/meiduo_mall/apps/payment/models.py | HOXI818/meiduomail | 8f398ac8ac0414fd23c60314db4f27dd9e68b2cd | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/payment/models.py | HOXI818/meiduomail | 8f398ac8ac0414fd23c60314db4f27dd9e68b2cd | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/payment/models.py | HOXI818/meiduomail | 8f398ac8ac0414fd23c60314db4f27dd9e68b2cd | [
"MIT"
] | null | null | null | from django.db import models
from meiduo_mall.utils.models import BaseModel
from orders.models import OrderInfo
# Create your models here.
class Payment(BaseModel):
"""
支付信息
"""
order = models.ForeignKey(OrderInfo, on_delete=models.CASCADE, verbose_name='订单')
trade_id = models.CharField(max_length=100, unique=True, null=True, blank=True, verbose_name="支付编号")
class Meta:
db_table = 'tb_payment'
verbose_name = '支付信息'
verbose_name_plural = verbose_name
| 24.190476 | 104 | 0.708661 |
b87041c7addfc95dea971b047e7984adced06c78 | 10,784 | py | Python | Source/Oyooni/Text Recognition/test.py | Oyooni5245/Oyooni | a00b845ac97eaee74d40cab563b9532fdeca97c8 | [
"MIT"
] | null | null | null | Source/Oyooni/Text Recognition/test.py | Oyooni5245/Oyooni | a00b845ac97eaee74d40cab563b9532fdeca97c8 | [
"MIT"
] | null | null | null | Source/Oyooni/Text Recognition/test.py | Oyooni5245/Oyooni | a00b845ac97eaee74d40cab563b9532fdeca97c8 | [
"MIT"
] | null | null | null | """
Copyright (c) 2019-present NAVER Corp.
MIT License
"""
from collections import OrderedDict
import config
from craft import CRAFT
import codecs
import imgproc
import craft_utils
import cv2
import numpy as np
import pytesseract
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch
import os
import time
import regex as re
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
def copyStateDict(state_dict):
    """Return a copy of *state_dict* with any leading ``module`` prefix removed.

    Checkpoints saved from a ``torch.nn.DataParallel`` wrapper prefix every
    key with ``module.``; dropping the first dotted component makes such a
    checkpoint loadable by a bare model. Plain checkpoints are copied as-is.
    """
    keys = list(state_dict.keys())
    # Only the first key is inspected: either every key carries the prefix
    # or none does.
    drop = 1 if keys[0].startswith("module") else 0
    renamed = OrderedDict(
        (".".join(key.split(".")[drop:]), value)
        for key, value in state_dict.items()
    )
    return renamed
def str2bool(v):
    """Interpret the string *v* as a boolean, case-insensitively.

    Returns True for "yes"/"y"/"true"/"t"/"1"; anything else is False.
    """
    truthy = ("yes", "y", "true", "t", "1")
    return v.lower() in truthy
def hasNumbers(inputString):
    """Return True if *inputString* contains at least one decimal digit."""
    for ch in inputString:
        if ch.isdigit():
            return True
    return False
def getFullText(imgs, path, lang):
    """OCR a list of cropped text-box images and aggregate the result.

    Each image is preprocessed (grayscale -> invert -> Otsu threshold) and fed
    to Tesseract; the recognised snippets are concatenated, split into Arabic
    and English word streams, written to *path* (utf-8), and returned.

    Args:
        imgs: iterable of BGR images (cv2-style arrays) to OCR.
        lang: 'en' selects English-only OCR; anything else uses 'ara+eng'.
        path: file the concatenated text is written to and read back from.

    Returns:
        dict with keys 'ar' and 'en' (language-split words) and 'full_text'
        (the lines read back from *path*).
    """
    custom_config = r'--oem 3 --psm 6'
    fullText = ""
    for img in imgs:
        # NOTE(review): BGR->RGB conversion is applied twice in a row, which
        # swaps the channels back to BGR order — presumably unintentional;
        # confirm against the rest of the pipeline.
        currentImage = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        currentImage = cv2.cvtColor(currentImage, cv2.COLOR_BGR2RGB)
        gray = cv2.cvtColor(currentImage, cv2.COLOR_BGR2GRAY)
        gray2 = cv2.bitwise_not(gray)
        # Otsu picks the binarisation threshold automatically; [1] drops the
        # returned threshold value and keeps the binary image.
        thresh = cv2.threshold(
            gray2, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        mylang = ''
        anotherText = ''
        if(lang == 'en'):
            mylang = 'eng'
        else:
            mylang = 'ara+eng'
        # First OCR attempt on the thresholded image.
        currentText = pytesseract.image_to_string(
            thresh, config=custom_config, lang=mylang)
        # Fall back to the unthresholded image if nothing was recognised.
        if(not currentText.strip()):
            currentText = pytesseract.image_to_string(
                currentImage, config=custom_config, lang=mylang)
        # Digits in the result are treated as a sign of misrecognition, so
        # retry with different language settings.
        if hasNumbers(currentText):
            # NOTE(review): mylang is only ever 'eng' or 'ara+eng' above, so
            # this 'ara' branch is unreachable dead code.
            if mylang == 'ara':
                sLang = 'eng'
                anotherText = pytesseract.image_to_string(
                    thresh, config=custom_config, lang=sLang)
            elif mylang == 'ara+eng':
                sLang = 'eng'
                anotherText = pytesseract.image_to_string(
                    thresh, config=custom_config, lang='eng')
                if hasNumbers(anotherText):
                    sLang = 'ara+eng'
                    anotherText = pytesseract.image_to_string(
                        thresh, config=custom_config, lang=sLang)
            # Keep the retried text only if it is digit-free and non-empty.
            if hasNumbers(anotherText) == False and len(anotherText.strip()) > 0:
                currentText = anotherText
        currentText = currentText.strip()
        if(currentText):
            fullText += " "+currentText
    fullTextList = []
    fullTextList.append(fullText)
    arText = ''
    enText = ''
    # Bucket every whitespace-separated token into English or Arabic.
    for txt in fullText.split(' '):
        if checkTextLang(txt) == 'en':
            enText += txt+" "
        else:
            arText += txt+" "
    textDict = dict()
    textDict['ar'] = arText
    textDict['en'] = enText
    # Persist the text, then read it back so 'full_text' mirrors the file.
    with codecs.open(path, 'w', 'utf-8') as outfile:
        outfile.write('+'.join([str(x) for x in fullTextList]))
    with codecs.open(path, 'r', 'utf-8') as outfile:
        textDict['full_text'] = outfile.readlines()
    return textDict
def getBrandText(image, path):
    """OCR a single (largest) text box, persisting the raw text to `path`.

    The recognised text is written to the scratch file and read back, so
    `path` also serves as a debugging artefact of the OCR output.

    Returns
    -------
    str: the first line of the OCR output ("" when nothing was recognised).
    """
    textList = []
    custom_config = r'--oem 3 --psm 6'
    currentImage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    currentText = pytesseract.image_to_string(
        currentImage, config=custom_config, lang='eng+ara')
    currentText = currentText.strip()
    if(currentText):
        textList.append(currentText)
    with codecs.open(path, 'w', 'utf-8') as outfile:
        outfile.write('+'.join([str(x) for x in textList]))
    with codecs.open(path, 'r', 'utf-8') as readFile:
        brand_name = readFile.readline()
    return brand_name
def ConvertPositionsToImages(boxPositions, image):
    """Crop each detected text box out of `image`, keyed by its pixel area.

    Parameters
    ----------
    boxPositions:
        Iterable of (minY, maxY, minX, maxX) tuples.
    image:
        Source image as a row-major array indexed [y, x].

    Returns
    -------
    (dict, int):
        Mapping of (possibly collision-adjusted) area -> cropped sub-image,
        and the largest key in that mapping (the biggest box).
    """
    croppedTextBoxImages = dict()
    maxArea = 1
    for pos in boxPositions:
        area = (pos[1] - pos[0]) * (pos[3] - pos[2])
        # Bug fix: the old code bumped a colliding key only once, so a third
        # box with the same area silently overwrote an earlier crop. Keep
        # bumping until the key is actually free.
        while area in croppedTextBoxImages:
            area += 1
        if area > maxArea:
            maxArea = area
        croppedTextBoxImages[area] = image[pos[0]:pos[1], pos[2]: pos[3]]
    return croppedTextBoxImages, maxArea
def convPosToImages(boxpositions, image):
    """Crop every (top, bottom, left, right) box out of `image`, in order."""
    return [image[top:bottom, left:right]
            for top, bottom, left, right in boxpositions]
def checkTextLang(text):
    """Classify `text` as 'en' if it contains any Latin letter, else 'ar'."""
    contains_latin = re.search('[a-zA-Z]', text) is not None
    return 'en' if contains_latin else 'ar'
def checkLang(img, alreadyPreprocessedImage=False):
    """Detect whether the dominant script in `img` is Latin ('en') or not ('ar').

    Runs Tesseract with the combined English+Arabic models and returns 'en'
    when the OCR output contains any ASCII letter, else 'ar'.

    Parameters
    ----------
    img:
        BGR image array, or an already-binarised image when
        `alreadyPreprocessedImage` is True.
    """
    custom_config = r'--oem 3 --psm 6'
    if alreadyPreprocessedImage == True:
        # Caller already binarised the image; OCR it directly.
        text = pytesseract.image_to_string(
            img, config=custom_config, lang='eng+ara')
        x = re.search('[a-zA-Z]', text)
        if x:
            return 'en'
        else:
            return 'ar'
    currentImage = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # NOTE(review): this second BGR2RGB swaps the channels back to the
    # original order — looks unintentional; confirm before removing.
    currentImage = cv2.cvtColor(currentImage, cv2.COLOR_BGR2RGB)
    gray = cv2.cvtColor(currentImage, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.bitwise_not(gray)
    # Otsu-binarise the inverted grayscale image before OCR.
    thresh = cv2.threshold(
        gray2, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    text = pytesseract.image_to_string(
        thresh, config=custom_config, lang='eng+ara')
    x = re.search('[a-zA-Z]', text)
    if x:
        return 'en'
    else:
        return 'ar'
def classifyBoxes(boxPositions):
    """Group text boxes into rows by their bottom y-coordinate (pos[1]).

    A box whose bottom edge falls within 20 px above an existing row key
    joins that row; otherwise a new row keyed by the box's bottom edge is
    opened. Row insertion order is preserved.
    """
    first, *rest = boxPositions
    classifiedBoxes = {first[1]: [first]}
    for pos in rest:
        bottom = pos[1]
        matched_key = None
        for row_key in list(classifiedBoxes):
            if bottom in range(row_key, row_key + 20, 1):
                matched_key = row_key
                break
        if matched_key is None:
            classifiedBoxes[bottom] = [pos]
        else:
            classifiedBoxes[matched_key].append(pos)
    return classifiedBoxes
def sortClassifiedBoxes(classifiedBoxes, lang):
    """Sort each row's boxes by their x-coordinate (pos[3]) in reading order.

    English reads left-to-right (ascending x); Arabic (or mixed) reads
    right-to-left (descending x). The two original branches differed only in
    the `reverse` flag, so they are folded into one. The dict is modified in
    place and also returned.
    """
    descending = lang != 'en'
    for key in classifiedBoxes.keys():
        classifiedBoxes[key] = sorted(
            classifiedBoxes[key], key=lambda box: box[3], reverse=descending)
    return classifiedBoxes
def convertDictToList(res):
    """Flatten the row dict back into a single list, preserving row order."""
    flattened = []
    for row in res.values():
        flattened.extend(row)
    return flattened
def test_net(net, image, text_threshold, link_threshold, low_text, cuda, poly, refine_net=None):
    """Run the CRAFT text detector on one image.

    Returns
    -------
    (boxes, polys, ret_score_text):
        Detected word boxes, polygons (falling back to boxes where no
        polygon was produced), and a heatmap image of the two score maps.
    """
    t0 = time.time()
    # Resize so the image fits config.canvas_size; keep the ratio so
    # detections can be mapped back to original-image coordinates.
    img_resized, target_ratio, _ = imgproc.resize_aspect_ratio(
        image, config.canvas_size, interpolation=cv2.INTER_LINEAR, mag_ratio=config.mag_ratio)
    ratio_h = ratio_w = 1 / target_ratio
    x = imgproc.normalizeMeanVariance(img_resized)
    x = torch.from_numpy(x).permute(2, 0, 1)  # HWC -> CHW
    x = Variable(x.unsqueeze(0))  # add batch dimension
    if cuda:
        x = x.cuda()
    with torch.no_grad():
        y, feature = net(x)
    # Channel 0: character region score; channel 1: affinity (link) score.
    score_text = y[0, :, :, 0].cpu().data.numpy()
    score_link = y[0, :, :, 1].cpu().data.numpy()
    if refine_net is not None:
        with torch.no_grad():
            y_refiner = refine_net(y, feature)
        score_link = y_refiner[0, :, :, 0].cpu().data.numpy()
    t0 = time.time() - t0
    t1 = time.time()
    boxes, polys = craft_utils.getDetBoxes(
        score_text, score_link, text_threshold, link_threshold, low_text, poly)
    # Scale detections back to original image coordinates.
    boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w, ratio_h)
    polys = craft_utils.adjustResultCoordinates(polys, ratio_w, ratio_h)
    for k in range(len(polys)):
        if polys[k] is None:
            polys[k] = boxes[k]
    t1 = time.time() - t1
    # Side-by-side heatmap of the text and link score maps (for debugging).
    render_img = score_text.copy()
    render_img = np.hstack((render_img, score_link))
    ret_score_text = imgproc.cvt2HeatmapImg(render_img)
    return boxes, polys, ret_score_text
def get_models():
    """Load the CRAFT detector (and optional RefineNet) described in `config`.

    Weights are loaded onto GPU (wrapped in DataParallel) when config.cuda
    is set, otherwise onto CPU. Both networks are returned in eval mode.

    Returns
    -------
    (net, refine_net):
        The CRAFT model and the link refiner (None when config.refine is off).
    """
    net = CRAFT()
    if config.cuda:
        net.load_state_dict(copyStateDict(torch.load(config.trained_model)))
    else:
        net.load_state_dict(copyStateDict(torch.load(
            config.trained_model, map_location=torch.device('cpu'))))
    if config.cuda:
        net = net.cuda()
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = False
    net.eval()
    refine_net = None
    if config.refine:
        from refinenet import RefineNet
        refine_net = RefineNet()
        if config.cuda:
            refine_net.load_state_dict(
                copyStateDict(torch.load(config.refiner_model)))
            refine_net = refine_net.cuda()
            refine_net = torch.nn.DataParallel(refine_net)
        else:
            refine_net.load_state_dict(copyStateDict(torch.load(
                config.refiner_model, map_location=torch.device('cpu'))))
        refine_net.eval()
        # The refiner produces polygon-level links, so force polygon output.
        config.poly = True
    return net, refine_net
def getTextFromImage(image_path, net, refine_net=None):
    """Run the CRAFT detector + Tesseract OCR pipeline on one image file.

    Parameters
    ----------
    image_path:
        Path of the image file to read.
    net:
        Loaded CRAFT detection network (see ``get_models``).
    refine_net:
        Optional link-refiner network.

    Returns
    -------
    (str, dict | str, str):
        The brand name (OCR of the largest box), the extracted text
        (per-language dict from ``getFullText``, or "" when no boxes were
        found), and the detected main language ('en' or 'ar').
    """
    image = imgproc.loadImage(image_path)
    bboxes, _, _ = test_net(
        net, image, config.text_threshold, config.link_threshold, config.low_text, config.cuda, config.poly, refine_net)
    boxPositions = []
    for box in (bboxes):
        boxPolys = np.array(box).astype(np.int32).reshape((-1))
        boxPolys[boxPolys < 0] = 0
        # Reduce the 4-corner polygon to an axis-aligned bounding box.
        minY, maxY, minX, maxX = min(boxPolys[1], boxPolys[3], boxPolys[5], boxPolys[7]), max(boxPolys[1], boxPolys[3], boxPolys[5], boxPolys[7]), min(
            boxPolys[0], boxPolys[2], boxPolys[4], boxPolys[6]), max(boxPolys[0], boxPolys[2], boxPolys[4], boxPolys[6])
        boxPositions.append((minY, maxY, minX, maxX))
    # Sort top-to-bottom by the boxes' bottom edge.
    boxPositions = sorted(boxPositions, key=lambda x: x[1])
    brand_name = ""
    text = ""
    language = "en"
    if len(boxPositions) > 0:
        croppedTextBoxImages, maxArea = ConvertPositionsToImages(
            boxPositions, image)
        # The largest detected box is assumed to hold the brand name.
        brand_name = getBrandText(
            croppedTextBoxImages[maxArea], 'brand-name-temp.json')
        Mainlang = checkLang(croppedTextBoxImages[maxArea])
        language = Mainlang
        # Group boxes into rows, sort each row in reading order, flatten back.
        classifiedBoxes = classifyBoxes(boxPositions)
        classifiedBoxes = sortClassifiedBoxes(classifiedBoxes, Mainlang)
        boxPositions = convertDictToList(classifiedBoxes)
        croppedTextBoxImages, maxArea = ConvertPositionsToImages(
            boxPositions, image)
        # Bug fix: getFullText's signature is (imgs, path, lang); the old call
        # passed (imgs, language, 'temp.json'), writing the scratch file to a
        # file literally named after the language and treating 'temp.json' as
        # the language (silently falling through to 'ara+eng').
        text = getFullText(convPosToImages(
            boxPositions, image), 'temp.json', language)
    return brand_name, text, language
| 30.811429 | 151 | 0.618323 |
51dc91f20a1a71ce1538486d184a96f40fb680e0 | 1,811 | py | Python | evalml/pipelines/components/estimators/classifiers/decision_tree_classifier.py | BlockchainClimateInstitute/price_microservice | 11d1cff8965fe1befc997e9da3dc09efceed4579 | [
"BSD-3-Clause"
] | null | null | null | evalml/pipelines/components/estimators/classifiers/decision_tree_classifier.py | BlockchainClimateInstitute/price_microservice | 11d1cff8965fe1befc997e9da3dc09efceed4579 | [
"BSD-3-Clause"
] | 13 | 2021-03-04T19:29:09.000Z | 2022-03-07T01:00:43.000Z | evalml/pipelines/components/estimators/classifiers/decision_tree_classifier.py | RG4421/evalml | 33c62abe6d107d1da2f54e9e44a90f18aaf916a9 | [
"BSD-3-Clause"
] | null | null | null | from sklearn.tree import DecisionTreeClassifier as SKDecisionTreeClassifier
from skopt.space import Integer
from evalml.model_family import ModelFamily
from evalml.pipelines.components.estimators import Estimator
from evalml.problem_types import ProblemTypes
from evalml.utils import deprecate_arg
class DecisionTreeClassifier(Estimator):
    """Decision Tree Classifier.

    Thin wrapper around scikit-learn's DecisionTreeClassifier that plugs it
    into evalml's Estimator interface for binary, multiclass, and the
    corresponding time-series problem types.
    """
    name = "Decision Tree Classifier"
    # Search space used by evalml's automated hyperparameter tuning.
    hyperparameter_ranges = {
        "criterion": ["gini", "entropy"],
        "max_features": ["auto", "sqrt", "log2"],
        "max_depth": Integer(4, 10)
    }
    model_family = ModelFamily.DECISION_TREE
    supported_problem_types = [ProblemTypes.BINARY, ProblemTypes.MULTICLASS,
                               ProblemTypes.TIME_SERIES_BINARY, ProblemTypes.TIME_SERIES_MULTICLASS]
    def __init__(self,
                 criterion="gini",
                 max_features="auto",
                 max_depth=6,
                 min_samples_split=2,
                 min_weight_fraction_leaf=0.0,
                 random_state=None,
                 random_seed=0,
                 **kwargs):
        # `random_state` is deprecated in favour of `random_seed`;
        # deprecate_arg resolves whichever one the caller supplied.
        parameters = {"criterion": criterion,
                      "max_features": max_features,
                      "max_depth": max_depth,
                      "min_samples_split": min_samples_split,
                      "min_weight_fraction_leaf": min_weight_fraction_leaf}
        parameters.update(kwargs)
        random_seed = deprecate_arg("random_state", "random_seed", random_state, random_seed)
        dt_classifier = SKDecisionTreeClassifier(random_state=random_seed,
                                                 **parameters)
        super().__init__(parameters=parameters,
                         component_obj=dt_classifier,
                         random_seed=random_seed)
| 42.116279 | 100 | 0.619547 |
12b803e1443ab6aa0e375a66c5b03d21055d25b4 | 5,167 | py | Python | rlgraph/tests/agent_learning/short_tasks/test_impala_agent_short_task_learning.py | RLGraph/RLGraph | 428fc136a9a075f29a397495b4226a491a287be2 | [
"Apache-2.0"
] | 290 | 2018-07-29T15:30:57.000Z | 2022-03-19T02:46:53.000Z | rlgraph/tests/agent_learning/short_tasks/test_impala_agent_short_task_learning.py | RLGraph/RLGraph | 428fc136a9a075f29a397495b4226a491a287be2 | [
"Apache-2.0"
] | 76 | 2018-10-19T08:42:01.000Z | 2020-05-03T08:34:21.000Z | rlgraph/tests/agent_learning/short_tasks/test_impala_agent_short_task_learning.py | RLGraph/RLGraph | 428fc136a9a075f29a397495b4226a491a287be2 | [
"Apache-2.0"
] | 41 | 2018-10-30T07:05:05.000Z | 2022-03-01T08:28:24.000Z | # Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import os
import time
import unittest
from rlgraph.environments import GridWorld, OpenAIGymEnv
from rlgraph.agents import IMPALAAgent
from rlgraph.utils import root_logger
from rlgraph.tests.test_util import config_from_path, recursive_assert_almost_equal
class TestIMPALAAgentShortTaskLearning(unittest.TestCase):
    """
    Tests whether the IMPALAAgent can learn in simple environments.
    """
    root_logger.setLevel(level=logging.INFO)
    # Environment visualization is only enabled on Windows machines.
    is_windows = os.name == "nt"
    def test_impala_on_2x2_grid_world(self):
        """
        Creates a single IMPALAAgent and runs it via a simple loop on a 2x2 GridWorld.
        """
        env = GridWorld("2x2")
        agent = IMPALAAgent.from_spec(
            config_from_path("configs/impala_agent_for_2x2_gridworld.json"),
            state_space=env.state_space,
            action_space=env.action_space,
            execution_spec=dict(seed=12),
            update_spec=dict(batch_size=16),
            optimizer_spec=dict(type="adam", learning_rate=0.05)
        )
        learn_updates = 50
        for i in range(learn_updates):
            ret = agent.update()
            mean_return = self._calc_mean_return(ret)
            print("i={} Loss={:.4} Avg-reward={:.2}".format(i, float(ret[1]), mean_return))
        # Assume we have learned something.
        self.assertGreater(mean_return, -0.1)
        # Check the last action probs for the 2 valid next_states (start (after a reset) and one below start).
        action_probs = ret[3]["action_probs"].reshape((80, 4))
        next_states = ret[3]["states"][:, 1:].reshape((80,))
        for s_, probs in zip(next_states, action_probs):
            # Start state:
            # - Assume we picked "right" in state=1 (in order to step into goal state).
            # - OR we picked "up" or "left" in state=0 (unlikely, but possible).
            if s_ == 0:
                recursive_assert_almost_equal(probs[0], 0.0, decimals=2)
                self.assertTrue(probs[1] > 0.99 or probs[2] > 0.99)
                recursive_assert_almost_equal(probs[3], 0.0, decimals=2)
            # One below start:
            # - Assume we picked "down" in start state with very large probability.
            # - OR we picked "left" or "down" in state=1 (unlikely, but possible).
            elif s_ == 1:
                recursive_assert_almost_equal(probs[0], 0.0, decimals=2)
                self.assertTrue(probs[1] > 0.99 or probs[2] > 0.99)
                recursive_assert_almost_equal(probs[3], 0.0, decimals=2)
        agent.terminate()
    def test_impala_on_cart_pole(self):
        """
        Creates a single IMPALAAgent and runs it via a simple loop on CartPole-v0.
        """
        env_spec = dict(type="open-ai-gym", gym_env="CartPole-v0", seed=10, visualize=self.is_windows)
        config_ = config_from_path("configs/impala_agent_for_cartpole.json")
        config_["environment_spec"] = env_spec
        dummy_env = OpenAIGymEnv.from_spec(env_spec)
        agent = IMPALAAgent.from_spec(
            config_,
            state_space=dummy_env.state_space,
            action_space=dummy_env.action_space,
            execution_spec=dict(seed=10)
        )
        learn_updates = 300
        mean_returns = []
        for i in range(learn_updates):
            ret = agent.update()
            mean_return = self._calc_mean_return(ret)
            mean_returns.append(mean_return)
            print("i={}/{} Loss={:.4} Avg-reward={:.2}".format(i, learn_updates, float(ret[1]), mean_return))
        # Assume we have learned something.
        # NOTE(review): `[:-100]` drops the LAST 100 entries, but the log
        # message below says "last n episodes" — possibly `[-100:]` was
        # intended; confirm before changing, as it alters the assertion.
        average_return_last_n_episodes = np.nanmean(mean_returns[:-100])
        print("Average return over last n episodes: {}".format(average_return_last_n_episodes))
        self.assertGreater(average_return_last_n_episodes, 30.0)
        time.sleep(3)
        agent.terminate()
        time.sleep(3)
    @staticmethod
    def _calc_mean_return(records):
        """Compute the mean return over all completed episodes in a sample batch.

        `records` is the tuple returned by `agent.update()`; index 3 holds
        the sample dict with per-step "rewards" and "terminals" arrays.
        """
        size = records[3]["rewards"].size
        rewards = records[3]["rewards"].reshape((size,))
        terminals = records[3]["terminals"].reshape((size,))
        returns = list()
        return_ = 0.0
        for r, t in zip(rewards, terminals):
            return_ += r
            if t:
                # Episode boundary: record the accumulated return and reset.
                returns.append(return_)
                return_ = 0.0
        return np.mean(returns)
| 39.746154 | 110 | 0.632669 |
3437d00ed5f7105beec81a5e1cf23d8df60d4ef6 | 405 | py | Python | src/recipe_project/asgi.py | mho85/recipe-app-api | f08759e0fe15d11ecb18298ece6b2b458fdc68c5 | [
"MIT"
] | 1 | 2021-03-29T16:51:49.000Z | 2021-03-29T16:51:49.000Z | src/recipe_project/asgi.py | mho85/recipe-app-api | f08759e0fe15d11ecb18298ece6b2b458fdc68c5 | [
"MIT"
] | null | null | null | src/recipe_project/asgi.py | mho85/recipe-app-api | f08759e0fe15d11ecb18298ece6b2b458fdc68c5 | [
"MIT"
] | null | null | null | """
ASGI config for recipe_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Fall back to the project default unless the environment already names a
# settings module.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'recipe_project.settings')
# Module-level ASGI callable that servers (uvicorn, daphne, ...) import.
application = get_asgi_application()
| 23.823529 | 78 | 0.792593 |
f82946dc5e207fb118e6d027b7db989cdfd81762 | 8,076 | py | Python | frappe/contacts/doctype/contact/contact.py | Vishalcse/frappev13beta | 6ef0b68d4203cd859db85417818c5889b4359008 | [
"MIT"
] | null | null | null | frappe/contacts/doctype/contact/contact.py | Vishalcse/frappev13beta | 6ef0b68d4203cd859db85417818c5889b4359008 | [
"MIT"
] | null | null | null | frappe/contacts/doctype/contact/contact.py | Vishalcse/frappev13beta | 6ef0b68d4203cd859db85417818c5889b4359008 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, has_gravatar, cint
from frappe import _
from frappe.model.document import Document
from frappe.core.doctype.dynamic_link.dynamic_link import deduplicate_dynamic_links
from six import iteritems
from past.builtins import cmp
from frappe.model.naming import append_number_if_name_exists
from frappe.contacts.address_and_contact import set_link_title
import functools
class Contact(Document):
	def autoname(self):
		"""Build the document name from first/last name plus the first linked party."""
		# concat first and last name
		self.name = " ".join(filter(None,
			[cstr(self.get(f)).strip() for f in ["first_name", "last_name"]]))
		if frappe.db.exists("Contact", self.name):
			self.name = append_number_if_name_exists('Contact', self.name)
		# concat party name if reqd
		for link in self.links:
			# Only the first link contributes to the name.
			self.name = self.name + '-' + link.link_name.strip()
			break
	def validate(self):
		"""Normalise primary email/phone, resolve user and gravatar, dedupe links."""
		self.set_primary_email()
		self.set_primary("phone")
		self.set_primary("mobile_no")
		self.set_user()
		set_link_title(self)
		if self.email_id and not self.image:
			self.image = has_gravatar(self.email_id)
		if self.get("sync_with_google_contacts") and not self.get("google_contacts"):
			frappe.throw(_("Select Google Contacts to which contact should be synced."))
		deduplicate_dynamic_links(self)
	def set_user(self):
		"""Link this contact to an existing User matching its primary email."""
		if not self.user and self.email_id:
			self.user = frappe.db.get_value("User", {"email": self.email_id})
	def get_link_for(self, link_doctype):
		'''Return the link name, if exists for the given link DocType'''
		for link in self.links:
			if link.link_doctype==link_doctype:
				return link.link_name
		return None
	def has_link(self, doctype, name):
		"""Return True if this contact links to the given (doctype, name)."""
		for link in self.links:
			if link.link_doctype==doctype and link.link_name== name:
				return True
	def has_common_link(self, doc):
		"""Return True if this contact and `doc` share any dynamic link."""
		reference_links = [(link.link_doctype, link.link_name) for link in doc.links]
		for link in self.links:
			if (link.link_doctype, link.link_name) in reference_links:
				return True
	def add_email(self, email_id, is_primary=0, autosave=False):
		"""Append an email row if not already present; optionally save immediately."""
		if not frappe.db.exists("Contact Email", {"email_id": email_id, "parent": self.name}):
			self.append("email_ids", {
				"email_id": email_id,
				"is_primary": is_primary
			})
			if autosave:
				self.save(ignore_permissions=True)
	def add_phone(self, phone, is_primary_phone=0, is_primary_mobile_no=0, autosave=False):
		"""Append a phone row if not already present; optionally save immediately."""
		if not frappe.db.exists("Contact Phone", {"phone": phone, "parent": self.name}):
			self.append("phone_nos", {
				"phone": phone,
				"is_primary_phone": is_primary_phone,
				"is_primary_mobile_no": is_primary_mobile_no
			})
			if autosave:
				self.save(ignore_permissions=True)
	def set_primary_email(self):
		"""Mirror the single primary child email into `self.email_id`."""
		if not self.email_ids:
			self.email_id = ""
			return
		if len([email.email_id for email in self.email_ids if email.is_primary]) > 1:
			frappe.throw(_("Only one {0} can be set as primary.").format(frappe.bold("Email ID")))
		for d in self.email_ids:
			if d.is_primary == 1:
				self.email_id = d.email_id.strip()
				break
	def set_primary(self, fieldname):
		"""Mirror the single primary phone/mobile child row into `self.<fieldname>`."""
		# Used to set primary mobile and phone no.
		if len(self.phone_nos) == 0:
			setattr(self, fieldname, "")
			return
		field_name = "is_primary_" + fieldname
		is_primary = [phone.phone for phone in self.phone_nos if phone.get(field_name)]
		if len(is_primary) > 1:
			frappe.throw(_("Only one {0} can be set as primary.").format(frappe.bold(frappe.unscrub(fieldname))))
		for d in self.phone_nos:
			if d.get(field_name) == 1:
				setattr(self, fieldname, d.phone)
				break
def get_default_contact(doctype, name):
	'''Returns default contact for the given doctype, name'''
	out = frappe.db.sql('''select parent,
			IFNULL((select is_primary_contact from tabContact c where c.name = dl.parent), 0)
				as is_primary_contact
		from
			`tabDynamic Link` dl
		where
			dl.link_doctype=%s and
			dl.link_name=%s and
			dl.parenttype = "Contact"''', (doctype, name))
	if out:
		# Prefer contacts flagged is_primary_contact (descending on the flag).
		return sorted(out, key = functools.cmp_to_key(lambda x,y: cmp(cint(y[1]), cint(x[1]))))[0][0]
	else:
		return None
@frappe.whitelist()
def invite_user(contact):
	"""Create a Website User from the contact's email and send a welcome mail.

	Requires write permission on the contact. Returns the new User's name.
	"""
	contact = frappe.get_doc("Contact", contact)
	if not contact.email_id:
		frappe.throw(_("Please set Email Address"))
	if contact.has_permission("write"):
		user = frappe.get_doc({
			"doctype": "User",
			"first_name": contact.first_name,
			"last_name": contact.last_name,
			"email": contact.email_id,
			"user_type": "Website User",
			"send_welcome_email": 1
		}).insert(ignore_permissions = True)
		return user.name
@frappe.whitelist()
def get_contact_details(contact):
	"""Return a dict of display-ready fields for the given contact name."""
	contact = frappe.get_doc("Contact", contact)
	out = {
		"contact_person": contact.get("name"),
		"contact_display": " ".join(filter(None,
			[contact.get("salutation"), contact.get("first_name"), contact.get("last_name")])),
		"contact_email": contact.get("email_id"),
		"contact_mobile": contact.get("mobile_no"),
		"contact_phone": contact.get("phone"),
		"contact_designation": contact.get("designation"),
		"contact_department": contact.get("department")
	}
	return out
def update_contact(doc, method):
	'''Update contact when user is updated, if contact is found. Called via hooks'''
	contact_name = frappe.db.get_value("Contact", {"email_id": doc.name})
	if contact_name:
		contact = frappe.get_doc("Contact", contact_name)
		# Copy over only the fields the User doc actually has values for.
		for key in ("first_name", "last_name", "phone"):
			if doc.get(key):
				contact.set(key, doc.get(key))
		contact.flags.ignore_mandatory = True
		contact.save(ignore_permissions=True)
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def contact_query(doctype, txt, searchfield, start, page_len, filters):
	"""Search-widget query returning contacts linked to a given party.

	`filters` must carry `link_doctype` and `link_name`; matches `searchfield`
	against `txt` and orders exact-position matches first.
	"""
	from frappe.desk.reportview import get_match_cond
	# Reject search fields that are neither a Contact field nor a standard
	# column (guards the format() interpolation of `{key}` below).
	# Bug fix: the original used `or`, which rejected every standard column
	# (e.g. `name`) because get_field() returns None for those; `and` is the
	# intended logic (only reject when the field matches neither list).
	if not frappe.get_meta("Contact").get_field(searchfield)\
		and searchfield not in frappe.db.DEFAULT_COLUMNS:
		return []
	link_doctype = filters.pop('link_doctype')
	link_name = filters.pop('link_name')
	return frappe.db.sql("""select
			`tabContact`.name, `tabContact`.first_name, `tabContact`.last_name
		from
			`tabContact`, `tabDynamic Link`
		where
			`tabDynamic Link`.parent = `tabContact`.name and
			`tabDynamic Link`.parenttype = 'Contact' and
			`tabDynamic Link`.link_doctype = %(link_doctype)s and
			`tabDynamic Link`.link_name = %(link_name)s and
			`tabContact`.`{key}` like %(txt)s
			{mcond}
		order by
			if(locate(%(_txt)s, `tabContact`.name), locate(%(_txt)s, `tabContact`.name), 99999),
			`tabContact`.idx desc, `tabContact`.name
		limit %(start)s, %(page_len)s """.format(mcond=get_match_cond(doctype), key=searchfield), {
			'txt': '%' + txt + '%',
			'_txt': txt.replace("%", ""),
			'start': start,
			'page_len': page_len,
			'link_name': link_name,
			'link_doctype': link_doctype
		})
@frappe.whitelist()
def address_query(links):
	"""Return Address names linked to any of the given (link_doctype, link_name) pairs.

	`links` is a JSON-encoded list of dicts; links the current user cannot
	read are silently skipped.
	"""
	import json
	links = [{"link_doctype": d.get("link_doctype"), "link_name": d.get("link_name")} for d in json.loads(links)]
	result = []
	for link in links:
		if not frappe.has_permission(doctype=link.get("link_doctype"), ptype="read", doc=link.get("link_name")):
			continue
		res = frappe.db.sql("""
			SELECT `tabAddress`.name
			FROM `tabAddress`, `tabDynamic Link`
			WHERE `tabDynamic Link`.parenttype='Address'
				AND `tabDynamic Link`.parent=`tabAddress`.name
				AND `tabDynamic Link`.link_doctype = %(link_doctype)s
				AND `tabDynamic Link`.link_name = %(link_name)s
			""", {
				"link_doctype": link.get("link_doctype"),
				"link_name": link.get("link_name"),
			}, as_dict=True)
		result.extend([l.name for l in res])
	return result
def get_contact_with_phone_number(number):
	"""Return the name of the first contact whose phone ends with `number`, else None."""
	if not number: return
	contacts = frappe.get_all('Contact Phone', filters=[
		['phone', 'like', '%{0}'.format(number)]
	], fields=["parent"], limit=1)
	return contacts[0].parent if contacts else None
def get_contact_name(email_id):
	"""Return the name of the first contact having `email_id`, else None."""
	contact = frappe.get_list("Contact Email", filters={"email_id": email_id}, fields=["parent"], limit=1)
	return contact[0].parent if contact else None
| 31.181467 | 110 | 0.712729 |
24ab857265db694610154b44c5ada9c2fca1ff39 | 24 | py | Python | Code/MQTT/main.py | ykzzyk/SmartHomeRemoteControl_STM32 | 509417c94eac491182cd074039b773848c30421b | [
"MIT"
] | null | null | null | Code/MQTT/main.py | ykzzyk/SmartHomeRemoteControl_STM32 | 509417c94eac491182cd074039b773848c30421b | [
"MIT"
] | null | null | null | Code/MQTT/main.py | ykzzyk/SmartHomeRemoteControl_STM32 | 509417c94eac491182cd074039b773848c30421b | [
"MIT"
] | null | null | null | import mqtt
mqtt.main()
| 8 | 11 | 0.75 |
b13d960843aa133057117eb40b6cc5f5871cb112 | 37,754 | py | Python | hail/python/hailtop/batch/job.py | jkgoodrich/hail | 95ce1d792b553a5e97b390d349237a7ed86fbf98 | [
"MIT"
] | null | null | null | hail/python/hailtop/batch/job.py | jkgoodrich/hail | 95ce1d792b553a5e97b390d349237a7ed86fbf98 | [
"MIT"
] | null | null | null | hail/python/hailtop/batch/job.py | jkgoodrich/hail | 95ce1d792b553a5e97b390d349237a7ed86fbf98 | [
"MIT"
] | null | null | null | import re
import warnings
import dill
import os
import functools
import inspect
import textwrap
from shlex import quote as shq
from io import BytesIO
from typing import Union, Optional, Dict, List, Set, Tuple, Callable, Any, cast
from . import backend, resource as _resource, batch # pylint: disable=cyclic-import
from .exceptions import BatchException
from .globals import DEFAULT_SHELL
def _add_resource_to_set(resource_set, resource, include_rg=True):
    """Add `resource` to `resource_set`, expanding resource groups.

    For a ResourceGroup, the group object itself is added only when
    `include_rg` is True, but its member files are always added. For a
    ResourceFile that belongs to a group, the owning group's member files
    are added as well.
    """
    if isinstance(resource, _resource.ResourceGroup):
        rg = resource
        if include_rg:
            resource_set.add(resource)
    else:
        resource_set.add(resource)
        if isinstance(resource, _resource.ResourceFile) and resource._has_resource_group():
            rg = resource._get_resource_group()
        else:
            rg = None
    if rg is not None:
        for _, resource_file in rg._resources.items():
            resource_set.add(resource_file)
def opt_str(x):
    """Stringify `x`, passing None through unchanged."""
    return None if x is None else str(x)
class Job:
"""
Object representing a single job to execute.
Notes
-----
This class should never be created directly by the user. Use :meth:`.Batch.new_job`,
:meth:`.Batch.new_bash_job`, or :meth:`.Batch.new_python_job` instead.
"""
_counter = 1
_uid_prefix = "__JOB__"
_regex_pattern = r"(?P<JOB>{}\d+)".format(_uid_prefix) # pylint: disable=consider-using-f-string
    @classmethod
    def _new_uid(cls):
        # Mint a process-unique placeholder id ("__JOB__<n>") used to refer
        # to this job inside command strings before real job ids exist.
        uid = cls._uid_prefix + str(cls._counter)
        cls._counter += 1
        return uid
    def __init__(self,
                 batch: 'batch.Batch',
                 token: str,
                 *,
                 name: Optional[str] = None,
                 attributes: Optional[Dict[str, str]] = None,
                 shell: Optional[str] = None):
        self._batch = batch
        self._shell = shell
        self._token = token
        self.name = name
        self.attributes = attributes
        # Resource requests / execution options; None means "backend default".
        self._cpu: Optional[str] = None
        self._memory: Optional[str] = None
        self._storage: Optional[str] = None
        self._image: Optional[str] = None
        self._always_run: bool = False
        self._preemptible: Optional[bool] = None
        self._machine_type: Optional[str] = None
        self._timeout: Optional[Union[int, float]] = None
        # (bucket, mount_point, read_only) triples for gcsfuse/blobfuse mounts.
        self._cloudfuse: List[Tuple[str, str, bool]] = []
        self._env: Dict[str, str] = {}
        self._wrapper_code: List[str] = []
        self._user_code: List[str] = []
        # name -> resource mapping (and inverse) for resources declared here.
        self._resources: Dict[str, _resource.Resource] = {}
        self._resources_inverse: Dict[_resource.Resource, str] = {}
        self._uid = Job._new_uid()
        self._job_id: Optional[int] = None
        # Resource bookkeeping used to wire up inter-job data flow:
        self._inputs: Set[_resource.Resource] = set()
        self._internal_outputs: Set[Union[_resource.ResourceFile, _resource.PythonResult]] = set()
        self._external_outputs: Set[Union[_resource.ResourceFile, _resource.PythonResult]] = set()
        self._mentioned: Set[_resource.Resource] = set()  # resources used in the command
        self._valid: Set[_resource.Resource] = set()  # resources declared in the appropriate place
        self._dependencies: Set[Job] = set()
        def safe_str(s):
            # Replace any character that is not alphanumeric or '-' with '_'
            # so the name is safe to use as a directory component.
            new_s = []
            for c in s:
                if c.isalnum() or c == '-':
                    new_s.append(c)
                else:
                    new_s.append('_')
            return ''.join(new_s)
        # Per-job scratch directory name, made unique by the batch token.
        self._dirname = f'{safe_str(name)}-{self._token}' if name else self._token
    def _get_resource(self, item: str) -> '_resource.Resource':
        # Subclasses decide how a named resource is looked up or created.
        raise NotImplementedError
    def __getitem__(self, item: str) -> '_resource.Resource':
        # Bracket access to a job resource, e.g. j['ofile'].
        return self._get_resource(item)
    def __getattr__(self, item: str) -> '_resource.Resource':
        # Attribute access, e.g. j.ofile; only reached for names not set on
        # the instance, so it cannot shadow real attributes.
        return self._get_resource(item)
    def _add_internal_outputs(self, resource: '_resource.Resource') -> None:
        # Track files this job produces for downstream jobs; groups are not
        # recorded themselves, only their member files (include_rg=False).
        _add_resource_to_set(self._internal_outputs, resource, include_rg=False)
    def _add_inputs(self, resource: '_resource.Resource') -> None:
        # Track files this job consumes; groups are expanded to member files.
        _add_resource_to_set(self._inputs, resource, include_rg=False)
def depends_on(self, *jobs: 'Job') -> 'Job':
"""
Explicitly set dependencies on other jobs.
Examples
--------
Initialize the batch:
>>> b = Batch()
Create the first job:
>>> j1 = b.new_job()
>>> j1.command(f'echo "hello"')
Create the second job `j2` that depends on `j1`:
>>> j2 = b.new_job()
>>> j2.depends_on(j1)
>>> j2.command(f'echo "world"')
Execute the batch:
>>> b.run()
Notes
-----
Dependencies between jobs are automatically created when resources from
one job are used in a subsequent job. This method is only needed when
no intermediate resource exists and the dependency needs to be explicitly
set.
Parameters
----------
jobs:
Sequence of jobs to depend on.
Returns
-------
Same job object with dependencies set.
"""
for j in jobs:
self._dependencies.add(j)
return self
    def env(self, variable: str, value: str) -> None:
        """Set the environment variable `variable` to `value` inside the job's container."""
        self._env[variable] = value
def storage(self, storage: Optional[Union[str, int]]) -> 'Job':
"""
Set the job's storage size.
Examples
--------
Set the job's disk requirements to 1 Gi:
>>> b = Batch()
>>> j = b.new_job()
>>> (j.storage('10Gi')
... .command(f'echo "hello"'))
>>> b.run()
Notes
-----
The storage expression must be of the form {number}{suffix}
where valid optional suffixes are *K*, *Ki*, *M*, *Mi*,
*G*, *Gi*, *T*, *Ti*, *P*, and *Pi*. Omitting a suffix means
the value is in bytes.
For the :class:`.ServiceBackend`, jobs requesting one or more cores receive
5 GiB of storage for the root file system `/`. Jobs requesting a fraction of a core
receive the same fraction of 5 GiB of storage. If you need additional storage, you
can explicitly request more storage using this method and the extra storage space
will be mounted at `/io`. Batch automatically writes all :class:`.ResourceFile` to
`/io`.
The default storage size is 0 Gi. The minimum storage size is 0 Gi and the
maximum storage size is 64 Ti. If storage is set to a value between 0 Gi
and 10 Gi, the storage request is rounded up to 10 Gi. All values are
rounded up to the nearest Gi.
Parameters
----------
storage:
Units are in bytes if `storage` is an :obj:`int`. If `None`, use the
default storage size for the :class:`.ServiceBackend` (0 Gi).
Returns
-------
Same job object with storage set.
"""
self._storage = opt_str(storage)
return self
def memory(self, memory: Optional[Union[str, int]]) -> 'Job':
"""
Set the job's memory requirements.
Examples
--------
Set the job's memory requirement to be 3Gi:
>>> b = Batch()
>>> j = b.new_job()
>>> (j.memory('3Gi')
... .command(f'echo "hello"'))
>>> b.run()
Notes
-----
The memory expression must be of the form {number}{suffix}
where valid optional suffixes are *K*, *Ki*, *M*, *Mi*,
*G*, *Gi*, *T*, *Ti*, *P*, and *Pi*. Omitting a suffix means
the value is in bytes.
For the :class:`.ServiceBackend`, the values 'lowmem', 'standard',
and 'highmem' are also valid arguments. 'lowmem' corresponds to
approximately 1 Gi/core, 'standard' corresponds to approximately
4 Gi/core, and 'highmem' corresponds to approximately 7 Gi/core.
The default value is 'standard'.
Parameters
----------
memory:
Units are in bytes if `memory` is an :obj:`int`. If `None`,
use the default value for the :class:`.ServiceBackend` ('standard').
Returns
-------
Same job object with memory requirements set.
"""
self._memory = opt_str(memory)
return self
def cpu(self, cores: Optional[Union[str, int, float]]) -> 'Job':
"""
Set the job's CPU requirements.
Notes
-----
The string expression must be of the form {number}{suffix}
where the optional suffix is *m* representing millicpu.
Omitting a suffix means the value is in cpu.
For the :class:`.ServiceBackend`, `cores` must be a power of
two between 0.25 and 16.
Examples
--------
Set the job's CPU requirement to 250 millicpu:
>>> b = Batch()
>>> j = b.new_job()
>>> (j.cpu('250m')
... .command(f'echo "hello"'))
>>> b.run()
Parameters
----------
cores:
Units are in cpu if `cores` is numeric. If `None`,
use the default value for the :class:`.ServiceBackend`
(1 cpu).
Returns
-------
Same job object with CPU requirements set.
"""
self._cpu = opt_str(cores)
return self
def always_run(self, always_run: bool = True) -> 'Job':
"""
Set the job to always run, even if dependencies fail.
Notes
-----
Can only be used with the :class:`.backend.ServiceBackend`.
Warning
-------
Jobs set to always run are not cancellable!
Examples
--------
>>> b = Batch(backend=backend.ServiceBackend('test'))
>>> j = b.new_job()
>>> (j.always_run()
... .command(f'echo "hello"'))
Parameters
----------
always_run:
If True, set job to always run.
Returns
-------
Same job object set to always run.
"""
if not isinstance(self._batch._backend, backend.ServiceBackend):
raise NotImplementedError("A ServiceBackend is required to use the 'always_run' option")
self._always_run = always_run
return self
def timeout(self, timeout: Optional[Union[float, int]]) -> 'Job':
"""
Set the maximum amount of time this job can run for in seconds.
Notes
-----
Can only be used with the :class:`.backend.ServiceBackend`.
Examples
--------
>>> b = Batch(backend=backend.ServiceBackend('test'))
>>> j = b.new_job()
>>> (j.timeout(10)
... .command(f'echo "hello"'))
Parameters
----------
timeout:
Maximum amount of time in seconds for a job to run before being killed.
If `None`, there is no timeout.
Returns
-------
Same job object set with a timeout in seconds.
"""
if not isinstance(self._batch._backend, backend.ServiceBackend):
raise NotImplementedError("A ServiceBackend is required to use the 'timeout' option")
self._timeout = timeout
return self
def gcsfuse(self, bucket, mount_point, read_only=True):
"""
Add a bucket to mount with gcsfuse.
Notes
-----
Can only be used with the :class:`.backend.ServiceBackend`. This method can
be called more than once. This method has been deprecated. Use :meth:`.Job.cloudfuse`
instead.
Warning
-------
There are performance and cost implications of using `gcsfuse <https://cloud.google.com/storage/docs/gcs-fuse>`__.
Examples
--------
>>> b = Batch(backend=backend.ServiceBackend('test'))
>>> j = b.new_job()
>>> (j.gcsfuse('my-bucket', '/my-bucket')
... .command(f'cat /my-bucket/my-file'))
Parameters
----------
bucket:
Name of the google storage bucket to mount.
mount_point:
The path at which the bucket should be mounted to in the Docker
container.
read_only:
If ``True``, mount the bucket in read-only mode.
Returns
-------
Same job object set with a bucket to mount with gcsfuse.
"""
warnings.warn("The 'gcsfuse' method has been deprecated. Use the 'cloudfuse' method instead.")
return self.cloudfuse(bucket, mount_point, read_only=read_only)
def cloudfuse(self, bucket: str, mount_point: str, *, read_only: bool = True):
"""
Add a bucket to mount with gcsfuse in GCP or a storage container with blobfuse in Azure.
Notes
-----
Can only be used with the :class:`.backend.ServiceBackend`. This method can
be called more than once.
Warning
-------
There are performance and cost implications of using `gcsfuse <https://cloud.google.com/storage/docs/gcs-fuse>`__
or `blobfuse <https://github.com/Azure/azure-storage-fuse#considerations>`__.
Examples
--------
Google Cloud Platform:
>>> b = Batch(backend=backend.ServiceBackend('test'))
>>> j = b.new_job()
>>> (j.cloudfuse('my-bucket', '/my-bucket')
... .command(f'cat /my-bucket/my-blob-object'))
Azure:
>>> b = Batch(backend=backend.ServiceBackend('test'))
>>> j = b.new_job()
>>> (j.cloudfuse('my-account/my-container', '/dest')
... .command(f'cat /dest/my-blob-object'))
Parameters
----------
bucket:
Name of the google storage bucket to mount or the path to an Azure container in the
format of `<account>/<container>`.
mount_point:
The path at which the cloud blob storage should be mounted to in the Docker
container.
read_only:
If ``True``, mount the cloud blob storage in read-only mode.
Returns
-------
Same job object set with a cloud storage path to mount with either gcsfuse or blobfuse.
"""
if not isinstance(self._batch._backend, backend.ServiceBackend):
raise NotImplementedError("A ServiceBackend is required to use the 'cloudfuse' option")
if bucket == '':
raise BatchException('location cannot be the empty string')
if mount_point == '':
raise BatchException('mount_point cannot be the empty string')
self._cloudfuse.append((bucket, mount_point, read_only))
return self
    async def _compile(self, local_tmpdir, remote_tmpdir, *, dry_run=False):
        # Abstract hook: concrete subclasses (BashJob, PythonJob) generate
        # their wrapper/user code here and return whether any code file was
        # (or would be) written under remote_tmpdir.
        raise NotImplementedError
    def _interpolate_command(self, command, allow_python_results=False):
        # Rewrite `command`, replacing every embedded resource uid with the
        # resource's runtime path under ${BATCH_TMPDIR}.  As a side effect,
        # record inter-job dependencies implied by those references.
        def handler(match_obj):
            # re.sub callback: invoked once per uid-shaped token found.
            groups = match_obj.groupdict()
            # Jobs and Batches themselves are not substitutable values.
            if groups['JOB']:
                raise BatchException(f"found a reference to a Job object in command '{command}'.")
            if groups['BATCH']:
                raise BatchException(f"found a reference to a Batch object in command '{command}'.")
            if groups['PYTHON_RESULT'] and not allow_python_results:
                raise BatchException(f"found a reference to a PythonResult object. hint: Use one of the methods `as_str`, `as_json` or `as_repr` on a PythonResult. command: '{command}'")
            assert groups['RESOURCE_FILE'] or groups['RESOURCE_GROUP'] or groups['PYTHON_RESULT']
            r_uid = match_obj.group()
            r = self._batch._resource_map.get(r_uid)
            if r is None:
                raise BatchException(f"undefined resource '{r_uid}' in command '{command}'.\n"
                                     f"Hint: resources must be from the same batch as the current job.")
            if r._source != self:
                # Resource produced elsewhere: it becomes an input of this job...
                self._add_inputs(r)
                if r._source is not None:
                    # ...and the producing job becomes a dependency, provided
                    # the resource was actually defined there.
                    if r not in r._source._valid:
                        name = r._source._resources_inverse[r]
                        raise BatchException(f"undefined resource '{name}'\n"
                                             f"Hint: resources must be defined within "
                                             f"the job methods 'command' or 'declare_resource_group'")
                    self._dependencies.add(r._source)
                    r._source._add_internal_outputs(r)
            else:
                # Resource created by this very job: first mention validates it.
                _add_resource_to_set(self._valid, r)
            self._mentioned.add(r)
            return '${BATCH_TMPDIR}' + shq(r._get_path(''))
        regexes = [_resource.ResourceFile._regex_pattern,
                   _resource.ResourceGroup._regex_pattern,
                   _resource.PythonResult._regex_pattern,
                   Job._regex_pattern,
                   batch.Batch._regex_pattern]
        # One alternation covering every uid shape, so a single pass suffices.
        subst_command = re.sub('(' + ')|('.join(regexes) + ')',
                               handler,
                               command)
        return subst_command
def _pretty(self):
s = f"Job '{self._uid}'" \
f"\tName:\t'{self.name}'" \
f"\tAttributes:\t'{self.attributes}'" \
f"\tImage:\t'{self._image}'" \
f"\tCPU:\t'{self._cpu}'" \
f"\tMemory:\t'{self._memory}'" \
f"\tStorage:\t'{self._storage}'" \
f"\tCommand:\t'{self._command}'"
return s
    def __str__(self):
        # A job's string form is its unique identifier; _interpolate_command
        # relies on this to detect stray Job references inside commands.
        return self._uid
class BashJob(Job):
    """
    Object representing a single bash job to execute.
    Examples
    --------
    Create a batch object:
    >>> b = Batch()
    Create a new bash job that prints hello to a temporary file `j.ofile`:
    >>> j = b.new_job()
    >>> j.command(f'echo "hello" > {j.ofile}')
    Write the temporary file `j.ofile` to a permanent location
    >>> b.write_output(j.ofile, 'hello.txt')
    Execute the DAG:
    >>> b.run()
    Notes
    -----
    This class should never be created directly by the user. Use :meth:`.Batch.new_job`
    or :meth:`.Batch.new_bash_job` instead.
    """
    def __init__(self,
                 batch: 'batch.Batch',
                 token: str,
                 *,
                 name: Optional[str] = None,
                 attributes: Optional[Dict[str, str]] = None,
                 shell: Optional[str] = None):
        super().__init__(batch, token, name=name, attributes=attributes, shell=shell)
        # Commands accumulated by command(); joined into one script at compile time.
        self._command: List[str] = []
    def _get_resource(self, item: str) -> '_resource.Resource':
        # Lazily create a JobResourceFile the first time `item` is referenced
        # (supports the `j.ofile` / `j['ofile']` auto-declaration syntax).
        if item not in self._resources:
            r = self._batch._new_job_resource_file(self, value=item)
            self._resources[item] = r
            self._resources_inverse[r] = item
        return self._resources[item]
    def declare_resource_group(self, **mappings: Dict[str, Any]) -> 'BashJob':
        """Declare a resource group for a job.
        Examples
        --------
        Declare a resource group:
        >>> b = Batch()
        >>> input = b.read_input_group(bed='data/example.bed',
        ...                            bim='data/example.bim',
        ...                            fam='data/example.fam')
        >>> j = b.new_job()
        >>> j.declare_resource_group(tmp1={'bed': '{root}.bed',
        ...                                'bim': '{root}.bim',
        ...                                'fam': '{root}.fam',
        ...                                'log': '{root}.log'})
        >>> j.command(f'plink --bfile {input} --make-bed --out {j.tmp1}')
        >>> b.run() # doctest: +SKIP
        Warning
        -------
        Be careful when specifying the expressions for each file as this is Python
        code that is executed with `eval`!
        Parameters
        ----------
        mappings:
            Keywords (in the above example `tmp1`) are the name(s) of the
            resource group(s). File names may contain arbitrary Python
            expressions, which will be evaluated by Python `eval`. To use the
            keyword as the file name, use `{root}` (in the above example {root}
            will be replaced with `tmp1`).
        Returns
        -------
        Same job object with resource groups set.
        """
        for name, d in mappings.items():
            assert name not in self._resources
            if not isinstance(d, dict):
                raise BatchException(f"value for name '{name}' is not a dict. Found '{type(d)}' instead.")
            rg = self._batch._new_resource_group(self, d, root=name)
            self._resources[name] = rg
            # A declared group is immediately valid (unlike command-implied files).
            _add_resource_to_set(self._valid, rg)
        return self
    def image(self, image: str) -> 'BashJob':
        """
        Set the job's docker image.
        Examples
        --------
        Set the job's docker image to `ubuntu:20.04`:
        >>> b = Batch()
        >>> j = b.new_job()
        >>> (j.image('ubuntu:20.04')
        ...   .command(f'echo "hello"'))
        >>> b.run()  # doctest: +SKIP
        Parameters
        ----------
        image:
            Docker image to use.
        Returns
        -------
        Same job object with docker image set.
        """
        self._image = image
        return self
    def command(self, command: str) -> 'BashJob':
        """Set the job's command to execute.
        Examples
        --------
        Simple job with no output files:
        >>> b = Batch()
        >>> j = b.new_job()
        >>> j.command(f'echo "hello"')
        >>> b.run()
        Simple job with one temporary file `j.ofile` that is written to a
        permanent location:
        >>> b = Batch()
        >>> j = b.new_job()
        >>> j.command(f'echo "hello world" > {j.ofile}')
        >>> b.write_output(j.ofile, 'output/hello.txt')
        >>> b.run()
        Two jobs with a file interdependency:
        >>> b = Batch()
        >>> j1 = b.new_job()
        >>> j1.command(f'echo "hello" > {j1.ofile}')
        >>> j2 = b.new_bash_job()
        >>> j2.command(f'cat {j1.ofile} > {j2.ofile}')
        >>> b.write_output(j2.ofile, 'output/cat_output.txt')
        >>> b.run()
        Specify multiple commands in the same job:
        >>> b = Batch()
        >>> j = b.new_job()
        >>> j.command(f'echo "hello" > {j.tmp1}')
        >>> j.command(f'echo "world" > {j.tmp2}')
        >>> j.command(f'echo "!" > {j.tmp3}')
        >>> j.command(f'cat {j.tmp1} {j.tmp2} {j.tmp3} > {j.ofile}')
        >>> b.write_output(j.ofile, 'output/concatenated.txt')
        >>> b.run()
        Notes
        -----
        This method can be called more than once. Its behavior is to append
        commands to run to the set of previously defined commands rather than
        overriding an existing command.
        To declare a resource file of type :class:`.JobResourceFile`, use either
        the get attribute syntax of `job.{identifier}` or the get item syntax of
        `job['identifier']`. If an object for that identifier doesn't exist,
        then one will be created automatically (only allowed in the
        :meth:`.command` method). The identifier name can be any valid Python
        identifier such as `ofile5000`.
        All :class:`.JobResourceFile` are temporary files and must be written to
        a permanent location using :meth:`.Batch.write_output` if the output
        needs to be saved.
        Only resources can be referred to in commands. Referencing a
        :class:`.batch.Batch` or :class:`.Job` will result in an error.
        Parameters
        ----------
        command:
            A ``bash`` command.
        Returns
        -------
        Same job object with command appended.
        """
        # Substituting resource uids here also records cross-job dependencies.
        command = self._interpolate_command(command)
        self._command.append(command)
        return self
    async def _compile(self, local_tmpdir, remote_tmpdir, *, dry_run=False):
        # Assemble the accumulated commands into one shell script.  Returns
        # True iff a code file was (or, under dry_run, would be) staged at
        # remote_tmpdir.  local_tmpdir is unused for bash jobs; the parameter
        # exists for interface parity with PythonJob._compile.
        if len(self._command) == 0:
            return False
        job_shell = self._shell if self._shell else DEFAULT_SHELL
        # Wrap each command in its own { ... } group so multi-line commands
        # cannot bleed into one another.
        job_command = [cmd.strip() for cmd in self._command]
        job_command = [f'{{\n{x}\n}}' for x in job_command]
        job_command = '\n'.join(job_command)
        job_command = f'''
#! {job_shell}
{job_command}
'''
        job_command_bytes = job_command.encode()
        # Scripts up to 10 KiB are inlined directly into the wrapper; larger
        # ones are staged remotely as code.sh and sourced by the wrapper.
        if len(job_command_bytes) <= 10 * 1024:
            self._wrapper_code.append(job_command)
            return False
        self._user_code.append(job_command)
        job_path = f'{remote_tmpdir}/{self._dirname}'
        code_path = f'{job_path}/code.sh'
        code = self._batch.read_input(code_path)
        wrapper_command = f'''
chmod u+x {code}
source {code}
'''
        wrapper_command = self._interpolate_command(wrapper_command)
        self._wrapper_code.append(wrapper_command)
        if not dry_run:
            await self._batch._fs.makedirs(os.path.dirname(code_path), exist_ok=True)
            await self._batch._fs.write(code_path, job_command_bytes)
        return True
class PythonJob(Job):
    """
    Object representing a single Python job to execute.
    Examples
    --------
    Create a new Python job that multiplies two numbers and then adds 5 to the result:
    .. code-block:: python
        # Create a batch object with a default Python image
        b = Batch(default_python_image='gcr.io/hail-vdc/python-dill:3.7-slim')
        def multiply(x, y):
            return x * y
        def add(x, y):
            return x + y
        j = b.new_python_job()
        result = j.call(multiply, 2, 3)
        result = j.call(add, result, 5)
        # Write out the str representation of result to a file
        b.write_output(result.as_str(), 'hello.txt')
        b.run()
    Notes
    -----
    This class should never be created directly by the user. Use :meth:`.Batch.new_python_job`
    instead.
    """
    def __init__(self,
                 batch: 'batch.Batch',
                 token: str,
                 *,
                 name: Optional[str] = None,
                 attributes: Optional[Dict[str, str]] = None):
        super().__init__(batch, token, name=name, attributes=attributes, shell=None)
        self._resources: Dict[str, _resource.Resource] = {}
        self._resources_inverse: Dict[_resource.Resource, str] = {}
        # One entry per call(): (result handle, function, args, kwargs).
        self._functions: List[Tuple[_resource.PythonResult, Callable, Tuple[Any, ...], Dict[str, Any]]] = []
        # Counter used to mint unique result identifiers (result1, result2, ...).
        self.n_results = 0
    def _get_resource(self, item: str) -> '_resource.PythonResult':
        # Lazily create a PythonResult the first time `item` is referenced.
        if item not in self._resources:
            r = self._batch._new_python_result(self, value=item)
            self._resources[item] = r
            self._resources_inverse[r] = item
        return cast(_resource.PythonResult, self._resources[item])
    def image(self, image: str) -> 'PythonJob':
        """
        Set the job's docker image.
        Notes
        -----
        `image` must already exist and have the same version of Python as what is
        being used on the computer submitting the Batch. It also must have the
        `dill` Python package installed. You can use the function :func:`.docker.build_python_image`
        to build a new image containing `dill` and additional Python packages.
        Examples
        --------
        Set the job's docker image to `gcr.io/hail-vdc/python-dill:3.7-slim`:
        >>> b = Batch()
        >>> j = b.new_python_job()
        >>> (j.image('gcr.io/hail-vdc/python-dill:3.7-slim')
        ...   .call(print, 'hello'))
        >>> b.run()  # doctest: +SKIP
        Parameters
        ----------
        image:
            Docker image to use.
        Returns
        -------
        Same job object with docker image set.
        """
        self._image = image
        return self
    def call(self, unapplied: Callable, *args, **kwargs) -> '_resource.PythonResult':
        """Execute a Python function.
        Examples
        --------
        .. code-block:: python
            import json
            def add(x, y):
                return x + y
            def multiply(x, y):
                return x * y
            def format_as_csv(x, y, add_result, mult_result):
                return f'{x},{y},{add_result},{mult_result}'
            def csv_to_json(path):
                data = []
                with open(path) as f:
                    for line in f:
                        line = line.rstrip()
                        fields = line.split(',')
                        d = {'x': int(fields[0]),
                             'y': int(fields[1]),
                             'add': int(fields[2]),
                             'mult': int(fields[3])}
                        data.append(d)
                return json.dumps(data)
            # Get all the multiplication and addition table results
            b = Batch(name='add-mult-table')
            formatted_results = []
            for x in range(3):
                for y in range(3):
                    j = b.new_python_job(name=f'{x}-{y}')
                    add_result = j.call(add, x, y)
                    mult_result = j.call(multiply, x, y)
                    result = j.call(format_as_csv, x, y, add_result, mult_result)
                    formatted_results.append(result.as_str())
            cat_j = b.new_bash_job(name='concatenate')
            cat_j.command(f'cat {" ".join(formatted_results)} > {cat_j.output}')
            csv_to_json_j = b.new_python_job(name='csv-to-json')
            json_output = csv_to_json_j.call(csv_to_json, cat_j.output)
            b.write_output(json_output.as_str(), '/output/add_mult_table.json')
            b.run()
        Notes
        -----
        Unlike the :class:`.BashJob`, a :class:`.PythonJob` returns a new
        :class:`.PythonResult` for every invocation of :meth:`.PythonJob.call`. A
        :class:`.PythonResult` can be used as an argument in subsequent invocations of
        :meth:`.PythonJob.call`, as an argument in downstream python jobs,
        or as inputs to other bash jobs. Likewise, :class:`.InputResourceFile`,
        :class:`.JobResourceFile`, and :class:`.ResourceGroup` can be passed to
        :meth:`.PythonJob.call`. Batch automatically detects dependencies between jobs
        including between python jobs and bash jobs.
        When a :class:`.ResourceFile` is passed as an argument, it is passed to the
        function as a string to the local file path. When a :class:`.ResourceGroup`
        is passed as an argument, it is passed to the function as a dict where the
        keys are the resource identifiers in the original :class:`.ResourceGroup`
        and the values are the local file paths.
        Like :class:`.JobResourceFile`, all :class:`.PythonResult` are stored as
        temporary files and must be written to a permanent location using
        :meth:`.Batch.write_output` if the output needs to be saved. A
        PythonResult is saved as a dill serialized object. However, you
        can use one of the methods :meth:`.PythonResult.as_str`, :meth:`.PythonResult.as_repr`,
        or :meth:`.PythonResult.as_json` to convert a `PythonResult` to a
        `JobResourceFile` with the desired output.
        Warning
        -------
        You must have any non-builtin packages that are used by `unapplied` installed
        in your image. You can use :func:`.docker.build_python_image` to build a
        Python image with additional Python packages installed that is compatible
        with Python jobs.
        Here are some tips to make sure your function can be used with Batch:
        - Only reference top-level modules in your functions: like numpy or pandas.
        - If you get a serialization error, try moving your imports into your function.
        - Instead of serializing a complex class, determine what information is essential
          and only serialize that, perhaps as a dict or array.
        Parameters
        ----------
        unapplied:
            A reference to a Python function to execute.
        args:
            Positional arguments to the Python function. Must be either a builtin
            Python object, a :class:`.Resource`, or a Dill serializable object.
        kwargs:
            Key-word arguments to the Python function. Must be either a builtin
            Python object, a :class:`.Resource`, or a Dill serializable object.
        Returns
        -------
        :class:`.resource.PythonResult`
        """
        if not callable(unapplied):
            raise BatchException(f'unapplied must be a callable function. Found {type(unapplied)}.')
        for arg in args:
            if isinstance(arg, Job):
                raise BatchException('arguments to a PythonJob cannot be other job objects.')
        for value in kwargs.values():
            if isinstance(value, Job):
                raise BatchException('arguments to a PythonJob cannot be other job objects.')
        # Record a resource argument as an input and, if another job produced
        # it, add that job as a dependency (mirrors _interpolate_command).
        def handle_arg(r):
            if r._source != self:
                self._add_inputs(r)
                if r._source is not None:
                    if r not in r._source._valid:
                        name = r._source._resources_inverse[r]
                        raise BatchException(f"undefined resource '{name}'\n")
                    self._dependencies.add(r._source)
                    r._source._add_internal_outputs(r)
            else:
                _add_resource_to_set(self._valid, r)
            self._mentioned.add(r)
        for arg in args:
            if isinstance(arg, _resource.Resource):
                handle_arg(arg)
        for value in kwargs.values():
            if isinstance(value, _resource.Resource):
                handle_arg(value)
        # Mint a fresh PythonResult for this invocation's return value.
        self.n_results += 1
        result = self._get_resource(f'result{self.n_results}')
        handle_arg(result)
        self._functions.append((result, unapplied, args, kwargs))
        return result
    async def _compile(self, local_tmpdir, remote_tmpdir, *, dry_run=False):
        # Tag each argument so the remote side knows whether it is a dill-
        # serialized result file, a plain path, a dict of paths (resource
        # group), or an immediate value.
        def prepare_argument_for_serialization(arg):
            if isinstance(arg, _resource.PythonResult):
                return ('py_path', arg._get_path(local_tmpdir))
            if isinstance(arg, _resource.ResourceFile):
                return ('path', arg._get_path(local_tmpdir))
            if isinstance(arg, _resource.ResourceGroup):
                return ('dict_path', {name: resource._get_path(local_tmpdir)
                                      for name, resource in arg._resources.items()})
            return ('value', arg)
        # Inverse of the tagging above; runs inside the container at job time.
        def deserialize_argument(arg):
            typ, val = arg
            if typ == 'py_path':
                return dill.load(open(val, 'rb'))
            if typ in ('path', 'dict_path'):
                return val
            assert typ == 'value'
            return val
        # Wrap the user function so its (tagged) arguments are deserialized
        # just before invocation.
        def wrap(f):
            @functools.wraps(f)
            def wrapped(*args, **kwargs):
                args = [deserialize_argument(arg) for arg in args]
                kwargs = {kw: deserialize_argument(arg) for kw, arg in kwargs.items()}
                return f(*args, **kwargs)
            return wrapped
        for i, (result, unapplied, args, kwargs) in enumerate(self._functions):
            args = [prepare_argument_for_serialization(arg) for arg in args]
            kwargs = {kw: prepare_argument_for_serialization(arg) for kw, arg in kwargs.items()}
            # Serialize the fully-applied call as one dill blob, staged
            # remotely as code{i}.p and read back as a job input.
            pipe = BytesIO()
            dill.dump(functools.partial(wrap(unapplied), *args, **kwargs), pipe, recurse=True)
            pipe.seek(0)
            job_path = os.path.dirname(result._get_path(remote_tmpdir))
            code_path = f'{job_path}/code{i}.p'
            if not dry_run:
                await self._batch._fs.makedirs(os.path.dirname(code_path), exist_ok=True)
                await self._batch._fs.write(code_path, pipe.getvalue())
            code = self._batch.read_input(code_path)
            # Optional sidecar writes for as_json()/as_str()/as_repr() views of
            # the result; each snippet is spliced into the generated script.
            json_write = ''
            if result._json:
                json_write = f'''
with open(\\"{result._json}\\", \\"w\\") as out:
    out.write(json.dumps(result) + \\"\\n\\")
'''
            str_write = ''
            if result._str:
                str_write = f'''
with open(\\"{result._str}\\", \\"w\\") as out:
    out.write(str(result) + \\"\\n\\")
'''
            repr_write = ''
            if result._repr:
                repr_write = f'''
with open(\\"{result._repr}\\", \\"w\\") as out:
    out.write(repr(result) + \\"\\n\\")
'''
            # The generated python3 -c script loads the dill blob, runs it, and
            # dumps either the result or (exception, formatted traceback).
            wrapper_code = f'''python3 -c "
import os
import base64
import dill
import traceback
import json
import sys
with open(\\"{result}\\", \\"wb\\") as dill_out:
    try:
        with open(\\"{code}\\", \\"rb\\") as f:
            result = dill.load(f)()
        dill.dump(result, dill_out, recurse=True)
        {json_write}
        {str_write}
        {repr_write}
    except Exception as e:
        traceback.print_exc()
        dill.dump((e, traceback.format_exception(type(e), e, e.__traceback__)), dill_out, recurse=True)
        raise e
"'''
            wrapper_code = self._interpolate_command(wrapper_code, allow_python_results=True)
            self._wrapper_code.append(wrapper_code)
            # Keep a human-readable rendition of the user code for display.
            self._user_code.append(textwrap.dedent(inspect.getsource(unapplied)))
            args = ', '.join([f'{arg!r}' for _, arg in args])
            kwargs = ', '.join([f'{k}={v!r}' for k, (_, v) in kwargs.items()])
            separator = ', ' if args and kwargs else ''
            func_call = f'{unapplied.__name__}({args}{separator}{kwargs})'
            self._user_code.append(self._interpolate_command(func_call, allow_python_results=True))
        return True
| 33.499556 | 186 | 0.561636 |
6bc58139794c6f0fcfe21586590c3dc0c27adc22 | 44,427 | py | Python | include/configobj/src/tests/test_configobj.py | SWEN-712/screen-reader-brandonp728 | e30c25ad2d10ce632fac0548696a61a872328f59 | [
"bzip2-1.0.6"
] | null | null | null | include/configobj/src/tests/test_configobj.py | SWEN-712/screen-reader-brandonp728 | e30c25ad2d10ce632fac0548696a61a872328f59 | [
"bzip2-1.0.6"
] | null | null | null | include/configobj/src/tests/test_configobj.py | SWEN-712/screen-reader-brandonp728 | e30c25ad2d10ce632fac0548696a61a872328f59 | [
"bzip2-1.0.6"
] | null | null | null | # *- coding: utf-8 -*-
# pylint: disable=wildcard-import, missing-docstring, no-self-use, bad-continuation
# pylint: disable=invalid-name, redefined-outer-name, too-few-public-methods
from __future__ import unicode_literals
import os
import re
import warnings
from codecs import BOM_UTF8
from warnings import catch_warnings
from tempfile import NamedTemporaryFile
import pytest
import six
import mock
import configobj as co
from configobj import ConfigObj, flatten_errors, ReloadError, DuplicateError, MissingInterpolationOption, InterpolationLoopError, ConfigObjError
from configobj.validate import Validator, VdtValueTooSmallError
def cfg_lines(config_string_representation):
    """
    Split a config given as one string into a list of UTF-8 encoded lines.

    :param config_string_representation: string representation of a config
        file (typically a triple-quoted string)
    :type config_string_representation: str or unicode (bytes also accepted)
    :return: a list of lines of that config. Whitespace on the left will be
        trimmed based on the indentation level of the first non-blank line
        to make it a bit saner to assert content of a particular line
    :rtype: list of bytes
    :raises ValueError: if the input contains no non-blank line
    """
    lines = config_string_representation.splitlines()
    # The first line with actual content defines how much leading whitespace
    # to strip from every line.
    for idx, line in enumerate(lines):
        if line.strip():
            line_no_with_content = idx
            break
    else:
        raise ValueError('no content in provided config file: '
                         '{!r}'.format(config_string_representation))
    first_content = lines[line_no_with_content]
    # six.binary_type is just bytes on both py2 and py3
    if isinstance(first_content, bytes):
        first_content = first_content.decode('utf-8')
    # raw strings so '\s' is a regex class, not an (invalid) string escape
    ws_chars = len(re.search(r'^(\s*)', first_content).group(1))

    def yield_stringified_line():
        # Normalise every line to text so the dedent regex applies uniformly.
        for line in lines:
            if isinstance(line, bytes):
                yield line.decode('utf-8')
            else:
                yield line

    dedent_re = re.compile(r'^\s{0,%s}' % ws_chars)
    return [dedent_re.sub('', line).encode('utf-8')
            for line in yield_stringified_line()]
@pytest.fixture
def cfg_contents(request):
    def _write_cfg_and_return_name(config_string_representation):
        """
        Write the given config text to a temporary file and return its name.

        :param config_string_representation: string representation of a config
            file (typically a triple-quoted string)
        :return: name of a temporary file holding the dedented, UTF-8 encoded
            lines of that config; the file is deleted at test teardown
        :rtype: basestring
        """
        linesep = os.linesep.encode('utf-8')
        with NamedTemporaryFile(delete=False, mode='wb') as cfg_file:
            for line in cfg_lines(config_string_representation):
                if not isinstance(line, six.binary_type):
                    line = line.encode('utf-8')
                cfg_file.write(line + linesep)
        request.addfinalizer(lambda: os.unlink(cfg_file.name))
        return cfg_file.name
    return _write_cfg_and_return_name
def test_order_preserved():
    """Copying a ConfigObj keeps insertion order and deep-copies sections."""
    c = ConfigObj()
    for key, value in [('a', 1), ('b', 2), ('c', 3)]:
        c[key] = value
    c['section'] = {}
    for key, value in [('a', 1), ('b', 2), ('c', 3)]:
        c['section'][key] = value
    for subsection in ['section', 'section2', 'section3']:
        c['section'][subsection] = {}
    c['section2'] = {}
    c['section3'] = {}
    c2 = ConfigObj(c)
    assert c2.scalars == ['a', 'b', 'c']
    assert c2.sections == ['section', 'section2', 'section3']
    assert c2['section'].scalars == ['a', 'b', 'c']
    assert c2['section'].sections == ['section', 'section2', 'section3']
    # the copy must not share section objects with the original
    assert c['section'] is not c2['section']
    assert c['section']['section'] is not c2['section']['section']
def test_options_deprecation():
    """Passing the removed ``options`` keyword must emit a DeprecationWarning."""
    warnings.simplefilter('always', DeprecationWarning)
    with catch_warnings(record=True) as log:
        ConfigObj(options={})
    # Assert the length before indexing: the old tuple-unpack in a
    # try/except left `warning` undefined on failure, so the test died
    # with a confusing NameError instead of a clear assertion error.
    assert len(log) == 1
    warning = log[0]
    assert warning.category == DeprecationWarning
def test_list_members():
    """A list stored as a value must be mutable in place."""
    cfg = ConfigObj()
    cfg['a'] = []
    cfg['a'].append('foo')
    assert cfg['a'] == ['foo']
def test_list_interpolation_with_pop():
    """pop() must apply interpolation to members of list values."""
    cfg = ConfigObj()
    cfg['a'] = []
    cfg['a'].append('%(b)s')
    cfg['b'] = 'bar'
    assert cfg.pop('a') == ['bar']
def test_with_default():
    """pop() honours a supplied default and raises KeyError without one."""
    cfg = ConfigObj()
    cfg['a'] = 3
    assert cfg.pop('a') == 3
    assert cfg.pop('b', 3) == 3
    with pytest.raises(KeyError):
        cfg.pop('c')
def test_interpolation_with_section_names(cfg_contents):
    """
    A section whose name collides with an interpolation key must not be
    picked up as the interpolation value.
    """
    config_path = cfg_contents("""
    item1 = 1234
    [section]
    [[item1]]
    foo='bar'
    [[DEFAULT]]
    [[[item1]]]
    why = would you do this?
    [[other-subsection]]
    item2 = '$item1'""")
    parsed = ConfigObj(config_path, interpolation='Template')
    # Building the repr raised an exception in 4.7.1 and earlier because the
    # section was found as the interpolation value.
    repr(parsed)
def test_interoplation_repr():
    """
    repr() must not raise for configs with unresolved interpolations
    (raised MissingInterpolationOption in 4.7.1 and earlier).
    Note: the historical typo in the test name is kept for test-id stability.
    """
    cfg = ConfigObj(['foo = $bar'], interpolation='Template')
    cfg['baz'] = {}
    cfg['baz']['spam'] = '%(bar)s'
    repr(cfg)
class TestEncoding(object):
    """Tests for the ``encoding``/``default_encoding`` handling of ConfigObj."""
    @pytest.fixture
    def ant_cfg(self):
        # Config whose value is a non-BMP character (U+1F41C, an ant emoji):
        # exercises full Unicode round-tripping, not just latin-1.
        return """
               [tags]
                   [[bug]]
                       translated = \U0001f41c
               """
    #issue #18
    def test_unicode_conversion_when_encoding_is_set(self, cfg_contents):
        # With an explicit encoding, values are decoded to text.
        cfg = cfg_contents(b"test = some string")
        c = ConfigObj(cfg, encoding='utf8')
        if six.PY2:
            assert not isinstance(c['test'], str)
            assert isinstance(c['test'], unicode)
        else:
            assert isinstance(c['test'], str)
    #issue #18
    def test_no_unicode_conversion_when_encoding_is_omitted(self, cfg_contents):
        # Without an encoding, py2 keeps native str; py3 is always text.
        cfg = cfg_contents(b"test = some string")
        c = ConfigObj(cfg)
        if six.PY2:
            assert isinstance(c['test'], str)
            assert not isinstance(c['test'], unicode)
        else:
            assert isinstance(c['test'], str)
    #issue #44
    def test_that_encoding_using_list_of_strings(self):
        # b'\xf0\x9f\x90\x9c' is the UTF-8 encoding of U+1F41C.
        cfg = [b'test = \xf0\x9f\x90\x9c']
        c = ConfigObj(cfg, encoding='utf8')
        if six.PY2:
            assert isinstance(c['test'], unicode)
            assert not isinstance(c['test'], str)
        else:
            assert isinstance(c['test'], str)
        assert c['test'] == '\U0001f41c'
    #issue #44
    def test_encoding_in_subsections(self, ant_cfg, cfg_contents):
        c = cfg_contents(ant_cfg)
        cfg = ConfigObj(c, encoding='utf-8')
        assert isinstance(cfg['tags']['bug']['translated'], six.text_type)
    #issue #44 and #55
    def test_encoding_in_config_files(self, request, ant_cfg):
        # the cfg_contents fixture is doing this too, but be explicit
        with NamedTemporaryFile(delete=False, mode='wb') as cfg_file:
            cfg_file.write(ant_cfg.encode('utf-8'))
        request.addfinalizer(lambda : os.unlink(cfg_file.name))
        cfg = ConfigObj(cfg_file.name, encoding='utf-8')
        assert isinstance(cfg['tags']['bug']['translated'], six.text_type)
        # writing back must not raise either
        cfg.write()
    def test_encoding_from_filelike(self):
        """Test for fix of #18"""
        # b'\xc2\xa7' is the UTF-8 encoding of the section sign U+00A7.
        stream = mock.MagicMock()
        stream.read.return_value = b'text = \xc2\xa7'
        c = ConfigObj(stream, encoding='utf-8')
        text = c['text']
        assert isinstance(text, unicode if six.PY2 else str)
        assert text == '\u00a7'
@pytest.fixture
def testconfig1():
    """
    Three-level nested config with a comment on nearly every line;
    copied from the main doctest
    """
    return """\
    key1= val # comment 1
    key2= val # comment 2
    # comment 3
    [lev1a] # comment 4
    key1= val # comment 5
    key2= val # comment 6
    # comment 7
    [lev1b] # comment 8
    key1= val # comment 9
    key2= val # comment 10
    # comment 11
        [[lev2ba]] # comment 12
        key1= val # comment 13
        # comment 14
        [[lev2bb]] # comment 15
        key1= val # comment 16
    # comment 17
    [lev1c] # comment 18
    # comment 19
        [[lev2c]] # comment 20
        # comment 21
            [[[lev3c]]] # comment 22
            key1 = val # comment 23"""
@pytest.fixture
def testconfig2():
    """Config mixing quoted/unquoted values and quoted section names."""
    return """\
        key1 = 'val1'
        key2 =   "val2"
        key3 = val3
        ["section 1"] # comment
        keys11 = val1
        keys12 = val2
        keys13 = val3
        [section 2]
        keys21 = val1
        keys22 = val2
        keys23 = val3
            [['section 2 sub 1']]
            fish = 3
    """
@pytest.fixture
def testconfig6():
    """Bytes config exercising single-line and multiline triple-quoted values."""
    return b'''
    name1 = """ a single line value """ # comment
    name2 = \''' another single line value \''' # comment
    name3 = """ a single line value """
    name4 = \''' another single line value \'''
    [ "multi section" ]
    name1 = """
    Well, this is a
    multiline value
    """
    name2 = \'''
    Well, this is a
    multiline value
    \'''
    name3 = """
    Well, this is a
    multiline value
    """ # a comment
    name4 = \'''
    Well, this is a
    multiline value
    \''' # I guess this is a comment too
    '''
@pytest.fixture
def a(testconfig1, cfg_contents):
    """
    ConfigObj parsed (strictly) from testconfig1;
    also copied from main doc tests
    """
    return ConfigObj(cfg_contents(testconfig1), raise_errors=True)
@pytest.fixture
def b(testconfig2, cfg_contents):
    """
    ConfigObj parsed (strictly) from testconfig2;
    also copied from main doc tests
    """
    return ConfigObj(cfg_contents(testconfig2), raise_errors=True)
@pytest.fixture
def i(testconfig6, cfg_contents):
    """
    ConfigObj parsed (strictly) from testconfig6;
    also copied from main doc tests
    """
    return ConfigObj(cfg_contents(testconfig6), raise_errors=True)
def test_configobj_dict_representation(a, b, cfg_contents):
    """Parsed configs must compare equal to plain nested dicts."""
    assert a.depth == 0
    assert a == {
        'key2': 'val',
        'key1': 'val',
        'lev1c': {
            'lev2c': {
                'lev3c': {
                    'key1': 'val',
                },
            },
        },
        'lev1b': {
            'key2': 'val',
            'key1': 'val',
            'lev2ba': {
                'key1': 'val',
            },
            'lev2bb': {
                'key1': 'val',
            },
        },
        'lev1a': {
            'key2': 'val',
            'key1': 'val',
        },
    }
    assert b.depth == 0
    assert b == {
        'key3': 'val3',
        'key2': 'val2',
        'key1': 'val1',
        'section 1': {
            'keys11': 'val1',
            'keys13': 'val3',
            'keys12': 'val2',
        },
        'section 2': {
            'section 2 sub 1': {
                'fish': '3',
            },
            'keys21': 'val1',
            'keys22': 'val2',
            'keys23': 'val3',
        },
    }
    # comment-heavy keys with quote characters still parse to plain values
    t = cfg_lines("""
        'a' = b # !"$%^&*(),::;'@~#= 33
        "b" = b #= 6, 33
        """)
    t2 = ConfigObj(t)
    assert t2 == {'a': 'b', 'b': 'b'}
    t2.inline_comments['b'] = '' # pylint: disable=unsubscriptable-object
    del t2['a']
    # deletion leaves the (now blank) comment lines in the output
    assert t2.write() == ['','b = b', '']
def test_behavior_when_list_values_is_false():
    """With list_values=False, quoting is preserved verbatim in both directions."""
    c = '''
       key1 = no quotes
       key2 = 'single quotes'
       key3 = "double quotes"
       key4 = "list", 'with', several, "quotes"
       '''
    cfg = ConfigObj(cfg_lines(c), list_values=False)
    assert cfg == {
        'key1': 'no quotes',
        'key2': "'single quotes'",
        'key3': '"double quotes"',
        'key4': '"list", \'with\', several, "quotes"'
    }
    cfg2 = ConfigObj(list_values=False)
    cfg2['key1'] = 'Multiline\nValue'
    cfg2['key2'] = '''"Value" with 'quotes' !'''
    # multiline values are triple-quoted; embedded quotes are left untouched
    assert cfg2.write() == [
        "key1 = '''Multiline\nValue'''",
        'key2 = "Value" with \'quotes\' !'
    ]
    cfg2.list_values = True
    # with list_values back on, the quote-bearing value gets triple-quoted too
    assert cfg2.write() == [
        "key1 = '''Multiline\nValue'''",
        'key2 = \'\'\'"Value" with \'quotes\' !\'\'\''
    ]
def test_flatten_errors(val, cfg_contents):
    """flatten_errors() reports every failing key with its section path.
    NOTE(review): `val` is presumably a Validator fixture defined elsewhere
    in this test suite — confirm against the fixtures module."""
    config = cfg_contents("""
    test1=40
    test2=hello
    test3=3
    test4=5.0
    [section]
        test1=40
        test2=hello
        test3=3
        test4=5.0
        [[sub section]]
            test1=40
            test2=hello
            test3=3
            test4=5.0
    """)
    # test4 fails at every level: 5.0 < the spec's minimum of 6.0
    configspec = cfg_contents("""
    test1= integer(30,50)
    test2= string
    test3=integer
    test4=float(6.0)
    [section]
        test1=integer(30,50)
        test2=string
        test3=integer
        test4=float(6.0)
        [[sub section]]
            test1=integer(30,50)
            test2=string
            test3=integer
            test4=float(6.0)
    """)
    c1 = ConfigObj(config, configspec=configspec)
    res = c1.validate(val)
    assert flatten_errors(c1, res) == [([], 'test4', False), (['section'], 'test4', False), (['section', 'sub section'], 'test4', False)]
    # with preserve_errors=True, the third element is the exception itself
    res = c1.validate(val, preserve_errors=True)
    check = flatten_errors(c1, res)
    assert check[0][:2] == ([], 'test4')
    assert check[1][:2] == (['section'], 'test4')
    assert check[2][:2] == (['section', 'sub section'], 'test4')
    for entry in check:
        assert isinstance(entry[2], VdtValueTooSmallError)
        assert str(entry[2]) == 'the value "5.0" is too small.'
def test_unicode_handling():
    """Exercise BOM detection, encoding round-trips and newline handling."""
    u_base = '''
# initial comment
# inital comment 2
test1 = some value
# comment
test2 = another value # inline comment
# section comment
[section] # inline comment
test = test # another inline comment
test2 = test2
# final comment
# final comment2
'''
    # needing to keep line endings means this isn't a good candidate
    # for the cfg_lines utility method
    u = u_base.encode('utf_8').splitlines(True)
    u[0] = BOM_UTF8 + u[0]
    uc = ConfigObj(u)
    uc.encoding = None
    assert uc.BOM
    assert uc == {'test1': 'some value', 'test2': 'another value',
                  'section': {'test': 'test', 'test2': 'test2'}}
    uc = ConfigObj(u, encoding='utf_8', default_encoding='latin-1')
    assert uc.BOM
    assert isinstance(uc['test1'], six.text_type)
    assert uc.encoding == 'utf_8'
    assert uc.newlines == '\n'
    assert len(uc.write()) == 13
    # NOTE(review): a non-ASCII character at the end of this literal appears
    # to have been lost in transit (probably a currency symbol) -- confirm
    # against the file history before relying on the exact value.
    uc['latin1'] = "This costs lot's of "
    a_list = uc.write()
    assert 'latin1' in str(a_list)
    assert len(a_list) == 14
    assert isinstance(a_list[0], six.binary_type)
    assert a_list[0].startswith(BOM_UTF8)
    u = u_base.replace('\n', '\r\n').encode('utf-8').splitlines(True)
    uc = ConfigObj(u)
    assert uc.newlines == '\r\n'
    uc.newlines = '\r'
    file_like = six.BytesIO()
    uc.write(file_like)
    file_like.seek(0)
    uc2 = ConfigObj(file_like)
    assert uc2 == uc
    # Was `== None`; identity comparison is the correct idiom for None.
    assert uc2.filename is None
    assert uc2.newlines == '\r'
class TestWritingConfigs(object):
    """Writing configs built from a configspec, and empty-value handling."""

    def test_validate(self, val):
        """validate(copy=True) fills defaults and copies spec comments."""
        spec = [
            '# Initial Comment',
            '',
            'key1 = string(default=Hello)',
            '',
            '# section comment',
            '[section] # inline comment',
            '# key1 comment',
            'key1 = integer(default=6)',
            '# key2 comment',
            'key2 = boolean(default=True)',
            '# subsection comment',
            '[[sub-section]]# snug inline comment',
            '# another key1 comment',
            'key1 = float(default=3.0)'
        ]
        blank_config = ConfigObj(configspec=spec)
        assert blank_config.validate(val, copy=True)
        # Defaults are materialised with their declared types.
        assert blank_config.dict() == {
            'key1': 'Hello',
            'section': {'key1': 6, 'key2': True, 'sub-section': {'key1': 3.0}}
        }
        # The written output mirrors the spec, with defaults substituted.
        assert blank_config.write() == [
            '# Initial Comment',
            '',
            'key1 = Hello',
            '',
            '# section comment',
            '[section] # inline comment',
            '# key1 comment',
            'key1 = 6',
            '# key2 comment',
            'key2 = True',
            '# subsection comment',
            '[[sub-section]]# snug inline comment',
            '# another key1 comment',
            'key1 = 3.0'
        ]

    def test_writing_empty_values(self):
        """Empty values are quoted on write unless write_empty_values is set."""
        config_with_empty_values = [
            '',
            'key1 =',
            'key2 =# a comment',
        ]
        cfg = ConfigObj(config_with_empty_values)
        assert cfg.write() == ['', 'key1 = ""', 'key2 = ""# a comment']
        cfg.write_empty_values = True
        assert cfg.write() == ['', 'key1 = ', 'key2 = # a comment']
class TestUnrepr(object):
    """Behaviour of unrepr mode, which stores values as Python literals."""

    def test_in_reading(self):
        """unrepr parses values as Python literals and round-trips them."""
        config_to_be_unreprd = cfg_lines("""
key1 = (1, 2, 3) # comment
key2 = True
key3 = 'a string'
key4 = [1, 2, 3, 'a mixed list']
""")
        cfg = ConfigObj(config_to_be_unreprd, unrepr=True)
        assert cfg == {
            'key1': (1, 2, 3),
            'key2': True,
            'key3': 'a string',
            'key4': [1, 2, 3, 'a mixed list']
        }
        assert cfg == ConfigObj(cfg.write(), unrepr=True)

    def test_in_multiline_values(self, cfg_contents):
        """A triple-quoted multiline literal evaluates to a dict value."""
        config_with_multiline_value = cfg_contents('''
k = \"""{
'k1': 3,
'k2': 6.0}\"""
''')
        cfg = ConfigObj(config_with_multiline_value, unrepr=True)
        assert cfg == {'k': {'k1': 3, 'k2': 6.0}}

    def test_with_a_dictionary(self):
        """A dict literal value is exposed as a dict instance."""
        config_with_dict_value = ['k = {"a": 1}']
        cfg = ConfigObj(config_with_dict_value, unrepr=True)
        assert isinstance(cfg['k'], dict)

    def test_with_hash(self):
        """'#' inside a quoted list element is data, not a comment."""
        config_with_a_hash_in_a_list = [
            'key1 = (1, 2, 3) # comment',
            'key2 = True',
            "key3 = 'a string'",
            "key4 = [1, 2, 3, 'a mixed list#']"
        ]
        cfg = ConfigObj(config_with_a_hash_in_a_list, unrepr=True)
        assert cfg == {
            'key1': (1, 2, 3),
            'key2': True,
            'key3': 'a string',
            'key4': [1, 2, 3, 'a mixed list#']
        }
class TestValueErrors(object):
    """as_bool/as_int/as_float conversion failures and successes."""

    def test_bool(self, empty_cfg):
        """as_bool rejects arbitrary strings, maps truthy/falsy words."""
        empty_cfg['a'] = 'fish'
        with pytest.raises(ValueError) as excinfo:
            empty_cfg.as_bool('a')
        assert str(excinfo.value) == 'Value "fish" is neither True nor False'
        empty_cfg['b'] = 'True'
        assert empty_cfg.as_bool('b') is True
        empty_cfg['b'] = 'off'
        assert empty_cfg.as_bool('b') is False

    def test_int(self, empty_cfg):
        """as_int rejects non-integer strings and converts valid ones."""
        for bad in ('fish', '3.2'):
            empty_cfg['a'] = bad
            with pytest.raises(ValueError) as excinfo:
                empty_cfg.as_int('a')
            assert str(excinfo.value).startswith('invalid literal for int()')
        empty_cfg['b'] = '1'
        # Was `as_bool(...) is True` (copy-paste from test_bool) followed by
        # a dead `empty_cfg['b'] = '3.2'`; this test should exercise as_int.
        assert empty_cfg.as_int('b') == 1

    def test_float(self, empty_cfg):
        """as_float rejects non-numeric strings and converts valid ones."""
        empty_cfg['a'] = 'fish'
        with pytest.raises(ValueError):
            empty_cfg.as_float('a')
        empty_cfg['b'] = '1'
        assert empty_cfg.as_float('b') == 1
        empty_cfg['b'] = '3.2'
        assert empty_cfg.as_float('b') == 3.2
def test_error_types():
    """Every error class raises correctly and formats its message."""
    # errors that don't have interesting messages
    # TODO: assert more interesting things now that we're not using doctest
    test_value = 'what'
    plain_error_classes = (
        co.ConfigObjError, co.NestingError, co.ParseError,
        co.DuplicateError, co.ConfigspecError, co.RepeatSectionError,
    )
    for plain_cls in plain_error_classes:
        with pytest.raises(plain_cls) as caught:
            raise plain_cls(test_value)
        assert str(caught.value) == test_value
    templated_messages = {
        co.InterpolationLoopError:
            'interpolation loop detected in value "{0}".',
        co.MissingInterpolationOption:
            'missing option "{0}" in interpolation.',
    }
    for templated_cls, template in templated_messages.items():
        with pytest.raises(templated_cls) as caught:
            raise templated_cls(test_value)
        assert str(caught.value) == template.format(test_value)
    # ReloadError is raised as IOError
    with pytest.raises(IOError):
        raise co.ReloadError()
class TestSectionBehavior(object):
    """Section-level operations: dict(), merge() and walk()."""

    def test_dictionary_representation(self, a):
        """dict() returns an equal but independent plain dictionary."""
        n = a.dict()
        assert n == a
        assert n is not a

    def test_merging(self, cfg_contents):
        """merge() overwrites scalar values but keeps unrelated subsections."""
        config_with_subsection = cfg_contents("""
[section1]
option1 = True
[[subsection]]
more_options = False
# end of file
""")
        config_that_overwrites_parameter = cfg_contents("""
# File is user.ini
[section1]
option1 = False
# end of file
""")
        c1 = ConfigObj(config_that_overwrites_parameter)
        c2 = ConfigObj(config_with_subsection)
        c2.merge(c1)
        assert c2.dict() == {'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}

    def test_walking_with_in_place_updates(self, cfg_contents):
        """walk() can rename keys/sections and rewrite values in place."""
        config = cfg_contents("""
[XXXXsection]
XXXXkey = XXXXvalue
""")
        cfg = ConfigObj(config)
        assert cfg.dict() == {'XXXXsection': {'XXXXkey': 'XXXXvalue'}}

        def transform(section, key):
            # Rename the key/section, and substitute the placeholder in
            # string values only (sections are just renamed).
            val = section[key]
            newkey = key.replace('XXXX', 'CLIENT1')
            section.rename(key, newkey)
            if isinstance(val, six.string_types):
                val = val.replace('XXXX', 'CLIENT1')
                section[newkey] = val

        # walk() collects transform's return value (None) per renamed key.
        assert cfg.walk(transform, call_on_sections=True) == {
            'CLIENT1section': {'CLIENT1key': None}
        }
        assert cfg.dict() == {
            'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}
        }
def test_reset_a_configobj():
    """reset() must restore every public attribute to its pristine default."""
    marker = object()
    cfg = ConfigObj()
    cfg['something'] = marker
    cfg['section'] = {'something': marker}
    cfg.filename = 'fish'
    # Clobber every resettable attribute with a sentinel object.
    clobbered_attrs = (
        'raise_errors', 'list_values', 'create_empty', 'file_error',
        'stringify', 'indent_type', 'encoding', 'default_encoding',
        'BOM', 'newlines', 'write_empty_values', 'unrepr',
        'initial_comment', 'final_comment', 'configspec',
        'inline_comments', 'comments', 'defaults', 'default_values',
    )
    for attr in clobbered_attrs:
        setattr(cfg, attr, marker)
    cfg.reset()
    # Singletons are checked by identity, containers by equality.
    expected_defaults = {
        'filename': None, 'raise_errors': False, 'list_values': True,
        'create_empty': False, 'file_error': False, 'interpolation': True,
        'configspec': None, 'stringify': True, 'indent_type': None,
        'encoding': None, 'default_encoding': None, 'unrepr': False,
        'write_empty_values': False, 'inline_comments': {}, 'comments': {},
        'defaults': [], 'default_values': {},
    }
    for attr, default in expected_defaults.items():
        actual = getattr(cfg, attr)
        if isinstance(default, (dict, list)):
            assert actual == default
        else:
            assert actual is default
    assert cfg == ConfigObj()
    assert repr(cfg) == 'ConfigObj({})'
class TestReloading(object):
    """reload() behaviour with and without a backing file."""

    @pytest.fixture
    def reloadable_cfg_content(self):
        """Raw config text written to a real temp file by the reload test."""
        content = '''
test1=40
test2=hello
test3=3
test4=5.0
[section]
test1=40
test2=hello
test3=3
test4=5.0
[[sub section]]
test1=40
test2=hello
test3=3
test4=5.0
[section2]
test1=40
test2=hello
test3=3
test4=5.0
'''
        return content

    def test_handle_no_filename(self):
        """reload() raises ReloadError when no filename is attached."""
        # A stream, no args, and a list source all leave filename unset.
        for bad_args in ([six.BytesIO()], [], [[]]):
            cfg = ConfigObj(*bad_args)
            with pytest.raises(ReloadError) as excinfo:
                cfg.reload()
            assert str(excinfo.value) == 'reload failed, filename is not set.'

    def test_reloading_with_an_actual_file(self, request,
                                           reloadable_cfg_content,
                                           cfg_contents):
        """reload() discards in-memory changes, restoring file contents."""
        with NamedTemporaryFile(delete=False, mode='wb') as cfg_file:
            cfg_file.write(reloadable_cfg_content.encode('utf-8'))
        request.addfinalizer(lambda : os.unlink(cfg_file.name))
        configspec = cfg_contents("""
test1= integer(30,50)
test2= string
test3=integer
test4=float(4.5)
[section]
test1=integer(30,50)
test2=string
test3=integer
test4=float(4.5)
[[sub section]]
test1=integer(30,50)
test2=string
test3=integer
test4=float(4.5)
[section2]
test1=integer(30,50)
test2=string
test3=integer
test4=float(4.5)
""")
        cfg = ConfigObj(cfg_file.name, configspec=configspec)
        # A configspec tweak made before reload is part of what's exercised.
        cfg.configspec['test1'] = 'integer(50,60)'
        backup = ConfigObj(cfg_file.name)
        del cfg['section']
        del cfg['test1']
        cfg['extra'] = '3'
        cfg['section2']['extra'] = '3'
        cfg.reload()
        # All mutations above must be rolled back to the on-disk state.
        assert cfg == backup
        assert cfg.validate(Validator())
class TestDuplicates(object):
    """Duplicate sections/keys raise DuplicateError with raise_errors=True."""

    def test_duplicate_section(self):
        """A re-declared section name is rejected, quoted or not."""
        cfg = '''
[hello]
member = value
[hello again]
member = value
[ "hello" ]
member = value
'''
        with pytest.raises(DuplicateError) as excinfo:
            ConfigObj(cfg.splitlines(), raise_errors=True)
        # Line 6 is the quoted re-declaration of [hello].
        assert str(excinfo.value) == 'Duplicate section name at line 6.'

    def test_duplicate_members(self):
        """A re-declared keyword is rejected, quoted or not."""
        d = '''
[hello]
member=value
[helloagain]
member1=value
member2=value
'member1'=value
["andagain"]
member=value
'''
        with pytest.raises(DuplicateError) as excinfo:
            ConfigObj(d.splitlines(),raise_errors=True)
        # Line 7 is the quoted re-declaration of member1.
        assert str(excinfo.value) == 'Duplicate keyword name at line 7.'
class TestInterpolation(object):
    """
    Tests various interpolation behaviors, both ConfigParser-style
    (%(name)s) and string.Template-style ($name / ${name}).
    """

    @pytest.fixture
    def config_parser_cfg(self):
        """ConfigParser-style interpolation fixture with DEFAULT fallbacks."""
        cfg = ConfigObj()
        cfg['DEFAULT'] = {
            'b': 'goodbye',
            'userdir': r'c:\\home',
            # 'c' and 'd' reference each other: an interpolation loop.
            'c': '%(d)s',
            'd': '%(c)s'
        }
        cfg['section'] = {
            'a': r'%(datadir)s\\some path\\file.py',
            'b': r'%(userdir)s\\some path\\file.py',
            'c': 'Yo %(a)s',
            # 'd' references a missing option; 'e' references itself.
            'd': '%(not_here)s',
            'e': '%(e)s',
        }
        cfg['section']['DEFAULT'] = {
            'datadir': r'c:\\silly_test',
            'a': 'hello - %(b)s',
        }
        return cfg

    @pytest.fixture
    def template_cfg(self, cfg_contents):
        """string.Template-style interpolation fixture ($$ escapes a '$')."""
        interp_cfg = '''
[DEFAULT]
keyword1 = value1
'keyword 2' = 'value 2'
reference = ${keyword1}
foo = 123
[ section ]
templatebare = $keyword1/foo
bar = $$foo
dollar = $$300.00
stophere = $$notinterpolated
with_braces = ${keyword1}s (plural)
with_spaces = ${keyword 2}!!!
with_several = $keyword1/$reference/$keyword1
configparsersample = %(keyword 2)sconfig
deep = ${reference}
[[DEFAULT]]
baz = $foo
[[ sub-section ]]
quux = '$baz + $bar + $foo'
[[[ sub-sub-section ]]]
convoluted = "$bar + $baz + $quux + $bar"
'''
        return ConfigObj(cfg_contents(interp_cfg), interpolation='Template')

    def test_interpolation(self, config_parser_cfg):
        """%(name)s references resolve through section and DEFAULT scopes."""
        test_section = config_parser_cfg['section']
        assert test_section['a'] == r'c:\\silly_test\\some path\\file.py'
        assert test_section['b'] == r'c:\\home\\some path\\file.py'
        assert test_section['c'] == r'Yo c:\\silly_test\\some path\\file.py'

    def test_interpolation_turned_off(self, config_parser_cfg):
        """With interpolation disabled, the raw %(name)s text is returned."""
        config_parser_cfg.interpolation = False
        test_section = config_parser_cfg['section']
        assert test_section['a'] == r'%(datadir)s\\some path\\file.py'
        assert test_section['b'] == r'%(userdir)s\\some path\\file.py'
        assert test_section['c'] == r'Yo %(a)s'

    def test_handle_errors(self, config_parser_cfg):
        """Missing references and reference cycles raise dedicated errors."""
        with pytest.raises(MissingInterpolationOption) as excinfo:
            print(config_parser_cfg['section']['d'])
        assert (str(excinfo.value) ==
                'missing option "not_here" in interpolation.')
        with pytest.raises(InterpolationLoopError) as excinfo:
            print(config_parser_cfg['section']['e'])
        assert (str(excinfo.value) ==
                'interpolation loop detected in value "e".')

    def test_template_interpolation(self, template_cfg):
        """Template-style expansion, $$ escaping and nested lookups."""
        test_sec = template_cfg['section']
        assert test_sec['templatebare'] == 'value1/foo'
        assert test_sec['dollar'] == '$300.00'
        assert test_sec['stophere'] == '$notinterpolated'
        assert test_sec['with_braces'] == 'value1s (plural)'
        assert test_sec['with_spaces'] == 'value 2!!!'
        assert test_sec['with_several'] == 'value1/value1/value1'
        # ConfigParser syntax is inert in Template mode.
        assert test_sec['configparsersample'] == '%(keyword 2)sconfig'
        assert test_sec['deep'] == 'value1'
        assert test_sec['sub-section']['quux'] == '123 + $foo + 123'
        assert (test_sec['sub-section']['sub-sub-section']['convoluted'] ==
                '$foo + 123 + 123 + $foo + 123 + $foo')
class TestQuotes(object):
    """
    Tests what happens when dealing with quotes.
    """

    def assert_bad_quote_message(self, empty_cfg, to_quote, **kwargs):
        """Helper: _quote(to_quote) must raise with the canonical message."""
        message = 'Value cannot be safely quoted: {0!r}'
        with pytest.raises(ConfigObjError) as excinfo:
            empty_cfg._quote(to_quote, **kwargs)
        assert str(excinfo.value) == message.format(to_quote)

    def test_handle_unbalanced(self, i):
        """A value containing both triple-quote kinds cannot be quoted."""
        self.assert_bad_quote_message(i, '"""\'\'\'')

    def test_handle_unallowed_newline(self, i):
        """Newlines are unquotable when multiline quoting is disallowed."""
        newline = '\n'
        self.assert_bad_quote_message(i, newline, multiline=False)

    def test_handle_unallowed_open_quote(self, i):
        """Mixed embedded quotes are unquotable without multiline quoting."""
        open_quote = ' "\' '
        self.assert_bad_quote_message(i, open_quote, multiline=False)

    def test_handle_multiple_bad_quote_values(self):
        """Each unquotable value is collected into one ConfigObjError."""
        testconfig5 = '''
config = "hello # comment
test = 'goodbye
fish = 'goodbye # comment
dummy = "hello again
'''
        with pytest.raises(ConfigObjError) as excinfo:
            ConfigObj(testconfig5.splitlines())
        assert len(excinfo.value.errors) == 4

    def test_triple_quote_newline_roundtrip(self):
        """
        Values with one kind of triple quote should save and load again.
        From bzr bug lp:710410 and unit test written by bialix.
        """
        initial_conf = ConfigObj()
        initial_conf['single'] = "single triple '''\n"
        initial_conf['double'] = 'double triple """\n'
        io = six.BytesIO()
        initial_conf.write(outfile=io)
        io.seek(0)
        loaded_conf = ConfigObj(io)
        assert loaded_conf['single'] == "single triple '''\n"
        assert loaded_conf['double'] == 'double triple """\n'
def test_handle_stringify_off():
    """With stringify disabled, assigning a non-string value must raise."""
    cfg = ConfigObj()
    cfg.stringify = False
    with pytest.raises(TypeError) as caught:
        cfg['test'] = 1
    expected_message = 'Value is not a string "1".'
    assert str(caught.value) == expected_message
class TestValues(object):
    """
    Tests specifics about behaviors with types of values.
    """

    @pytest.fixture
    def testconfig3(self, cfg_contents):
        """Config whose values are comma lists with assorted spacing."""
        return cfg_contents("""
a = ,
b = test,
c = test1, test2 , test3
d = test1, test2, test3,
""")

    def test_empty_values(self, cfg_contents):
        """Empty values parse to '' scalars, or [] when a comma makes a list."""
        cfg_with_empty = cfg_contents("""
k =
k2 =# comment test
val = test
val2 = ,
val3 = 1,
val4 = 1, 2
val5 = 1, 2, """)
        cwe = ConfigObj(cfg_with_empty)
        # see a comma? it's a list
        assert cwe == {'k': '', 'k2': '', 'val': 'test', 'val2': [],
                       'val3': ['1'], 'val4': ['1', '2'], 'val5': ['1', '2']}
        # not any more
        cwe = ConfigObj(cfg_with_empty, list_values=False)
        assert cwe == {'k': '', 'k2': '', 'val': 'test', 'val2': ',',
                       'val3': '1,', 'val4': '1, 2', 'val5': '1, 2,'}

    def test_list_values(self, testconfig3):
        """Trailing commas and uneven spacing normalise into clean lists."""
        cfg = ConfigObj(testconfig3, raise_errors=True)
        assert cfg['a'] == []
        assert cfg['b'] == ['test']
        assert cfg['c'] == ['test1', 'test2', 'test3']
        assert cfg['d'] == ['test1', 'test2', 'test3']

    def test_list_values_off(self, testconfig3):
        """list_values=False leaves the raw right-hand side untouched."""
        cfg = ConfigObj(testconfig3, raise_errors=True, list_values=False)
        assert cfg['a'] == ','
        assert cfg['b'] == 'test,'
        assert cfg['c'] == 'test1, test2 , test3'
        assert cfg['d'] == 'test1, test2, test3,'

    def test_handle_multiple_list_value_errors(self):
        """All malformed list lines are gathered into one ConfigObjError."""
        testconfig4 = '''
config = 3,4,,
test = 3,,4
fish = ,,
dummy = ,,hello, goodbye
'''
        with pytest.raises(ConfigObjError) as excinfo:
            ConfigObj(testconfig4.splitlines())
        assert len(excinfo.value.errors) == 4
def test_creating_with_a_dictionary():
    """A ConfigObj built from a dict equals the dict without aliasing it."""
    leaf = {'key1': 'val1', 'key2': 'val2'}
    dictionary_cfg_content = {
        'key1': 'val1',
        'key2': 'val2',
        'section 1': dict(leaf, **{'section 1b': dict(leaf)}),
        'section 2': dict(leaf, **{'section 2b': dict(leaf)}),
        'key3': 'val3',
    }
    cfg = ConfigObj(dictionary_cfg_content)
    # Equal content, but distinct objects at every level.
    assert cfg == dictionary_cfg_content
    assert cfg is not dictionary_cfg_content
    assert cfg.dict() == dictionary_cfg_content
    assert cfg.dict() is not dictionary_cfg_content
class ConfigObjPHP(ConfigObj):
    """ConfigObj variant accepting PHP-style ';' comments as well as '#'."""
    COMMENT_MARKERS = ['#', ';']
class TestComments(object):
    """Preservation of initial/inline/final comments and custom markers."""

    @pytest.fixture
    def comment_filled_cfg(self, cfg_contents):
        """Config exercising every comment position ConfigObj tracks."""
        return cfg_contents("""
# initial comments
# with two lines
key = "value"
# section comment
[section] # inline section comment
# key comment
key = "value"
# final comment
# with two lines"""
        )

    def test_multiline_comments(self, i):
        """Multiline values in the shared fixture parse as expected."""
        expected_multiline_value = '\nWell, this is a\nmultiline value\n'
        assert i == {
            'name4': ' another single line value ',
            'multi section': {
                'name4': expected_multiline_value,
                'name2': expected_multiline_value,
                'name3': expected_multiline_value,
                'name1': expected_multiline_value,
            },
            'name2': ' another single line value ',
            'name3': ' a single line value ',
            'name1': ' a single line value ',
        }

    def test_starting_and_ending_comments(self, a, testconfig1, cfg_contents):
        """initial_comment/final_comment survive a write() round-trip."""
        filename = a.filename
        a.filename = None
        values = a.write()
        index = 0
        # Each of the first 23 written lines carries its numbered comment.
        while index < 23:
            index += 1
            line = values[index-1]
            assert line.endswith('# comment ' + str(index))
        a.filename = filename
        start_comment = ['# Initial Comment', '', '#']
        end_comment = ['', '#', '# Final Comment']
        newconfig = start_comment + testconfig1.splitlines() + end_comment
        nc = ConfigObj(newconfig)
        assert nc.initial_comment == ['# Initial Comment', '', '#']
        assert nc.final_comment == ['', '#', '# Final Comment']
        # Redundant with the two asserts above, kept for explicitness.
        assert nc.initial_comment == start_comment
        assert nc.final_comment == end_comment

    def test_inline_comments(self):
        """An inline comment set programmatically is written after the value."""
        c = ConfigObj()
        c['foo'] = 'bar'
        c.inline_comments['foo'] = 'Nice bar' # pylint: disable=unsubscriptable-object
        assert c.write() == ['foo = bar # Nice bar']

    def test_unrepr_comments(self, comment_filled_cfg):
        """Comment bookkeeping is identical in unrepr mode."""
        c = ConfigObj(comment_filled_cfg, unrepr=True)
        assert c == { 'key': 'value', 'section': { 'key': 'value'}}
        assert c.initial_comment == [
            '', '# initial comments', '# with two lines'
        ]
        assert c.comments == {'section': ['# section comment'], 'key': []}
        assert c.inline_comments == {
            'section': ' # inline section comment', 'key': ''
        }
        assert c['section'].comments == { 'key': ['# key comment']}
        assert c.final_comment == ['', '# final comment', '# with two lines']

    def test_comments(self, comment_filled_cfg):
        """Every comment position is captured on a normal parse."""
        c = ConfigObj(comment_filled_cfg)
        assert c == { 'key': 'value', 'section': { 'key': 'value'}}
        assert c.initial_comment == [
            '', '# initial comments', '# with two lines'
        ]
        assert c.comments == {'section': ['# section comment'], 'key': []}
        assert c.inline_comments == {
            'section': ' # inline section comment', 'key': ''
        }
        assert c['section'].comments == { 'key': ['# key comment']}
        assert c.final_comment == ['', '# final comment', '# with two lines']

    def test_comment_markers(self, cfg_contents):
        """A subclass may accept extra comment markers (here ';')."""
        cfgfile = cfg_contents("""; comment
[php] # section marker
;INLINE NOT SUPPORTED YET [php] ; section marker
; Boolean: true, on, yes or false, off, no, none
switch = off
track_errors = yes
; string in double-quotes
include_path = ".:/usr/local/lib/php"
""")
        c = ConfigObjPHP(cfgfile)
        assert c == dict(php=dict(switch='off', track_errors='yes', include_path=".:/usr/local/lib/php"))
        assert c.initial_comment == ['; comment']

    def test_write_back_comment_markers(self, cfg_contents):
        """Alternative markers survive a write() round-trip unchanged."""
        lines = (
            '# initial comment', '; 2nd line',
            '[sect_name]', '; section comment', 'foo = bar',
            '', '; final comment')
        c = ConfigObjPHP(lines)
        for expected, got in zip(lines, c.write()):
            assert expected == got
def test_overwriting_filenames(a, b, i):
    """Each fixture config written under a new filename reads back equal.

    The temporary ``test.ini`` is now removed in ``finally`` blocks so a
    failing assertion no longer leaves it behind to pollute later tests.
    """
    # TODO: I'm not entirely sure what this test is actually asserting
    filename = a.filename
    a.filename = 'test.ini'
    try:
        a.write()
        a.filename = filename
        assert a == ConfigObj('test.ini', raise_errors=True)
    finally:
        if os.path.exists('test.ini'):
            os.remove('test.ini')
    b.filename = 'test.ini'
    try:
        b.write()
        assert b == ConfigObj('test.ini', raise_errors=True)
    finally:
        if os.path.exists('test.ini'):
            os.remove('test.ini')
    i.filename = 'test.ini'
    try:
        i.write()
        assert i == ConfigObj('test.ini', raise_errors=True)
    finally:
        if os.path.exists('test.ini'):
            os.remove('test.ini')
def test_interpolation_using_default_sections():
    """A %(name)s value referencing [DEFAULT] is written out unexpanded."""
    cfg = ConfigObj()
    cfg['DEFAULT'] = {'a' : 'fish'}
    cfg['a'] = '%(a)s'
    expected_lines = ['a = %(a)s', '[DEFAULT]', 'a = fish']
    assert cfg.write() == expected_lines
class TestMerging(object):
    """merge() coupling semantics controlled by the ``decoupled`` flag."""

    @pytest.mark.parametrize('data', ((False, 42), (True, '3')))
    def test_merge_coupling(self, data):
        """decoupled=False shares subsections; decoupled=True copies them."""
        c1 = ConfigObj("foo = 1,2|[sect]|val=3".split('|'))
        c2 = ConfigObj("bar = 3".split('|'))
        c2.merge(c1, decoupled=data[0])
        assert c2['sect']['val'] == '3'
        # Mutating the source shows through only when the merge was coupled.
        c1['sect']['val'] = 42
        assert c2['sect']['val'] == data[1]
class TestErrorReporting(object):
    """Aggregated parse-error reporting (message text and error list)."""

    # Two duplicate '[]' sections plus a duplicate key when doubled.
    MULTI_ERROR = [
        '[]',
        'a = b',
        '[]',
        'c = d',
    ]

    @pytest.mark.parametrize('cfg_content', (MULTI_ERROR, MULTI_ERROR * 2))
    def test_multi_error(self, cfg_content):
        """Error counts and line numbers scale with the number of failures."""
        try:
            ConfigObj(cfg_content)
        except ConfigObjError as cause:
            msg = str(cause)
            first = cause.errors[0]
            ##print(msg); assert False
            assert type(cause.config) is ConfigObj
            assert re.search(r'failed with \d+ errors', msg)
            assert first.line == '[]'
            assert first.line_number == 1
            if len(cause.errors) < 3:
                assert "2 errors" in msg
                assert 'at line 3' in msg
                assert 'at line 5' not in msg
            else:
                assert "6 errors" in msg
                assert 'at line 5' in msg
                assert 'Duplicate keyword name' in msg
                assert '1 more error' in msg
        else:
            # Previously the test passed silently when nothing was raised.
            pytest.fail('ConfigObjError was not raised')
class TestIndentationAndFormatting(object):
    """Round-tripping of indentation styles, tabs and comment spacing."""

    # NOTE(review): the leading whitespace inside these literals appears to
    # have been collapsed by the extraction this file went through -- verify
    # against the upstream test suite before trusting the exact spacing.
    MAX_TABBED_CFG = ['[sect]', ' [[sect]]', ' foo = bar']
    COMMENT_SPACING = [
        '# left',
        ' # indented',
        'trailspace = whitespace after value ',
        'foo = 1# snug',
        '[section] # section indented',
        'bar = 1 # inline',
        'baz = 3 # inline indented',
    ]
    GIT_SECTIONS = [
        '[branch "master"]',
        'remote = origin',
    ]

    @pytest.fixture
    def max_tabbed_cfg(self):
        """Expose the class constant as a fixture for parametrised tests."""
        return self.MAX_TABBED_CFG

    def test_write_dictionary(self):
        """Writing a nested dict uses the default indentation per level."""
        assert ConfigObj({'sect': {'sect': {'foo': 'bar'}}}).write() == [
            '[sect]', ' [[sect]]', ' foo = bar'
        ]

    @pytest.mark.parametrize('cfg_content', (
        ['[sect]', '[[sect]]', 'foo = bar'],
        ['[sect]', ' [[sect]]', ' foo = bar'],
        MAX_TABBED_CFG, COMMENT_SPACING, GIT_SECTIONS))
    def test_formatting_preserved(self, cfg_content):
        """Whatever indentation the input used is written back verbatim."""
        assert ConfigObj(cfg_content).write() == cfg_content

    def test_handle_tabs_vs_spaces(self, max_tabbed_cfg):
        """Tab indents round-trip; indent_type overrides them on write."""
        one_tab = ['[sect]', '\t[[sect]]', '\t\tfoo = bar']
        two_tabs = ['[sect]', '\t\t[[sect]]', '\t\t\t\tfoo = bar']
        tabs_and_spaces = [b'[sect]', b'\t \t [[sect]]',
                           b'\t \t \t \t foo = bar']
        assert ConfigObj(one_tab).write() == one_tab
        assert ConfigObj(two_tabs).write() == two_tabs
        assert ConfigObj(tabs_and_spaces).write() == [s.decode('utf-8') for s in tabs_and_spaces]
        assert ConfigObj(max_tabbed_cfg, indent_type=chr(9)).write() == one_tab
        assert ConfigObj(one_tab, indent_type=' ').write() == max_tabbed_cfg
class TestEdgeCasesWhenWritingOut(object):
    """Newline handling, hash escaping and line-ending detection on write."""

    def test_newline_terminated(self, empty_cfg):
        """Output written to a stream ends with the configured newline."""
        empty_cfg.newlines = '\n'
        empty_cfg['a'] = 'b'
        collector = six.BytesIO()
        empty_cfg.write(collector)
        assert collector.getvalue() == b'a = b\n'

    def test_hash_escaping(self, empty_cfg):
        """Keys/values containing '#' are quoted so the hash isn't a comment."""
        empty_cfg.newlines = '\n'
        empty_cfg['#a'] = 'b # something'
        collector = six.BytesIO()
        empty_cfg.write(collector)
        assert collector.getvalue() == b'"#a" = "b # something"\n'

        empty_cfg = ConfigObj()
        empty_cfg.newlines = '\n'
        empty_cfg['a'] = 'b # something', 'c # something'
        collector = six.BytesIO()
        empty_cfg.write(collector)
        assert collector.getvalue() == b'a = "b # something", "c # something"\n'

    def test_detecting_line_endings_from_existing_files(self):
        """ConfigObj detects the newline convention of an existing file.

        The file is written in binary mode: text mode would translate
        '\\n' to os.linesep (breaking the '\\r\\n' case on Windows), and
        try/finally guarantees cleanup even when the assertion fails.
        """
        for expected_line_ending in ('\r\n', '\n'):
            with open('temp', 'wb') as h:
                h.write(expected_line_ending.encode('ascii'))
            try:
                c = ConfigObj('temp')
                assert c.newlines == expected_line_ending
            finally:
                os.remove('temp')

    def test_writing_out_dict_value_with_unrepr(self):
        """A dict value survives repr() and write() in unrepr mode.

        Regression test for issue #42.
        """
        cfg = [str('thing = {"a": 1}')]
        c = ConfigObj(cfg, unrepr=True)
        assert repr(c) == "ConfigObj({'thing': {'a': 1}})"
        assert c.write() == ["thing = {'a': 1}"]
| 31.397173 | 144 | 0.545749 |
14451d01b4ed22ca8f47fa846b6b8f31623169db | 1,688 | py | Python | config/wsgi.py | nxxc/nxxcgram | bad344d92f75ee46bfadf3b5dacbe99668c9e9ca | [
"MIT"
] | null | null | null | config/wsgi.py | nxxc/nxxcgram | bad344d92f75ee46bfadf3b5dacbe99668c9e9ca | [
"MIT"
] | 8 | 2020-06-05T19:40:44.000Z | 2022-02-26T13:25:34.000Z | config/wsgi.py | nxxc/nxxcgram | bad344d92f75ee46bfadf3b5dacbe99668c9e9ca | [
"MIT"
] | null | null | null | """
WSGI config for nxxcgram project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# nxxcgram directory.
# (Puts <project>/nxxcgram on sys.path so app modules import without a
# package prefix.)
app_path = os.path.abspath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'nxxcgram'))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 41.170732 | 79 | 0.796801 |
72705a102cdb744e97607dc473f2ab0883771e53 | 1,839 | py | Python | cloudhands/web/test/test_indexer.py | cedadev/cloudhands-web | 2e83df3111c286a54ef3957e873c9ea29c523366 | [
"BSD-3-Clause"
] | null | null | null | cloudhands/web/test/test_indexer.py | cedadev/cloudhands-web | 2e83df3111c286a54ef3957e873c9ea29c523366 | [
"BSD-3-Clause"
] | null | null | null | cloudhands/web/test/test_indexer.py | cedadev/cloudhands-web | 2e83df3111c286a54ef3957e873c9ea29c523366 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# encoding: UTF-8
import tempfile
import unittest
import whoosh.fields
from whoosh.query import Or
from whoosh.query import FuzzyTerm
from whoosh.query import Term
from cloudhands.web.indexer import create as create_index
from cloudhands.web.indexer import indexer
from cloudhands.web.indexer import ldap_types
from cloudhands.web.indexer import people
from cloudhands.web.indexer import Person
class TestIndexer(unittest.TestCase):
    """Tests for cloudhands.web.indexer index creation and people search."""

    @staticmethod
    def _populate(ix, field):
        """Write ten documents numbered 0-9, storing 'User <n>' in *field*.

        Extracted from the three tests below, which previously repeated the
        same writer loop verbatim.
        """
        wrtr = ix.writer()
        for i in range(10):
            wrtr.add_document(**{"id": str(i), field: "User {}".format(i)})
        wrtr.commit()

    def test_custom_search(self):
        """A custom schema field is searchable via an explicit whoosh query."""
        with tempfile.TemporaryDirectory() as td:
            ix = create_index(td, descr=whoosh.fields.TEXT(stored=True))
            self._populate(ix, "descr")
            srch = ix.searcher()
            query = Or([Term("id", "0"), Term("id", "9")])
            hits = srch.search(query)
            self.assertEqual(2, len(hits))

    def test_simple_people_search(self):
        """people() finds all documents via the default LDAP-style fields."""
        with tempfile.TemporaryDirectory() as td:
            ix = create_index(td, **ldap_types)
            self._populate(ix, "gecos")
            ppl = list(people(td, "User"))
            self.assertEqual(10, len(ppl))
            self.assertTrue(all(isinstance(i, Person) for i in ppl))

    def test_custom_people_search(self):
        """people() can search a caller-specified field."""
        with tempfile.TemporaryDirectory() as td:
            ix = create_index(td, descr=whoosh.fields.TEXT(stored=True))
            self._populate(ix, "descr")
            ppl = list(people(td, "User", "descr"))
            self.assertEqual(10, len(ppl))
089f98dbb0b31e145b7bbffe33efde16a8c56e9b | 12,822 | py | Python | crowdstrike/src/crowdstrike/rule/yara_master_importer.py | djds/connectors | 5fcbeb447345f2cbda1cfdbd0f9471ec60de4f7f | [
"Apache-2.0"
] | null | null | null | crowdstrike/src/crowdstrike/rule/yara_master_importer.py | djds/connectors | 5fcbeb447345f2cbda1cfdbd0f9471ec60de4f7f | [
"Apache-2.0"
] | 2 | 2021-04-29T11:32:29.000Z | 2021-05-31T14:23:52.000Z | crowdstrike/src/crowdstrike/rule/yara_master_importer.py | 0snap/connectors | a2ee3328504fd89a634fe57844d91c6be1c48dd4 | [
"Apache-2.0"
] | 1 | 2021-11-01T16:05:15.000Z | 2021-11-01T16:05:15.000Z | # -*- coding: utf-8 -*-
"""OpenCTI CrowdStrike YARA master importer module."""
import itertools
import zipfile
from datetime import datetime
from io import BytesIO
from typing import Any, Dict, List, Mapping, NamedTuple, Optional, Tuple
from crowdstrike_client.api.intel import Reports, Rules
from crowdstrike_client.api.models.download import Download
from pycti.connector.opencti_connector_helper import OpenCTIConnectorHelper # type: ignore # noqa: E501
from requests import RequestException
from stix2 import Bundle, Identity, MarkingDefinition # type: ignore
from stix2.exceptions import STIXError # type: ignore
from crowdstrike.importer import BaseImporter
from crowdstrike.utils.report_fetcher import FetchedReport, ReportFetcher
from crowdstrike.utils import datetime_to_timestamp, timestamp_to_datetime
from crowdstrike.rule.yara_master_builder import YaraRuleBundleBuilder
from crowdstrike.utils.yara_parser import YaraParser, YaraRule
class YaraMaster(NamedTuple):
    """YARA Master."""

    # Rules parsed out of the downloaded yara-master archive.
    rules: List[YaraRule]
    # HTTP caching metadata returned with the download (may be absent).
    e_tag: Optional[str]
    last_modified: Optional[datetime]
class YaraMasterImporter(BaseImporter):
    """CrowdStrike YARA master importer."""

    # State-dict keys under which HTTP caching metadata is persisted
    # between runs (see run()).
    _E_TAG = "yara_master_e_tag"
    _LAST_MODIFIED = "yara_master_last_modified"
    # NOTE(review): _KEY_ID/_KEY_INDICATOR_PATTERN are not referenced in this
    # part of the class -- presumably field names used further down; confirm
    # before removing.
    _KEY_ID = "id"
    _KEY_INDICATOR_PATTERN = "pattern"
    def __init__(
        self,
        helper: OpenCTIConnectorHelper,
        rules_api: Rules,
        reports_api: Reports,
        author: Identity,
        tlp_marking: MarkingDefinition,
        update_existing_data: bool,
        report_status: int,
        report_type: str,
    ) -> None:
        """Initialize CrowdStrike YARA master importer.

        ``helper``, ``author``, ``tlp_marking`` and ``update_existing_data``
        are passed through to :class:`BaseImporter`; the remaining arguments
        configure rule fetching and report handling for this importer.
        """
        super().__init__(helper, author, tlp_marking, update_existing_data)
        self.rules_api = rules_api
        self.report_status = report_status
        self.report_type = report_type
        # Fetches (and caches) the reports referenced by YARA rules; the
        # cache is cleared at the end of each run().
        self.report_fetcher = ReportFetcher(reports_api)
    def run(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Run importer.

        Downloads the latest YARA master, skips the whole run when it is
        unchanged since ``state``'s last-modified timestamp, imports new
        rules grouped via ``_group_yara_rules_by_report``, and returns the
        new state (etag / last-modified timestamp).
        """
        self._info("Running YARA master importer with state: {0}...", state)
        # Ignore the Etag, see the comment below.
        # e_tag = state.get(self._E_TAG)
        last_modified = state.get(self._LAST_MODIFIED)
        if last_modified is not None:
            last_modified = timestamp_to_datetime(last_modified)
        # XXX: Using Etag and Last-Modified results in HTTP 500.
        # yara_master = self._fetch_yara_master(e_tag, last_modified)
        yara_master = self._fetch_yara_master()
        latest_e_tag = yara_master.e_tag
        latest_last_modified = yara_master.last_modified
        # Short-circuit when the server copy is not newer than our state.
        if (
            last_modified is not None
            and latest_last_modified is not None
            and last_modified >= latest_last_modified
        ):
            self._info("YARA master not modified, skipping...")
            return state
        yara_rules = yara_master.rules
        yara_rule_count = len(yara_rules)
        self._info(
            "YARA master with {0} rules...",
            yara_rule_count,
        )
        # Rules already present in OpenCTI are updated in place; only the
        # remainder is imported below.
        new_yara_rules = self._update_existing(yara_rules)
        new_yara_rule_count = len(new_yara_rules)
        self._info(
            "{0} new YARA rules...",
            new_yara_rule_count,
        )
        grouped_yara_rules = self._group_yara_rules_by_report(new_yara_rules)
        group_count = len(grouped_yara_rules)
        self._info(
            "{0} YARA rule groups...",
            group_count,
        )
        for group, rules in grouped_yara_rules:
            self._info("YARA rule group: ({0}) {1}", len(rules), group)
        # Process each group, accumulating the number of failed rules.
        failed_count = 0
        for yara_rule_group in grouped_yara_rules:
            failed = self._process_yara_rule_group(yara_rule_group)
            failed_count += failed
        success_count = new_yara_rule_count - failed_count
        self._info(
            "YARA master importer completed (imported: {0}, total: {1}, e_tag: {2}, last_modified: {3})",  # noqa: E501
            success_count,
            new_yara_rule_count,
            latest_e_tag,
            latest_last_modified,
        )
        self._clear_report_fetcher_cache()
        # Persist the caching metadata for the next run (only if present).
        new_state: Dict[str, Any] = {}
        if latest_e_tag is not None:
            new_state[self._E_TAG] = latest_e_tag
        if latest_last_modified is not None:
            new_state[self._LAST_MODIFIED] = datetime_to_timestamp(latest_last_modified)
        return new_state
    def _clear_report_fetcher_cache(self) -> None:
        """Drop cached reports so the next run re-fetches them."""
        self._info("Clearing report fetcher cache...")
        self.report_fetcher.clear_cache()
def _fetch_yara_master(
self, e_tag: Optional[str] = None, last_modified: Optional[datetime] = None
) -> YaraMaster:
download = self._fetch_latest_yara_master(
e_tag=e_tag, last_modified=last_modified
)
return YaraMaster(
rules=self._parse_download(download),
e_tag=download.e_tag,
last_modified=download.last_modified,
)
    def _fetch_latest_yara_master(
        self, e_tag: Optional[str] = None, last_modified: Optional[datetime] = None
    ) -> Download:
        """Request the newest "yara-master" rule set file from the Rules API."""
        rule_set_type = "yara-master"
        return self.rules_api.get_latest_file(
            rule_set_type, e_tag=e_tag, last_modified=last_modified
        )
    def _parse_download(self, download: Download) -> List[YaraRule]:
        """Unzip the downloaded archive and parse the contained YARA rules."""
        yara_str = self._unzip_content(download.content)
        return self._parse_yara_rules(yara_str)
@staticmethod
def _unzip_content(compressed_content: BytesIO) -> str:
yara_master_filename = "crowdstrike_intel_yara.yara"
with zipfile.ZipFile(compressed_content) as z:
with z.open(yara_master_filename) as yara_master:
return yara_master.read().decode("utf-8")
    @staticmethod
    def _parse_yara_rules(yara_rules: str) -> List[YaraRule]:
        """Parse raw YARA source text into a list of rule models."""
        return YaraParser.parse(yara_rules)
def _update_existing(self, yara_rules: List[YaraRule]) -> List[YaraRule]:
"""Update YARA rules if they already exists in the OpenCTI."""
new_yara_rules = []
updated = 0
not_updated = 0
for yara_rule in yara_rules:
rule_updated = self._try_updating(yara_rule)
if rule_updated is None:
new_yara_rules.append(yara_rule)
else:
if rule_updated:
updated += 1
else:
not_updated += 1
existing = updated + not_updated
self._info("Updated {0} of {1} existing YARA rules", updated, existing)
return new_yara_rules
    def _try_updating(self, yara_rule: YaraRule) -> Optional[bool]:
        """Try updating YARA rule if it already exists in the OpenCTI.

        Returns ``None`` when no rule with the same name exists, otherwise the
        boolean result of the update attempt.
        """
        name = yara_rule.name
        existing_rule = self._find_rule_by_name(name)
        if existing_rule is None:
            return None
        return self._update_if_needed(yara_rule, existing_rule)
@staticmethod
def _group_yara_rules_by_report(
yara_rules: List[YaraRule],
) -> List[Tuple[str, List[YaraRule]]]:
def _key_func(item: YaraRule) -> str:
reports = item.reports
if reports:
sorted_reports = sorted(reports)
return "_".join(sorted_reports)
return ""
groups = []
sorted_yara_rules = sorted(yara_rules, key=_key_func)
for key, group in itertools.groupby(sorted_yara_rules, key=_key_func):
groups.append((key, list(group)))
return groups
def _process_yara_rule_group(
self, yara_rule_group: Tuple[str, List[YaraRule]]
) -> int:
group = yara_rule_group[0]
self._info("Processing YARA rule group '{0}'...", group)
yara_rules = yara_rule_group[1]
total_count = len(yara_rules)
failed_count = 0
for yara_rule in yara_rules:
fetched_reports = self._get_reports_by_code(yara_rule.reports)
yara_rule_bundle = self._create_yara_rule_bundle(yara_rule, fetched_reports)
if yara_rule_bundle is None:
failed_count += 1
# with open(f"yara_rule_bundle_{yara_rule.name}.json", "w") as f:
# f.write(yara_rule_bundle.serialize(pretty=True))
self._send_bundle(yara_rule_bundle)
success_count = total_count - failed_count
self._info(
"Completed processing YARA rule group '{0}' (imported: {1}, total: {2})",
group,
success_count,
total_count,
)
return failed_count
    def _update_if_needed(
        self, new_rule: YaraRule, existing_rule: Tuple[str, YaraRule]
    ) -> bool:
        """Update the existing indicator when the new rule is strictly newer.

        ``existing_rule`` is the ``(indicator_id, rule)`` pair returned by
        ``_find_rule_by_name``. Returns True only when an update was
        performed successfully.
        """
        new_rule_name = new_rule.name
        indicator_id, current_rule = existing_rule
        if self._needs_updating(current_rule, new_rule):
            updated = self._update_indicator_pattern(indicator_id, new_rule.rule)
            if updated:
                self._info("Rule '{0}' ({1}) updated", new_rule_name, indicator_id)
            else:
                self._error("Rule '{0}' ({1}) not updated", new_rule_name, indicator_id)
            return updated
        else:
            self._info("Not updating rule '{0}' ({1})", new_rule_name, indicator_id)
            return False
    def _find_rule_by_name(self, name: str) -> Optional[Tuple[str, YaraRule]]:
        """Look up an indicator by name and parse its pattern as a YARA rule.

        Returns an ``(indicator_id, rule)`` pair, or ``None`` when no matching
        indicator exists or its pattern cannot be mapped to exactly one rule.
        """
        indicator = self._fetch_indicator_by_name(name)
        if indicator is None:
            return None
        indicator_id = indicator.get(self._KEY_ID)
        # NOTE(review): the `is None` test is subsumed by the falsiness check;
        # kept as-is to preserve behavior byte-for-byte.
        if indicator_id is None or not indicator_id:
            self._error("Indicator '{0}' without ID", name)
            return None
        indicator_pattern = indicator.get(self._KEY_INDICATOR_PATTERN)
        if indicator_pattern is None or not indicator_pattern:
            self._error("Indicator '{0}' without pattern", name)
            return None
        rules = YaraParser.parse(indicator_pattern)
        if not rules:
            self._error("Indicator '{0}' pattern without YARA rules", name)
            return None
        # An updatable indicator must map to exactly one YARA rule.
        if len(rules) > 1:
            self._error(
                "Indicator '{0}' pattern contains more than one YARA rules", name
            )
            return None
        return indicator_id, rules[0]
def _fetch_indicator_by_name(self, name: str) -> Optional[Mapping[str, Any]]:
values = [name]
filters = [{"key": "name", "values": values, "operator": "eq"}]
return self.helper.api.indicator.read(filters=filters)
def _needs_updating(self, current_rule: YaraRule, new_rule: YaraRule) -> bool:
if current_rule.name != new_rule.name:
self._error(
"Current ({0}) and new ({1}) YARA rules names do no match",
current_rule.name,
new_rule.name,
)
return False
self._info(
"Current rule last modified '{0}, new rule last modified '{1}''",
current_rule.last_modified,
new_rule.last_modified,
)
if new_rule.last_modified > current_rule.last_modified:
return True
return False
    def _update_indicator_pattern(
        self, indicator_id: str, new_indicator_pattern: str
    ) -> bool:
        """Overwrite the indicator's pattern; return True on confirmed update."""
        updated = self.helper.api.stix_domain_object.update_field(
            id=indicator_id,
            key=self._KEY_INDICATOR_PATTERN,
            value=new_indicator_pattern,
        )
        if updated is None:
            return False
        # The API echoes the updated object; treat a matching ID as success.
        return updated.get(self._KEY_ID) == indicator_id
    def _get_reports_by_code(self, codes: List[str]) -> List[FetchedReport]:
        """Fetch reports for the given codes; network failures yield []."""
        try:
            return self.report_fetcher.get_by_codes(codes)
        except RequestException as e:
            # Best effort: log and continue with no reports attached.
            self._error("Failed to fetch reports {0}: {1}", codes, e)
            return []
    def _create_yara_rule_bundle(
        self, rule: YaraRule, reports: List[FetchedReport]
    ) -> Optional[Bundle]:
        """Build a STIX bundle for a rule; return None when building fails."""
        author = self.author
        source_name = self._source_name()
        object_marking_refs = [self.tlp_marking]
        confidence_level = self._confidence_level()
        report_status = self.report_status
        report_type = self.report_type
        bundle_builder = YaraRuleBundleBuilder(
            rule,
            author,
            source_name,
            object_marking_refs,
            confidence_level,
            report_status,
            report_type,
            reports,
        )
        try:
            return bundle_builder.build()
        except STIXError as e:
            # A malformed rule must not abort the whole group; report and skip.
            self._error(
                "Failed to build YARA rule bundle for '{0}': {1}",
                rule.name,
                e,
            )
            return None
| 33.131783 | 119 | 0.624942 |
3c4c64c8c646f246e6b591bddf54d2c637de0ef3 | 8,820 | py | Python | core/jobs/types/model_property_test.py | nbaddam/oppia | e58b81e57007f25537ba8ed71b42bfd9ae661799 | [
"Apache-2.0"
] | 5,422 | 2015-08-14T01:56:44.000Z | 2022-03-31T23:31:56.000Z | core/jobs/types/model_property_test.py | nbaddam/oppia | e58b81e57007f25537ba8ed71b42bfd9ae661799 | [
"Apache-2.0"
] | 14,178 | 2015-08-14T05:21:45.000Z | 2022-03-31T23:54:10.000Z | core/jobs/types/model_property_test.py | nbaddam/oppia | e58b81e57007f25537ba8ed71b42bfd9ae661799 | [
"Apache-2.0"
] | 3,574 | 2015-08-14T04:20:06.000Z | 2022-03-29T01:52:37.000Z | # coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for jobs.types.model_property."""
from __future__ import annotations
import pickle
from core.jobs.types import model_property
from core.platform import models
from core.tests import test_utils
(base_models,) = models.Registry.import_models([models.NAMES.base_model])
datastore_services = models.Registry.import_datastore_services()
class SubclassOfBaseModel(base_models.BaseModel):
    """Subclass of BaseModel with a StringProperty named 'value'."""
    # Simple string payload used by the tests to exercise property access.
    value = datastore_services.StringProperty()
class SubclassOfNdbModel(datastore_services.Model):
    """Subclass of NDB Model with a StringProperty named 'value'."""
    # Not a BaseModel subclass on purpose: used to test rejection of models
    # unrelated to BaseModel.
    value = datastore_services.StringProperty()
class RepeatedValueModel(base_models.BaseModel):
    """Subclass of BaseModel with a repeated StringProperty named 'values'."""
    # Repeated property: holds a list of strings rather than a single value.
    values = datastore_services.StringProperty(repeated=True)
class ModelPropertyTests(test_utils.TestBase):
    """Unit tests for model_property.ModelProperty.

    Covers construction validation, the model_kind/property_name accessors,
    str/repr formatting, equality and hashing, yielding values from model
    instances, and pickling round-trips.
    """
    def setUp(self):
        # Three representative properties: the special 'id' property, a plain
        # NDB property, and a repeated NDB property.
        self.id_property = model_property.ModelProperty(
            SubclassOfBaseModel, SubclassOfBaseModel.id)
        self.ndb_property = model_property.ModelProperty(
            SubclassOfBaseModel, SubclassOfBaseModel.value)
        self.ndb_repeated_property = model_property.ModelProperty(
            RepeatedValueModel, RepeatedValueModel.values)
    def test_init_with_id_property(self):
        # Does not raise.
        model_property.ModelProperty(
            SubclassOfBaseModel, SubclassOfBaseModel.id)
    def test_init_with_ndb_property(self):
        # Does not raise.
        model_property.ModelProperty(
            SubclassOfBaseModel, SubclassOfBaseModel.value)
    def test_init_with_ndb_repeated_property(self):
        # Does not raise.
        model_property.ModelProperty(
            RepeatedValueModel, RepeatedValueModel.values)
    def test_init_raises_type_error_when_model_is_not_a_class(self):
        # Passing an instance rather than a model class must be rejected.
        model = SubclassOfBaseModel()
        with self.assertRaisesRegexp(TypeError, 'not a model class'):
            model_property.ModelProperty(model, SubclassOfBaseModel.value)
    def test_init_raises_type_error_when_model_is_unrelated_to_base_model(self):
        with self.assertRaisesRegexp(TypeError, 'not a subclass of BaseModel'):
            model_property.ModelProperty(
                SubclassOfNdbModel, SubclassOfNdbModel.value)
    def test_init_raises_type_error_when_property_is_not_an_ndb_property(self):
        # model.value is a plain string here, not an NDB Property descriptor.
        model = SubclassOfBaseModel(value='123')
        with self.assertRaisesRegexp(TypeError, 'not an NDB Property'):
            model_property.ModelProperty(SubclassOfBaseModel, model.value)
    def test_init_raises_value_error_when_property_is_not_in_model(self):
        # The property must belong to the model class it is paired with.
        with self.assertRaisesRegexp(ValueError, 'not a property of'):
            model_property.ModelProperty(
                SubclassOfBaseModel, SubclassOfNdbModel.value)
    def test_model_kind_of_id_property(self):
        self.assertEqual(self.id_property.model_kind, 'SubclassOfBaseModel')
    def test_model_kind_of_ndb_property(self):
        self.assertEqual(self.ndb_property.model_kind, 'SubclassOfBaseModel')
    def test_model_kind_of_ndb_repeated_property(self):
        self.assertEqual(
            self.ndb_repeated_property.model_kind, 'RepeatedValueModel')
    def test_property_name_of_id_property(self):
        self.assertEqual(self.id_property.property_name, 'id')
    def test_property_name_of_ndb_property(self):
        self.assertEqual(self.ndb_property.property_name, 'value')
    def test_property_name_of_ndb_repeated_property(self):
        self.assertEqual(self.ndb_repeated_property.property_name, 'values')
    def test_str_of_id_property(self):
        # str() renders as "<ModelKind>.<property_name>".
        self.assertEqual(str(self.id_property), 'SubclassOfBaseModel.id')
    def test_str_of_ndb_property(self):
        self.assertEqual(str(self.ndb_property), 'SubclassOfBaseModel.value')
    def test_str_of_ndb_repeated_property(self):
        self.assertEqual(
            str(self.ndb_repeated_property), 'RepeatedValueModel.values')
    def test_repr_of_id_property(self):
        self.assertEqual(
            repr(self.id_property),
            'ModelProperty(SubclassOfBaseModel, SubclassOfBaseModel.id)')
    def test_repr_of_ndb_property(self):
        self.assertEqual(
            repr(self.ndb_property),
            'ModelProperty(SubclassOfBaseModel, SubclassOfBaseModel.value)')
    def test_repr_of_ndb_repeated_property(self):
        self.assertEqual(
            repr(self.ndb_repeated_property),
            'ModelProperty(RepeatedValueModel, RepeatedValueModel.values)')
    def test_equality(self):
        # Distinct (model, property) pairs compare unequal; equal pairs built
        # independently compare equal.
        self.assertNotEqual(self.id_property, self.ndb_property)
        self.assertNotEqual(self.ndb_property, self.ndb_repeated_property)
        self.assertNotEqual(self.ndb_repeated_property, self.id_property)
        self.assertEqual(
            self.id_property,
            model_property.ModelProperty(
                SubclassOfBaseModel, SubclassOfBaseModel.id))
        self.assertEqual(
            self.ndb_property,
            model_property.ModelProperty(
                SubclassOfBaseModel, SubclassOfBaseModel.value))
        self.assertEqual(
            self.ndb_repeated_property,
            model_property.ModelProperty(
                RepeatedValueModel, RepeatedValueModel.values))
    def test_hash_of_id_property(self):
        # Hashing must agree with equality: an independently-built equal
        # instance is found in a set, unequal instances are not.
        id_property_set = {
            model_property.ModelProperty(
                SubclassOfBaseModel, SubclassOfBaseModel.id),
        }
        self.assertIn(self.id_property, id_property_set)
        self.assertNotIn(self.ndb_property, id_property_set)
        self.assertNotIn(self.ndb_repeated_property, id_property_set)
    def test_hash_of_ndb_property(self):
        ndb_property_set = {
            model_property.ModelProperty(
                SubclassOfBaseModel, SubclassOfBaseModel.value),
        }
        self.assertIn(self.ndb_property, ndb_property_set)
        self.assertNotIn(self.id_property, ndb_property_set)
        self.assertNotIn(self.ndb_repeated_property, ndb_property_set)
    def test_hash_of_ndb_repeated_property(self):
        ndb_repeated_property_set = {
            model_property.ModelProperty(
                RepeatedValueModel, RepeatedValueModel.values),
        }
        self.assertIn(self.ndb_repeated_property, ndb_repeated_property_set)
        self.assertNotIn(self.id_property, ndb_repeated_property_set)
        self.assertNotIn(self.ndb_property, ndb_repeated_property_set)
    def test_yield_value_from_id_property(self):
        model = SubclassOfBaseModel(id='123')
        self.assertEqual(
            list(self.id_property.yield_value_from_model(model)), ['123'])
    def test_yield_value_from_ndb_property(self):
        model = SubclassOfBaseModel(value='abc')
        self.assertEqual(
            list(self.ndb_property.yield_value_from_model(model)), ['abc'])
    def test_yield_value_from_ndb_repeated_property(self):
        # Repeated properties yield each element individually.
        model = RepeatedValueModel(values=['123', '456', '789'])
        self.assertEqual(
            list(self.ndb_repeated_property.yield_value_from_model(model)),
            ['123', '456', '789'])
    def test_yield_value_from_model_raises_type_error_if_not_right_kind(self):
        # Yielding from a model of the wrong kind must fail loudly.
        model = RepeatedValueModel(values=['123', '456', '789'])
        self.assertRaisesRegexp(
            TypeError, 'not an instance of SubclassOfBaseModel',
            lambda: list(self.ndb_property.yield_value_from_model(model)))
    def test_pickle_id_property(self):
        # Pickle round-trips must preserve equality and hash behavior.
        pickle_value = pickle.loads(pickle.dumps(self.id_property))
        self.assertEqual(self.id_property, pickle_value)
        self.assertIn(pickle_value, {self.id_property})
    def test_pickle_ndb_property(self):
        pickle_value = pickle.loads(pickle.dumps(self.ndb_property))
        self.assertEqual(self.ndb_property, pickle_value)
        self.assertIn(pickle_value, {self.ndb_property})
    def test_pickle_ndb_repeated_property(self):
        pickle_value = pickle.loads(pickle.dumps(self.ndb_repeated_property))
        self.assertEqual(self.ndb_repeated_property, pickle_value)
        self.assertIn(pickle_value, {self.ndb_repeated_property})
| 38.017241 | 80 | 0.723356 |
f2cf509e92d5dded6662c3872dc7fa69db779453 | 9,266 | py | Python | src/greengrassHelloWorld.py | rdando/deeplens-cropped-face-detection | 8151cf04e0d765ca343a13ba548278640dfb81ae | [
"MIT"
] | 1 | 2019-06-29T10:13:54.000Z | 2019-06-29T10:13:54.000Z | src/greengrassHelloWorld.py | rdando/deeplens-cropped-face-detection | 8151cf04e0d765ca343a13ba548278640dfb81ae | [
"MIT"
] | null | null | null | src/greengrassHelloWorld.py | rdando/deeplens-cropped-face-detection | 8151cf04e0d765ca343a13ba548278640dfb81ae | [
"MIT"
] | null | null | null | # *****************************************************
# *
# Copyright 2018 Amazon.com, Inc. or its affiliates. *
# All Rights Reserved. *
# *
# *****************************************************
""" A sample lambda for face detection"""
from threading import Thread, Event
import os
import time
import json
import numpy as np
import awscam
import cv2
import greengrasssdk
import base64
class LocalDisplay(Thread):
    """ Class for facilitating the local display of inference results
        (as images). The class is designed to run on its own thread. In
        particular the class dumps the inference results into a FIFO
        located in the tmp directory (which lambda has access to). The
        results can be rendered using mplayer by typing:
        mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 /tmp/results.mjpeg
    """
    def __init__(self, resolution):
        """ resolution - Desired resolution of the project stream """
        # Initialize the base class, so that the object can run on its own
        # thread.
        super(LocalDisplay, self).__init__()
        # Map of supported stream names to (width, height) pixel sizes.
        RESOLUTION = {'1080p': (1920, 1080), '720p': (1280, 720), '480p': (858, 480)}
        if resolution not in RESOLUTION:
            raise Exception("Invalid resolution")
        self.resolution = RESOLUTION[resolution]
        # Initialize the default image to be a white canvas. Clients
        # will update the image when ready.
        self.frame = cv2.imencode('.jpg', 255 * np.ones([640, 480, 3]))[1]
        self.stop_request = Event()
    def run(self):
        """ Overridden method that continually dumps images to the desired
            FIFO file.
        """
        # Path to the FIFO file. The lambda only has permissions to the tmp
        # directory. Pointing to a FIFO file in another directory
        # will cause the lambda to crash.
        result_path = '/tmp/results.mjpeg'
        # Create the FIFO file if it doesn't exist.
        if not os.path.exists(result_path):
            os.mkfifo(result_path)
        # This call will block until a consumer is available
        with open(result_path, 'w') as fifo_file:
            while not self.stop_request.isSet():
                try:
                    # Write the data to the FIFO file. This call will block
                    # meaning the code will come to a halt here until a consumer
                    # is available.
                    fifo_file.write(self.frame.tobytes())
                except IOError:
                    continue
    def set_frame_data(self, frame):
        """ Method updates the image data. This currently encodes the
            numpy array to jpg but can be modified to support other encodings.
            frame - Numpy array containing the image data of the next frame
                    in the project stream.
        """
        ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))
        if not ret:
            raise Exception('Failed to set frame data')
        self.frame = jpeg
    def set_frame_data_padded(self, frame):
        """ Scale the frame down to fit the stream resolution (preserving its
            aspect ratio), pad it with grey to fill the screen, publish it to
            the stream, and return the padded image.
        """
        # Get image dimensions. NOTE: frame.shape is (height, width, channels)
        # while self.resolution is (width, height).
        image_height, image_width, image_channels = frame.shape
        target_width, target_height = self.resolution
        # Only shrink if image is bigger than required.
        # BUG FIX: the original compared the target width against the image
        # height (and vice versa), mis-scaling non-square frames.
        if target_width < image_width or target_height < image_height:
            # Use the most restrictive axis so the whole image fits.
            scaling_factor = min(target_width / float(image_width),
                                 target_height / float(image_height))
            # Resize image preserving aspect ratio.
            frame = cv2.resize(frame, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)
        # Get image dimensions and padding after scaling.
        image_height, image_width, image_channels = frame.shape
        x_padding = target_width - image_width
        y_padding = target_height - image_height
        if x_padding <= 0:
            x_padding_left, x_padding_right = 0, 0
        else:
            x_padding_left = int(np.floor(x_padding / 2))
            x_padding_right = int(np.ceil(x_padding / 2))
        if y_padding <= 0:
            y_padding_bottom, y_padding_top = 0, 0
        else:
            y_padding_bottom = int(np.floor(y_padding / 2))
            y_padding_top = int(np.ceil(y_padding / 2))
        # BUG FIX: the original format string had no placeholders, so the
        # computed padding values were never printed.
        print('Face Padding: {0}, {1}, {2}, {3}'.format(
            x_padding_left, x_padding_right, y_padding_bottom, y_padding_top))
        # Add grey padding to image to fill up screen resolution
        outputImage = cv2.copyMakeBorder(
            frame, y_padding_top, y_padding_bottom, x_padding_left,
            x_padding_right, cv2.BORDER_CONSTANT, value=[200, 200, 200]
        )
        ret, jpeg = cv2.imencode('.jpg', outputImage)
        if not ret:
            raise Exception('Failed to set frame data')
        self.frame = jpeg
        return outputImage
    def join(self):
        # Signals the run loop to exit. Note this intentionally shadows
        # Thread.join and does not wait for the thread to finish.
        self.stop_request.set()
def greengrass_infinite_infer_run():
    """Entry point of the lambda function.

    Loads the SSD face-detection model, then loops forever: grabs a camera
    frame, runs inference, crops detected faces, pushes them to the local
    display stream and publishes results (including the base64-encoded face
    crop) to the device's IoT topic.
    """
    try:
        # This face detection model is implemented as single shot detector (ssd).
        model_type = 'ssd'
        output_map = {1: 'face'}
        # Create an IoT client for sending to messages to the cloud.
        client = greengrasssdk.client('iot-data')
        iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = '/opt/awscam/artifacts/mxnet_deploy_ssd_FP16_FUSED.xml'
        # Load the model onto the GPU.
        client.publish(topic=iot_topic, payload='Loading face detection model')
        model = awscam.Model(model_path, {'GPU': 1})
        client.publish(topic=iot_topic, payload='Face detection model loaded')
        # Set the threshold for detection
        detection_threshold = 0.85 # TODO
        # The height and width of the training set images
        input_height = 300
        input_width = 300
        # Do inference until the lambda is killed.
        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results using
            # the parser API, note it is possible to get the output of doInference
            # and do the parsing manually, but since it is a ssd model,
            # a simple API is provided.
            parsed_inference_results = model.parseResult(model_type,
                                                         model.doInference(frame_resize))
            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0] / input_height)
            xscale = float(frame.shape[1] / input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            cloud_output = {}
            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            # Get the detected faces and probabilities
            for obj in parsed_inference_results[model_type]:
                if obj['prob'] > detection_threshold:
                    # Add bounding boxes to full resolution frame
                    xmin = int(xscale * obj['xmin']) \
                           + int((obj['xmin'] - input_width / 2) + input_width / 2)
                    ymin = int(yscale * obj['ymin'])
                    xmax = int(xscale * obj['xmax']) \
                           + int((obj['xmax'] - input_width / 2) + input_width / 2)
                    ymax = int(yscale * obj['ymax'])
                    # Add face detection to iot topic payload
                    cloud_output[output_map[obj['label']]] = obj['prob']
                    # Zoom in on Face. NOTE(review): the fixed 45/30 pixel
                    # margins can produce negative indices for faces near the
                    # frame edge — confirm intended cropping behavior.
                    crop_img = frame[ymin - 45:ymax + 45, xmin - 30:xmax + 30]
                    output_image = local_display.set_frame_data_padded(crop_img)
                    # Encode cropped face image and add to IoT message
                    frame_string_raw = cv2.imencode('.jpg', output_image)[1]
                    frame_string = base64.b64encode(frame_string_raw)
                    cloud_output['image_string'] = frame_string
            # Send results to the cloud
            client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
            time.sleep(1)
    except Exception as ex:
        # NOTE(review): if an exception fires before 'client'/'iot_topic' are
        # assigned (e.g. missing AWS_IOT_THING_NAME), this handler itself
        # raises NameError — confirm desired failure behavior.
        client.publish(topic=iot_topic, payload='Error in face detection lambda: {}'.format(ex))
# Start the infinite inference loop as soon as the lambda module is loaded.
greengrass_infinite_infer_run()
67a7613cd5994d30092e4f0caf3675998f5d14d1 | 11,979 | py | Python | conans/model/conan_file.py | ninjayash/conan | 00fbc925fde93a148abfbcebf236c6b4f2da0572 | [
"MIT"
] | 1 | 2020-11-07T21:25:57.000Z | 2020-11-07T21:25:57.000Z | conans/model/conan_file.py | ninjayash/conan | 00fbc925fde93a148abfbcebf236c6b4f2da0572 | [
"MIT"
] | null | null | null | conans/model/conan_file.py | ninjayash/conan | 00fbc925fde93a148abfbcebf236c6b4f2da0572 | [
"MIT"
] | null | null | null | import os
from contextlib import contextmanager
import six
from six import string_types
from conans.client import tools
from conans.client.output import ScopedOutput
from conans.client.tools.env import environment_append, no_op, pythonpath
from conans.client.tools.oss import OSInfo
from conans.errors import ConanException, ConanInvalidConfiguration
from conans.model.build_info import DepsCppInfo
from conans.model.env_info import DepsEnvInfo
from conans.model.options import Options, OptionsValues, PackageOptions
from conans.model.requires import Requirements
from conans.model.user_info import DepsUserInfo
from conans.paths import RUN_LOG_NAME
from conans.util.conan_v2_mode import CONAN_V2_MODE_ENVVAR
from conans.util.conan_v2_mode import conan_v2_behavior
from conans.util.env_reader import get_env
def create_options(conanfile):
try:
package_options = PackageOptions(getattr(conanfile, "options", None))
options = Options(package_options)
default_options = getattr(conanfile, "default_options", None)
if default_options:
if isinstance(default_options, dict):
default_values = OptionsValues(default_options)
elif isinstance(default_options, (list, tuple)):
conan_v2_behavior("Declare 'default_options' as a dictionary")
default_values = OptionsValues(default_options)
elif isinstance(default_options, six.string_types):
conan_v2_behavior("Declare 'default_options' as a dictionary")
default_values = OptionsValues.loads(default_options)
else:
raise ConanException("Please define your default_options as list, "
"multiline string or dictionary")
options.values = default_values
return options
except Exception as e:
raise ConanException("Error while initializing options. %s" % str(e))
def create_requirements(conanfile):
try:
# Actual requirements of this package
if not hasattr(conanfile, "requires"):
return Requirements()
else:
if not conanfile.requires:
return Requirements()
if isinstance(conanfile.requires, (tuple, list)):
return Requirements(*conanfile.requires)
else:
return Requirements(conanfile.requires, )
except Exception as e:
raise ConanException("Error while initializing requirements. %s" % str(e))
def create_settings(conanfile, settings):
    """Constrain the given settings to those declared by the recipe.

    A recipe may declare `settings` as a single string or a collection;
    constraint failures surface as ConanInvalidConfiguration.
    """
    try:
        defined_settings = getattr(conanfile, "settings", None)
        if isinstance(defined_settings, str):
            # Allow the shorthand of declaring a single setting as a string.
            defined_settings = [defined_settings]
        current = defined_settings or {}
        settings.constraint(current)
        return settings
    except Exception as e:
        raise ConanInvalidConfiguration("Error while initializing settings. %s" % str(e))
@contextmanager
def _env_and_python(conanfile):
    """Apply the conanfile environment and, unless Conan v2 mode is active,
    also extend PYTHONPATH from dependencies."""
    with environment_append(conanfile.env):
        if get_env(CONAN_V2_MODE_ENVVAR, False):
            # v2 mode drops the implicit PYTHONPATH handling.
            yield
        else:
            with pythonpath(conanfile):
                yield
def get_env_context_manager(conanfile, without_python=False):
    """Return the context manager that applies the recipe's environment.

    A no-op manager is returned when the recipe opts out via `apply_env`;
    `without_python` skips the PYTHONPATH extension.
    """
    if conanfile.apply_env:
        if without_python:
            return environment_append(conanfile.env)
        return _env_and_python(conanfile)
    return no_op()
class ConanFile(object):
""" The base class for all package recipes
"""
name = None
version = None # Any str, can be "1.1" or whatever
url = None # The URL where this File is located, as github, to collaborate in package
# The license of the PACKAGE, just a shortcut, does not replace or
# change the actual license of the source code
license = None
author = None # Main maintainer/responsible for the package, any format
description = None
topics = None
homepage = None
build_policy = None
short_paths = False
apply_env = True # Apply environment variables from requires deps_env_info and profiles
exports = None
exports_sources = None
generators = ["txt"]
revision_mode = "hash"
# Vars to control the build steps (build(), package())
should_configure = True
should_build = True
should_install = True
should_test = True
in_local_cache = True
develop = False
# Defaulting the reference fields
default_channel = None
default_user = None
# Settings and Options
settings = None
options = None
default_options = None
provides = None
deprecated = None
def __init__(self, output, runner, display_name="", user=None, channel=None):
# an output stream (writeln, info, warn error)
self.output = ScopedOutput(display_name, output)
self.display_name = display_name
# something that can run commands, as os.sytem
self._conan_runner = runner
self._conan_user = user
self._conan_channel = channel
self.compatible_packages = []
self._conan_using_build_profile = False
def initialize(self, settings, env):
if isinstance(self.generators, str):
self.generators = [self.generators]
# User defined options
self.options = create_options(self)
self.requires = create_requirements(self)
self.settings = create_settings(self, settings)
if 'cppstd' in self.settings.fields:
conan_v2_behavior("Setting 'cppstd' is deprecated in favor of 'compiler.cppstd',"
" please update your recipe.", v1_behavior=self.output.warn)
# needed variables to pack the project
self.cpp_info = None # Will be initialized at processing time
self._conan_dep_cpp_info = None # Will be initialized at processing time
self.deps_cpp_info = DepsCppInfo()
# environment variables declared in the package_info
self.env_info = None # Will be initialized at processing time
self.deps_env_info = DepsEnvInfo()
# user declared variables
self.user_info = None
# Keys are the package names (only 'host' if different contexts)
self.deps_user_info = DepsUserInfo()
# user specified env variables
self._conan_env_values = env.copy() # user specified -e
@property
def env(self):
"""Apply the self.deps_env_info into a copy of self._conan_env_values (will prioritize the
self._conan_env_values, user specified from profiles or -e first, then inherited)"""
# Cannot be lazy cached, because it's called in configure node, and we still don't have
# the deps_env_info objects available
tmp_env_values = self._conan_env_values.copy()
tmp_env_values.update(self.deps_env_info)
ret, multiple = tmp_env_values.env_dicts(self.name)
ret.update(multiple)
return ret
@property
def channel(self):
if not self._conan_channel:
_env_channel = os.getenv("CONAN_CHANNEL")
if _env_channel:
conan_v2_behavior("Environment variable 'CONAN_CHANNEL' is deprecated")
self._conan_channel = _env_channel or self.default_channel
if not self._conan_channel:
raise ConanException("channel not defined, but self.channel is used in conanfile")
return self._conan_channel
@property
def user(self):
if not self._conan_user:
_env_username = os.getenv("CONAN_USERNAME")
if _env_username:
conan_v2_behavior("Environment variable 'CONAN_USERNAME' is deprecated")
self._conan_user = _env_username or self.default_user
if not self._conan_user:
raise ConanException("user not defined, but self.user is used in conanfile")
return self._conan_user
def collect_libs(self, folder=None):
conan_v2_behavior("'self.collect_libs' is deprecated, use 'tools.collect_libs(self)' instead",
v1_behavior=self.output.warn)
return tools.collect_libs(self, folder=folder)
@property
def build_policy_missing(self):
return self.build_policy == "missing"
@property
def build_policy_always(self):
return self.build_policy == "always"
def source(self):
pass
def system_requirements(self):
""" this method can be overwritten to implement logic for system package
managers, as apt-get
You can define self.global_system_requirements = True, if you want the installation
to be for all packages (not depending on settings/options/requirements)
"""
def config_options(self):
""" modify options, probably conditioned to some settings. This call is executed
before config_settings. E.g.
if self.settings.os == "Windows":
del self.options.shared # shared/static not supported in win
"""
def configure(self):
""" modify settings, probably conditioned to some options. This call is executed
after config_options. E.g.
if self.options.header_only:
self.settings.clear()
This is also the place for conditional requirements
"""
def build(self):
""" build your project calling the desired build tools as done in the command line.
E.g. self.run("cmake --build .") Or use the provided build helpers. E.g. cmake.build()
"""
self.output.warn("This conanfile has no build step")
def package(self):
""" package the needed files from source and build folders.
E.g. self.copy("*.h", src="src/includes", dst="includes")
"""
self.output.warn("This conanfile has no package step")
def package_info(self):
""" define cpp_build_info, flags, etc
"""
def run(self, command, output=True, cwd=None, win_bash=False, subsystem=None, msys_mingw=True,
ignore_errors=False, run_environment=False, with_login=True):
def _run():
if not win_bash:
return self._conan_runner(command, output, os.path.abspath(RUN_LOG_NAME), cwd)
# FIXME: run in windows bash is not using output
return tools.run_in_windows_bash(self, bashcmd=command, cwd=cwd, subsystem=subsystem,
msys_mingw=msys_mingw, with_login=with_login)
if run_environment:
# When using_build_profile the required environment is already applied through 'conanfile.env'
# in the contextmanager 'get_env_context_manager'
with tools.run_environment(self) if not self._conan_using_build_profile else no_op():
if OSInfo().is_macos and isinstance(command, string_types):
# Security policy on macOS clears this variable when executing /bin/sh. To
# keep its value, set it again inside the shell when running the command.
command = 'DYLD_LIBRARY_PATH="%s" DYLD_FRAMEWORK_PATH="%s" %s' % \
(os.environ.get('DYLD_LIBRARY_PATH', ''),
os.environ.get("DYLD_FRAMEWORK_PATH", ''),
command)
retcode = _run()
else:
retcode = _run()
if not ignore_errors and retcode != 0:
raise ConanException("Error %d while executing %s" % (retcode, command))
return retcode
    def package_id(self):
        """Modify the binary info, typically to narrow values.

        E.g. ``self.info.settings.compiler = "Any"`` makes all compilers
        generate the same package ID.
        """
    def test(self):
        """Test the generated executable, e.g. ``self.run("./example")``.

        The default implementation raises: a test recipe must define its
        own ``test`` method.
        """
        raise ConanException("You need to create a method 'test' in your test/conanfile.py")
    def __repr__(self):
        # Use the display name assigned elsewhere as the canonical textual
        # representation of this recipe.
        return self.display_name
| 39.019544 | 106 | 0.656065 |
f5b1fff7abdcadeabda4a261234062241206f3a7 | 2,359 | py | Python | yardstick/benchmark/scenarios/lib/create_keypair.py | mythwm/yardstick | ea13581f450c9c44f6f73d383e6a192697a95cc1 | [
"Apache-2.0"
] | null | null | null | yardstick/benchmark/scenarios/lib/create_keypair.py | mythwm/yardstick | ea13581f450c9c44f6f73d383e6a192697a95cc1 | [
"Apache-2.0"
] | null | null | null | yardstick/benchmark/scenarios/lib/create_keypair.py | mythwm/yardstick | ea13581f450c9c44f6f73d383e6a192697a95cc1 | [
"Apache-2.0"
] | null | null | null | ##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import print_function
from __future__ import absolute_import
import logging
import paramiko
from yardstick.benchmark.scenarios import base
import yardstick.common.openstack_utils as op_utils
LOG = logging.getLogger(__name__)
class CreateKeypair(base.Scenario):
    """Create an OpenStack keypair.

    Generates a 2048-bit RSA key pair locally (private key at ``key_path``,
    public key alongside it with a ``.pub`` suffix) and registers the public
    key with OpenStack under ``key_name``.
    """
    __scenario_type__ = "CreateKeypair"
    def __init__(self, scenario_cfg, context_cfg):
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg
        self.options = self.scenario_cfg['options']
        # Name registered in OpenStack and local path for the private key.
        self.key_name = self.options.get("key_name", "yardstick_key")
        self.key_filename = self.options.get("key_path", "/tmp/yardstick_key")
        self.setup_done = False
    def setup(self):
        """scenario setup"""
        self.setup_done = True
    def run(self, result):
        """execute the test"""
        if not self.setup_done:
            self.setup()
        # Generate the key pair and persist both halves to disk.
        rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None)
        rsa_key.write_private_key_file(self.key_filename)
        LOG.info("Writing key_file %s ...", self.key_filename)
        with open(self.key_filename + ".pub", "w") as pubkey_file:
            pubkey_file.write(
                "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))
        del rsa_key
        # Register the public key with OpenStack; 1/0 in `result` reports
        # success/failure to the framework.
        keypair = op_utils.create_keypair(self.key_name,
                                          self.key_filename + ".pub")
        if keypair:
            result.update({"keypair_create": 1})
            LOG.info("Create keypair successful!")
        else:
            result.update({"keypair_create": 0})
            LOG.info("Create keypair failed!")
        try:
            keys = self.scenario_cfg.get('output', '').split()
        # NOTE(review): dict.get never raises KeyError, so this except is
        # dead code and the else branch always runs.
        except KeyError:
            pass
        else:
            # NOTE(review): if create_keypair returned a falsy value,
            # `keypair.id` will raise AttributeError here — confirm intent.
            values = [keypair.id]
            return self._push_to_outputs(keys, values)
| 32.763889 | 78 | 0.598559 |
5ad53ab119ec94240a7a76b5e6ae96e61655679b | 2,969 | py | Python | devutils/run_other_pylint.py | braewoods/ungoogled-chromium | fecd436a5793ecc94879dc4eef340ab44dc5c35f | [
"BSD-3-Clause"
] | 15,642 | 2015-06-13T19:55:52.000Z | 2022-03-31T22:21:23.000Z | devutils/run_other_pylint.py | braewoods/ungoogled-chromium | fecd436a5793ecc94879dc4eef340ab44dc5c35f | [
"BSD-3-Clause"
] | 1,579 | 2015-12-13T18:55:01.000Z | 2022-03-31T18:55:11.000Z | devutils/run_other_pylint.py | braewoods/ungoogled-chromium | fecd436a5793ecc94879dc4eef340ab44dc5c35f | [
"BSD-3-Clause"
] | 890 | 2016-01-06T01:01:51.000Z | 2022-03-31T17:18:30.000Z | #!/usr/bin/env python3
# Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run Pylint over any module"""
import argparse
import os
import shutil
from pathlib import Path
from pylint import lint
class ChangeDir:
    """Context manager that runs the ``with`` body inside ``path``.

    The previous working directory is captured on ``__enter__`` rather than
    at construction time, so an instance restores the directory that was
    actually current when the block started, and can be reused or
    constructed ahead of time safely.
    """
    def __init__(self, path):
        self._path = path
        self._orig_path = None
    def __enter__(self):
        # Capture the cwd now, so directory changes between construction
        # and entry are restored correctly (the original captured it in
        # __init__, which restores a stale directory in that case).
        self._orig_path = os.getcwd()
        os.chdir(str(self._path))
    def __exit__(self, *_):
        os.chdir(self._orig_path)
def run_pylint(module_path, pylint_options, ignore_prefixes=tuple()):
    """Run Pylint over *module_path*; return True when the result is clean."""
    # Keep Pylint's persistence data on a tmpfs so it never pollutes $HOME.
    stats_dir = Path('/run/user/{}/pylint_stats'.format(os.getuid()))
    if not stats_dir.parent.is_dir(): #pylint: disable=no-member
        stats_dir = Path('/run/shm/pylint_stats')
    os.environ['PYLINTHOME'] = str(stats_dir)
    if not module_path.exists():
        print('ERROR: Cannot find', module_path)
        exit(1)
    # A directory is expanded to every *.py file under it, skipping any
    # path whose leading parts match one of ignore_prefixes.
    if module_path.is_dir():
        targets = [
            str(candidate) for candidate in module_path.rglob('*.py')
            if not any(candidate.parts[:len(prefix)] == prefix
                       for prefix in ignore_prefixes)
        ]
    else:
        targets = [str(module_path)]
    runner = lint.Run((*targets, *pylint_options), do_exit=False)
    if stats_dir.is_dir():
        shutil.rmtree(str(stats_dir))
    status = runner.linter.msg_status
    if status != 0:
        print('WARNING: Non-zero exit status:', status)
        return False
    return True
def main():
    """CLI entrypoint"""
    parser = argparse.ArgumentParser(description='Run Pylint over arbitrary module')
    parser.add_argument('--hide-fixme', action='store_true', help='Hide "fixme" Pylint warnings.')
    parser.add_argument(
        '--show-locally-disabled',
        action='store_true',
        help='Show "locally-disabled" Pylint warnings.')
    parser.add_argument('module_path', type=Path, help='Path to the module to check')
    args = parser.parse_args()
    if not args.module_path.exists():
        print('ERROR: Module path "{}" does not exist'.format(args.module_path))
        exit(1)
    # Checks that are always suppressed, extended according to the flags.
    suppressed = ['wrong-import-position', 'bad-continuation']
    if args.hide_fixme:
        suppressed.append('fixme')
    if not args.show_locally_disabled:
        suppressed.append('locally-disabled')
    options = [
        '--disable={}'.format(','.join(suppressed)),
        '--jobs=4',
        '--score=n',
        '--persistent=n',
    ]
    # Exit code mirrors the lint outcome: 0 on a clean run, 1 otherwise.
    exit(0 if run_pylint(args.module_path, options) else 1)
if __name__ == '__main__':
    main()
| 28.009434 | 98 | 0.629505 |
ad0266b8eec5ec067bd4765c6e6f4c46f690599a | 1,992 | py | Python | safr-mass-delete/app.py | realnetworks-safr/python-examples | afa4ca89fb39ef4c19511565fcfe46f8dbf42dbf | [
"MIT"
] | 1 | 2020-09-15T15:36:07.000Z | 2020-09-15T15:36:07.000Z | safr-mass-delete/app.py | realnetworks-safr/python-examples | afa4ca89fb39ef4c19511565fcfe46f8dbf42dbf | [
"MIT"
] | 12 | 2020-03-03T17:48:22.000Z | 2022-03-12T00:02:51.000Z | safr-mass-delete/app.py | realnetworks-safr/python-examples | afa4ca89fb39ef4c19511565fcfe46f8dbf42dbf | [
"MIT"
] | 2 | 2019-08-16T13:01:05.000Z | 2020-06-14T15:04:57.000Z | #!/usr/bin/python3
import requests
import base64
import pandas as pd
from datetime import datetime
import logging
from concurrent.futures import ThreadPoolExecutor
import threading
# Log to app.log (truncated on each run) and mirror everything to stderr.
logging.basicConfig(filename='app.log', filemode='w',level=logging.DEBUG, format='"%(asctime)s"; "%(levelname)s"; "%(message)s"') ## CSV style, ';' instead of comma though
logging.getLogger().addHandler(logging.StreamHandler())
# Thread pool used by main() to fan out delete requests concurrently.
executor = ThreadPoolExecutor(max_workers=10)
# SAFR cloud endpoints (integration vs. production).
BASE_INT2_URL='https://covi.int2.real.com'
BASE_PROD_URL='https://covi.real.com'
IS_CUSTOM_INSTALL = False
# Header names required by the SAFR RPC API.
KEY_HEADER_AUTHORIZATION='X-RPC-AUTHORIZATION'
KEY_HEADER_DIRECTORY='X-RPC-DIRECTORY'
# URL templates: {0} = base URL, {1} = person id or query string.
FIND_RESOURCE='{0}/rootpeople/{1}'
DELETE_RESOURCE='{0}/people/{1}'
# One shared session so HTTP connections are reused across worker threads.
session = requests.Session()
def createHeader(user_id, password, directory):
    """Build the SAFR auth headers: user id plus base64-encoded password,
    and the target directory name."""
    encoded = base64.b64encode(password.encode('utf-8')).decode('utf-8')
    return {
        KEY_HEADER_AUTHORIZATION: '{}:{}'.format(user_id, encoded),
        KEY_HEADER_DIRECTORY: directory,
    }
# NOTE(review): 'userId'/'passwd'/'directory' look like placeholders —
# fill in real SAFR credentials before running.
header = createHeader('userId', 'passwd', 'directory')
def findPeople():
    """Fetch every person record (including expired ones) from SAFR."""
    url = FIND_RESOURCE.format(BASE_INT2_URL, '?count=0&include-expired=true')
    payload = session.get(url, headers=header).json()
    return payload["people"]
def deletePeople(person_id):
    """Delete one person record via the SAFR REST API.

    Logs success when the server answers 204 No Content, otherwise logs an
    error. Runs inside the module-level thread pool, so any uncaught
    exception would be silently swallowed by the future.
    """
    params = {}
    url = DELETE_RESOURCE.format(BASE_INT2_URL, person_id)
    print(url)
    response = session.delete(url, headers=header, data=params)
    # Bug fix: the original tested `data.status_code`, but no `data` name
    # exists in this scope (NameError) — the status lives on `response`,
    # so the success branch could never execute.
    if response is not None and response.status_code == 204:
        logging.info('Successfully deleted person-id {}'.format(person_id))
    else:
        logging.error('Error deleting person-id {}'.format(person_id))
def main():
    """Fetch every person record and queue each one for deletion."""
    print("Starting process...")
    person_ids = [entry.get('personId') for entry in findPeople()]
    for person_id in person_ids:
        executor.submit(deletePeople, person_id)
if __name__ == '__main__':
    main()
| 32.655738 | 168 | 0.698293 |
65a91249a90d5888435b4aacbef157645e0c3f22 | 227 | py | Python | lib/starbound_dashboard/core.py | NateScarlet/StarboundDashboard | 14ec581a559446a67fc07cf709074c9dfee7f533 | [
"MIT"
] | 1 | 2018-06-19T02:34:31.000Z | 2018-06-19T02:34:31.000Z | lib/starbound_dashboard/core.py | NateScarlet/StarboundDashboard | 14ec581a559446a67fc07cf709074c9dfee7f533 | [
"MIT"
] | 1 | 2019-04-19T06:16:08.000Z | 2019-04-19T06:16:08.000Z | lib/starbound_dashboard/core.py | NateScarlet/StarboundDashboard | 14ec581a559446a67fc07cf709074c9dfee7f533 | [
"MIT"
] | null | null | null | """App core."""
from quart import Quart, websocket
from . import config, filetools
# Quart application; root_path presumably points at the bundled front-end
# dist directory (see filetools.dist_path) — confirm in filetools.
APP = Quart(__name__, root_path=filetools.dist_path())
# Defaults come from the config module; an optional user config file may
# override them (a missing file is tolerated via silent=True).
APP.config.from_object(config)
APP.config.from_pyfile(config.CONFIG_FILE, silent=True)
| 25.222222 | 55 | 0.779736 |
59a51abc94c1df12a8a65ee40d02ba3e1d6ce321 | 594 | py | Python | userbot/modules/misc/restart.py | ZJRDroid/PaperplaneRemix | b41dffad17cce076f174a3fb36f0aeba1177cefa | [
"MIT"
] | null | null | null | userbot/modules/misc/restart.py | ZJRDroid/PaperplaneRemix | b41dffad17cce076f174a3fb36f0aeba1177cefa | [
"MIT"
] | null | null | null | userbot/modules/misc/restart.py | ZJRDroid/PaperplaneRemix | b41dffad17cce076f174a3fb36f0aeba1177cefa | [
"MIT"
] | 1 | 2019-12-21T03:44:42.000Z | 2019-12-21T03:44:42.000Z | import sys
from os import execl
from userbot import BOTLOG, BOTLOG_CHATID
from userbot.events import register
@register(outgoing=True, pattern=r"^\.restart$")
async def knocksomesense(event):
    """Restart the userbot when the owner sends ``.restart``."""
    await event.edit("`Hold tight! I just need a second to be back up....`")
    if BOTLOG:
        await event.client.send_message(BOTLOG_CHATID, "#RESTART \n"
                                                       "Bot Restarted")
    await event.client.disconnect()
    # Spin a new instance of bot
    execl(sys.executable, sys.executable, *sys.argv)
    # Shut the existing one down (only reached if execl fails — on success
    # execl replaces the current process image and never returns)
    exit()
| 31.263158 | 76 | 0.646465 |
1f4de03d3cf29ff14f3bc20fea119fefc8043fff | 49,188 | py | Python | mindspore/ops/operations/_quant_ops.py | wudenggang/mindspore | 95e75c3119909cc5c7c3098232851d1d7bc4ef8c | [
"Apache-2.0"
] | 2 | 2020-08-12T16:14:40.000Z | 2020-12-04T03:05:57.000Z | mindspore/ops/operations/_quant_ops.py | wudenggang/mindspore | 95e75c3119909cc5c7c3098232851d1d7bc4ef8c | [
"Apache-2.0"
] | null | null | null | mindspore/ops/operations/_quant_ops.py | wudenggang/mindspore | 95e75c3119909cc5c7c3098232851d1d7bc4ef8c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0(the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Operators for quantization."""
import mindspore.context as context
from ..._checkparam import Validator as validator
from ..._checkparam import Rel
from ..primitive import PrimitiveWithInfer, prim_attr_register
from ...common import dtype as mstype
# Public API of this module: quantization-aware-training primitives.
__all__ = ["MinMaxUpdatePerLayer",
           "MinMaxUpdatePerChannel",
           "FakeQuantPerLayer",
           "FakeQuantPerLayerGrad",
           "FakeQuantPerChannel",
           "FakeQuantPerChannelGrad",
           "BatchNormFold",
           "BatchNormFoldGrad",
           "CorrectionMul",
           "CorrectionMulGrad",
           "CorrectionMulGradReduce",
           "BatchNormFold2",
           "BatchNormFold2Grad",
           "BatchNormFoldD",
           "BatchNormFoldGradD",
           "BatchNormFold2_D",
           "BatchNormFold2GradD",
           "BatchNormFold2GradReduce"
           ]
class MinMaxUpdatePerLayer(PrimitiveWithInfer):
    r"""
    Update min and max per layer.

    Args:
        ema (bool): Use EMA algorithm update value min and max. Default: False.
        ema_decay (float) : EMA algorithm decay parameter. Default: 0.999.

    Inputs:
        - **x** (Tensor) : float32 Tensor representing the shape of the output tensor.
        - **min** (Tensor) : Value of the min range of the input data x.
        - **max** (Tensor) : Value of the max range of the input data x.

    Outputs:
        - Tensor: Simulate quantize tensor of x.

    Examples:
        >>> input_tensor = Tensor(np.random.rand(3, 16, 5, 5), mstype.float32)
        >>> min_tensor = Tensor(np.array([-6]), mstype.float32)
        >>> max_tensor = Tensor(np.array([6]), mstype.float32)
        >>> output_tensor = MinMaxUpdatePerLayer()(input_tensor, min_tensor, max_tensor)
    """
    # Bit widths supported by this op family.
    support_quant_bit = [4, 7, 8]

    @prim_attr_register
    def __init__(self, ema=False, ema_decay=0.999):
        """init FakeQuantMinMaxPerLayerUpdate OP"""
        # Importing the custom-op module registers the Ascend kernel for
        # this primitive as a side effect.
        if context.get_context('device_target') == "Ascend":
            from mindspore.ops._op_impl._custom_op import minmax_update_perlayer
        if ema and not ema_decay:
            raise ValueError(
                f"For '{self.name}' attr \'ema\' and \'ema_decay\' should set together.")
        self.ema = validator.check_value_type('ema', ema, (bool,), self.name)
        self.ema_decay = validator.check_number_range(
            'ema_decay', ema_decay, 0, 1, Rel.INC_BOTH, self.name)
        self.init_prim_io_names(inputs=['x', 'min', 'max'],
                                outputs=['min_up', 'max_up'])

    def infer_shape(self, x_shape, min_shape, max_shape):
        # min/max must be rank-1 and equal-shaped; x may be any rank >= 1.
        validator.check_integer("x rank", len(x_shape), 1, Rel.GE, self.name)
        validator.check("min shape", min_shape, "max shape",
                        max_shape, Rel.EQ, self.name)
        validator.check_integer("min shape", len(
            min_shape), 1, Rel.EQ, self.name)
        return min_shape, max_shape

    def infer_dtype(self, x_type, min_type, max_type):
        # All inputs must be float16/float32; outputs keep min/max dtypes.
        valid_types = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same({"x": x_type}, valid_types, self.name)
        validator.check_tensor_type_same(
            {"min": min_type}, valid_types, self.name)
        validator.check_tensor_type_same(
            {"max": max_type}, valid_types, self.name)
        return min_type, max_type
class MinMaxUpdatePerChannel(PrimitiveWithInfer):
    r"""
    Update min and max per channel.

    Args:
        ema (bool): Use EMA algorithm update value min and max. Default: False.
        ema_decay (float) : EMA algorithm decay parameter. Default: 0.999.
        channel_axis (int): Quantization by channel axis. Ascend backend only supports 0 or 1. Default: 1.

    Inputs:
        - **x** (Tensor) : float32 Tensor representing the shape of the output tensor.
        - **min** (Tensor) : Value of the min range of the input data x.
        - **max** (Tensor) : Value of the max range of the input data x.

    Outputs:
        - Tensor: Simulate quantize tensor of x.

    Examples:
        >>> x = Tensor(np.random.rand(3, 16, 5, 5), mstype.float32)
        >>> min = Tensor(np.random.uniform(-1, 1, size=16), mstype.float32)
        >>> max = Tensor(np.random.uniform(-1, 1, size=16), mstype.float32)
        >>> output_tensor = MinMaxUpdatePerChannel()(x, min, max)
    """
    # Bit widths supported by this op family; Ascend only accepts rank-2/4 x.
    support_quant_bit = [4, 7, 8]
    ascend_support_x_rank = [2, 4]

    @prim_attr_register
    def __init__(self, ema=False, ema_decay=0.999, channel_axis=1):
        """init FakeQuantPerChannelUpdate OP for Ascend"""
        self.is_ascend = context.get_context('device_target') == "Ascend"
        # Importing the custom-op module registers the Ascend kernel.
        if self.is_ascend:
            from mindspore.ops._op_impl._custom_op import minmax_update_perchannel
        if ema and not ema_decay:
            raise ValueError(
                f"For '{self.name}' attr \'ema\' and \'ema_decay\' should set together.")
        self.ema = validator.check_value_type('ema', ema, (bool,), self.name)
        self.ema_decay = validator.check_number_range(
            'ema_decay', ema_decay, 0, 1, Rel.INC_BOTH, self.name)
        # Ascend restricts the channel axis to {0, 1}; other backends only
        # require it to be non-negative.
        if self.is_ascend:
            self.channel_axis = validator.check_int_range('channel_axis', channel_axis, 0, 1, Rel.INC_BOTH, self.name)
        else:
            self.channel_axis = validator.check_integer('channel_axis', channel_axis, 0, Rel.GE, self.name)
        self.init_prim_io_names(
            inputs=['x', 'min', 'max'], outputs=['min_up', 'max_up'])

    def infer_shape(self, x_shape, min_shape, max_shape):
        if self.is_ascend and len(x_shape) not in self.ascend_support_x_rank:
            raise ValueError(f"For '{self.name}' x rank should be in '{self.ascend_support_x_rank}'")
        if not self.is_ascend:
            validator.check_integer("x rank", len(x_shape), 1, Rel.GE, self.name)
        validator.check("min shape", min_shape, "max shape",
                        max_shape, Rel.EQ, self.name)
        validator.check_integer("min shape", len(
            min_shape), 1, Rel.EQ, self.name)
        return min_shape, max_shape

    def infer_dtype(self, x_type, min_type, max_type):
        # All inputs must be float16/float32; outputs keep min/max dtypes.
        valid_types = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same(
            {"x": x_type}, valid_types, self.name)
        validator.check_tensor_type_same(
            {"min": min_type}, valid_types, self.name)
        validator.check_tensor_type_same(
            {"max": max_type}, valid_types, self.name)
        return min_type, max_type
class FakeQuantPerLayer(PrimitiveWithInfer):
    r"""
    Simulate the quantize and dequantize operations in training time.

    Args:
        num_bits (int) : Number bits for quantization aware. Default: 8.
        ema (bool): Use EMA algorithm update value min and max. Default: False.
        ema_decay (float) : EMA algorithm decay parameter. Default: 0.999.
        quant_delay (int): Quantilization delay parameter. Before delay step in training time not update
            simulate quantization aware funcion. After delay step in training time begin simulate the aware
            quantize funcion. Default: 0.
        symmetric (bool): Quantization algorithm use symmetric or not. Default: False.
        narrow_range (bool): Quantization algorithm use narrow range or not. Default: False.
        training (bool): Training the network or not. Default: True.

    Inputs:
        - **x** (Tensor) : float32 Tensor representing the shape of the output tensor.
        - **min** (Tensor) : Value of the min range of the input data x.
        - **max** (Tensor) : Value of the max range of the input data x.

    Outputs:
        - Tensor: Simulate quantize tensor of x.

    Examples:
        >>> input_tensor = Tensor(np.random.rand(3, 16, 5, 5), mstype.float32)
        >>> min_tensor = Tensor(np.array([-6]), mstype.float32)
        >>> max_tensor = Tensor(np.array([6]), mstype.float32)
        >>> output_tensor = FakeQuantPerLayer(num_bits=8)(input_tensor, min_tensor, max_tensor)
    """
    support_quant_bit = [4, 7, 8]

    @prim_attr_register
    def __init__(self,
                 num_bits=8,
                 ema=False,
                 ema_decay=0.999,
                 quant_delay=0,
                 symmetric=False,
                 narrow_range=False,
                 training=True):
        """init FakeQuantPerLayer OP"""
        # Importing the custom-op module registers the Ascend kernel.
        if context.get_context('device_target') == "Ascend":
            from mindspore.ops._op_impl._custom_op import fake_quant_perlayer
        if num_bits not in self.support_quant_bit:
            raise ValueError(
                f"For '{self.name}' attr \'num_bits\' is not support.")
        if ema and not ema_decay:
            raise ValueError(
                f"For '{self.name}' attr \'ema\' and \'ema_decay\' should set together.")
        self.ema = validator.check_value_type('ema', ema, (bool,), self.name)
        self.symmetric = validator.check_value_type(
            'symmetric', symmetric, (bool,), self.name)
        self.narrow_range = validator.check_value_type(
            'narrow_range', narrow_range, (bool,), self.name)
        self.training = validator.check_value_type(
            'training', training, (bool,), self.name)
        self.ema_decay = validator.check_number_range(
            'ema_decay', ema_decay, 0, 1, Rel.INC_BOTH, self.name)
        self.num_bits = validator.check_integer(
            'num_bits', num_bits, 0, Rel.GT, self.name)
        self.quant_delay = validator.check_integer(
            'quant_delay', quant_delay, 0, Rel.GE, self.name)
        self.init_prim_io_names(inputs=['x', 'min', 'max'],
                                outputs=['out'])

    def infer_shape(self, x_shape, min_shape, max_shape):
        # min/max must be rank-1 and equal-shaped; output mirrors x.
        validator.check_integer("x rank", len(x_shape), 1, Rel.GE, self.name)
        validator.check("min shape", min_shape, "max shape", max_shape, Rel.EQ, self.name)
        validator.check_integer("min shape", len(min_shape), 1, Rel.EQ, self.name)
        return x_shape

    def infer_dtype(self, x_type, min_type, max_type):
        # All inputs must be float16/float32; output keeps x's dtype.
        valid_types = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same({"x": x_type}, valid_types, self.name)
        validator.check_tensor_type_same(
            {"min": min_type}, valid_types, self.name)
        validator.check_tensor_type_same(
            {"max": max_type}, valid_types, self.name)
        return x_type
class FakeQuantPerLayerGrad(PrimitiveWithInfer):
    r"""
    Performs grad of FakeQuantPerLayer operation.

    Examples:
        >>> fake_min_max_grad = FakeQuantPerLayerGrad()
        >>> dout = Tensor(np.array([[-2.3, 1.2], [5.7, 0.2]]), mindspore.float32)
        >>> input_x = Tensor(np.array([[18, -23], [0.2, 6]]), mindspore.float32)
        >>> _min = Tensor(np.array([-4]), mindspore.float32)
        >>> _max = Tensor(np.array([2]), mindspore.float32)
        >>> result = fake_min_max_grad(dout, input_x, _min, _max)
    """
    support_quant_bit = [4, 7, 8]

    @prim_attr_register
    def __init__(self,
                 num_bits=8,
                 quant_delay=0,
                 symmetric=False,
                 narrow_range=False):
        # Importing the custom-op module registers the Ascend kernel.
        if context.get_context('device_target') == "Ascend":
            from mindspore.ops._op_impl._custom_op import fake_quant_perlayer_grad
        if num_bits not in self.support_quant_bit:
            raise ValueError(
                f"For '{self.name}' attr \'num_bits\' is not support.")
        self.num_bits = validator.check_integer(
            'num_bits', num_bits, 0, Rel.GT, self.name)
        self.quant_delay = validator.check_value_type(
            'quant_delay', quant_delay, (int,), self.name)
        self.symmetric = validator.check_value_type(
            'symmetric', symmetric, (bool,), self.name)
        self.narrow_range = validator.check_value_type(
            'narrow_range', narrow_range, (bool,), self.name)
        self.init_prim_io_names(
            inputs=['dout', 'x', 'min', 'max'], outputs=['dx'])

    def infer_shape(self, dout_shape, x_shape, min_shape, max_shape):
        # Gradient must match x's shape; min/max must be rank-1 and equal.
        validator.check("dout shape", dout_shape, "x shape",
                        x_shape, Rel.EQ, self.name)
        validator.check("min shape", min_shape, "max shape",
                        max_shape, Rel.EQ, self.name)
        validator.check_integer("min shape", len(
            min_shape), 1, Rel.EQ, self.name)
        return dout_shape

    def infer_dtype(self, dout_type, x_type, min_type, max_type):
        # All inputs must be float16/float32; dx keeps dout's dtype.
        valid_types = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same(
            {"dout": dout_type}, valid_types, self.name)
        validator.check_tensor_type_same({"x": x_type}, valid_types, self.name)
        validator.check_tensor_type_same(
            {"min": min_type}, valid_types, self.name)
        validator.check_tensor_type_same(
            {"max": max_type}, valid_types, self.name)
        return dout_type
class FakeQuantPerChannel(PrimitiveWithInfer):
    r"""
    Simulate the quantize and dequantize operations in training time base on per channel.

    Args:
        num_bits (int) : Number bits to quantilization. Default: 8.
        ema (bool): Use EMA algorithm update tensor min and tensor max. Default: False.
        ema_decay (float) : EMA algorithm decay parameter. Default: 0.999.
        quant_delay (int): Quantilization delay parameter. Before delay step in training time not
            update the weight data to simulate quantize operation. After delay step in training time
            begin simulate the quantize operation. Default: 0.
        symmetric (bool): Quantization algorithm use symmetric or not. Default: False.
        narrow_range (bool): Quantization algorithm use narrow range or not. Default: False.
        training (bool): Training the network or not. Default: True.
        channel_axis (int): Quantization by channel axis. Ascend backend only supports 0 or 1. Default: 1.

    Inputs:
        - **x** (Tensor) : 4-D float32 Tensor representing the shape of the output tensor.
        - **min** (int, float) : Value of the min range of the input data.
        - **max** (int, float) : Value of the max range of the input data.

    Outputs:
        - Tensor, has the same type as input.

    Examples:
        >>> fake_quant = FakeQuantPerChannel()
        >>> input_x = Tensor(np.array([3, 4, 5, -2, -3, -1]).reshape(3, 2), mindspore.float32)
        >>> _min = Tensor(np.linspace(-2, 2, 12).reshape(3, 2, 2), mindspore.float32)
        >>> _max = Tensor(np.linspace(8, 12, 12).reshape(3, 2, 2), mindspore.float32)
        >>> result = fake_quant(input_x, _min, _max)
    """
    # Bit widths supported; Ascend only accepts rank-2/4 x tensors.
    support_quant_bit = [4, 7, 8]
    ascend_support_x_rank = [2, 4]

    @prim_attr_register
    def __init__(self,
                 num_bits=8,
                 ema=False,
                 ema_decay=0.999,
                 quant_delay=0,
                 symmetric=False,
                 narrow_range=False,
                 training=True,
                 channel_axis=1):
        """init FakeQuantPerChannel OP"""
        self.is_ascend = context.get_context('device_target') == "Ascend"
        # Importing the custom-op module registers the Ascend kernel.
        if self.is_ascend:
            from mindspore.ops._op_impl._custom_op import fake_quant_perchannel
        if num_bits not in self.support_quant_bit:
            raise ValueError(
                f"For '{self.name}' Attr \'num_bits\' is not support.")
        if ema and not ema_decay:
            raise ValueError(
                f"For '{self.name}' attr \'ema\' and \'ema_decay\' should set together.")
        self.ema = validator.check_value_type('ema', ema, (bool,), self.name)
        self.symmetric = validator.check_value_type(
            'symmetric', symmetric, (bool,), self.name)
        self.narrow_range = validator.check_value_type(
            'narrow_range', narrow_range, (bool,), self.name)
        self.training = validator.check_value_type(
            'training', training, (bool,), self.name)
        self.ema_decay = validator.check_number_range(
            'ema_decay', ema_decay, 0, 1, Rel.INC_BOTH, self.name)
        self.num_bits = validator.check_integer(
            'num_bits', num_bits, 0, Rel.GT, self.name)
        self.quant_delay = validator.check_integer(
            'quant_delay', quant_delay, 0, Rel.GE, self.name)
        # Ascend restricts the channel axis to {0, 1}; other backends only
        # require it to be non-negative.
        if self.is_ascend:
            self.channel_axis = validator.check_int_range('channel_axis', channel_axis, 0, 1, Rel.INC_BOTH, self.name)
        else:
            self.channel_axis = validator.check_integer('channel_axis', channel_axis, 0, Rel.GE, self.name)
        self.init_prim_io_names(inputs=['x', 'min', 'max'], outputs=['out'])

    def infer_shape(self, x_shape, min_shape, max_shape):
        if self.is_ascend and len(x_shape) not in self.ascend_support_x_rank:
            raise ValueError(f"For '{self.name}' x rank should be in '{self.ascend_support_x_rank}'")
        if not self.is_ascend:
            validator.check_integer("x rank", len(x_shape), 1, Rel.GE, self.name)
        # min/max must carry one entry per channel along channel_axis.
        validator.check("min shape", min_shape, "max shape", max_shape, Rel.EQ, self.name)
        validator.check_integer(
            "min shape", min_shape[0], x_shape[self.channel_axis], Rel.EQ, self.name)
        validator.check_integer(
            "max shape", max_shape[0], x_shape[self.channel_axis], Rel.EQ, self.name)
        return x_shape

    def infer_dtype(self, x_type, min_type, max_type):
        # All inputs must be float16/float32; output keeps x's dtype.
        valid_types = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same({"x": x_type}, valid_types, self.name)
        validator.check_tensor_type_same(
            {"min": min_type}, valid_types, self.name)
        validator.check_tensor_type_same(
            {"max": max_type}, valid_types, self.name)
        return x_type
class FakeQuantPerChannelGrad(PrimitiveWithInfer):
    r"""
    Performs grad of FakeQuantPerChannel operation.

    Examples:
        >>> fqmmpc_grad = FakeQuantPerChannelGrad()
        >>> input_x = Tensor(np.random.randint(-4, 4, (2, 3, 4)), mindspore.float32)
        >>> dout = Tensor(np.random.randint(-2, 2, (2, 3, 4)), mindspore.float32)
        >>> _min = Tensor(np.random.randint(-8, 2, (2, 3, 4)), mindspore.float32)
        >>> _max = Tensor(np.random.randint(-2, 8, (2, 3, 4)), mindspore.float32)
        >>> result = fqmmpc_grad(dout, input_x, _min, _max)
    """
    support_quant_bit = [4, 7, 8]

    @prim_attr_register
    def __init__(self,
                 num_bits=8,
                 quant_delay=0,
                 symmetric=False,
                 narrow_range=False,
                 channel_axis=1):
        """init FakeQuantPerChannelGrad Fill"""
        # Importing the custom-op module registers the Ascend kernel.
        if context.get_context('device_target') == "Ascend":
            from mindspore.ops._op_impl._custom_op import fake_quant_perchannel_grad
        if num_bits not in self.support_quant_bit:
            raise ValueError(
                f"For '{self.name}' attr \'num_bits\' is not support.")
        self.num_bits = validator.check_integer(
            'num_bits', num_bits, 0, Rel.GT, self.name)
        self.quant_delay = validator.check_value_type(
            'quant_delay', quant_delay, (int,), self.name)
        self.symmetric = validator.check_value_type(
            'symmetric', symmetric, (bool,), self.name)
        self.narrow_range = validator.check_value_type(
            'narrow_range', narrow_range, (bool,), self.name)
        self.channel_axis = validator.check_integer(
            'channel axis', channel_axis, 0, Rel.GE, self.name)
        self.init_prim_io_names(
            inputs=['dout', 'x', 'min', 'max'], outputs=['dx'])

    def infer_shape(self, dout_shape, x_shape, min_shape, max_shape):
        # Consistency fix: pass the relation and primitive name explicitly,
        # matching FakeQuantPerLayerGrad, so validation failures identify
        # this op in their error message (Rel.EQ was already the default,
        # behavior of the check itself is unchanged).
        validator.check("dout shape", dout_shape, "x shape",
                        x_shape, Rel.EQ, self.name)
        validator.check("min shape", min_shape, "max shape",
                        max_shape, Rel.EQ, self.name)
        return dout_shape

    def infer_dtype(self, dout_type, x_type, min_type, max_type):
        # All inputs must be float16/float32; dx keeps dout's dtype.
        valid_types = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same(
            {"dout": dout_type}, valid_types, self.name)
        validator.check_tensor_type_same({"x": x_type}, valid_types, self.name)
        validator.check_tensor_type_same(
            {"min": min_type}, valid_types, self.name)
        validator.check_tensor_type_same(
            {"max": max_type}, valid_types, self.name)
        return dout_type
class BatchNormFold(PrimitiveWithInfer):
    """
    Batch normalization folded.

    Args:
        momentum (float): Momentum value should be [0, 1]. Default: 0.9.
        epsilon (float): A small float number to avoid dividing by 0. 1e-5 if dtype in
            float32 else 1e-3. Default: 1e-5.
        is_training (bool): In training mode set True, else set False. Default: True.
        freeze_bn (int): Delay in steps at which computation switches from regular batch
            norm to frozen mean and std. Default: 0.

    Inputs:
        - **x** (Tensor) - Tensor of shape :math:`(N, C)`.
        - **mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **variance** (Tensor) - Tensor of shape :math:`(C,)`.
        - **global_step** (Tensor) - Tensor to record current global step.

    Outputs:
        Tuple of 4 Tensor, the normalized input and the updated parameters.

        - **batch_mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **batch_std** (Tensor) - Tensor of shape :math:`(C,)`.
        - **running_mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **running_std** (Tensor) - Tensor of shape :math:`(C,)`.

    Examples:
        >>> batch_norm_fold = P.BatchNormFold()
        >>> input_x = Tensor(np.array([1, 2, -1, -2, -2, 1]).reshape(2, 3), mindspore.float32)
        >>> mean = Tensor(np.array([0.5, -1, 1,]), mindspore.float32)
        >>> variance = Tensor(np.array([0.36, 0.4, 0.49]), mindspore.float32)
        >>> global_step = Tensor(np.arange(6), mindspore.int32)
        >>> batch_mean, batch_std, running_mean, running_std = batch_norm_fold(input_x, mean, variance, global_step)
    """
    # The channel dimension of x; mean/variance carry one entry per channel.
    channel_axis = 1

    @prim_attr_register
    def __init__(self, momentum=0.9, epsilon=1e-5, is_training=True, freeze_bn=0):
        """init batch norm fold layer"""
        self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)
        self.epsilon = validator.check_float_positive('epsilon', epsilon, self.name)
        self.is_training = validator.check_value_type('is_training', is_training, (bool,), self.name)
        self.freeze_bn = validator.check_value_type('freeze_bn', freeze_bn, (int,), self.name)
        self.init_prim_io_names(inputs=['x', 'mean', 'variance', 'global_step'],
                                outputs=['batch_mean', 'batch_std', 'running_mean', 'running_std'])

    def infer_shape(self, x_shape, mean_shape, variance_shape, global_step_shape):
        # All four outputs share the per-channel (C,) shape of mean.
        validator.check("mean shape", mean_shape, "gamma_shape", variance_shape, Rel.EQ, self.name)
        validator.check("mean_shape[0]", mean_shape[0], "input channel", x_shape[self.channel_axis], Rel.EQ, self.name)
        validator.check_integer("global step shape len", len(global_step_shape), 1, Rel.EQ, self.name)
        return mean_shape, mean_shape, mean_shape, mean_shape

    def infer_dtype(self, x_type, mean_type, variance_type, global_step_type):
        # x/mean/variance must agree and be float16/float32; global_step int32.
        validator.check("input type", x_type, "mean type", mean_type)
        validator.check("input type", x_type, "variance type", variance_type)
        args = {"x": x_type, "mean": mean_type, "variance": variance_type}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        validator.check_tensor_type_same({"global_step": global_step_type}, (mstype.int32,), self.name)
        return x_type, x_type, x_type, x_type
class BatchNormFoldGrad(PrimitiveWithInfer):
    r"""
    Performs grad of BatchNormFold operation.

    Examples:
        >>> batch_norm_fold_grad = P.BatchNormFoldGrad()
        >>> d_batch_mean = Tensor(np.random.randint(-2., 2., (1, 2, 2, 3)), mindspore.float32)
        >>> d_batch_std = Tensor(np.random.randn(1, 2, 2, 3), mindspore.float32)
        >>> input_x = Tensor(np.random.randint(0, 256, (4, 1, 4, 6)), mindspore.float32)
        >>> batch_mean = Tensor(np.random.randint(-8., 8., (1, 2, 2, 3)), mindspore.float32)
        >>> batch_std = Tensor(np.random.randint(0, 12, (1, 2, 2, 3)), mindspore.float32)
        >>> global_step = Tensor([2], mindspore.int32)
        >>> result = batch_norm_fold_grad(d_batch_mean, d_batch_std, input_x, batch_mean, batch_std, global_step)
    """
    # The channel dimension of x, mirrored from BatchNormFold.
    channel_axis = 1

    @prim_attr_register
    def __init__(self, epsilon=1e-5, is_training=True, freeze_bn=0):
        """init BatchNormGrad layer"""
        self.is_training = validator.check_value_type('is_training', is_training, (bool,), self.name)
        self.freeze_bn = validator.check_value_type('freeze_bn', freeze_bn, (int,), self.name)
        self.epsilon = validator.check_float_positive('epsilon', epsilon, self.name)
        self.init_prim_io_names(inputs=['d_batch_mean', 'd_batch_std', 'x', 'batch_mean', 'batch_std', 'global_step'],
                                outputs=['dx'])

    def infer_shape(self, d_batch_mean_shape, d_batch_std_shape, x_shape, batch_mean_shape, batch_std_shape,
                    global_step_shape):
        # All per-channel gradients/statistics must share one shape whose
        # first dimension matches x's channel count; dx mirrors x.
        validator.check("d_batch_mean shape", d_batch_mean_shape,
                        "d_batch_std shape", d_batch_std_shape, Rel.EQ, self.name)
        validator.check("d_batch_mean shape", d_batch_mean_shape,
                        "batch_mean shape", batch_mean_shape, Rel.EQ, self.name)
        validator.check("d_batch_mean shape", d_batch_mean_shape,
                        "batch_std shape", batch_std_shape, Rel.EQ, self.name)
        validator.check("d_batch_mean_shape[0]", d_batch_mean_shape[0],
                        "input channel", x_shape[self.channel_axis], Rel.EQ, self.name)
        validator.check_integer("global step shape len", len(global_step_shape), 1, Rel.EQ, self.name)
        return x_shape

    def infer_dtype(self, d_batch_mean_type, d_batch_std_type, x_type, batch_mean_type, batch_std_type,
                    global_step_type):
        # All float inputs must share float16/float32; global_step is int32.
        args = {"input": x_type, "d_batch_mean": d_batch_mean_type, "d_batch_std": d_batch_std_type,
                "batch_mean": batch_mean_type, "batch_std": batch_std_type}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        validator.check_tensor_type_same({"global_step": global_step_type}, (mstype.int32,), self.name)
        return x_type
class CorrectionMul(PrimitiveWithInfer):
    """
    Scale the weights with a correction factor to the long term statistics
    prior to quantization. This ensures that there is no jitter in the quantized weights
    due to batch to batch variation.

    Inputs:
        - **x** (Tensor) - Tensor of shape :math:`(N, C)`.
        - **batch_std** (Tensor) - Tensor of shape :math:`(C,)`.
        - **running_std** (Tensor) - Tensor of shape :math:`(C,)`.

    Outputs:
        - **out** (Tensor) - Tensor has the same shape as x.

    Examples:
        >>> correction_mul = P.CorrectionMul()
        >>> input_x = Tensor(np.random.randint(-8, 12, (3, 4)), mindspore.float32)
        >>> batch_std = Tensor(np.array([1.5, 3, 2]), mindspore.float32)
        >>> running_std = Tensor(np.array([2, 1.2, 0.5]), mindspore.float32)
        >>> out = correction_mul(input_x, batch_std, running_std)
    """

    @prim_attr_register
    def __init__(self, channel_axis=0):
        """init correction mul layer"""
        if context.get_context('device_target') == "Ascend":
            # Import for its side effect: registers the Ascend custom-op
            # implementation of this primitive.
            from mindspore.ops._op_impl._custom_op import correction_mul
        self.channel_axis = channel_axis
        self.init_prim_io_names(inputs=['x', 'batch_std', 'running_std'],
                                outputs=['out'])

    def infer_shape(self, x_shape, batch_std_shape, running_std_shape):
        """Output keeps the shape of x; std vectors must match x's channel."""
        validator.check("batch_std shape", batch_std_shape, "running_std shape", running_std_shape, Rel.EQ, self.name)
        validator.check("batch_std_shape[0]", batch_std_shape[0], "x_shape channel size", x_shape[self.channel_axis],
                        Rel.EQ, self.name)
        return x_shape

    def infer_dtype(self, x_type, batch_std_type, running_std_type):
        """Output keeps the dtype of x; all inputs share float16/float32."""
        args = {"x": x_type, "batch_std": batch_std_type, "running_std": running_std_type}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        return x_type
class CorrectionMulGrad(PrimitiveWithInfer):
    r"""
    Performs grad of CorrectionMul operation.

    Examples:
        >>> correction_mul_grad = P.CorrectionMulGrad()
        >>> dout = Tensor(np.array([1.5, -2.2, 0.7, -3, 1.6, 2.8]).reshape(2, 1, 1, 3), mindspore.float32)
        >>> input_x = Tensor(np.random.randint(0, 256, (2, 1, 1, 3)), mindspore.float32)
        >>> gamma = Tensor(np.array([0.2, -0.2, 2.5, -1.]).reshape(2, 1, 2), mindspore.float32)
        >>> running_std = Tensor(np.array([1.2, 0.1, 0.7, 2.3]).reshape(2, 1, 2), mindspore.float32)
        >>> result = correction_mul_grad(dout, input_x, gamma, running_std)
    """

    @prim_attr_register
    def __init__(self, channel_axis=0):
        """init correction mul layer"""
        if context.get_context('device_target') == "Ascend":
            # Import for its side effect: registers the Ascend custom-op
            # implementation of this primitive.
            from mindspore.ops._op_impl._custom_op import correction_mul_grad
        self.channel_axis = channel_axis
        self.init_prim_io_names(inputs=['dout', 'x', 'gamma', 'running_std'],
                                outputs=['dx', 'mul_dx'])

    def infer_shape(self, dout_shape, x_shape, gamma_shape, running_std_shape):
        """Infer output shapes; second output differs per device target."""
        validator.check("dout shape", dout_shape, "x_shape x", x_shape, Rel.EQ, self.name)
        validator.check("gamma_shape[0]", gamma_shape[0], "dout channel size", dout_shape[self.channel_axis],
                        Rel.EQ, self.name)
        validator.check("running_std_shape[0]", running_std_shape[0],
                        "dout channel size", dout_shape[self.channel_axis], Rel.EQ, self.name)
        # On Ascend the second output (mul_dx) keeps x's full shape; on other
        # targets it is gamma-shaped.
        if context.get_context('device_target') == "Ascend":
            return x_shape, x_shape
        return x_shape, gamma_shape

    def infer_dtype(self, dout_type, x_type, gamma_type, running_std_type):
        """All inputs share float16/float32; outputs mirror infer_shape."""
        args = {"dout": dout_type, "x": x_type, "gamma": gamma_type, "running_std": running_std_type}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        if context.get_context('device_target') == "Ascend":
            return x_type, x_type
        return x_type, gamma_type
class CorrectionMulGradReduce(PrimitiveWithInfer):
    r"""
    Performs grad reduce of CorrectionMul operation.

    Reduces the per-element gradient ``mul_dx`` to a per-channel ``d_gamma``.

    Examples:
        >>> correction_mul_grad_rd = P.CorrectionMulGradReduce()
        >>> dout = Tensor(np.array([1.5, -2.2, 0.7, -3, 1.6, 2.8]).reshape(2, 1, 1, 3), mindspore.float32)
        >>> input_x = Tensor(np.random.randint(0, 256, (2, 1, 1, 3)), mindspore.float32)
        >>> gamma = Tensor(np.array([0.2, -0.2, 2.5, -1.]).reshape(2, 1, 2), mindspore.float32)
        >>> running_std = Tensor(np.array([1.2, 0.1, 0.7, 2.3]).reshape(2, 1, 2), mindspore.float32)
        >>> result = correction_mul_grad_rd(dout, input_x, gamma, running_std)
    """

    @prim_attr_register
    def __init__(self, channel_axis=0):
        """init correction mul reduce layer"""
        if context.get_context('device_target') == "Ascend":
            # Import for its side effect: registers the Ascend custom-op
            # implementation of this primitive.
            from mindspore.ops._op_impl._custom_op import correction_mul_grad
        self.channel_axis = channel_axis
        self.init_prim_io_names(inputs=['mul_dx'],
                                outputs=['d_gamma'])

    def infer_shape(self, mul_dx_shape):
        # Output is a 1-D tensor sized by the channel dimension of mul_dx.
        return [mul_dx_shape[self.channel_axis]]

    def infer_dtype(self, mul_dx_type):
        # d_gamma keeps the input dtype.
        return mul_dx_type
class BatchNormFold2(PrimitiveWithInfer):
    """
    Scale the bias with a correction factor to the long term statistics
    prior to quantization. This ensures that there is no jitter in the quantized bias
    due to batch to batch variation.

    Inputs:
        - **x** (Tensor) - Tensor of shape :math:`(N, C)`.
        - **beta** (Tensor) - Tensor of shape :math:`(C,)`.
        - **gamma** (Tensor) - Tensor of shape :math:`(C,)`.
        - **batch_std** (Tensor) - Tensor of shape :math:`(C,)`.
        - **batch_mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **running_std** (Tensor) - Tensor of shape :math:`(C,)`.
        - **running_mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **global_step** (Tensor) - Tensor to record current global step.

    Outputs:
        - **y** (Tensor) - Tensor has the same shape as x.

    Examples:
        >>> batch_norm_fold2 = P.BatchNormFold2()
        >>> input_x = Tensor(np.random.randint(-6, 6, (4, 3)), mindspore.float32)
        >>> beta = Tensor(np.array([0.2, -0.1, 0.25]), mindspore.float32)
        >>> gamma = Tensor(np.array([-0.1, -0.25, 0.1]), mindspore.float32)
        >>> batch_std = Tensor(np.array([0.1, 0.2, 0.1]), mindspore.float32)
        >>> batch_mean = Tensor(np.array([0, 0.05, 0.2]), mindspore.float32)
        >>> running_std = Tensor(np.array([0.1, 0.1, 0.3]), mindspore.float32)
        >>> running_mean = Tensor(np.array([-0.1, 0, -0.1]), mindspore.float32)
        >>> global_step = Tensor(np.random.randint(1, 8, (8, )), mindspore.int32)
        >>> result = batch_norm_fold2(input_x, beta, gamma, batch_std, batch_mean,
        >>>                           running_std, running_mean, global_step)
    """
    # Channel dimension of the input layout; per-channel vectors are checked
    # against this axis of x.
    channel_axis = 1

    @prim_attr_register
    def __init__(self, freeze_bn=0):
        """init conv2d fold layer"""
        self.freeze_bn = validator.check_value_type('freeze_bn', freeze_bn, (int,), self.name)
        self.init_prim_io_names(inputs=['x', 'beta', 'gamma', 'batch_std', 'batch_mean',
                                        'running_std', 'running_mean', 'global_step'],
                                outputs=['y'])

    def infer_shape(self, x_shape, beta_shape, gamma_shape, batch_std_shape, running_std_shape, batch_mean_shape,
                    running_mean_shape, global_step_shape):
        """Output y keeps the shape of x; every per-channel vector must agree.

        NOTE(review): the positional order here (batch_std, running_std,
        batch_mean) differs from the order declared in init_prim_io_names
        (batch_std, batch_mean, running_std).  All the per-channel shapes are
        checked equal below, so the validation still holds -- but confirm the
        intended parameter order.
        """
        validator.check("batch_std shape", batch_std_shape, "running_std shape", running_std_shape, Rel.EQ, self.name)
        validator.check("batch_std shape", batch_std_shape, "batch_mean shape", batch_mean_shape, Rel.EQ, self.name)
        validator.check("batch_std shape", batch_std_shape, "beta shape", beta_shape, Rel.EQ, self.name)
        validator.check("batch_std shape", batch_std_shape, "running_mean shape", running_mean_shape, Rel.EQ, self.name)
        # Fixed label: this check compares the *gamma* shape; the message
        # previously claimed "batch_mean shape", which made errors misleading.
        validator.check("batch_std shape", batch_std_shape, "gamma shape", gamma_shape, Rel.EQ, self.name)
        validator.check("batch_std_shape[0]", batch_std_shape[0], "x_shape channel size", x_shape[self.channel_axis],
                        Rel.EQ, self.name)
        validator.check_integer("global step shape len", len(global_step_shape), 1, Rel.EQ, self.name)
        return x_shape

    def infer_dtype(self, x_type, beta_type, gamma_type, batch_std_type, running_std_type, batch_mean_type,
                    running_mean_type, global_step_type):
        """y keeps x's dtype; float inputs share float16/float32, step int32."""
        args = {"batch_std": batch_std_type, "running_std": running_std_type, "batch_mean": batch_mean_type,
                "beta": beta_type, "running_mean": running_mean_type, "gamma": gamma_type, "x": x_type}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        validator.check_tensor_type_same({"global_step": global_step_type}, (mstype.int32,), self.name)
        return x_type
class BatchNormFold2Grad(PrimitiveWithInfer):
    r"""
    Performs grad of CorrectionAddGrad operation.

    Examples:
        >>> bnf2_grad = P.BatchNormFold2Grad()
        >>> input_x = Tensor(np.arange(3*3*12*12).reshape(6, 3, 6, 12), mindspore.float32)
        >>> dout = Tensor(np.random.randint(-32, 32, (6, 3, 6, 12)), mindspore.float32)
        >>> gamma = Tensor(np.random.randint(-4, 4, (3, 1, 1, 2)), mindspore.float32)
        >>> batch_std = Tensor(np.random.randint(0, 8, (3, 1, 1, 2)), mindspore.float32)
        >>> batch_mean = Tensor(np.random.randint(-6, 6, (3, 1, 1, 2)), mindspore.float32)
        >>> running_std = Tensor(np.linspace(0, 2, 6).reshape(3, 1, 1, 2), mindspore.float32)
        >>> running_mean = Tensor(np.random.randint(-3, 3, (3, 1, 1, 2)), mindspore.float32)
        >>> global_step = Tensor(np.array([-2]), mindspore.int32)
        >>> result = bnf2_grad(dout, input_x, gamma, batch_std, batch_mean, running_std, running_mean, global_step)
    """
    # Channel dimension of the input layout.
    channel_axis = 1

    @prim_attr_register
    def __init__(self, freeze_bn=0):
        """init MulFold layer"""
        # NOTE(review): unlike sibling primitives, freeze_bn is stored
        # without validator.check_value_type here -- confirm intentional.
        self.freeze_bn = freeze_bn
        self.init_prim_io_names(inputs=['dout', 'x', 'gamma',
                                        'batch_std', 'batch_mean',
                                        'running_std', 'running_mean', 'global_step'],
                                outputs=['d_batch_std', 'd_batch_mean', 'd_beta', 'd_gamma', 'dx'])

    def infer_shape(self, dout_shape, x_shape, gamma_shape,
                    batch_std_shape, batch_mean_shape,
                    running_std_shape, running_mean_shape, global_step_shape):
        """Per-channel grads take gamma's shape; dx takes x's shape."""
        validator.check("batch_std shape", batch_std_shape, "batch_mean shape", batch_mean_shape, Rel.EQ, self.name)
        validator.check("batch_std shape", batch_std_shape, "running_std shape", running_std_shape, Rel.EQ, self.name)
        validator.check("batch_std shape", batch_std_shape, "running_mean shape", running_mean_shape, Rel.EQ, self.name)
        validator.check("batch_std shape", batch_std_shape, "gamma shape", gamma_shape, Rel.EQ, self.name)
        validator.check("batch_std size", batch_std_shape[0], "dout channel size", dout_shape[self.channel_axis],
                        Rel.EQ, self.name)
        validator.check_integer("global step shape len", len(global_step_shape), 1, Rel.EQ, self.name)
        return gamma_shape, gamma_shape, gamma_shape, gamma_shape, x_shape

    def infer_dtype(self, dout_type, x_type, gamma_type,
                    batch_std_type, batch_mean_type,
                    running_std_type, running_mean_type, global_step_type):
        """All float inputs share float16/float32; grads keep gamma's dtype."""
        # NOTE(review): the five 4-argument checks below omit Rel/name and are
        # largely redundant with check_tensor_type_same -- presumably kept for
        # more specific error messages; confirm against the validator API.
        validator.check("batch_std type", batch_std_type,
                        "batch_mean type", batch_mean_type)
        validator.check("batch_std type", batch_std_type,
                        "gamma type", gamma_type)
        validator.check("batch_std type", batch_std_type,
                        "running_std type", running_std_type)
        validator.check("batch_std type", batch_std_type,
                        "running_mean type", running_mean_type)
        validator.check("batch_std_type", batch_std_type,
                        "dout type", dout_type)
        args = {"batch_std": batch_std_type, "batch_mean": batch_mean_type, "gamma": gamma_type,
                "running_std": running_std_type, "running_mean": running_mean_type, "dout": dout_type}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        validator.check_tensor_type_same({"global_step": global_step_type}, (mstype.int32,), self.name)
        return gamma_type, gamma_type, gamma_type, gamma_type, gamma_type
class BatchNormFoldD(PrimitiveWithInfer):
    """Performs grad of _BatchNormFold operation (Ascend custom-op variant).

    Consumes the input plus per-channel sums and the running statistics and
    produces the folded batch statistics together with updated running
    statistics.
    """

    @prim_attr_register
    def __init__(self, momentum=0.9, epsilon=1e-5, is_training=True, freeze_bn=0):
        """init _BatchNormFold layer"""
        # Import for its side effect: registers the Ascend custom-op
        # implementation of this primitive.
        from mindspore.ops._op_impl._custom_op import batchnorm_fold
        self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)
        self.epsilon = validator.check_float_positive('epsilon', epsilon, self.name)
        self.is_training = validator.check_value_type('is_training', is_training, (bool,), self.name)
        self.freeze_bn = validator.check_value_type('freeze_bn', freeze_bn, (int,), self.name)
        self.data_format = "NCHW"
        self.init_prim_io_names(inputs=['x', 'x_sum', 'x_square_sum', 'mean', 'variance'],
                                outputs=['batch_mean', 'batch_std', 'running_mean', 'running_std',
                                         'mean_updated', 'variance_updated'])

    def infer_shape(self, x_shape, x_sum_shape, x_square_sum_shape, mean_shape, variance_shape):
        """Validate shapes; mean/variance agree and match x's channel dim.

        NOTE(review): seven shapes are returned while init_prim_io_names
        declares six outputs -- presumably the leading x_shape is an extra
        pass-through; confirm against the custom-op registration.
        """
        # Fixed label: this check compares the *variance* shape; the message
        # previously said "gamma_shape", which made validation errors misleading.
        validator.check("mean shape", mean_shape, "variance shape", variance_shape, Rel.EQ, self.name)
        validator.check("mean_shape[0]", mean_shape[0], "input channel", x_shape[1], Rel.EQ, self.name)
        return x_shape, mean_shape, mean_shape, mean_shape, mean_shape, mean_shape, mean_shape

    def infer_dtype(self, x_type, x_sum_type, x_square_sum_type, mean_type, variance_type):
        """All outputs keep x's dtype; x/mean/variance share float16/float32."""
        validator.check("input type", x_type, "mean type", mean_type)
        validator.check("input type", x_type, "variance type", variance_type)
        args = {"x": x_type, "mean": mean_type, "variance": variance_type}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        return x_type, x_type, x_type, x_type, x_type, x_type, x_type
class BatchNormFoldGradD(PrimitiveWithInfer):
    """Performs grad of _BatchNormFoldGrad operation (Ascend custom-op variant)."""

    @prim_attr_register
    def __init__(self, epsilon=1e-5, is_training=True, freeze_bn=0):
        """init _BatchNormFoldGrad layer"""
        # Import for its side effect: registers the Ascend custom-op
        # implementation of this primitive.
        from mindspore.ops._op_impl._custom_op import batchnorm_fold_grad
        self.epsilon = validator.check_float_positive('epsilon', epsilon, self.name)
        self.is_training = validator.check_value_type('is_training', is_training, (bool,), self.name)
        self.freeze_bn = validator.check_value_type('freeze_bn', freeze_bn, (int,), self.name)
        self.init_prim_io_names(inputs=['d_batch_mean', 'd_batch_std', 'x', 'batch_mean', 'batch_std'],
                                outputs=['dx'])

    def infer_shape(self, d_batch_mean_shape, d_batch_std_shape, x_shape, batch_mean_shape, batch_std_shape):
        """dx takes x's shape; all per-channel inputs must agree.

        NOTE(review): these checks use the 4-argument validator.check form
        (no Rel/name) unlike the sibling BatchNormFoldGrad -- presumably the
        validator defaults to equality; confirm against the validator API.
        """
        validator.check("d_batch_mean shape", d_batch_mean_shape, "d_batch_std shape", d_batch_std_shape)
        validator.check("d_batch_mean shape", d_batch_mean_shape, "batch_mean shape", batch_mean_shape)
        validator.check("d_batch_mean shape", d_batch_mean_shape, "batch_std shape", batch_std_shape)
        validator.check("x_shape shape", d_batch_mean_shape[0], "input channel", x_shape[1])
        return x_shape

    def infer_dtype(self, d_batch_mean_type, d_batch_std_type, x_type, batch_mean_type, batch_std_type):
        """dx keeps x's dtype; x must be float16/float32."""
        validator.check("input type", x_type, "d_batch_mean type", d_batch_mean_type)
        validator.check("input type", x_type, "d_batch_std type", d_batch_std_type)
        validator.check("input type", x_type, "batch_mean type", batch_mean_type)
        validator.check("input type", x_type, "batch_std type", batch_std_type)
        # Only x's dtype is range-checked here; the checks above tie the other
        # inputs to it.
        args = {"input type": x_type}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        return x_type
class BatchNormFold2_D(PrimitiveWithInfer):
    """
    Scale the bias with a correction factor to the long term statistics
    prior to quantization. This ensures that there is no jitter in the quantized bias
    due to batch to batch variation.

    Ascend custom-op variant of BatchNormFold2 (no running_mean/global_step
    inputs).

    Inputs:
        - **x** (Tensor) - Tensor of shape :math:`(N, C)`.
        - **beta** (Tensor) - Tensor of shape :math:`(C,)`.
        - **gamma** (Tensor) - Tensor of shape :math:`(C,)`.
        - **batch_std** (Tensor) - Tensor of shape :math:`(C,)`.
        - **batch_mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **running_std** (Tensor) - Tensor of shape :math:`(C,)`.

    Outputs:
        - **y** (Tensor) - Tensor has the same shape as x.
    """
    # Channel dimension of the input layout.
    channel_axis = 1

    @prim_attr_register
    def __init__(self, freeze_bn=0):
        """init conv2d fold layer"""
        # Import for its side effect: registers the Ascend custom-op
        # implementation of this primitive.
        from mindspore.ops._op_impl._custom_op import batchnorm_fold2
        self.init_prim_io_names(inputs=['x', 'beta', 'gamma', 'batch_std', 'batch_mean', 'running_std'],
                                outputs=['y'])

    def infer_shape(self, x_shape, beta_shape, gamma_shape, batch_std_shape, running_std_shape, batch_mean_shape):
        """y keeps x's shape; every per-channel vector must agree."""
        validator.check("batch_std shape", batch_std_shape, "running_std shape", running_std_shape, Rel.EQ, self.name)
        validator.check("batch_std shape", batch_std_shape, "batch_mean shape", batch_mean_shape, Rel.EQ, self.name)
        validator.check("batch_std shape", batch_std_shape, "beta shape", beta_shape, Rel.EQ, self.name)
        # Fixed label: this check compares the *gamma* shape; the message
        # previously claimed "batch_mean shape", which made errors misleading.
        validator.check("batch_std shape", batch_std_shape, "gamma shape", gamma_shape, Rel.EQ, self.name)
        validator.check("batch_std_shape[0]", batch_std_shape[0], "x_shape channel size", x_shape[self.channel_axis],
                        Rel.EQ, self.name)
        return x_shape

    def infer_dtype(self, x_type, beta_type, gamma_type, batch_std_type, running_std_type, batch_mean_type):
        """y keeps x's dtype; all inputs share float16/float32."""
        args = {"batch_std": batch_std_type, "running_std": running_std_type, "batch_mean": batch_mean_type,
                "beta": beta_type, "gamma": gamma_type, "x": x_type}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        return x_type
class BatchNormFold2GradD(PrimitiveWithInfer):
    """Performs grad of CorrectionAddGrad operation (Ascend custom-op variant)."""
    # Channel dimension of the input layout.
    channel_axis = 1

    @prim_attr_register
    def __init__(self, freeze_bn=False):
        """init MulFold layer"""
        # Import for its side effect: registers the Ascend custom-op
        # implementation of this primitive.
        from mindspore.ops._op_impl._custom_op import batchnorm_fold2_grad
        self.freeze_bn = freeze_bn
        self.init_prim_io_names(
            inputs=['dout', 'dout_reduce', 'dout_x_reduce', 'gamma', 'batch_std', 'batch_mean', 'running_std'],
            outputs=['d_batch_std', 'd_batch_mean', 'd_gamma', 'dx'])

    def infer_shape(self, dout_shape, dout_reduce_shape, dout_x_reduce_shape, gamma_shape, batch_std_shape,
                    batch_mean_shape, running_std_shape):
        """Per-channel grads take gamma's shape; dx takes dout's shape."""
        validator.check("batch_std shape", batch_std_shape, "batch_mean shape", batch_mean_shape, Rel.EQ, self.name)
        validator.check("batch_std shape", batch_std_shape, "running_std shape", running_std_shape, Rel.EQ, self.name)
        validator.check("batch_std shape", batch_std_shape, "gamma shape", gamma_shape, Rel.EQ, self.name)
        validator.check("batch_std size", batch_std_shape[0], "dout channel size", dout_shape[self.channel_axis],
                        Rel.EQ, self.name)
        return gamma_shape, gamma_shape, gamma_shape, dout_shape

    def infer_dtype(self, dout_type, dout_reduce_type, dout_x_reduce_type, gamma_type, batch_std_type,
                    batch_mean_type, running_std_type):
        """All outputs keep gamma's dtype; float inputs share float16/float32."""
        # NOTE(review): the 4-argument checks below omit Rel/name and are
        # largely redundant with check_tensor_type_same -- presumably kept for
        # more specific error messages; confirm against the validator API.
        validator.check("batch_std type", batch_std_type,
                        "batch_mean type", batch_mean_type)
        validator.check("batch_std type", batch_std_type,
                        "gamma type", gamma_type)
        validator.check("batch_std type", batch_std_type,
                        "running_std type", running_std_type)
        validator.check("batch_std_type", batch_std_type,
                        "dout type", dout_type)
        args = {"batch_std": batch_std_type, "batch_mean": batch_mean_type, "gamma": gamma_type,
                "running_std": running_std_type, "dout": dout_type}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        return gamma_type, gamma_type, gamma_type, gamma_type
class BatchNormFold2GradReduce(PrimitiveWithInfer):
    """Performs grad of CorrectionAddGrad operation (per-channel reduction step)."""
    # Channel dimension of the input layout.
    channel_axis = 1

    @prim_attr_register
    def __init__(self, freeze_bn=False):
        """init MulFold layer"""
        # Import for its side effect: registers the Ascend custom-op
        # implementation of this primitive.
        from mindspore.ops._op_impl._custom_op import batchnorm_fold2_grad_reduce
        self.freeze_bn = freeze_bn
        self.init_prim_io_names(inputs=['dout', 'x'],
                                outputs=['dout_reduce', 'dout_x_reduce'])

    def infer_shape(self, dout_shape, x_shape):
        """Both outputs are 1-D, sized by dout's channel dimension."""
        validator.check("dout shape", dout_shape, "x shape", x_shape, Rel.EQ, self.name)
        return (dout_shape[self.channel_axis],), (dout_shape[self.channel_axis],)

    def infer_dtype(self, dout_type, x_type):
        """Outputs keep dout's dtype; dout and x must share a dtype."""
        validator.check("dout type", dout_type, "x type", x_type)
        return dout_type, dout_type
| 50.81405 | 120 | 0.641356 |
e468a1e90ac113a11395795ea2aad364ddc10b59 | 1,918 | py | Python | src_py/hat/syslog/server/common.py | hat-open/hat-syslog | c4e8bf31d49214adc4319235124b29a8fab4f27d | [
"Apache-2.0"
] | 1 | 2022-02-01T13:42:53.000Z | 2022-02-01T13:42:53.000Z | src_py/hat/syslog/server/common.py | hat-open/hat-syslog | c4e8bf31d49214adc4319235124b29a8fab4f27d | [
"Apache-2.0"
] | null | null | null | src_py/hat/syslog/server/common.py | hat-open/hat-syslog | c4e8bf31d49214adc4319235124b29a8fab4f27d | [
"Apache-2.0"
] | null | null | null | """Common data structures and functions"""
import typing
from hat import json
from hat.syslog.common import (Facility,
Msg,
Severity,
msg_from_json,
msg_to_json)
from hat.syslog.common import * # NOQA
class Entry(typing.NamedTuple):
    """Single stored syslog entry: a parsed message plus server metadata."""
    id: int            # entry identifier
    timestamp: float   # entry timestamp (presumably seconds since epoch -- confirm)
    msg: Msg           # parsed syslog message
class Filter(typing.NamedTuple):
    """Query filter for stored entries; a ``None`` field imposes no constraint."""
    max_results: typing.Optional[int] = None              # limit on number of returned entries
    last_id: typing.Optional[int] = None                  # pagination cursor on entry id (semantics in backend -- confirm)
    entry_timestamp_from: typing.Optional[float] = None   # lower bound on entry timestamp
    entry_timestamp_to: typing.Optional[float] = None     # upper bound on entry timestamp
    facility: typing.Optional[Facility] = None            # match syslog facility
    severity: typing.Optional[Severity] = None            # match syslog severity
    hostname: typing.Optional[str] = None                 # match message hostname
    app_name: typing.Optional[str] = None                 # match message application name
    procid: typing.Optional[str] = None                   # match message process id
    msgid: typing.Optional[str] = None                    # match message id
    msg: typing.Optional[str] = None                      # match message text
def filter_to_json(filter: Filter) -> json.Data:
    """Serialize a filter to JSON data, mapping enum members to their names."""
    data = dict(filter._asdict())
    data['facility'] = filter.facility.name if filter.facility else None
    data['severity'] = filter.severity.name if filter.severity else None
    return data
def filter_from_json(json_filter: json.Data) -> Filter:
    """Create a filter from JSON data, resolving enum names back to members."""
    kwargs = dict(json_filter)
    facility_name = kwargs['facility']
    severity_name = kwargs['severity']
    kwargs['facility'] = Facility[facility_name] if facility_name else None
    kwargs['severity'] = Severity[severity_name] if severity_name else None
    return Filter(**kwargs)
def entry_to_json(entry: Entry) -> json.Data:
    """Serialize an entry to JSON data, converting the nested message."""
    data = dict(entry._asdict())
    data['msg'] = msg_to_json(entry.msg)
    return data
def entry_from_json(json_entry: json.Data) -> Entry:
    """Create an entry from JSON data, rebuilding the nested message."""
    kwargs = dict(json_entry)
    kwargs['msg'] = msg_from_json(kwargs['msg'])
    return Entry(**kwargs)
| 30.935484 | 75 | 0.629301 |
ac10dac94ce2282f9e1a9ced29ac6a5e820bf53a | 3,603 | py | Python | src/lib/Bcfg2/version.py | stpierre/bcfg2 | 363ad4fd2b36febbbe6b766dac9e76c572048e08 | [
"mpich2"
] | null | null | null | src/lib/Bcfg2/version.py | stpierre/bcfg2 | 363ad4fd2b36febbbe6b766dac9e76c572048e08 | [
"mpich2"
] | null | null | null | src/lib/Bcfg2/version.py | stpierre/bcfg2 | 363ad4fd2b36febbbe6b766dac9e76c572048e08 | [
"mpich2"
] | null | null | null | import re
__version__ = "1.3.0"
class Bcfg2VersionInfo(tuple):
    """Comparable Bcfg2 version parsed from a "major.minor.rest" string.

    ``rest`` is either a plain micro number ("0" -> a final release) or a
    micro/releaselevel/serial triple such as "0rc1" or "0pre2".  Instances
    order release levels as pre < rc < final and may be compared directly
    against version strings.  ``None`` (an unreported client version) sorts
    below every real version.
    """

    # Matches e.g. "0rc1" -> micro "0", releaselevel "rc", serial 1.
    v_re = re.compile(r'(\d+)(\w+)(\d+)')

    def __new__(cls, vstr):
        (major, minor, rest) = vstr.split(".")
        match = cls.v_re.match(rest)
        if match:
            micro, releaselevel, serial = match.groups()
        else:
            # No release-level suffix: treat as a final release.
            micro = rest
            releaselevel = 'final'
            serial = 0
        return tuple.__new__(cls, [int(major), int(minor), int(micro),
                                   releaselevel, int(serial)])

    def __init__(self, vstr):
        tuple.__init__(self)
        self.major, self.minor, self.micro, self.releaselevel, self.serial = \
            tuple(self)

    # Defining __eq__ would otherwise set __hash__ to None (Python 3),
    # silently making instances unhashable.  The tuple hash is consistent
    # with __eq__ for version tuples.
    __hash__ = tuple.__hash__

    def __repr__(self):
        return "(major=%s, minor=%s, micro=%s, releaselevel=%s, serial=%s)" % \
            tuple(self)

    @classmethod
    def _coerce(cls, version):
        """Return *version* with strings parsed into version tuples."""
        if isinstance(version, str):
            return cls(version)
        return version

    def _release_cmp(self, r1, r2):
        """cmp-style comparison of release levels: pre < rc < final."""
        if r1 == r2:
            return 0
        elif r1 == "final":
            return -1
        elif r2 == "final":
            return 1
        elif r1 == "rc":
            return -1
        elif r2 == "rc":
            return 1
        # should never get to anything past this point
        elif r1 == "pre":
            return -1
        elif r2 == "pre":
            return 1
        else:
            # wtf?
            return 0

    def __gt__(self, version):
        if version is None:
            # older bcfg2 clients didn't report their version, so we
            # handle this case specially and assume that any reported
            # version is newer than any indeterminate version
            return True
        version = self._coerce(version)
        try:
            for i in range(3):
                if self[i] > version[i]:
                    return True
                elif self[i] < version[i]:
                    return False
            rel = self._release_cmp(self[3], version[3])
            if rel < 0:
                return True
            elif rel > 0:
                return False
            return self[4] > version[4]
        except TypeError:
            return self > Bcfg2VersionInfo(version)

    def __lt__(self, version):
        if version is None:
            # an indeterminate version is never greater than a real one
            return False
        version = self._coerce(version)
        try:
            for i in range(3):
                if self[i] < version[i]:
                    return True
                elif self[i] > version[i]:
                    return False
            rel = self._release_cmp(self[3], version[3])
            if rel > 0:
                return True
            elif rel < 0:
                return False
            return self[4] < version[4]
        except TypeError:
            return self < Bcfg2VersionInfo(version)

    def __eq__(self, version):
        if version is None:
            # an indeterminate version never equals a real one
            return False
        # Fixed: strings must be parsed first.  Element-wise comparison of an
        # int against a character is simply False (not a TypeError), so the
        # old recursion fallback never fired and string operands always
        # compared unequal -- inconsistent with __gt__/__lt__.
        version = self._coerce(version)
        try:
            rv = True
            for i in range(len(self)):
                rv &= self[i] == version[i]
            return rv
        except TypeError:
            return self == Bcfg2VersionInfo(version)

    def __ge__(self, version):
        return not self < version

    def __le__(self, version):
        return not self > version
f3a12d9eadb9dd7bb950a91319642d37a5ce1350 | 634 | py | Python | backend/manage.py | crowdbotics-apps/arvitech-29145 | 4e4eb8e3ba446841620f820b310ee859f3914fe5 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/manage.py | crowdbotics-apps/arvitech-29145 | 4e4eb8e3ba446841620f820b310ee859f3914fe5 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/manage.py | crowdbotics-apps/arvitech-29145 | 4e4eb8e3ba446841620f820b310ee859f3914fe5 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's administrative command-line utility."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'arvitech_29145.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the most common causes, chaining the
        # original failure for debugging.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 28.818182 | 78 | 0.68612 |
025052b61f49455b40bf9e453e1ae34757415248 | 829 | py | Python | xitorch/_impls/integrate/fixed_quad.py | Jaikinator/xitorch | 053db8d27a7777baa7f572c2d37004e788ff4cb8 | [
"MIT"
] | null | null | null | xitorch/_impls/integrate/fixed_quad.py | Jaikinator/xitorch | 053db8d27a7777baa7f572c2d37004e788ff4cb8 | [
"MIT"
] | null | null | null | xitorch/_impls/integrate/fixed_quad.py | Jaikinator/xitorch | 053db8d27a7777baa7f572c2d37004e788ff4cb8 | [
"MIT"
] | null | null | null | import numpy as np
import torch
# no gradient flowing in the following functions
def leggauss(fcn, xl, xu, params, n=100, **unused):
"""
Performing 1D integration using Legendre-Gaussian quadrature
Keyword arguments
-----------------
n: int
The number of integration points.
"""
xlg, wlg = np.polynomial.legendre.leggauss(n)
ndim = len(xu.shape)
xlg = torch.tensor(xlg, dtype=xu.dtype, device=xu.device)[(...,) + (None,) * ndim] # (n, *nx)
wlg = torch.tensor(wlg, dtype=xu.dtype, device=xu.device)[(...,) + (None,) * ndim] # (n, *nx)
wlg *= 0.5 * (xu - xl)
xs = xlg * (0.5 * (xu - xl)) + (0.5 * (xu + xl)) # (n, *nx)
res = wlg[0] * fcn(xs[0], *params)
for i in range(1, n):
res += wlg[i] * fcn(xs[i], *params)
return res
| 31.884615 | 99 | 0.54041 |
8cd049ee06fdc6be010122ce17da2ee562959987 | 30,328 | py | Python | libs/canvas.py | Harshwardhan619/labelImg-modified | 421b3bd69150040102e8a5067fd726261bdae689 | [
"MIT"
] | null | null | null | libs/canvas.py | Harshwardhan619/labelImg-modified | 421b3bd69150040102e8a5067fd726261bdae689 | [
"MIT"
] | 2 | 2021-03-31T19:21:04.000Z | 2021-12-13T20:28:48.000Z | libs/canvas.py | Harshwardhan619/labelImg-modified | 421b3bd69150040102e8a5067fd726261bdae689 | [
"MIT"
] | null | null | null | try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
#from PyQt4.QtOpenGL import *
from libs.shape import Shape
from libs.utils import distance
CURSOR_DEFAULT = Qt.ArrowCursor
CURSOR_POINT = Qt.PointingHandCursor
CURSOR_DRAW = Qt.CrossCursor
CURSOR_MOVE = Qt.ClosedHandCursor
CURSOR_GRAB = Qt.OpenHandCursor
# class Canvas(QGLWidget):
class Canvas(QWidget):
    """Image canvas widget: paints the pixmap and lets the user create,
    select and move label shapes."""

    # Signals consumed by the hosting main window.
    zoomRequest = pyqtSignal(int)
    scrollRequest = pyqtSignal(int, int)
    newShape = pyqtSignal()
    selectionChanged = pyqtSignal(bool)
    shapeMoved = pyqtSignal()
    drawingPolygon = pyqtSignal(bool)

    # Interaction modes: CREATE draws new shapes, EDIT selects/moves them.
    CREATE, EDIT = list(range(2))

    # Proximity threshold (presumably in pixels -- used by closeEnough; confirm).
    epsilon = 11.0
    def __init__(self, *args, **kwargs):
        """Initialize canvas state; extra args are forwarded to QWidget."""
        super(Canvas, self).__init__(*args, **kwargs)
        # Initialise local state.
        self.mode = self.EDIT
        self.shapes = []                 # all shapes currently on the canvas
        self.current = None              # shape being drawn (CREATE mode)
        self.selectedShape = None  # save the selected shape here
        self.selectedShapeCopy = None    # copy used for right-button drag
        # self.drawingLineColor = QColor(0, 0, 255)
        self.drawingLineColor = QColor(255, 255, 255)
        self.drawingRectColor = QColor(0, 0, 255)
        self.line = Shape(line_color=self.drawingLineColor)  # rubber-band line
        self.prevPoint = QPointF()
        self.offsets = QPointF(), QPointF()
        self.scale = 1.0                 # zoom factor
        self.pixmap = QPixmap()          # image being annotated
        self.visible = {}                # shape -> visibility flag
        self._hideBackround = False
        self.hideBackround = False
        self.hShape = None               # shape under the cursor (hover)
        self.hVertex = None              # hovered vertex index, if any
        self._painter = QPainter()
        self._cursor = CURSOR_DEFAULT
        # Menus: (shape context menu, copy/move context menu -- confirm roles).
        self.menus = (QMenu(), QMenu())
        # Set widget options.
        self.setMouseTracking(True)
        self.setFocusPolicy(Qt.WheelFocus)
        self.verified = False
        self.drawSquare = False          # constrain new shapes to squares
def setDrawingColor(self, qColor):
self.drawingLineColor = qColor
self.drawingRectColor = qColor
    def enterEvent(self, ev):
        """Qt enter event: re-apply the canvas's remembered cursor shape."""
        self.overrideCursor(self._cursor)
    def leaveEvent(self, ev):
        """Qt leave event: restore the cursor via restoreCursor()."""
        self.restoreCursor()
    def focusOutEvent(self, ev):
        """Qt focus-out event: restore the cursor via restoreCursor()."""
        self.restoreCursor()
def isVisible(self, shape):
return self.visible.get(shape, True)
    def drawing(self):
        """Return True when the canvas is in shape-creation mode."""
        return self.mode == self.CREATE
    def editing(self):
        """Return True when the canvas is in selection/editing mode."""
        return self.mode == self.EDIT
def setEditing(self, value=True):
self.mode = self.EDIT if value else self.CREATE
if not value: # Create
self.unHighlight()
self.deSelectShape()
self.prevPoint = QPointF()
self.repaint()
def unHighlight(self):
if self.hShape:
self.hShape.highlightClear()
self.hVertex = self.hShape = None
    def selectedVertex(self):
        """Return True when a shape vertex is currently hovered/selected."""
        return self.hVertex is not None
    def mouseMoveEvent(self, ev):
        """Update line with last point and current coordinates.

        Dispatches on the current interaction: drawing a new shape, dragging a
        copy (right button), moving a vertex/shape (left button), or plain
        hovering (highlighting whatever is under the cursor).
        """
        pos = self.transformPos(ev.pos())
        # return
        # Update coordinates in status bar if image is opened
        window = self.parent().window()
        if window.filePath is not None:
            self.parent().window().labelCoordinates.setText(
                'X: %d; Y: %d' % (pos.x(), pos.y()))
        # Polygon drawing.
        if self.drawing():
            self.overrideCursor(CURSOR_DRAW)
            if self.current:
                color = self.drawingLineColor
                if self.outOfPixmap(pos):
                    # Don't allow the user to draw outside the pixmap.
                    # Project the point to the pixmap's edges.
                    pos = self.intersectionPoint(self.current[-1], pos)
                elif len(self.current) > 1 and self.closeEnough(pos, self.current[0]):
                    # Attract line to starting point and colorise to alert the
                    # user:
                    pos = self.current[0]
                    color = self.current.line_color
                    self.overrideCursor(CURSOR_POINT)
                    self.current.highlightVertex(0, Shape.NEAR_VERTEX)
                # print(pos)
                if self.drawSquare:
                    # Constrain the rubber-band endpoint to keep a square.
                    initPos = self.current[0]
                    minX = initPos.x()
                    minY = initPos.y()
                    min_size = min(abs(pos.x() - minX), abs(pos.y() - minY))
                    directionX = -1 if pos.x() - minX < 0 else 1
                    directionY = -1 if pos.y() - minY < 0 else 1
                    self.line[1] = QPointF(minX + directionX * min_size, minY + directionY * min_size)
                # NOTE(review): the non-square branch that updated
                # self.line[1] is commented out below, so the rubber-band
                # endpoint is only refreshed in square mode — confirm intended.
                # else:
                #     # print("else")
                #     self.line[1] = pos
                self.line.line_color = color
                self.prevPoint = QPointF()
                self.current.highlightClear()
            else:
                self.prevPoint = pos
            self.repaint()
            return
        # Polygon copy moving.
        if Qt.RightButton & ev.buttons():
            if self.selectedShapeCopy and self.prevPoint:
                self.overrideCursor(CURSOR_MOVE)
                self.boundedMoveShape(self.selectedShapeCopy, pos)
                self.repaint()
            elif self.selectedShape:
                # First motion of a right-drag: create the shadow copy.
                self.selectedShapeCopy = self.selectedShape.copy()
                self.repaint()
            return
        # Polygon/Vertex moving.
        if Qt.LeftButton & ev.buttons():
            if self.selectedVertex():
                self.boundedMoveVertex(pos, ev)
                self.shapeMoved.emit()
                self.repaint()
            elif self.selectedShape and self.prevPoint:
                self.overrideCursor(CURSOR_MOVE)
                self.boundedMoveShape(self.selectedShape, pos)
                self.shapeMoved.emit()
                self.repaint()
            return
        # Just hovering over the canvas, 2 posibilities:
        # - Highlight shapes
        # - Highlight vertex
        # Update shape/vertex fill and tooltip value accordingly.
        self.setToolTip("Image")
        for shape in reversed([s for s in self.shapes if self.isVisible(s)]):
            # Look for a nearby vertex to highlight. If that fails,
            # check if we happen to be inside a shape.
            index = shape.nearestVertex(pos, self.epsilon)
            if index is not None:
                if self.selectedVertex():
                    self.hShape.highlightClear()
                self.hVertex, self.hShape = index, shape
                shape.highlightVertex(index, shape.MOVE_VERTEX)
                self.overrideCursor(CURSOR_POINT)
                self.setToolTip("Click & drag to move point")
                self.setStatusTip(self.toolTip())
                self.update()
                break
            elif shape.containsPoint(pos):
                if self.selectedVertex():
                    self.hShape.highlightClear()
                self.hVertex, self.hShape = None, shape
                self.setToolTip(
                    "Click & drag to move shape '%s'" % shape.label)
                self.setStatusTip(self.toolTip())
                self.overrideCursor(CURSOR_GRAB)
                self.update()
                break
        else:  # Nothing found, clear highlights, reset state.
            if self.hShape:
                self.hShape.highlightClear()
                self.update()
            self.hVertex, self.hShape = None, None
            self.overrideCursor(CURSOR_DEFAULT)
    def mousePressEvent(self, ev):
        """Start drawing (CREATE mode) or select a shape under the cursor (EDIT mode)."""
        pos = self.transformPos(ev.pos())
        if ev.button() == Qt.LeftButton:
            if self.drawing():
                self.handleDrawing(pos)
            else:
                self.selectShapePoint(pos)
                self.prevPoint = pos
                self.repaint()
        elif ev.button() == Qt.RightButton and self.editing():
            # Right click in EDIT mode: select so the context menu applies.
            self.selectShapePoint(pos)
            self.prevPoint = pos
            self.repaint()
    def mouseReleaseEvent(self, ev):
        """Show the context menu after a right-drag, or restore the hover cursor."""
        if ev.button() == Qt.RightButton:
            # Menu index 1 when a drag-copy exists, else the plain menu.
            menu = self.menus[bool(self.selectedShapeCopy)]
            self.restoreCursor()
            if not menu.exec_(self.mapToGlobal(ev.pos()))\
               and self.selectedShapeCopy:
                # Cancel the move by deleting the shadow copy.
                self.selectedShapeCopy = None
                self.repaint()
        elif ev.button() == Qt.LeftButton and self.selectedShape:
            if self.selectedVertex():
                self.overrideCursor(CURSOR_POINT)
            else:
                self.overrideCursor(CURSOR_GRAB)
        elif ev.button() == Qt.LeftButton:
            # NOTE(review): `pos` is computed but unused since the drawing
            # call below is commented out — confirm this branch is needed.
            pos = self.transformPos(ev.pos())
            # if self.drawing():
            #     self.handleDrawing(pos)
    def endMove(self, copy=False):
        """Commit a right-drag move: append the shadow copy (copy=True) or
        move the original shape onto the copy's position."""
        assert self.selectedShape and self.selectedShapeCopy
        shape = self.selectedShapeCopy
        #del shape.fill_color
        #del shape.line_color
        if copy:
            self.shapes.append(shape)
            self.selectedShape.selected = False
            self.selectedShape = shape
            self.repaint()
        else:
            # Move: adopt the copy's points, keep the original shape object.
            self.selectedShape.points = [p for p in shape.points]
        self.selectedShapeCopy = None
    def hideBackroundShapes(self, value):
        """Enable/disable hiding of unselected shapes while one is selected."""
        self.hideBackround = value
        if self.selectedShape:
            # Only hide other shapes if there is a current selection.
            # Otherwise the user will not be able to select a shape.
            self.setHiding(True)
            self.repaint()
def handleDrawing(self, pos):
# if self.current and self.current.reachMaxPoints() is False:
# initPos = self.current[0]
# # minX = initPos.x()
# # minY = initPos.y()
# # targetPos = self.line[1]
# # maxX = targetPos.x()
# # maxY = targetPos.y()
# # maxX = pos.x()
# # maxY = pos.y()
# # self.current.addPoint(QPointF(maxX, minY))
# # self.current.addPoint(QPointF(minX, maxY))
# # print(minX,maxX,minY,maxY)
# if(self.closeEnough(pos, initPos)):
# self.finalise()
# else:
# print(self.current)
# self.current.addPoint(pos)
if self.current and self.current.reachMaxPoints() is False:
self.current.addPoint(pos)
if self.current.isLastPoint():
self.minimumBoundingRectangle()
self.finalise()
elif not self.outOfPixmap(pos):
self.current = Shape()
self.current.addPoint(pos)
self.line.points = [pos, pos]
self.setHiding()
self.drawingPolygon.emit(True)
self.update()
    def setHiding(self, enable=True):
        """Apply (or lift) background-shape hiding according to the user setting."""
        self._hideBackround = self.hideBackround if enable else False
    def canCloseShape(self):
        """Return True when the in-progress polygon has enough points to close."""
        return self.drawing() and self.current and len(self.current) > 2
    def mouseDoubleClickEvent(self, ev):
        """Close the in-progress polygon on double click."""
        # We need at least 4 points here, since the mousePress handler
        # adds an extra one before this handler is called.
        if self.canCloseShape() and len(self.current) > 3:
            self.current.popPoint()
            self.finalise()
    def selectShape(self, shape):
        """Make *shape* the selected shape and notify listeners."""
        self.deSelectShape()
        shape.selected = True
        self.selectedShape = shape
        self.setHiding()
        self.selectionChanged.emit(True)
        self.update()
    def selectShapePoint(self, point):
        """Select the first shape created which contains this point."""
        self.deSelectShape()
        if self.selectedVertex():  # A vertex is marked for selection.
            index, shape = self.hVertex, self.hShape
            shape.highlightVertex(index, shape.MOVE_VERTEX)
            self.selectShape(shape)
            return
        # Iterate in reverse so the topmost (most recently drawn) shape wins.
        for shape in reversed(self.shapes):
            if self.isVisible(shape) and shape.containsPoint(point):
                self.selectShape(shape)
                self.calculateOffsets(shape, point)
                return
def calculateOffsets(self, shape, point):
rect = shape.boundingRect()
x1 = rect.x() - point.x()
y1 = rect.y() - point.y()
x2 = (rect.x() + rect.width()) - point.x()
y2 = (rect.y() + rect.height()) - point.y()
self.offsets = QPointF(x1, y1), QPointF(x2, y2)
def snapPointToCanvas(self, x, y):
"""
Moves a point x,y to within the boundaries of the canvas.
:return: (x,y,snapped) where snapped is True if x or y were changed, False if not.
"""
if x < 0 or x > self.pixmap.width() or y < 0 or y > self.pixmap.height():
x = max(x, 0)
y = max(y, 0)
x = min(x, self.pixmap.width())
y = min(y, self.pixmap.height())
return x, y, True
return x, y, False
def boundedMoveVertex(self, pos, ev):
index, shape = self.hVertex, self.hShape
# print(pos, index, shape.__dict__)
# help()
point = shape[index]
if self.outOfPixmap(pos):
pos = self.intersectionPoint(point, pos)
if self.drawSquare:
opposite_point_index = (index + 2) % 4
opposite_point = shape[opposite_point_index]
min_size = min(abs(pos.x() - opposite_point.x()), abs(pos.y() - opposite_point.y()))
directionX = -1 if pos.x() - opposite_point.x() < 0 else 1
directionY = -1 if pos.y() - opposite_point.y() < 0 else 1
shiftPos = QPointF(opposite_point.x() + directionX * min_size - point.x(),
opposite_point.y() + directionY * min_size - point.y())
else:
shiftPos = pos - point
shape.moveVertexBy(index, shiftPos)
# print(mods.__dict__)
# if Qt.ControlModifier == int(mods):
# # self.zoomRequest.emit(v_delta)
# n_points = len(shape.points)
# lindex = (index + 1) % n_points
# rindex = (index - 1 + n_points) % n_points
# lshift = None
# rshift = None
# if index % 2 == 0:
# rshift = QPointF(shiftPos.x(), 0)
# lshift = QPointF(0, shiftPos.y())
# else:
# lshift = QPointF(shiftPos.x(), 0)
# rshift = QPointF(0, shiftPos.y())
# shape.moveVertexBy(rindex, rshift)
# shape.moveVertexBy(lindex, lshift)
# def boundedMoveVertex(self, pos):
# index, shape = self.hVertex, self.hShape
# point = shape[index]
# if self.outOfPixmap(pos):
# pos = self.intersectionPoint(point, pos)
# if self.drawSquare:
# opposite_point_index = (index + 2) % 4
# opposite_point = shape[opposite_point_index]
# min_size = min(abs(pos.x() - opposite_point.x()), abs(pos.y() - opposite_point.y()))
# directionX = -1 if pos.x() - opposite_point.x() < 0 else 1
# directionY = -1 if pos.y() - opposite_point.y() < 0 else 1
# shiftPos = QPointF(opposite_point.x() + directionX * min_size - point.x(),
# opposite_point.y() + directionY * min_size - point.y())
# else:
# shiftPos = pos - point
# shape.moveVertexBy(index, shiftPos)
# lindex = (index + 1) % 4
# rindex = (index + 3) % 4
# lshift = None
# rshift = None
# if index % 2 == 0:
# rshift = QPointF(shiftPos.x(), 0)
# lshift = QPointF(0, shiftPos.y())
# else:
# lshift = QPointF(shiftPos.x(), 0)
# rshift = QPointF(0, shiftPos.y())
# shape.moveVertexBy(rindex, rshift)
# shape.moveVertexBy(lindex, lshift)
    def boundedMoveShape(self, shape, pos):
        """Translate *shape* so it follows *pos*, clamped to the pixmap.

        Returns True if the shape actually moved, False otherwise.
        """
        if self.outOfPixmap(pos):
            return False  # No need to move
        # offsets[0]/offsets[1] locate the bounding rect relative to the
        # grab point; clamp so neither corner leaves the pixmap.
        o1 = pos + self.offsets[0]
        if self.outOfPixmap(o1):
            pos -= QPointF(min(0, o1.x()), min(0, o1.y()))
        o2 = pos + self.offsets[1]
        if self.outOfPixmap(o2):
            pos += QPointF(min(0, self.pixmap.width() - o2.x()),
                           min(0, self.pixmap.height() - o2.y()))
        # The next line tracks the new position of the cursor
        # relative to the shape, but also results in making it
        # a bit "shaky" when nearing the border and allows it to
        # go outside of the shape's area for some reason. XXX
        #self.calculateOffsets(self.selectedShape, pos)
        dp = pos - self.prevPoint
        if dp:
            shape.moveBy(dp)
            self.prevPoint = pos
            return True
        return False
    def deSelectShape(self):
        """Clear the current selection (if any) and notify listeners."""
        if self.selectedShape:
            self.selectedShape.selected = False
            self.selectedShape = None
            self.setHiding(False)
            self.selectionChanged.emit(False)
            self.update()
def deleteSelected(self):
if self.selectedShape:
shape = self.selectedShape
self.shapes.remove(self.selectedShape)
self.selectedShape = None
self.update()
return shape
    def copySelectedShape(self):
        """Duplicate the selected shape, select the copy, and nudge it so the
        two shapes do not overlap exactly. Returns the copy (or None)."""
        if self.selectedShape:
            shape = self.selectedShape.copy()
            self.deSelectShape()
            self.shapes.append(shape)
            shape.selected = True
            self.selectedShape = shape
            self.boundedShiftShape(shape)
            return shape
    def boundedShiftShape(self, shape):
        # Try to move in one direction, and if it fails in another.
        # Give up if both fail.
        point = shape[0]
        offset = QPointF(2.0, 2.0)
        self.calculateOffsets(shape, point)
        self.prevPoint = point
        if not self.boundedMoveShape(shape, point - offset):
            self.boundedMoveShape(shape, point + offset)
    def paintEvent(self, event):
        """Render the pixmap, all visible shapes, the in-progress shape and
        rubber-band line, the crosshair guides, and the verified background."""
        if not self.pixmap:
            return super(Canvas, self).paintEvent(event)
        p = self._painter
        p.begin(self)
        p.setRenderHint(QPainter.Antialiasing)
        p.setRenderHint(QPainter.HighQualityAntialiasing)
        p.setRenderHint(QPainter.SmoothPixmapTransform)
        # Paint in image coordinates: scale, then center the pixmap.
        p.scale(self.scale, self.scale)
        p.translate(self.offsetToCenter())
        p.drawPixmap(0, 0, self.pixmap)
        Shape.scale = self.scale
        # print("shapes : ", self.shapes)
        for shape in self.shapes:
            if (shape.selected or not self._hideBackround) and self.isVisible(shape):
                shape.fill = shape.selected or shape == self.hShape
                shape.paint(p, False)
        if self.current:
            # print("shape")
            self.current.paint(p, True)
            # print("line")
            self.line.paint(p, True)
        if self.selectedShapeCopy:
            self.selectedShapeCopy.paint(p, False)
        # Paint rect
        if self.current is not None and len(self.line) == 2:
            # print("asdfghjk", self.current)
            # leftTop = self.line[0]
            # print(len(self.current.points))
            # line_____ = ""
            # for i in range(len(self.current.points)):
            #     line_____ += str(int(self.current.points[i].x())) + " " + str(int(self.current.points[i].y())) + " ::: "
            # print(line_____)
            # NOTE(review): rect geometry is computed but drawRect below is
            # commented out, so only pen/brush state is set here — confirm.
            leftTop = self.line[0]
            rightBottom = self.line[1]
            rectWidth = rightBottom.x() - leftTop.x()
            rectHeight = rightBottom.y() - leftTop.y()
            p.setPen(self.drawingRectColor)
            brush = QBrush(Qt.BDiagPattern)
            p.setBrush(brush)
            # p.drawRect(leftTop.x(), leftTop.y(), rectWidth, rectHeight)
        if self.drawing() and not self.prevPoint.isNull() and not self.outOfPixmap(self.prevPoint):
            # Crosshair guides through the cursor while drawing.
            p.setPen(QColor(0, 0, 0))
            p.drawLine(self.prevPoint.x(), 0, self.prevPoint.x(), self.pixmap.height())
            p.drawLine(0, self.prevPoint.y(), self.pixmap.width(), self.prevPoint.y())
        self.setAutoFillBackground(True)
        # Green-ish background marks a verified image, grey otherwise.
        if self.verified:
            pal = self.palette()
            pal.setColor(self.backgroundRole(), QColor(184, 239, 38, 128))
            self.setPalette(pal)
        else:
            pal = self.palette()
            pal.setColor(self.backgroundRole(), QColor(232, 232, 232, 255))
            self.setPalette(pal)
        p.end()
    def transformPos(self, point):
        """Convert from widget-logical coordinates to painter-logical coordinates."""
        # Inverse of the scale+translate applied in paintEvent.
        return point / self.scale - self.offsetToCenter()
    def offsetToCenter(self):
        """Return the translation (in scaled units) that centers the pixmap."""
        s = self.scale
        area = super(Canvas, self).size()
        w, h = self.pixmap.width() * s, self.pixmap.height() * s
        aw, ah = area.width(), area.height()
        # Center only along axes where the widget is larger than the pixmap.
        x = (aw - w) / (2 * s) if aw > w else 0
        y = (ah - h) / (2 * s) if ah > h else 0
        return QPointF(x, y)
    def outOfPixmap(self, p):
        """Return True if point *p* lies outside the pixmap (edges inclusive)."""
        w, h = self.pixmap.width(), self.pixmap.height()
        return not (0 <= p.x() <= w and 0 <= p.y() <= h)
    def finalise(self):
        """Commit the in-progress shape to the shape list; discard degenerate ones."""
        assert self.current
        if self.current.points[0] == self.current.points[-1]:
            # Degenerate shape (first point == last point): throw it away.
            self.current = None
            self.drawingPolygon.emit(False)
            self.update()
            return
        self.current.close()
        self.shapes.append(self.current)
        self.current = None
        self.setHiding(False)
        self.newShape.emit()
        self.update()
    def closeEnough(self, p1, p2):
        """Return True if p1 and p2 are within `epsilon` (Euclidean distance)."""
        #d = distance(p1 - p2)
        #m = (p1-p2).manhattanLength()
        # print "d %.2f, m %d, %.2f" % (d, m, d - m)
        return distance(p1 - p2) < self.epsilon
    def intersectionPoint(self, p1, p2):
        """Project the segment p1->p2 onto the nearest image edge and return
        the intersection point (used to clamp drawing to the pixmap)."""
        # Cycle through each image edge in clockwise fashion,
        # and find the one intersecting the current line segment.
        # http://paulbourke.net/geometry/lineline2d/
        size = self.pixmap.size()
        points = [(0, 0),
                  (size.width(), 0),
                  (size.width(), size.height()),
                  (0, size.height())]
        x1, y1 = p1.x(), p1.y()
        x2, y2 = p2.x(), p2.y()
        # Pick the intersection closest (by edge-midpoint distance) to p2.
        d, i, (x, y) = min(self.intersectingEdges((x1, y1), (x2, y2), points))
        x3, y3 = points[i]
        x4, y4 = points[(i + 1) % 4]
        if (x, y) == (x1, y1):
            # Handle cases where previous point is on one of the edges.
            if x3 == x4:
                return QPointF(x3, min(max(0, y2), max(y3, y4)))
            else:  # y3 == y4
                return QPointF(min(max(0, x2), max(x3, x4)), y3)
        # Ensure the labels are within the bounds of the image. If not, fix them.
        x, y, _ = self.snapPointToCanvas(x, y)
        return QPointF(x, y)
    def intersectingEdges(self, x1y1, x2y2, points):
        """For each edge formed by `points', yield the intersection
        with the line segment `(x1,y1) - (x2,y2)`, if it exists.
        Also return the distance of `(x2,y2)' to the middle of the
        edge along with its index, so that the one closest can be chosen."""
        x1, y1 = x1y1
        x2, y2 = x2y2
        for i in range(4):
            x3, y3 = points[i]
            x4, y4 = points[(i + 1) % 4]
            # Standard parametric line-line intersection (Paul Bourke).
            denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
            nua = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)
            nub = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)
            if denom == 0:
                # This covers two cases:
                #   nua == nub == 0: Coincident
                #   otherwise: Parallel
                continue
            ua, ub = nua / denom, nub / denom
            # Intersection lies within both segments only for 0 <= ua,ub <= 1.
            if 0 <= ua <= 1 and 0 <= ub <= 1:
                x = x1 + ua * (x2 - x1)
                y = y1 + ua * (y2 - y1)
                m = QPointF((x3 + x4) / 2, (y3 + y4) / 2)
                d = distance(m - QPointF(x2, y2))
                yield d, i, (x, y)
    # These two, along with a call to adjustSize are required for the
    # scroll area.
    def sizeHint(self):
        """Preferred size: same as the minimum size (the scaled pixmap)."""
        return self.minimumSizeHint()
    def minimumSizeHint(self):
        """Minimum size: the pixmap size multiplied by the current zoom scale."""
        if self.pixmap:
            return self.scale * self.pixmap.size()
        return super(Canvas, self).minimumSizeHint()
    def wheelEvent(self, ev):
        """Zoom on Ctrl+wheel, otherwise scroll; handles Qt4 and Qt5 APIs."""
        # Qt4 wheel events expose delta()/orientation(); Qt5 uses angleDelta().
        qt_version = 4 if hasattr(ev, "delta") else 5
        if qt_version == 4:
            if ev.orientation() == Qt.Vertical:
                v_delta = ev.delta()
                h_delta = 0
            else:
                h_delta = ev.delta()
                v_delta = 0
        else:
            delta = ev.angleDelta()
            h_delta = delta.x()
            v_delta = delta.y()
        mods = ev.modifiers()
        if Qt.ControlModifier == int(mods) and v_delta:
            self.zoomRequest.emit(v_delta)
        else:
            v_delta and self.scrollRequest.emit(v_delta, Qt.Vertical)
            h_delta and self.scrollRequest.emit(h_delta, Qt.Horizontal)
        ev.accept()
    def keyPressEvent(self, ev):
        """Handle Escape (cancel drawing), Return (close shape) and arrow-key nudges."""
        key = ev.key()
        if key == Qt.Key_Escape and self.current:
            print('ESC press')
            self.current = None
            self.drawingPolygon.emit(False)
            self.update()
        elif key == Qt.Key_Return and self.canCloseShape():
            self.finalise()
        elif key == Qt.Key_Left and self.selectedShape:
            self.moveOnePixel('Left')
        elif key == Qt.Key_Right and self.selectedShape:
            self.moveOnePixel('Right')
        elif key == Qt.Key_Up and self.selectedShape:
            self.moveOnePixel('Up')
        elif key == Qt.Key_Down and self.selectedShape:
            self.moveOnePixel('Down')
def moveOnePixel(self, direction):
# print(self.selectedShape.points)
if direction == 'Left' and not self.moveOutOfBound(QPointF(-1.0, 0)):
# print("move Left one pixel")
self.selectedShape.points[0] += QPointF(-1.0, 0)
self.selectedShape.points[1] += QPointF(-1.0, 0)
self.selectedShape.points[2] += QPointF(-1.0, 0)
self.selectedShape.points[3] += QPointF(-1.0, 0)
elif direction == 'Right' and not self.moveOutOfBound(QPointF(1.0, 0)):
# print("move Right one pixel")
self.selectedShape.points[0] += QPointF(1.0, 0)
self.selectedShape.points[1] += QPointF(1.0, 0)
self.selectedShape.points[2] += QPointF(1.0, 0)
self.selectedShape.points[3] += QPointF(1.0, 0)
elif direction == 'Up' and not self.moveOutOfBound(QPointF(0, -1.0)):
# print("move Up one pixel")
self.selectedShape.points[0] += QPointF(0, -1.0)
self.selectedShape.points[1] += QPointF(0, -1.0)
self.selectedShape.points[2] += QPointF(0, -1.0)
self.selectedShape.points[3] += QPointF(0, -1.0)
elif direction == 'Down' and not self.moveOutOfBound(QPointF(0, 1.0)):
# print("move Down one pixel")
self.selectedShape.points[0] += QPointF(0, 1.0)
self.selectedShape.points[1] += QPointF(0, 1.0)
self.selectedShape.points[2] += QPointF(0, 1.0)
self.selectedShape.points[3] += QPointF(0, 1.0)
self.shapeMoved.emit()
self.repaint()
def moveOutOfBound(self, step):
points = [p1+p2 for p1, p2 in zip(self.selectedShape.points, [step]*4)]
return True in map(self.outOfPixmap, points)
def setLastLabel(self, text, line_color = None, fill_color = None):
assert text
# print(self.shapes)
# z = [ (i.label, i.paintLabel) for i in self.shapes]
# print("-",z)
self.shapes[-1].label = text
# self.shapes[-1].paintLabel = True
# z = [ (i.label, i.paintLabel) for i in self.shapes]
# print("-.-.-",z)
if line_color:
self.shapes[-1].line_color = line_color
if fill_color:
self.shapes[-1].fill_color = fill_color
return self.shapes[-1]
    def undoLastLine(self):
        """Reopen the most recently committed shape for further editing."""
        assert self.shapes
        self.current = self.shapes.pop()
        self.current.setOpen()
        # Rubber-band line spans from the last point back to the first.
        self.line.points = [self.current[-1], self.current[0]]
        self.drawingPolygon.emit(True)
    def resetAllLines(self):
        """Abort the drawing state: pop the last shape, then discard it."""
        assert self.shapes
        # NOTE(review): the popped shape is immediately discarded below, so
        # the setOpen/line updates and the emit(True) appear redundant —
        # confirm whether listeners rely on the True-then-False sequence.
        self.current = self.shapes.pop()
        self.current.setOpen()
        self.line.points = [self.current[-1], self.current[0]]
        self.drawingPolygon.emit(True)
        self.current = None
        self.drawingPolygon.emit(False)
        self.update()
    def loadPixmap(self, pixmap):
        """Display *pixmap* and clear all shapes (a new image was loaded)."""
        self.pixmap = pixmap
        self.shapes = []
        self.repaint()
    def loadShapes(self, shapes):
        """Replace the canvas's shapes with *shapes* and cancel any drawing."""
        self.shapes = list(shapes)
        self.current = None
        self.repaint()
    def setShapeVisible(self, shape, value):
        """Show or hide an individual shape."""
        self.visible[shape] = value
        self.repaint()
    def currentCursor(self):
        """Return the application's override cursor shape, or None if unset."""
        cursor = QApplication.overrideCursor()
        if cursor is not None:
            cursor = cursor.shape()
        return cursor
    def overrideCursor(self, cursor):
        """Set *cursor* as the application override cursor, remembering it
        so enterEvent can restore it later."""
        self._cursor = cursor
        if self.currentCursor() is None:
            QApplication.setOverrideCursor(cursor)
        else:
            QApplication.changeOverrideCursor(cursor)
    def restoreCursor(self):
        """Remove the application-level override cursor."""
        QApplication.restoreOverrideCursor()
    def resetState(self):
        """Clear the displayed pixmap and cursor state (image closed)."""
        self.restoreCursor()
        self.pixmap = None
        self.update()
    def setDrawingShapeToSquare(self, status):
        """Toggle the square-constrained drawing/resizing mode."""
        self.drawSquare = status
def minimumBoundingRectangle(self):
# get the convex hull for the points
xs = [int(point.x()) for point in self.current.points]
ys = [int(point.y()) for point in self.current.points]
min_x = min(xs)
max_x = max(xs)
min_y = min(ys)
max_y = max(ys)
final_coords = [
QPoint(min_x, max_y),
QPoint(max_x, max_y),
QPoint(max_x, min_y),
QPoint(min_x, min_y)
]
self.current.points = final_coords
# for i in range(4):
# self.current.addIntersectionPoint(self.current.popPoint())
# for point in final_coords:
# self.current.addPoint(point)
# self.update() | 36.716707 | 122 | 0.552658 |
3e573fa4a269fc325e13eb70e635681ebbffe53b | 6,101 | py | Python | base.py | mgrabovsky/cryptoverif-py-lib | d1b58e7c74edb95e4b3c526ffc48c751a142ab69 | [
"MIT"
] | 1 | 2017-05-22T18:39:42.000Z | 2017-05-22T18:39:42.000Z | base.py | mgrabovsky/cryptoverif-py-lib | d1b58e7c74edb95e4b3c526ffc48c751a142ab69 | [
"MIT"
] | null | null | null | base.py | mgrabovsky/cryptoverif-py-lib | d1b58e7c74edb95e4b3c526ffc48c751a142ab69 | [
"MIT"
] | 1 | 2015-11-04T14:28:23.000Z | 2015-11-04T14:28:23.000Z | import crypto
import io, math, os, random, struct, sys
import functools
import socket
from cryptography import utils
from cryptography.hazmat.primitives.asymmetric import rsa
class BadCall(Exception):
    """Raised when a function receives malformed or invalid arguments."""
    pass
class MatchFail(Exception):
    """Raised when a value fails to match an expected pattern."""
    pass
class Abort(Exception):
    """Raised to abort the current protocol run."""
    pass
class BadFile(Exception):
    """Raised when a file's contents are not in the expected format."""
    pass
# -----------------------------------------------------------------------------------
# Type predicates
# -----------------------------------------------------------------------------------
def true_pred(x) -> bool:
    """
    Predicate that accepts any value (always returns True).
    """
    return True
def size_pred(n):
    """
    Return a predicate that is True iff its argument's length equals ``n``.

    The original ``-> bool`` annotation was wrong: this function returns a
    callable, not a bool, so the misleading annotation was dropped.
    """
    return lambda s: len(s) == n
# -----------------------------------------------------------------------------------
# Random data generation
# -----------------------------------------------------------------------------------
def random_bytes(n: int) -> bytes:
    """
    Generate a random bytestring of length ``n`` using the OS CSPRNG.
    """
    buf = os.urandom(n)
    assert len(buf) == n
    return buf
def random_nat(n: int) -> int:
    """
    Generate a random natural number ``n`` bytes long (big-endian).
    """
    # int.from_bytes replaces cryptography's utils.int_from_bytes, which is
    # deprecated and removed in recent cryptography releases; both decode an
    # unsigned big-endian integer.
    return int.from_bytes(random_bytes(n), 'big')
def random_bool() -> bool:
    """
    Generate a random Boolean value from one random byte's parity.
    """
    byte = random_bytes(1)[0]
    return byte % 2 == 0
def random_list(xs):
    """
    Return a uniformly chosen element of ``xs``.
    """
    return random.choice(xs)
# -----------------------------------------------------------------------------------
# File input and output
# -----------------------------------------------------------------------------------
def read_file(fname) -> bytes:
    """
    Read the contents of a file as a byte string.
    """
    with open(fname, 'rb') as fh:
        contents = fh.read()
    return contents
def write_file(fname, data: bytes) -> None:
    """
    Write byte string to a file, replacing any existing contents.
    """
    with open(fname, 'wb') as fh:
        fh.write(data)
# -----------------------------------------------------------------------------------
# Serialization
# -----------------------------------------------------------------------------------
# TODO: Name is too specific
def prefix_size(data: bytes, size: int) -> bytes:
    """
    Prepend *data* with *size* encoded as a 4-byte big-endian unsigned field.
    """
    return struct.pack('!L', size) + data
def extract_size(data: bytes):
    """
    Split *data* into (payload, size), where size is decoded from the
    leading 4-byte big-endian field. Raises BadCall on short input.
    """
    header, payload = data[:4], data[4:]
    if len(header) < 4:
        raise BadCall()
    (size,) = struct.unpack('!L', header)
    return (payload, size)
def compose(xs) -> bytes:
    """
    Serialize a sequence of byte strings into a single byte string.

    Layout: a 4-byte big-endian element count, followed by each element
    length-prefixed the same way. The parameter was mis-annotated as
    ``bytes``; it is a sequence of byte strings, so the annotation was
    dropped.
    """
    buf = prefix_size(b'', len(xs))
    for x in xs:
        buf += prefix_size(x, len(x))
    return buf
def decompose(data_with_size: bytes) -> list:
    """
    Deserialize a byte string produced by :func:`compose` back into a list
    of byte strings. Raises BadFile on a negative element count.
    """
    rest, count = extract_size(data_with_size)
    if count < 0:
        raise BadFile()
    chunks = []
    for _ in range(count):
        tail, chunk_size = extract_size(rest)
        chunks.append(tail[:chunk_size])
        rest = tail[chunk_size:]
    return chunks
def concat(*xs: bytes) -> bytes:
    """Serialize the given byte strings into one length-annotated blob."""
    return compose(xs)
def concat_pubkey_str(pk: rsa.RSAPublicKey, bs: bytes) -> bytes:
    """Serialize an RSA public key together with a byte string into one blob."""
    return compose([crypto.serialize_pubkey(pk), bs])
def unconcat_pubkey_str(bs: bytes):
    """
    Inverse of :func:`concat_pubkey_str`: split *bs* into [public key, bytes].
    """
    xs = decompose(bs)
    if len(xs) != 2:
        raise Exception('Invalid string')
    # Bug fix: `load_pubkey` was referenced unqualified, which raised
    # NameError at runtime. The deserializer lives in the crypto module,
    # mirroring crypto.serialize_pubkey used in concat_pubkey_str.
    # TODO confirm crypto.load_pubkey is the intended deserializer name.
    return [crypto.load_pubkey(xs[0]), xs[1]]
# -----------------------------------------------------------------------------------
# Encoding tables
# -----------------------------------------------------------------------------------
def get_from_table(fname):
    """
    Retrieve all records from a table file.

    File layout: repeated groups of [4-byte record count][for each record:
    4-byte length + payload]. Newest groups end up first in the returned
    list because each group is inserted at index 0.
    """
    data = []
    with open(fname, 'rb') as f:
        while True:
            # Read the number of records in this table
            word = f.read(4)
            if word is None or len(word) < 4:
                # Fail silently on EOF to support insertion while reading
                break
            (ncomp,) = struct.unpack('!L', word)
            records = []
            for _ in range(ncomp):
                word = f.read(4)
                if word is None or len(word) < 4:
                    # Fail silently
                    break
                (length,) = struct.unpack('!L', word)
                records.append(f.read(length))
            try:
                # NOTE(review): list.insert never raises MatchFail, so this
                # except clause looks unreachable — confirm whether a
                # filtering hook was meant to run here.
                data.insert(0, records)
            except MatchFail:
                continue
    return data
def insert_into_table(fname, data: bytes) -> None:
    """
    Insert a new record into a table file.

    Appends a 4-byte element count followed by each element, length-prefixed
    (matching the layout read back by :func:`get_from_table`).
    """
    # NOTE(review): despite the ``bytes`` annotation, each element of `data`
    # must itself support len() and concatenation with bytes, so `data` is
    # effectively a sequence of byte strings — confirm and fix annotation.
    length = len(data)
    with open(fname, 'ab') as f:
        f.write(struct.pack('!L', length))
        for x in data:
            f.write(struct.pack('!L', len(x)) + x)
# -----------------------------------------------------------------------------------
# Auxiliary functions
# -----------------------------------------------------------------------------------
def bool_from_bytes(bs: bytes) -> bool:
    """
    Decode a single-byte Boolean: b'\\x01' -> True, b'\\x00' -> False.

    Raises BadCall for any other input. Bug fix: the original evaluated the
    bare expressions ``True`` / ``False`` without returning them, so the
    function always returned None for valid input.
    """
    if bs == b'\x01':
        return True
    elif bs == b'\x00':
        return False
    else:
        raise BadCall()
def bool_to_bytes(b: bool) -> bytes:
    """
    Encode a Boolean as a single byte: True -> b'\\x01', False -> b'\\x00'.
    """
    return b'\x01' if b else b'\x00'
def size_from(n: int):
    """
    Return a checker that passes through values of length ``n`` unchanged
    and raises BadCall for any other length.
    """
    def check(s):
        # Inlined length predicate (was a size_pred(n) closure).
        if len(s) != n:
            raise BadCall()
        return s
    return check
def stringbot_from_bytes(bs: bytes) -> bytes:
    """
    Decode an optional byte string: a leading b'N' means None ("bottom"),
    a leading b'S' means the remainder is the value. Raises BadCall on
    empty input or an unknown tag.

    Bug fix: the original compared ``bs[0]`` (an int in Python 3) against
    the bytes literals b'N'/b'S', which is always False, so every nonempty
    input raised BadCall. Comparing one-byte slices restores the intent.
    """
    if bs == b'':
        raise BadCall()
    elif bs[:1] == b'N':
        return None
    elif bs[:1] == b'S':
        return bs[1:]
    else:
        raise BadCall()
def stringbot_to_bytes(bs: bytes) -> bytes:
    """
    Encode an optional byte string: None -> b'N', value -> b'S' + value.
    """
    return b'N' if bs is None else b'S' + bs
def injbot_inv(x):
    """
    Unwrap an optional value; raise BadCall when it is None (the "bottom"
    element of the injection).
    """
    if x is None:
        raise BadCall()
    return x
def get_hostname() -> bytes:
    """
    Return the local hostname encoded as UTF-8 bytes.
    """
    return socket.gethostname().encode('utf-8')
| 24.902041 | 85 | 0.473037 |
ba5357a2ca52f0f48536865e3b2e6d9a9441af04 | 8,175 | py | Python | tensorforce/core/networks/network.py | CAVED123/Tensorforce | 823177f77f9047b1e71eccfffc08315ed1636878 | [
"Apache-2.0"
] | null | null | null | tensorforce/core/networks/network.py | CAVED123/Tensorforce | 823177f77f9047b1e71eccfffc08315ed1636878 | [
"Apache-2.0"
] | null | null | null | tensorforce/core/networks/network.py | CAVED123/Tensorforce | 823177f77f9047b1e71eccfffc08315ed1636878 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from collections import OrderedDict
import tensorflow as tf
from tensorforce import TensorforceError, util
from tensorforce.core import Module
from tensorforce.core.layers import Layer, layer_modules, StatefulLayer, TemporalLayer
from tensorforce.core.parameters import Parameter
class Network(Module):
    """
    Base class for neural networks.
    Args:
        name (string): Network name
            (<span style="color:#0000C0"><b>internal use</b></span>).
        inputs_spec (specification): Input tensors specification
            (<span style="color:#0000C0"><b>internal use</b></span>).
        device (string): Device name
            (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
        summary_labels ('all' | iter[string]): Labels of summaries to record
            (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
        l2_regularization (float >= 0.0): Scalar controlling L2 regularization
            (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
    """
    def __init__(
        self, name, inputs_spec, device=None, summary_labels=None, l2_regularization=None
    ):
        super().__init__(
            name=name, device=device, summary_labels=summary_labels,
            l2_regularization=l2_regularization
        )
        # Specification of the expected input tensors (name -> value spec).
        self.inputs_spec = inputs_spec
    def get_output_spec(self):
        # Subclasses must describe the value spec of the network's output.
        raise NotImplementedError
    @classmethod
    def internals_spec(cls, network=None, **kwargs):
        # Stateless base network: no internal (recurrent) tensors.
        return OrderedDict()
    def internals_init(self):
        # Initial values for internal tensors; empty for stateless networks.
        return OrderedDict()
    def tf_dependency_horizon(self, is_optimization=False):
        # How many past timesteps the network depends on (0 = none).
        return tf.constant(value=0, dtype=util.tf_dtype(dtype='long'))
    def tf_apply(self, x, internals, return_internals=False):
        # Register input tensors; subclasses extend this to compute outputs.
        Module.update_tensors(**x)
    def create_tf_function(self, name, tf_function):
        # Wrap only tf_apply with input/output spec validation.
        if tf_function.__name__ != 'tf_apply':
            return super().create_tf_function(name=name, tf_function=tf_function)
        def validated_tf_function(x, internals, return_internals=False):
            # Validate inputs against the declared input spec(s).
            if util.is_atomic_values_spec(values_spec=self.inputs_spec):
                if not util.is_consistent_with_value_spec(value_spec=self.inputs_spec, x=x):
                    raise TensorforceError("Invalid input arguments for tf_apply.")
            else:
                if not all(
                    util.is_consistent_with_value_spec(value_spec=spec, x=x[name])
                    for name, spec in self.inputs_spec.items()
                ):
                    raise TensorforceError("Invalid input arguments for tf_apply.")
            # Validate internal state tensors against the internals spec.
            if not all(
                util.is_consistent_with_value_spec(value_spec=spec, x=internals[name])
                for name, spec in self.__class__.internals_spec(network=self).items()
            ):
                raise TensorforceError("Invalid input arguments for tf_apply.")
            if return_internals:
                x, internals = tf_function(x=x, internals=internals, return_internals=True)
            else:
                x = tf_function(x=x, internals=internals, return_internals=False)
            # Validate outputs (and updated internals) before returning.
            if not util.is_consistent_with_value_spec(value_spec=self.get_output_spec(), x=x):
                raise TensorforceError("Invalid output arguments for tf_apply.")
            if return_internals and not all(
                util.is_consistent_with_value_spec(value_spec=spec, x=internals[name])
                for name, spec in self.__class__.internals_spec(network=self).items()
            ):
                raise TensorforceError("Invalid output arguments for tf_apply.")
            if return_internals:
                return x, internals
            else:
                return x
        return super().create_tf_function(name=name, tf_function=validated_tf_function)
class LayerbasedNetwork(Network):
    """
    Base class for networks using Tensorforce layers.
    """
    def __init__(
        self, name, inputs_spec, device=None, summary_labels=None, l2_regularization=None
    ):
        """
        Layer-based network constructor.
        """
        super().__init__(
            name=name, inputs_spec=inputs_spec, device=device, summary_labels=summary_labels,
            l2_regularization=l2_regularization
        )
        # Output spec is tracked incrementally as layers are added; with a
        # single input it starts as that input's spec.
        if len(inputs_spec) == 1:
            self.output_spec = next(iter(inputs_spec.values()))
        else:
            self.output_spec = None
    def get_output_spec(self):
        # Spec of the last added layer's output (or the sole input's spec).
        return self.output_spec
    @classmethod
    def internals_spec(cls, network=None, **kwargs):
        # Aggregate the internal-state specs of all stateful layers,
        # namespaced as "<network>-<layer>-<name>".
        internals_spec = OrderedDict()
        if network is not None:
            for layer in network.modules.values():
                if not isinstance(layer, StatefulLayer):
                    continue
                for name, spec in layer.__class__.internals_spec(layer=layer).items():
                    name = '{}-{}-{}'.format(network.name, layer.name, name)
                    if name in internals_spec:
                        raise TensorforceError.unexpected()
                    internals_spec[name] = spec
        return internals_spec
    def internals_init(self):
        # Initial values for every stateful layer's internals, using the
        # same "<network>-<layer>-<name>" key scheme as internals_spec.
        internals_init = OrderedDict()
        for layer in self.modules.values():
            if not isinstance(layer, StatefulLayer):
                continue
            for name, internal_init in layer.internals_init().items():
                internals_init['{}-{}-{}'.format(self.name, layer.name, name)] = internal_init
        return internals_init
    def add_module(self, *args, **kwargs):
        # Default modules set: layer_modules
        if len(args) < 3 and 'modules' not in kwargs:
            assert 'is_subscope' not in kwargs
            kwargs['modules'] = layer_modules
            kwargs['is_subscope'] = True
        if 'input_spec' in kwargs:
            layer = super().add_module(*args, **kwargs)
            self.output_spec = layer.output_spec
        else:
            # Derive the new layer's input spec from the running output spec.
            if self.output_spec is None:
                if util.is_atomic_values_spec(values_spec=self.inputs_spec):
                    self.output_spec = self.inputs_spec
                elif len(self.inputs_spec) == 1:
                    self.output_spec = next(iter(self.inputs_spec.values()))
                else:
                    self.output_spec = None
            if self.output_spec is not None:
                if 'input_spec' in kwargs:
                    kwargs['input_spec'] = util.unify_value_specs(
                        value_spec1=kwargs['input_spec'], value_spec2=self.output_spec
                    )
                else:
                    kwargs['input_spec'] = self.output_spec
            layer = super().add_module(*args, **kwargs)
            self.output_spec = layer.output_spec
        # Only layers and parameters may be added to a layer-based network.
        if not isinstance(layer, (Layer, Parameter)):
            raise TensorforceError.type(
                name='layer-based network', argument='sub-module', value=layer
            )
        return layer
    def tf_dependency_horizon(self, is_optimization=False):
        # Maximum dependency horizon over all temporal layers (stateful
        # layers only count during optimization).
        dependencies = [super().tf_dependency_horizon()]
        for layer in self.modules.values():
            if isinstance(layer, TemporalLayer):
                if not isinstance(layer, StatefulLayer) or is_optimization:
                    dependencies.append(layer.dependency_horizon.value())
        return tf.math.reduce_max(input_tensor=tf.stack(values=dependencies, axis=0), axis=0)
ae940ba287bb380135ece64c07967d4b1bba107e | 1,695 | py | Python | cirq/study/compute_displays_result.py | jlmayfield/Cirq | dc1294f54118a9a4f92546ca13780b91615dd675 | [
"Apache-2.0"
] | 2 | 2019-04-02T10:22:21.000Z | 2019-06-19T04:54:04.000Z | cirq/study/compute_displays_result.py | jlmayfield/Cirq | dc1294f54118a9a4f92546ca13780b91615dd675 | [
"Apache-2.0"
] | 36 | 2019-04-03T23:03:51.000Z | 2019-05-15T23:49:01.000Z | cirq/study/compute_displays_result.py | jlmayfield/Cirq | dc1294f54118a9a4f92546ca13780b91615dd675 | [
"Apache-2.0"
] | 2 | 2019-04-03T22:55:05.000Z | 2019-04-24T23:24:53.000Z | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines ComputeDisplaysResult."""
from typing import Dict
from cirq.study import resolver
class ComputeDisplaysResult:
    """Results of computing the values of displays in a circuit.

    Attributes:
        params: A ParamResolver of settings used for this result.
        display_values: A dictionary from display key to display value.
    """
    # Instances hold mutable state, so they are deliberately unhashable.
    __hash__ = None  # type: ignore
    def __init__(self,
                 params: resolver.ParamResolver,
                 display_values: Dict) -> None:
        self.params = params
        self.display_values = display_values
    def __eq__(self, other):
        if not isinstance(other, ComputeDisplaysResult):
            return NotImplemented
        same_params = self.params == other.params
        return same_params and self.display_values == other.display_values
    def __ne__(self, other):
        return not self == other
    def __repr__(self):
        return ('cirq.ComputeDisplaysResult('
                f'params={self.params!r}, '
                f'display_values={self.display_values!r})')
| 32.596154 | 74 | 0.662537 |
e344f574bbd3d27509a27c429ae2e04c3ceac57a | 4,653 | py | Python | graph_objs/isosurface/caps/_z.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | graph_objs/isosurface/caps/_z.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | graph_objs/isosurface/caps/_z.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Z(_BaseTraceHierarchyType):
    """Cap settings along the z axis of an isosurface trace.

    NOTE(review): this module appears to follow plotly's code-generation
    template; prefer regenerating over hand-editing if that is the case.
    """
    # class properties
    # --------------------
    _parent_path_str = "isosurface.caps"
    _path_str = "isosurface.caps.z"
    _valid_props = {"fill", "show"}
    # fill
    # ----
    @property
    def fill(self):
        """
        Sets the fill ratio of the `caps`. The default fill value of
        the `caps` is 1 meaning that they are entirely shaded. On the
        other hand Applying a `fill` ratio less than one would allow
        the creation of openings parallel to the edges.
        The 'fill' property is a number and may be specified as:
          - An int or float in the interval [0, 1]
        Returns
        -------
        int|float
        """
        return self["fill"]
    @fill.setter
    def fill(self, val):
        self["fill"] = val
    # show
    # ----
    @property
    def show(self):
        """
        Sets the fill ratio of the `slices`. The default fill value of
        the z `slices` is 1 meaning that they are entirely shaded. On
        the other hand Applying a `fill` ratio less than one would
        allow the creation of openings parallel to the edges.
        The 'show' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self["show"]
    @show.setter
    def show(self, val):
        self["show"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        fill
            Sets the fill ratio of the `caps`. The default fill
            value of the `caps` is 1 meaning that they are entirely
            shaded. On the other hand Applying a `fill` ratio less
            than one would allow the creation of openings parallel
            to the edges.
        show
            Sets the fill ratio of the `slices`. The default fill
            value of the z `slices` is 1 meaning that they are
            entirely shaded. On the other hand Applying a `fill`
            ratio less than one would allow the creation of
            openings parallel to the edges.
        """
    def __init__(self, arg=None, fill=None, show=None, **kwargs):
        """
        Construct a new Z object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`new_plotly.graph_objs.isosurface.caps.Z`
        fill
            Sets the fill ratio of the `caps`. The default fill
            value of the `caps` is 1 meaning that they are entirely
            shaded. On the other hand Applying a `fill` ratio less
            than one would allow the creation of openings parallel
            to the edges.
        show
            Sets the fill ratio of the `slices`. The default fill
            value of the z `slices` is 1 meaning that they are
            entirely shaded. On the other hand Applying a `fill`
            ratio less than one would allow the creation of
            openings parallel to the edges.
        Returns
        -------
        Z
        """
        super(Z, self).__init__("z")
        # Internal construction path: a parent hierarchy object passes
        # itself via `_parent` and no further processing is needed.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the new_plotly.graph_objs.isosurface.caps.Z
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.isosurface.caps.Z`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("fill", None)
        _v = fill if fill is not None else _v
        if _v is not None:
            self["fill"] = _v
        _v = arg.pop("show", None)
        _v = show if show is not None else _v
        if _v is not None:
            self["show"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| 31.02 | 82 | 0.546099 |
c9e9a030f2f046705c651ce45f41e539c2aaee18 | 525 | py | Python | apps/amo/tests/test_pfs.py | muffinresearch/addons-server | 66613e9262a5e9475254091552de28a53b5b4072 | [
"BSD-3-Clause"
] | 1 | 2015-10-29T06:55:20.000Z | 2015-10-29T06:55:20.000Z | apps/amo/tests/test_pfs.py | magopian/olympia | 70cad15111a89e3d5c715cbade8925b12d1b98dc | [
"BSD-3-Clause"
] | 5 | 2021-02-02T23:09:35.000Z | 2021-09-08T02:47:20.000Z | apps/amo/tests/test_pfs.py | Acidburn0zzz/olympia | 7f766a5fdff255b827333d4fb01aa77546ed8c70 | [
"BSD-3-Clause"
] | null | null | null | import amo
import amo.tests
from services.pfs import get_output
from pyquery import PyQuery as pq
class TestPfs(amo.tests.TestCase):
    """Tests for the plugin finder service output."""
    def test_xss(self):
        """No field may echo unescaped markup into the rendered output."""
        payload = 'fooo<script>alert("foo")</script>;'
        fields = (
            'name', 'mimetype', 'guid', 'version', 'iconUrl',
            'InstallerLocation', 'InstallerHash', 'XPILocation',
            'InstallerShowsUI', 'manualInstallationURL',
            'licenseURL', 'needsRestart',
        )
        for field in fields:
            output = get_output({field: payload})
            assert not pq(output)('script')
| 30.882353 | 71 | 0.607619 |
f26a841684194eaed06215111aded147c61d3cc5 | 2,189 | py | Python | aiida/backends/sqlalchemy/models/authinfo.py | janssenhenning/aiida-core | a80812c30ebb0de12f0429954508f9ceb6120d34 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | aiida/backends/sqlalchemy/models/authinfo.py | janssenhenning/aiida-core | a80812c30ebb0de12f0429954508f9ceb6120d34 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2022-02-18T16:09:53.000Z | 2022-02-18T16:09:53.000Z | aiida/backends/sqlalchemy/models/authinfo.py | unkcpz/aiida_core | bb92dd56cc1bba142df2c48f1a73ca6b809568dc | [
"MIT",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=import-error,no-name-in-module
"""Module to manage authentification information for the SQLA backend."""
from sqlalchemy import ForeignKey
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Column, UniqueConstraint
from sqlalchemy.types import Boolean, Integer
from .base import Base
class DbAuthInfo(Base):
    """Class that keeps the authentication data.

    One row per (user, computer) pair -- enforced by the unique constraint
    below -- holding that pair's connection settings.
    """
    __tablename__ = 'db_dbauthinfo'
    id = Column(Integer, primary_key=True)  # pylint: disable=invalid-name
    # Owning user; deleting the user cascades to its auth-info rows.
    aiidauser_id = Column(
        Integer, ForeignKey('db_dbuser.id', ondelete='CASCADE', deferrable=True, initially='DEFERRED')
    )
    # Target computer; deleting the computer cascades as well.
    dbcomputer_id = Column(
        Integer, ForeignKey('db_dbcomputer.id', ondelete='CASCADE', deferrable=True, initially='DEFERRED')
    )
    aiidauser = relationship('DbUser', backref='authinfos')
    dbcomputer = relationship('DbComputer', backref='authinfos')
    # Stored under the SQL column name 'metadata'; the attribute is
    # underscore-prefixed because `metadata` is reserved on SQLAlchemy
    # declarative classes.
    _metadata = Column('metadata', JSONB)
    auth_params = Column(JSONB)
    enabled = Column(Boolean, default=True)
    # At most one auth-info row per (user, computer) pair.
    __table_args__ = (UniqueConstraint('aiidauser_id', 'dbcomputer_id'),)
    def __init__(self, *args, **kwargs):
        # Default the JSON fields to empty dicts so freshly created
        # instances are immediately usable.
        self._metadata = {}
        self.auth_params = {}
        super().__init__(*args, **kwargs)
    def __str__(self):
        if self.enabled:
            return f'DB authorization info for {self.aiidauser.email} on {self.dbcomputer.label}'
        return f'DB authorization info for {self.aiidauser.email} on {self.dbcomputer.label} [DISABLED]'
| 40.537037 | 106 | 0.613065 |
e5ca3e98dc4df0fafa25d668bf0c1996a4cad8e3 | 9,889 | py | Python | bc/inlineindex/migrations/0006_image_block.py | Buckinghamshire-Digital-Service/buckinghamshire-council | bbbdb52b515bcdfc79a2bd9198dfa4828405370e | [
"BSD-3-Clause"
] | 1 | 2021-02-27T07:27:17.000Z | 2021-02-27T07:27:17.000Z | bc/inlineindex/migrations/0006_image_block.py | Buckinghamshire-Digital-Service/buckinghamshire-council | bbbdb52b515bcdfc79a2bd9198dfa4828405370e | [
"BSD-3-Clause"
] | null | null | null | bc/inlineindex/migrations/0006_image_block.py | Buckinghamshire-Digital-Service/buckinghamshire-council | bbbdb52b515bcdfc79a2bd9198dfa4828405370e | [
"BSD-3-Clause"
] | 1 | 2021-06-09T15:56:54.000Z | 2021-06-09T15:56:54.000Z | # Generated by Django 2.2.10 on 2020-03-24 16:00
from django.db import migrations
import wagtail.contrib.table_block.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
("inlineindex", "0005_add_table_block"),
]
operations = [
migrations.AlterField(
model_name="inlineindex",
name="body",
field=wagtail.core.fields.StreamField(
[
(
"heading",
wagtail.core.blocks.CharBlock(
classname="full title",
icon="title",
template="patterns/molecules/streamfield/blocks/heading_block.html",
),
),
(
"paragraph",
wagtail.core.blocks.RichTextBlock(
features=[
"bold",
"italic",
"ol",
"ul",
"link",
"document-link",
]
),
),
(
"image",
wagtail.core.blocks.StructBlock(
[
("image", wagtail.images.blocks.ImageChooserBlock()),
(
"caption",
wagtail.core.blocks.CharBlock(required=False),
),
]
),
),
("embed", wagtail.embeds.blocks.EmbedBlock()),
(
"local_area_links",
wagtail.core.blocks.StructBlock(
[
(
"introduction",
wagtail.core.blocks.RichTextBlock(
default="<p>Select your local area for information:</p>",
features=[
"bold",
"italic",
"ol",
"ul",
"link",
"document-link",
],
),
),
(
"aylesbury_vale_url",
wagtail.core.blocks.URLBlock(
label="Aylesbury Vale URL", required=False
),
),
(
"chiltern_url",
wagtail.core.blocks.URLBlock(
label="Chiltern URL", required=False
),
),
(
"south_bucks_url",
wagtail.core.blocks.URLBlock(
label="South Bucks URL", required=False
),
),
(
"wycombe_url",
wagtail.core.blocks.URLBlock(
label="Wycombe URL", required=False
),
),
(
"postscript",
wagtail.core.blocks.RichTextBlock(
default='<p>Or <a href="https://www.gov.uk/find-local-council">find your area based on your postcode</a>.</p>',
features=[
"bold",
"italic",
"ol",
"ul",
"link",
"document-link",
],
required=False,
),
),
]
),
),
("table", wagtail.contrib.table_block.blocks.TableBlock()),
]
),
),
migrations.AlterField(
model_name="inlineindexchild",
name="body",
field=wagtail.core.fields.StreamField(
[
(
"heading",
wagtail.core.blocks.CharBlock(
classname="full title",
icon="title",
template="patterns/molecules/streamfield/blocks/heading_block.html",
),
),
(
"paragraph",
wagtail.core.blocks.RichTextBlock(
features=[
"bold",
"italic",
"ol",
"ul",
"link",
"document-link",
]
),
),
(
"image",
wagtail.core.blocks.StructBlock(
[
("image", wagtail.images.blocks.ImageChooserBlock()),
(
"caption",
wagtail.core.blocks.CharBlock(required=False),
),
]
),
),
("embed", wagtail.embeds.blocks.EmbedBlock()),
(
"local_area_links",
wagtail.core.blocks.StructBlock(
[
(
"introduction",
wagtail.core.blocks.RichTextBlock(
default="<p>Select your local area for information:</p>",
features=[
"bold",
"italic",
"ol",
"ul",
"link",
"document-link",
],
),
),
(
"aylesbury_vale_url",
wagtail.core.blocks.URLBlock(
label="Aylesbury Vale URL", required=False
),
),
(
"chiltern_url",
wagtail.core.blocks.URLBlock(
label="Chiltern URL", required=False
),
),
(
"south_bucks_url",
wagtail.core.blocks.URLBlock(
label="South Bucks URL", required=False
),
),
(
"wycombe_url",
wagtail.core.blocks.URLBlock(
label="Wycombe URL", required=False
),
),
(
"postscript",
wagtail.core.blocks.RichTextBlock(
default='<p>Or <a href="https://www.gov.uk/find-local-council">find your area based on your postcode</a>.</p>',
features=[
"bold",
"italic",
"ol",
"ul",
"link",
"document-link",
],
required=False,
),
),
]
),
),
("table", wagtail.contrib.table_block.blocks.TableBlock()),
]
),
),
]
| 43.756637 | 151 | 0.249469 |
fcc9b146aa2ba31fc7990ee1111a2cfd5e3e2bf0 | 4,869 | py | Python | opensearch/serializer.py | rushiagr/opensearch-py-1 | 497468f254cacfb998c102bafce9607d0f6d0452 | [
"Apache-2.0"
] | null | null | null | opensearch/serializer.py | rushiagr/opensearch-py-1 | 497468f254cacfb998c102bafce9607d0f6d0452 | [
"Apache-2.0"
] | null | null | null | opensearch/serializer.py | rushiagr/opensearch-py-1 | 497468f254cacfb998c102bafce9607d0f6d0452 | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
#
# Modifications Copyright OpenSearch Contributors. See
# GitHub history for details.
#
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import simplejson as json
except ImportError:
import json
import uuid
from datetime import date, datetime
from decimal import Decimal
from .compat import string_types
from .exceptions import ImproperlyConfigured, SerializationError
INTEGER_TYPES = ()
FLOAT_TYPES = (Decimal,)
TIME_TYPES = (date, datetime)
try:
import numpy as np
INTEGER_TYPES += (
np.int_,
np.intc,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
)
FLOAT_TYPES += (
np.float_,
np.float16,
np.float32,
np.float64,
)
except ImportError:
np = None
try:
import pandas as pd
TIME_TYPES += (pd.Timestamp,)
except ImportError:
pd = None
class Serializer(object):
mimetype = ""
def loads(self, s):
raise NotImplementedError()
def dumps(self, data):
raise NotImplementedError()
class TextSerializer(Serializer):
mimetype = "text/plain"
def loads(self, s):
return s
def dumps(self, data):
if isinstance(data, string_types):
return data
raise SerializationError("Cannot serialize %r into text." % data)
class JSONSerializer(Serializer):
mimetype = "application/json"
def default(self, data):
if isinstance(data, TIME_TYPES) and getattr(pd, "NaT", None) is not data:
return data.isoformat()
elif isinstance(data, uuid.UUID):
return str(data)
elif isinstance(data, FLOAT_TYPES):
return float(data)
elif INTEGER_TYPES and isinstance(data, INTEGER_TYPES):
return int(data)
# Special cases for numpy and pandas types
elif np:
if isinstance(data, np.bool_):
return bool(data)
elif isinstance(data, np.datetime64):
return data.item().isoformat()
elif isinstance(data, np.ndarray):
return data.tolist()
if pd:
if isinstance(data, (pd.Series, pd.Categorical)):
return data.tolist()
elif data is getattr(pd, "NA", None):
return None
raise TypeError("Unable to serialize %r (type: %s)" % (data, type(data)))
def loads(self, s):
try:
return json.loads(s)
except (ValueError, TypeError) as e:
raise SerializationError(s, e)
def dumps(self, data):
# don't serialize strings
if isinstance(data, string_types):
return data
try:
return json.dumps(
data, default=self.default, ensure_ascii=False, separators=(",", ":")
)
except (ValueError, TypeError) as e:
raise SerializationError(data, e)
DEFAULT_SERIALIZERS = {
JSONSerializer.mimetype: JSONSerializer(),
TextSerializer.mimetype: TextSerializer(),
}
class Deserializer(object):
def __init__(self, serializers, default_mimetype="application/json"):
try:
self.default = serializers[default_mimetype]
except KeyError:
raise ImproperlyConfigured(
"Cannot find default serializer (%s)" % default_mimetype
)
self.serializers = serializers
def loads(self, s, mimetype=None):
if not mimetype:
deserializer = self.default
else:
# split out charset
mimetype, _, _ = mimetype.partition(";")
try:
deserializer = self.serializers[mimetype]
except KeyError:
raise SerializationError(
"Unknown mimetype, unable to deserialize: %s" % mimetype
)
return deserializer.loads(s)
| 27.664773 | 85 | 0.624153 |
dc5335dc8fade150ac0c0e9e7ed2e769b0a581ef | 7,993 | py | Python | python/cudf/cudf/utils/cudautils.py | hafixo/cudf | 7f91a17a967b0e0b502d56d37818aa0461d38c67 | [
"Apache-2.0"
] | null | null | null | python/cudf/cudf/utils/cudautils.py | hafixo/cudf | 7f91a17a967b0e0b502d56d37818aa0461d38c67 | [
"Apache-2.0"
] | null | null | null | python/cudf/cudf/utils/cudautils.py | hafixo/cudf | 7f91a17a967b0e0b502d56d37818aa0461d38c67 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018, NVIDIA CORPORATION.
from functools import lru_cache
import cupy
import numpy as np
from numba import cuda
from cudf.utils.utils import (
check_equals_float,
check_equals_int,
mask_bitsize,
mask_get,
rint,
)
try:
# Numba >= 0.49
from numba.np import numpy_support
except ImportError:
# Numba <= 0.49
from numba import numpy_support
# GPU array type casting
def as_contiguous(arr):
    """Return a C-contiguous numba device array with the data of *arr*.

    cupy cannot represent datetime64 data, so datetime arrays are routed
    through an int64 view for the copy and the result is viewed back as
    the caller's dtype (mirroring ``full`` below).

    Parameters
    ----------
    arr : device array
        One-dimensional device array.

    Returns
    -------
    A C-contiguous numba device array with the same dtype as *arr*.
    """
    assert arr.ndim == 1
    out_dtype = arr.dtype
    if np.issubdtype(out_dtype, np.datetime64):
        # cupy has no datetime dtype: copy the raw int64 payload instead.
        arr = arr.view("int64")
    out = cupy.ascontiguousarray(cupy.asarray(arr))
    # View back as the caller's dtype. The previous code viewed the
    # (possibly rebound) int64 array's dtype here, so datetime64 input
    # silently came back as int64, and it left a dead `cupy_dtype` local.
    return cuda.as_cuda_array(out).view(out_dtype)
# Mask utils
def full(size, value, dtype):
    """Return a device array of *size* elements of *dtype* filled with *value*."""
    fill_dtype = dtype
    if np.issubdtype(fill_dtype, np.datetime64):
        # cupy has no datetime dtype: fill as int64, then view back below.
        unit, _ = np.datetime_data(fill_dtype)
        fill_dtype = np.int64
        value = np.datetime64(value, unit).view(fill_dtype)
    filled = cupy.full(size, value, fill_dtype)
    return cuda.as_cuda_array(filled).view(dtype)
@cuda.jit
def gpu_expand_mask_bits(bits, out):
    """Expand each bit in bitmask *bits* into an element of *out*.

    This is a flexible kernel that can be launched with any number of blocks
    and threads: it uses a grid-stride loop over *out*.
    """
    for i in range(cuda.grid(1), out.size, cuda.gridsize(1)):
        # Guard against out being longer than the number of stored bits.
        if i < bits.size * mask_bitsize:
            out[i] = mask_get(bits, i)
def expand_mask_bits(size, bits):
    """Expand the bit-packed mask *bits* into a boolean byte-mask of *size* elements."""
    byte_mask = full(size, 0, dtype=np.bool_)
    ntasks = min(1024, byte_mask.size)
    if ntasks > 0:
        gpu_expand_mask_bits.forall(ntasks)(bits, byte_mask)
    return byte_mask
#
# Misc kernels
#
@cuda.jit
def gpu_diff(in_col, out_col, out_mask, N):
    """Calculate the difference between values at positions i and i - N in an
    array and store the output in a new array.

    Positions without a valid i - N neighbour (the first N elements for
    N > 0, the trailing |N| elements for N < 0) end up with
    out_mask[i] == False.
    """
    i = cuda.grid(1)
    if N > 0:
        if i < in_col.size:
            # For i < N this reads in_col[i - N] with a negative index; the
            # written value is garbage but is masked out just below.
            out_col[i] = in_col[i] - in_col[i - N]
            out_mask[i] = True
        if i < N:
            out_mask[i] = False
    else:
        # NOTE(review): `<=` permits i - N == in_col.size, i.e. a read one
        # past the end whose result is then masked out by the condition
        # below -- looks like it should be `<`; confirm.
        if i <= (in_col.size + N):
            out_col[i] = in_col[i] - in_col[i - N]
            out_mask[i] = True
        if i >= (in_col.size + N) and i < in_col.size:
            out_mask[i] = False
@cuda.jit
def gpu_round(in_col, out_col, decimal):
    """Round each element of *in_col* to *decimal* decimal places into
    *out_col*, one thread per element.
    """
    i = cuda.grid(1)
    # Scale so that the target decimal place sits at the integer boundary.
    f = 10 ** decimal
    if i < in_col.size:
        ret = in_col[i] * f
        # rint comes from cudf.utils.utils -- presumably rounds half to
        # even like C rint; confirm if exact tie-breaking matters.
        ret = rint(ret)
        tmp = ret / f
        out_col[i] = tmp
def apply_round(data, decimal):
    """Round every element of device array *data* to *decimal* places,
    returning a new device array of the same shape and dtype.
    """
    rounded = cuda.device_array_like(data)
    if rounded.size > 0:
        gpu_round.forall(rounded.size)(data, rounded, decimal)
    return rounded
# Find segments
@cuda.jit
def gpu_mark_found_int(arr, val, out, not_found):
    """Write i to out[i] where arr[i] equals *val* (integer comparison via
    check_equals_int), else write the *not_found* sentinel.
    """
    i = cuda.grid(1)
    if i < arr.size:
        if check_equals_int(arr[i], val):
            out[i] = i
        else:
            out[i] = not_found
@cuda.jit
def gpu_mark_found_float(arr, val, out, not_found):
    """Write i to out[i] where arr[i] equals *val* using the float-aware
    helper check_equals_float (see cudf.utils.utils for its NaN handling),
    else write the *not_found* sentinel.
    """
    i = cuda.grid(1)
    if i < arr.size:
        if check_equals_float(arr[i], val):
            out[i] = i
        else:
            out[i] = not_found
@cuda.jit
def gpu_mark_gt(arr, val, out, not_found):
    """Write i to out[i] where arr[i] > *val*, else the *not_found* sentinel."""
    i = cuda.grid(1)
    if i < arr.size:
        if arr[i] > val:
            out[i] = i
        else:
            out[i] = not_found
@cuda.jit
def gpu_mark_lt(arr, val, out, not_found):
    """Write i to out[i] where arr[i] < *val*, else the *not_found* sentinel."""
    i = cuda.grid(1)
    if i < arr.size:
        if arr[i] < val:
            out[i] = i
        else:
            out[i] = not_found
def find_first(arr, val, compare="eq"):
    """
    Returns the index of the first occurrence of *val* in *arr*.
    Or the first occurrence of *arr* *compare* *val*, if *compare* is not eq
    Otherwise, returns -1.

    Parameters
    ----------
    arr : device array
    val : scalar
    compare: str ('gt', 'lt', or 'eq' (default))
    """
    # Each thread writes its own index where the predicate holds, or the
    # sentinel arr.size otherwise; the first match is then the minimum.
    found = cuda.device_array_like(arr)
    if found.size > 0:
        if compare == "gt":
            gpu_mark_gt.forall(found.size)(arr, val, found, arr.size)
        elif compare == "lt":
            gpu_mark_lt.forall(found.size)(arr, val, found, arr.size)
        else:
            # Equality uses a float-aware kernel for float dtypes.
            if arr.dtype in ("float32", "float64"):
                gpu_mark_found_float.forall(found.size)(
                    arr, val, found, arr.size
                )
            else:
                gpu_mark_found_int.forall(found.size)(
                    arr, val, found, arr.size
                )
    # Local import -- presumably to avoid a circular import with cudf.core;
    # confirm before hoisting to module level.
    from cudf.core.column import as_column
    found_col = as_column(found)
    min_index = found_col.min()
    if min_index == arr.size:
        return -1
    else:
        return min_index
def find_last(arr, val, compare="eq"):
    """
    Returns the index of the last occurrence of *val* in *arr*.
    Or the last occurrence of *arr* *compare* *val*, if *compare* is not eq
    Otherwise, returns -1.

    Parameters
    ----------
    arr : device array
    val : scalar
    compare: str ('gt', 'lt', or 'eq' (default))
    """
    # Mirror of find_first: non-matching positions get the sentinel -1,
    # so the last match is the maximum over the marks.
    found = cuda.device_array_like(arr)
    if found.size > 0:
        if compare == "gt":
            gpu_mark_gt.forall(found.size)(arr, val, found, -1)
        elif compare == "lt":
            gpu_mark_lt.forall(found.size)(arr, val, found, -1)
        else:
            # Equality uses a float-aware kernel for float dtypes.
            if arr.dtype in ("float32", "float64"):
                gpu_mark_found_float.forall(found.size)(arr, val, found, -1)
            else:
                gpu_mark_found_int.forall(found.size)(arr, val, found, -1)
    # Local import -- presumably to avoid a circular import with cudf.core;
    # confirm before hoisting to module level.
    from cudf.core.column import as_column
    found_col = as_column(found)
    max_index = found_col.max()
    return max_index
@cuda.jit
def gpu_window_sizes_from_offset(arr, window_sizes, offset):
    """For each position i, store in window_sizes[i] the number of trailing
    elements j <= i with arr[i] - arr[j] < offset. One thread per element,
    each walking backwards, so cost is O(window length) per element.
    """
    i = cuda.grid(1)
    j = i
    if i < arr.size:
        # Walk back until the difference reaches the offset or we fall off
        # the front of the array (j == -1).
        while j > -1:
            if (arr[i] - arr[j]) >= offset:
                break
            j -= 1
        window_sizes[i] = i - j
def window_sizes_from_offset(arr, offset):
    """Per-element backward-looking window lengths bounded by *offset*
    (see gpu_window_sizes_from_offset for the exact definition).
    """
    sizes = cuda.device_array(shape=arr.shape, dtype="int32")
    if arr.size > 0:
        gpu_window_sizes_from_offset.forall(arr.size)(arr, sizes, offset)
    return sizes
@cuda.jit
def gpu_grouped_window_sizes_from_offset(
    arr, window_sizes, group_starts, offset
):
    """Same as gpu_window_sizes_from_offset, but the backward walk for
    position i never crosses below group_starts[i], so windows are
    confined to each element's group.
    """
    i = cuda.grid(1)
    j = i
    if i < arr.size:
        # Stop at the start of this element's group instead of index 0.
        while j > (group_starts[i] - 1):
            if (arr[i] - arr[j]) >= offset:
                break
            j -= 1
        window_sizes[i] = i - j
def grouped_window_sizes_from_offset(arr, group_starts, offset):
    """Per-element backward-looking window lengths bounded by *offset*,
    with each window confined to the element's group
    (see gpu_grouped_window_sizes_from_offset).
    """
    sizes = cuda.device_array(shape=arr.shape, dtype="int32")
    if arr.size > 0:
        gpu_grouped_window_sizes_from_offset.forall(arr.size)(
            arr, sizes, group_starts, offset
        )
    return sizes
@lru_cache(maxsize=32)
def compile_udf(udf, type_signature):
    """Compile ``udf`` with `numba`

    Compile a python callable function ``udf`` with
    `numba.cuda.jit(device=True)` using ``type_signature`` into CUDA PTX
    together with the generated output type.
    The output is expected to be passed to the PTX parser in `libcudf`
    to generate a CUDA device function to be inlined into CUDA kernels,
    compiled at runtime and launched.

    Note: the lru_cache is keyed on (udf, type_signature) and holds
    references to up to 32 udf callables.

    Parameters
    ----------
    udf:
        a python callable function
    type_signature:
        a tuple that specifies types of each of the input parameters of ``udf``.
        The types should be one in `numba.types` and could be converted from
        numpy types with `numba.numpy_support.from_dtype(...)`.

    Returns
    -------
    ptx_code:
        The compiled CUDA PTX
    output_type:
        A numpy type
    """
    decorated_udf = cuda.jit(udf, device=True)
    compiled = decorated_udf.compile(type_signature)
    ptx_code = decorated_udf.inspect_ptx(type_signature).decode("utf-8")
    # Map the numba return type back to a numpy scalar type for libcudf.
    output_type = numpy_support.as_dtype(compiled.signature.return_type)
    return (ptx_code, output_type.type)
| 25.951299 | 78 | 0.602277 |
c789ba1dbed3ab090d19b25ec3c1ead583d5ca37 | 468 | py | Python | tbot/migrations/20190505_01_HmBmL-twitch-discord-live-notification.py | thomaserlang/tbot | 99cfa204d86ef35cf2cc9482ae5a44abb35b443a | [
"MIT"
] | null | null | null | tbot/migrations/20190505_01_HmBmL-twitch-discord-live-notification.py | thomaserlang/tbot | 99cfa204d86ef35cf2cc9482ae5a44abb35b443a | [
"MIT"
] | 10 | 2022-02-14T11:40:20.000Z | 2022-03-09T22:44:03.000Z | tbot/migrations/20190505_01_HmBmL-twitch-discord-live-notification.py | thomaserlang/tbot | 99cfa204d86ef35cf2cc9482ae5a44abb35b443a | [
"MIT"
] | 1 | 2020-09-19T16:38:24.000Z | 2020-09-19T16:38:24.000Z | """
twitch_discord_live_notification
"""
from yoyo import step
__depends__ = {'20190504_02_ZJgxR-discord-commands'}
steps = [
step('''
CREATE TABLE `twitch_discord_live_notification` (
`id` INT(11) UNSIGNED NOT NULL AUTO_INCREMENT,
`channel_id` VARCHAR(36) NOT NULL,
`webhook_url` VARCHAR(500) NOT NULL,
`message` VARCHAR(500) NOT NULL,
PRIMARY KEY (`id`),
INDEX `channel_id` (`channel_id`)
);
''')
]
| 22.285714 | 54 | 0.632479 |
fb1866283b550131113aa54391627e43e19c0e1e | 557 | py | Python | setup.py | udit-saxena/sumologic-python-sdk | 4731badc675226efd75fd996a8f93fa8f51e041f | [
"Apache-2.0"
] | null | null | null | setup.py | udit-saxena/sumologic-python-sdk | 4731badc675226efd75fd996a8f93fa8f51e041f | [
"Apache-2.0"
] | null | null | null | setup.py | udit-saxena/sumologic-python-sdk | 4731badc675226efd75fd996a8f93fa8f51e041f | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
setup(
    # Distribution name as published on PyPI.
    name="sumologic-sdk",
    version="0.1.8",
    # Pick up every package under the repository root.
    packages=find_packages(),
    # Runtime dependency: HTTP client used for the REST API calls.
    install_requires=['requests>=2.2.1'],
    # PyPI metadata
    author="Yoway Buorn, Melchi Salins",
    author_email="it@sumologic.com, melchisalins@icloud.com",
    description="Sumo Logic Python SDK",
    license="PSF",
    keywords="sumologic python sdk rest api log management analytics logreduce splunk security siem collector forwarder",
    url="https://github.com/SumoLogic/sumologic-python-sdk",
    zip_safe=True
)
| 32.764706 | 121 | 0.716338 |
4614e0602febc0abe013475ed8d46a6ea1c19a8c | 3,383 | py | Python | pycomm3/cip/pccc.py | SISAutomationIMA/pycomm3 | e98917ba0e9ad06e6cadc15b4d2c9735fd029ec1 | [
"MIT"
] | 185 | 2019-07-09T11:59:47.000Z | 2022-03-31T13:22:13.000Z | pycomm3/cip/pccc.py | SISAutomationIMA/pycomm3 | e98917ba0e9ad06e6cadc15b4d2c9735fd029ec1 | [
"MIT"
] | 92 | 2020-01-27T02:16:42.000Z | 2022-03-09T02:10:50.000Z | pycomm3/cip/pccc.py | SISAutomationIMA/pycomm3 | e98917ba0e9ad06e6cadc15b4d2c9735fd029ec1 | [
"MIT"
] | 63 | 2019-11-25T13:45:08.000Z | 2022-03-31T06:08:42.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Ian Ottoway <ian@ottoway.dev>
# Copyright (c) 2014 Agostino Ruscito <ruscito@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from itertools import chain
from io import BytesIO
from .data_types import INT, DINT, REAL, StringDataType, UINT
from ..map import EnumMap
class PCCCStringType(StringDataType):
    """Shared base for PCCC string types: provides the SLC byte-pair swap."""
    @classmethod
    def _slc_string_swap(cls, data):
        """Return *data* with every adjacent pair of bytes swapped
        (SLC on-the-wire word order). *data* must have even length.
        """
        swapped = bytearray(data)
        swapped[0::2] = data[1::2]
        swapped[1::2] = data[0::2]
        return bytes(swapped)
class PCCC_ASCII(PCCCStringType):
    """PCCC A (ASCII) data type: two characters stored in swapped order."""
    @classmethod
    def _encode(cls, value: str, *args, **kwargs) -> bytes:
        """Encode the first two characters of *value*, byte-swapped,
        padding short values with spaces.

        The previous implementation unpacked ``value[:2]`` into two names,
        which raised ValueError for 0/1-character values even though its
        ``or " "`` fallbacks show space-padding was intended.
        """
        char1, char2 = (value + "  ")[:2]
        return char2.encode(cls.encoding) + char1.encode(cls.encoding)
    @classmethod
    def _decode(cls, stream: BytesIO) -> str:
        """Read two bytes from *stream* and return them un-swapped."""
        return cls._slc_string_swap(stream.read(2)).decode(cls.encoding)
class PCCC_STRING(PCCCStringType):
    """PCCC ST (string) data type: a length word followed by an 82-byte
    character area stored in swapped (SLC word order) byte pairs."""
    @classmethod
    def _encode(cls, value: str) -> bytes:
        # Length word first, then the character data with byte pairs swapped.
        # NOTE(review): an odd-length *value* makes _slc_string_swap raise
        # ValueError (unpaired final byte) -- confirm callers always pad.
        _len = UINT.encode(len(value))
        _data = cls._slc_string_swap(value.encode(cls.encoding))
        return _len + _data
    @classmethod
    def _decode(cls, stream: BytesIO) -> str:
        # The length word is read to advance the stream, but the decoded
        # result is the full 82-byte area, not truncated to _len.
        # NOTE(review): confirm whether truncating to _len is intended.
        _len = UINT.decode(stream)
        return cls._slc_string_swap(stream.read(82)).decode(cls.encoding)
class PCCCDataTypes(EnumMap):
    """Lower-case PCCC/SLC file-type letters mapped to their element codecs."""
    # NOTE(review): interpreted by EnumMap (defined elsewhere); presumably it
    # restricts returned member names to upper case -- confirm in ..map.
    _return_caps_only_ = True
    n = INT           # N (integer) files
    b = INT           # B (bit) files, read as whole words
    t = INT           # T (timer) sub-elements
    c = INT           # C (counter) sub-elements
    s = INT           # S (status) file
    o = INT           # O (output) file
    i = INT           # I (input) file
    f = REAL          # F (float) files
    a = PCCC_ASCII    # A (ASCII) files, 2 chars per element
    r = DINT          # R (control) files
    st = PCCC_STRING  # ST (string) files
    l = DINT          # L (long integer) files
# Timer/counter sub-element lookup used when addressing e.g. "T4:0.PRE".
# PRE/ACC map to element word offsets 1 and 2; the remaining entries look
# like status-bit numbers (timer EN/TT/DN vs counter CU/CD/OV/UN/UA share
# the same bit positions, hence EN==CU==15 and TT==CD==14).
# NOTE(review): bit semantics inferred from Allen-Bradley naming -- confirm
# against the code that consumes this table.
PCCC_CT = {
    "PRE": 1,
    "ACC": 2,
    "EN": 15,
    "TT": 14,
    "DN": 13,
    "CU": 15,
    "CD": 14,
    "OV": 12,
    "UN": 11,
    "UA": 10,
}
# File-type letter -> PCCC wire type-code byte.
_PCCC_DATA_TYPE = {
    "N": b"\x89",
    "B": b"\x85",
    "T": b"\x86",
    "C": b"\x87",
    "S": b"\x84",
    "F": b"\x8a",
    "ST": b"\x8d",
    "A": b"\x8e",
    "R": b"\x88",
    "O": b"\x82",  # or b'\x8b'?
    "I": b"\x83",  # or b'\x8c'?
    "L": b"\x91",
    "MG": b"\x92",
    "PD": b"\x93",
    "PLS": b"\x94",
}

# Bidirectional lookup: merges letter -> byte with the reversed
# byte -> letter mapping, so one dict serves both encode and decode.
PCCC_DATA_TYPE = {
    **_PCCC_DATA_TYPE,
    **{v: k for k, v in _PCCC_DATA_TYPE.items()},
}
# Per-element data size for each file type -- presumably in bytes
# (2 for INT-backed files, 4 for REAL/long, 82 for ST string data, and the
# larger structured types MG/PD/PLS). NOTE(review): units inferred from the
# codec mapping above; confirm against the request builder.
PCCC_DATA_SIZE = {
    "N": 2,
    "L": 4,
    "B": 2,
    "T": 6,
    "C": 6,
    "S": 2,
    "F": 4,
    "ST": 84,
    "A": 2,
    "R": 6,
    "O": 2,
    "I": 2,
    "MG": 50,
    "PD": 46,
    "PLS": 12,
}
| 24.693431 | 88 | 0.608927 |
450b041812a428b027242845ba41855c5a686512 | 16,747 | py | Python | step/utils/Quaternions.py | 1suancaiyu/take_an_emotion_walk | 693704b48757a45750dea528811a09a18afdf1b1 | [
"MIT"
] | 23 | 2020-07-10T07:24:31.000Z | 2021-11-01T02:10:44.000Z | step/utils/Quaternions.py | fanyuuuu/take_an_emotion_walk | 881b90611ca6a964794997f15e9721eade502803 | [
"MIT"
] | 6 | 2020-07-23T09:38:14.000Z | 2021-11-02T05:19:02.000Z | step/utils/Quaternions.py | fanyuuuu/take_an_emotion_walk | 881b90611ca6a964794997f15e9721eade502803 | [
"MIT"
] | 7 | 2020-09-17T02:24:16.000Z | 2021-07-21T07:01:52.000Z | import numpy as np
class Quaternions:
"""
Quaternions is a wrapper around a numpy ndarray
that allows it to act as if it were an narray of
a quaternion data type.
Therefore addition, subtraction, multiplication,
division, negation, absolute, are all defined
in terms of quaternion operations such as quaternion
multiplication.
This allows for much neater code and many routines
which conceptually do the same thing to be written
in the same way for point data and for rotation data.
The Quaternions class has been designed such that it
should support broadcasting and slicing in all of the
usual ways.
"""
def __init__(self, qs):
if isinstance(qs, np.ndarray):
if len(qs.shape) == 1: qs = np.array([qs])
self.qs = qs
return
if isinstance(qs, Quaternions):
self.qs = qs.qs
return
raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs))
def __str__(self):
return "Quaternions(" + str(self.qs) + ")"
def __repr__(self):
return "Quaternions(" + repr(self.qs) + ")"
""" Helper Methods for Broadcasting and Data extraction """
@classmethod
def _broadcast(cls, sqs, oqs, scalar=False):
if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1])
ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1])
os = np.array(oqs.shape)
if len(ss) != len(os):
raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
if np.all(ss == os): return sqs, oqs
if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))):
raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
sqsn, oqsn = sqs.copy(), oqs.copy()
for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a)
for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a)
return sqsn, oqsn
""" Adding Quaterions is just Defined as Multiplication """
def __add__(self, other):
return self * other
def __sub__(self, other):
return self / other
""" Quaterion Multiplication """
def __mul__(self, other):
"""
Quaternion multiplication has three main methods.
When multiplying a Quaternions array by Quaternions
normal quaternion multiplication is performed.
When multiplying a Quaternions array by a vector
array of the same shape, where the last axis is 3,
it is assumed to be a Quaternion by 3D-Vector
multiplication and the 3D-Vectors are rotated
in space by the Quaternions.
When multipplying a Quaternions array by a scalar
or vector of different shape it is assumed to be
a Quaternions by Scalars multiplication and the
Quaternions are scaled using Slerp and the identity
quaternions.
"""
""" If Quaternions type do Quaternions * Quaternions """
if isinstance(other, Quaternions):
sqs, oqs = Quaternions._broadcast(self.qs, other.qs)
q0 = sqs[..., 0];
q1 = sqs[..., 1];
q2 = sqs[..., 2];
q3 = sqs[..., 3];
r0 = oqs[..., 0];
r1 = oqs[..., 1];
r2 = oqs[..., 2];
r3 = oqs[..., 3];
qs = np.empty(sqs.shape)
qs[..., 0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3
qs[..., 1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2
qs[..., 2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1
qs[..., 3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0
return Quaternions(qs)
""" If array type do Quaternions * Vectors """
if isinstance(other, np.ndarray) and other.shape[-1] == 3:
vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1))
return (self * (vs * -self)).imaginaries
""" If float do Quaternions * Scalars """
if isinstance(other, np.ndarray) or isinstance(other, float):
return Quaternions.slerp(Quaternions.id_like(self), self, other)
raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other)))
def __div__(self, other):
"""
When a Quaternion type is supplied, division is defined
as multiplication by the inverse of that Quaternion.
When a scalar or vector is supplied it is defined
as multiplicaion of one over the supplied value.
Essentially a scaling.
"""
if isinstance(other, Quaternions): return self * (-other)
if isinstance(other, np.ndarray): return self * (1.0 / other)
if isinstance(other, float): return self * (1.0 / other)
raise TypeError('Cannot divide/subtract Quaternions with type %s' + str(type(other)))
def __eq__(self, other):
return self.qs == other.qs
def __ne__(self, other):
return self.qs != other.qs
def __neg__(self):
""" Invert Quaternions """
return Quaternions(self.qs * np.array([[1, -1, -1, -1]]))
def __abs__(self):
""" Unify Quaternions To Single Pole """
qabs = self.normalized().copy()
top = np.sum((qabs.qs) * np.array([1, 0, 0, 0]), axis=-1)
bot = np.sum((-qabs.qs) * np.array([1, 0, 0, 0]), axis=-1)
qabs.qs[top < bot] = -qabs.qs[top < bot]
return qabs
def __iter__(self):
return iter(self.qs)
def __len__(self):
return len(self.qs)
def __getitem__(self, k):
return Quaternions(self.qs[k])
def __setitem__(self, k, v):
self.qs[k] = v.qs
@property
def lengths(self):
return np.sum(self.qs ** 2.0, axis=-1) ** 0.5
@property
def reals(self):
return self.qs[..., 0]
@property
def imaginaries(self):
return self.qs[..., 1:4]
@property
def shape(self):
return self.qs.shape[:-1]
def repeat(self, n, **kwargs):
return Quaternions(self.qs.repeat(n, **kwargs))
def normalized(self):
return Quaternions(self.qs / self.lengths[..., np.newaxis])
def log(self):
norm = abs(self.normalized())
imgs = norm.imaginaries
lens = np.sqrt(np.sum(imgs ** 2, axis=-1))
lens = np.arctan2(lens, norm.reals) / (lens + 1e-10)
return imgs * lens[..., np.newaxis]
def constrained(self, axis):
rl = self.reals
im = np.sum(axis * self.imaginaries, axis=-1)
t1 = -2 * np.arctan2(rl, im) + np.pi
t2 = -2 * np.arctan2(rl, im) - np.pi
top = Quaternions.exp(axis[np.newaxis] * (t1[:, np.newaxis] / 2.0))
bot = Quaternions.exp(axis[np.newaxis] * (t2[:, np.newaxis] / 2.0))
img = self.dot(top) > self.dot(bot)
ret = top.copy()
ret[img] = top[img]
ret[~img] = bot[~img]
return ret
def constrained_x(self):
return self.constrained(np.array([1, 0, 0]))
def constrained_y(self):
return self.constrained(np.array([0, 1, 0]))
def constrained_z(self):
return self.constrained(np.array([0, 0, 1]))
def dot(self, q):
return np.sum(self.qs * q.qs, axis=-1)
def copy(self):
return Quaternions(np.copy(self.qs))
def reshape(self, s):
self.qs.reshape(s)
return self
def interpolate(self, ws):
return Quaternions.exp(np.average(abs(self).log, axis=0, weights=ws))
def euler(self, order='xyz'):
q = self.normalized().qs
q0 = q[..., 0]
q1 = q[..., 1]
q2 = q[..., 2]
q3 = q[..., 3]
es = np.zeros(self.shape + (3,))
if order == 'xyz':
es[..., 0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
es[..., 1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1, 1))
es[..., 2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
elif order == 'yzx':
es[..., 0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)
es[..., 1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0)
es[..., 2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1, 1))
else:
raise NotImplementedError('Cannot convert from ordering %s' % order)
"""
# These conversion don't appear to work correctly for Maya.
# http://bediyap.com/programming/convert-quaternion-to-euler-rotations/
if order == 'xyz':
es[fa + (0,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
elif order == 'yzx':
es[fa + (0,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q1 * q2 + q0 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
elif order == 'zxy':
es[fa + (0,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q1 + q2 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
elif order == 'xzy':
es[fa + (0,)] = np.arctan2(2 * (q0 * q2 + q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q3 - q1 * q2)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
elif order == 'yxz':
es[fa + (0,)] = np.arctan2(2 * (q1 * q2 + q0 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q1 - q2 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q1 * q3 + q0 * q2), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
elif order == 'zyx':
es[fa + (0,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q2 - q1 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q3 + q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
else:
raise KeyError('Unknown ordering %s' % order)
"""
# https://github.com/ehsan/ogre/blob/master/OgreMain/src/OgreMatrix3.cpp
# Use this class and convert from matrix
return es
def average(self):
if len(self.shape) == 1:
import numpy.core.umath_tests as ut
system = ut.matrix_multiply(self.qs[:, :, np.newaxis], self.qs[:, np.newaxis, :]).sum(axis=0)
w, v = np.linalg.eigh(system)
qiT_dot_qref = (self.qs[:, :, np.newaxis] * v[np.newaxis, :, :]).sum(axis=1)
return Quaternions(v[:, np.argmin((1. - qiT_dot_qref ** 2).sum(axis=0))])
else:
raise NotImplementedError('Cannot average multi-dimensionsal Quaternions')
def angle_axis(self):
norm = self.normalized()
s = np.sqrt(1 - (norm.reals ** 2.0))
s[s == 0] = 0.001
angles = 2.0 * np.arccos(norm.reals)
axis = norm.imaginaries / s[..., np.newaxis]
return angles, axis
def transforms(self):
qw = self.qs[..., 0]
qx = self.qs[..., 1]
qy = self.qs[..., 2]
qz = self.qs[..., 3]
x2 = qx + qx;
y2 = qy + qy;
z2 = qz + qz;
xx = qx * x2;
yy = qy * y2;
wx = qw * x2;
xy = qx * y2;
yz = qy * z2;
wy = qw * y2;
xz = qx * z2;
zz = qz * z2;
wz = qw * z2;
m = np.empty(self.shape + (3, 3))
m[..., 0, 0] = 1.0 - (yy + zz)
m[..., 0, 1] = xy - wz
m[..., 0, 2] = xz + wy
m[..., 1, 0] = xy + wz
m[..., 1, 1] = 1.0 - (xx + zz)
m[..., 1, 2] = yz - wx
m[..., 2, 0] = xz - wy
m[..., 2, 1] = yz + wx
m[..., 2, 2] = 1.0 - (xx + yy)
return m
def ravel(self):
return self.qs.ravel()
@classmethod
def id(cls, n):
if isinstance(n, tuple):
qs = np.zeros(n + (4,))
qs[..., 0] = 1.0
return Quaternions(qs)
if isinstance(n, int) or isinstance(n, long):
qs = np.zeros((n, 4))
qs[:, 0] = 1.0
return Quaternions(qs)
raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n)))
@classmethod
def id_like(cls, a):
qs = np.zeros(a.shape + (4,))
qs[..., 0] = 1.0
return Quaternions(qs)
@classmethod
def exp(cls, ws):
ts = np.sum(ws ** 2.0, axis=-1) ** 0.5
ts[ts == 0] = 0.001
ls = np.sin(ts) / ts
qs = np.empty(ws.shape[:-1] + (4,))
qs[..., 0] = np.cos(ts)
qs[..., 1] = ws[..., 0] * ls
qs[..., 2] = ws[..., 1] * ls
qs[..., 3] = ws[..., 2] * ls
return Quaternions(qs).normalized()
@classmethod
def slerp(cls, q0s, q1s, a):
fst, snd = cls._broadcast(q0s.qs, q1s.qs)
fst, a = cls._broadcast(fst, a, scalar=True)
snd, a = cls._broadcast(snd, a, scalar=True)
len = np.sum(fst * snd, axis=-1)
neg = len < 0.0
len[neg] = -len[neg]
snd[neg] = -snd[neg]
amount0 = np.zeros(a.shape)
amount1 = np.zeros(a.shape)
linear = (1.0 - len) < 0.01
omegas = np.arccos(len[~linear])
sinoms = np.sin(omegas)
amount0[linear] = 1.0 - a[linear]
amount1[linear] = a[linear]
amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms
amount1[~linear] = np.sin(a[~linear] * omegas) / sinoms
return Quaternions(
amount0[..., np.newaxis] * fst +
amount1[..., np.newaxis] * snd)
@classmethod
def between(cls, v0s, v1s):
a = np.cross(v0s, v1s)
w = np.sqrt((v0s ** 2).sum(axis=-1) * (v1s ** 2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1)
return Quaternions(np.concatenate([w[..., np.newaxis], a], axis=-1)).normalized()
@classmethod
def from_angle_axis(cls, angles, axis):
axis = axis / (np.sqrt(np.sum(axis ** 2, axis=-1)) + 1e-10)[..., np.newaxis]
sines = np.sin(angles / 2.0)[..., np.newaxis]
cosines = np.cos(angles / 2.0)[..., np.newaxis]
return Quaternions(np.concatenate([cosines, axis * sines], axis=-1))
@classmethod
def from_euler(cls, es, order='xyz', world=False):
axis = {
'x': np.array([1, 0, 0]),
'y': np.array([0, 1, 0]),
'z': np.array([0, 0, 1]),
}
q0s = Quaternions.from_angle_axis(es[..., 0], axis[order[0]])
q1s = Quaternions.from_angle_axis(es[..., 1], axis[order[1]])
q2s = Quaternions.from_angle_axis(es[..., 2], axis[order[2]])
return (q2s * (q1s * q0s)) if world else (q0s * (q1s * q2s))
@classmethod
def from_transforms(cls, ts):
d0, d1, d2 = ts[..., 0, 0], ts[..., 1, 1], ts[..., 2, 2]
q0 = (d0 + d1 + d2 + 1.0) / 4.0
q1 = (d0 - d1 - d2 + 1.0) / 4.0
q2 = (-d0 + d1 - d2 + 1.0) / 4.0
q3 = (-d0 - d1 + d2 + 1.0) / 4.0
q0 = np.sqrt(q0.clip(0, None))
q1 = np.sqrt(q1.clip(0, None))
q2 = np.sqrt(q2.clip(0, None))
q3 = np.sqrt(q3.clip(0, None))
c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3)
c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3)
c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3)
c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2)
q1[c0] *= np.sign(ts[c0, 2, 1] - ts[c0, 1, 2])
q2[c0] *= np.sign(ts[c0, 0, 2] - ts[c0, 2, 0])
q3[c0] *= np.sign(ts[c0, 1, 0] - ts[c0, 0, 1])
q0[c1] *= np.sign(ts[c1, 2, 1] - ts[c1, 1, 2])
q2[c1] *= np.sign(ts[c1, 1, 0] + ts[c1, 0, 1])
q3[c1] *= np.sign(ts[c1, 0, 2] + ts[c1, 2, 0])
q0[c2] *= np.sign(ts[c2, 0, 2] - ts[c2, 2, 0])
q1[c2] *= np.sign(ts[c2, 1, 0] + ts[c2, 0, 1])
q3[c2] *= np.sign(ts[c2, 2, 1] + ts[c2, 1, 2])
q0[c3] *= np.sign(ts[c3, 1, 0] - ts[c3, 0, 1])
q1[c3] *= np.sign(ts[c3, 2, 0] + ts[c3, 0, 2])
q2[c3] *= np.sign(ts[c3, 2, 1] + ts[c3, 1, 2])
qs = np.empty(ts.shape[:-2] + (4,))
qs[..., 0] = q0
qs[..., 1] = q1
qs[..., 2] = q2
qs[..., 3] = q3
return cls(qs)
| 33.427146 | 120 | 0.49794 |
4a3606e044b1937aee1d7ea5605783dd6db49c9f | 186 | py | Python | pywps/response/status.py | geotom/pywps | 7f228ff17594912664073a629b2c2ed9d4f5f615 | [
"MIT"
] | 117 | 2015-12-30T22:28:46.000Z | 2022-03-18T09:18:58.000Z | pywps/response/status.py | geotom/pywps | 7f228ff17594912664073a629b2c2ed9d4f5f615 | [
"MIT"
] | 533 | 2015-12-20T12:06:13.000Z | 2022-03-30T11:11:31.000Z | pywps/response/status.py | geotom/pywps | 7f228ff17594912664073a629b2c2ed9d4f5f615 | [
"MIT"
] | 123 | 2016-01-25T17:32:39.000Z | 2022-03-23T12:52:59.000Z | from collections import namedtuple
_WPS_STATUS = namedtuple('WPSStatus', ['UNKNOWN', 'ACCEPTED', 'STARTED', 'PAUSED', 'SUCCEEDED', 'FAILED'])
WPS_STATUS = _WPS_STATUS(0, 1, 2, 3, 4, 5)
| 37.2 | 106 | 0.704301 |
a0cbaa34e910a1e528eadd278f4af70ae17a3a07 | 3,400 | py | Python | Syllabus.py | anderson-github-classroom/syllabus-assignment-lightwell64646 | cc7bdafe35b58429dd2d51eb2f052703bb9de96c | [
"MIT"
] | null | null | null | Syllabus.py | anderson-github-classroom/syllabus-assignment-lightwell64646 | cc7bdafe35b58429dd2d51eb2f052703bb9de96c | [
"MIT"
] | null | null | null | Syllabus.py | anderson-github-classroom/syllabus-assignment-lightwell64646 | cc7bdafe35b58429dd2d51eb2f052703bb9de96c | [
"MIT"
] | null | null | null | # ---
# jupyter:
# jupytext:
# formats: ipynb,md,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Name(s)
# Nathan Pestes
# **Instructions:** This is an individual assignment. Complete the following code and push to get your score.
# I am providing the autograder answers locally so you may test your code before pushing. I will be reviewing your submissions, and if I find you are circumventing the autograder in any manner, you will receive a 0 on this assignment and your case will be reported to the honor board for review. i.e., approach the assignment in a genuine manner and you have nothing to worry about.
#
# **Question 1.**
# When will new material be available each week?
# You can answer the question by defining an anonymous function. This creates a function that I can test using pytest. You don't have to worry about the details. You just need to answer the question by changing the string argument that is currently set to "D". I know this is a bit weird, but I want you to get used to submitting code as early as possible.
# Nothing to modify in this cell
def question_1(answer):
    """Return the syllabus answer text for multiple-choice key *answer*.

    Unknown (or unhashable) choices yield "Not a valid answer".
    """
    answers = {
        "A": "Monday morning",
        "B": "Sunday night",
        "C": "Monday evening",
        "D": "I don't know"
    }
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer silently swallowed.
    try:
        return answers[answer]
    except (KeyError, TypeError):
        return "Not a valid answer"


# YOUR SOLUTION HERE
# Sample incorrect answer
answer_question_1 = lambda: question_1("C")
# **Question 2.**
# Do I need to buy the textbook?
# Nothing to modify in this cell
def question_2(answer):
    """Return the syllabus answer text for multiple-choice key *answer*.

    Unknown (or unhashable) choices yield "Not a valid answer".
    """
    answers = {
        "A": "No",
        "B": "Maybe",
        "C": "Yes. You will struggle with some of the chapters without the textbook",
    }
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer silently swallowed.
    try:
        return answers[answer]
    except (KeyError, TypeError):
        return "Not a valid answer"


# YOUR SOLUTION HERE
# Sample incorrect answer
answer_question_2 = lambda: question_2("C")
# **Question 3.**
# # Are there any required times that I be online?
# Nothing to modify in this cell
def question_3(answer):
    """Return the syllabus answer text for multiple-choice key *answer*.

    Unknown (or unhashable) choices yield "Not a valid answer".
    """
    answers = {
        "A": "Yes",
        "B": "No"
    }
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer silently swallowed.
    try:
        return answers[answer]
    except (KeyError, TypeError):
        return "Not a valid answer"


# YOUR SOLUTION HERE
# Sample incorrect answer
answer_question_3 = lambda: question_3("A")
# **Question 4.**
# What software will I use to complete the assignments?
# Nothing to modify in this cell
def question_4(answer):
    """Return the syllabus answer text for multiple-choice key *answer*.

    Unknown (or unhashable) choices yield "Not a valid answer".
    """
    answers = {
        "A": "Java",
        "B": "Netbeans",
        "C": "Anaconda"
    }
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer silently swallowed.
    try:
        return answers[answer]
    except (KeyError, TypeError):
        return "Not a valid answer"


# YOUR SOLUTION HERE
# Sample incorrect answer
answer_question_4 = lambda: question_4("C")
# **Question 5.**
# Do I need to participate in this class or can I just do the labs and assignments?
# Nothing to modify in this cell
def question_5(answer):
    """Return the syllabus answer text for multiple-choice key *answer*.

    Unknown (or unhashable) choices yield "Not a valid answer".
    """
    answers = {
        "A": "Yes. If you want to get anything higher than a C, you'll need to do more than the labs and assignments",
        "B": "No",
    }
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer silently swallowed.
    try:
        return answers[answer]
    except (KeyError, TypeError):
        return "Not a valid answer"


# YOUR SOLUTION HERE
# Sample incorrect answer
answer_question_5 = lambda: question_5("A")

# Don't forget to push!
print(answer_question_5())
| 24.817518 | 382 | 0.654706 |
f24c68d932ca1cb4b039dd4e8bb8f33fda5d32c6 | 42,453 | py | Python | smCounter.py | xuchang116/DeepCall | 8abecb43b6163af8b0b9c026492c9fc79260e82a | [
"MIT"
] | 24 | 2017-01-04T02:08:25.000Z | 2022-01-04T08:31:30.000Z | smCounter.py | xuchang116/DeepCall | 8abecb43b6163af8b0b9c026492c9fc79260e82a | [
"MIT"
] | 3 | 2018-02-13T08:32:09.000Z | 2022-02-28T11:58:28.000Z | smCounter.py | xuchang116/DeepCall | 8abecb43b6163af8b0b9c026492c9fc79260e82a | [
"MIT"
] | 8 | 2017-07-03T07:30:56.000Z | 2022-01-05T10:26:15.000Z | # smCounter: barcode-aware variant caller
# Chang Xu. 23May2016; online version of 10APR2017
import os
import datetime
import subprocess
import math
import operator
import argparse
import random
import multiprocessing
import traceback
from collections import defaultdict
# 3rd party modules
import pysam
import scipy.stats
# global constants (note that multiprocessing imports this .py file, so do not do much work outside functions)
pcr_error = 1e-6  # nominal PCR error rate; NOTE(review): pcr_no_error uses 3e-5, not 3 * pcr_error -- confirm intended
pcr_no_error = 1.0 - 3e-5  # probability the barcode consensus carries no PCR error
atgc = ('A', 'T', 'G', 'C')

#-------------------------------------------------------------------------------------
# function to calculate posterior probability for each barcode.
#-------------------------------------------------------------------------------------
def calProb(oneBC, mtDrop):
   '''Posterior probability of each candidate allele within one barcode (MT).

   oneBC  : dict mapping read id -> list whose first element is a
            (base, error_probability, pair_order) tuple
   mtDrop : barcodes with <= mtDrop reads are dropped (all-zero posteriors)

   Returns a dict allele -> posterior probability. For kept barcodes the
   probabilities are normalized to sum to 1 over the candidate alleles.
   Fixed for Python 3: the original iterated `prodP.iterkeys()`, which
   raised AttributeError (dict.iterkeys was removed in Python 3).
   '''
   outDict = defaultdict(float)
   # barcode has too few reads -- report zero probability for all 4 bases
   if len(oneBC) <= mtDrop:
      for char in atgc:
         outDict[char] = 0.0
      return outDict

   prodP = defaultdict(float)   # per-allele P(no sequencing error) products
   cnt = defaultdict(int)       # per-allele read counts
   tmpOut = defaultdict(float)  # unnormalized posteriors
   rightP = 1.0                 # P(no sequencing error in any read)
   sumP = 0.0
   pcrP = defaultdict(float)    # per-allele PCR error probability

   # set ATGC count = 0
   for char in atgc:
      cnt[char] = 0

   # get unique bases. Make sure uniqBase contains 4 members, unless the
   # barcode already contains more than or equal to 4 bases/indels.
   # NOTE: existBase contains only alleles (incl. indels) with >= 1 read
   # in the MT; uniqBase may contain more.
   existBase = set([info[0][0] for info in oneBC.values()])
   uniqBase = set(existBase)
   if len(uniqBase) < 4:
      for b in atgc:
         if b not in uniqBase:
            uniqBase.add(b)
         if len(uniqBase) == 4:
            break
   uniqBaseList = list(uniqBase)

   # set initial value in prodP to be 1.0
   for b in uniqBaseList:
      prodP[b] = 1.0

   for info in oneBC.values():
      base = info[0][0]
      prob = info[0][1]       # sequencing error probability for this call
      pairOrder = info[0][2]
      if pairOrder != 'Paired':
         prob = 0.1           # penalize unpaired reads with a high error rate
      # prodP is the probability of no sequencing error for each base
      prodP[base] *= 1.0 - prob
      cnt[base] += 1
      for char in list(uniqBase - set([base])):
         prodP[char] *= prob
      # rightP: probability there is no sequencing error at all, hence any
      # alternative alleles come from PCR error
      rightP *= 1.0 - prob

   for char in uniqBaseList:
      # pseudo-count-smoothed allele fraction drives the PCR-error prior
      ratio = (cnt[char] + 0.5) / (len(oneBC) + 0.5 * len(uniqBaseList))
      pcrP[char] = 10.0 ** (-6.0 * ratio)

   for key in prodP.keys():
      if key in existBase:
         # tmpOut[key] is P(BC|key): likelihood of all reads in the
         # barcode given the true allele is `key`
         tmpOut[key] = pcr_no_error * prodP[key] + rightP * min([pcrP[char] for char in pcrP.keys() if char != key])
      else:
         tmpOut[key] = rightP
         for char in existBase:
            if char != key:
               tmpOut[key] *= pcrP[char]
      sumP += tmpOut[key]

   for key in prodP.keys():  # fixed: was prodP.iterkeys() (Python 2 only)
      outDict[key] = 0.0 if sumP <= 0 else tmpOut[key] / sumP
   return outDict
#-------------------------------------------------------------------------------------
# convert variant type, reference base, variant base to output format
#-------------------------------------------------------------------------------------
def convertToVcf(origRef, origAlt):
   '''Translate smCounter's internal allele encoding into VCF-style fields.

   Returns (ref, alt, vtype) where vtype is 'SNP' for single-base alleles,
   'SDEL' for the bare 'DEL' marker, 'INDEL' for "DEL|ref|alt" /
   "INS|ref|alt" encodings, and '.' when the encoding is not recognized.
   '''
   if len(origAlt) == 1:
      return (origRef, origAlt, 'SNP')
   if origAlt == 'DEL':
      return (origRef, origAlt, 'SDEL')
   fields = origAlt.split('|')
   if fields[0] in ('DEL', 'INS'):
      # encoded as "DEL|ref|alt" or "INS|ref|alt"
      return (fields[1], fields[2], 'INDEL')
   return (origRef, origAlt, '.')
#-------------------------------------------------------------------------------------
# check if a locus is within or flanked by homopolymer region and/or low complexity region
#-------------------------------------------------------------------------------------
def isHPorLowComp(chrom, pos, length, refb, altb, refGenome):
   '''Check whether a locus is within or flanked by a homopolymer region
   and/or a low-complexity region of the reference.

   chrom, pos : locus (pos is 1-based); refb/altb : REF and ALT alleles
   length     : homopolymer run length to search for; the low-complexity
                window is twice this length
   refGenome  : path to an indexed FASTA file
   Returns (isHomopolymer, isLowComplexity).

   Fix: the pysam.FastaFile handle was never closed (file-handle leak when
   called once per candidate locus); the duplicated ref/alt window scans
   are factored into _hasLowCompWindow.
   '''
   refs = pysam.FastaFile(refGenome)
   try:
      chromLength = refs.get_reference_length(chrom)
      pos0 = int(pos) - 1
      # reference context [pos-length, pos+length], substituting REF or ALT
      Lseq = refs.fetch(reference=chrom, start=max(0, pos0 - length), end=pos0).upper()
      Rseq_ref = refs.fetch(reference=chrom, start=pos0 + len(refb), end=min(pos0 + len(refb) + length, chromLength)).upper()
      Rseq_alt = refs.fetch(reference=chrom, start=pos0 + len(altb), end=min(pos0 + len(altb) + length, chromLength)).upper()
      refSeq = Lseq + refb + Rseq_ref
      altSeq = Lseq + altb + Rseq_alt
      # homopolymer: a run of `length` identical bases in either context
      homop = any(refSeq.find(b * length) >= 0 or altSeq.find(b * length) >= 0 for b in atgc)

      # low complexity: window length is 2x the homopolymer length; flag if
      # any 2 nucleotides make up >= 99% of any window (ref first, then alt)
      len2 = 2 * length
      LseqLC = refs.fetch(reference=chrom, start=max(0, pos0 - len2), end=pos0).upper()
      Rseq_refLC = refs.fetch(reference=chrom, start=pos0 + len(refb), end=min(pos0 + len(refb) + len2, chromLength)).upper()
      Rseq_altLC = refs.fetch(reference=chrom, start=pos0 + len(altb), end=min(pos0 + len(altb) + len2, chromLength)).upper()
      lowcomp = (_hasLowCompWindow(LseqLC + refb + Rseq_refLC, len2) or
                 _hasLowCompWindow(LseqLC + altb + Rseq_altLC, len2))
   finally:
      refs.close()
   return (homop, lowcomp)


def _hasLowCompWindow(seq, len2):
   '''True if any length-len2 window of seq has its two most common
   nucleotides covering >= 99% of the window. Scan bounds match the
   original code (range(len(seq) - len2)).'''
   for i in range(len(seq) - len2):
      subseq = seq[i:(i + len2)]
      sortedCounts = sorted((subseq.count(c) for c in atgc), reverse=True)
      if 1.0 * (sortedCounts[0] + sortedCounts[1]) / len2 >= 0.99:
         return True
   return False
#-------------------------------------------------------------------------------------
# filter variants
#-------------------------------------------------------------------------------------
def filterVariants(ref,alt,vtype,origAlt,origRef,usedMT,strongMTCnt,chrom,pos,hpLen,refGenome,MTCnt,alleleCnt,cvg,discordPairCnt,concordPairCnt,reverseCnt,forwardCnt,lowQReads,r1BcEndPos,r2BcEndPos,r2PrimerEndPos,primerDist):
   '''Apply the smCounter hard filters to one candidate variant.

   Returns a semicolon-joined string of failed-filter codes (starting with
   ';'); a variant passing every filter yields just ';'. Codes: LM (low
   usable MT depth), LSM (few strong MTs), HP (homopolymer), LowC (low
   complexity), DP (discordant read pairs), SB (strand bias), LowQ (low
   base quality), R1CP/R2CP (variant clustered at the random/barcode read
   end), PrimerCP (variant clustered near the primer end).
   Most count arguments are dict-like tallies keyed by allele string.
   '''
   # init output string
   fltr = ';'
   # low coverage filter
   if usedMT < 5:
      fltr += 'LM;'
   # low number of strong MTs filter
   if strongMTCnt[origAlt] < 2 :
      fltr += 'LSM;'
   # check region for homopolymer or low complexity
   (isHomopolymer,isLowComplexity) = isHPorLowComp(chrom, pos, hpLen, ref, alt, refGenome)
   # homopolymer filter; near-unanimous MT support (>= 99%) is exempted
   if isHomopolymer and 1.0 * MTCnt[origAlt] / usedMT < 0.99:
      fltr += 'HP;'
   # low complexity filter
   if isLowComplexity and 1.0 * MTCnt[origAlt] / usedMT < 0.99:
      fltr += 'LowC;'
   # strand bias and discordant pairs filter
   af_alt = 100.0 * alleleCnt[origAlt] / cvg
   pairs = discordPairCnt[origAlt] + concordPairCnt[origAlt] # total number of paired reads covering the pos
   if pairs >= 1000 and 1.0 * discordPairCnt[origAlt] / pairs >= 0.5:
      fltr += 'DP;'
   elif af_alt <= 60.0:
      # Fisher's exact test on REF/ALT x reverse/forward strand counts
      refR = reverseCnt[origRef]
      refF = forwardCnt[origRef]
      altR = reverseCnt[origAlt]
      altF = forwardCnt[origAlt]
      fisher = scipy.stats.fisher_exact([[refR, refF], [altR, altF]])
      oddsRatio = fisher[0]
      pvalue = fisher[1]
      if pvalue < 0.00001 and (oddsRatio >= 50 or oddsRatio <= 1.0/50):
         fltr += 'SB;'
   # base quality filter. Reject if more than 40% reads are lowQ
   if vtype == 'SNP' and origAlt in alleleCnt.keys() and origAlt in lowQReads.keys():
      bqAlt = 1.0 * lowQReads[origAlt] / alleleCnt[origAlt]
   else:
      bqAlt = 0.0
   if bqAlt > 0.4:
      fltr += 'LowQ;'
   # random end and fixed end position filters (SNPs only)
   if vtype == 'SNP':
      # random end position filter
      endBase = 20 # distance to barcode end of the read
      # R1: compare REF vs ALT distance-to-end distributions with Fisher's test
      refLeEnd = sum(d <= endBase for d in r1BcEndPos[origRef]) # number of REF R2 reads with distance <= endBase
      refGtEnd = len(r1BcEndPos[origRef]) - refLeEnd # number of REF R2 reads with distance > endBase
      altLeEnd = sum(d <= endBase for d in r1BcEndPos[origAlt]) # number of ALT R2 reads with distance <= endBase
      altGtEnd = len(r1BcEndPos[origAlt]) - altLeEnd # number of ALT R2 reads with distance > endBase
      fisher = scipy.stats.fisher_exact([[refLeEnd, refGtEnd], [altLeEnd, altGtEnd]])
      oddsRatio = fisher[0]
      pvalue = fisher[1]
      if pvalue < 0.001 and oddsRatio < 0.05 and af_alt <= 60.0:
         fltr += 'R1CP;'
      # R2: same test on the R2 barcode-end distances
      refLeEnd = sum(d <= endBase for d in r2BcEndPos[origRef]) # number of REF R2 reads with distance <= endBase
      refGtEnd = len(r2BcEndPos[origRef]) - refLeEnd # number of REF R2 reads with distance > endBase
      altLeEnd = sum(d <= endBase for d in r2BcEndPos[origAlt]) # number of ALT R2 reads with distance <= endBase
      altGtEnd = len(r2BcEndPos[origAlt]) - altLeEnd # number of ALT R2 reads with distance > endBase
      fisher = scipy.stats.fisher_exact([[refLeEnd, refGtEnd], [altLeEnd, altGtEnd]])
      oddsRatio = fisher[0]
      pvalue = fisher[1]
      if pvalue < 0.001 and oddsRatio < 0.05 and af_alt <= 60.0:
         fltr += 'R2CP;'
      # fixed end position filter
      endBase = primerDist # distance to primer end of the read
      refLeEnd = sum(d <= endBase for d in r2PrimerEndPos[origRef]) # number of REF R2 reads with distance <= endBase
      refGtEnd = len(r2PrimerEndPos[origRef]) - refLeEnd # number of REF R2 reads with distance > endBase
      altLeEnd = sum(d <= endBase for d in r2PrimerEndPos[origAlt]) # number of ALT R2 reads with distance <= endBase
      altGtEnd = len(r2PrimerEndPos[origAlt]) - altLeEnd # number of ALT R2 reads with distance > endBase
      fisher = scipy.stats.fisher_exact([[refLeEnd, refGtEnd], [altLeEnd, altGtEnd]])
      oddsRatio = fisher[0]
      pvalue = fisher[1]
      # reject if variant is clustered within 2 bases from primer sequence due to possible enzyme initiation error
      if altLeEnd + altGtEnd > 0:
         if 1.0 * altLeEnd / (altLeEnd + altGtEnd) >= 0.98 or (pvalue < 0.001 and oddsRatio < 1.0/20):
            fltr += 'PrimerCP;'
   # done
   return fltr
#-------------------------------------------------------------------------------------
# function to call variants
#-------------------------------------------------------------------------------------
def vc(bamFile, chrom, pos, minBQ, minMQ, mtDepth, rpb, hpLen, mismatchThr, mtDrop, maxMT, primerDist, refGenome):
    """Call variants at a single genomic locus from a molecular-barcoded BAM.

    Piles up reads at chrom:pos, groups them by molecular tag (MT / barcode),
    derives a per-MT consensus base from base-quality-derived error
    probabilities (via calProb), sums per-base prediction indexes across MTs,
    and applies the variant filters (via filterVariants).

    Parameters:
        bamFile      -- path to an indexed BAM file
        chrom, pos   -- target locus; pos is a 1-based position passed as a string
        minBQ, minMQ -- minimum base / mapping quality for a read to be included
        mtDepth      -- mean MT depth of the run (drives the downsampling default)
        rpb          -- mean read pairs per MT (sets the strong-MT threshold smt)
        hpLen        -- minimum homopolymer length (passed through to filterVariants)
        mismatchThr  -- maximum mismatches per 100 bases for a read to be included
        mtDrop       -- drop MTs with <= this many reads (passed to calProb)
        maxMT        -- downsample to at most this many MTs; 0 means 2 * mtDepth
        primerDist   -- primer-proximity filter threshold (passed to filterVariants)
        refGenome    -- path to an indexed reference FASTA

    Returns:
        One tab-separated output line (str) whose fields parallel headerAll in
        main(). On zero usable coverage, returns a line with 41 blank fields
        and the filter 'Zero_Coverage'.
    """
    samfile = pysam.AlignmentFile(bamFile, 'rb')
    idx = 0  # NOTE(review): appears unused in this function
    cvg = 0  # total read depth at the locus, including reads later excluded
    bcDict = defaultdict(lambda: defaultdict(list))  # barcode -> read id -> [base, error prob, pair order]
    allBcDict = defaultdict(list)  # barcode -> read ids (all reads, incl. excluded)
    alleleCnt = defaultdict(int)  # base -> read count
    MTCnt = defaultdict(int)  # base -> consensus MT count
    r1BcEndPos = defaultdict(list)  # base -> distances to barcode end in R1 reads
    r2BcEndPos = defaultdict(list)  # base -> distances to barcode end in R2 reads
    r2PrimerEndPos = defaultdict(list)  # base -> distances to primer end in R2 reads
    MT3Cnt = 0   # MTs with >= 3 reads
    MT5Cnt = 0   # MTs with >= 5 reads
    MT7Cnt = 0   # MTs with >= 7 reads
    MT10Cnt = 0  # MTs with >= 10 reads
    strongMTCnt = defaultdict(int)  # base -> MTs whose prediction index exceeds smt
    predIndex = defaultdict(lambda: defaultdict(float))  # barcode -> base -> -log10(1 - P(base))
    finalDict = defaultdict(float)  # base -> prediction index summed over MTs
    r1Cnt = defaultdict(int)  # base -> included R1 read count
    r2Cnt = defaultdict(int)  # base -> included R2 read count
    forwardCnt = defaultdict(int)  # base -> forward-strand read count
    reverseCnt = defaultdict(int)  # base -> reverse-strand read count
    concordPairCnt = defaultdict(int)  # base -> read pairs agreeing on the base
    discordPairCnt = defaultdict(int)  # base -> read pairs disagreeing on the base
    mismatchCnt = defaultdict(float)  # base -> summed mismatches-per-100b over reads
    bqSum = defaultdict(int)  # base -> summed base quality
    lowQReads = defaultdict(int)  # base -> reads with base quality below minBQ
    # set threshold for strongMT based on mtDepth
    if rpb < 1.5:
        smt = 2.0
    elif rpb < 3.0:
        smt = 3.0
    else:
        smt = 4.0
    # get reference base
    refseq = pysam.FastaFile(refGenome)
    origRef = refseq.fetch(reference=chrom, start=int(pos)-1, end=int(pos))
    origRef = origRef.upper()
    # pile up reads
    # NOTE(review): the region string uses ':' between start and end; samtools-style
    # regions are normally 'chrom:start-end' -- confirm this parses as intended in pysam
    for read in samfile.pileup(region = chrom + ':' + pos + ':' + pos, truncate=True, max_depth=1000000, stepper='nofilter'):
        for pileupRead in read.pileups:
            # read ID
            qname = pileupRead.alignment.query_name
            qnameSplit = qname.split(":")
            readid = ':'.join(qnameSplit[:-2])
            # barcode sequence
            BC = qnameSplit[-2]
            # duplex tag - temporary hack from end of readid - should be CC, TT, or NN for duplex runs
            duplexTag = qnameSplit[-3]
            # mapping quality
            mq = pileupRead.alignment.mapping_quality
            # get NM tag (edit distance to the reference)
            NM = 0
            allTags = pileupRead.alignment.tags
            for (tag, value) in allTags:
                if tag == 'NM':
                    NM = value
                    break
            # count number of INDELs in the read sequence
            nIndel = 0
            cigar = pileupRead.alignment.cigar
            cigarOrder = 1
            leftSP = 0  # soft clipped bases on the left
            rightSP = 0  # soft clipped bases on the right
            for (op, value) in cigar:
                # 1 for insertion, 2 for deletion
                if op == 1 or op == 2:
                    nIndel += value
                # 4 for soft clip; first CIGAR op clips on the left, later ones on the right
                if cigarOrder == 1 and op == 4:
                    leftSP = value
                if cigarOrder > 1 and op == 4:
                    rightSP += value
                cigarOrder += 1
            # Number of mismatches except INDEL, including softcilpped sequences
            mismatch = max(0, NM - nIndel)
            # read length, including softclip
            readLen = pileupRead.alignment.query_length
            # calculate mismatch per 100 bases
            mismatchPer100b = 100.0 * mismatch / readLen if readLen > 0 else 0.0
            # paired read
            if pileupRead.alignment.is_read1:
                pairOrder = 'R1'
            if pileupRead.alignment.is_read2:
                pairOrder = 'R2'
            # +/- strand
            strand = 'Reverse' if pileupRead.alignment.is_reverse else 'Forward'
            # coverage -- read, not fragment
            cvg += 1
            # check if the site is the beginning of insertion
            if pileupRead.indel > 0:
                site = pileupRead.alignment.query_sequence[pileupRead.query_position]
                inserted = pileupRead.alignment.query_sequence[(pileupRead.query_position + 1) : (pileupRead.query_position + 1 + pileupRead.indel)]
                base = 'INS|' + site + '|' + site + inserted
                bq = pileupRead.alignment.query_qualities[pileupRead.query_position]
                bqSum[base] += bq
                # inclusion condition
                incCond = bq >= minBQ and mq >= minMQ and mismatchPer100b <= mismatchThr
                alleleCnt[base] += 1
                mismatchCnt[base] += mismatchPer100b
                if pairOrder == 'R1':
                    r1Cnt[base] += 1
                if pairOrder == 'R2':
                    r2Cnt[base] += 1
                if strand == 'Reverse':
                    reverseCnt[base] += 1
                else:
                    forwardCnt[base] += 1
            # check if the site is the beginning of deletion
            elif pileupRead.indel < 0:
                site = pileupRead.alignment.query_sequence[pileupRead.query_position]
                deleted = refseq.fetch(reference=chrom, start=int(pos), end=int(pos)+abs(pileupRead.indel))
                deleted = deleted.upper()
                base = 'DEL|' + site + deleted + '|' + site
                bq = pileupRead.alignment.query_qualities[pileupRead.query_position]
                bqSum[base] += bq
                # inclusion condition
                incCond = bq >= minBQ and mq >= minMQ and mismatchPer100b <= mismatchThr
                alleleCnt[base] += 1
                mismatchCnt[base] += mismatchPer100b
                if pairOrder == 'R1':
                    r1Cnt[base] += 1
                if pairOrder == 'R2':
                    r2Cnt[base] += 1
                if strand == 'Reverse':
                    reverseCnt[base] += 1
                else:
                    forwardCnt[base] += 1
            # site is not beginning of any INDEL
            else:
                # If the site ifself is a deletion, set quality = minBQ
                if pileupRead.is_del:
                    base = 'DEL'
                    bq = minBQ
                    bqSum[base] += bq
                    # inclusion condition
                    incCond = bq >= minBQ and mq >= minMQ and mismatchPer100b <= mismatchThr
                # if the site is a regular locus,
                else:
                    base = pileupRead.alignment.query_sequence[pileupRead.query_position]  # note: query_sequence includes soft clipped bases
                    bq = pileupRead.alignment.query_qualities[pileupRead.query_position]
                    bqSum[base] += bq
                    # count the number of low quality reads (less than Q20 by default) for each base
                    if bq < minBQ:
                        lowQReads[base] += 1
                    # inclusion condition
                    incCond = bq >= minBQ and mq >= minMQ and mismatchPer100b <= mismatchThr
                    if pairOrder == 'R1':
                        # distance to the barcode end in R1;
                        if pileupRead.alignment.is_reverse:
                            distToBcEnd = pileupRead.alignment.query_alignment_length - (pileupRead.query_position - leftSP)
                        else:
                            distToBcEnd = pileupRead.query_position - leftSP
                        if incCond:
                            r1BcEndPos[base].append(distToBcEnd)
                        r1Cnt[base] += 1
                    if pairOrder == 'R2':
                        # distance to the barcode and/or primer end in R2. Different cases for forward and reverse strand
                        if pileupRead.alignment.is_reverse:
                            distToBcEnd = pileupRead.query_position - leftSP
                            distToPrimerEnd = pileupRead.alignment.query_alignment_length - (pileupRead.query_position - leftSP)
                        else:
                            distToBcEnd = pileupRead.alignment.query_alignment_length - (pileupRead.query_position - leftSP)
                            distToPrimerEnd = pileupRead.query_position - leftSP
                        if incCond:
                            r2BcEndPos[base].append(distToBcEnd)
                            r2PrimerEndPos[base].append(distToPrimerEnd)
                        r2Cnt[base] += 1
                if strand == 'Reverse':
                    reverseCnt[base] += 1
                else:
                    forwardCnt[base] += 1
                alleleCnt[base] += 1
                mismatchCnt[base] += mismatchPer100b
            # count total number of fragments and MTs
            if readid not in allBcDict[BC]:
                allBcDict[BC].append(readid)
            # decide which read goes into analysis
            if incCond:
                if readid not in bcDict[BC]:
                    # first read of the pair seen at this locus: record base,
                    # base-call error probability, and which mate it came from
                    prob = pow(10.0, -bq / 10.0)
                    readinfo = [base, prob, pairOrder]
                    bcDict[BC][readid].append(readinfo)
                elif base == bcDict[BC][readid][0][0] or base in ['N', '*']:
                    # mate agrees (or is uninformative): keep the larger error probability
                    bcDict[BC][readid][0][1] = max((pow(10.0, -bq / 10.0) , bcDict[BC][readid][0][1]))
                    bcDict[BC][readid][0][2] = 'Paired'
                    if base == bcDict[BC][readid][0][0]:
                        concordPairCnt[base] += 1
                else:
                    # mates disagree: drop the fragment from analysis entirely
                    del bcDict[BC][readid]
                    discordPairCnt[base] += 1
    # total number of MT, fragments, reads, including those dropped from analysis
    allMT = len(allBcDict)
    allFrag = sum([len(allBcDict[bc]) for bc in allBcDict])
    # downsampling MTs (not dropped) to args.maxMT
    ds = maxMT if maxMT > 0 else int(round(2.0 * mtDepth))
    # MTs used
    usedMT = min(ds, len(bcDict))
    # done if zero coverage (note hack for 41 blank output fields!)
    if usedMT == 0:
        out_long = '\t'.join([chrom, pos, origRef] + ['']*41 + ['Zero_Coverage'])
        return out_long
    # seed on the position string so the downsampling is reproducible per locus
    if len(bcDict) > ds:
        random.seed(pos)
        bcKeys = random.sample(bcDict.keys(), ds)
    else:
        bcKeys = bcDict.keys()
    usedFrag = sum([len(bcDict[bc]) for bc in bcKeys])
    totalR1 = sum(r1Cnt.values())  # NOTE(review): totalR1/totalR2 appear unused below
    totalR2 = sum(r2Cnt.values())
    for bc in bcKeys:
        # per-MT posterior-like probabilities for each base (project helper)
        bcProb = calProb(bcDict[bc], mtDrop)
        for char in bcProb.iterkeys():
            x = 1.0 - bcProb[char]
            # cap the prediction index at 16 when the probability saturates at 1.0
            log10P = -math.log10(x) if x > 0.0 else 16.0
            predIndex[bc][char] = log10P
            finalDict[char] += log10P
        # consensus base(s) of this MT = base(s) with the max prediction index
        max_base = [x for x in predIndex[bc].keys() if predIndex[bc][x] == max(predIndex[bc].values())]
        if len(max_base) == 1:
            cons = max_base[0]
            MTCnt[cons] += 1
            if predIndex[bc][cons] > smt:
                strongMTCnt[cons] += 1
        # Tie in max predIndex is most likely due to single read MT.
        elif len(bcDict[bc]) == 1:
            cons = bcDict[bc].values()[0][0][0]
            MTCnt[cons] += 1
        # tally MT read-depth strata
        if len(bcDict[bc]) >= 3:
            MT3Cnt += 1
            if len(bcDict[bc]) >= 5:
                MT5Cnt += 1
                if len(bcDict[bc]) >= 7:
                    MT7Cnt += 1
                    if len(bcDict[bc]) >= 10:
                        MT10Cnt += 1
    # rank bases by total prediction index
    # NOTE(review): assumes finalDict has at least 2 entries; a locus where every
    # MT supports a single base would raise IndexError on sortedList[1]
    sortedList = sorted(finalDict.items(), key=operator.itemgetter(1), reverse=True)
    maxBase = sortedList[0][0]
    maxPI = sortedList[0][1]
    secondMaxBase = sortedList[1][0]
    secondMaxPI = sortedList[1][1]
    # call variants
    origAlt = secondMaxBase if maxBase == origRef else maxBase
    altPI = secondMaxPI if maxBase == origRef else maxPI
    # convert from internal smCounter format to format needed for output
    (ref, alt, vtype) = convertToVcf(origRef,origAlt)
    # apply filters if PI >= 5 (at least 2 MTs), and locus not in a deletion
    fltr = ';'
    if altPI >= 5 and vtype in ('SNP', 'INDEL'):
        fltr = filterVariants(ref,alt,vtype,origAlt,origRef,usedMT,strongMTCnt,chrom,pos,hpLen,refGenome,MTCnt,alleleCnt,cvg,discordPairCnt,concordPairCnt,reverseCnt,forwardCnt,lowQReads,r1BcEndPos,r2BcEndPos,r2PrimerEndPos,primerDist)
    # identify possible bi-allelic variants - top 2 alleles are non-reference and both VMFs >= 45%. Not necessarily passing the filters.
    mfAlt = 1.0 * MTCnt[maxBase] / usedMT  # MT fraction of the base with the highest PI
    mfAlt2 = 1.0 * MTCnt[secondMaxBase] / usedMT  # MT fraction of the base with the second highest PI
    if maxBase != origRef and secondMaxBase != origRef and mfAlt >= 0.45 and mfAlt2 >= 0.45:  # conditions to be considered bi-allelic
        # convert from internal smCounter format to format needed for output
        origAlt2 = secondMaxBase
        (ref2, alt2, vtype2) = convertToVcf(origRef,origAlt2)
        # apply filters to 2nd variant if PI2 >= 5 (at least 2 MTs), and locus not in a deletion
        fltr2 = ';'
        if secondMaxPI >= 5 and vtype2 in ('SNP', 'INDEL'):
            fltr2 = filterVariants(ref2,alt2,vtype2,origAlt2,origRef,usedMT,strongMTCnt,chrom,pos,hpLen,refGenome,MTCnt,alleleCnt,cvg,discordPairCnt,concordPairCnt,reverseCnt,forwardCnt,lowQReads,r1BcEndPos,r2BcEndPos,r2PrimerEndPos,primerDist)
        # prepare output for bi-allelic variants (if var2 is filtered, regardless of var1, do nothing. output var1 only)
        if fltr == ';' and fltr2 == ';':  # both var1 and var2 pass the filters -- this is a bi-allelic variant. var1's statistics (MT, DP, etc) are reported
            alt = alt + ',' + alt2
            vtype = vtype.lower() + ',' + vtype2.lower()
        elif fltr != ';' and fltr2 == ';':  # if var1 is filtered and the var2 passes, then it's a single variant of var2
            alt = alt2
            fltr = fltr2
            origAlt = origAlt2
    # build detailed output vector
    frac_alt = round((1.0 * alleleCnt[origAlt] / cvg),4)  # based on all reads, including the excluded reads
    frac_A = round((1.0 * alleleCnt['A'] / cvg),4)
    frac_T = round((1.0 * alleleCnt['T'] / cvg),4)
    frac_G = round((1.0 * alleleCnt['G'] / cvg),4)
    frac_C = round((1.0 * alleleCnt['C'] / cvg),4)
    fracs = (alleleCnt['A'], alleleCnt['T'], alleleCnt['G'], alleleCnt['C'], frac_A, frac_T, frac_G, frac_C)
    MT_f_alt = round((1.0 * MTCnt[origAlt] / usedMT),4)  # based on only used MTs
    MT_f_A = round((1.0 * MTCnt['A'] / usedMT),4)
    MT_f_T = round((1.0 * MTCnt['T'] / usedMT),4)
    MT_f_G = round((1.0 * MTCnt['G'] / usedMT),4)
    MT_f_C = round((1.0 * MTCnt['C'] / usedMT),4)
    MTs = (MT3Cnt, MT5Cnt, MT7Cnt, MT10Cnt, MTCnt['A'], MTCnt['T'], MTCnt['G'], MTCnt['C'], MT_f_A, MT_f_T, MT_f_G, MT_f_C)
    strongMT = (strongMTCnt['A'], strongMTCnt['T'], strongMTCnt['G'], strongMTCnt['C'])
    predIdx = (round(finalDict['A'], 2), round(finalDict['T'], 2), round(finalDict['G'], 2), round(finalDict['C'], 2))
    # field order must parallel headerAll in main()
    outvec = [chrom, pos, ref, alt, vtype, cvg, allFrag, allMT, usedFrag, usedMT, round(finalDict[origAlt], 2), alleleCnt[origAlt], frac_alt, MTCnt[origAlt], MT_f_alt, strongMTCnt[origAlt]]
    outvec.extend(fracs)
    outvec.extend(MTs)
    outvec.extend(strongMT)
    outvec.extend(predIdx)
    outvec.append(fltr)
    out_long = '\t'.join((str(x) for x in outvec))
    return out_long
#------------------------------------------------------------------------------------------------
# wrapper function for "vc()" - because Python multiprocessing module does not pass stack trace
#------------------------------------------------------------------------------------------------
def vc_wrapper(*args):
    """Run vc() and convert any exception into a sentinel string.

    Python's multiprocessing module does not propagate a child's stack trace
    to the parent, so failures are returned as a string beginning with
    "Exception thrown!" followed by the formatted traceback; main() scans the
    pooled results for this prefix and re-raises in the parent process.

    args: positional arguments forwarded verbatim to vc(); args[1] and
    args[2] are the chromosome and position used in the error message.
    """
    try:
        output = vc(*args)
    except Exception:
        # was a bare `except:` -- that would also swallow KeyboardInterrupt and
        # SystemExit, making a worker pool impossible to interrupt cleanly
        print("Exception thrown in vc() function at genome location:", args[1], args[2])
        output = "Exception thrown!\n" + traceback.format_exc()
    return output
#------------------------------------------------------------------------------------------------
# global for argument parsing (hack that works when calling from either command line or pipeline)
#------------------------------------------------------------------------------------------------
parser = None  # module-level ArgumentParser; built lazily by argParseInit()
def argParseInit():  # this is done inside a function because multiprocessing module imports the script
    """Build the module-level `parser` with all smCounter options.

    Kept out of module top level so that worker processes spawned by
    `multiprocessing` (which re-import this script) do not pay for or depend
    on parser construction. `fromfile_prefix_chars='@'` allows passing a
    parameter file as `@file` (one `--key=value` per line).
    """
    global parser
    parser = argparse.ArgumentParser(description='Variant calling using molecular barcodes', fromfile_prefix_chars='@')
    parser.add_argument('--outPrefix', default=None, required=True, help='prefix for output files')
    parser.add_argument('--bamFile' , default=None, required=True, help='BAM file')
    parser.add_argument('--bedTarget', default=None, required=True, help='BED file for target region')
    parser.add_argument('--mtDepth' , default=None, required=True, type=int, help='Mean MT depth')
    parser.add_argument('--rpb' , default=None, required=True, type=float, help='Mean read pairs per MT')
    parser.add_argument('--nCPU' , type=int, default=1 , help='number of CPUs to use in parallel')
    parser.add_argument('--minBQ' , type=int, default=20, help='minimum base quality allowed for analysis')
    parser.add_argument('--minMQ' , type=int, default=30, help='minimum mapping quality allowed for analysis')
    parser.add_argument('--hpLen' , type=int, default=10, help='Minimum length for homopolymers')
    parser.add_argument('--mismatchThr', type=float, default=6.0, help='average number of mismatches per 100 bases allowed')
    parser.add_argument('--mtDrop' , type=int, default=0, help='Drop MTs with lower than or equal to X reads.')
    parser.add_argument('--maxMT' , type=int, default=0, help='Randomly downsample to X MTs (max number of MTs at any position). If set to 0 (default), maxMT = 2.0 * mean MT depth')
    parser.add_argument('--primerDist' , type=int, default=2, help='filter variants that are within X bases to primer')
    parser.add_argument('--threshold' , type=int, default=0, help='Minimum prediction index for a variant to be called. Must be non-negative. Typically ranges from 10 to 60. If set to 0 (default), smCounter will choose the appropriate cutoff based on the mean MT depth.')
    # NOTE(review): the following defaults are site-specific absolute paths; they
    # will not exist outside the original environment and should be overridden
    parser.add_argument('--refGenome' , default = '/qgen/home/rvijaya/downloads/alt_hap_masked_ref/ucsc.hg19.fasta')
    parser.add_argument('--bedTandemRepeats' , default = '/qgen/home/xuc/UCSC/simpleRepeat.bed', help = 'bed for UCSC tandem repeats')
    parser.add_argument('--bedRepeatMaskerSubset', default = '/qgen/home/xuc/UCSC/SR_LC_SL.nochr.bed', help = 'bed for RepeatMasker simple repeats, low complexity, microsatellite regions')
    parser.add_argument('--bedtoolsPath' , default = '/qgen/bin/bedtools-2.25.0/bin/', help = 'path to bedtools')
    parser.add_argument('--runPath' , default=None, help='path to working directory')
    parser.add_argument('--logFile' , default=None, help='log file')
    parser.add_argument('--paramFile', default=None, help='optional parameter file that contains the above paramters. if specified, this must be the only parameter, except for --logFile.')
#--------------------------------------------------------------------------------------
# main function
#--------------------------------------------------------------------------------------
def main(args):
    """Top-level driver: parse arguments, call variants at every target locus
    in parallel, annotate repeat regions, and write the three outputs
    (<outPrefix>.smCounter.all.txt, .cut.txt, .cut.vcf).

    `args` may be an argparse.Namespace (command line) or a dict-like object
    supplied by an upstream pipeline (converted to --key=value strings below).
    Returns the prediction-index threshold actually used for calling.
    """
    # log run start
    timeStart = datetime.datetime.now()
    print("smCounter started at " + str(timeStart))
    # if argument parser global not assigned yet, initialize it
    if parser == None:
        argParseInit()
    # get arguments passed in via a lambda object (e.g. from upstream pipeline)
    if type(args) is not argparse.Namespace:
        argsList = []
        for argName, argVal in args.iteritems():  # NOTE: Python 2 dict API
            argsList.append("--{0}={1}".format(argName, argVal))
        args = parser.parse_args(argsList)
    # get arguments from disk file specified on command line (warning: this silently deletes all actual command line parameters)
    elif args.paramFile != None:
        args = parser.parse_args(("@" + args.paramFile,))
    # echo all parameters to the log file
    for argName, argVal in vars(args).iteritems():
        print(argName, argVal)
    # change working directory to runDir
    if args.runPath != None:
        os.chdir(args.runPath)
    # make list of loci to call variants
    locList = []
    for line in open(args.bedTarget, 'r'):
        if not line.startswith("track "):
            (chrom, regionStart, regionEnd) = line.strip().split('\t')[0:3]
            # BED is 0-based half-open; emit 1-based positions as strings
            for pos in range(int(regionStart),int(regionEnd)):
                locList.append((chrom, str(pos+1)))
    # call variants in parallel (one task per locus)
    pool = multiprocessing.Pool(processes=args.nCPU)
    results = [pool.apply_async(vc_wrapper, args=(args.bamFile, x[0], x[1], args.minBQ, args.minMQ, args.mtDepth, args.rpb, args.hpLen, args.mismatchThr, args.mtDrop, args.maxMT, args.primerDist, args.refGenome)) for x in locList]
    output = [p.get() for p in results]
    pool.close()
    pool.join()
    # check for exceptions thrown by vc()
    for idx in range(len(output)):
        line = output[idx]
        if line.startswith("Exception thrown!"):
            print(line)
            raise Exception("Exception thrown in vc() at location: " + str(locList[idx]))
    # report start of variant filtering
    print("begin variant filtering and output")
    # merge and sort RepeatMasker tracks (could be done prior to run) Note: assuming TRF repeat already merged and sorted!!
    bedExe = args.bedtoolsPath + 'bedtools'
    bedRepeatMasker = args.outPrefix + '.tmp.repeatMasker.bed'
    subprocess.check_call(bedExe + ' merge -c 4 -o distinct -i ' + args.bedRepeatMaskerSubset + ' | ' + bedExe + ' sort -i - > ' + bedRepeatMasker, shell=True)
    # merge and sort target region
    bedTarget = args.outPrefix + '.tmp.target.bed'
    subprocess.check_call(bedExe + ' merge -i ' + args.bedTarget + ' | ' + bedExe + ' sort -i - > ' + bedTarget, shell=True)
    # intersect 2 repeats tracks with target region
    subprocess.check_call(bedExe + ' intersect -a ' + args.bedTandemRepeats + ' -b ' + bedTarget + ' | ' + bedExe + ' sort -i - > ' + args.outPrefix + '.tmp.target.repeats1.bed', shell=True)
    subprocess.check_call(bedExe + ' intersect -a ' + bedRepeatMasker + ' -b ' + bedTarget + ' | ' + bedExe + ' sort -i - > ' + args.outPrefix + '.tmp.target.repeats2.bed', shell=True)
    # read in tandem repeat list
    trfRegions = defaultdict(list)
    for line in open(args.outPrefix + '.tmp.target.repeats1.bed', 'r'):
        vals = line.strip().split()
        (chrom, regionStart, regionEnd) = vals[0:3]
        trfRegions[chrom].append((int(regionStart), int(regionEnd), "RepT;"))
    # read in simple repeat, low complexity, satellite list
    rmRegions = defaultdict(list)
    for line in open(args.outPrefix + '.tmp.target.repeats2.bed', 'r'):
        (chrom, regionStart, regionEnd, typeCodes) = line.strip().split()
        repTypes = []
        for typeCode in typeCodes.split(","):
            if typeCode == 'Simple_repeat':
                repTypes.append('RepS')
            elif typeCode == 'Low_complexity':
                repTypes.append('LowC')
            elif typeCode == 'Satellite':
                repTypes.append('SL')
            else:
                repTypes.append('Other_Repeat')
        repType = ";".join(repTypes) + ";"
        rmRegions[chrom].append((int(regionStart), int(regionEnd), repType))
    # remove intermediate files
    os.remove(args.outPrefix + '.tmp.target.bed')
    os.remove(args.outPrefix + '.tmp.repeatMasker.bed')
    os.remove(args.outPrefix + '.tmp.target.repeats1.bed')
    os.remove(args.outPrefix + '.tmp.target.repeats2.bed')
    # set up header columns (Note: "headerAll" must parallel the output of the vc() function.)
    headerAll = ('CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'DP', 'FR' , 'MT', 'UFR', 'UMT', 'PI', 'VDP', 'VAF', 'VMT', 'VMF', 'VSM', 'DP_A', 'DP_T', 'DP_G', 'DP_C', 'AF_A', 'AF_T', 'AF_G', 'AF_C', 'MT_3RPM', 'MT_5RPM', 'MT_7RPM', 'MT_10RPM', 'UMT_A', 'UMT_T', 'UMT_G', 'UMT_C', 'UMF_A', 'UMF_T', 'UMF_G', 'UMF_C', 'VSM_A', 'VSM_T', 'VSM_G', 'VSM_C', 'PI_A', 'PI_T', 'PI_G', 'PI_C', 'FILTER')
    headerVariants = ('CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'DP', 'MT', 'UMT', 'PI', 'THR', 'VMT', 'VMF', 'VSM', 'FILTER')
    # set up hash of variable fields
    headerAllIndex = {}
    for i in range(len(headerAll)):
        headerAllIndex[headerAll[i]] = i
    # ALL repeats filter. If MT fraction < 40% and the variant is inside the tandem repeat region, reject.
    for i in range(len(output)):
        outline = output[i]
        lineList = outline.split('\t')
        chromTr = lineList[headerAllIndex['CHROM']]
        altTr = lineList[headerAllIndex['ALT']]
        try:
            posTr = int(lineList[headerAllIndex['POS']])
        except ValueError:
            continue
        try:
            # NOTE(review): VMF is blank for Zero_Coverage lines, so they skip
            # repeat annotation here via this continue
            altMtFracTr = float(lineList[headerAllIndex['VMF']])
        except ValueError:
            continue
        try:
            pred = int(float(lineList[headerAllIndex['PI']]))
        except ValueError:
            pred = 0
        if pred >= 5 and altTr != 'DEL':
            # check tandem repeat from TRF if MT fraction < 40%
            if altMtFracTr < 40:
                for (locL, locR, repType) in trfRegions[chromTr]:
                    if locL < posTr <= locR:
                        lineList[-1] += repType
                        break
            # check simple repeat, lc, sl from RepeatMasker
            for (locL, locR, repType) in rmRegions[chromTr]:
                if locL < posTr <= locR:
                    lineList[-1] += repType
                    break
        # normalize the FILTER field: bare ';' means every filter passed
        lineList[-1] = 'PASS' if lineList[-1] == ';' else lineList[-1].strip(';')
        output[i] = '\t'.join(lineList)
    # VCF header
    header_vcf = \
        '##fileformat=VCFv4.2\n' + \
        '##reference=GRCh37\n' + \
        '##INFO=<ID=TYPE,Number=1,Type=String,Description="Variant type: SNP or INDEL">\n' + \
        '##INFO=<ID=DP,Number=1,Type=Integer,Description="Total read depth">\n' + \
        '##INFO=<ID=MT,Number=1,Type=Integer,Description="Total MT depth">\n' + \
        '##INFO=<ID=UMT,Number=1,Type=Integer,Description="Filtered MT depth">\n' + \
        '##INFO=<ID=PI,Number=1,Type=Float,Description="Variant prediction index">\n' + \
        '##INFO=<ID=THR,Number=1,Type=Integer,Description="Variant prediction index minimum threshold">\n' + \
        '##INFO=<ID=VMT,Number=1,Type=Integer,Description="Variant MT depth">\n' + \
        '##INFO=<ID=VMF,Number=1,Type=Float,Description="Variant MT fraction">\n' + \
        '##INFO=<ID=VSM,Number=1,Type=Integer,Description="Variant strong MT depth">\n' + \
        '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n' + \
        '##FORMAT=<ID=AD,Number=.,Type=Integer,Description="Filtered allelic MT depths for the ref and alt alleles">\n' + \
        '##FORMAT=<ID=VF,Number=1,Type=Float,Description="Variant MT fraction, same as VMF">\n' + \
        '##FILTER=<ID=RepT,Description="Variant in simple tandem repeat region, as defined by Tandem Repeats Finder">\n' + \
        '##FILTER=<ID=RepS,Description="Variant in simple repeat region, as defined by RepeatMasker">\n' + \
        '##FILTER=<ID=LowC,Description="Variant in low complexity region, as defined by RepeatMasker">\n' + \
        '##FILTER=<ID=SL,Description="Variant in micro-satelite region, as defined by RepeatMasker">\n' + \
        '##FILTER=<ID=HP,Description="Inside or flanked by homopolymer region">\n' + \
        '##FILTER=<ID=LM,Description="Low coverage (fewer than 5 MTs)">\n' + \
        '##FILTER=<ID=LSM,Description="Fewer than 2 strong MTs">\n' + \
        '##FILTER=<ID=SB,Description="Strand bias">\n' + \
        '##FILTER=<ID=LowQ,Description="Low base quality (mean < 22)">\n' + \
        '##FILTER=<ID=MM,Description="Too many genome reference mismatches in reads (default threshold is 6.5 per 100 bases)">\n' + \
        '##FILTER=<ID=DP,Description="Too many discordant read pairs">\n' + \
        '##FILTER=<ID=R1CP,Description="Variants are clustered at the end of R1 reads">\n' + \
        '##FILTER=<ID=R2CP,Description="Variants are clustered at the end of R2 reads">\n' + \
        '##FILTER=<ID=PrimerCP,Description="Variants are clustered immediately after the primer, possible enzyme initiation error">\n' + \
        '\t'.join(('#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', args.outPrefix)) + '\n'
    # set cutoff value for about 20 FP/Mb
    threshold = int(math.ceil(14.0 + 0.012 * args.mtDepth)) if args.threshold == 0 else args.threshold
    # open output files
    outAll = open(args.outPrefix + '.smCounter.all.txt', 'w')
    outVariants = open(args.outPrefix + '.smCounter.cut.txt', 'w')
    outVcf = open(args.outPrefix + '.smCounter.cut.vcf', 'w')
    # write column headers
    outAll.write('\t'.join(headerAll) + '\n')
    outVariants.write('\t'.join(headerVariants) + '\n')
    outVcf.write(header_vcf)
    for line in output:
        # write to the detailed output
        outAll.write(line)
        outAll.write("\n")
        # unpack text fields
        fields = line.split('\t')
        # skip if no PI (e.g. Zero_Coverage lines)
        PI = fields[headerAllIndex['PI']]
        if len(PI) == 0:
            continue
        # get ALT and prediction index
        ALT = fields[headerAllIndex['ALT']]
        QUAL = str(int(float(PI)))  # truncate PI to conform to VCF phred-like tradition
        # write to vcf file and short output
        if int(QUAL) >= threshold and ALT != 'DEL':  # if PI > threshold, write to vcf (regardless of filters)
            # parse fields needed from main data vector
            CHROM = fields[headerAllIndex['CHROM']]
            POS = fields[headerAllIndex['POS']]
            REF = fields[headerAllIndex['REF']]
            TYPE = fields[headerAllIndex['TYPE']]
            DP = fields[headerAllIndex['DP']]
            MT = fields[headerAllIndex['MT']]
            UMT = fields[headerAllIndex['UMT']]
            VMT = fields[headerAllIndex['VMT']]
            VMF = fields[headerAllIndex['VMF']]
            VSM = fields[headerAllIndex['VSM']]
            FILTER= fields[headerAllIndex['FILTER']]
            THR = str(threshold)
            INFO = ';'.join(('TYPE='+TYPE, 'DP='+DP, 'MT='+MT, 'UMT='+UMT, 'PI='+PI, 'THR='+THR, 'VMT='+VMT, 'VMF='+VMF, 'VSM='+VSM))
            # hack attempt to satisfy downstream software - not correct for germline heterozygous, male X, etc, etc, etc
            alts = ALT.split(",")
            if len(alts) == 2:
                genotype = '1/2'
            elif len(alts) != 1:
                raise Exception("error hacking genotype field for " + alts)
            elif CHROM == "chrY" or CHROM == "chrM":
                genotype = '1'
            elif float(VMF) > 0.95:
                genotype = '1/1'
            else:
                genotype = '0/1'
            REFMT = str(int(UMT) - int(VMT))
            AD = REFMT + "," + VMT
            if len(alts) == 2:
                AD = AD + ",1"  # horrific hack for the 2nd alt
            # output
            FORMAT = 'GT:AD:VF'
            SAMPLE = ":".join((genotype,AD,VMF))
            ID = '.'
            vcfLine = '\t'.join((CHROM, POS, ID, REF, ALT, QUAL, FILTER, INFO, FORMAT, SAMPLE)) + '\n'
            shortLine = '\t'.join((CHROM, POS, REF, ALT, TYPE, DP, MT, UMT, PI, THR, VMT, VMF, VSM, FILTER)) + '\n'
            outVcf.write(vcfLine)
            outVariants.write(shortLine)
            # debug counter for summary
            # NOTE(review): these are assignments, not increments -- as written the
            # counters never accumulate and are not read anywhere; verify intent
            if TYPE == 'SNP':
                numCalledSnps = 0
            else:
                numCalledIndels = 0
    outVcf.close()
    outAll.close()
    outVariants.close()
    # log run completion
    timeEnd = datetime.datetime.now()
    print("smCounter completed running at " + str(timeEnd))
    print("smCounter total time: "+ str(timeEnd-timeStart))
    # pass threshold back to caller
    return threshold
#----------------------------------------------------------------------------------------------
# pythonism to run from the command line
#----------------------------------------------------------------------------------------------
# command-line entry point: build the parser, set up logging, then run main()
if __name__ == "__main__":
    # init the argument parser
    argParseInit()
    # get command line arguments
    args = parser.parse_args()
    # initialize logger (project-local module; redirects output to args.logFile)
    import run_log
    run_log.init(args.logFile)
    # call main program
    main(args)
| 45.796117 | 399 | 0.589546 |
a25fe2928329cd1bc55c19e7127af63a005ae581 | 343 | py | Python | heartbeat_demo/ipfs_connect.py | drbh/nucypher-ipfs | c285f21dc10fb9b61f0112267e438ad59dc6e4ed | [
"MIT"
] | 3 | 2019-03-18T13:55:17.000Z | 2019-03-24T09:15:24.000Z | heartbeat_demo/ipfs_connect.py | drbh/nucypher-ipfs | c285f21dc10fb9b61f0112267e438ad59dc6e4ed | [
"MIT"
] | null | null | null | heartbeat_demo/ipfs_connect.py | drbh/nucypher-ipfs | c285f21dc10fb9b61f0112267e438ad59dc6e4ed | [
"MIT"
] | null | null | null | import ipfsapi
# Prefer a local IPFS daemon; on any connection failure fall back to the
# public Infura gateway on the same API port.
try:
    ipfs_gateway_api = ipfsapi.connect('127.0.0.1', 5001)
except Exception as e: # should be more specific ConnectionRefusedError, NewConnectionError, MaxRetryError not sure
    print("Automatic Mode A Public Gateway will be used as a fallback")
    ipfs_gateway_api = ipfsapi.connect('https://ipfs.infura.io', 5001)
| 38.111111 | 115 | 0.752187 |
8f8c153ee9d14c38b96edf9c61bcc81fd3200285 | 7,366 | py | Python | docs/source/_static/zzz_GENERATED_NOTEBOOK_SOURCE/etl/vcf2delta.py | heuermh/glow | 4a414c62a28fddce6f4428ecdb746a2a58251fc1 | [
"Apache-2.0"
] | null | null | null | docs/source/_static/zzz_GENERATED_NOTEBOOK_SOURCE/etl/vcf2delta.py | heuermh/glow | 4a414c62a28fddce6f4428ecdb746a2a58251fc1 | [
"Apache-2.0"
] | null | null | null | docs/source/_static/zzz_GENERATED_NOTEBOOK_SOURCE/etl/vcf2delta.py | heuermh/glow | 4a414c62a28fddce6f4428ecdb746a2a58251fc1 | [
"Apache-2.0"
] | null | null | null | # Databricks notebook source
# MAGIC %md
# MAGIC
# MAGIC ### Write VCF files into <img width="175px" src="https://docs.delta.io/latest/_static/delta-lake-logo.png">
# MAGIC
# MAGIC ### using Glow <img src="https://databricks-knowledge-repo-images.s3.us-east-2.amazonaws.com/HLS/glow/project_glow_logo.png" alt="logo" width="35"/>
# MAGIC
# MAGIC Delta Lake is an open-source storage layer that brings ACID transactions to Apache Spark™ and big data workloads.
# MAGIC
# MAGIC Delta Lake is applied in the following use cases in genomics:
# MAGIC
# MAGIC 1. joint-genotyping of population-scale cohorts
# MAGIC - using GATK's GenotypeGVCFs ([blog](https://databricks.com/blog/2019/06/19/accurately-building-genomic-cohorts-at-scale-with-delta-lake-and-spark-sql.html))
# MAGIC - or custom machine learning algorithms, for example for copy-number variants
# MAGIC 2. managing variant data in data lakes ([blog](https://databricks.com/blog/2019/03/07/simplifying-genomics-pipelines-at-scale-with-databricks-delta.html))
# MAGIC - once volumes reach thousands of VCF files
# MAGIC 3. querying and statistical analysis of genotype data
# MAGIC - once volumes reach billions of genotypes
# MAGIC
# MAGIC This notebook processes chromosome 22 of the 1000 Genomes project directly from cloud storage into Delta Lake, with the following steps:
# MAGIC
# MAGIC 1. Read in pVCF as a Spark Data Source using Glow's VCF reader
# MAGIC 2. Write out DataFrame as a Delta Lake
# MAGIC 3. Query individual genotypes
# MAGIC 4. Subset the data randomly
# MAGIC 5. Write out as VCF (to show backwards compatibility with flat file formats)
# COMMAND ----------
import glow
# Attach Glow's SQL functions and transformers to the active Spark session.
spark = glow.register(spark)
import pyspark.sql.functions as fx
from pyspark.sql.types import *
from random import sample
# COMMAND ----------
# MAGIC %md
# MAGIC #### set up paths
# COMMAND ----------
# Input pVCF: chromosome 22 of the 1000 Genomes project (bundled Databricks dataset).
vcf_path = '/databricks-datasets/genomics/1kg-vcfs/ALL.chr22.phase3_shapeit2_mvncall_integrated_v5a.20130502.genotypes.vcf.gz'
# NOTE(review): phenotypes_path is defined but never used in this notebook.
phenotypes_path = '/databricks-datasets/genomics/1000G/phenotypes.normalized'
# Local-file (FUSE) path to the GRCh37 FASTA consumed by normalize_variants.
reference_genome_path = "/dbfs/databricks-datasets/genomics/grch37/data/human_g1k_v37.fa"
# Output locations: the Delta Lake table and the subset VCF written at the end.
delta_output_path = "dbfs:/home/genomics/delta/1kg-delta"
vcf_output_path = "dbfs:/home/genomics/vcf/subset.vcf"
# COMMAND ----------
# MAGIC %md
# MAGIC #### Read in pVCF as a [Spark Data Source](https://spark.apache.org/docs/latest/sql-data-sources.html) using Glow <img src="https://databricks-knowledge-repo-images.s3.us-east-2.amazonaws.com/HLS/glow/project_glow_logo.png" alt="logo" width="30"/>
# COMMAND ----------
# MAGIC %md
# MAGIC Note: if there are multiple files, use a wildcard (*)
# COMMAND ----------
# Load the pVCF lazily through Glow's "vcf" Spark data source.
vcf = (
spark
.read
.format("vcf")
.load(vcf_path)
)
# COMMAND ----------
display(vcf)
# COMMAND ----------
# MAGIC %md
# MAGIC #### Print the VCF schema
# MAGIC
# MAGIC __INFO__ fields are promoted to top level columns by default, with the prefix `INFO_`
# MAGIC
# MAGIC __FILTER__ fields are nested in the `filters` array
# MAGIC
# MAGIC __FORMAT__ fields are nested in the `genotypes` array
# COMMAND ----------
vcf.printSchema()
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC #### split multiallelic events
# COMMAND ----------
spark.conf.set("spark.sql.codegen.wholeStage", False)  # turn off Spark SQL whole-stage code generation for faster performance.
# Break multiallelic sites into one biallelic row per alternate allele.
split_vcf = glow.transform("split_multiallelics", vcf)
# COMMAND ----------
split_vcf.show()
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC #### normalize variants
# MAGIC
# MAGIC This is an important quality control / sanity check when ingesting VCFs
# MAGIC
# MAGIC It is always necessary after multiallelic variants are split into biallelics
# COMMAND ----------
# Left-normalize variant representations against the reference FASTA.
normalized_vcf = glow.transform("normalize_variants",
split_vcf,
reference_genome_path=reference_genome_path)
# COMMAND ----------
# MAGIC %md
# MAGIC Let's count how many variants have changed after normalization
# COMMAND ----------
# Anti-join on the variant key: rows of the raw frame with no exact match in
# the normalized frame, i.e. variants altered by splitting/normalization.
vcf.join(normalized_vcf, ["contigName", "start", "end", "referenceAllele", "alternateAlleles"], "left_anti").count()
# COMMAND ----------
# MAGIC %md
# MAGIC #### Write out DataFrame to <img width="175px" src="https://docs.delta.io/latest/_static/delta-lake-logo.png">
# COMMAND ----------
# MAGIC %md
# MAGIC To append to existing Delta Lake use `.mode("append")`
# COMMAND ----------
# Persist the normalized variants as a Delta Lake table at ``delta_output_path``.
# To append to an existing Delta Lake use ``.mode("append")``.
(
    normalized_vcf
    .write
    .format("delta")
    .save(delta_output_path)
)
# COMMAND ----------
# MAGIC %md
# MAGIC #### Describe history of the Delta table
# MAGIC
# MAGIC With Delta, you can append more data, and time travel back to previous versions of the table
# COMMAND ----------
# BUG FIX: the original ran ``DESCRIBE HISTORY genomes``, but no table named
# ``genomes`` is ever registered in this notebook -- the data was only saved
# to ``delta_output_path`` above. Address the Delta table by path instead.
display(spark.sql("DESCRIBE HISTORY delta.`{}`".format(delta_output_path)))
# COMMAND ----------
# MAGIC %md
# MAGIC #### Read in the Delta Lake and count the number of variants
# COMMAND ----------
delta_vcf = spark.read.format("delta").load(delta_output_path)
# COMMAND ----------
delta_vcf.count()
# COMMAND ----------
# MAGIC %md
# MAGIC #### Perform point query to retrieve specific genotype
# MAGIC
# MAGIC get genotype for a specific sample at a specific position
# MAGIC
# MAGIC for faster queries, explode the genotypes array and Z-order on contigName and position
# COMMAND ----------
# Point query: one sample's genotype at a single locus.
sample_id = "HG00100"
chrom = "22"
start = 16050074
# COMMAND ----------
# Filter to the locus, then keep only the matching sample's entry from the
# genotypes array via Spark SQL's higher-order ``filter`` function.
genotype = delta_vcf.where((fx.col("contigName") == chrom) &
(fx.col("start") == start)). \
selectExpr("contigName", "start", "filter(genotypes, g -> g.sampleId = '{0}') as genotypes".format(sample_id))
# COMMAND ----------
genotype.collect()
# COMMAND ----------
# MAGIC %md
# MAGIC #### Select random subset of data and write back out to VCF
# COMMAND ----------
# Pull the sample-id list from the first row (in a pVCF every row carries the
# full genotypes array -- TODO confirm for this dataset).
samples_list = vcf.select("genotypes"). \
limit(1). \
select(glow.subset_struct('genotypes', 'sampleId').alias('samples')). \
collect()[0].samples.sampleId
# COMMAND ----------
# Randomly pick 100 samples and render them as the body of a quoted SQL array literal.
subset_samples = sample(samples_list, 100)
subset_samples_str = '\' , \''.join(subset_samples)
# COMMAND ----------
# Keep only the chosen samples' genotype entries, then swap the filtered
# array back in under the original ``genotypes`` column name.
sample_filter = delta_vcf.selectExpr("*", "filter(genotypes, g -> array_contains(array('{0}'), g.sampleId)) as genotypes2".format(subset_samples_str)). \
drop("genotypes"). \
withColumnRenamed("genotypes2", "genotypes")
# COMMAND ----------
# Sanity check: the first row should now carry exactly 100 genotypes.
len(sample_filter.limit(1).select("genotypes.sampleId").collect()[0].sampleId)
# COMMAND ----------
# MAGIC %md
# MAGIC #### VCFs can be written as plain text or you can specify block gzip [compression](https://docs.databricks.com/applications/genomics/variant-data.html#vcf)
# COMMAND ----------
# Write the subset back out through Glow's "bigvcf" sink to ``vcf_output_path``.
(
sample_filter
.write
.format("bigvcf")
.save(vcf_output_path)
)
# COMMAND ----------
# MAGIC %md
# MAGIC #### Using the [local file API](https://docs.databricks.com/data/databricks-file-system.html#local-file-apis), we can read VCF directly from cloud storage via the shell
# COMMAND ----------
# MAGIC %sh
# MAGIC head -n 200 /dbfs/home/genomics/vcf/subset.vcf | 30.188525 | 357 | 0.675944 |
633900aea75cfb5ea59eba9ee23221bceeb4f9b6 | 906 | py | Python | tests/test_ptrello.py | Ibistylus/ptrello | b68ac54ee0fa92051b45b6b2dc116ee01189868d | [
"MIT"
] | null | null | null | tests/test_ptrello.py | Ibistylus/ptrello | b68ac54ee0fa92051b45b6b2dc116ee01189868d | [
"MIT"
] | null | null | null | tests/test_ptrello.py | Ibistylus/ptrello | b68ac54ee0fa92051b45b6b2dc116ee01189868d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `ptrello` package."""
import unittest
from click.testing import CliRunner
from ptrello import api
from ptrello import cli
class TestPtrello(unittest.TestCase):
    """Unit tests for the `ptrello` package."""

    def setUp(self):
        """Set up test fixtures, if any."""

    def tearDown(self):
        """Tear down test fixtures, if any."""

    def test_000_something(self):
        """Test something."""

    def test_command_line_interface(self):
        """Invoking the CLI bare and with --help should both exit cleanly."""
        cli_runner = CliRunner()
        default_run = cli_runner.invoke(cli.main)
        assert default_run.exit_code == 0
        assert 'ptrello.cli.main' in default_run.output
        help_run = cli_runner.invoke(cli.main, ['--help'])
        assert help_run.exit_code == 0
        assert '--help Show this message and exit.' in help_run.output
| 25.885714 | 75 | 0.607064 |
fb9c946c53949ec9b3f3de5a8eeac27150ab16d3 | 1,209 | py | Python | build_gcc_i386.py | SydOS/toolchains | bb09e4a929813b9b305182c596192d22f89e290d | [
"MIT"
] | null | null | null | build_gcc_i386.py | SydOS/toolchains | bb09e4a929813b9b305182c596192d22f89e290d | [
"MIT"
] | null | null | null | build_gcc_i386.py | SydOS/toolchains | bb09e4a929813b9b305182c596192d22f89e290d | [
"MIT"
] | null | null | null | # File: build_gcc_i686.py
#
# Copyright (c) 2018 The SydOS Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import build_gcc
# NOTE(review): presumably imported for its side effect of building binutils
# before GCC -- confirm build_binutils_i386 runs its build at import time.
import build_binutils_i386
# Build a GCC cross-compiler targeting bare-metal i386 (i386-elf).
build_gcc.do_build(target="i386-elf")
97251eaa801eb4ad81c4ddf52350c9001029e1bc | 95 | py | Python | src/vicode/lsp/__init__.py | ousttrue/vicode | ebcd68eaa8239553960d6ff9048299f0c2650aca | [
"MIT"
] | null | null | null | src/vicode/lsp/__init__.py | ousttrue/vicode | ebcd68eaa8239553960d6ff9048299f0c2650aca | [
"MIT"
] | null | null | null | src/vicode/lsp/__init__.py | ousttrue/vicode | ebcd68eaa8239553960d6ff9048299f0c2650aca | [
"MIT"
] | null | null | null | from . import client
from .import protocol
__all__ = [
'client',
'protocol',
]
| 11.875 | 22 | 0.578947 |
0f45fceb5fda710e5db150d9c8b4dc3d81ee917c | 3,099 | py | Python | src/train_siamese.py | hou-yz/FairMOT | 207ad7ba6bd0807de0333d2e423132772a1f2968 | [
"MIT"
] | null | null | null | src/train_siamese.py | hou-yz/FairMOT | 207ad7ba6bd0807de0333d2e423132772a1f2968 | [
"MIT"
] | null | null | null | src/train_siamese.py | hou-yz/FairMOT | 207ad7ba6bd0807de0333d2e423132772a1f2968 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import torch
import torch.utils.data
from torchvision.transforms import transforms as T
from lib.opts import opts, update_dataset_info_and_set_heads
from lib.models.model import create_model, load_model, save_model
from lib.logger import Logger
from lib.datasets.mot17 import MOTDataset, SiameseDataset
from lib.trains.mot import MotTrainer, SiameseTrainer
def main(opt):
    """Siamese training entry point.

    Builds the MOT17 dataset/dataloader, constructs the model (only the
    ``model.id`` branch's parameters are given to the optimizer), optionally
    resumes from ``opt.load_model``, then runs the epoch loop with periodic
    checkpointing and step-wise LR decay at the epochs in ``opt.lr_step``.
    """
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    print('Setting up data...')
    transforms = T.Compose([T.ToTensor()])
    # NOTE(review): hard-coded machine-specific data path.
    dataset = MOTDataset(opt, '/home/houyz/Data/MOT17/train', augment=True, transforms=transforms)
    siamese_dataset = SiameseDataset(dataset, 10)
    opt = update_dataset_info_and_set_heads(opt, dataset)
    print(opt)
    logger = Logger(opt)
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    # Only the re-ID head (model.id) is optimized; the rest of the net is untouched.
    optimizer = torch.optim.Adam(model.id.parameters(), opt.lr)
    start_epoch = 0
    # Get dataloader
    train_loader = torch.utils.data.DataLoader(
        siamese_dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True
    )
    print('Starting training...')
    trainer = SiameseTrainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
    if opt.load_model != '':
        # Resume: load_model may also restore optimizer state and start_epoch.
        model, optimizer, start_epoch = load_model(
            model, opt.load_model, trainer.optimizer, opt.resume, opt.lr, opt.lr_step)
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        # 'mark' names the checkpoint: per-epoch when save_all, else a rolling 'last'.
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'),
                       epoch, model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            # Snapshot at each LR-decay epoch, then decay by 10x per step taken.
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                       epoch, model, optimizer)
            lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        if epoch % 5 == 0 or epoch >= 25:
            # Extra periodic snapshots (every 5 epochs, and every epoch from 25 on).
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                       epoch, model, optimizer)
    logger.close()
if __name__ == '__main__':
    # Parse command-line options and launch training.
    opt = opts().parse()
    main(opt)
| 36.034884 | 98 | 0.645692 |
8393ff33679552b8c6081a3e3d0246c297622f98 | 221 | py | Python | script/statsd_script_before.py | jonathanyeh0723/graphite-and-grafana | f0a946b0a1f42df708ab07d4087806dee07cc874 | [
"Apache-2.0"
] | null | null | null | script/statsd_script_before.py | jonathanyeh0723/graphite-and-grafana | f0a946b0a1f42df708ab07d4087806dee07cc874 | [
"Apache-2.0"
] | null | null | null | script/statsd_script_before.py | jonathanyeh0723/graphite-and-grafana | f0a946b0a1f42df708ab07d4087806dee07cc874 | [
"Apache-2.0"
] | null | null | null | import random
import time
while True:
random.seed()
random_sleep_interval = random.randint(5, 15)
print('Got random interval to sleep: {}'.format(random_sleep_interval))
time.sleep(random_sleep_interval)
| 24.555556 | 75 | 0.742081 |
1043284b8581d32d4583435e78c962e1668bc794 | 7,765 | py | Python | tests/unit/test_csv.py | treatortrick/apiritif | 4a81dbff1e3c97062b7cc4917ce96dd8d792abbd | [
"Apache-2.0"
] | null | null | null | tests/unit/test_csv.py | treatortrick/apiritif | 4a81dbff1e3c97062b7cc4917ce96dd8d792abbd | [
"Apache-2.0"
] | null | null | null | tests/unit/test_csv.py | treatortrick/apiritif | 4a81dbff1e3c97062b7cc4917ce96dd8d792abbd | [
"Apache-2.0"
] | null | null | null | import os
import tempfile
from unittest import TestCase
from apiritif.loadgen import Params, Supervisor, ApiritifPlugin
from apiritif.csv import CSVReaderPerThread, thread_data
from apiritif.utils import NormalShutdown
from tests.unit import RESOURCES_DIR
class TestCSV(TestCase):
    """Integration tests for apiritif's per-thread CSV reader machinery."""
    def setUp(self):
        # Reset the per-thread reader registry so tests do not leak state.
        thread_data.csv_readers = {}
    def test_threads_and_processes(self):
        """ check if threads and processes can divide csv fairly """
        script = os.path.join(RESOURCES_DIR, "test_thread_reader.py")
        outfile = tempfile.NamedTemporaryFile()
        report = outfile.name + "-%s.csv"
        outfile.close()
        params = Params()
        params.concurrency = 4
        params.iterations = 2
        params.report = report
        params.tests = [script]
        params.worker_count = 2
        sup = Supervisor(params)
        sup.start()
        sup.join()
        content = []
        # Each worker writes its own report file; keep every other line (data rows).
        for i in range(params.worker_count):
            with open(report % i) as f:
                content.extend(f.readlines()[1::2])
        threads = {"0": [], "1": [], "2": [], "3": []}
        content = [item[item.index('"') + 1:].strip() for item in content]
        for item in content:
            self.assertEqual(item[0], item[2])  # thread equals target
            self.assertEqual("a", item[-1])  # age is the same
            if item[6] == "0":
                self.assertEqual(-1, item.find('+'))
            else:
                self.assertNotEqual(-1, item.find('+'))  # name value is modified
            threads[item[0]].append(item[9:-2])
        # format: <user>:<pass>, quoting ignored
        target = {
            '0': ['""u:ser0""', '""u+:ser0""', 'user4:4', 'user4+:4'],
            '1': ['""user1"":1', '""user1""+:1', 'user5:5', 'user5+:5'],
            '2': ['user2:""2""', 'user2+:""2""', '""u:ser0""', '""u+:ser0""'],
            '3': ['user3:3', 'user3+:3', '""user1"":1', '""user1""+:1']}
        self.assertEqual(threads, target)
    def test_two_readers(self):
        """ check different reading speed, fieldnames and separators """
        script = os.path.join(RESOURCES_DIR, "test_two_readers.py")
        outfile = tempfile.NamedTemporaryFile()
        report = outfile.name + "-%s.csv"
        outfile.close()
        params = Params()
        params.concurrency = 2
        params.iterations = 3
        params.report = report
        params.tests = [script]
        params.worker_count = 1
        sup = Supervisor(params)
        sup.start()
        sup.join()
        content = []
        for i in range(params.worker_count):
            with open(report % i) as f:
                content.extend(f.readlines()[1::2])
        threads = {"0": [], "1": []}
        content = [item[item.index('"') + 1:].strip() for item in content]
        for item in content:
            threads[item[0]].append(item[2:])
        target = {  # reader1 runs two times faster
            "0": ["0. u,ser0:000:ze:00", "1. u,ser0:000:tu:22", "0. user2:2:fo:44",
                  "1. user2:2:si:66", "0. user4:4:ze:00", "1. user4:4:tu:22"],
            "1": ["0. user1:1:on:11", "1. user1:1:th:33", "0. user3:3:fi:55",
                  "1. user3:3:se:77", "0. user5:5:on:11", "1. user5:5:th:33"]}
        self.assertEqual(threads, target)
    def test_reader_without_loop(self):
        """ a non-looping reader must raise NormalShutdown once the file is exhausted """
        reader = CSVReaderPerThread(os.path.join(RESOURCES_DIR, "data/source0.csv"), loop=False)
        data = []
        try:
            for i in range(20):
                reader.read_vars()
                data.append(reader.get_vars())
        except NormalShutdown:
            # source0.csv yields exactly 6 rows before shutdown.
            self.assertEqual(6, len(data))
            return
        self.fail()
    def test_apiritif_without_loop(self):
        """ a non-looping reader must stop the run before all iterations finish """
        script = os.path.join(RESOURCES_DIR, "test_reader_no_loop.py")
        outfile = tempfile.NamedTemporaryFile()
        report = outfile.name + "-%s.csv"
        outfile.close()
        params = Params()
        params.concurrency = 1
        params.iterations = 10
        params.report = report
        params.tests = [script]
        params.worker_count = 1
        sup = Supervisor(params)
        sup.start()
        sup.join()
        content = []
        for i in range(params.worker_count):
            with open(report % i) as f:
                content.extend(f.readlines()[1::2])
        threads = {"0": []}
        content = [item[item.index('"') + 1:].strip() for item in content]
        for item in content:
            threads[item[0]].append(item[2:])
        # The data runs out before the requested 10 iterations complete.
        self.assertEqual(18, len(threads["0"]))
    def test_reader_without_loop_non_stop(self):
        """ with a broken error handler the run keeps iterating past data exhaustion """
        script = os.path.join(RESOURCES_DIR, "test_reader_no_loop.py")
        outfile = tempfile.NamedTemporaryFile()
        report = outfile.name + "-%s.csv"
        outfile.close()
        params = Params()
        params.concurrency = 1
        params.iterations = 10
        params.report = report
        params.tests = [script]
        params.worker_count = 1
        handler = ApiritifPlugin.handleError
        try:
            # wrong handler: doesn't stop iterations
            ApiritifPlugin.handleError = lambda a, b, c: False
            sup = Supervisor(params)
            sup.start()
            sup.join()
        finally:
            # Always restore the real handler, even if the run blows up.
            ApiritifPlugin.handleError = handler
        content = []
        for i in range(params.worker_count):
            with open(report % i) as f:
                content.extend(f.readlines()[1::2])
        threads = {"0": []}
        content = [item[item.index('"') + 1:].strip() for item in content]
        for item in content:
            threads[item[0]].append(item[2:])
        # More samples than the non-stop case's 18: iterations were not cut short.
        self.assertTrue(len(threads["0"]) > 18)
    def test_csv_encoding(self):
        # UTF-8 and UTF-16 encodings of the same data must parse identically.
        reader_utf8 = CSVReaderPerThread(os.path.join(RESOURCES_DIR, "data/encoding_utf8.csv"), loop=False)
        reader_utf16 = CSVReaderPerThread(os.path.join(RESOURCES_DIR, "data/encoding_utf16.csv"), loop=False)
        data_utf8, data_utf16 = [], []
        reader_utf8.read_vars()
        data_utf8.append(reader_utf8.get_vars())
        reader_utf16.read_vars()
        data_utf16.append(reader_utf16.get_vars())
        self.assertEqual(data_utf8, data_utf16)
    def test_csv_quoted(self):
        # Quoted and unquoted variants, in both encodings, must yield the same row.
        readers = [
            CSVReaderPerThread(os.path.join(RESOURCES_DIR, "data/quoted_utf8.csv"), loop=False),
            CSVReaderPerThread(os.path.join(RESOURCES_DIR, "data/quoted_utf16.csv"), loop=False),
            CSVReaderPerThread(os.path.join(RESOURCES_DIR, "data/unquoted_utf8.csv"), loop=False),
            CSVReaderPerThread(os.path.join(RESOURCES_DIR, "data/unquoted_utf16.csv"), loop=False)]
        readers_data = []
        for reader in readers:
            reader.read_vars()
            readers_data.append(reader.get_vars())
        result = {'ac1': '1', 'bc1': '2', 'cc1': '3'}
        for data in readers_data:
            self.assertEqual(data, result)
    def test_csv_delimiter(self):
        # Comma, tab and semicolon delimiters must all be detected and parsed.
        readers = [
            CSVReaderPerThread(os.path.join(RESOURCES_DIR, "data/encoding_utf8.csv"), loop=False),
            CSVReaderPerThread(os.path.join(RESOURCES_DIR, "data/delimiter_tab.csv"), loop=False),
            CSVReaderPerThread(os.path.join(RESOURCES_DIR, "data/delimiter_semicolon.csv"), loop=False)]
        readers_data = []
        for reader in readers:
            reader.read_vars()
            readers_data.append(reader.get_vars())
        result = {'ac1': '1', 'bc1': '2', 'cc1': '3'}
        for data in readers_data:
            self.assertEqual(data, result)
| 36.285047 | 109 | 0.572183 |
3aa9d24ed76235dd40a592e7f844bcab2b81800e | 285 | py | Python | labml_nn/activations/__init__.py | Study-Repos-Forks/annotated_deep_learning_paper_implementations | 6a41c82b30157fb146ac4d7f455e57a8e7aa7565 | [
"MIT"
] | 1 | 2020-08-26T03:55:42.000Z | 2020-08-26T03:55:42.000Z | labml_nn/activations/__init__.py | Study-Repos-Forks/annotated_deep_learning_paper_implementations | 6a41c82b30157fb146ac4d7f455e57a8e7aa7565 | [
"MIT"
] | null | null | null | labml_nn/activations/__init__.py | Study-Repos-Forks/annotated_deep_learning_paper_implementations | 6a41c82b30157fb146ac4d7f455e57a8e7aa7565 | [
"MIT"
] | null | null | null | """
---
title: Neural Network Activation Functions
summary: >
A set of PyTorch implementations/tutorials related to neural network activations
---
# Neural Networks Activations
* [Fuzzy Tiling Activations](fta/index.html)
* 🚧 [Swish](swish/index.html)
"""
from .swish import Swish
| 19 | 81 | 0.74386 |
d1512fa42290d059ca6cf44c9ba9934dcead5feb | 2,190 | py | Python | userbot/plugins/alive.py | Kishoth-45/TamilUserBot-1 | b85ac3bf68e65dd3803cf77fb0a1a40b0e312b4d | [
"MIT"
] | null | null | null | userbot/plugins/alive.py | Kishoth-45/TamilUserBot-1 | b85ac3bf68e65dd3803cf77fb0a1a40b0e312b4d | [
"MIT"
] | null | null | null | userbot/plugins/alive.py | Kishoth-45/TamilUserBot-1 | b85ac3bf68e65dd3803cf77fb0a1a40b0e312b4d | [
"MIT"
] | null | null | null | """Check if tamilBot alive. If you change these, you become the gayest gay such that even the gay world will disown you."""
# CREDITS: @WhySooSerious, @Sur_vivor
# modified by @saravanakrish
# Re-written by @iMvEtRi
from userbot.utils import admin_cmd
from userbot.uniborgConfig import Config
from userbot import ALIVE_NAME, CMD_HELP
# Display name shown in the alive card; falls back to a default when
# ALIVE_NAME is unset.
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "KHILADI KISHOTH"
PM_IMG = Config.ALIVE_IMAGE
# Markdown caption sent together with the alive image (stylized unicode is
# intentional; do not "fix" these strings).
pm_caption = "🤖 **Kishoth ιѕ:** `ᴏɴʟɪɴᴇ`\n\n"
pm_caption += "⨠ **ѕуѕтємѕ ѕтαтѕ 💻:**\n"
pm_caption += "⨠ **тєℓєтнση νєяѕιση :** `1.15.0` \n"
pm_caption += "⨠ **ρутнση :** `3.7.4` \n"
pm_caption += "⨠ **∂αтαвαѕє ѕтαтυѕ :** `ꜰᴜɴᴄᴛɪᴏɴᴀʟ`\n"
pm_caption += "⨠ **¢υяяєηт вяαη¢н** : `ᴍᴀꜱᴛᴇʀ`\n"
pm_caption += f"⨠ **νєяѕιση** : `6.5`\n"
pm_caption += f"⨠ **му вσѕѕ** : {DEFAULTUSER} \n\n"
# pm_caption += "⨠ **Heroku Database** : `AWS -\nWorking Properly`💥\n\n"
# pm_caption += "⫸ **License** : [MIT License](github.com/ivetri/tamilbot/blob/master/LICENSE) ✔\n"
# pm_caption += "⫸ **Copyrights** : © By [TAMIL🤖BOT](https://github.com/IVETRI/TamilBot) 👨🏻💻\n\n"
pm_caption += "•☆•»»**[🇮🇳 Khiladibot 🇮🇳]**(https://t.me/Tamil_Chat_Empire)««•☆•"
@borg.on(admin_cmd(pattern=r"alive"))
async def tamilbot(alive):
    """ For .alive command, check if the bot is running. """
    # FIX: the docstring previously sat *after* the first statement, where it
    # was just a dead string expression; moved to the top so it is a real
    # docstring. No behavioral change otherwise.
    await alive.get_chat()
    # Send the alive image with the status caption, then remove the command message.
    await borg.send_file(alive.chat_id, PM_IMG, caption=pm_caption)
    await alive.delete()
# Sudo-enabled alive check: edits the triggering message in place with a
# Tamil acknowledgement (roughly "thanks for using me" -- TODO confirm
# translation).
@borg.on(admin_cmd(pattern=r"sudoalive", allow_sudo=True))
async def amireallyalive(alive):
    """ For .sudoalive command, check if the bot is running. """
    await alive.edit("`என்னைப் பயன்படுத்தியதற்கு நன்றி🤖")
@borg.on(admin_cmd(outgoing=True, pattern="repo"))
async def repo(event):
    """For the .repo command: answer with the bot's repository via inline query."""
    if event.fwd_from:
        return
    # BUG FIX: the original read ``Var.TG_BOT_USERNAME`` but ``Var`` is never
    # imported in this module (only ``Config`` from uniborgConfig is), so this
    # raised NameError at runtime. ``Config`` carries the bot settings here.
    # NOTE(review): confirm the attribute name exists on Config in deployment.
    tgbotname = Config.TG_BOT_USERNAME
    if event.reply_to_msg_id:
        await event.get_reply_message()
    response = await bot.inline_query(tgbotname, "repo")
    await response[0].click(event.chat_id)
    await event.delete()
# Register this plugin's commands in the global help registry (the trailing
# backslashes inside the triple-quoted string escape the newlines).
CMD_HELP.update(
    {
        "alive":
        """╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.alive`\
╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ Check your bot is alive or not.\
╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.repo`\
╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ get repository of TamilBot.\
"""
    }
)
| 35.901639 | 123 | 0.640639 |
4e7ee2b2c6c0f86eb92039290aafe46e9d60443e | 413 | py | Python | blog/migrations/0019_auto_20190922_0713.py | notme20n/semicoloncs | b9cf0cf075eec76c6dd15c2973d2e4e8eab1f699 | [
"bzip2-1.0.6"
] | null | null | null | blog/migrations/0019_auto_20190922_0713.py | notme20n/semicoloncs | b9cf0cf075eec76c6dd15c2973d2e4e8eab1f699 | [
"bzip2-1.0.6"
] | 8 | 2020-02-12T03:21:37.000Z | 2022-02-10T11:56:24.000Z | blog/migrations/0019_auto_20190922_0713.py | notme20n/semicoloncs | b9cf0cf075eec76c6dd15c2973d2e4e8eab1f699 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 2.2 on 2019-09-22 04:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: redefine Post.image as an ImageField
    # with default 'media/code.jpg', uploading into 'Post_pics'.
    dependencies = [
        ('blog', '0018_auto_20190920_0707'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='image',
            field=models.ImageField(default='media/code.jpg', upload_to='Post_pics'),
        ),
    ]
| 21.736842 | 85 | 0.605327 |
3b4b51634fc680b287f9ef6a0dd52fe836e35c58 | 84,949 | py | Python | geopandas/geodataframe.py | latot/geopandas | cb0c8b5ee4cf6da8e1fc70ffbd23e486f1919724 | [
"BSD-3-Clause"
] | null | null | null | geopandas/geodataframe.py | latot/geopandas | cb0c8b5ee4cf6da8e1fc70ffbd23e486f1919724 | [
"BSD-3-Clause"
] | null | null | null | geopandas/geodataframe.py | latot/geopandas | cb0c8b5ee4cf6da8e1fc70ffbd23e486f1919724 | [
"BSD-3-Clause"
] | null | null | null | import json
import warnings
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from pandas.core.accessor import CachedAccessor
from shapely.geometry import mapping, shape
from shapely.geometry.base import BaseGeometry
from pyproj import CRS
from geopandas.array import GeometryArray, GeometryDtype, from_shapely, to_wkb, to_wkt
from geopandas.base import GeoPandasBase, is_geometry_type
from geopandas.geoseries import GeoSeries, _geoseries_constructor_with_fallback
import geopandas.io
from geopandas.explore import _explore
from . import _compat as compat
from ._decorator import doc
DEFAULT_GEO_COLUMN_NAME = "geometry"
def _geodataframe_constructor_with_fallback(*args, **kwargs):
    """
    A flexible constructor for GeoDataFrame._constructor, which falls back
    to returning a DataFrame (if a certain operation does not preserve the
    geometry column)
    """
    frame = GeoDataFrame(*args, **kwargs)
    # Downcast to a plain DataFrame when no geometry-dtype column survived
    # (``.any()`` on an empty mask is False, covering the zero-column case).
    if not (frame.dtypes == "geometry").any():
        frame = pd.DataFrame(frame)
    return frame
def _ensure_geometry(data, crs=None):
    """
    Ensure the data is of geometry dtype or converted to it.

    If input is a (Geo)Series, output is a GeoSeries, otherwise output
    is GeometryArray.

    If the input is a GeometryDtype with a set CRS, `crs` is ignored.
    """
    if not is_geometry_type(data):
        # Coerce shapely objects into a GeometryArray, attaching ``crs``.
        if isinstance(data, Series):
            arr = from_shapely(np.asarray(data), crs=crs)
            return GeoSeries(arr, index=data.index, name=data.name)
        return from_shapely(data, crs=crs)
    # Already geometry dtype: wrap a Series as GeoSeries, fill a missing CRS.
    if isinstance(data, Series):
        data = GeoSeries(data)
    if data.crs is None:
        data.crs = crs
    return data
# Shared message raised by GeoDataFrame constructors/setters when the passed
# geometries carry a CRS that conflicts with an explicit ``crs`` argument.
crs_mismatch_error = (
    "CRS mismatch between CRS of the passed geometries "
    "and 'crs'. Use 'GeoDataFrame.set_crs(crs, "
    "allow_override=True)' to overwrite CRS or "
    "'GeoDataFrame.to_crs(crs)' to reproject geometries. "
)
class GeoDataFrame(GeoPandasBase, DataFrame):
"""
A GeoDataFrame object is a pandas.DataFrame that has a column
with geometry. In addition to the standard DataFrame constructor arguments,
GeoDataFrame also accepts the following keyword arguments:
Parameters
----------
crs : value (optional)
Coordinate Reference System of the geometry objects. Can be anything accepted by
:meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,
such as an authority string (eg "EPSG:4326") or a WKT string.
geometry : str or array (optional)
If str, column to use as geometry. If array, will be set as 'geometry'
column on GeoDataFrame.
Examples
--------
Constructing GeoDataFrame from a dictionary.
>>> from shapely.geometry import Point
>>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}
>>> gdf = geopandas.GeoDataFrame(d, crs="EPSG:4326")
>>> gdf
col1 geometry
0 name1 POINT (1.00000 2.00000)
1 name2 POINT (2.00000 1.00000)
Notice that the inferred dtype of 'geometry' columns is geometry.
>>> gdf.dtypes
col1 object
geometry geometry
dtype: object
Constructing GeoDataFrame from a pandas DataFrame with a column of WKT geometries:
>>> import pandas as pd
>>> d = {'col1': ['name1', 'name2'], 'wkt': ['POINT (1 2)', 'POINT (2 1)']}
>>> df = pd.DataFrame(d)
>>> gs = geopandas.GeoSeries.from_wkt(df['wkt'])
>>> gdf = geopandas.GeoDataFrame(df, geometry=gs, crs="EPSG:4326")
>>> gdf
col1 wkt geometry
0 name1 POINT (1 2) POINT (1.00000 2.00000)
1 name2 POINT (2 1) POINT (2.00000 1.00000)
See also
--------
GeoSeries : Series object designed to store shapely geometry objects
"""
_metadata = ["_crs", "_geometry_column_name"]
_geometry_column_name = DEFAULT_GEO_COLUMN_NAME
    def __init__(self, data=None, *args, geometry=None, crs=None, **kwargs):
        """Build the frame via pandas, then resolve geometry column and CRS."""
        with compat.ignore_shapely2_warnings():
            super().__init__(data, *args, **kwargs)
        # need to set this before calling self['geometry'], because
        # getitem accesses crs
        self._crs = CRS.from_user_input(crs) if crs else None
        # set_geometry ensures the geometry data have the proper dtype,
        # but is not called if `geometry=None` ('geometry' column present
        # in the data), so therefore need to ensure it here manually
        # but within a try/except because currently non-geometries are
        # allowed in that case
        # TODO do we want to raise / return normal DataFrame in this case?
        # if gdf passed in and geo_col is set, we use that for geometry
        if geometry is None and isinstance(data, GeoDataFrame):
            self._geometry_column_name = data._geometry_column_name
            if crs is not None and data.crs != crs:
                raise ValueError(crs_mismatch_error)
        if geometry is None and "geometry" in self.columns:
            # Check for multiple columns with name "geometry". If there are,
            # self["geometry"] is a gdf and constructor gets recursively recalled
            # by pandas internals trying to access this
            if (self.columns == "geometry").sum() > 1:
                raise ValueError(
                    "GeoDataFrame does not support multiple columns "
                    "using the geometry column name 'geometry'."
                )
            # only if we have actual geometry values -> call set_geometry
            index = self.index
            try:
                if (
                    hasattr(self["geometry"].values, "crs")
                    and self["geometry"].values.crs
                    and crs
                    and not self["geometry"].values.crs == crs
                ):
                    raise ValueError(crs_mismatch_error)
                self["geometry"] = _ensure_geometry(self["geometry"].values, crs)
            except TypeError:
                # Column held non-geometry data; leave it as-is.
                pass
            else:
                if self.index is not index:
                    # With pandas < 1.0 and an empty frame (no rows), the index
                    # gets reset to a default RangeIndex -> set back the original
                    # index if needed
                    self.index = index
                geometry = "geometry"
        if geometry is not None:
            if (
                hasattr(geometry, "crs")
                and geometry.crs
                and crs
                and not geometry.crs == crs
            ):
                raise ValueError(crs_mismatch_error)
            self.set_geometry(geometry, inplace=True)
        if geometry is None and crs:
            # CRS with no geometry column is meaningless; warn (soon an error).
            warnings.warn(
                "Assigning CRS to a GeoDataFrame without a geometry column is now "
                "deprecated and will not be supported in the future.",
                FutureWarning,
                stacklevel=2,
            )
def __setattr__(self, attr, val):
# have to special case geometry b/c pandas tries to use as column...
if attr == "geometry":
object.__setattr__(self, attr, val)
else:
super().__setattr__(attr, val)
    def _get_geometry(self):
        """Return the active geometry column as a GeoSeries.

        Raises AttributeError with a diagnostic message when no active
        geometry column is set or the configured column is missing.
        """
        if self._geometry_column_name not in self:
            if self._geometry_column_name is None:
                msg = (
                    "You are calling a geospatial method on the GeoDataFrame, "
                    "but the active geometry column to use has not been set. "
                )
            else:
                msg = (
                    "You are calling a geospatial method on the GeoDataFrame, "
                    f"but the active geometry column ('{self._geometry_column_name}') "
                    "is not present. "
                )
            # Point the user at any geometry-typed columns that do exist.
            geo_cols = list(self.columns[self.dtypes == "geometry"])
            if len(geo_cols) > 0:
                msg += (
                    f"\nThere are columns with geometry data type ({geo_cols}), and "
                    "you can either set one as the active geometry with "
                    'df.set_geometry("name") or access the column as a '
                    'GeoSeries (df["name"]) and call the method directly on it.'
                )
            else:
                msg += (
                    "\nThere are no existing columns with geometry data type. You can "
                    "add a geometry column as the active geometry column with "
                    "df.set_geometry. "
                )
            raise AttributeError(msg)
        return self[self._geometry_column_name]
def _set_geometry(self, col):
if not pd.api.types.is_list_like(col):
raise ValueError("Must use a list-like to set the geometry property")
self.set_geometry(col, inplace=True)
geometry = property(
fget=_get_geometry, fset=_set_geometry, doc="Geometry data for GeoDataFrame"
)
def set_geometry(self, col, drop=False, inplace=False, crs=None):
"""
Set the GeoDataFrame geometry using either an existing column or
the specified input. By default yields a new object.
The original geometry column is replaced with the input.
Parameters
----------
col : column label or array
drop : boolean, default False
Delete column to be used as the new geometry
inplace : boolean, default False
Modify the GeoDataFrame in place (do not create a new object)
crs : pyproj.CRS, optional
Coordinate system to use. The value can be anything accepted
by :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,
such as an authority string (eg "EPSG:4326") or a WKT string.
If passed, overrides both DataFrame and col's crs.
Otherwise, tries to get crs from passed col values or DataFrame.
Examples
--------
>>> from shapely.geometry import Point
>>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}
>>> gdf = geopandas.GeoDataFrame(d, crs="EPSG:4326")
>>> gdf
col1 geometry
0 name1 POINT (1.00000 2.00000)
1 name2 POINT (2.00000 1.00000)
Passing an array:
>>> df1 = gdf.set_geometry([Point(0,0), Point(1,1)])
>>> df1
col1 geometry
0 name1 POINT (0.00000 0.00000)
1 name2 POINT (1.00000 1.00000)
Using existing column:
>>> gdf["buffered"] = gdf.buffer(2)
>>> df2 = gdf.set_geometry("buffered")
>>> df2.geometry
0 POLYGON ((3.00000 2.00000, 2.99037 1.80397, 2....
1 POLYGON ((4.00000 1.00000, 3.99037 0.80397, 3....
Name: buffered, dtype: geometry
Returns
-------
GeoDataFrame
See also
--------
GeoDataFrame.rename_geometry : rename an active geometry column
"""
# Most of the code here is taken from DataFrame.set_index()
if inplace:
frame = self
else:
frame = self.copy()
# if there is no previous self.geometry, self.copy() will downcast
if type(frame) == DataFrame:
frame = GeoDataFrame(frame)
to_remove = None
geo_column_name = self._geometry_column_name
if isinstance(col, (Series, list, np.ndarray, GeometryArray)):
level = col
elif hasattr(col, "ndim") and col.ndim != 1:
raise ValueError("Must pass array with one dimension only.")
else:
try:
level = frame[col]
except KeyError:
raise ValueError("Unknown column %s" % col)
except Exception:
raise
if isinstance(level, DataFrame):
raise ValueError(
"GeoDataFrame does not support setting the geometry column where "
"the column name is shared by multiple columns."
)
if drop:
to_remove = col
geo_column_name = self._geometry_column_name
else:
geo_column_name = col
if to_remove:
del frame[to_remove]
if not crs:
level_crs = getattr(level, "crs", None)
crs = level_crs if level_crs is not None else self._crs
if isinstance(level, (GeoSeries, GeometryArray)) and level.crs != crs:
# Avoids caching issues/crs sharing issues
level = level.copy()
level.crs = crs
# Check that we are using a listlike of geometries
level = _ensure_geometry(level, crs=crs)
index = frame.index
frame[geo_column_name] = level
if frame.index is not index and len(frame.index) == len(index):
# With pandas < 1.0 and an empty frame (no rows), the index gets reset
# to a default RangeIndex -> set back the original index if needed
frame.index = index
frame._geometry_column_name = geo_column_name
frame.crs = crs
if not inplace:
return frame
def rename_geometry(self, col, inplace=False):
"""
Renames the GeoDataFrame geometry column to
the specified name. By default yields a new object.
The original geometry column is replaced with the input.
Parameters
----------
col : new geometry column label
inplace : boolean, default False
Modify the GeoDataFrame in place (do not create a new object)
Examples
--------
>>> from shapely.geometry import Point
>>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}
>>> df = geopandas.GeoDataFrame(d, crs="EPSG:4326")
>>> df1 = df.rename_geometry('geom1')
>>> df1.geometry.name
'geom1'
>>> df.rename_geometry('geom1', inplace=True)
>>> df.geometry.name
'geom1'
Returns
-------
geodataframe : GeoDataFrame
See also
--------
GeoDataFrame.set_geometry : set the active geometry
"""
geometry_col = self.geometry.name
if col in self.columns:
raise ValueError(f"Column named {col} already exists")
else:
if not inplace:
return self.rename(columns={geometry_col: col}).set_geometry(
col, inplace
)
self.rename(columns={geometry_col: col}, inplace=inplace)
self.set_geometry(col, inplace=inplace)
    @property
    def crs(self):
        """
        The Coordinate Reference System (CRS) represented as a ``pyproj.CRS``
        object.

        Returns None if the CRS is not set. To set the value, assign anything
        accepted by
        :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,
        such as an authority string (eg "EPSG:4326") or a WKT string.

        :getter: Returns a ``pyproj.CRS`` or None.
        :setter: Accepts any :meth:`pyproj.CRS.from_user_input` input.

        Examples
        --------
        >>> gdf.crs  # doctest: +SKIP
        <Geographic 2D CRS: EPSG:4326>
        Name: WGS 84
        Axis Info [ellipsoidal]:
        - Lat[north]: Geodetic latitude (degree)
        - Lon[east]: Geodetic longitude (degree)
        Area of Use:
        - name: World
        - bounds: (-180.0, -90.0, 180.0, 90.0)
        Datum: World Geodetic System 1984
        - Ellipsoid: WGS 84
        - Prime Meridian: Greenwich

        See also
        --------
        GeoDataFrame.set_crs : assign CRS
        GeoDataFrame.to_crs : re-project to another CRS
        """
        return self._crs
    @crs.setter
    def crs(self, value):
        """Sets the value of the crs"""
        # Without a geometry column there is no array to carry the CRS, so
        # only the frame-level ``_crs`` is set (deprecated usage).
        if self._geometry_column_name not in self:
            warnings.warn(
                "Assigning CRS to a GeoDataFrame without a geometry column is now "
                "deprecated and will not be supported in the future.",
                FutureWarning,
                stacklevel=4,
            )
            self._crs = None if not value else CRS.from_user_input(value)
        else:
            if hasattr(self.geometry.values, "crs"):
                # Delegate to the GeometryArray (which normalizes the value),
                # then mirror the normalized CRS on the frame.
                self.geometry.values.crs = value
                self._crs = self.geometry.values.crs
            else:
                # column called 'geometry' without geometry
                self._crs = None if not value else CRS.from_user_input(value)
    def __setstate__(self, state):
        # overriding DataFrame method for compat with older pickles (CRS handling)
        if isinstance(state, dict):
            # Older pickles tracked "crs" in _metadata; rename it to "_crs".
            if "_metadata" in state and "crs" in state["_metadata"]:
                metadata = state["_metadata"]
                metadata[metadata.index("crs")] = "_crs"
            # Older pickles stored the CRS under the "crs" key directly.
            if "crs" in state and "_crs" not in state:
                crs = state.pop("crs")
                state["_crs"] = CRS.from_user_input(crs) if crs is not None else crs
        super().__setstate__(state)

        # for some versions that didn't yet have CRS at array level -> crs is set
        # at GeoDataFrame level with '_crs' (and not 'crs'), so without propagating
        # to the GeoSeries/GeometryArray
        try:
            if self.crs is not None:
                if self.geometry.values.crs is None:
                    # re-assign through the property to push the CRS down
                    # into the GeometryArray
                    self.crs = self.crs
        except Exception:
            # best-effort: deliberately swallow errors (e.g. missing geometry
            # column) so unpickling never fails on CRS propagation
            pass
@classmethod
def from_dict(cls, data, geometry=None, crs=None, **kwargs):
"""
Construct GeoDataFrame from dict of array-like or dicts by
overriding DataFrame.from_dict method with geometry and crs
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
geometry : str or array (optional)
If str, column to use as geometry. If array, will be set as 'geometry'
column on GeoDataFrame.
crs : str or dict (optional)
Coordinate reference system to set on the resulting frame.
kwargs : key-word arguments
These arguments are passed to DataFrame.from_dict
Returns
-------
GeoDataFrame
"""
dataframe = DataFrame.from_dict(data, **kwargs)
return GeoDataFrame(dataframe, geometry=geometry, crs=crs)
@classmethod
def from_file(cls, filename, **kwargs):
"""Alternate constructor to create a ``GeoDataFrame`` from a file.
It is recommended to use :func:`geopandas.read_file` instead.
Can load a ``GeoDataFrame`` from a file in any format recognized by
`fiona`. See http://fiona.readthedocs.io/en/latest/manual.html for details.
Parameters
----------
filename : str
File path or file handle to read from. Depending on which kwargs
are included, the content of filename may vary. See
http://fiona.readthedocs.io/en/latest/README.html#usage for usage details.
kwargs : key-word arguments
These arguments are passed to fiona.open, and can be used to
access multi-layer data, data stored within archives (zip files),
etc.
Examples
--------
>>> path = geopandas.datasets.get_path('nybb')
>>> gdf = geopandas.GeoDataFrame.from_file(path)
>>> gdf # doctest: +SKIP
BoroCode BoroName Shape_Leng Shape_Area \
geometry
0 5 Staten Island 330470.010332 1.623820e+09 MULTIPOLYGON ((\
(970217.022 145643.332, 970227....
1 4 Queens 896344.047763 3.045213e+09 MULTIPOLYGON ((\
(1029606.077 156073.814, 102957...
2 3 Brooklyn 741080.523166 1.937479e+09 MULTIPOLYGON ((\
(1021176.479 151374.797, 102100...
3 1 Manhattan 359299.096471 6.364715e+08 MULTIPOLYGON ((\
(981219.056 188655.316, 980940....
4 2 Bronx 464392.991824 1.186925e+09 MULTIPOLYGON ((\
(1012821.806 229228.265, 101278...
The recommended method of reading files is :func:`geopandas.read_file`:
>>> gdf = geopandas.read_file(path)
See also
--------
read_file : read file to GeoDataFame
GeoDataFrame.to_file : write GeoDataFrame to file
"""
return geopandas.io.file._read_file(filename, **kwargs)
@classmethod
def from_features(cls, features, crs=None, columns=None):
"""
Alternate constructor to create GeoDataFrame from an iterable of
features or a feature collection.
Parameters
----------
features
- Iterable of features, where each element must be a feature
dictionary or implement the __geo_interface__.
- Feature collection, where the 'features' key contains an
iterable of features.
- Object holding a feature collection that implements the
``__geo_interface__``.
crs : str or dict (optional)
Coordinate reference system to set on the resulting frame.
columns : list of column names, optional
Optionally specify the column names to include in the output frame.
This does not overwrite the property names of the input, but can
ensure a consistent output format.
Returns
-------
GeoDataFrame
Notes
-----
For more information about the ``__geo_interface__``, see
https://gist.github.com/sgillies/2217756
Examples
--------
>>> feature_coll = {
... "type": "FeatureCollection",
... "features": [
... {
... "id": "0",
... "type": "Feature",
... "properties": {"col1": "name1"},
... "geometry": {"type": "Point", "coordinates": (1.0, 2.0)},
... "bbox": (1.0, 2.0, 1.0, 2.0),
... },
... {
... "id": "1",
... "type": "Feature",
... "properties": {"col1": "name2"},
... "geometry": {"type": "Point", "coordinates": (2.0, 1.0)},
... "bbox": (2.0, 1.0, 2.0, 1.0),
... },
... ],
... "bbox": (1.0, 1.0, 2.0, 2.0),
... }
>>> df = geopandas.GeoDataFrame.from_features(feature_coll)
>>> df
geometry col1
0 POINT (1.00000 2.00000) name1
1 POINT (2.00000 1.00000) name2
"""
# Handle feature collections
if hasattr(features, "__geo_interface__"):
fs = features.__geo_interface__
else:
fs = features
if isinstance(fs, dict) and fs.get("type") == "FeatureCollection":
features_lst = fs["features"]
else:
features_lst = features
rows = []
for feature in features_lst:
# load geometry
if hasattr(feature, "__geo_interface__"):
feature = feature.__geo_interface__
row = {
"geometry": shape(feature["geometry"]) if feature["geometry"] else None
}
# load properties
row.update(feature["properties"])
rows.append(row)
return GeoDataFrame(rows, columns=columns, crs=crs)
@classmethod
def from_postgis(
cls,
sql,
con,
geom_col="geom",
crs=None,
index_col=None,
coerce_float=True,
parse_dates=None,
params=None,
chunksize=None,
):
"""
Alternate constructor to create a ``GeoDataFrame`` from a sql query
containing a geometry column in WKB representation.
Parameters
----------
sql : string
con : sqlalchemy.engine.Connection or sqlalchemy.engine.Engine
geom_col : string, default 'geom'
column name to convert to shapely geometries
crs : optional
Coordinate reference system to use for the returned GeoDataFrame
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
parse_dates : list or dict, default None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime`. Especially useful with databases
without native Datetime support, such as SQLite.
params : list, tuple or dict, optional, default None
List of parameters to pass to execute method.
chunksize : int, default None
If specified, return an iterator where chunksize is the number
of rows to include in each chunk.
Examples
--------
PostGIS
>>> from sqlalchemy import create_engine # doctest: +SKIP
>>> db_connection_url = "postgresql://myusername:mypassword@myhost:5432/mydb"
>>> con = create_engine(db_connection_url) # doctest: +SKIP
>>> sql = "SELECT geom, highway FROM roads"
>>> df = geopandas.GeoDataFrame.from_postgis(sql, con) # doctest: +SKIP
SpatiaLite
>>> sql = "SELECT ST_Binary(geom) AS geom, highway FROM roads"
>>> df = geopandas.GeoDataFrame.from_postgis(sql, con) # doctest: +SKIP
The recommended method of reading from PostGIS is
:func:`geopandas.read_postgis`:
>>> df = geopandas.read_postgis(sql, con) # doctest: +SKIP
See also
--------
geopandas.read_postgis : read PostGIS database to GeoDataFrame
"""
df = geopandas.io.sql._read_postgis(
sql,
con,
geom_col=geom_col,
crs=crs,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
params=params,
chunksize=chunksize,
)
return df
def to_json(self, na="null", show_bbox=False, drop_id=False, **kwargs):
"""
Returns a GeoJSON representation of the ``GeoDataFrame`` as a string.
Parameters
----------
na : {'null', 'drop', 'keep'}, default 'null'
Indicates how to output missing (NaN) values in the GeoDataFrame.
See below.
show_bbox : bool, optional, default: False
Include bbox (bounds) in the geojson
drop_id : bool, default: False
Whether to retain the index of the GeoDataFrame as the id property
in the generated GeoJSON. Default is False, but may want True
if the index is just arbitrary row numbers.
Notes
-----
The remaining *kwargs* are passed to json.dumps().
Missing (NaN) values in the GeoDataFrame can be represented as follows:
- ``null``: output the missing entries as JSON null.
- ``drop``: remove the property from the feature. This applies to each
feature individually so that features may have different properties.
- ``keep``: output the missing entries as NaN.
Examples
--------
>>> from shapely.geometry import Point
>>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}
>>> gdf = geopandas.GeoDataFrame(d, crs="EPSG:4326")
>>> gdf
col1 geometry
0 name1 POINT (1.00000 2.00000)
1 name2 POINT (2.00000 1.00000)
>>> gdf.to_json()
'{"type": "FeatureCollection", "features": [{"id": "0", "type": "Feature", \
"properties": {"col1": "name1"}, "geometry": {"type": "Point", "coordinates": [1.0,\
2.0]}}, {"id": "1", "type": "Feature", "properties": {"col1": "name2"}, "geometry"\
: {"type": "Point", "coordinates": [2.0, 1.0]}}]}'
Alternatively, you can write GeoJSON to file:
>>> gdf.to_file(path, driver="GeoJSON") # doctest: +SKIP
See also
--------
GeoDataFrame.to_file : write GeoDataFrame to file
"""
return json.dumps(
self._to_geo(na=na, show_bbox=show_bbox, drop_id=drop_id), **kwargs
)
@property
def __geo_interface__(self):
"""Returns a ``GeoDataFrame`` as a python feature collection.
Implements the `geo_interface`. The returned python data structure
represents the ``GeoDataFrame`` as a GeoJSON-like
``FeatureCollection``.
This differs from `_to_geo()` only in that it is a property with
default args instead of a method
Examples
--------
>>> from shapely.geometry import Point
>>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}
>>> gdf = geopandas.GeoDataFrame(d, crs="EPSG:4326")
>>> gdf
col1 geometry
0 name1 POINT (1.00000 2.00000)
1 name2 POINT (2.00000 1.00000)
>>> gdf.__geo_interface__
{'type': 'FeatureCollection', 'features': [{'id': '0', 'type': 'Feature', \
'properties': {'col1': 'name1'}, 'geometry': {'type': 'Point', 'coordinates': (1.0\
, 2.0)}, 'bbox': (1.0, 2.0, 1.0, 2.0)}, {'id': '1', 'type': 'Feature', 'properties\
': {'col1': 'name2'}, 'geometry': {'type': 'Point', 'coordinates': (2.0, 1.0)}, 'b\
box': (2.0, 1.0, 2.0, 1.0)}], 'bbox': (1.0, 1.0, 2.0, 2.0)}
"""
return self._to_geo(na="null", show_bbox=True, drop_id=False)
    def iterfeatures(self, na="null", show_bbox=False, drop_id=False):
        """
        Returns an iterator that yields feature dictionaries that comply with
        __geo_interface__

        Parameters
        ----------
        na : str, optional
            Options are {'null', 'drop', 'keep'}, default 'null'.
            Indicates how to output missing (NaN) values in the GeoDataFrame

            - null: output the missing entries as JSON null
            - drop: remove the property from the feature. This applies to each feature \
individually so that features may have different properties
            - keep: output the missing entries as NaN

        show_bbox : bool, optional
            Include bbox (bounds) in the geojson. Default False.
        drop_id : bool, default: False
            Whether to retain the index of the GeoDataFrame as the id property
            in the generated GeoJSON. Default is False, but may want True
            if the index is just arbitrary row numbers.

        Examples
        --------
        >>> from shapely.geometry import Point
        >>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}
        >>> gdf = geopandas.GeoDataFrame(d, crs="EPSG:4326")
        >>> gdf
            col1                 geometry
        0  name1  POINT (1.00000 2.00000)
        1  name2  POINT (2.00000 1.00000)
        >>> feature = next(gdf.iterfeatures())
        >>> feature
        {'id': '0', 'type': 'Feature', 'properties': {'col1': 'name1'}, 'geometry': {\
'type': 'Point', 'coordinates': (1.0, 2.0)}}
        """
        # Validate the NaN-handling mode up front.
        if na not in ["null", "drop", "keep"]:
            raise ValueError("Unknown na method {0}".format(na))
        if self._geometry_column_name not in self:
            raise AttributeError(
                "No geometry data set (expected in"
                " column '%s')." % self._geometry_column_name
            )
        # Feature ids come from the index; geometries from the active column.
        ids = np.array(self.index, copy=False)
        geometries = np.array(self[self._geometry_column_name], copy=False)
        if not self.columns.is_unique:
            raise ValueError("GeoDataFrame cannot contain duplicated column names.")
        properties_cols = self.columns.difference([self._geometry_column_name])
        if len(properties_cols) > 0:
            # convert to object to get python scalars.
            properties = self[properties_cols].astype(object).values
            if na == "null":
                # replace missing entries with None so json emits null
                properties[pd.isnull(self[properties_cols]).values] = None
            for i, row in enumerate(properties):
                geom = geometries[i]
                if na == "drop":
                    # omit missing properties entirely from this feature
                    properties_items = {
                        k: v for k, v in zip(properties_cols, row) if not pd.isnull(v)
                    }
                else:
                    properties_items = {k: v for k, v in zip(properties_cols, row)}
                if drop_id:
                    feature = {}
                else:
                    feature = {"id": str(ids[i])}
                feature["type"] = "Feature"
                feature["properties"] = properties_items
                # empty/missing geometries serialize as null
                feature["geometry"] = mapping(geom) if geom else None
                if show_bbox:
                    feature["bbox"] = geom.bounds if geom else None
                yield feature
        else:
            # geometry-only frame: features carry empty properties dicts
            for fid, geom in zip(ids, geometries):
                if drop_id:
                    feature = {}
                else:
                    feature = {"id": str(fid)}
                feature["type"] = "Feature"
                feature["properties"] = {}
                feature["geometry"] = mapping(geom) if geom else None
                if show_bbox:
                    feature["bbox"] = geom.bounds if geom else None
                yield feature
def _to_geo(self, **kwargs):
"""
Returns a python feature collection (i.e. the geointerface)
representation of the GeoDataFrame.
"""
geo = {
"type": "FeatureCollection",
"features": list(self.iterfeatures(**kwargs)),
}
if kwargs.get("show_bbox", False):
geo["bbox"] = tuple(self.total_bounds)
return geo
def to_wkb(self, hex=False, **kwargs):
"""
Encode all geometry columns in the GeoDataFrame to WKB.
Parameters
----------
hex : bool
If true, export the WKB as a hexadecimal string.
The default is to return a binary bytes object.
kwargs
Additional keyword args will be passed to
:func:`pygeos.to_wkb` if pygeos is installed.
Returns
-------
DataFrame
geometry columns are encoded to WKB
"""
df = DataFrame(self.copy())
# Encode all geometry columns to WKB
for col in df.columns[df.dtypes == "geometry"]:
df[col] = to_wkb(df[col].values, hex=hex, **kwargs)
return df
def to_wkt(self, **kwargs):
"""
Encode all geometry columns in the GeoDataFrame to WKT.
Parameters
----------
kwargs
Keyword args will be passed to :func:`pygeos.to_wkt`
if pygeos is installed.
Returns
-------
DataFrame
geometry columns are encoded to WKT
"""
df = DataFrame(self.copy())
# Encode all geometry columns to WKT
for col in df.columns[df.dtypes == "geometry"]:
df[col] = to_wkt(df[col].values, **kwargs)
return df
def to_parquet(self, path, index=None, compression="snappy", **kwargs):
"""Write a GeoDataFrame to the Parquet format.
Any geometry columns present are serialized to WKB format in the file.
Requires 'pyarrow'.
WARNING: this is an initial implementation of Parquet file support and
associated metadata. This is tracking version 0.1.0 of the metadata
specification at:
https://github.com/geopandas/geo-arrow-spec
This metadata specification does not yet make stability promises. As such,
we do not yet recommend using this in a production setting unless you are
able to rewrite your Parquet files.
.. versionadded:: 0.8
Parameters
----------
path : str, path object
index : bool, default None
If ``True``, always include the dataframe's index(es) as columns
in the file output.
If ``False``, the index(es) will not be written to the file.
If ``None``, the index(ex) will be included as columns in the file
output except `RangeIndex` which is stored as metadata only.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
kwargs
Additional keyword arguments passed to :func:`pyarrow.parquet.write_table`.
Examples
--------
>>> gdf.to_parquet('data.parquet') # doctest: +SKIP
See also
--------
GeoDataFrame.to_feather : write GeoDataFrame to feather
GeoDataFrame.to_file : write GeoDataFrame to file
"""
from geopandas.io.arrow import _to_parquet
_to_parquet(self, path, compression=compression, index=index, **kwargs)
    def to_feather(self, path, index=None, compression=None, **kwargs):
        """Write a GeoDataFrame to the Feather format.

        Any geometry columns present are serialized to WKB format in the file.

        Requires 'pyarrow' >= 0.17.

        WARNING: this is an initial implementation of Feather file support and
        associated metadata. This is tracking version 0.1.0 of the metadata
        specification at:
        https://github.com/geopandas/geo-arrow-spec

        This metadata specification does not yet make stability promises. As such,
        we do not yet recommend using this in a production setting unless you are
        able to rewrite your Feather files.

        .. versionadded:: 0.8

        Parameters
        ----------
        path : str, path object
        index : bool, default None
            If ``True``, always include the dataframe's index(es) as columns
            in the file output.
            If ``False``, the index(es) will not be written to the file.
            If ``None``, the index(ex) will be included as columns in the file
            output except `RangeIndex` which is stored as metadata only.
        compression : {'zstd', 'lz4', 'uncompressed'}, optional
            Name of the compression to use. Use ``"uncompressed"`` for no
            compression. By default uses LZ4 if available, otherwise uncompressed.
        kwargs
            Additional keyword arguments passed to
            :func:`pyarrow.feather.write_feather`.

        Examples
        --------
        >>> gdf.to_feather('data.feather')  # doctest: +SKIP

        See also
        --------
        GeoDataFrame.to_parquet : write GeoDataFrame to parquet
        GeoDataFrame.to_file : write GeoDataFrame to file
        """
        # Imported lazily so pyarrow is only required when actually writing.
        from geopandas.io.arrow import _to_feather

        _to_feather(self, path, index=index, compression=compression, **kwargs)
def to_file(self, filename, driver=None, schema=None, index=None, **kwargs):
"""Write the ``GeoDataFrame`` to a file.
By default, an ESRI shapefile is written, but any OGR data source
supported by Fiona can be written. A dictionary of supported OGR
providers is available via:
>>> import fiona
>>> fiona.supported_drivers # doctest: +SKIP
Parameters
----------
filename : string
File path or file handle to write to.
driver : string, default None
The OGR format driver used to write the vector file.
If not specified, it attempts to infer it from the file extension.
If no extension is specified, it saves ESRI Shapefile to a folder.
schema : dict, default: None
If specified, the schema dictionary is passed to Fiona to
better control how the file is written.
index : bool, default None
If True, write index into one or more columns (for MultiIndex).
Default None writes the index into one or more columns only if
the index is named, is a MultiIndex, or has a non-integer data
type. If False, no index is written.
.. versionadded:: 0.7
Previously the index was not written.
Notes
-----
The extra keyword arguments ``**kwargs`` are passed to fiona.open and
can be used to write to multi-layer data, store data within archives
(zip files), etc.
The format drivers will attempt to detect the encoding of your data, but
may fail. In this case, the proper encoding can be specified explicitly
by using the encoding keyword parameter, e.g. ``encoding='utf-8'``.
See Also
--------
GeoSeries.to_file
GeoDataFrame.to_postgis : write GeoDataFrame to PostGIS database
GeoDataFrame.to_parquet : write GeoDataFrame to parquet
GeoDataFrame.to_feather : write GeoDataFrame to feather
Examples
--------
>>> gdf.to_file('dataframe.shp') # doctest: +SKIP
>>> gdf.to_file('dataframe.gpkg', driver='GPKG', layer='name') # doctest: +SKIP
>>> gdf.to_file('dataframe.geojson', driver='GeoJSON') # doctest: +SKIP
With selected drivers you can also append to a file with `mode="a"`:
>>> gdf.to_file('dataframe.shp', mode="a") # doctest: +SKIP
"""
from geopandas.io.file import _to_file
_to_file(self, filename, driver, schema, index, **kwargs)
def set_crs(self, crs=None, epsg=None, inplace=False, allow_override=False):
"""
Set the Coordinate Reference System (CRS) of the ``GeoDataFrame``.
If there are multiple geometry columns within the GeoDataFrame, only
the CRS of the active geometry column is set.
NOTE: The underlying geometries are not transformed to this CRS. To
transform the geometries to a new CRS, use the ``to_crs`` method.
Parameters
----------
crs : pyproj.CRS, optional if `epsg` is specified
The value can be anything accepted
by :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,
such as an authority string (eg "EPSG:4326") or a WKT string.
epsg : int, optional if `crs` is specified
EPSG code specifying the projection.
inplace : bool, default False
If True, the CRS of the GeoDataFrame will be changed in place
(while still returning the result) instead of making a copy of
the GeoDataFrame.
allow_override : bool, default False
If the the GeoDataFrame already has a CRS, allow to replace the
existing CRS, even when both are not equal.
Examples
--------
>>> from shapely.geometry import Point
>>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}
>>> gdf = geopandas.GeoDataFrame(d)
>>> gdf
col1 geometry
0 name1 POINT (1.00000 2.00000)
1 name2 POINT (2.00000 1.00000)
Setting CRS to a GeoDataFrame without one:
>>> gdf.crs is None
True
>>> gdf = gdf.set_crs('epsg:3857')
>>> gdf.crs # doctest: +SKIP
<Projected CRS: EPSG:3857>
Name: WGS 84 / Pseudo-Mercator
Axis Info [cartesian]:
- X[east]: Easting (metre)
- Y[north]: Northing (metre)
Area of Use:
- name: World - 85°S to 85°N
- bounds: (-180.0, -85.06, 180.0, 85.06)
Coordinate Operation:
- name: Popular Visualisation Pseudo-Mercator
- method: Popular Visualisation Pseudo Mercator
Datum: World Geodetic System 1984
- Ellipsoid: WGS 84
- Prime Meridian: Greenwich
Overriding existing CRS:
>>> gdf = gdf.set_crs(4326, allow_override=True)
Without ``allow_override=True``, ``set_crs`` returns an error if you try to
override CRS.
See also
--------
GeoDataFrame.to_crs : re-project to another CRS
"""
if not inplace:
df = self.copy()
else:
df = self
df.geometry = df.geometry.set_crs(
crs=crs, epsg=epsg, allow_override=allow_override, inplace=True
)
return df
def to_crs(self, crs=None, epsg=None, inplace=False):
"""Transform geometries to a new coordinate reference system.
Transform all geometries in an active geometry column to a different coordinate
reference system. The ``crs`` attribute on the current GeoSeries must
be set. Either ``crs`` or ``epsg`` may be specified for output.
This method will transform all points in all objects. It has no notion
or projecting entire geometries. All segments joining points are
assumed to be lines in the current projection, not geodesics. Objects
crossing the dateline (or other projection boundary) will have
undesirable behavior.
Parameters
----------
crs : pyproj.CRS, optional if `epsg` is specified
The value can be anything accepted by
:meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,
such as an authority string (eg "EPSG:4326") or a WKT string.
epsg : int, optional if `crs` is specified
EPSG code specifying output projection.
inplace : bool, optional, default: False
Whether to return a new GeoDataFrame or do the transformation in
place.
Returns
-------
GeoDataFrame
Examples
--------
>>> from shapely.geometry import Point
>>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}
>>> gdf = geopandas.GeoDataFrame(d, crs=4326)
>>> gdf
col1 geometry
0 name1 POINT (1.00000 2.00000)
1 name2 POINT (2.00000 1.00000)
>>> gdf.crs # doctest: +SKIP
<Geographic 2D CRS: EPSG:4326>
Name: WGS 84
Axis Info [ellipsoidal]:
- Lat[north]: Geodetic latitude (degree)
- Lon[east]: Geodetic longitude (degree)
Area of Use:
- name: World
- bounds: (-180.0, -90.0, 180.0, 90.0)
Datum: World Geodetic System 1984
- Ellipsoid: WGS 84
- Prime Meridian: Greenwich
>>> gdf = gdf.to_crs(3857)
>>> gdf
col1 geometry
0 name1 POINT (111319.491 222684.209)
1 name2 POINT (222638.982 111325.143)
>>> gdf.crs # doctest: +SKIP
<Projected CRS: EPSG:3857>
Name: WGS 84 / Pseudo-Mercator
Axis Info [cartesian]:
- X[east]: Easting (metre)
- Y[north]: Northing (metre)
Area of Use:
- name: World - 85°S to 85°N
- bounds: (-180.0, -85.06, 180.0, 85.06)
Coordinate Operation:
- name: Popular Visualisation Pseudo-Mercator
- method: Popular Visualisation Pseudo Mercator
Datum: World Geodetic System 1984
- Ellipsoid: WGS 84
- Prime Meridian: Greenwich
See also
--------
GeoDataFrame.set_crs : assign CRS without re-projection
"""
if inplace:
df = self
else:
df = self.copy()
geom = df.geometry.to_crs(crs=crs, epsg=epsg)
df.geometry = geom
df.crs = geom.crs
if not inplace:
return df
def estimate_utm_crs(self, datum_name="WGS 84"):
"""Returns the estimated UTM CRS based on the bounds of the dataset.
.. versionadded:: 0.9
.. note:: Requires pyproj 3+
Parameters
----------
datum_name : str, optional
The name of the datum to use in the query. Default is WGS 84.
Returns
-------
pyproj.CRS
Examples
--------
>>> world = geopandas.read_file(
... geopandas.datasets.get_path("naturalearth_lowres")
... )
>>> germany = world.loc[world.name == "Germany"]
>>> germany.estimate_utm_crs() # doctest: +SKIP
<Projected CRS: EPSG:32632>
Name: WGS 84 / UTM zone 32N
Axis Info [cartesian]:
- E[east]: Easting (metre)
- N[north]: Northing (metre)
Area of Use:
- name: World - N hemisphere - 6°E to 12°E - by country
- bounds: (6.0, 0.0, 12.0, 84.0)
Coordinate Operation:
- name: UTM zone 32N
- method: Transverse Mercator
Datum: World Geodetic System 1984
- Ellipsoid: WGS 84
- Prime Meridian: Greenwich
"""
return self.geometry.estimate_utm_crs(datum_name=datum_name)
    def __getitem__(self, key):
        """
        If the result is a column containing only 'geometry', return a
        GeoSeries. If it's a DataFrame with any columns of GeometryDtype,
        return a GeoDataFrame.
        """
        result = super().__getitem__(key)
        geo_col = self._geometry_column_name
        if isinstance(result, Series) and isinstance(result.dtype, GeometryDtype):
            # In-place class swap avoids reconstructing the object.
            result.__class__ = GeoSeries
        elif isinstance(result, DataFrame):
            if (result.dtypes == "geometry").sum() > 0:
                result.__class__ = GeoDataFrame
                if geo_col in result:
                    result._geometry_column_name = geo_col
                else:
                    # the active geometry column was excluded by the selection
                    result._geometry_column_name = None
                    result._crs = None
            else:
                # no geometry columns survived -> demote to plain DataFrame
                result.__class__ = DataFrame
        return result
def __setitem__(self, key, value):
    """
    Overwritten to preserve CRS of GeometryArray in cases like
    df['geometry'] = [geom... for geom in df.geometry]
    """
    assigning_geometry = (
        not pd.api.types.is_list_like(key) and key == self._geometry_column_name
    )
    if assigning_geometry:
        # Broadcast a single geometry (or scalar) over every row.
        if pd.api.types.is_scalar(value) or isinstance(value, BaseGeometry):
            value = [value] * self.shape[0]
        try:
            value = _ensure_geometry(value, crs=self.crs)
            self._crs = value.crs
        except TypeError:
            warnings.warn("Geometry column does not contain geometry.")
    super().__setitem__(key, value)
#
# Implement pandas methods
#
def merge(self, *args, **kwargs):
    r"""Merge two ``GeoDataFrame`` objects with a database-style join.

    Returns a ``GeoDataFrame`` if a geometry column is present; otherwise,
    returns a pandas ``DataFrame``.

    Returns
    -------
    GeoDataFrame or DataFrame

    Notes
    -----
    The extra arguments ``*args`` and keyword arguments ``**kwargs`` are
    passed to DataFrame.merge.

    Reference
    ---------
    https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas\
    .DataFrame.merge.html
    """
    merged = DataFrame.merge(self, *args, **kwargs)
    active_col = self._geometry_column_name
    if isinstance(merged, DataFrame):
        if active_col in merged:
            # Geometry survived the join: restore GeoDataFrame metadata.
            merged.__class__ = GeoDataFrame
            merged.crs = self.crs
            merged._geometry_column_name = active_col
        else:
            merged.__class__ = DataFrame
    return merged
@doc(pd.DataFrame)
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwargs):
    # Apply via pandas, then try to restore the GeoDataFrame/CRS metadata
    # that pandas' apply machinery can drop along the way.
    result = super().apply(
        func, axis=axis, raw=raw, result_type=result_type, args=args, **kwargs
    )
    # Reconstruct gdf if it was lost by apply
    if (
        isinstance(result, DataFrame)
        and self._geometry_column_name in result.columns
    ):
        # axis=1 apply will split GeometryDType to object, try and cast back
        try:
            result = result.set_geometry(self._geometry_column_name)
        except TypeError:
            # Column no longer holds geometries; return the plain result.
            pass
        else:
            # Re-attach the CRS when the applied result lost it.
            if self.crs is not None and result.crs is None:
                result.set_crs(self.crs, inplace=True)
    return result
@property
def _constructor(self):
    # pandas internals use this when building a new frame from this one;
    # the "_with_fallback" name suggests it degrades to DataFrame when the
    # result has no geometry — TODO confirm against its definition.
    return _geodataframe_constructor_with_fallback

@property
def _constructor_sliced(self):
    # pandas internals use this for 1-D slices (single rows/columns);
    # presumably falls back to Series for non-geometry slices — confirm
    # against the constructor's definition.
    return _geoseries_constructor_with_fallback
def __finalize__(self, other, method=None, **kwargs):
    """propagate metadata from other to self"""
    self = super().__finalize__(other, method=method, **kwargs)
    # merge operation: using metadata of the left object
    if method == "merge":
        for name in self._metadata:
            object.__setattr__(self, name, getattr(other.left, name, None))
    # concat operation: using metadata of the first object
    elif method == "concat":
        for name in self._metadata:
            object.__setattr__(self, name, getattr(other.objs[0], name, None))
        # Guard: a concat must not produce two columns that both carry the
        # active geometry column's name, since the active column would be
        # ambiguous afterwards.
        if (self.columns == self._geometry_column_name).sum() > 1:
            raise ValueError(
                "Concat operation has resulted in multiple columns using "
                f"the geometry column name '{self._geometry_column_name}'.\n"
                f"Please ensure this column from the first DataFrame is not "
                f"repeated."
            )
    return self
def dissolve(
    self,
    by=None,
    aggfunc="first",
    as_index=True,
    level=None,
    sort=True,
    observed=False,
    dropna=True,
):
    """
    Dissolve geometries within `groupby` into single observation.

    This is accomplished by applying the `unary_union` method
    to all geometries within a groupself.

    Observations associated with each `groupby` group will be aggregated
    using the `aggfunc`.

    Parameters
    ----------
    by : string, default None
        Column whose values define groups to be dissolved. If None,
        whole GeoDataFrame is considered a single group.
    aggfunc : function or string, default "first"
        Aggregation function for manipulation of data associated
        with each group. Passed to pandas `groupby.agg` method.
        Accepted combinations are a function, a function name, a list of
        functions/names, or a dict of axis labels -> functions.
    as_index : boolean, default True
        If true, groupby columns become index of result.
    level : int or str or sequence of int or sequence of str, default None
        If the axis is a MultiIndex (hierarchical), group by a
        particular level or levels.

        .. versionadded:: 0.9.0
    sort : bool, default True
        Sort group keys. Get better performance by turning this off.
        Note this does not influence the order of observations within
        each group.

        .. versionadded:: 0.9.0
    observed : bool, default False
        This only applies if any of the groupers are Categoricals.
        If True: only show observed values for categorical groupers.

        .. versionadded:: 0.9.0
    dropna : bool, default True
        If True, and if group keys contain NA values, NA values
        together with row/column will be dropped. If False, NA
        values will also be treated as the key in groups.
        Not supported for pandas < 1.1.0; a warning is emitted there
        when a non-default value is given.

        .. versionadded:: 0.9.0

    Returns
    -------
    GeoDataFrame

    See also
    --------
    GeoDataFrame.explode : explode multi-part geometries into single geometries
    """
    # No grouper given: treat the whole frame as a single group.
    if by is None and level is None:
        by = np.zeros(len(self), dtype="int64")

    groupby_kwargs = {
        "by": by,
        "level": level,
        "sort": sort,
        "observed": observed,
        "dropna": dropna,
    }
    if not compat.PANDAS_GE_11:
        groupby_kwargs.pop("dropna")
        if not dropna:  # a non-default dropna value was requested
            warnings.warn("dropna kwarg is not supported for pandas < 1.1.0")

    # Aggregate the non-spatial columns.
    non_spatial = self.drop(labels=self.geometry.name, axis=1)
    aggregated_data = non_spatial.groupby(**groupby_kwargs).agg(aggfunc)
    aggregated_data.columns = aggregated_data.columns.to_flat_index()

    # Union the geometries of each group into a single geometry.
    def merge_geometries(block):
        return block.unary_union

    merged = self.groupby(group_keys=False, **groupby_kwargs)[
        self.geometry.name
    ].agg(merge_geometries)
    aggregated_geometry = GeoDataFrame(
        merged, geometry=self.geometry.name, crs=self.crs
    )

    # Recombine the spatial and non-spatial parts.
    aggregated = aggregated_geometry.join(aggregated_data)
    if not as_index:
        aggregated = aggregated.reset_index()
    return aggregated
# overrides the pandas native explode method to break up features geometrically
def explode(self, column=None, ignore_index=False, index_parts=None, **kwargs):
    """
    Explode multi-part geometries into multiple single geometries.

    Each row containing a multi-part geometry will be split into
    multiple rows with single geometries, thereby increasing the vertical
    size of the GeoDataFrame.

    .. note:: ignore_index requires pandas 1.1.0 or newer.

    Parameters
    ----------
    column : string, default None
        Column to explode. In the case of a geometry column, multi-part
        geometries are converted to single-part.
        If None, the active geometry column is used.
    ignore_index : bool, default False
        If True, the resulting index will be labelled 0, 1, …, n - 1,
        ignoring `index_parts`.
    index_parts : boolean, default True
        If True, the resulting index will be a multi-index (original
        index with an additional level indicating the multiple
        geometries: a new zero-based index for each single part geometry
        per multi-part geometry).

    Returns
    -------
    GeoDataFrame
        Exploded geodataframe with each single geometry
        as a separate entry in the geodataframe.

    See also
    --------
    GeoDataFrame.dissolve : dissolve geometries into a single observation.
    """
    # Default to the active geometry column.
    if column is None:
        column = self.geometry.name

    # Non-geometry columns are handled entirely by pandas' explode.
    if not isinstance(self[column].dtype, GeometryDtype):
        if compat.PANDAS_GE_11:
            return super().explode(column, ignore_index=ignore_index, **kwargs)
        return super().explode(column, **kwargs)

    if index_parts is None:
        if not ignore_index:
            warnings.warn(
                "Currently, index_parts defaults to True, but in the future, "
                "it will default to False to be consistent with Pandas. "
                "Use `index_parts=True` to keep the current behavior and "
                "True/False to silence the warning.",
                FutureWarning,
                stacklevel=2,
            )
        index_parts = True

    exploded_geom = self.geometry.reset_index(drop=True).explode(index_parts=True)

    # Repeat the non-geometry rows so they line up with the exploded parts.
    exploded = GeoDataFrame(
        self.drop(self._geometry_column_name, axis=1).take(
            exploded_geom.index.droplevel(-1)
        ),
        geometry=exploded_geom.values,
    ).__finalize__(self)

    if ignore_index:
        exploded.reset_index(inplace=True, drop=True)
    elif index_parts:
        # reset to MultiIndex, otherwise df index is only first level of
        # exploded GeoSeries index.
        exploded = exploded.set_index(
            exploded_geom.index.droplevel(
                list(range(exploded_geom.index.nlevels - 1))
            ),
            append=True,
        )
    return exploded
# overrides the pandas astype method to ensure the correct return type
def astype(self, dtype, copy=True, errors="raise", **kwargs):
    """
    Cast a pandas object to a specified dtype ``dtype``.

    Returns a GeoDataFrame when the geometry column is kept as geometries,
    otherwise returns a pandas DataFrame.

    See the pandas.DataFrame.astype docstring for more details.

    Returns
    -------
    GeoDataFrame or DataFrame
    """
    converted = super().astype(dtype, copy=copy, errors=errors, **kwargs)
    try:
        geom_col = converted[self._geometry_column_name]
        if is_geometry_type(geom_col):
            return geopandas.GeoDataFrame(
                converted, geometry=self._geometry_column_name
            )
    except KeyError:
        pass
    # The geometry column was converted to non-geometries or did not exist:
    # do not return a GeoDataFrame.
    return pd.DataFrame(converted)
def convert_dtypes(self, *args, **kwargs):
    """
    Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.

    Always returns a GeoDataFrame as no conversions are applied to the
    geometry column.

    See the pandas.DataFrame.convert_dtypes docstring for more details.

    Returns
    -------
    GeoDataFrame
    """
    # Overridden to fix GH1870, that return type is not preserved always
    # (and where it was, geometry col was not)
    converted = super().convert_dtypes(*args, **kwargs)
    return GeoDataFrame(converted, geometry=self.geometry.name, crs=self.crs)
def to_postgis(
    self,
    name,
    con,
    schema=None,
    if_exists="fail",
    index=False,
    index_label=None,
    chunksize=None,
    dtype=None,
):
    """
    Upload GeoDataFrame into PostGIS database.

    This method requires SQLAlchemy and GeoAlchemy2, and a PostgreSQL
    Python driver (e.g. psycopg2) to be installed.

    Parameters
    ----------
    name : str
        Name of the target table.
    con : sqlalchemy.engine.Connection or sqlalchemy.engine.Engine
        Active connection to the PostGIS database.
    schema : string, optional
        Specify the schema. If None, use default schema: 'public'.
    if_exists : {'fail', 'replace', 'append'}, default 'fail'
        How to behave if the table already exists:

        - fail: Raise a ValueError.
        - replace: Drop the table before inserting new values.
        - append: Insert new values to the existing table.
    index : bool, default False
        Write DataFrame index as a column.
        Uses *index_label* as the column name in the table.
    index_label : string or sequence, default None
        Column label for index column(s).
        If None is given (default) and index is True,
        then the index names are used.
    chunksize : int, optional
        Rows will be written in batches of this size at a time.
        By default, all rows will be written at once.
    dtype : dict of column name to SQL type, default None
        Specifying the datatype for columns.
        The keys should be the column names and the values
        should be the SQLAlchemy types.

    Examples
    --------
    >>> from sqlalchemy import create_engine
    >>> engine = create_engine("postgresql://myusername:mypassword@myhost:5432\
/mydatabase")  # doctest: +SKIP
    >>> gdf.to_postgis("my_table", engine)  # doctest: +SKIP

    See also
    --------
    GeoDataFrame.to_file : write GeoDataFrame to file
    read_postgis : read PostGIS database to GeoDataFrame
    """
    # NOTE: the docstring previously claimed ``index`` defaults to True and
    # listed if_exists before schema; both now match the actual signature.
    geopandas.io.sql._write_postgis(
        self, name, con, schema, if_exists, index, index_label, chunksize, dtype
    )
#
# Implement standard operators for GeoSeries
#
def __xor__(self, other):
    """Implement ^ operator as for builtin set type"""
    # Deprecated alias for GeoSeries.symmetric_difference on the active
    # geometry column.
    warnings.warn(
        "'^' operator will be deprecated. Use the 'symmetric_difference' "
        "method instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return self.geometry.symmetric_difference(other)

def __or__(self, other):
    """Implement | operator as for builtin set type"""
    # Deprecated alias for GeoSeries.union.
    warnings.warn(
        "'|' operator will be deprecated. Use the 'union' method instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return self.geometry.union(other)

def __and__(self, other):
    """Implement & operator as for builtin set type"""
    # Deprecated alias for GeoSeries.intersection.
    warnings.warn(
        "'&' operator will be deprecated. Use the 'intersection' method instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return self.geometry.intersection(other)

def __sub__(self, other):
    """Implement - operator as for builtin set type"""
    # Deprecated alias for GeoSeries.difference.
    warnings.warn(
        "'-' operator will be deprecated. Use the 'difference' method instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return self.geometry.difference(other)
# Plotting accessor attached via pandas' CachedAccessor machinery
# (constructed lazily, once per instance).
plot = CachedAccessor("plot", geopandas.plotting.GeoplotAccessor)

@doc(_explore)
def explore(self, *args, **kwargs):
    """Interactive map based on folium/leaflet.js"""
    return _explore(self, *args, **kwargs)
def sjoin(self, df, *args, **kwargs):
    """Spatial join of two GeoDataFrames.

    See the User Guide page :doc:`../../user_guide/mergingdata` for details.

    Parameters
    ----------
    df : GeoDataFrame
    how : string, default 'inner'
        The type of join:

        * 'left': use keys from left_df; retain only left_df geometry column
        * 'right': use keys from right_df; retain only right_df geometry column
        * 'inner': use intersection of keys from both dfs; retain only
          left_df geometry column
    predicate : string, default 'intersects'
        Binary predicate. Valid values are determined by the spatial index used.
        You can check the valid values in left_df or right_df as
        ``left_df.sindex.valid_query_predicates`` or
        ``right_df.sindex.valid_query_predicates``
    lsuffix : string, default 'left'
        Suffix to apply to overlapping column names (left GeoDataFrame).
    rsuffix : string, default 'right'
        Suffix to apply to overlapping column names (right GeoDataFrame).

    Notes
    -----
    Every operation in GeoPandas is planar, i.e. the potential third
    dimension is not taken into account.

    See also
    --------
    GeoDataFrame.sjoin_nearest : nearest neighbor join
    sjoin : equivalent top-level function
    """
    # Pass self/df positionally: the previous form
    # ``geopandas.sjoin(left_df=self, right_df=df, *args, **kwargs)`` raised
    # "got multiple values for argument 'left_df'" whenever any positional
    # argument was supplied, because *args binds to the first parameters.
    return geopandas.sjoin(self, df, *args, **kwargs)
def sjoin_nearest(
    self,
    right,
    how="inner",
    max_distance=None,
    lsuffix="left",
    rsuffix="right",
    distance_col=None,
):
    """
    Spatial join of two GeoDataFrames based on the distance between their
    geometries.

    Results will include multiple output records for a single input record
    where there are multiple equidistant nearest or intersected neighbors.

    Parameters
    ----------
    right : GeoDataFrame
    how : string, default 'inner'
        The type of join:

        * 'left': use keys from left_df; retain only left_df geometry column
        * 'right': use keys from right_df; retain only right_df geometry column
        * 'inner': use intersection of keys from both dfs; retain only
          left_df geometry column
    max_distance : float, default None
        Maximum distance within which to query for nearest geometry.
        Must be greater than 0. Restricting the search distance can have a
        significant positive impact on performance, since it reduces the
        number of input geometries evaluated in the tree.
    lsuffix : string, default 'left'
        Suffix to apply to overlapping column names (left GeoDataFrame).
    rsuffix : string, default 'right'
        Suffix to apply to overlapping column names (right GeoDataFrame).
    distance_col : string, default None
        If set, save the distances computed between matching geometries under a
        column of this name in the joined GeoDataFrame.

    Notes
    -----
    Since this join relies on distances, results will be inaccurate
    if your geometries are in a geographic CRS.

    Every operation in GeoPandas is planar, i.e. the potential third
    dimension is not taken into account.

    See also
    --------
    GeoDataFrame.sjoin : binary predicate joins
    sjoin_nearest : equivalent top-level function
    """
    # Thin wrapper: forward everything to the top-level function with
    # this frame as the left side of the join.
    return geopandas.sjoin_nearest(
        self,
        right,
        how=how,
        max_distance=max_distance,
        lsuffix=lsuffix,
        rsuffix=rsuffix,
        distance_col=distance_col,
    )
def clip(self, mask, keep_geom_type=False):
    """Clip points, lines, or polygon geometries to the mask extent.

    Both layers must be in the same Coordinate Reference System (CRS).
    The GeoDataFrame will be clipped to the full extent of the `mask` object.
    If there are multiple polygons in mask, data from the GeoDataFrame will be
    clipped to the total boundary of all polygons in mask.

    Parameters
    ----------
    mask : GeoDataFrame, GeoSeries, (Multi)Polygon
        Polygon vector layer used to clip `gdf`.
        The mask's geometry is dissolved into one geometric feature
        and intersected with `gdf`.
    keep_geom_type : boolean, default False
        If True, return only geometries of original type in case of
        intersection resulting in multiple geometry types or
        GeometryCollections. If False, return all resulting geometries
        (potentially mixed types).

    Returns
    -------
    GeoDataFrame
        Vector data (points, lines, polygons) from `gdf` clipped to
        polygon boundary from mask.

    See also
    --------
    clip : equivalent top-level function
    """
    # Delegates to the module-level geopandas.clip.
    return geopandas.clip(self, mask=mask, keep_geom_type=keep_geom_type)
def overlay(self, right, how="intersection", keep_geom_type=None, make_valid=True):
    """Perform spatial overlay between GeoDataFrames.

    Currently only supports data GeoDataFrames with uniform geometry types,
    i.e. containing only (Multi)Polygons, or only (Multi)Points, or a
    combination of (Multi)LineString and LinearRing shapes.
    Implements several methods that are all effectively subsets of the union.

    See the User Guide page :doc:`../../user_guide/set_operations` for details.

    Parameters
    ----------
    right : GeoDataFrame
    how : string
        Method of spatial overlay: 'intersection', 'union',
        'identity', 'symmetric_difference' or 'difference'.
    keep_geom_type : bool
        If True, return only geometries of the same geometry type the
        GeoDataFrame has, if False, return all resulting geometries.
        Default is None, which will set keep_geom_type to True but warn
        upon dropping geometries.
    make_valid : bool, default True
        If True, any invalid input geometries are corrected with a call to
        `buffer(0)`, if False, a `ValueError` is raised if any input
        geometries are invalid.

    Returns
    -------
    df : GeoDataFrame
        GeoDataFrame with new set of polygons and attributes
        resulting from the overlay

    Notes
    -----
    Every operation in GeoPandas is planar, i.e. the potential third
    dimension is not taken into account.

    See also
    --------
    GeoDataFrame.sjoin : spatial join
    overlay : equivalent top-level function
    """
    # Delegates to the module-level geopandas.overlay.
    return geopandas.overlay(
        self, right, how=how, keep_geom_type=keep_geom_type, make_valid=make_valid
    )
def _dataframe_set_geometry(self, col, drop=False, inplace=False, crs=None):
    # Monkey-patched onto pandas.DataFrame below: promotes a plain DataFrame
    # to a GeoDataFrame with `col` as the active geometry column.
    if inplace:
        raise ValueError(
            "Can't do inplace setting when converting from DataFrame to GeoDataFrame"
        )
    # Constructing a GeoDataFrame copies the BlockManager, so the caller's
    # DataFrame is left untouched.
    converted = GeoDataFrame(self)
    return converted.set_geometry(col, drop=drop, inplace=False, crs=crs)
# Expose set_geometry on plain DataFrames so they can be promoted to
# GeoDataFrames.
DataFrame.set_geometry = _dataframe_set_geometry

if not compat.PANDAS_GE_11:  # i.e. on pandas 1.0.x
    # Older pandas reconstructs frames via _from_axes; route the
    # fallback-aware constructor through GeoDataFrame's implementation.
    _geodataframe_constructor_with_fallback._from_axes = GeoDataFrame._from_axes
| 37.839198 | 88 | 0.572614 |
c52bdd0c1f8edda5e974ded2c451c4092f8b68b5 | 660 | py | Python | data_registry/process_manager/utils.py | open-contracting/data-registry | 5a73e7f2334c6af5be23070493842b494b3e5357 | [
"BSD-3-Clause"
] | null | null | null | data_registry/process_manager/utils.py | open-contracting/data-registry | 5a73e7f2334c6af5be23070493842b494b3e5357 | [
"BSD-3-Clause"
] | 170 | 2021-02-12T12:52:37.000Z | 2022-03-28T14:37:05.000Z | data_registry/process_manager/utils.py | open-contracting/data-registry | 5a73e7f2334c6af5be23070493842b494b3e5357 | [
"BSD-3-Clause"
] | null | null | null | import logging
import requests
from requests.exceptions import RequestException
from data_registry.process_manager.task.exceptions import RecoverableException
logger = logging.getLogger(__name__)
def request(method, url, **kwargs):
    """Perform an HTTP request and raise/log on failure.

    Extra keyword arguments are forwarded to ``requests.request``, except:

    error_msg : str, optional
        Message used when the request fails.
    consume_exception : bool, optional
        If True, a failure is logged (with traceback) and ``None`` is
        returned; otherwise a ``RecoverableException`` is raised, chained
        to the original ``RequestException``.
    """
    error_msg = kwargs.pop("error_msg", f"Request on {url} failed")
    consume_exception = kwargs.pop("consume_exception", False)
    try:
        response = requests.request(method, url, **kwargs)
        # Treat HTTP error statuses (4xx/5xx) as failures too.
        response.raise_for_status()
    except RequestException as exc:
        if not consume_exception:
            raise RecoverableException(error_msg) from exc
        # Best-effort mode: record the failure and carry on.
        logger.exception(error_msg)
        return None
    return response
| 27.5 | 78 | 0.712121 |
a72055db0eb8f7327335aacb5c4c4b0184b51f7f | 28,969 | py | Python | tests/pyfunc/test_model_export_with_class_and_artifacts.py | margaret-databricks/mlflow | 25dcf038e6a6a6c4e26ff3f55391eaedeeb25293 | [
"Apache-2.0"
] | null | null | null | tests/pyfunc/test_model_export_with_class_and_artifacts.py | margaret-databricks/mlflow | 25dcf038e6a6a6c4e26ff3f55391eaedeeb25293 | [
"Apache-2.0"
] | null | null | null | tests/pyfunc/test_model_export_with_class_and_artifacts.py | margaret-databricks/mlflow | 25dcf038e6a6a6c4e26ff3f55391eaedeeb25293 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import os
import json
import mock
from subprocess import Popen, STDOUT
import numpy as np
import pandas as pd
import pandas.testing
import pytest
import sklearn.datasets
import sklearn.linear_model
import sklearn.neighbors
import yaml
import mlflow
import mlflow.pyfunc
import mlflow.pyfunc.cli
import mlflow.pyfunc.model
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
import mlflow.sklearn
from mlflow.exceptions import MlflowException
from mlflow.models import Model
from mlflow.tracking.artifact_utils import get_artifact_uri as utils_get_artifact_uri, \
_get_model_log_dir
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.model_utils import _get_flavor_configuration
import tests
from tests.helper_functions import pyfunc_serve_and_score_model
from tests.helper_functions import score_model_in_sagemaker_docker_container
def get_model_class():
    """
    Defines a custom Python model class that wraps a scikit-learn estimator.
    This can be invoked within a pytest fixture to define the class in the ``__main__`` scope.
    Alternatively, it can be invoked within a module to define the class in the module's scope.
    """

    class CustomSklearnModel(mlflow.pyfunc.PythonModel):
        # Wraps the sklearn model stored as the "sk_model" artifact and
        # applies a caller-supplied prediction function to it.

        def __init__(self, predict_fn):
            self.predict_fn = predict_fn

        def load_context(self, context):
            super(CustomSklearnModel, self).load_context(context)
            # pylint: disable=attribute-defined-outside-init
            self.model = mlflow.sklearn.load_model(path=context.artifacts["sk_model"])

        def predict(self, context, model_input):
            return self.predict_fn(self.model, model_input)

    return CustomSklearnModel
class ModuleScopedSklearnModel(get_model_class()):
    """
    A custom Python model class defined in the test module scope.
    """


@pytest.fixture(scope="module")
def main_scoped_model_class():
    """
    A custom Python model class defined in the ``__main__`` scope.
    """
    return get_model_class()
@pytest.fixture(scope="module")
def iris_data():
    # First two iris features and the class labels.
    iris = sklearn.datasets.load_iris()
    x = iris.data[:, :2]
    y = iris.target
    return x, y


@pytest.fixture(scope="module")
def sklearn_knn_model(iris_data):
    # K-nearest-neighbors classifier fit on the iris fixture data.
    x, y = iris_data
    knn_model = sklearn.neighbors.KNeighborsClassifier()
    knn_model.fit(x, y)
    return knn_model


@pytest.fixture(scope="module")
def sklearn_logreg_model(iris_data):
    # Logistic-regression classifier fit on the iris fixture data.
    x, y = iris_data
    linear_lr = sklearn.linear_model.LogisticRegression()
    linear_lr.fit(x, y)
    return linear_lr


@pytest.fixture
def model_path(tmpdir):
    # Path (not yet created) for saving a model within a test.
    return os.path.join(str(tmpdir), "model")


@pytest.fixture
def pyfunc_custom_env(tmpdir):
    # Conda environment file declaring the test models' dependencies.
    conda_env = os.path.join(str(tmpdir), "conda_env.yml")
    _mlflow_conda_env(
        conda_env,
        additional_conda_deps=["scikit-learn", "pytest", "cloudpickle"])
    return conda_env
@pytest.mark.large
def test_model_save_load(sklearn_knn_model, main_scoped_model_class, iris_data, tmpdir):
sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model")
mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path)
def test_predict(sk_model, model_input):
return sk_model.predict(model_input) * 2
pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model")
mlflow.pyfunc.save_model(dst_path=pyfunc_model_path,
artifacts={
"sk_model": sklearn_model_path
},
python_model=main_scoped_model_class(test_predict))
loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(path=pyfunc_model_path)
np.testing.assert_array_equal(
loaded_pyfunc_model.predict(model_input=iris_data[0]),
test_predict(sk_model=sklearn_knn_model, model_input=iris_data[0]))
@pytest.mark.large
def test_model_log_load(sklearn_knn_model, main_scoped_model_class, iris_data):
    """Log a pyfunc model whose artifact is a previously logged sklearn model, reload, compare predictions."""
    sklearn_artifact_path = "sk_model"
    with mlflow.start_run():
        mlflow.sklearn.log_model(sk_model=sklearn_knn_model, artifact_path=sklearn_artifact_path)
        sklearn_run_id = mlflow.active_run().info.run_uuid

    # Wrapper prediction logic captured by the custom python model.
    def test_predict(sk_model, model_input):
        return sk_model.predict(model_input) * 2
    pyfunc_artifact_path = "pyfunc_model"
    with mlflow.start_run():
        mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path,
                                artifacts={
                                    "sk_model": utils_get_artifact_uri(
                                        artifact_path=sklearn_artifact_path,
                                        run_id=sklearn_run_id)
                                },
                                python_model=main_scoped_model_class(test_predict))
        pyfunc_run_id = mlflow.active_run().info.run_uuid
    loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(path=pyfunc_artifact_path, run_id=pyfunc_run_id)
    np.testing.assert_array_equal(
        loaded_pyfunc_model.predict(model_input=iris_data[0]),
        test_predict(sk_model=sklearn_knn_model, model_input=iris_data[0]))
@pytest.mark.large
def test_add_to_model_adds_specified_kwargs_to_mlmodel_configuration():
    """add_to_model should record the pyfunc flavor plus any extra kwargs in the MLmodel config."""
    custom_kwargs = {
        "key1": "value1",
        "key2": 20,
        "key3": range(10),
    }
    model_config = Model()
    mlflow.pyfunc.add_to_model(model=model_config,
                               loader_module=os.path.basename(__file__)[:-3],
                               data="data",
                               code="code",
                               env=None,
                               **custom_kwargs)
    assert mlflow.pyfunc.FLAVOR_NAME in model_config.flavors
    assert all([item in model_config.flavors[mlflow.pyfunc.FLAVOR_NAME] for item in custom_kwargs])
@pytest.mark.large
def test_pyfunc_model_serving_without_conda_env_activation_succeeds_with_main_scoped_class(
        sklearn_knn_model, main_scoped_model_class, iris_data, tmpdir):
    """Serve the saved pyfunc model with --no-conda and check the REST scoring response matches local predictions."""
    sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model")
    mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path)

    def test_predict(sk_model, model_input):
        return sk_model.predict(model_input) * 2
    pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model")
    mlflow.pyfunc.save_model(dst_path=pyfunc_model_path,
                             artifacts={
                                 "sk_model": sklearn_model_path
                             },
                             python_model=main_scoped_model_class(test_predict))
    loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(path=pyfunc_model_path)
    sample_input = pd.DataFrame(iris_data[0])
    scoring_response = pyfunc_serve_and_score_model(
        model_path=pyfunc_model_path,
        data=sample_input,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
        extra_args=["--no-conda"])
    assert scoring_response.status_code == 200
    np.testing.assert_array_equal(
        np.array(json.loads(scoring_response.text)),
        loaded_pyfunc_model.predict(sample_input))
@pytest.mark.large
def test_pyfunc_model_serving_with_conda_env_activation_succeeds_with_main_scoped_class(
        sklearn_knn_model, main_scoped_model_class, iris_data, tmpdir):
    """Serve the saved pyfunc model inside its conda environment and check the scoring response."""
    sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model")
    mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path)

    def test_predict(sk_model, model_input):
        return sk_model.predict(model_input) * 2
    pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model")
    mlflow.pyfunc.save_model(dst_path=pyfunc_model_path,
                             artifacts={
                                 "sk_model": sklearn_model_path
                             },
                             python_model=main_scoped_model_class(test_predict))
    loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(path=pyfunc_model_path)
    sample_input = pd.DataFrame(iris_data[0])
    scoring_response = pyfunc_serve_and_score_model(
        model_path=pyfunc_model_path,
        data=sample_input,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
    assert scoring_response.status_code == 200
    np.testing.assert_array_equal(
        np.array(json.loads(scoring_response.text)),
        loaded_pyfunc_model.predict(sample_input))
@pytest.mark.large
def test_pyfunc_model_serving_without_conda_env_activation_succeeds_with_module_scoped_class(
        sklearn_knn_model, iris_data, tmpdir):
    """Serve a pyfunc model built from a module-scoped class (shipped via code_path) with --no-conda."""
    sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model")
    mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path)

    def test_predict(sk_model, model_input):
        return sk_model.predict(model_input) * 2
    pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model")
    # code_path ships the `tests` package so the module-scoped class can be imported at load time.
    mlflow.pyfunc.save_model(dst_path=pyfunc_model_path,
                             artifacts={
                                 "sk_model": sklearn_model_path
                             },
                             python_model=ModuleScopedSklearnModel(test_predict),
                             code_path=[os.path.dirname(tests.__file__)])
    loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(path=pyfunc_model_path)
    sample_input = pd.DataFrame(iris_data[0])
    scoring_response = pyfunc_serve_and_score_model(
        model_path=pyfunc_model_path,
        data=sample_input,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
        extra_args=["--no-conda"])
    assert scoring_response.status_code == 200
    np.testing.assert_array_equal(
        np.array(json.loads(scoring_response.text)),
        loaded_pyfunc_model.predict(sample_input))
@pytest.mark.large
def test_pyfunc_cli_predict_command_without_conda_env_activation_succeeds(
        sklearn_knn_model, main_scoped_model_class, iris_data, tmpdir):
    """`mlflow pyfunc predict --no-conda` on a CSV input should reproduce local predictions."""
    sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model")
    mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path)

    def test_predict(sk_model, model_input):
        return sk_model.predict(model_input) * 2
    pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model")
    mlflow.pyfunc.save_model(dst_path=pyfunc_model_path,
                             artifacts={
                                 "sk_model": sklearn_model_path
                             },
                             python_model=main_scoped_model_class(test_predict))
    loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(path=pyfunc_model_path)
    sample_input = pd.DataFrame(iris_data[0])
    # Path deliberately contains spaces to exercise CLI argument handling.
    input_csv_path = os.path.join(str(tmpdir), "input with spaces.csv")
    sample_input.to_csv(input_csv_path, header=True, index=False)
    output_csv_path = os.path.join(str(tmpdir), "output.csv")
    process = Popen(['mlflow', 'pyfunc', 'predict',
                     '--model-path', pyfunc_model_path,
                     '-i', input_csv_path,
                     '-o', output_csv_path,
                     '--no-conda'],
                    stderr=STDOUT,
                    preexec_fn=os.setsid)
    process.wait()
    result_df = pandas.read_csv(output_csv_path, header=None)
    np.testing.assert_array_equal(result_df.values.transpose()[0],
                                  loaded_pyfunc_model.predict(sample_input))
@pytest.mark.large
def test_pyfunc_cli_predict_command_with_conda_env_activation_succeeds(
        sklearn_knn_model, main_scoped_model_class, iris_data, tmpdir):
    """`mlflow pyfunc predict` (with conda env activation) on a CSV input should reproduce local predictions."""
    sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model")
    mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path)

    def test_predict(sk_model, model_input):
        return sk_model.predict(model_input) * 2
    pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model")
    mlflow.pyfunc.save_model(dst_path=pyfunc_model_path,
                             artifacts={
                                 "sk_model": sklearn_model_path
                             },
                             python_model=main_scoped_model_class(test_predict))
    loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(path=pyfunc_model_path)
    sample_input = pd.DataFrame(iris_data[0])
    # Path deliberately contains spaces to exercise CLI argument handling.
    input_csv_path = os.path.join(str(tmpdir), "input with spaces.csv")
    sample_input.to_csv(input_csv_path, header=True, index=False)
    output_csv_path = os.path.join(str(tmpdir), "output.csv")
    process = Popen(['mlflow', 'pyfunc', 'predict',
                     '--model-path', pyfunc_model_path,
                     '-i', input_csv_path,
                     '-o', output_csv_path],
                    stderr=STDOUT,
                    preexec_fn=os.setsid)
    process.wait()
    result_df = pandas.read_csv(output_csv_path, header=None)
    np.testing.assert_array_equal(result_df.values.transpose()[0],
                                  loaded_pyfunc_model.predict(sample_input))
@pytest.mark.large
def test_save_model_persists_specified_conda_env_in_mlflow_model_directory(
        sklearn_knn_model, main_scoped_model_class, pyfunc_custom_env, tmpdir):
    """save_model must copy the supplied conda env file into the model directory with identical content."""
    sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model")
    mlflow.sklearn.save_model(sk_model=sklearn_knn_model,
                              path=sklearn_model_path,
                              serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE)
    pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model")
    mlflow.pyfunc.save_model(dst_path=pyfunc_model_path,
                             artifacts={
                                 "sk_model": sklearn_model_path
                             },
                             python_model=main_scoped_model_class(predict_fn=None),
                             conda_env=pyfunc_custom_env)
    pyfunc_conf = _get_flavor_configuration(
        model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(pyfunc_model_path, pyfunc_conf[mlflow.pyfunc.ENV])
    assert os.path.exists(saved_conda_env_path)
    # The model directory must hold its own copy, not reference the original file.
    assert saved_conda_env_path != pyfunc_custom_env
    with open(pyfunc_custom_env, "r") as f:
        pyfunc_custom_env_parsed = yaml.safe_load(f)
    with open(saved_conda_env_path, "r") as f:
        saved_conda_env_parsed = yaml.safe_load(f)
    assert saved_conda_env_parsed == pyfunc_custom_env_parsed
@pytest.mark.large
def test_log_model_persists_specified_conda_env_in_mlflow_model_directory(
        sklearn_knn_model, main_scoped_model_class, pyfunc_custom_env):
    """log_model must copy the supplied conda env file into the logged model directory with identical content."""
    sklearn_artifact_path = "sk_model"
    with mlflow.start_run():
        mlflow.sklearn.log_model(sk_model=sklearn_knn_model, artifact_path=sklearn_artifact_path)
        sklearn_run_id = mlflow.active_run().info.run_uuid
    pyfunc_artifact_path = "pyfunc_model"
    with mlflow.start_run():
        mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path,
                                artifacts={
                                    "sk_model": utils_get_artifact_uri(
                                        artifact_path=sklearn_artifact_path,
                                        run_id=sklearn_run_id)
                                },
                                python_model=main_scoped_model_class(predict_fn=None),
                                conda_env=pyfunc_custom_env)
        pyfunc_run_id = mlflow.active_run().info.run_uuid
    pyfunc_model_path = _get_model_log_dir(pyfunc_artifact_path, pyfunc_run_id)
    pyfunc_conf = _get_flavor_configuration(
        model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(pyfunc_model_path, pyfunc_conf[mlflow.pyfunc.ENV])
    assert os.path.exists(saved_conda_env_path)
    # The logged model must hold its own copy, not reference the original file.
    assert saved_conda_env_path != pyfunc_custom_env
    with open(pyfunc_custom_env, "r") as f:
        pyfunc_custom_env_parsed = yaml.safe_load(f)
    with open(saved_conda_env_path, "r") as f:
        saved_conda_env_parsed = yaml.safe_load(f)
    assert saved_conda_env_parsed == pyfunc_custom_env_parsed
@pytest.mark.large
def test_save_model_without_specified_conda_env_uses_default_env_with_expected_dependencies(
        sklearn_logreg_model, main_scoped_model_class, tmpdir):
    """When no conda_env is given, save_model should write the default pyfunc conda environment."""
    sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model")
    mlflow.sklearn.save_model(sk_model=sklearn_logreg_model, path=sklearn_model_path)
    pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model")
    mlflow.pyfunc.save_model(dst_path=pyfunc_model_path,
                             artifacts={
                                 "sk_model": sklearn_model_path
                             },
                             python_model=main_scoped_model_class(predict_fn=None))
    pyfunc_conf = _get_flavor_configuration(
        model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME)
    conda_env_path = os.path.join(pyfunc_model_path, pyfunc_conf[mlflow.pyfunc.ENV])
    with open(conda_env_path, "r") as f:
        conda_env = yaml.safe_load(f)
    assert conda_env == mlflow.pyfunc.model.DEFAULT_CONDA_ENV
@pytest.mark.large
def test_log_model_without_specified_conda_env_uses_default_env_with_expected_dependencies(
        sklearn_knn_model, main_scoped_model_class):
    """When no conda_env is given, log_model should record the default pyfunc conda environment."""
    sklearn_artifact_path = "sk_model"
    with mlflow.start_run():
        mlflow.sklearn.log_model(sk_model=sklearn_knn_model, artifact_path=sklearn_artifact_path)
        sklearn_run_id = mlflow.active_run().info.run_uuid
    pyfunc_artifact_path = "pyfunc_model"
    with mlflow.start_run():
        mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path,
                                artifacts={
                                    "sk_model": utils_get_artifact_uri(
                                        artifact_path=sklearn_artifact_path,
                                        run_id=sklearn_run_id)
                                },
                                python_model=main_scoped_model_class(predict_fn=None))
        pyfunc_run_id = mlflow.active_run().info.run_uuid
    pyfunc_model_path = _get_model_log_dir(pyfunc_artifact_path, pyfunc_run_id)
    pyfunc_conf = _get_flavor_configuration(
        model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME)
    conda_env_path = os.path.join(pyfunc_model_path, pyfunc_conf[mlflow.pyfunc.ENV])
    with open(conda_env_path, "r") as f:
        conda_env = yaml.safe_load(f)
    assert conda_env == mlflow.pyfunc.model.DEFAULT_CONDA_ENV
@pytest.mark.large
def test_save_model_correctly_resolves_directory_artifact_with_nested_contents(
        tmpdir, model_path, iris_data):
    """Directory artifacts must be copied recursively, preserving nested file contents."""
    directory_artifact_path = os.path.join(str(tmpdir), "directory_artifact")
    nested_file_relative_path = os.path.join(
        "my", "somewhat", "heavily", "nested", "directory", "myfile.txt")
    nested_file_path = os.path.join(directory_artifact_path, nested_file_relative_path)
    os.makedirs(os.path.dirname(nested_file_path))
    nested_file_text = "some sample file text"
    with open(nested_file_path, "w") as f:
        f.write(nested_file_text)

    class ArtifactValidationModel(mlflow.pyfunc.PythonModel):
        # predict returns True only if the nested file survived the artifact copy intact.
        def predict(self, context, model_input):
            expected_file_path = os.path.join(
                context.artifacts["testdir"], nested_file_relative_path)
            if not os.path.exists(expected_file_path):
                return False
            else:
                with open(expected_file_path, "r") as f:
                    return (f.read() == nested_file_text)

    mlflow.pyfunc.save_model(dst_path=model_path,
                             artifacts={
                                 "testdir": directory_artifact_path
                             },
                             python_model=ArtifactValidationModel())
    loaded_model = mlflow.pyfunc.load_pyfunc(model_path)
    assert loaded_model.predict(iris_data[0])
@pytest.mark.large
def test_save_model_with_no_artifacts_does_not_produce_artifacts_dir(model_path):
    """Saving with artifacts=None must not create an artifacts directory or config entry."""
    mlflow.pyfunc.save_model(dst_path=model_path,
                             python_model=ModuleScopedSklearnModel(predict_fn=None),
                             artifacts=None)
    assert os.path.exists(model_path)
    assert "artifacts" not in os.listdir(model_path)
    pyfunc_conf = _get_flavor_configuration(
        model_path=model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME)
    assert mlflow.pyfunc.model.CONFIG_KEY_ARTIFACTS not in pyfunc_conf
@pytest.mark.large
def test_save_model_with_python_model_argument_of_invalid_type_raises_exeption(tmpdir):
    """Passing a non-PythonModel object as python_model must raise MlflowException."""
    with pytest.raises(MlflowException) as exc_info:
        mlflow.pyfunc.save_model(dst_path=os.path.join(str(tmpdir), "model1"),
                                 python_model="not the right type")
    assert "python_model` must be a subclass of `PythonModel`" in str(exc_info)
    # NOTE(review): this second case is identical to the first; it was presumably
    # intended to exercise a *different* invalid type — confirm.
    with pytest.raises(MlflowException) as exc_info:
        mlflow.pyfunc.save_model(dst_path=os.path.join(str(tmpdir), "model2"),
                                 python_model="not the right type")
    assert "python_model` must be a subclass of `PythonModel`" in str(exc_info)
@pytest.mark.large
def test_save_model_with_unsupported_argument_combinations_throws_exception(model_path):
    """save_model must reject invalid combinations of loader_module / python_model / data_path / artifacts."""
    # artifacts without a python_model (or loader_module) is invalid.
    with pytest.raises(MlflowException) as exc_info:
        mlflow.pyfunc.save_model(dst_path=model_path,
                                 artifacts={
                                     "artifact": "/path/to/artifact",
                                 },
                                 python_model=None)
    assert "Either `loader_module` or `python_model` must be specified" in str(exc_info)
    python_model = ModuleScopedSklearnModel(predict_fn=None)
    loader_module = __name__
    # python_model and loader_module are mutually exclusive.
    with pytest.raises(MlflowException) as exc_info:
        mlflow.pyfunc.save_model(dst_path=model_path,
                                 python_model=python_model,
                                 loader_module=loader_module)
    assert "The following sets of parameters cannot be specified together" in str(exc_info)
    assert str(python_model) in str(exc_info)
    assert str(loader_module) in str(exc_info)
    # data_path and artifacts are mutually exclusive.
    with pytest.raises(MlflowException) as exc_info:
        mlflow.pyfunc.save_model(dst_path=model_path,
                                 python_model=python_model,
                                 data_path="/path/to/data",
                                 artifacts={
                                     "artifact": "/path/to/artifact",
                                 })
    assert "The following sets of parameters cannot be specified together" in str(exc_info)
    # At least one of loader_module / python_model must be given.
    with pytest.raises(MlflowException) as exc_info:
        mlflow.pyfunc.save_model(dst_path=model_path,
                                 python_model=None,
                                 loader_module=None)
    assert "Either `loader_module` or `python_model` must be specified" in str(exc_info)
@pytest.mark.large
def test_log_model_with_unsupported_argument_combinations_throws_exception():
    """log_model must reject the same invalid argument combinations as save_model."""
    # artifacts without a python_model (or loader_module) is invalid.
    with mlflow.start_run(), pytest.raises(MlflowException) as exc_info:
        mlflow.pyfunc.log_model(artifact_path="pyfunc_model",
                                artifacts={
                                    "artifact": "/path/to/artifact",
                                },
                                python_model=None)
    assert "Either `loader_module` or `python_model` must be specified!" in str(exc_info)
    python_model = ModuleScopedSklearnModel(predict_fn=None)
    loader_module = __name__
    # python_model and loader_module are mutually exclusive.
    with mlflow.start_run(), pytest.raises(MlflowException) as exc_info:
        mlflow.pyfunc.log_model(artifact_path="pyfunc_model",
                                python_model=python_model,
                                loader_module=loader_module)
    assert "The following sets of parameters cannot be specified together" in str(exc_info)
    assert str(python_model) in str(exc_info)
    assert str(loader_module) in str(exc_info)
    # data_path and artifacts are mutually exclusive.
    with mlflow.start_run(), pytest.raises(MlflowException) as exc_info:
        mlflow.pyfunc.log_model(artifact_path="pyfunc_model",
                                python_model=python_model,
                                data_path="/path/to/data",
                                artifacts={
                                    "artifact1": "/path/to/artifact",
                                })
    assert "The following sets of parameters cannot be specified together" in str(exc_info)
    # At least one of loader_module / python_model must be given.
    with mlflow.start_run(), pytest.raises(MlflowException) as exc_info:
        mlflow.pyfunc.log_model(artifact_path="pyfunc_model",
                                python_model=None,
                                loader_module=None)
    assert "Either `loader_module` or `python_model` must be specified" in str(exc_info)
@pytest.mark.large
def test_load_model_with_differing_cloudpickle_version_at_micro_granularity_logs_warning(
        model_path):
    """Loading a model saved with a different cloudpickle version should emit a warning naming both versions."""
    class TestModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input):
            return model_input

    mlflow.pyfunc.save_model(dst_path=model_path, python_model=TestModel())
    # Rewrite the MLmodel config to pretend a different cloudpickle version saved the model.
    saver_cloudpickle_version = "0.5.8"
    model_config_path = os.path.join(model_path, "MLmodel")
    model_config = Model.load(model_config_path)
    model_config.flavors[mlflow.pyfunc.FLAVOR_NAME][
        mlflow.pyfunc.model.CONFIG_KEY_CLOUDPICKLE_VERSION] = saver_cloudpickle_version
    model_config.save(model_config_path)
    log_messages = []

    # Capture the fully formatted warning text for later inspection.
    def custom_warn(message_text, *args, **kwargs):
        log_messages.append(message_text % args % kwargs)
    loader_cloudpickle_version = "0.5.7"
    with mock.patch("mlflow.pyfunc._logger.warning") as warn_mock,\
            mock.patch("cloudpickle.__version__") as cloudpickle_version_mock:
        cloudpickle_version_mock.__str__ = lambda *args, **kwargs: loader_cloudpickle_version
        warn_mock.side_effect = custom_warn
        mlflow.pyfunc.load_pyfunc(path=model_path)
    assert any([
        "differs from the version of CloudPickle that is currently running" in log_message and
        saver_cloudpickle_version in log_message and
        loader_cloudpickle_version in log_message
        for log_message in log_messages
    ])
@pytest.mark.large
def test_load_model_with_missing_cloudpickle_version_logs_warning(
        model_path):
    """Loading a model whose MLmodel lacks the saved cloudpickle version should warn about it."""
    class TestModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input):
            return model_input

    mlflow.pyfunc.save_model(dst_path=model_path, python_model=TestModel())
    # Strip the recorded cloudpickle version from the MLmodel configuration.
    model_config_path = os.path.join(model_path, "MLmodel")
    model_config = Model.load(model_config_path)
    del model_config.flavors[mlflow.pyfunc.FLAVOR_NAME][
        mlflow.pyfunc.model.CONFIG_KEY_CLOUDPICKLE_VERSION]
    model_config.save(model_config_path)
    log_messages = []

    # Capture the fully formatted warning text for later inspection.
    def custom_warn(message_text, *args, **kwargs):
        log_messages.append(message_text % args % kwargs)
    with mock.patch("mlflow.pyfunc._logger.warning") as warn_mock:
        warn_mock.side_effect = custom_warn
        mlflow.pyfunc.load_pyfunc(path=model_path)
    assert any([
        ("The version of CloudPickle used to save the model could not be found in the MLmodel"
         " configuration") in log_message
        for log_message in log_messages
    ])
# TODO(czumar) Re-mark this test as "large" instead of "release" after SageMaker docker container
# build issues have been debugged
# @pytest.mark.large
@pytest.mark.release
def test_sagemaker_docker_model_scoring_with_default_conda_env(
        sklearn_logreg_model, main_scoped_model_class, iris_data, tmpdir):
    """Score the pyfunc model inside the SageMaker docker container and compare with local predictions."""
    sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model")
    mlflow.sklearn.save_model(sk_model=sklearn_logreg_model, path=sklearn_model_path)

    def test_predict(sk_model, model_input):
        return sk_model.predict(model_input) * 2
    pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model")
    mlflow.pyfunc.save_model(dst_path=pyfunc_model_path,
                             artifacts={
                                 "sk_model": sklearn_model_path
                             },
                             python_model=main_scoped_model_class(test_predict))
    reloaded_pyfunc = mlflow.pyfunc.load_pyfunc(path=pyfunc_model_path)
    inference_df = pd.DataFrame(iris_data[0])
    scoring_response = score_model_in_sagemaker_docker_container(
        model_path=pyfunc_model_path,
        data=inference_df,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
        flavor=mlflow.pyfunc.FLAVOR_NAME)
    deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content))
    # Allow dtype differences and minor float noise between container and local predictions.
    pandas.testing.assert_frame_equal(
        deployed_model_preds,
        pd.DataFrame(reloaded_pyfunc.predict(inference_df)),
        check_dtype=False,
        check_less_precise=6)
| 43.108631 | 100 | 0.673858 |
aad4c6fecc3b533efb1cdabeba8bf3651523aa70 | 941 | py | Python | Python/2.py | JWang169/LintCodeJava | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | [
"CNRI-Python"
] | 1 | 2020-12-10T05:36:15.000Z | 2020-12-10T05:36:15.000Z | Python/2.py | JWang169/LintCodeJava | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | [
"CNRI-Python"
] | null | null | null | Python/2.py | JWang169/LintCodeJava | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | [
"CNRI-Python"
] | 3 | 2020-04-06T05:55:08.000Z | 2021-08-29T14:26:54.000Z | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """LeetCode 2: add two numbers stored as reversed-digit linked lists."""

    def addTwoNumbers(self, l1, l2):
        """
        Add two non-negative integers whose digits are stored in linked lists
        in reverse order, returning the sum in the same representation.

        Bug fix: the original early-exit tested ``not l2 and not l2`` (l2
        twice), so a non-empty ``l1`` paired with an empty ``l2`` incorrectly
        returned ``None``. Also removes the unused ``result``/``weight``
        locals.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        if not l1 and not l2:
            return None
        carry = 0
        dummy = ListNode(0)
        tail = dummy
        while l1 or l2 or carry:
            digit_sum = carry
            if l1:
                digit_sum += l1.val
                l1 = l1.next
            if l2:
                digit_sum += l2.val
                l2 = l2.next
            carry, digit = divmod(digit_sum, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        # At least one input list was non-empty, so at least one digit node
        # was appended and dummy.next is never None here.
        return dummy.next
| 23.525 | 37 | 0.411265 |
43134c6276ef1bc547fa4b7b2a59a66188e900d5 | 930 | py | Python | Semester II/Compiler Programming/3. RegEX DFA/regex dfa.py | STreK7/MSc.-CS | 78484f5bbce9f5149da680b19626eb139cc5ca90 | [
"Apache-2.0"
] | null | null | null | Semester II/Compiler Programming/3. RegEX DFA/regex dfa.py | STreK7/MSc.-CS | 78484f5bbce9f5149da680b19626eb139cc5ca90 | [
"Apache-2.0"
] | null | null | null | Semester II/Compiler Programming/3. RegEX DFA/regex dfa.py | STreK7/MSc.-CS | 78484f5bbce9f5149da680b19626eb139cc5ca90 | [
"Apache-2.0"
] | 2 | 2021-10-12T14:01:39.000Z | 2022-01-23T14:28:55.000Z | # Function to find whether the given
# is Accepted by the DFA
def DFA(str, N):
    """
    Simulate a DFA over ``str[:N]`` and print "Yes" if the string is
    accepted, otherwise "No".

    Accepted language: strings of length >= 2 that start with 'C' followed
    exclusively by 'A'/'B' characters.

    Fix: the reject branch's ``else`` binding was ambiguous in the original
    (attached as a for-else it would make "Yes" unreachable); the control
    flow is rewritten with explicit early rejects so the intent is
    unmistakable.

    Note: ``str`` shadows the builtin; the name is kept for interface
    compatibility with the existing driver code.
    """
    # Strings of length <= 1 can never reach the accepting state.
    if N <= 1:
        print("No")
        return
    # The first character must be 'C' to leave the start state.
    if str[0] != 'C':
        print("No")
        return
    # Count 'C' plus the run of 'A'/'B' characters that follows it; the
    # string is accepted only if that run covers the whole input.
    count = 1
    for i in range(1, N):
        if str[i] == 'A' or str[i] == 'B':
            count += 1
        else:
            break
    print("Yes" if count == N else "No")
# Driver Code
if __name__ == '__main__':
    # Read a string from stdin and report whether the DFA accepts it.
    str = input("Enter the String \n")
    N = len(str)
    DFA(str, N)
| 21.627907 | 49 | 0.448387 |
a2e8f6bbe0ee9d8f2d3a3759ad1f08bb7c69b357 | 2,510 | py | Python | fixture/group.py | Sazulira/python_training | 795d8b0f575dc61af989832b951a70dc96069842 | [
"Apache-2.0"
] | null | null | null | fixture/group.py | Sazulira/python_training | 795d8b0f575dc61af989832b951a70dc96069842 | [
"Apache-2.0"
] | null | null | null | fixture/group.py | Sazulira/python_training | 795d8b0f575dc61af989832b951a70dc96069842 | [
"Apache-2.0"
] | null | null | null | from model.group import Group
class GroupHelper:
    """Page-object style helper for the "groups" pages of the app under test.

    Wraps the Selenium WebDriver exposed as ``app.wd`` with high-level
    group CRUD actions used by the test fixtures.
    """
    def __init__(self, app):
        # `app` is the application fixture; `app.wd` is the Selenium WebDriver.
        self.app = app
    def open_groups_page(self):
        """Navigate to the groups page unless it is already open."""
        wd = self.app.wd
        # Already on /group.php with the "new" button present -> nothing to do.
        if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
            wd.find_element_by_link_text("groups"). click()
    def create(self, group):
        """Create a new group from a ``Group`` model object."""
        wd = self.app.wd
        self.open_groups_page()
        # init group creation
        wd.find_element_by_name("new").click()
        self.fill_group_form(group)
        # submit group creation
        wd.find_element_by_name("submit").click()
        self.return_to_groups_page()
    def fill_group_form(self, group):
        """Fill the group form fields from the model (fields that are None are skipped)."""
        wd = self.app.wd
        self.change_field_value("group_name", group.name)
        self.change_field_value("group_header", group.header)
        self.change_field_value("group_footer", group.footer)
    def change_field_value(self, field, text):
        """Replace the content of input ``field`` with ``text``; no-op when text is None."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field).click()
            wd.find_element_by_name(field).clear()
            wd.find_element_by_name(field).send_keys(text)
    def delete_first_group(self):
        """Delete the first group listed on the groups page."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_first_group()
        # submit deletion
        wd.find_element_by_name("delete").click()
        self.return_to_groups_page()
    def select_first_group(self):
        """Tick the checkbox of the first listed group."""
        wd = self.app.wd
        wd.find_element_by_name("selected[]").click()
    def return_to_groups_page(self):
        """Follow the "group page" link back to the groups listing."""
        wd = self.app.wd
        wd.find_element_by_link_text("group page").click()
    def modify_first_group(self, new_group_data):
        """Edit the first listed group, overwriting its fields with ``new_group_data``.

        NOTE(review): unlike delete_first_group, this does not call
        open_groups_page() first — it assumes the groups page is already
        open; confirm callers guarantee that.
        """
        wd = self.app.wd
        self.select_first_group()
        # open modification form
        wd.find_element_by_name("edit").click()
        # fill group form
        self.fill_group_form(new_group_data)
        # submit modification
        wd.find_element_by_name("update").click()
        self.return_to_groups_page()
    def count(self):
        """Return the number of groups currently listed (one checkbox per group)."""
        wd = self.app.wd
        self.open_groups_page()
        return len(wd.find_elements_by_name("selected[]"))
    def get_group_list(self):
        """Return the listed groups as ``Group`` models (name from the span text, id from the checkbox value)."""
        wd = self.app.wd
        self.open_groups_page()
        groups = []
        for element in wd.find_elements_by_css_selector("span.group"):
            text = element.text
            id = element.find_element_by_name("selected[]").get_attribute("value")
            groups.append(Group(name=text, id=id))
        return groups
| 32.179487 | 100 | 0.632669 |
402d144492f7ee85243f360a30bf9d4bda754ba4 | 1,465 | py | Python | app/database/database_functions/messages.py | tayron1/Starboard-2 | ff83e33e026107d94c306250827373ff2c32aa7d | [
"MIT"
] | 16 | 2021-01-19T19:12:00.000Z | 2021-12-21T12:00:04.000Z | app/database/database_functions/messages.py | Davi-the-Mudkip/Starboard-2 | 4de3c689ffef007e4f4a279251d107d890b69b15 | [
"MIT"
] | 15 | 2021-04-02T16:58:48.000Z | 2022-03-28T06:09:49.000Z | app/database/database_functions/messages.py | Davi-the-Mudkip/Starboard-2 | 4de3c689ffef007e4f4a279251d107d890b69b15 | [
"MIT"
] | 13 | 2021-01-21T14:26:00.000Z | 2021-09-29T18:55:17.000Z | import asyncpg
from app import errors
class Messages:
    """Database access helpers for the ``messages`` table."""
    def __init__(self, db) -> None:
        # `db` is the database wrapper exposing fetchrow/execute plus the
        # sub-accessors (sb_messages, guilds) used below.
        self.db = db
    async def get(self, message_id: int) -> dict:
        """Return the stored row for ``message_id`` (None when absent)."""
        return await self.db.fetchrow(
            """SELECT * FROM messages
            WHERE id=$1""",
            message_id,
        )
    async def create(
        self,
        message_id: int,
        guild_id: int,
        channel_id: int,
        author_id: int,
        is_nsfw: bool,
        check_first: bool = True,
    ) -> bool:
        """Insert a message row; return True when the row already existed.

        ``check_first`` short-circuits with a SELECT before attempting the
        INSERT; a concurrent insert is still handled via the unique-violation
        catch below.

        Raises:
            errors.AlreadyStarboardMessage: if ``message_id`` belongs to a
                starboard message instead of a regular message.
        """
        if check_first:
            exists = await self.get(message_id) is not None
            if exists:
                return True
        is_starboard_message = (
            await self.db.sb_messages.get(message_id) is not None
        )
        if is_starboard_message:
            raise errors.AlreadyStarboardMessage(
                f"Could not create message {message_id} "
                "because it is already starboard message."
            )
        # Presumably ensures the parent guild row exists before inserting
        # (messages reference guild_id) — confirm against guilds.create.
        await self.db.guilds.create(guild_id)
        try:
            await self.db.execute(
                """INSERT INTO messages
                (id, guild_id, channel_id, author_id, is_nsfw)
                VALUES ($1, $2, $3, $4, $5)""",
                message_id,
                guild_id,
                channel_id,
                author_id,
                is_nsfw,
            )
        except asyncpg.exceptions.UniqueViolationError:
            # Row was created concurrently (or check_first was False):
            # report "already existed" rather than failing.
            return True
        return False
| 26.160714 | 65 | 0.509898 |
0b479eaf50e195aa824202c76c5cc45f34927641 | 7,243 | py | Python | src/models/helpers/helpers_training.py | elbuco1/AttentionMechanismsTrajectoryPrediction | 653f3f4cea55e284a68c8ab01325d8e8307d4ae1 | [
"MIT"
] | 35 | 2019-10-20T11:43:58.000Z | 2022-03-10T10:49:14.000Z | src/models/helpers/helpers_training.py | elbuco1/trajectory | 653f3f4cea55e284a68c8ab01325d8e8307d4ae1 | [
"MIT"
] | null | null | null | src/models/helpers/helpers_training.py | elbuco1/trajectory | 653f3f4cea55e284a68c8ab01325d8e8307d4ae1 | [
"MIT"
] | 9 | 2020-05-22T13:16:16.000Z | 2021-04-26T19:11:29.000Z | import torch
import time
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import numpy as np
from joblib import load
import matplotlib.cm as cm
import json
from datasets.datasets import Hdf5Dataset,CustomDataLoader
from matplotlib.lines import Line2D
import random
def load_data_loaders(parameters_project, raw_parameters, prepare_param,training_param,net_params,data_file,scenes):
    """Build the train/eval Hdf5Dataset objects and their CustomDataLoaders.

    ``scenes`` is a 4-tuple (train_eval_scenes, train_scenes, test_scenes,
    eval_scenes); which splits are actually used depends on
    ``training_param["set_type_train"]`` / ``["set_type_test"]``.

    Returns:
        (train_loader, eval_loader, train_dataset, eval_dataset)
    """
    train_eval_scenes,train_scenes,test_scenes,eval_scenes = scenes
    # Optionally train on the union of train+eval scenes, and test on the eval split.
    if training_param["set_type_train"] == "train_eval":
        train_scenes = train_eval_scenes
    if training_param["set_type_test"] == "eval":
        test_scenes = eval_scenes
    froze_cnn = 0
    if "froze_cnn" in net_params:
        froze_cnn = net_params["froze_cnn"]
    train_dataset = Hdf5Dataset(
        hdf5_file= data_file,
        scene_list= train_scenes,
        t_obs=prepare_param["t_obs"],
        t_pred=prepare_param["t_pred"],
        set_type = training_param["set_type_train"],
        data_type = "trajectories",
        use_neighbors = net_params["use_neighbors"],
        use_masks = 1,
        predict_offsets = net_params["offsets"],
        offsets_input = net_params["offsets_input"],
        padding = prepare_param["padding"],
        use_images = net_params["use_images"],
        images_path = parameters_project["raw_images"],
        froze_cnn= froze_cnn,
        pixel_to_meters = raw_parameters["pixel_meter_ratios"],
        data_augmentation = training_param["data_augmentation"]
    )
    # Evaluation dataset: no data augmentation and no pixel-to-meter conversion args.
    eval_dataset = Hdf5Dataset(
        hdf5_file= data_file,
        scene_list= test_scenes, #eval_scenes
        t_obs=prepare_param["t_obs"],
        t_pred=prepare_param["t_pred"],
        set_type = training_param["set_type_test"], #eval
        data_type = "trajectories",
        use_neighbors = net_params["use_neighbors"],
        use_masks = 1,
        predict_offsets = net_params["offsets"],
        offsets_input = net_params["offsets_input"],
        padding = prepare_param["padding"],
        use_images = net_params["use_images"],
        images_path = parameters_project["raw_images"],
        froze_cnn= froze_cnn
    )
    # Training batches are shuffled; evaluation batches are not.
    train_loader = CustomDataLoader( batch_size = training_param["batch_size"],shuffle = True,drop_last = True,dataset = train_dataset,test=training_param["test"])
    eval_loader = CustomDataLoader( batch_size = training_param["batch_size"],shuffle = False,drop_last = True,dataset = eval_dataset,test=training_param["test"])
    return train_loader,eval_loader,train_dataset,eval_dataset
class MaskedLoss(nn.Module):
    """Criterion wrapper that masks out padded entries before averaging.

    The wrapped ``criterion`` must return an unreduced (per-element) loss —
    the explicit ``sum`` reductions in ``forward`` rely on that.
    """
    def __init__(self,criterion):
        super(MaskedLoss, self).__init__()
        self.criterion = criterion
    def forward(self, outputs, targets, mask = None):
        """Return the batch mean of the per-sample masked, normalised loss.

        Inputs are presumably 4-D (batch, agents, steps, coords) — the three
        trailing-dim sums assume exactly four dimensions; TODO confirm.
        """
        if mask is None:
            mask = torch.ones_like(targets)
        # if first_only:
        #     mask[:,1:,:,:] = 0
        loss = self.criterion(outputs*mask, targets*mask)
        # a: per-sample count of agents with at least one unmasked value.
        # NOTE(review): .cuda() hard-codes GPU execution — this fails on
        # CPU-only runs; confirm whether device should follow the inputs.
        a = (mask.sum(-1).sum(-1)>0).cuda().float()
        # loss = torch.sqrt(loss.sum(dim = -1))
        loss = loss.sum(dim = -1)
        loss = loss.sum(dim = -1)
        loss = loss.sum(dim = -1)/(a.sum(-1))
        loss = loss.mean(dim = -1)
        return loss
def ade_loss(outputs, targets, mask=None):
    """Average Displacement Error: mean L2 distance per unmasked point.

    Squared coordinate differences are summed over dim 3 and square-rooted
    to get a per-point Euclidean distance, then totalled and divided by the
    number of unmasked points (``mask.sum() / 2`` — each point contributes
    two coordinates to the mask).
    """
    if mask is None:
        mask = torch.ones_like(targets)
    diff = outputs * mask - targets * mask
    point_dist = torch.sqrt((diff ** 2).sum(dim=3))
    n_points = mask.sum() / 2.0
    return point_dist.sum() / n_points
def fde_loss(outputs,targets,mask):
    """Final Displacement Error: mean L2 distance at each sequence's last valid step.

    For every flattened (batch x agent) sequence the number of time steps
    with a non-zero mask is counted (``ids``); the prediction/target pair at
    that last valid step is compared, and the distances are normalised by
    ``mask[:, -1].sum() / 2`` (two coordinates per point).
    """
    if mask is None:
        mask = torch.ones_like(targets)
    # if mask is not None:
    outputs,targets = outputs*mask, targets*mask
    # Flatten batch and agent dimensions: (b, n, s, i) -> (b*n, s, i).
    b,n,s,i = outputs.size()
    outputs = outputs.view(b*n,s,i)
    targets = targets.view(b*n,s,i)
    mask = mask.view(b*n,s,i)
    # ids: per sequence, the count of time steps with at least one unmasked coordinate.
    ids = (mask.sum(dim = -1) > 0).sum(dim = -1)
    points_o = []
    points_t = []
    mask_n = []
    for seq_o,seq_t,m,id in zip(outputs,targets,mask,ids):
        # Fully masked (id == 0) or fully valid (id == s) sequences use the
        # final step; otherwise the last valid step (index id - 1) is used.
        if id == 0 or id == s:
            points_o.append(seq_o[-1])
            points_t.append(seq_t[-1])
            mask_n.append(m[-1])
        else:
            points_o.append(seq_o[id-1])
            points_t.append(seq_t[id-1])
            mask_n.append(m[id-1])
    points_o = torch.stack([po for po in points_o],dim = 0)
    points_t = torch.stack([pt for pt in points_t], dim = 0)
    # NOTE(review): mask_n is gathered but never used below — the final
    # normalisation uses mask[:, -1] instead; confirm this is intentional.
    mask_n = torch.stack([m for m in mask_n], dim = 0)
    mse = nn.MSELoss(reduction= "none")
    mse_loss = mse(points_o,points_t )
    mse_loss = torch.sum(mse_loss,dim = 1 )
    mse_loss = torch.sqrt(mse_loss )
    # if mask is not None:
    mask = mask[:,-1]
    mse_loss = mse_loss.sum()/(mask.sum()/2.0)
    # else:
    #     mse_loss = torch.mean(mse_loss )
    return mse_loss
def plot_grad_flow(named_parameters,epoch,root = "./data/reports/gradients/"):
    '''Plots the gradients flowing through different layers in the net during training.
    Can be used for checking for possible gradient vanishing / exploding problems.
    Usage: Plug this function in Trainer class after loss.backwards() as
    "plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow

    The figure is written under ``root`` with a timestamp in the filename;
    the ``epoch`` argument is unused by the active savefig call (see the
    commented-out line below that used it).
    '''
    ave_grads = []
    max_grads= []
    layers = []
    for n, p in named_parameters:
        # Skip frozen parameters and biases: one bar per weight tensor.
        if(p.requires_grad) and ("bias" not in n):
            layers.append(n)
            ave_grads.append(p.grad.abs().mean())
            max_grads.append(p.grad.abs().max())
    fig, ax = plt.subplots()
    # Overlay max (cyan) and mean (blue) gradient magnitudes per layer.
    ax.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
    ax.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
    ax.hlines(0, 0, len(ave_grads)+1, lw=2, color="k" )
    ax.set_xticks(range(0, len(ave_grads), 1))
    ax.set_xticklabels(layers, rotation='vertical', fontsize='small')
    ax.set_yscale('log')
    ax.set_xlabel("Layers")
    ax.set_ylabel("Gradient magnitude")
    ax.set_title('Gradient flow')
    ax.grid(True)
    lgd = ax.legend([Line2D([0], [0], color="c", lw=4),
                     Line2D([0], [0], color="b", lw=4),
                     Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
    # plt.savefig("{}gradients_{}.jpg".format(root,epoch), bbox_extra_artists=(lgd,), bbox_inches='tight')
    fig.tight_layout()
    plt.savefig("{}gradients_{}.jpg".format(root,time.time()), bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close()
def offsets_to_trajectories(inputs, labels, outputs, offsets, offsets_input, last_points, input_last):
    """Convert offset-encoded labels/outputs back into absolute trajectories.

    Args:
        inputs: observed input points (replaced by ``input_last`` when
            ``offsets_input == 1``).
        labels, outputs: ground-truth and predicted points; when
            ``offsets == 1`` they are offsets relative to ``last_points``.
        offsets: offset mode flag (1 = add last point back; 2 = unsupported).
        offsets_input: when 1, use ``input_last`` as the inputs.
        last_points: last absolute point(s), added back in offset mode 1.
        input_last: replacement inputs used when ``offsets_input == 1``.

    Returns:
        (inputs, labels, outputs) tuple, with offsets resolved when requested.
    """
    if offsets_input == 1:
        inputs = input_last
    if offsets == 1:
        # Offsets are relative to the last observed point: add it back.
        labels = np.add(last_points, labels)
        outputs = np.add(last_points, outputs)
    elif offsets == 2:
        # BUG FIX: this branch previously fell through without a return,
        # so the function implicitly returned None and tuple-unpacking at
        # the call site crashed.  Warn and return the data unchanged.
        print("offset 2 not allowed")
    return inputs, labels, outputs
| 30.179167 | 163 | 0.629711 |
3c148da18717e9e927e4eb7d5c981aecd4edb663 | 7,575 | py | Python | src/modlog.py | DiscordLiz/salamander | 87e8dbddacd4d55672491685007237493295cf5a | [
"Apache-2.0"
] | null | null | null | src/modlog.py | DiscordLiz/salamander | 87e8dbddacd4d55672491685007237493295cf5a | [
"Apache-2.0"
] | 1 | 2021-03-23T05:13:57.000Z | 2021-03-23T05:41:41.000Z | src/modlog.py | DiscordLiz/salamander | 87e8dbddacd4d55672491685007237493295cf5a | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-present Michael Hall
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import contextlib
import apsw
import discord
from .utils import MainThreadSingletonMeta as Singleton
# Statements which get used for multiple functions here
INSERT_USER_ID = """
INSERT INTO user_settings (user_id)
VALUES (?)
ON CONFLICT (user_id) DO NOTHING
"""
INSERT_MEMBER_IDS = """
INSERT INTO member_settings (user_id, guild_id)
VALUES (?, ?)
ON CONFLICT (user_id, guild_id) DO NOTHING
"""
INSERT_OR_IGNORE_GUILD = """
INSERT INTO guild_settings (guild_id) VALUES (?)
ON CONFLICT (guild_id) DO NOTHING
"""
BASIC_MODLOG_INSERT = """
INSERT INTO mod_log (
mod_action,
mod_id,
guild_id,
target_id,
reason,
username_at_action,
discrim_at_action,
nick_at_action
)
VALUES (
:action_name,
:mod_id,
:guild_id,
:target_id,
:reason,
:username,
:discrim,
:nick
)
"""
class ModlogHandler(metaclass=Singleton):
    """Persists moderation actions (kick, ban, mute, ...) to the mod_log table.

    Every public method follows the same write pattern: make sure the user,
    guild, and member rows exist (so the foreign keys hold), then insert one
    mod_log row.  That shared pattern previously lived as five near-identical
    copy-pasted method bodies; it is now factored into :meth:`_record_action`.
    """

    def __init__(self, connection: apsw.Connection):
        self._conn: apsw.Connection = connection
        with contextlib.closing(self._conn.cursor()) as cursor:
            # The inserts below rely on FK relationships being enforced.
            cursor.execute(""" PRAGMA foreign_keys=ON """)

    def _record_action(
        self,
        action_name: str,
        mod: discord.Member,
        target_id: int,
        reason: str,
        username: str = "",
        discrim: str = "",
        nick: str = "",
    ):
        """Insert one mod-log row, creating user/guild/member rows as needed.

        All statements run inside a single transaction (the connection is
        used as a context manager alongside the cursor).
        """
        with contextlib.closing(self._conn.cursor()) as cursor, self._conn:
            guild_id = mod.guild.id
            cursor.executemany(
                INSERT_USER_ID, ((target_id,), (mod.id,)),
            )
            cursor.execute(
                INSERT_OR_IGNORE_GUILD, (guild_id,),
            )
            cursor.executemany(
                INSERT_MEMBER_IDS, ((target_id, guild_id), (mod.id, guild_id)),
            )
            cursor.execute(
                BASIC_MODLOG_INSERT,
                dict(
                    action_name=action_name,
                    mod_id=mod.id,
                    guild_id=guild_id,
                    target_id=target_id,
                    reason=reason,
                    username=username,
                    discrim=discrim,
                    nick=nick,
                ),
            )

    def _record_member_action(
        self, action_name: str, mod: discord.Member, target: discord.Member, reason: str
    ):
        """Record an action against a member, snapshotting their current names."""
        self._record_action(
            action_name,
            mod,
            target.id,
            reason,
            username=target.name,
            discrim=target.discriminator,
            nick=target.nick,
        )

    def member_kick(self, mod: discord.Member, target: discord.Member, reason: str):
        """Log a kick of *target* performed by *mod*."""
        self._record_member_action("KICK", mod, target, reason)

    def member_ban(self, mod: discord.Member, target: discord.Member, reason: str):
        """Log a ban of *target* performed by *mod*."""
        self._record_member_action("BAN", mod, target, reason)

    def member_muted(self, mod: discord.Member, target: discord.Member, reason: str):
        """Log a mute of *target* performed by *mod*."""
        self._record_member_action("MUTE", mod, target, reason)

    def member_unmuted(self, mod: discord.Member, target: discord.Member, reason: str):
        """Log an unmute of *target* performed by *mod*."""
        self._record_member_action("UNMUTE", mod, target, reason)

    def member_tempmuted(
        self, mod: discord.Member, target: discord.Member, reason: str
    ):
        """Log a temporary mute of *target* performed by *mod*."""
        self._record_member_action("TEMPMUTE", mod, target, reason)

    def user_ban(self, mod: discord.Member, target_id: int, reason: str):
        """Log a ban of a user who is not in the guild (hackban).

        Name fields are stored empty because the user object is unavailable.
        BUG FIX: the original also issued a redundant extra
        ``INSERT_USER_ID`` execute for ``target_id`` before the executemany
        that inserts the same row; the duplicate is removed.
        """
        self._record_action("HACKBAN", mod, target_id, reason)
| 31.5625 | 87 | 0.520132 |
1b906d1f80396450dfaf4a1766b02312c4805981 | 1,515 | py | Python | ADT_LL.py | kamwithak/competitiveProgramming | ab4433568081900212a8a987d7bf8cb78d2698d1 | [
"MIT"
] | null | null | null | ADT_LL.py | kamwithak/competitiveProgramming | ab4433568081900212a8a987d7bf8cb78d2698d1 | [
"MIT"
] | 1 | 2020-07-19T15:40:25.000Z | 2020-07-19T15:40:25.000Z | ADT_LL.py | kamwithak/competitiveProgramming | ab4433568081900212a8a987d7bf8cb78d2698d1 | [
"MIT"
] | null | null | null | class Node:
    def __init__(self, value, next=None):
        """Create a list node; the payload is coerced to str, `next` links onward."""
        self.value = str(value)
        self.next = next
class LinkedList:
    """Singly linked list whose nodes hold stringified integers 1..size.

    Nodes are prepended during construction, so after ``LinkedList(n)`` the
    head value is ``str(n)`` and the tail value is ``'1'``.
    """

    def __init__(self, size=1, head=None):
        # BUG FIX: the previous signature was ``head=Node(1)``.  Default
        # arguments are evaluated once at definition time, so every list
        # created without an explicit head shared (and mutated) the same
        # Node instance.  A fresh node is now created per instance; passing
        # an explicit head behaves exactly as before.
        self.head = Node(1) if head is None else head
        self.tail = self.head
        self.curNumberOfNodes = 1
        self.loadLinkedList(size)

    def loadLinkedList(self, tailValue):
        """Prepend nodes 2..tailValue; no-op when tailValue is 0."""
        if tailValue == 0:
            return
        for num in range(2, tailValue + 1):
            self.addToFront(Node(num))

    def printIteratively(self):
        """Print the list as ``LinkedList: a -> b -> ... -> None``."""
        cur = self.head
        _str = ''
        while cur:  # O(n) - iterative walk
            _str += str(cur.value) + ' -> '
            cur = cur.next
            if cur is None:
                _str += 'None'
        print('LinkedList: ' + _str)

    def reverse(self):
        """Reverse the list in place, swapping head and tail."""
        cur = self.head
        prev = None
        while cur:
            next_node = cur.next
            cur.next = prev
            prev = cur
            cur = next_node
        self.tail = self.head
        self.head = prev

    def addToFront(self, new_node):  # O(1) - constant time
        """Prepend *new_node* and bump the node count."""
        new_node.next = self.head
        self.head = new_node
        self.curNumberOfNodes += 1

    def getHead(self):
        return self.head

    def getTail(self):
        return self.tail

    def getCurrentSizeOfLinkedList(self):
        return self.curNumberOfNodes
if __name__ == '__main__':
    # Demo: build the list 10 -> 9 -> ... -> 1, show it, then reverse it.
    obj = LinkedList(10)
    obj.printIteratively()
    print("Head: " + obj.getHead().value)
    print("Tail: " + obj.getTail().value)
    print("Reverse operation...")
    obj.reverse()
    obj.printIteratively()
    # After reversal head/tail have swapped values.
    print("Head: " + obj.getHead().value)
    print("Tail: " + obj.getTail().value)
    print(obj.getHead().value)
    print(obj.getTail().value)
dc45162a1706d510fa6f927d59af4cdbe56b585f | 2,232 | py | Python | aliyun-python-sdk-slb/aliyunsdkslb/request/v20140515/DescribeRuleAttributeRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | 1 | 2019-12-23T12:36:43.000Z | 2019-12-23T12:36:43.000Z | aliyun-python-sdk-slb/aliyunsdkslb/request/v20140515/DescribeRuleAttributeRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-slb/aliyunsdkslb/request/v20140515/DescribeRuleAttributeRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | 1 | 2021-02-23T11:27:54.000Z | 2021-02-23T11:27:54.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeRuleAttributeRequest(RpcRequest):
    """RPC request for the SLB ``DescribeRuleAttribute`` API (version 2014-05-15).

    Each accessor pair below reads/writes one named query parameter on the
    underlying request object.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Slb', '2014-05-15', 'DescribeRuleAttribute', 'slb')

    # --- credential / ownership parameters -------------------------------

    def get_access_key_id(self):
        return self.get_query_params().get('access_key_id')

    def set_access_key_id(self, access_key_id):
        self.add_query_param('access_key_id', access_key_id)

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    # --- request-specific parameters -------------------------------------

    def get_RuleId(self):
        return self.get_query_params().get('RuleId')

    def set_RuleId(self, RuleId):
        self.add_query_param('RuleId', RuleId)

    def get_Tags(self):
        return self.get_query_params().get('Tags')

    def set_Tags(self, Tags):
        self.add_query_param('Tags', Tags)
2ae3c3c9af97183b392b4faf18954b9f7643efca | 582 | py | Python | tests/test_config.py | greck2908/beyond | 334a37202a7138050e46d97f719f81a4350987b5 | [
"MIT"
] | 33 | 2017-08-23T06:21:29.000Z | 2022-03-14T07:39:45.000Z | tests/test_config.py | greck2908/beyond | 334a37202a7138050e46d97f719f81a4350987b5 | [
"MIT"
] | 22 | 2017-03-12T17:22:40.000Z | 2022-02-04T11:25:46.000Z | tests/test_config.py | galactics/space-api | 334a37202a7138050e46d97f719f81a4350987b5 | [
"MIT"
] | 7 | 2018-11-18T16:53:34.000Z | 2021-12-01T08:12:59.000Z | from pytest import raises
from beyond.config import config, ConfigError
def test_get():
assert config.get("eop", "missing_policy") == "pass"
assert config.get("dummy1", "dummy2") is None
assert config.get("dummy1", "dummy2", fallback=False) == False
assert config.get("dummy1", "dummy2", fallback="hello") == "hello"
with raises(ConfigError):
config.get("eop", "missing_policy", "dummy")
def test_set():
config.set("dummy1", "dummy2", "test")
assert config["dummy1"]["dummy2"] == "test"
assert config.get("dummy1", "dummy2") == "test"
| 27.714286 | 70 | 0.649485 |
0700f85540ea0395046d0280360d525a85de616e | 3,356 | py | Python | dataloaders/utils.py | ixhorse/pytorch-deeplab-xception | 568be707c2fb4852ce84fa2ec019c44e305681a8 | [
"MIT"
] | null | null | null | dataloaders/utils.py | ixhorse/pytorch-deeplab-xception | 568be707c2fb4852ce84fa2ec019c44e305681a8 | [
"MIT"
] | null | null | null | dataloaders/utils.py | ixhorse/pytorch-deeplab-xception | 568be707c2fb4852ce84fa2ec019c44e305681a8 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import torch
def decode_seg_map_sequence(label_masks, dataset='pascal'):
    """Decode a batch of label masks into a tensor of RGB images.

    Args:
        label_masks: iterable of per-image label masks.
        dataset: palette selector forwarded to :func:`decode_segmap`.

    Returns:
        torch.Tensor with the colour channel moved to axis 1 (NCHW layout).
    """
    decoded = [decode_segmap(mask, dataset) for mask in label_masks]
    return torch.from_numpy(np.array(decoded).transpose([0, 3, 1, 2]))
def decode_segmap(label_mask, dataset, plot=False):
    """Decode segmentation class labels into a color image.

    Args:
        label_mask (np.ndarray): an (M, N) array of integer class labels.
        dataset: which colour palette to use ('pascal'/'coco'/'tt100k'/
            'visdrone' share the Pascal palette; 'cityscapes' has its own).
        plot (bool, optional): if True, show the image instead of returning it.

    Returns:
        (np.ndarray, optional): the decoded (M, N, 3) image scaled to [0, 1],
        or None when ``plot`` is True.
    """
    if dataset in ('pascal', 'coco', 'tt100k', 'visdrone'):
        n_classes = 21
        label_colours = get_pascal_labels()
    elif dataset == 'cityscapes':
        n_classes = 19
        label_colours = get_cityscapes_labels()
    else:
        raise NotImplementedError
    # Start each channel from a copy of the mask so labels outside
    # [0, n_classes) keep their raw values, exactly as before.
    channels = [label_mask.copy() for _ in range(3)]
    for class_idx in range(n_classes):
        hit = label_mask == class_idx
        for c in range(3):
            channels[c][hit] = label_colours[class_idx, c]
    rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
    for c in range(3):
        rgb[:, :, c] = channels[c] / 255.0
    if plot:
        plt.imshow(rgb)
        plt.show()
    else:
        return rgb
def encode_segmap(mask):
    """Encode a colour-coded Pascal segmentation image as class indices.

    Args:
        mask (np.ndarray): raw (M, N, 3) label image where each Pascal class
            is encoded as its palette colour.

    Returns:
        (np.ndarray): (M, N) integer class map; pixels matching no palette
        colour stay 0.
    """
    mask = mask.astype(int)
    encoded = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
    for class_index, colour in enumerate(get_pascal_labels()):
        # All three channels must match the palette colour for this class.
        matches = np.where(np.all(mask == colour, axis=-1))[:2]
        encoded[matches] = class_index
    return encoded.astype(int)
def get_cityscapes_labels():
    """Return the RGB colour table for the 19 Cityscapes classes.

    Returns:
        np.ndarray of shape (19, 3); row i holds the colour of class i.
    """
    palette = [
        (128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156),
        (190, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0),
        (107, 142, 35), (152, 251, 152), (0, 130, 180), (220, 20, 60),
        (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100),
        (0, 80, 100), (0, 0, 230), (119, 11, 32),
    ]
    return np.array(palette)
def get_pascal_labels():
    """Load the mapping that associates pascal classes with label colors.

    Returns:
        np.ndarray with dimensions (21, 3); row i is the colour of class i.
    """
    colour_rows = [
        [0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
        [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
        [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
        [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
        [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
        [0, 64, 128],
    ]
    return np.asarray(colour_rows)
a44d152db1b477575a58bbae418b9ab26fca25b8 | 122 | py | Python | test/dataset/cutprotocol.py | J4CKVVH173/yaml-config | 4366769e5af818ecfa362db9e3e343f2b3b08012 | [
"MIT"
] | null | null | null | test/dataset/cutprotocol.py | J4CKVVH173/yaml-config | 4366769e5af818ecfa362db9e3e343f2b3b08012 | [
"MIT"
] | null | null | null | test/dataset/cutprotocol.py | J4CKVVH173/yaml-config | 4366769e5af818ecfa362db9e3e343f2b3b08012 | [
"MIT"
] | null | null | null | ARGS = ['http://test', 'ftp://test', 'test', 'some_random//test']
RESULT = ['test', 'test', 'test', 'some_random//test']
| 30.5 | 65 | 0.581967 |
2e3645904ed4d5b175f4eae2b5819c4a131422ad | 315 | py | Python | Practice/hulk.py | ashishjayamohan/competitive-programming | 05c5c560c2c2eb36121c52693b8c7d084f435f9e | [
"MIT"
] | null | null | null | Practice/hulk.py | ashishjayamohan/competitive-programming | 05c5c560c2c2eb36121c52693b8c7d084f435f9e | [
"MIT"
] | null | null | null | Practice/hulk.py | ashishjayamohan/competitive-programming | 05c5c560c2c2eb36121c52693b8c7d084f435f9e | [
"MIT"
def notfunc(num):
    """Logical NOT for integer flags: returns 1 when num is 0, else 0."""
    return 1 if num == 0 else 0
# Print n alternating feeling clauses; all but the last end with "that " and
# no newline, the last line ends the sentence with "it".
n = int(input())
case = 0
# Indices 0/1 are the chained forms, 2/3 the sentence-final forms.
cases = ["I hate that ", "I love that ","I hate it","I love it"]
for i in range(n,0,-1):
    if(i == 1):
        print(cases[case+2] + "")
    else:
        print(cases[case], end="")
    # Flip between hate (0) and love (1) each iteration.
    case = notfunc(case)
| 21 | 64 | 0.498413 |
8c9d349f34c44c605b01fbee6c6cac4b74245b7b | 571 | py | Python | ui/maintenance_protocols/zip.py | liyao001/BioQueue | 2b2c9f023b988fd926a037eb4755f639632b2991 | [
"Apache-2.0"
] | 33 | 2017-03-12T16:26:45.000Z | 2021-04-30T05:37:35.000Z | ui/maintenance_protocols/zip.py | liyao001/BioQueue | 2b2c9f023b988fd926a037eb4755f639632b2991 | [
"Apache-2.0"
] | 6 | 2017-04-21T08:44:47.000Z | 2018-11-11T16:20:22.000Z | ui/maintenance_protocols/zip.py | liyao001/BioQueue | 2b2c9f023b988fd926a037eb4755f639632b2991 | [
"Apache-2.0"
] | 13 | 2017-03-12T16:26:56.000Z | 2020-04-20T05:35:00.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 02/12/2017 11:40 PM
# @Project : BioQueue
# @Author : Li Yao
# @File : zip.py
def get_sub_protocol(db_obj, protocol_parent, step_order_start=1):
    """Build the single 'unzip' step of this maintenance sub-protocol.

    Args:
        db_obj: step model/factory; called with keyword arguments describing
            one protocol step.
        protocol_parent: parent protocol the step belongs to.
        step_order_start: order index assigned to the first (only) step.

    Returns:
        tuple: (next free step order, list of created step objects).
    """
    unzip_step = db_obj(
        software='unzip',
        parameter='{{LastOutput}}',
        parent=protocol_parent,
        user_id=0,
        hash='728ab46516121c0215887cd60bcbb8bd',
        step_order=step_order_start,
    )
    steps = [unzip_step]
    return step_order_start + len(steps), steps
1e95da933bab84547542280ae8751bf3fecd1527 | 702 | py | Python | dscribe/dscribe/kernels/__init__.py | rartino/hands-on-2 | 8f31b978f295a761dc0a7ae093184b3dbfa7e199 | [
"MIT"
] | 265 | 2018-12-10T21:36:30.000Z | 2022-03-24T12:58:21.000Z | dscribe/dscribe/kernels/__init__.py | rartino/hands-on-2 | 8f31b978f295a761dc0a7ae093184b3dbfa7e199 | [
"MIT"
] | 71 | 2018-12-10T22:00:39.000Z | 2022-03-30T19:38:23.000Z | dscribe/dscribe/kernels/__init__.py | rartino/hands-on-2 | 8f31b978f295a761dc0a7ae093184b3dbfa7e199 | [
"MIT"
] | 66 | 2018-11-29T13:33:05.000Z | 2022-03-21T15:15:00.000Z | # -*- coding: utf-8 -*-
"""Copyright 2019 DScribe developers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from dscribe.kernels.averagekernel import AverageKernel
from dscribe.kernels.rematchkernel import REMatchKernel
| 39 | 72 | 0.792023 |
5e6f7e1f9f8359df4ffe0f61556e2ff7bd392c3d | 5,694 | py | Python | UNP/IO.py | lizard1998myx/UNP | 4b6c77f081863bee654d38e9e938e8e7c581593e | [
"Apache-2.0"
] | null | null | null | UNP/IO.py | lizard1998myx/UNP | 4b6c77f081863bee654d38e9e938e8e7c581593e | [
"Apache-2.0"
] | null | null | null | UNP/IO.py | lizard1998myx/UNP | 4b6c77f081863bee654d38e9e938e8e7c581593e | [
"Apache-2.0"
] | null | null | null | import datetime
from UNP.Core import Account, ActTable
from UNP.Engineering import Searcher
from UNP.Application import Loginer
class SearcherIO(Searcher):
    """Interactive console front-end for :class:`Searcher`.

    Each ``ch*`` method prompts on stdin and updates one piece of the search
    configuration; empty input keeps the current value.
    """

    @staticmethod
    def info():
        return "[SearcherIO]"

    @staticmethod
    def version():
        version = "V4.0 - 20190808"
        return version

    def echo(self):
        """Print the current search configuration."""
        print("[Current setting]")
        print("mode: " + self.mode)
        print("start: " + str(self.start))
        print("end: " + str(self.end))
        # Abbreviate the long alphabet runs so the line stays readable.
        localstring = self.string
        localstring = localstring.replace("abcdefghijklmnopqrstuvwxyz", "a-z")
        localstring = localstring.replace("ABCDEFGHIJKLMNOPQRSTUVWXYZ", "A-Z")
        localstring = localstring.replace("0123456789", "0-9")
        print("string: " + localstring)
        print("list: " + str(self.list))

    def chmod(self):
        """Prompt for and update the search mode (empty input keeps it)."""
        mode = input("Enter mode[" + self.mode + "]:")
        if mode != "":
            if mode[0].lower() == "a":
                self.mode = "auto"
            elif mode[0].lower() == "p":
                # BUG FIX: the original compared the *method object*
                # ``mode[-2:].lower`` (missing call parentheses) to "id",
                # which is always False, so "password_ID" was unreachable.
                if mode[-2:].lower() == "id":
                    self.mode = "password_ID"
                else:
                    self.mode = "password"
            elif mode[0].lower() == "u":
                if mode[-2:].lower() == "cn":
                    self.mode = "username_CN"
                elif mode[-2:].lower() == "no":
                    self.mode = "username_NO"
                else:
                    self.mode = "username"

    def chrange(self):
        """Prompt for and update the start/end bounds (empty input keeps them)."""
        start = input("Enter start[" + str(self.start) + "]:")
        end = input("Enter end[" + str(self.end) + "]:")
        if start != "":
            self.start = int(start)
        if end != "":
            self.end = int(end)

    def chstr(self):
        """Prompt for and update the candidate character string."""
        command = input("Enter string setting[" + self.string + "]:")
        if command == "":
            return
        elif command[0].lower() == "c":  # clear, then re-prompt
            self.string = ""
            self.chstr()
        elif command[0].lower() == "r":  # rewrite from scratch
            self.string = input("Reset string :")
        elif command[0].lower() == "w":  # append to the current string
            self.string = self.string + input("Append string:")
        else:
            # Otherwise treat the input as up to four 0/1 toggles:
            # digits, lowercase, uppercase, symbols.
            self.string = ""
            if len(command) >= 1 and command[0] == '1':  # digits enabled
                self.string = self.string + "0123456789"
            if len(command) >= 2 and command[1] == '1':  # lowercase enabled
                self.string = self.string + "abcdefghijklmnopqrstuvwxyz"
            if len(command) >= 3 and command[2] == '1':  # uppercase enabled
                self.string = self.string + "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
            if len(command) >= 4 and command[3] == '1':  # symbols enabled
                self.string = self.string + "`~!@#$%^&*()-=_+[]{}|;:,./<>?"
            # The three characters \\ ' " are deliberately excluded.

    def chlist(self):
        """Prompt for and update the candidate list (file load, clear, append)."""
        command = input("Enter list setting" + str(self.list) + ":")
        if command == "":
            return
        if command[0] == 'f':  # load the list from a file
            self.loadtable()
            return
        if command[0] == "c":  # clear before appending
            self.list = []
        if command != "":
            while True:
                element = input("Append list element/Stop:")
                if element == "":
                    return
                else:
                    self.list.append(element)

    def loadtable(self):
        """Replace the candidate list with the user list from a CSV table."""
        self.list = ActTable(input("Enter filename:")).userlist()

    @staticmethod
    def example():
        """Interactive demo loop: configure, confirm, then run one search."""
        searcher = SearcherIO()
        print(searcher.info())
        while True:
            searcher.chmod()
            searcher.chrange()
            searcher.chstr()
            searcher.chlist()
            searcher.echo()
            if input("Continue? (enter to go)") == '':
                searcher.search()
                print("Search complete!")
                return
class LoginerIO(Loginer):
    """Interactive console front-end for :class:`Loginer`."""

    @staticmethod
    def info():
        info = "[Loginer]"
        return info

    @staticmethod
    def version():
        version = "V4.1 - 20190808"
        return version

    def echo(self):
        """Print the current login configuration."""
        print("[Current setting]")
        print("mode: " + self.mode)
        print("timer: " + str(self.timer))
        print("file: " + self.file)

    def chmod(self):
        """Prompt for and update the login mode (empty input keeps it)."""
        mode = input("Enter mode[" + self.mode + "]:")
        if mode != "":
            self.mode = mode

    def chtim(self):
        """Prompt for and update the loop timer; only positive values accepted."""
        timer = input("Enter timer[" + str(self.timer) + "]:")
        if timer != "":
            timer = int(timer)
            if timer > 0:
                self.timer = timer

    @staticmethod
    def customizer():
        """Interactively append username/password rows to customize.csv."""
        actt = ActTable("customize.csv")
        while True:
            username = input("Enter username/Stop:")
            password = input("Enter password/Stop:")
            if username == "" or password == "":
                return
            else:
                actt.record(list=[username, password,
                                  "Saved " + datetime.datetime.now().strftime('%Y-%m-%d')])

    @staticmethod
    def example():
        """Interactive demo: network test, one passive login, then configure and run."""
        Account.test()  # connectivity test via the Account class
        loginer = LoginerIO()
        loginer.passive()  # one login in passive mode
        print(loginer.info())
        while True:
            loginer.chmod()
            loginer.chtim()
            loginer.echo()
            if input("Continue? (enter to go)") == '':
                # BUG FIX: the original tested ``loginer.mode[0].lower in
                # ['i', 's']`` (method object, always False), so the
                # customizer branch could never be taken.
                if loginer.mode[0].lower() in ['i', 's']:
                    loginer.customizer()
                else:
                    loginer.active()
                return
9b20db0ab92d2955a311f32cacdae706b42c1989 | 8,347 | py | Python | pyparsing/__init__.py | jgrey4296/pyparsing | 69a8ab77ce673066b82171d1952e3b2b581f0c0c | [
"MIT"
] | null | null | null | pyparsing/__init__.py | jgrey4296/pyparsing | 69a8ab77ce673066b82171d1952e3b2b581f0c0c | [
"MIT"
] | null | null | null | pyparsing/__init__.py | jgrey4296/pyparsing | 69a8ab77ce673066b82171d1952e3b2b581f0c0c | [
"MIT"
] | null | null | null | # module pyparsing.py
#
# Copyright (c) 2003-2021 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and
executing simple grammars, vs. the traditional lex/yacc approach, or the
use of regular expressions. With pyparsing, you don't need to learn
a new syntax for defining grammars or matching expressions - the parsing
module provides a library of classes that you use to construct the
grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
:class:`Literal`, and :class:`And` elements
(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
and the strings are auto-converted to :class:`Literal` expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print(hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the
self-explanatory class names, and the use of :class:`'+'<And>`,
:class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.
The :class:`ParseResults` object returned from
:class:`ParserElement.parseString` can be
accessed as a nested list, a dictionary, or an object with named
attributes.
The pyparsing module handles some of the problems that are typically
vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle
"Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from :class:`Literal` and
:class:`CaselessLiteral` classes
- construct character word-group expressions using the :class:`Word`
class
- see how to create repetitive expressions using :class:`ZeroOrMore`
and :class:`OneOrMore` classes
- use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
and :class:`'&'<Each>` operators to combine simple expressions into
more complex ones
- associate names with your parsed results using
:class:`ParserElement.setResultsName`
- access the parsed data, which is returned as a :class:`ParseResults`
object
- find some helpful expression short-cuts like :class:`delimitedList`
and :class:`oneOf`
- find more useful common expressions in the :class:`pyparsing_common`
namespace class
"""
from collections import namedtuple
version_info = namedtuple("version_info", "major minor micro release_level serial")
__version_info__ = version_info(3, 0, 0, "candidate", 1)
# Build a PEP 440-style version string, e.g. "3.0.0c1": the 2-tuple below is
# indexed by the boolean "release_level == 'final'", so pre-releases get the
# "<level-initial><serial>" suffix and final releases get the empty string.
__version__ = (
    "{}.{}.{}".format(*__version_info__[:3])
    + ("{}{}".format(__version_info__.release_level[0], __version_info__.serial), "")[
        __version_info__.release_level == "final"
    ]
)
__version_time__ = "16 August 2021 05:31 UTC"
__versionTime__ = __version_time__  # pre-PEP8 synonym kept for compatibility
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
from .util import *
from .exceptions import *
from .actions import *
from .core import __diag__, __compat__
from .results import *
from .core import *
from .core import _builtin_exprs as core_builtin_exprs
from .helpers import *
from .helpers import _builtin_exprs as helper_builtin_exprs
from .unicode import unicode_set, pyparsing_unicode as unicode
from .testing import pyparsing_test as testing
from .common import (
pyparsing_common as common,
_builtin_exprs as common_builtin_exprs,
)
# define backward compat synonyms
# (older code imported these under the "pyparsing_" prefixed names)
pyparsing_unicode = unicode
pyparsing_common = common
pyparsing_test = testing
# merge the builtin-expression lists from core, common, and helpers
core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
__all__ = [
"__version__",
"__version_time__",
"__author__",
"__compat__",
"__diag__",
"And",
"CaselessKeyword",
"CaselessLiteral",
"CharsNotIn",
"Combine",
"Dict",
"Each",
"Empty",
"FollowedBy",
"Forward",
"GoToColumn",
"Group",
"IndentedBlock",
"Keyword",
"LineEnd",
"LineStart",
"Literal",
"Located",
"PrecededBy",
"MatchFirst",
"NoMatch",
"NotAny",
"OneOrMore",
"OnlyOnce",
"OpAssoc",
"Opt",
"Optional",
"Or",
"ParseBaseException",
"ParseElementEnhance",
"ParseException",
"ParseExpression",
"ParseFatalException",
"ParseResults",
"ParseSyntaxException",
"ParserElement",
"QuotedString",
"RecursiveGrammarException",
"Regex",
"SkipTo",
"StringEnd",
"StringStart",
"Suppress",
"Token",
"TokenConverter",
"White",
"Word",
"WordEnd",
"WordStart",
"ZeroOrMore",
"Char",
"alphanums",
"alphas",
"alphas8bit",
"any_close_tag",
"any_open_tag",
"c_style_comment",
"col",
"common_html_entity",
"counted_array",
"cpp_style_comment",
"dbl_quoted_string",
"dbl_slash_comment",
"delimited_list",
"dict_of",
"empty",
"hexnums",
"html_comment",
"identchars",
"identbodychars",
"java_style_comment",
"line",
"line_end",
"line_start",
"lineno",
"make_html_tags",
"make_xml_tags",
"match_only_at_col",
"match_previous_expr",
"match_previous_literal",
"nested_expr",
"null_debug_action",
"nums",
"one_of",
"printables",
"punc8bit",
"python_style_comment",
"quoted_string",
"remove_quotes",
"replace_with",
"replace_html_entity",
"rest_of_line",
"sgl_quoted_string",
"srange",
"string_end",
"string_start",
"trace_parse_action",
"unicode_string",
"with_attribute",
"indentedBlock",
"original_text_for",
"ungroup",
"infix_notation",
"locatedExpr",
"with_class",
"CloseMatch",
"token_map",
"pyparsing_common",
"pyparsing_unicode",
"unicode_set",
"condition_as_parse_action",
"pyparsing_test",
# pre-PEP8 compatibility names
"__versionTime__",
"anyCloseTag",
"anyOpenTag",
"cStyleComment",
"commonHTMLEntity",
"countedArray",
"cppStyleComment",
"dblQuotedString",
"dblSlashComment",
"delimitedList",
"dictOf",
"htmlComment",
"javaStyleComment",
"lineEnd",
"lineStart",
"makeHTMLTags",
"makeXMLTags",
"matchOnlyAtCol",
"matchPreviousExpr",
"matchPreviousLiteral",
"nestedExpr",
"nullDebugAction",
"oneOf",
"opAssoc",
"pythonStyleComment",
"quotedString",
"removeQuotes",
"replaceHTMLEntity",
"replaceWith",
"restOfLine",
"sglQuotedString",
"stringEnd",
"stringStart",
"traceParseAction",
"unicodeString",
"withAttribute",
"indentedBlock",
"originalTextFor",
"infixNotation",
"locatedExpr",
"withClass",
"tokenMap",
"conditionAsParseAction",
]
| 28.010067 | 86 | 0.680724 |
2363c5aa5ccedc8d298ad8ff2a335c9aed5c895b | 1,578 | py | Python | isolateparser/tableformat.py | cdeitrick/isolate_parsers | 1b6725d086b1ccd53d985d7e010b23a811dbcf20 | [
"MIT"
] | null | null | null | isolateparser/tableformat.py | cdeitrick/isolate_parsers | 1b6725d086b1ccd53d985d7e010b23a811dbcf20 | [
"MIT"
] | 1 | 2021-06-12T12:22:04.000Z | 2021-06-12T12:22:04.000Z | isolateparser/tableformat.py | cdeitrick/isolate_parsers | 1b6725d086b1ccd53d985d7e010b23a811dbcf20 | [
"MIT"
] | null | null | null | """
Implements the classes which indicate which columns are available from each table.
These should be consolidated into a single file instead of beings spread theoughout the codebase.
"""
from typing import NamedTuple
class IsolateTableColumns(NamedTuple):
# Defines the column names for the isolate table.
# Mainly used as a reminder of what the final column labels should be.
sample_id: str = 'sampleId'
sample_name: str = 'sampleName'
sequence_id: str = 'seq id'
position: str = 'position'
annotation: str = 'annotation'
description: str = 'description'
evidence: str = 'evidence'
freq: str = 'freq'
gene: str = 'gene'
mutation: str = 'mutation'
alt: str = 'alt'
ref: str = 'ref'
alternate_amino: str = 'aminoAlt'
reference_amino: str = 'aminoRef'
alternate_codon: str = 'codonAlt'
reference_codon: str = 'codonRef'
locus_tag: str = 'locusTag'
mutation_category: str = 'mutationCategory'
IsolateTableColumns = IsolateTableColumns()
import enum
class Columns(enum.Enum):
sample_id: str = 'sampleId'
sample_name: str = 'sampleName'
sequence_id: str = 'seq id'
position: str = 'position'
annotation: str = 'annotation'
description: str = 'description'
evidence: str = 'evidence'
freq: str = 'freq'
gene: str = 'gene'
mutation: str = 'mutation'
alt: str = 'alt'
ref: str = 'ref'
alternate_amino: str = 'aminoAlt'
reference_amino: str = 'aminoRef'
alternate_codon: str = 'codonAlt'
reference_codon: str = 'codonRef'
locus_tag: str = 'locusTag'
mutation_category: str = 'mutationCategory'
if __name__ == "__main__":
print(list(Columns)) | 30.346154 | 98 | 0.724968 |
eb766f72e2a4f770af16b31cfd2d3976a4bb2753 | 1,737 | py | Python | indra/tests/test_cag_assembler.py | min-yin-sri/indra | 93d4cb8b23764a2775f9dbdf5eb73b6053006d73 | [
"BSD-2-Clause"
] | 2 | 2020-01-14T08:59:10.000Z | 2020-12-18T16:21:38.000Z | indra/tests/test_cag_assembler.py | min-yin-sri/indra | 93d4cb8b23764a2775f9dbdf5eb73b6053006d73 | [
"BSD-2-Clause"
] | null | null | null | indra/tests/test_cag_assembler.py | min-yin-sri/indra | 93d4cb8b23764a2775f9dbdf5eb73b6053006d73 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.statements import *
from indra.assemblers import CAGAssembler
eg1 = {'UN': [('a/b/c', 0.123)]}
eg2 = {'UN': [('a/b/c', 0.234)]}
# An example provenance from Eidos
prov = [{
"@type" : "Provenance",
"document" : {
"@id" : "_:Document_1"
},
"sentence" : {
"@id" : "_:Sentence_1"
},
"positions" : {
"@type" : "Interval",
"start" : 29,
"end" : 31
}
}]
st1 = Influence(Agent('inorganic fertilizer', db_refs=eg1),
Agent('farm sizes', db_refs=eg2),
{'adjectives': 'serious', 'polarity': 1},
{'adjectives': 'significant', 'polarity': 1},
evidence=[Evidence(source_api='eidos',
text=('A serious increase in the use of '
'incorganic fertilizers '
'resulted in a significant increase '
'in farm sizes.'),
annotations={'provenance': prov})])
statements = [st1]
def test_assemble_influence():
ca = CAGAssembler(statements)
CAG = ca.make_model()
assert(len(CAG.nodes()) == 2)
assert(len(CAG.edges()) == 1)
def test_export_to_cyjs():
ca = CAGAssembler(statements)
ca.make_model()
cyjs = ca.export_to_cytoscapejs()
assert len(cyjs['nodes']) == 2
assert len(cyjs['edges']) == 1
ca.generate_jupyter_js()
def test_assemble_no_evidence():
ca = CAGAssembler([Influence(Concept('a'), Concept('b'))])
ca.make_model()
ca.generate_jupyter_js()
| 29.948276 | 78 | 0.53483 |
c5973ced7683b8f9d3a359ea945ceb5aa7b33aca | 3,022 | py | Python | tests/test_utils.py | csadorf/aiidalab-optimade | 5c09750a66109a67edb08771f44960d051b9aead | [
"MIT"
] | null | null | null | tests/test_utils.py | csadorf/aiidalab-optimade | 5c09750a66109a67edb08771f44960d051b9aead | [
"MIT"
] | null | null | null | tests/test_utils.py | csadorf/aiidalab-optimade | 5c09750a66109a67edb08771f44960d051b9aead | [
"MIT"
] | null | null | null | from aiidalab_optimade import utils
def test_fetch_providers_wrong_url():
"""Test when fetch_providers is provided a wrong URL
It should now return at the very least the cached list of providers
"""
import json
wrong_url = "https://this.is.a.wrong.url"
providers = utils.fetch_providers(providers_urls=wrong_url)
if utils.CACHED_PROVIDERS.exists():
with open(utils.CACHED_PROVIDERS, "r") as handle:
providers_file = json.load(handle)
assert providers == providers_file.get("data", [])
else:
assert providers == []
def test_fetch_providers_content():
"""Test known content in dict of database providers"""
exmpl = {
"type": "links",
"id": "exmpl",
"attributes": {
"name": "Example provider",
"description": "Provider used for examples, not to be assigned to a real database",
"base_url": "http://providers.optimade.org/index-metadbs/exmpl",
"homepage": "https://example.com",
"link_type": "external",
},
}
assert exmpl in utils.fetch_providers()
def test_exmpl_not_in_list():
"""Make sure the 'exmpl' database provider is not in the final list"""
from optimade.models import LinksResourceAttributes
exmpl = (
"Example provider",
LinksResourceAttributes(
**{
"name": "Example provider",
"description": "Provider used for examples, not to be assigned to a real database",
"base_url": "https://example.com/index/optimade",
"homepage": "https://example.com",
"link_type": "external",
}
),
)
mcloud = (
"Materials Cloud",
LinksResourceAttributes(
**{
"name": "Materials Cloud",
"description": "A platform for Open Science built for seamless "
"sharing of resources in computational materials science",
"base_url": "https://www.materialscloud.org/optimade/v1.0.0",
"homepage": "https://www.materialscloud.org",
"link_type": "external",
}
),
)
odbx = (
"open database of xtals",
LinksResourceAttributes(
**{
"name": "open database of xtals",
"description": "A public database of crystal structures mostly derived from ab "
"initio structure prediction from the group of Dr Andrew Morris at the University "
"of Birmingham https://ajm143.github.io",
"base_url": "https://optimade.odbx.science/v1.0.0",
"homepage": "https://odbx.science",
"link_type": "external",
}
),
)
list_of_database_providers = utils.get_list_of_valid_providers()
assert exmpl not in list_of_database_providers
assert mcloud in list_of_database_providers or odbx in list_of_database_providers
| 33.955056 | 99 | 0.587028 |
81b1ce87098bde19cc335b91cecaf5582a0892fe | 107,809 | py | Python | Lib/inspect.py | SimplyAhmazing/python-with-arabic-keywords | dfea9d0a225eadcd6845961e5829830ec2784105 | [
"PSF-2.0"
] | 1 | 2021-09-18T20:14:03.000Z | 2021-09-18T20:14:03.000Z | Lib/inspect.py | SimplyAhmazing/python-with-arabic-keywords | dfea9d0a225eadcd6845961e5829830ec2784105 | [
"PSF-2.0"
] | null | null | null | Lib/inspect.py | SimplyAhmazing/python-with-arabic-keywords | dfea9d0a225eadcd6845961e5829830ec2784105 | [
"PSF-2.0"
] | null | null | null | """Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues(), getcallargs() - get info about function arguments
getfullargspec() - same, with support for Python 3 features
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
signature() - get a Signature object for the callable
"""
# This module is in the public domain. No warranties.
__author__ = ('Ka-Ping Yee <ping@lfw.org>',
'Yury Selivanov <yselivanov@sprymix.com>')
import ast
import enum
import importlib.machinery
import itertools
import linecache
import os
import re
import sys
import tokenize
import token
import types
import warnings
import functools
import builtins
from operator import attrgetter
from collections import namedtuple, OrderedDict
# Create constants for the compiler flags in Include/code.h
# We try to get them from dis to avoid duplication, but fall
# back to hard-coding so the dependency is optional
try:
from dis import COMPILER_FLAG_NAMES as _flag_names
except ImportError:
CO_OPTIMIZED, CO_NEWLOCALS = 0x1, 0x2
CO_VARARGS, CO_VARKEYWORDS = 0x4, 0x8
CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
else:
mod_dict = globals()
for k, v in _flag_names.items():
mod_dict["CO_" + v] = k
# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
"""Return true if the object is a module.
Module objects provide these attributes:
__cached__ pathname to byte compiled file
__doc__ documentation string
__file__ filename (missing for built-in modules)"""
return isinstance(object, types.ModuleType)
def isclass(object):
"""Return true if the object is a class.
Class objects provide these attributes:
__doc__ documentation string
__module__ name of module in which this class was defined"""
return isinstance(object, type)
def ismethod(object):
"""Return true if the object is an instance method.
Instance method objects provide these attributes:
__doc__ documentation string
__name__ name with which this method was defined
__func__ function object containing implementation of method
__self__ instance to which this method is bound"""
return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
"""Return true if the object is a method descriptor.
But not if ismethod() or isclass() or isfunction() are true.
This is new in Python 2.2, and, for example, is true of int.__add__.
An object passing this test has a __get__ attribute but not a __set__
attribute, but beyond that the set of attributes varies. __name__ is
usually sensible, and __doc__ often is.
Methods implemented via descriptors that also pass one of the other
tests return false from the ismethoddescriptor() test, simply because
the other tests promise more -- you can, e.g., count on having the
__func__ attribute (etc) when an object passes ismethod()."""
if isclass(object) or ismethod(object) or isfunction(object):
# mutual exclusion
return False
tp = type(object)
return hasattr(tp, "__get__") and not hasattr(tp, "__set__")
def isdatadescriptor(object):
"""Return true if the object is a data descriptor.
Data descriptors have both a __get__ and a __set__ attribute. Examples are
properties (defined in Python) and getsets and members (defined in C).
Typically, data descriptors will also have __name__ and __doc__ attributes
(properties, getsets, and members have both of these attributes), but this
is not guaranteed."""
if isclass(object) or ismethod(object) or isfunction(object):
# mutual exclusion
return False
tp = type(object)
return hasattr(tp, "__set__") and hasattr(tp, "__get__")
if hasattr(types, 'MemberDescriptorType'):
# CPython and equivalent
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.MemberDescriptorType)
else:
# Other implementations
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return False
if hasattr(types, 'GetSetDescriptorType'):
# CPython and equivalent
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.GetSetDescriptorType)
else:
# Other implementations
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return False
def isfunction(object):
"""Return true if the object is a user-defined function.
Function objects provide these attributes:
__doc__ documentation string
__name__ name with which this function was defined
__code__ code object containing compiled function bytecode
__defaults__ tuple of any default values for arguments
__globals__ global namespace in which this function was defined
__annotations__ dict of parameter annotations
__kwdefaults__ dict of keyword only parameters with defaults"""
return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
"""Return true if the object is a user-defined generator function.
Generator function objects provides same attributes as functions.
See help(isfunction) for attributes listing."""
return bool((isfunction(object) or ismethod(object)) and
object.__code__.co_flags & CO_GENERATOR)
def isgenerator(object):
"""Return true if the object is a generator.
Generator objects provide these attributes:
__iter__ defined to support iteration over container
close raises a new GeneratorExit exception inside the
generator to terminate the iteration
gi_code code object
gi_frame frame object or possibly None once the generator has
been exhausted
gi_running set to 1 when generator is executing, 0 otherwise
next return the next item from the container
send resumes the generator and "sends" a value that becomes
the result of the current yield-expression
throw used to raise an exception inside the generator"""
return isinstance(object, types.GeneratorType)
def istraceback(object):
"""Return true if the object is a traceback.
Traceback objects provide these attributes:
tb_frame frame object at this level
tb_lasti index of last attempted instruction in bytecode
tb_lineno current line number in Python source code
tb_next next inner traceback object (called by this level)"""
return isinstance(object, types.TracebackType)
def isframe(object):
"""Return true if the object is a frame object.
Frame objects provide these attributes:
f_back next outer frame object (this frame's caller)
f_builtins built-in namespace seen by this frame
f_code code object being executed in this frame
f_globals global namespace seen by this frame
f_lasti index of last attempted instruction in bytecode
f_lineno current line number in Python source code
f_locals local namespace seen by this frame
f_trace tracing function for this frame, or None"""
return isinstance(object, types.FrameType)
def iscode(object):
"""Return true if the object is a code object.
Code objects provide these attributes:
co_argcount number of arguments (not including * or ** args)
co_code string of raw compiled bytecode
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
co_lnotab encoded mapping of line numbers to bytecode indices
co_name name with which this code object was defined
co_names tuple of names of local variables
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables"""
return isinstance(object, types.CodeType)
def isbuiltin(object):
"""Return true if the object is a built-in function or method.
Built-in functions and methods provide these attributes:
__doc__ documentation string
__name__ original name of this function or method
__self__ instance to which a method is bound, or None"""
return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
"""Return true if the object is any kind of function or method."""
return (isbuiltin(object)
or isfunction(object)
or ismethod(object)
or ismethoddescriptor(object))
def isabstract(object):
"""Return true if the object is an abstract base class (ABC)."""
return bool(isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
"""Return all members of an object as (name, value) pairs sorted by name.
Optionally, only return members that satisfy a given predicate."""
if isclass(object):
mro = (object,) + getmro(object)
else:
mro = ()
results = []
processed = set()
names = dir(object)
# :dd any DynamicClassAttributes to the list of names if object is a class;
# this may result in duplicate entries if, for example, a virtual
# attribute with the same name as a DynamicClassAttribute exists
try:
for base in object.__bases__:
for k, v in base.__dict__.items():
if isinstance(v, types.DynamicClassAttribute):
names.append(k)
except AttributeError:
pass
for key in names:
# First try to get the value via getattr. Some descriptors don't
# like calling their __get__ (see bug #1785), so fall back to
# looking in the __dict__.
try:
value = getattr(object, key)
# handle the duplicate key
if key in processed:
raise AttributeError
except AttributeError:
for base in mro:
if key in base.__dict__:
value = base.__dict__[key]
break
else:
# could be a (currently) missing slot member, or a buggy
# __dir__; discard and move on
continue
if not predicate or predicate(value):
results.append((key, value))
processed.add(key)
results.sort(key=lambda pair: pair[0])
return results
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
"""Return list of attribute-descriptor tuples.
For each name in dir(cls), the return list contains a 4-tuple
with these elements:
0. The name (a string).
1. The kind of attribute this is, one of these strings:
'class method' created via classmethod()
'static method' created via staticmethod()
'property' created via property()
'method' any other flavor of method or descriptor
'data' not a method
2. The class which defined this attribute (a class).
3. The object as obtained by calling getattr; if this fails, or if the
resulting object does not live anywhere in the class' mro (including
metaclasses) then the object is looked up in the defining class's
dict (found by walking the mro).
If one of the items in dir(cls) is stored in the metaclass it will now
be discovered and not have None be listed as the class in which it was
defined. Any items whose home class cannot be discovered are skipped.
"""
mro = getmro(cls)
metamro = getmro(type(cls)) # for attributes stored in the metaclass
metamro = tuple([cls for cls in metamro if cls not in (type, object)])
class_bases = (cls,) + mro
all_bases = class_bases + metamro
names = dir(cls)
# :dd any DynamicClassAttributes to the list of names;
# this may result in duplicate entries if, for example, a virtual
# attribute with the same name as a DynamicClassAttribute exists.
for base in mro:
for k, v in base.__dict__.items():
if isinstance(v, types.DynamicClassAttribute):
names.append(k)
result = []
processed = set()
for name in names:
# Get the object associated with the name, and where it was defined.
# Normal objects will be looked up with both getattr and directly in
# its class' dict (in case getattr fails [bug #1785], and also to look
# for a docstring).
# For DynamicClassAttributes on the second pass we only look in the
# class's dict.
#
# Getting an obj from the __dict__ sometimes reveals more than
# using getattr. Static and class methods are dramatic examples.
homecls = None
get_obj = None
dict_obj = None
if name not in processed:
try:
if name == '__dict__':
raise Exception("__dict__ is special, don't want the proxy")
get_obj = getattr(cls, name)
except Exception as exc:
pass
else:
homecls = getattr(get_obj, "__objclass__", homecls)
if homecls not in class_bases:
# if the resulting object does not live somewhere in the
# mro, drop it and search the mro manually
homecls = None
last_cls = None
# first look in the classes
for srch_cls in class_bases:
srch_obj = getattr(srch_cls, name, None)
if srch_obj == get_obj:
last_cls = srch_cls
# then check the metaclasses
for srch_cls in metamro:
try:
srch_obj = srch_cls.__getattr__(cls, name)
except AttributeError:
continue
if srch_obj == get_obj:
last_cls = srch_cls
if last_cls is not None:
homecls = last_cls
for base in all_bases:
if name in base.__dict__:
dict_obj = base.__dict__[name]
if homecls not in metamro:
homecls = base
break
if homecls is None:
# unable to locate the attribute anywhere, most likely due to
# buggy custom __dir__; discard and move on
continue
obj = get_obj or dict_obj
# Classify the object or its descriptor.
if isinstance(dict_obj, staticmethod):
kind = "static method"
obj = dict_obj
elif isinstance(dict_obj, classmethod):
kind = "class method"
obj = dict_obj
elif isinstance(dict_obj, property):
kind = "property"
obj = dict_obj
elif isroutine(obj):
kind = "method"
else:
kind = "data"
result.append(Attribute(name, kind, homecls, obj))
processed.add(name)
return result
# ----------------------------------------------------------- class helpers
def getmro(cls):
"Return tuple of base classes (including cls) in method resolution order."
return cls.__mro__
# -------------------------------------------------------- function helpers
def unwrap(func, *, stop=None):
"""Get the object wrapped by *func*.
Follows the chain of :attr:`__wrapped__` attributes returning the last
object in the chain.
*stop* is an optional callback accepting an object in the wrapper chain
as its sole argument that allows the unwrapping to be terminated early if
the callback returns a true value. If the callback never returns a true
value, the last object in the chain is returned as usual. For example,
:func:`signature` uses this to stop unwrapping if any object in the
chain has a ``__signature__`` attribute defined.
:exc:`ValueError` is raised if a cycle is encountered.
"""
if stop is None:
def _is_wrapper(f):
return hasattr(f, '__wrapped__')
else:
def _is_wrapper(f):
return hasattr(f, '__wrapped__') and not stop(f)
f = func # remember the original func for error reporting
memo = {id(f)} # Memoise by id to tolerate non-hashable objects
while _is_wrapper(func):
func = func.__wrapped__
id_func = id(func)
if id_func in memo:
raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
memo.add(id_func)
return func
# -------------------------------------------------- source code extraction
def indentsize(line):
"""Return the indent size, in spaces, at the start of a line of text."""
expline = line.expandtabs()
return len(expline) - len(expline.lstrip())
def _findclass(func):
cls = sys.modules.get(func.__module__)
if cls is None:
return None
for name in func.__qualname__.split('.')[:-1]:
cls = getattr(cls, name)
if not isclass(cls):
return None
return cls
def _finddoc(obj):
if isclass(obj):
for base in obj.__mro__:
if base is not object:
try:
doc = base.__doc__
except AttributeError:
continue
if doc is not None:
return doc
return None
if ismethod(obj):
name = obj.__func__.__name__
self = obj.__self__
if (isclass(self) and
getattr(getattr(self, name, None), '__func__') is obj.__func__):
# classmethod
cls = self
else:
cls = self.__class__
elif isfunction(obj):
name = obj.__name__
cls = _findclass(obj)
if cls is None or getattr(cls, name) is not obj:
return None
elif isbuiltin(obj):
name = obj.__name__
self = obj.__self__
if (isclass(self) and
self.__qualname__ + '.' + name == obj.__qualname__):
# classmethod
cls = self
else:
cls = self.__class__
elif ismethoddescriptor(obj) or isdatadescriptor(obj):
name = obj.__name__
cls = obj.__objclass__
if getattr(cls, name) is not obj:
return None
elif isinstance(obj, property):
func = f.fget
name = func.__name__
cls = _findclass(func)
if cls is None or getattr(cls, name) is not obj:
return None
else:
return None
for base in cls.__mro__:
try:
doc = getattr(base, name).__doc__
except AttributeError:
continue
if doc is not None:
return doc
return None
def getdoc(object):
"""Get the documentation string for an object.
All tabs are expanded to spaces. To clean up docstrings that are
indented to line up with blocks of code, any whitespace than can be
uniformly removed from the second line onwards is removed."""
try:
doc = object.__doc__
except AttributeError:
return None
if doc is None:
try:
doc = _finddoc(object)
except (AttributeError, TypeError):
return None
if not isinstance(doc, str):
return None
return cleandoc(doc)
def cleandoc(doc):
"""Clean up indentation from docstrings.
Any whitespace that can be uniformly removed from the second line
onwards is removed."""
try:
lines = doc.expandtabs().split('\n')
except UnicodeError:
return None
else:
# Find minimum indentation of any non-blank lines after first line.
margin = sys.maxsize
for line in lines[1:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation.
if lines:
lines[0] = lines[0].lstrip()
if margin < sys.maxsize:
for i in range(1, len(lines)): lines[i] = lines[i][margin:]
# Remove any trailing or leading blank lines.
while lines and not lines[-1]:
lines.pop()
while lines and not lines[0]:
lines.pop(0)
return '\n'.join(lines)
def getfile(object):
"""Work out which source or compiled file an object was defined in."""
if ismodule(object):
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('{!r} is a built-in module'.format(object))
if isclass(object):
if hasattr(object, '__module__'):
object = sys.modules.get(object.__module__)
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('{!r} is a built-in class'.format(object))
if ismethod(object):
object = object.__func__
if isfunction(object):
object = object.__code__
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
return object.co_filename
raise TypeError('{!r} is not a module, class, method, '
'function, traceback, frame, or code object'.format(object))
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
"""Get the module name, suffix, mode, and module type for a given file."""
warnings.warn('inspect.getmoduleinfo() is deprecated', DeprecationWarning,
2)
with warnings.catch_warnings():
warnings.simplefilter('ignore', PendingDeprecationWarning)
import imp
filename = os.path.basename(path)
suffixes = [(-len(suffix), suffix, mode, mtype)
for suffix, mode, mtype in imp.get_suffixes()]
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix, mode, mtype in suffixes:
if filename[neglen:] == suffix:
return ModuleInfo(filename[:neglen], suffix, mode, mtype)
def getmodulename(path):
"""Return the module name for a given file, or None."""
fname = os.path.basename(path)
# Check for paths that look like an actual module file
suffixes = [(-len(suffix), suffix)
for suffix in importlib.machinery.all_suffixes()]
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix in suffixes:
if fname.endswith(suffix):
return fname[:neglen]
return None
def getsourcefile(object):
"""Return the filename that can be used to locate an object's source.
Return None if no way can be identified to get the source.
"""
filename = getfile(object)
all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]
all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]
if any(filename.endswith(s) for s in all_bytecode_suffixes):
filename = (os.path.splitext(filename)[0] +
importlib.machinery.SOURCE_SUFFIXES[0])
elif any(filename.endswith(s) for s in
importlib.machinery.EXTENSION_SUFFIXES):
return None
if os.path.exists(filename):
return filename
# only return a non-existent filename if the module has a PEP 302 loader
if getattr(getmodule(object, filename), '__loader__', None) is not None:
return filename
# or it is in the linecache
if filename in linecache.cache:
return filename
def getabsfile(object, _filename=None):
"""Return an absolute path to the source or compiled file for an object.
The idea is for each object to have a unique origin, so this routine
normalizes the result as much as possible."""
if _filename is None:
_filename = getsourcefile(object) or getfile(object)
return os.path.normcase(os.path.abspath(_filename))
modulesbyfile = {}
_filesbymodname = {}
def getmodule(object, _filename=None):
"""Return the module an object was defined in, or None if not found."""
if ismodule(object):
return object
if hasattr(object, '__module__'):
return sys.modules.get(object.__module__)
# Try the filename to modulename cache
if _filename is not None and _filename in modulesbyfile:
return sys.modules.get(modulesbyfile[_filename])
# Try the cache again with the absolute file name
try:
file = getabsfile(object, _filename)
except TypeError:
return None
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Update the filename to module name cache and check yet again
# Copy sys.modules in order to cope with changes while iterating
for modname, module in list(sys.modules.items()):
if ismodule(module) and hasattr(module, '__file__'):
f = module.__file__
if f == _filesbymodname.get(modname, None):
# Have already mapped this module, so skip it
continue
_filesbymodname[modname] = f
f = getabsfile(module)
# Always map to the name the module knows itself by
modulesbyfile[f] = modulesbyfile[
os.path.realpath(f)] = module.__name__
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Check the main module
main = sys.modules['__main__']
if not hasattr(object, '__name__'):
return None
if hasattr(main, object.__name__):
mainobject = getattr(main, object.__name__)
if mainobject is object:
return main
# Check builtins
builtin = sys.modules['builtins']
if hasattr(builtin, object.__name__):
builtinobject = getattr(builtin, object.__name__)
if builtinobject is object:
return builtin
def findsource(object):
"""Return the entire source file and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of all the lines
in the file and the line number indexes a line in that list. An OSError
is raised if the source code cannot be retrieved."""
file = getsourcefile(object)
if file:
# Invalidate cache if needed.
linecache.checkcache(file)
else:
file = getfile(object)
# Allow filenames in form of "<something>" to pass through.
# `doctest` monkeypatches `linecache` module to enable
# inspection, so let `linecache.getlines` to be called.
if not (file.startswith('<') and file.endswith('>')):
raise OSError('source code not available')
module = getmodule(object, file)
if module:
lines = linecache.getlines(file, module.__dict__)
else:
lines = linecache.getlines(file)
if not lines:
raise OSError('could not get source code')
if ismodule(object):
return lines, 0
if isclass(object):
name = object.__name__
pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
# make some effort to find the best matching class definition:
# use the one with the least indentation, which is the one
# that's most probably not inside a function definition.
candidates = []
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
# if it's at toplevel, it's already the best one
if lines[i][0] == 'c':
return lines, i
# else add whitespace to candidate list
candidates.append((match.group(1), i))
if candidates:
# this will sort by whitespace, and by line number,
# less whitespace first
candidates.sort()
return lines, candidates[0][1]
else:
raise OSError('could not find class definition')
if ismethod(object):
object = object.__func__
if isfunction(object):
object = object.__code__
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
if not hasattr(object, 'co_firstlineno'):
raise OSError('could not find function definition')
lnum = object.co_firstlineno - 1
pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
while lnum > 0:
if pat.match(lines[lnum]): break
lnum = lnum - 1
return lines, lnum
raise OSError('could not find code object')
def getcomments(object):
    """Get lines of comments immediately preceding an object's source code.

    Returns None when source can't be found, or when no comment block
    precedes the object's definition.
    """
    try:
        lines, lnum = findsource(object)
    except (OSError, TypeError):
        # No retrievable source (builtin, C extension, interactive input...).
        return None
    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        # Skip an optional shebang line and any blank / bare-'#' lines.
        if lines and lines[0][:2] == '#!': start = 1
        while start < len(lines) and lines[start].strip() in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(lines[end].expandtabs())
                end = end + 1
            return ''.join(comments)
    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and lines[end].lstrip()[:1] == '#' and \
            indentsize(lines[end]) == indent:
            comments = [lines[end].expandtabs().lstrip()]
            if end > 0:
                # Walk upwards, collecting comment lines at the same indent.
                end = end - 1
                comment = lines[end].expandtabs().lstrip()
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = lines[end].expandtabs().lstrip()
            # Trim empty '#' lines from both ends of the collected block.
            while comments and comments[0].strip() == '#':
                comments[:1] = []
            while comments and comments[-1].strip() == '#':
                comments[-1:] = []
            return ''.join(comments)
class EndOfBlock(Exception):
    """Raised by BlockFinder.tokeneater to signal that the block is over."""
class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block.

    Feed tokens via tokeneater(); it raises EndOfBlock once the block
    introduced by the first "def"/"class"/"lambda" token has ended, and
    records in ``last`` the last line number belonging to the block.
    """
    def __init__(self):
        # Indentation depth relative to the block's first line.
        self.indent = 0
        # True while scanning a lambda (ends at its first NEWLINE).
        self.islambda = False
        # Set once the introducing "def"/"class"/"lambda" token is seen.
        self.started = False
        # True while skipping to the end of the current logical line.
        self.passline = False
        # Line number of the last line known to belong to the block.
        self.last = 1
    def tokeneater(self, type, token, srowcol, erowcol, line):
        """Token callback; raises EndOfBlock when the block has ended."""
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srowcol[0]
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            #  not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock
def getblock(lines):
    """Return the block of code at the top of the given list of lines."""
    finder = BlockFinder()
    try:
        for tok in tokenize.generate_tokens(iter(lines).__next__):
            finder.tokeneater(*tok)
    except (EndOfBlock, IndentationError):
        # Either exception simply marks where scanning should stop:
        # the block ended, or the source was truncated mid-block.
        pass
    return lines[:finder.last]
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of the lines
    corresponding to the object, and the line number indicates where in the
    original source file the first line of code was found.  An OSError is
    raised if the source code cannot be retrieved."""
    object = unwrap(object)
    lines, lnum = findsource(object)
    if ismodule(object):
        # Whole-module request: every line, starting at the top of the file.
        return lines, 0
    return getblock(lines[lnum:]), lnum + 1
def getsource(object):
    """Return the text of the source code for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a single string.  An
    OSError is raised if the source code cannot be retrieved."""
    lines, _ = getsourcelines(object)
    return ''.join(lines)
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
    """Recursive helper for getclasstree().

    Emits a (class, bases) entry for each class (sorted by module then
    name), nesting each class's children in a sub-list right after it."""
    classes.sort(key=attrgetter('__module__', '__name__'))
    results = []
    for klass in classes:
        results.append((klass, klass.__bases__))
        subclasses = children.get(klass)
        if subclasses is not None:
            results.append(walktree(subclasses, children, klass))
    return results
def getclasstree(classes, unique=False):
    """Arrange the given list of classes into a hierarchy of nested lists.

    Where a nested list appears, it contains classes derived from the class
    whose entry immediately precedes the list.  Each entry is a 2-tuple
    containing a class and a tuple of its base classes.  If the 'unique'
    argument is true, exactly one entry appears in the returned structure
    for each class in the given list.  Otherwise, classes using multiple
    inheritance and their descendants will appear multiple times."""
    children = {}
    roots = []
    for klass in classes:
        bases = klass.__bases__
        if not bases:
            # A base-less class is a root of the tree.
            if klass not in roots:
                roots.append(klass)
            continue
        for base in bases:
            children.setdefault(base, [])
            if klass not in children[base]:
                children[base].append(klass)
            # With unique=True, record the class under only one listed base.
            if unique and base in classes:
                break
    # Parents that were not themselves listed become additional roots.
    roots.extend(parent for parent in children if parent not in classes)
    return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
Arguments = namedtuple('Arguments', 'args, varargs, varkw')
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where
'args' is the list of argument names. Keyword-only arguments are
appended. 'varargs' and 'varkw' are the names of the * and **
arguments or None."""
args, varargs, kwonlyargs, varkw = _getfullargs(co)
return Arguments(args + kwonlyargs, varargs, varkw)
def _getfullargs(co):
    """Get information about the arguments accepted by a code object.

    Four things are returned: (args, varargs, kwonlyargs, varkw), where
    'args' and 'kwonlyargs' are lists of argument names, and 'varargs'
    and 'varkw' are the names of the * and ** arguments or None.

    Raises TypeError if *co* is not a code object.
    """
    if not iscode(co):
        raise TypeError('{!r} is not a code object'.format(co))
    nargs = co.co_argcount
    names = co.co_varnames
    nkwargs = co.co_kwonlyargcount
    # co_varnames lists positional args first, then keyword-only args.
    args = list(names[:nargs])
    kwonlyargs = list(names[nargs:nargs+nkwargs])
    # (The previous version assigned an unused local 'step' here; removed.)
    nargs += nkwargs
    varargs = None
    if co.co_flags & CO_VARARGS:
        # The *args name follows all positional and keyword-only names.
        varargs = co.co_varnames[nargs]
        nargs = nargs + 1
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        varkw = co.co_varnames[nargs]
    return args, varargs, kwonlyargs, varkw
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')

def getargspec(func):
    """Get the names and default values of a function's arguments.

    A tuple of four things is returned: (args, varargs, keywords, defaults).
    'args' is a list of the argument names, including keyword-only argument names.
    'varargs' and 'keywords' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.

    Use the getfullargspec() API for Python 3 code, as annotations
    and keyword arguments are supported. getargspec() will raise ValueError
    if the func has either annotations or keyword arguments.
    """
    full = getfullargspec(func)
    # This legacy 4-tuple API cannot represent kw-only args or annotations.
    if full.kwonlyargs or full.annotations:
        raise ValueError("Function has keyword-only arguments or annotations"
                         ", use getfullargspec() API which can support them")
    return ArgSpec(full.args, full.varargs, full.varkw, full.defaults)
FullArgSpec = namedtuple('FullArgSpec',
    'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')

def getfullargspec(func):
    """Get the names and default values of a callable object's arguments.

    A tuple of seven things is returned:
    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
    'args' is a list of the argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.
    'kwonlyargs' is a list of keyword-only argument names.
    'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
    'annotations' is a dictionary mapping argument names to annotations.

    The first four items in the tuple correspond to getargspec().

    Raises TypeError if *func* cannot be introspected.
    """
    try:
        # Re: `skip_bound_arg=False`
        #
        # There is a notable difference in behaviour between getfullargspec
        # and Signature: the former always returns 'self' parameter for bound
        # methods, whereas the Signature always shows the actual calling
        # signature of the passed object.
        #
        # To simulate this behaviour, we "unbind" bound methods, to trick
        # inspect.signature to always return their first parameter ("self",
        # usually)

        # Re: `follow_wrapper_chains=False`
        #
        # getfullargspec() historically ignored __wrapped__ attributes,
        # so we ensure that remains the case in 3.3+
        sig = _signature_from_callable(func,
                                       follow_wrapper_chains=False,
                                       skip_bound_arg=False,
                                       sigcls=Signature)
    except Exception as ex:
        # Most of the times 'signature' will raise ValueError.
        # But, it can also raise AttributeError, and, maybe something
        # else. So to be fully backwards compatible, we catch all
        # possible exceptions here, and reraise a TypeError.
        raise TypeError('unsupported callable') from ex
    args = []
    varargs = None
    varkw = None
    kwonlyargs = []
    annotations = {}
    # Note: the previous version initialized 'defaults' twice; once is enough.
    defaults = ()
    kwdefaults = {}
    if sig.return_annotation is not sig.empty:
        annotations['return'] = sig.return_annotation
    for param in sig.parameters.values():
        kind = param.kind
        name = param.name
        if kind is _POSITIONAL_ONLY:
            args.append(name)
        elif kind is _POSITIONAL_OR_KEYWORD:
            args.append(name)
            if param.default is not param.empty:
                defaults += (param.default,)
        elif kind is _VAR_POSITIONAL:
            varargs = name
        elif kind is _KEYWORD_ONLY:
            kwonlyargs.append(name)
            if param.default is not param.empty:
                kwdefaults[name] = param.default
        elif kind is _VAR_KEYWORD:
            varkw = name
        if param.annotation is not param.empty:
            annotations[name] = param.annotation
    if not kwdefaults:
        # compatibility with 'func.__kwdefaults__'
        kwdefaults = None
    if not defaults:
        # compatibility with 'func.__defaults__'
        defaults = None
    return FullArgSpec(args, varargs, varkw, defaults,
                       kwonlyargs, kwdefaults, annotations)
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')

def getargvalues(frame):
    """Get information about arguments passed into a particular frame.

    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'locals' is the locals dictionary of the given frame."""
    names, star, dstar = getargs(frame.f_code)
    return ArgInfo(names, star, dstar, frame.f_locals)
def formatannotation(annotation, base_module=None):
    """Return a display string for *annotation*.

    Classes are rendered by qualified name, omitting the module prefix
    for builtins and for *base_module*; any other object uses repr().
    """
    if not isinstance(annotation, type):
        return repr(annotation)
    module = annotation.__module__
    if module == 'builtins' or module == base_module:
        return annotation.__qualname__
    return '{}.{}'.format(module, annotation.__qualname__)
def formatannotationrelativeto(object):
    """Return a one-argument annotation formatter that treats *object*'s
    module as the base module, so names from that module render unqualified."""
    base = getattr(object, '__module__', None)
    def _formatannotation(annotation):
        return formatannotation(annotation, base)
    return _formatannotation
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  kwonlyargs=(), kwonlydefaults={}, annotations={},
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  formatreturns=lambda text: ' -> ' + text,
                  formatannotation=formatannotation):
    """Format an argument spec from the values returned by getargspec
    or getfullargspec.

    The first seven arguments are (args, varargs, varkw, defaults,
    kwonlyargs, kwonlydefaults, annotations).  The other five arguments
    are the corresponding optional formatting functions that are called to
    turn names and values into strings.  The last argument is an optional
    function to format the sequence of arguments.

    Returns a string of the form '(arg1, arg2=default, *args, kw, **kwargs)'
    with an optional ' -> annotation' suffix for the return annotation.
    """
    def formatargandannotation(arg):
        # Render "name" or "name: annotation".
        result = formatarg(arg)
        if arg in annotations:
            result += ': ' + formatannotation(annotations[arg])
        return result
    specs = []
    if defaults:
        # Defaults align with the *last* len(defaults) positional args.
        firstdefault = len(args) - len(defaults)
    for i, arg in enumerate(args):
        spec = formatargandannotation(arg)
        if defaults and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs is not None:
        specs.append(formatvarargs(formatargandannotation(varargs)))
    else:
        if kwonlyargs:
            # A bare '*' separates keyword-only args when there is no *args.
            specs.append('*')
    if kwonlyargs:
        for kwonlyarg in kwonlyargs:
            spec = formatargandannotation(kwonlyarg)
            if kwonlydefaults and kwonlyarg in kwonlydefaults:
                spec += formatvalue(kwonlydefaults[kwonlyarg])
            specs.append(spec)
    if varkw is not None:
        specs.append(formatvarkw(formatargandannotation(varkw)))
    result = '(' + ', '.join(specs) + ')'
    if 'return' in annotations:
        result += formatreturns(formatannotation(annotations['return']))
    return result
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value)):
    """Format an argument spec from the 4 values returned by getargvalues.

    The first four arguments are (args, varargs, varkw, locals).  The
    next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings."""
    def convert(name, locals=locals,
                formatarg=formatarg, formatvalue=formatvalue):
        # "name=value" using the frame's current binding for the name.
        return formatarg(name) + formatvalue(locals[name])
    specs = [convert(name) for name in args]
    if varargs:
        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    return '(' + ', '.join(specs) + ')'
def _missing_arguments(f_name, argnames, pos, values):
    """Raise a TypeError listing the required arguments absent from *values*.

    *pos* selects the wording: True -> "positional", False -> "keyword-only".
    Assumes at least one name in *argnames* is missing.
    """
    missing_names = [repr(name) for name in argnames if name not in values]
    count = len(missing_names)
    if count == 1:
        listing = missing_names[0]
    elif count == 2:
        listing = "{} and {}".format(*missing_names)
    else:
        # "a, b, c and d" — Oxford-free listing matching CPython's wording.
        head = ", ".join(missing_names[:-2])
        listing = head + ", {} and {}".format(*missing_names[-2:])
    kind = "positional" if pos else "keyword-only"
    plural = "" if count == 1 else "s"
    raise TypeError("%s() missing %i required %s argument%s: %s" %
                    (f_name, count, kind, plural, listing))
def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
    """Raise a TypeError describing a surplus of positional arguments,
    mirroring the interpreter's own wording."""
    atleast = len(args) - defcount
    kwonly_given = sum(1 for arg in kwonly if arg in values)
    if varargs:
        sig = "at least %d" % (atleast,)
        plural = atleast != 1
    elif defcount:
        sig = "from %d to %d" % (atleast, len(args))
        plural = True
    else:
        sig = str(len(args))
        plural = len(args) != 1
    kwonly_sig = ""
    if kwonly_given:
        msg = " positional argument%s (and %d keyword-only argument%s)"
        kwonly_sig = msg % ("s" if given != 1 else "", kwonly_given,
                            "s" if kwonly_given != 1 else "")
    verb = "was" if given == 1 and not kwonly_given else "were"
    raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
                    (f_name, sig, "s" if plural else "", given, kwonly_sig,
                     verb))
def getcallargs(*func_and_positional, **named):
    """Get the mapping of arguments to values.

    A dict is returned, with keys the function argument names (including the
    names of the * and ** arguments, if any), and values the respective bound
    values from 'positional' and 'named'.

    Raises TypeError when the call would be illegal (missing, surplus or
    duplicated arguments), mirroring the interpreter's own messages.
    """
    # The target function is smuggled in as the first positional so that a
    # keyword argument literally named 'func' can still be forwarded to it.
    func = func_and_positional[0]
    positional = func_and_positional[1:]
    spec = getfullargspec(func)
    args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
    f_name = func.__name__
    arg2value = {}
    if ismethod(func) and func.__self__ is not None:
        # implicit 'self' (or 'cls' for classmethods) argument
        positional = (func.__self__,) + positional
    num_pos = len(positional)
    num_args = len(args)
    num_defaults = len(defaults) if defaults else 0
    # Bind as many positionals to named parameters as possible; overflow
    # (if any) goes into *varargs below.
    n = min(num_pos, num_args)
    for i in range(n):
        arg2value[args[i]] = positional[i]
    if varargs:
        arg2value[varargs] = tuple(positional[n:])
    possible_kwargs = set(args + kwonlyargs)
    if varkw:
        arg2value[varkw] = {}
    for kw, value in named.items():
        if kw not in possible_kwargs:
            if not varkw:
                raise TypeError("%s() got an unexpected keyword argument %r" %
                                (f_name, kw))
            arg2value[varkw][kw] = value
            continue
        if kw in arg2value:
            raise TypeError("%s() got multiple values for argument %r" %
                            (f_name, kw))
        arg2value[kw] = value
    if num_pos > num_args and not varargs:
        _too_many(f_name, args, kwonlyargs, varargs, num_defaults,
                  num_pos, arg2value)
    if num_pos < num_args:
        # Remaining positional parameters must come from keywords/defaults.
        req = args[:num_args - num_defaults]
        for arg in req:
            if arg not in arg2value:
                _missing_arguments(f_name, req, True, arg2value)
        for i, arg in enumerate(args[num_args - num_defaults:]):
            if arg not in arg2value:
                arg2value[arg] = defaults[i]
    missing = 0
    for kwarg in kwonlyargs:
        if kwarg not in arg2value:
            if kwonlydefaults and kwarg in kwonlydefaults:
                arg2value[kwarg] = kwonlydefaults[kwarg]
            else:
                missing += 1
    if missing:
        _missing_arguments(f_name, kwonlyargs, False, arg2value)
    return arg2value
ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')

def getclosurevars(func):
    """
    Get the mapping of free variables to their current values.

    Returns a named tuple of dicts mapping the current nonlocal, global
    and builtin references as seen by the body of the function. A final
    set of unbound names that could not be resolved is also provided.
    """
    if ismethod(func):
        func = func.__func__
    if not isfunction(func):
        raise TypeError("'{!r}' is not a Python function".format(func))
    code = func.__code__
    # Free (nonlocal) names resolve positionally through __closure__.
    closure = func.__closure__
    if closure is None:
        nonlocal_vars = {}
    else:
        nonlocal_vars = dict(zip(code.co_freevars,
                                 (cell.cell_contents for cell in closure)))
    # Global and builtin references are resolved by name, first in
    # __globals__ and then in the builtin namespace.
    global_ns = func.__globals__
    builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
    if ismodule(builtin_ns):
        builtin_ns = builtin_ns.__dict__
    global_vars = {}
    builtin_vars = {}
    unbound_names = set()
    for name in code.co_names:
        # "None"/"True"/"False" used to be builtins rather than keywords,
        # so they may still show up as name references; ignore them.
        if name in ("None", "True", "False"):
            continue
        if name in global_ns:
            global_vars[name] = global_ns[name]
        elif name in builtin_ns:
            builtin_vars[name] = builtin_ns[name]
        else:
            unbound_names.add(name)
    return ClosureVars(nonlocal_vars, global_vars,
                       builtin_vars, unbound_names)
# -------------------------------------------------- stack frame extraction
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.

    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line."""
    if istraceback(frame):
        # For a traceback, report the line being executed in its frame.
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('{!r} is not a frame or traceback object'.format(frame))
    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        # Center the context window on the current line.
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except OSError:
            # Source unavailable: return no context at all.
            lines = index = None
        else:
            # Clamp the window to the bounds of the source file.
            start = max(start, 1)
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None
    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
    """Return the current line number of *frame*.

    Kept as a function for historical reasons; FrameType.f_lineno is a
    descriptor that derives the value from the code object on access.
    """
    return frame.f_lineno
FrameInfo = namedtuple('FrameInfo', ('frame',) + Traceback._fields)

def getouterframes(frame, context=1):
    """Get a list of records for a frame and all higher (calling) frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    framelist = []
    current = frame
    while current is not None:
        framelist.append(FrameInfo(current, *getframeinfo(current, context)))
        current = current.f_back
    return framelist
def getinnerframes(tb, context=1):
    """Get a list of records for a traceback's frame and all lower frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    framelist = []
    current = tb
    while current is not None:
        info = getframeinfo(current, context)
        framelist.append(FrameInfo(current.tb_frame, *info))
        current = current.tb_next
    return framelist
def currentframe():
    """Return the frame of the caller or None if this is not possible."""
    getframe = getattr(sys, '_getframe', None)
    if getframe is None:
        # Implementations without frame introspection (no sys._getframe).
        return None
    return getframe(1)
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    caller = sys._getframe(1)
    return getouterframes(caller, context)
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    current_tb = sys.exc_info()[2]
    return getinnerframes(current_tb, context)
# ------------------------------------------------ static version of getattr
_sentinel = object()    # unique "not found" marker; never exposed to callers

def _static_getmro(klass):
    """Return klass.__mro__ via type's own descriptor, so a metaclass
    cannot intercept the lookup."""
    return type.__dict__['__mro__'].__get__(klass)
def _check_instance(obj, attr):
    """Look up *attr* in obj's instance __dict__ only (no descriptors).

    Returns _sentinel when the object has no __dict__ or lacks the key.
    """
    try:
        instance_dict = object.__getattribute__(obj, "__dict__")
    except AttributeError:
        instance_dict = {}
    # dict.get is called unbound so a dict subclass cannot override lookup.
    return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
    """Search the MRO's class __dict__s for *attr*, without triggering
    descriptors; classes whose '__dict__' attribute is shadowed are skipped.

    Returns _sentinel when the attribute is not found.
    """
    for entry in _static_getmro(klass):
        if _shadowed_dict(type(entry)) is not _sentinel:
            continue
        try:
            return entry.__dict__[attr]
        except KeyError:
            pass
    return _sentinel
def _is_type(obj):
    """Return True if *obj* is a class, judged without invoking any dynamic
    attribute machinery (uses the static MRO lookup)."""
    try:
        _static_getmro(obj)
        return True
    except TypeError:
        return False
def _shadowed_dict(klass):
    """Return the class-level '__dict__' attribute if some class in klass's
    MRO overrides it with an ordinary entry; _sentinel if every '__dict__'
    found is the standard getset descriptor.
    """
    dict_attr = type.__dict__["__dict__"]
    for entry in _static_getmro(klass):
        try:
            class_dict = dict_attr.__get__(entry)["__dict__"]
        except KeyError:
            pass
        else:
            # The genuine descriptor is a getset named '__dict__' whose
            # __objclass__ is the class itself; anything else shadows it.
            if not (type(class_dict) is types.GetSetDescriptorType and
                    class_dict.__name__ == "__dict__" and
                    class_dict.__objclass__ is entry):
                return class_dict
    return _sentinel
def getattr_static(obj, attr, default=_sentinel):
    """Retrieve attributes without triggering dynamic lookup via the
    descriptor protocol, __getattr__ or __getattribute__.

    Note: this function may not be able to retrieve all attributes
    that getattr can fetch (like dynamically created attributes)
    and may find attributes that getattr can't (like descriptors
    that raise AttributeError). It can also return descriptor objects
    instead of instance members in some cases. See the
    documentation for details.
    """
    instance_result = _sentinel
    if not _is_type(obj):
        klass = type(obj)
        dict_attr = _shadowed_dict(klass)
        # Only trust the instance __dict__ when it is not shadowed by a
        # class-level '__dict__' entry (a slots member descriptor is fine).
        if (dict_attr is _sentinel or
            type(dict_attr) is types.MemberDescriptorType):
            instance_result = _check_instance(obj, attr)
    else:
        klass = obj
    klass_result = _check_class(klass, attr)
    # A data descriptor (has both __get__ and __set__) on the class takes
    # precedence over the instance dict, mirroring normal lookup order.
    if instance_result is not _sentinel and klass_result is not _sentinel:
        if (_check_class(type(klass_result), '__get__') is not _sentinel and
            _check_class(type(klass_result), '__set__') is not _sentinel):
            return klass_result
    if instance_result is not _sentinel:
        return instance_result
    if klass_result is not _sentinel:
        return klass_result
    if obj is klass:
        # for types we check the metaclass too
        for entry in _static_getmro(type(klass)):
            if _shadowed_dict(type(entry)) is _sentinel:
                try:
                    return entry.__dict__[attr]
                except KeyError:
                    pass
    if default is not _sentinel:
        return default
    raise AttributeError(attr)
# ------------------------------------------------ generator introspection
GEN_CREATED = 'GEN_CREATED'
GEN_RUNNING = 'GEN_RUNNING'
GEN_SUSPENDED = 'GEN_SUSPENDED'
GEN_CLOSED = 'GEN_CLOSED'

def getgeneratorstate(generator):
    """Get current state of a generator-iterator.

    Possible states are:
      GEN_CREATED: Waiting to start execution.
      GEN_RUNNING: Currently being executed by the interpreter.
      GEN_SUSPENDED: Currently suspended at a yield expression.
      GEN_CLOSED: Execution has completed.
    """
    if generator.gi_running:
        return GEN_RUNNING
    frame = generator.gi_frame
    if frame is None:
        # A finished/closed generator drops its frame.
        return GEN_CLOSED
    # f_lasti == -1 means no bytecode has executed yet.
    return GEN_CREATED if frame.f_lasti == -1 else GEN_SUSPENDED
def getgeneratorlocals(generator):
    """
    Get the mapping of generator local variables to their current values.

    A dict is returned, with the keys the local variable names and values the
    bound values."""
    if not isgenerator(generator):
        raise TypeError("'{!r}' is not a Python generator".format(generator))
    frame = getattr(generator, "gi_frame", None)
    # A finished generator has dropped its frame; report no locals then.
    return frame.f_locals if frame is not None else {}
###############################################################################
### Function Signature Object (PEP 362)
###############################################################################
_WrapperDescriptor = type(type.__call__)                # unbound slot wrapper
_MethodWrapper = type(all.__call__)                     # bound method-wrapper
_ClassMethodWrapper = type(int.__dict__['from_bytes'])  # classmethod descriptor
# Callable types implemented in C; they carry no pure-Python signature info.
_NonUserDefinedCallables = (_WrapperDescriptor,
                            _MethodWrapper,
                            _ClassMethodWrapper,
                            types.BuiltinFunctionType)
def _signature_get_user_defined_method(cls, method_name):
    """Return ``cls.<method_name>`` only if it is a pure Python callable.

    C-level callables (slot wrappers, method wrappers, classmethod
    descriptors, builtin functions) yield None instead, since they carry
    no introspectable Python signature.
    """
    meth = getattr(cls, method_name, None)
    if isinstance(meth, _NonUserDefinedCallables):
        # Once '__signature__' is supported on C-level callables this
        # filtering can go away.
        return None
    return meth
def _signature_get_partial(wrapped_sig, partial, extra_args=()):
    """Private helper to calculate how 'wrapped_sig' signature will
    look like after applying a 'functools.partial' object (or alike)
    on it.

    'extra_args' is prepended to the partial's stored positional args
    (used for partialmethod, where the bound 'self' comes first).
    Raises ValueError if the partial's stored arguments do not fit
    'wrapped_sig'.
    """
    old_params = wrapped_sig.parameters
    new_params = OrderedDict(old_params.items())
    partial_args = partial.args or ()
    partial_keywords = partial.keywords or {}
    if extra_args:
        partial_args = extra_args + partial_args
    try:
        # Validate the stored arguments against the wrapped signature.
        ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords)
    except TypeError as ex:
        msg = 'partial object {!r} has incorrect arguments'.format(partial)
        raise ValueError(msg) from ex
    transform_to_kwonly = False
    for param_name, param in old_params.items():
        try:
            arg_value = ba.arguments[param_name]
        except KeyError:
            pass
        else:
            if param.kind is _POSITIONAL_ONLY:
                # If positional-only parameter is bound by partial,
                # it effectively disappears from the signature
                new_params.pop(param_name)
                continue
            if param.kind is _POSITIONAL_OR_KEYWORD:
                if param_name in partial_keywords:
                    # This means that this parameter, and all parameters
                    # after it should be keyword-only (and var-positional
                    # should be removed). Here's why. Consider the following
                    # function:
                    #     foo(a, b, *args, c):
                    #         pass
                    #
                    # "partial(foo, a='spam')" will have the following
                    # signature: "(*, a='spam', b, c)". Because attempting
                    # to call that partial with "(10, 20)" arguments will
                    # raise a TypeError, saying that "a" argument received
                    # multiple values.
                    transform_to_kwonly = True
                    # Set the new default value
                    new_params[param_name] = param.replace(default=arg_value)
                else:
                    # was passed as a positional argument
                    new_params.pop(param.name)
                    continue
            if param.kind is _KEYWORD_ONLY:
                # Set the new default value
                new_params[param_name] = param.replace(default=arg_value)
        if transform_to_kwonly:
            # Everything after the first keyword-bound POSITIONAL_OR_KEYWORD
            # parameter must become keyword-only; *args is dropped entirely.
            assert param.kind is not _POSITIONAL_ONLY
            if param.kind is _POSITIONAL_OR_KEYWORD:
                new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY)
                new_params[param_name] = new_param
                new_params.move_to_end(param_name)
            elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD):
                new_params.move_to_end(param_name)
            elif param.kind is _VAR_POSITIONAL:
                new_params.pop(param.name)
    return wrapped_sig.replace(parameters=new_params.values())
def _signature_bound_method(sig):
    """Private helper to transform signatures for unbound
    functions to bound methods.

    Raises ValueError when the signature cannot absorb a bound first
    argument (empty, or starting with a keyword-only / **kwargs param).
    """
    params = tuple(sig.parameters.values())
    if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
        raise ValueError('invalid method signature')
    first_kind = params[0].kind
    if first_kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):
        # Binding consumes the first parameter ('self'):
        # '(p1, p2[, ...])' -> '(p2[, ...])'
        params = params[1:]
    elif first_kind is not _VAR_POSITIONAL:
        # Defensive: no other parameter kinds exist today.
        raise ValueError('invalid argument type')
    # A leading *args absorbs 'self', so the parameter list is unchanged.
    return sig.replace(parameters=params)
def _signature_is_builtin(obj):
    """Private helper to test if `obj` is a callable that might
    support Argument Clinic's __text_signature__ protocol.
    """
    if isbuiltin(obj) or ismethoddescriptor(obj):
        return True
    if isinstance(obj, _NonUserDefinedCallables):
        return True
    # 'type' and 'object' are special-cased explicitly; testing
    # isinstance(obj, type) would also match ordinary Python classes.
    return obj in (type, object)
def _signature_is_functionlike(obj):
    """Private helper to test if `obj` is a duck type of FunctionType.

    A good example of such objects are functions compiled with
    Cython, which have all attributes that a pure Python function
    would have, but have their code statically compiled.
    """
    if not callable(obj) or isclass(obj):
        # All function-like objects are obviously callables,
        # and not classes.
        return False
    if not isinstance(getattr(obj, '__code__', None), types.CodeType):
        return False
    if not isinstance(getattr(obj, '__name__', None), str):
        return False
    # Use _void (not None) as the getattr default: a missing attribute must
    # fail the check, while an attribute that is genuinely None is valid.
    defaults = getattr(obj, '__defaults__', _void)
    if defaults is not None and not isinstance(defaults, tuple):
        return False
    kwdefaults = getattr(obj, '__kwdefaults__', _void)
    if kwdefaults is not None and not isinstance(kwdefaults, dict):
        return False
    return isinstance(getattr(obj, '__annotations__', None), dict)
def _signature_get_bound_param(spec):
    """Extract the name of the first (bound) parameter from a builtin
    method's __text_signature__, which has the form '($name, ...)'.

    The bound parameter is assumed to carry no default value and no
    annotation.
    """
    assert spec.startswith('($')
    end = spec.find(',')
    if end == -1:
        end = spec.find(')')
    # Sanity check: any ':' or '=' must belong to a later parameter.
    cpos = spec.find(':')
    assert cpos == -1 or cpos > end
    cpos = spec.find('=')
    assert cpos == -1 or cpos > end
    return spec[2:end]
def _signature_strip_non_python_syntax(signature):
    """
    Private helper function. Takes a signature in Argument Clinic's
    extended signature format.

    Returns a tuple of three things:
    * that signature re-rendered in standard Python syntax,
    * the index of the "self" parameter (generally 0), or None if
      the function does not have a "self" parameter, and
    * the index of the last "positional only" parameter,
      or None if the signature has no positional-only parameters.
    """
    if not signature:
        return signature, None, None
    self_parameter = None
    last_positional_only = None
    lines = [l.encode('ascii') for l in signature.split('\n')]
    generator = iter(lines).__next__
    token_stream = tokenize.tokenize(generator)
    delayed_comma = False
    skip_next_comma = False
    text = []
    add = text.append
    current_parameter = 0
    OP = token.OP
    ERRORTOKEN = token.ERRORTOKEN
    # token stream always starts with ENCODING token, skip it
    t = next(token_stream)
    assert t.type == tokenize.ENCODING
    for t in token_stream:
        type, string = t.type, t.string
        if type == OP:
            if string == ',':
                if skip_next_comma:
                    # Drop the comma that followed a '/' marker.
                    skip_next_comma = False
                else:
                    assert not delayed_comma
                    delayed_comma = True
                    current_parameter += 1
                continue
            if string == '/':
                # '/' marks the end of the positional-only parameters;
                # it is stripped from the re-rendered signature.
                assert not skip_next_comma
                assert last_positional_only is None
                skip_next_comma = True
                last_positional_only = current_parameter - 1
                continue
        if (type == ERRORTOKEN) and (string == '$'):
            # '$' prefixes the implicit bound parameter (self/module).
            assert self_parameter is None
            self_parameter = current_parameter
            continue
        if delayed_comma:
            # Commas are emitted lazily so a trailing comma before ')'
            # (left behind by a stripped marker) is discarded.
            delayed_comma = False
            if not ((type == OP) and (string == ')')):
                add(', ')
        add(string)
        if (string == ','):
            add(' ')
    clean_signature = ''.join(text)
    return clean_signature, self_parameter, last_positional_only
def _signature_fromstr(cls, obj, s, skip_bound_arg=True):
    """Private helper to parse content of '__text_signature__'
    and return a Signature based on it.

    Args:
        cls: the Signature class (or subclass) to instantiate.
        obj: the callable the text signature belongs to; used in error
            messages and to resolve symbolic default values against the
            callable's module namespace.
        s: the text signature string, e.g. "($self, obj, /)".
        skip_bound_arg: if True, strip the bound argument (marked with
            '$' in the text signature) from the result.

    Raises:
        ValueError: if *s* cannot be parsed into a valid signature.
    """
    Parameter = cls._parameter_cls

    clean_signature, self_parameter, last_positional_only = \
        _signature_strip_non_python_syntax(s)

    # Embed the cleaned signature into a dummy function definition so the
    # 'ast' module can do the actual parsing for us.
    program = "def foo" + clean_signature + ": pass"
    try:
        module = ast.parse(program)
    except SyntaxError:
        module = None
    if not isinstance(module, ast.Module):
        raise ValueError("{!r} builtin has invalid signature".format(obj))
    f = module.body[0]

    parameters = []
    empty = Parameter.empty
    invalid = object()  # sentinel: "default value could not be evaluated"

    # Build namespaces used to resolve symbolic default values
    # (e.g. "sys.maxsize") appearing in the text signature.
    module = None
    module_dict = {}
    module_name = getattr(obj, '__module__', None)
    if module_name:
        module = sys.modules.get(module_name, None)
        if module:
            module_dict = module.__dict__
    sys_module_dict = sys.modules

    def parse_name(node):
        # Extract a plain parameter name; text signatures may not carry
        # annotations.
        assert isinstance(node, ast.arg)
        # Identity test instead of '!= None' (PEP 8): the annotation slot
        # is either None or an AST node.
        if node.annotation is not None:
            raise ValueError("Annotations are not currently supported")
        return node.arg

    def wrap_value(s):
        # Evaluate a symbolic default first against the object's own
        # module, then against sys.modules, and re-wrap the result as a
        # literal AST node so ast.literal_eval will accept it.
        try:
            value = eval(s, module_dict)
        except NameError:
            try:
                value = eval(s, sys_module_dict)
            except NameError:
                raise RuntimeError()

        if isinstance(value, str):
            return ast.Str(value)
        if isinstance(value, (int, float)):
            return ast.Num(value)
        if isinstance(value, bytes):
            return ast.Bytes(value)
        if value in (True, False, None):
            return ast.NameConstant(value)
        raise RuntimeError()

    class RewriteSymbolics(ast.NodeTransformer):
        # Rewrites Name/Attribute references inside default expressions
        # into literal nodes holding their evaluated values.
        def visit_Attribute(self, node):
            a = []
            n = node
            while isinstance(n, ast.Attribute):
                a.append(n.attr)
                n = n.value
            if not isinstance(n, ast.Name):
                raise RuntimeError()
            a.append(n.id)
            value = ".".join(reversed(a))
            return wrap_value(value)

        def visit_Name(self, node):
            if not isinstance(node.ctx, ast.Load):
                raise ValueError()
            return wrap_value(node.id)

    def p(name_node, default_node, default=empty):
        # Append one Parameter built from the AST nodes.  'kind' is read
        # from the enclosing scope; the caller mutates it as it walks the
        # different argument sections.
        name = parse_name(name_node)
        if name is invalid:
            return None
        if default_node and default_node is not _empty:
            try:
                default_node = RewriteSymbolics().visit(default_node)
                o = ast.literal_eval(default_node)
            except ValueError:
                o = invalid
            if o is invalid:
                return None
            default = o if o is not invalid else default
        parameters.append(Parameter(name, kind, default=default, annotation=empty))

    # non-keyword-only parameters
    args = reversed(f.args.args)
    defaults = reversed(f.args.defaults)
    iter = itertools.zip_longest(args, defaults, fillvalue=None)
    if last_positional_only is not None:
        kind = Parameter.POSITIONAL_ONLY
    else:
        kind = Parameter.POSITIONAL_OR_KEYWORD
    for i, (name, default) in enumerate(reversed(list(iter))):
        p(name, default)
        if i == last_positional_only:
            kind = Parameter.POSITIONAL_OR_KEYWORD

    # *args
    if f.args.vararg:
        kind = Parameter.VAR_POSITIONAL
        p(f.args.vararg, empty)

    # keyword-only arguments
    kind = Parameter.KEYWORD_ONLY
    for name, default in zip(f.args.kwonlyargs, f.args.kw_defaults):
        p(name, default)

    # **kwargs
    if f.args.kwarg:
        kind = Parameter.VAR_KEYWORD
        p(f.args.kwarg, empty)

    if self_parameter is not None:
        # Possibly strip the bound argument:
        # - We *always* strip first bound argument if
        #   it is a module.
        # - We don't strip first bound argument if
        #   skip_bound_arg is False.
        assert parameters
        _self = getattr(obj, '__self__', None)
        self_isbound = _self is not None
        self_ismodule = ismodule(_self)
        if self_isbound and (self_ismodule or skip_bound_arg):
            parameters.pop(0)
        else:
            # for builtins, self parameter is always positional-only!
            p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY)
            parameters[0] = p

    return cls(parameters, return_annotation=cls.empty)
def _signature_from_builtin(cls, func, skip_bound_arg=True):
    """Build a Signature (an instance of *cls*) for a builtin callable.

    Delegates to ``_signature_fromstr`` using the builtin's
    ``__text_signature__`` attribute; raises TypeError for non-builtins
    and ValueError when no text signature is available.
    """
    if not _signature_is_builtin(func):
        raise TypeError("{!r} is not a Python builtin "
                        "function".format(func))

    text_sig = getattr(func, "__text_signature__", None)
    if not text_sig:
        raise ValueError("no signature found for builtin {!r}".format(func))

    return _signature_fromstr(cls, func, text_sig, skip_bound_arg)
def _signature_from_callable(obj, *,
                             follow_wrapper_chains=True,
                             skip_bound_arg=True,
                             sigcls):
    """Private helper function to get signature for arbitrary
    callable objects.

    Dispatches on the kind of *obj* (bound method, decorated wrapper,
    partialmethod, plain function, builtin, functools.partial, class, or
    instance with a user-defined __call__) and recurses until a
    signature can be produced.

    Args:
        obj: the callable to inspect.
        follow_wrapper_chains: if True, unwrap __wrapped__ chains
            (set by functools.wraps) before inspecting.
        skip_bound_arg: if True, drop the implicitly bound first
            parameter (self/cls) for methods and class constructors.
        sigcls: the Signature class (or subclass) used to build results.

    Raises:
        TypeError: if *obj* is not callable or has a bogus __signature__.
        ValueError: if no signature can be determined for *obj*.
    """
    if not callable(obj):
        raise TypeError('{!r} is not a callable object'.format(obj))

    if isinstance(obj, types.MethodType):
        # In this case we skip the first parameter of the underlying
        # function (usually `self` or `cls`).
        sig = _signature_from_callable(
            obj.__func__,
            follow_wrapper_chains=follow_wrapper_chains,
            skip_bound_arg=skip_bound_arg,
            sigcls=sigcls)
        if skip_bound_arg:
            return _signature_bound_method(sig)
        else:
            return sig

    # Was this function wrapped by a decorator?
    if follow_wrapper_chains:
        obj = unwrap(obj, stop=(lambda f: hasattr(f, "__signature__")))

    # An explicitly attached __signature__ always wins.
    try:
        sig = obj.__signature__
    except AttributeError:
        pass
    else:
        if sig is not None:
            if not isinstance(sig, Signature):
                raise TypeError(
                    'unexpected object {!r} in __signature__ '
                    'attribute'.format(sig))
            return sig

    try:
        partialmethod = obj._partialmethod
    except AttributeError:
        pass
    else:
        if isinstance(partialmethod, functools.partialmethod):
            # Unbound partialmethod (see functools.partialmethod)
            # This means, that we need to calculate the signature
            # as if it's a regular partial object, but taking into
            # account that the first positional argument
            # (usually `self`, or `cls`) will not be passed
            # automatically (as for boundmethods)
            wrapped_sig = _signature_from_callable(
                partialmethod.func,
                follow_wrapper_chains=follow_wrapper_chains,
                skip_bound_arg=skip_bound_arg,
                sigcls=sigcls)
            sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))
            # Re-attach the first (self/cls) parameter that the partial
            # application above consumed with the (None,) placeholder.
            first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]
            new_params = (first_wrapped_param,) + tuple(sig.parameters.values())
            return sig.replace(parameters=new_params)

    if isfunction(obj) or _signature_is_functionlike(obj):
        # If it's a pure Python function, or an object that is duck type
        # of a Python function (Cython functions, for instance), then:
        return sigcls.from_function(obj)

    if _signature_is_builtin(obj):
        return _signature_from_builtin(sigcls, obj,
                                       skip_bound_arg=skip_bound_arg)

    if isinstance(obj, functools.partial):
        wrapped_sig = _signature_from_callable(
            obj.func,
            follow_wrapper_chains=follow_wrapper_chains,
            skip_bound_arg=skip_bound_arg,
            sigcls=sigcls)
        return _signature_get_partial(wrapped_sig, obj)

    sig = None
    if isinstance(obj, type):
        # obj is a class or a metaclass

        # First, let's see if it has an overloaded __call__ defined
        # in its metaclass
        call = _signature_get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = _signature_from_callable(
                call,
                follow_wrapper_chains=follow_wrapper_chains,
                skip_bound_arg=skip_bound_arg,
                sigcls=sigcls)
        else:
            # Now we check if the 'obj' class has a '__new__' method
            new = _signature_get_user_defined_method(obj, '__new__')
            if new is not None:
                sig = _signature_from_callable(
                    new,
                    follow_wrapper_chains=follow_wrapper_chains,
                    skip_bound_arg=skip_bound_arg,
                    sigcls=sigcls)
            else:
                # Finally, we should have at least __init__ implemented
                init = _signature_get_user_defined_method(obj, '__init__')
                if init is not None:
                    sig = _signature_from_callable(
                        init,
                        follow_wrapper_chains=follow_wrapper_chains,
                        skip_bound_arg=skip_bound_arg,
                        sigcls=sigcls)
        if sig is None:
            # At this point we know, that `obj` is a class, with no user-
            # defined '__init__', '__new__', or class-level '__call__'
            for base in obj.__mro__[:-1]:
                # Since '__text_signature__' is implemented as a
                # descriptor that extracts text signature from the
                # class docstring, if 'obj' is derived from a builtin
                # class, its own '__text_signature__' may be 'None'.
                # Therefore, we go through the MRO (except the last
                # class in there, which is 'object') to find the first
                # class with non-empty text signature.
                try:
                    text_sig = base.__text_signature__
                except AttributeError:
                    pass
                else:
                    if text_sig:
                        # If 'obj' class has a __text_signature__ attribute:
                        # return a signature based on it
                        return _signature_fromstr(sigcls, obj, text_sig)
            # No '__text_signature__' was found for the 'obj' class.
            # Last option is to check if its '__init__' is
            # object.__init__ or type.__init__.
            if type not in obj.__mro__:
                # We have a class (not metaclass), but no user-defined
                # __init__ or __new__ for it
                if obj.__init__ is object.__init__:
                    # Return a signature of 'object' builtin.
                    return signature(object)
    elif not isinstance(obj, _NonUserDefinedCallables):
        # An object with __call__
        # We also check that the 'obj' is not an instance of
        # _WrapperDescriptor or _MethodWrapper to avoid
        # infinite recursion (and even potential segfault)
        call = _signature_get_user_defined_method(type(obj), '__call__')
        if call is not None:
            try:
                sig = _signature_from_callable(
                    call,
                    follow_wrapper_chains=follow_wrapper_chains,
                    skip_bound_arg=skip_bound_arg,
                    sigcls=sigcls)
            except ValueError as ex:
                msg = 'no signature found for {!r}'.format(obj)
                raise ValueError(msg) from ex
    if sig is not None:
        # For classes and objects we skip the first parameter of their
        # __call__, __new__, or __init__ methods
        if skip_bound_arg:
            return _signature_bound_method(sig)
        else:
            return sig
    if isinstance(obj, types.BuiltinFunctionType):
        # Raise a nicer error message for builtins
        msg = 'no signature found for builtin function {!r}'.format(obj)
        raise ValueError(msg)
    raise ValueError('callable {!r} is not supported by signature'.format(obj))
class _void:
    """A private marker - used in Parameter & Signature.

    Distinct from `_empty` so that `replace()` can tell "argument not
    supplied" apart from "explicitly set to empty".
    """
class _empty:
    """Marker object for Signature.empty and Parameter.empty.

    Used where `None` would be ambiguous (a parameter may legitimately
    default to, or be annotated with, None).
    """
class _ParameterKind(enum.IntEnum):
POSITIONAL_ONLY = 0
POSITIONAL_OR_KEYWORD = 1
VAR_POSITIONAL = 2
KEYWORD_ONLY = 3
VAR_KEYWORD = 4
def __str__(self):
return self._name_
# Module-level aliases for the _ParameterKind members; used internally to
# avoid repeated attribute lookups on the enum class.
_POSITIONAL_ONLY = _ParameterKind.POSITIONAL_ONLY
_POSITIONAL_OR_KEYWORD = _ParameterKind.POSITIONAL_OR_KEYWORD
_VAR_POSITIONAL = _ParameterKind.VAR_POSITIONAL
_KEYWORD_ONLY = _ParameterKind.KEYWORD_ONLY
_VAR_KEYWORD = _ParameterKind.VAR_KEYWORD
class Parameter:
    """Represents a parameter in a function signature.

    Has the following public attributes:

    * name : str
        The name of the parameter as a string.
    * default : object
        The default value for the parameter if specified.  If the
        parameter has no default value, this attribute is set to
        `Parameter.empty`.
    * annotation
        The annotation for the parameter if specified.  If the
        parameter has no annotation, this attribute is set to
        `Parameter.empty`.
    * kind : _ParameterKind
        Describes how argument values are bound to the parameter.
        Possible values: `Parameter.POSITIONAL_ONLY`,
        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
    """

    __slots__ = ('_name', '_kind', '_default', '_annotation')

    POSITIONAL_ONLY = _POSITIONAL_ONLY
    POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
    VAR_POSITIONAL = _VAR_POSITIONAL
    KEYWORD_ONLY = _KEYWORD_ONLY
    VAR_KEYWORD = _VAR_KEYWORD

    empty = _empty

    def __init__(self, name, kind, *, default=_empty, annotation=_empty):
        if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
                        _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
            raise ValueError("invalid value for 'Parameter.kind' attribute")
        self._kind = kind

        if default is not _empty:
            # *args / **kwargs parameters can never carry a default.
            if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
                msg = '{} parameters cannot have default values'.format(kind)
                raise ValueError(msg)
        self._default = default
        self._annotation = annotation

        if name is _empty:
            raise ValueError('name is a required attribute for Parameter')

        if not isinstance(name, str):
            raise TypeError("name must be a str, not a {!r}".format(name))

        if not name.isidentifier():
            raise ValueError('{!r} is not a valid parameter name'.format(name))

        self._name = name

    def __reduce__(self):
        # Pickle support: rebuild from (name, kind), then restore the
        # remaining slots via __setstate__.
        return (type(self),
                (self._name, self._kind),
                {'_default': self._default,
                 '_annotation': self._annotation})

    def __setstate__(self, state):
        self._default = state['_default']
        self._annotation = state['_annotation']

    @property
    def name(self):
        return self._name

    @property
    def default(self):
        return self._default

    @property
    def annotation(self):
        return self._annotation

    @property
    def kind(self):
        return self._kind

    def replace(self, *, name=_void, kind=_void,
                annotation=_void, default=_void):
        """Creates a customized copy of the Parameter."""
        if name is _void:
            name = self._name
        if kind is _void:
            kind = self._kind
        if annotation is _void:
            annotation = self._annotation
        if default is _void:
            default = self._default
        return type(self)(name, kind, default=default, annotation=annotation)

    def __str__(self):
        kind = self.kind
        formatted = self._name

        # Add annotation and default value
        if self._annotation is not _empty:
            formatted = '{}:{}'.format(formatted,
                                       formatannotation(self._annotation))
        if self._default is not _empty:
            formatted = '{}={}'.format(formatted, repr(self._default))

        if kind == _VAR_POSITIONAL:
            formatted = '*' + formatted
        elif kind == _VAR_KEYWORD:
            formatted = '**' + formatted

        return formatted

    def __repr__(self):
        return '<{} at {:#x} "{}">'.format(self.__class__.__name__,
                                           id(self), self)

    def __hash__(self):
        return hash((self.name, self.kind, self.annotation, self.default))

    def __eq__(self, other):
        # Return NotImplemented (instead of a falsy chain result) for
        # foreign types so Python can try the reflected comparison;
        # observable '==' results for Parameter operands are unchanged.
        if self is other:
            return True
        if not isinstance(other, Parameter):
            return NotImplemented
        return (self._name == other._name and
                self._kind == other._kind and
                self._default == other._default and
                self._annotation == other._annotation)
class BoundArguments:
    """Result of `Signature.bind` call.  Holds the mapping of arguments
    to the function's parameters.

    Has the following public attributes:

    * arguments : OrderedDict
        An ordered mutable mapping of parameters' names to arguments' values.
        Does not contain arguments' default values.
    * signature : Signature
        The Signature object that created this instance.
    * args : tuple
        Tuple of positional arguments values.
    * kwargs : dict
        Dict of keyword arguments values.
    """

    def __init__(self, signature, arguments):
        self.arguments = arguments
        self._signature = signature

    @property
    def signature(self):
        return self._signature

    @property
    def args(self):
        """Bound arguments that can be passed positionally, as a tuple."""
        args = []
        for param_name, param in self._signature.parameters.items():
            if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                break
            try:
                arg = self.arguments[param_name]
            except KeyError:
                # We're done here. Other arguments
                # will be mapped in 'BoundArguments.kwargs'
                break
            else:
                if param.kind == _VAR_POSITIONAL:
                    # *args
                    args.extend(arg)
                else:
                    # plain argument
                    args.append(arg)
        return tuple(args)

    @property
    def kwargs(self):
        """Bound arguments that must be passed by keyword, as a dict."""
        kwargs = {}
        kwargs_started = False
        for param_name, param in self._signature.parameters.items():
            if not kwargs_started:
                if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                    kwargs_started = True
                else:
                    # A missing positional argument marks the point where
                    # everything after it must be passed by keyword.
                    if param_name not in self.arguments:
                        kwargs_started = True
                        continue
            if not kwargs_started:
                continue
            try:
                arg = self.arguments[param_name]
            except KeyError:
                pass
            else:
                if param.kind == _VAR_KEYWORD:
                    # **kwargs
                    kwargs.update(arg)
                else:
                    # plain keyword argument
                    kwargs[param_name] = arg
        return kwargs

    def __eq__(self, other):
        # Return NotImplemented (instead of a falsy chain result) for
        # foreign types so Python can try the reflected comparison;
        # observable '==' results for BoundArguments operands are unchanged.
        if self is other:
            return True
        if not isinstance(other, BoundArguments):
            return NotImplemented
        return (self.signature == other.signature and
                self.arguments == other.arguments)
class Signature:
    """A Signature object represents the overall signature of a function.
    It stores a Parameter object for each parameter accepted by the
    function, as well as information specific to the function itself.

    A Signature object has the following public attributes and methods:

    * parameters : OrderedDict
        An ordered mapping of parameters' names to the corresponding
        Parameter objects (keyword-only arguments are in the same order
        as listed in `code.co_varnames`).
    * return_annotation : object
        The annotation for the return type of the function if specified.
        If the function has no annotation for its return type, this
        attribute is set to `Signature.empty`.
    * bind(*args, **kwargs) -> BoundArguments
        Creates a mapping from positional and keyword arguments to
        parameters.
    * bind_partial(*args, **kwargs) -> BoundArguments
        Creates a partial mapping from positional and keyword arguments
        to parameters (simulating 'functools.partial' behavior.)
    """

    __slots__ = ('_return_annotation', '_parameters')

    _parameter_cls = Parameter
    _bound_arguments_cls = BoundArguments

    empty = _empty

    def __init__(self, parameters=None, *, return_annotation=_empty,
                 __validate_parameters__=True):
        """Constructs Signature from the given list of Parameter
        objects and 'return_annotation'.  All arguments are optional.
        """
        if parameters is None:
            params = OrderedDict()
        else:
            if __validate_parameters__:
                params = OrderedDict()
                top_kind = _POSITIONAL_ONLY
                kind_defaults = False

                for idx, param in enumerate(parameters):
                    kind = param.kind
                    name = param.name

                    if kind < top_kind:
                        msg = 'wrong parameter order: {!r} before {!r}'
                        msg = msg.format(top_kind, kind)
                        raise ValueError(msg)
                    elif kind > top_kind:
                        kind_defaults = False
                        top_kind = kind

                    if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD):
                        if param.default is _empty:
                            if kind_defaults:
                                # No default for this parameter, but the
                                # previous parameter of the same kind had
                                # a default
                                msg = 'non-default argument follows default ' \
                                      'argument'
                                raise ValueError(msg)
                        else:
                            # There is a default for this parameter.
                            kind_defaults = True

                    if name in params:
                        msg = 'duplicate parameter name: {!r}'.format(name)
                        raise ValueError(msg)

                    params[name] = param
            else:
                params = OrderedDict(((param.name, param)
                                      for param in parameters))

        self._parameters = types.MappingProxyType(params)
        self._return_annotation = return_annotation

    @classmethod
    def from_function(cls, func):
        """Constructs Signature for the given python function."""
        is_duck_function = False
        if not isfunction(func):
            if _signature_is_functionlike(func):
                is_duck_function = True
            else:
                # If it's not a pure Python function, and not a duck type
                # of pure function:
                raise TypeError('{!r} is not a Python function'.format(func))

        Parameter = cls._parameter_cls

        # Parameter information.
        func_code = func.__code__
        pos_count = func_code.co_argcount
        arg_names = func_code.co_varnames
        positional = tuple(arg_names[:pos_count])
        keyword_only_count = func_code.co_kwonlyargcount
        keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
        annotations = func.__annotations__
        defaults = func.__defaults__
        kwdefaults = func.__kwdefaults__

        if defaults:
            pos_default_count = len(defaults)
        else:
            pos_default_count = 0

        parameters = []

        # Non-keyword-only parameters w/o defaults.
        non_default_count = pos_count - pos_default_count
        for name in positional[:non_default_count]:
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD))

        # ... w/ defaults.
        for offset, name in enumerate(positional[non_default_count:]):
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD,
                                        default=defaults[offset]))

        # *args
        if func_code.co_flags & CO_VARARGS:
            name = arg_names[pos_count + keyword_only_count]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_POSITIONAL))

        # Keyword-only parameters.
        for name in keyword_only:
            default = _empty
            if kwdefaults is not None:
                default = kwdefaults.get(name, _empty)

            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_KEYWORD_ONLY,
                                        default=default))
        # **kwargs
        if func_code.co_flags & CO_VARKEYWORDS:
            index = pos_count + keyword_only_count
            if func_code.co_flags & CO_VARARGS:
                index += 1

            name = arg_names[index]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_KEYWORD))

        # Is 'func' is a pure Python function - don't validate the
        # parameters list (for correct order and defaults), it should be OK.
        return cls(parameters,
                   return_annotation=annotations.get('return', _empty),
                   __validate_parameters__=is_duck_function)

    @classmethod
    def from_builtin(cls, func):
        """Constructs Signature for the given builtin function."""
        return _signature_from_builtin(cls, func)

    @classmethod
    def from_callable(cls, obj):
        """Constructs Signature for the given callable object."""
        return _signature_from_callable(obj, sigcls=cls)

    @property
    def parameters(self):
        return self._parameters

    @property
    def return_annotation(self):
        return self._return_annotation

    def replace(self, *, parameters=_void, return_annotation=_void):
        """Creates a customized copy of the Signature.
        Pass 'parameters' and/or 'return_annotation' arguments
        to override them in the new copy.
        """
        if parameters is _void:
            parameters = self.parameters.values()

        if return_annotation is _void:
            return_annotation = self._return_annotation

        return type(self)(parameters,
                          return_annotation=return_annotation)

    def _hash_basis(self):
        # Keyword-only parameters are order-insensitive for hashing and
        # equality, hence the separate dict keyed by name.
        params = tuple(param for param in self.parameters.values()
                       if param.kind != _KEYWORD_ONLY)

        kwo_params = {param.name: param for param in self.parameters.values()
                      if param.kind == _KEYWORD_ONLY}

        return params, kwo_params, self.return_annotation

    def __hash__(self):
        params, kwo_params, return_annotation = self._hash_basis()
        kwo_params = frozenset(kwo_params.values())
        return hash((params, kwo_params, return_annotation))

    def __eq__(self, other):
        # Return NotImplemented (instead of False) for foreign types so
        # Python can try the reflected comparison; observable '=='
        # results for Signature operands are unchanged.
        if self is other:
            return True
        if not isinstance(other, Signature):
            return NotImplemented
        return self._hash_basis() == other._hash_basis()

    def _bind(self, args, kwargs, *, partial=False):
        """Private method. Don't use directly."""
        arguments = OrderedDict()

        parameters = iter(self.parameters.values())
        parameters_ex = ()
        arg_vals = iter(args)

        while True:
            # Let's iterate through the positional arguments and corresponding
            # parameters
            try:
                arg_val = next(arg_vals)
            except StopIteration:
                # No more positional arguments
                try:
                    param = next(parameters)
                except StopIteration:
                    # No more parameters. That's it. Just need to check that
                    # we have no `kwargs` after this while loop
                    break
                else:
                    if param.kind == _VAR_POSITIONAL:
                        # That's OK, just empty *args. Let's start parsing
                        # kwargs
                        break
                    elif param.name in kwargs:
                        if param.kind == _POSITIONAL_ONLY:
                            msg = '{arg!r} parameter is positional only, ' \
                                  'but was passed as a keyword'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
                        parameters_ex = (param,)
                        break
                    elif (param.kind == _VAR_KEYWORD or
                          param.default is not _empty):
                        # That's fine too - we have a default value for this
                        # parameter. So, lets start parsing `kwargs`, starting
                        # with the current parameter
                        parameters_ex = (param,)
                        break
                    else:
                        # No default, not VAR_KEYWORD, not VAR_POSITIONAL,
                        # not in `kwargs`
                        if partial:
                            parameters_ex = (param,)
                            break
                        else:
                            msg = '{arg!r} parameter lacking default value'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
            else:
                # We have a positional argument to process
                try:
                    param = next(parameters)
                except StopIteration:
                    raise TypeError('too many positional arguments') from None
                else:
                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                        # Looks like we have no parameter for this positional
                        # argument
                        raise TypeError('too many positional arguments')

                    if param.kind == _VAR_POSITIONAL:
                        # We have an '*args'-like argument, let's fill it with
                        # all positional arguments we have left and move on to
                        # the next phase
                        values = [arg_val]
                        values.extend(arg_vals)
                        arguments[param.name] = tuple(values)
                        break

                    if param.name in kwargs:
                        raise TypeError('multiple values for argument '
                                        '{arg!r}'.format(arg=param.name))

                    arguments[param.name] = arg_val

        # Now, we iterate through the remaining parameters to process
        # keyword arguments
        kwargs_param = None
        for param in itertools.chain(parameters_ex, parameters):
            if param.kind == _VAR_KEYWORD:
                # Memorize that we have a '**kwargs'-like parameter
                kwargs_param = param
                continue

            if param.kind == _VAR_POSITIONAL:
                # Named arguments don't refer to '*args'-like parameters.
                # We only arrive here if the positional arguments ended
                # before reaching the last parameter before *args.
                continue

            param_name = param.name
            try:
                arg_val = kwargs.pop(param_name)
            except KeyError:
                # We have no value for this parameter. It's fine though,
                # if it has a default value, or it is an '*args'-like
                # parameter, left alone by the processing of positional
                # arguments.
                if (not partial and param.kind != _VAR_POSITIONAL and
                        param.default is _empty):
                    raise TypeError('{arg!r} parameter lacking default value'. \
                                    format(arg=param_name)) from None
            else:
                if param.kind == _POSITIONAL_ONLY:
                    # This should never happen in case of a properly built
                    # Signature object (but let's have this check here
                    # to ensure correct behaviour just in case)
                    raise TypeError('{arg!r} parameter is positional only, '
                                    'but was passed as a keyword'. \
                                    format(arg=param.name))

                arguments[param_name] = arg_val

        if kwargs:
            if kwargs_param is not None:
                # Process our '**kwargs'-like parameter
                arguments[kwargs_param.name] = kwargs
            else:
                raise TypeError('too many keyword arguments')

        return self._bound_arguments_cls(self, arguments)

    def bind(*args, **kwargs):
        """Get a BoundArguments object, that maps the passed `args`
        and `kwargs` to the function's signature.  Raises `TypeError`
        if the passed arguments can not be bound.
        """
        # Implemented via *args so that a parameter literally named
        # 'self' or 'kwargs' can still be bound by keyword.
        return args[0]._bind(args[1:], kwargs)

    def bind_partial(*args, **kwargs):
        """Get a BoundArguments object, that partially maps the
        passed `args` and `kwargs` to the function's signature.
        Raises `TypeError` if the passed arguments can not be bound.
        """
        return args[0]._bind(args[1:], kwargs, partial=True)

    def __reduce__(self):
        return (type(self),
                (tuple(self._parameters.values()),),
                {'_return_annotation': self._return_annotation})

    def __setstate__(self, state):
        self._return_annotation = state['_return_annotation']

    def __repr__(self):
        return '<{} at {:#x} "{}">'.format(self.__class__.__name__,
                                           id(self), self)

    def __str__(self):
        result = []
        render_pos_only_separator = False
        render_kw_only_separator = True
        for param in self.parameters.values():
            formatted = str(param)

            kind = param.kind
            if kind == _POSITIONAL_ONLY:
                render_pos_only_separator = True
            elif render_pos_only_separator:
                # It's not a positional-only parameter, and the flag
                # is set to 'True' (there were pos-only params before.)
                result.append('/')
                render_pos_only_separator = False

            if kind == _VAR_POSITIONAL:
                # OK, we have an '*args'-like parameter, so we won't need
                # a '*' to separate keyword-only arguments
                render_kw_only_separator = False
            elif kind == _KEYWORD_ONLY and render_kw_only_separator:
                # We have a keyword-only parameter to render and we haven't
                # rendered an '*args'-like parameter before, so add a '*'
                # separator to the parameters list ("foo(arg1, *, arg2)" case)
                result.append('*')
                # This condition should be only triggered once, so
                # reset the flag
                render_kw_only_separator = False

            result.append(formatted)

        if render_pos_only_separator:
            # There were only positional-only parameters, hence the
            # flag was not reset to 'False'
            result.append('/')

        rendered = '({})'.format(', '.join(result))

        if self.return_annotation is not _empty:
            anno = formatannotation(self.return_annotation)
            rendered += ' -> {}'.format(anno)

        return rendered
def signature(obj):
    """Return a Signature object describing the callable *obj*."""
    sig = Signature.from_callable(obj)
    return sig
def _main():
    """Logic for inspecting an object given at command line.

    Supports "module" and "module:qualname" targets; prints either the
    object's source code or, with -d/--details, metadata about it.
    """
    import argparse
    import importlib

    parser = argparse.ArgumentParser()
    parser.add_argument(
        'object',
        help="The object to be analysed. "
             "It supports the 'module:qualname' syntax")
    parser.add_argument(
        '-d', '--details', action='store_true',
        help='Display info about the module rather than its source code')

    args = parser.parse_args()

    target = args.object
    mod_name, has_attrs, attrs = target.partition(":")

    try:
        obj = module = importlib.import_module(mod_name)
    except Exception as exc:
        msg = "Failed to import {} ({}: {})".format(mod_name,
                                                    type(exc).__name__,
                                                    exc)
        print(msg, file=sys.stderr)
        # Use sys.exit rather than the builtin exit(): the latter is only
        # injected by the 'site' module and is absent under `python -S`.
        sys.exit(2)

    if has_attrs:
        # Walk the dotted attribute path after the ':' separator.
        parts = attrs.split(".")
        obj = module
        for part in parts:
            obj = getattr(obj, part)

    if module.__name__ in sys.builtin_module_names:
        print("Can't get info for builtin modules.", file=sys.stderr)
        sys.exit(1)

    if args.details:
        print('Target: {}'.format(target))
        print('Origin: {}'.format(getsourcefile(module)))
        print('Cached: {}'.format(module.__cached__))
        if obj is module:
            print('Loader: {}'.format(repr(module.__loader__)))

            if hasattr(module, '__path__'):
                print('Submodule search path: {}'.format(module.__path__))
        else:
            try:
                __, lineno = findsource(obj)
            except Exception:
                pass
            else:
                # NOTE(review): findsource returns a 0-based index; this
                # prints it as-is — confirm whether +1 was intended.
                print('Line: {}'.format(lineno))

        print('\n')
    else:
        print(getsource(obj))
if __name__ == "__main__":
    # Allow command-line usage: python -m inspect <module[:qualname]>
    _main()
| 37.265468 | 84 | 0.602232 |
e62dbdc3e5e9f84577b0c3d93f861ef3ded376e3 | 6,341 | py | Python | google/cloud/aiplatform_v1/services/migration_service/pagers.py | dizcology/python-aiplatform | 1a135775966c8a2303ded529eba514dcf9db7205 | [
"Apache-2.0"
] | 2 | 2021-10-02T02:25:44.000Z | 2021-11-17T10:35:01.000Z | google/cloud/aiplatform_v1/services/migration_service/pagers.py | pompipo/python-aiplatform | 3612b05c62dfb46822cd2c1798fd47349dba33bc | [
"Apache-2.0"
] | 1 | 2021-03-02T18:25:00.000Z | 2021-03-02T18:25:00.000Z | google/cloud/aiplatform_v1/services/migration_service/pagers.py | pompipo/python-aiplatform | 3612b05c62dfb46822cd2c1798fd47349dba33bc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.aiplatform_v1.types import migratable_resource
from google.cloud.aiplatform_v1.types import migration_service
class SearchMigratableResourcesPager:
    """A pager over ``search_migratable_resources`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse`
    and exposes an ``__iter__`` method that walks the
    ``migratable_resources`` field, transparently issuing further
    ``SearchMigratableResources`` requests whenever a response carries a
    ``next_page_token``.

    Attribute access is proxied to the most recent response, so all of
    the usual response attributes remain available on the pager itself.
    """

    def __init__(
        self,
        method: Callable[..., migration_service.SearchMigratableResourcesResponse],
        request: migration_service.SearchMigratableResourcesRequest,
        response: migration_service.SearchMigratableResourcesResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = migration_service.SearchMigratableResourcesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response object.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[migration_service.SearchMigratableResourcesResponse]:
        """Yield each response page, fetching the next one on demand."""
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterable[migratable_resource.MigratableResource]:
        return (resource
                for page in self.pages
                for resource in page.migratable_resources)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
class SearchMigratableResourcesAsyncPager:
    """An async pager over ``search_migratable_resources`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse`
    and exposes an ``__aiter__`` method that walks the
    ``migratable_resources`` field, transparently awaiting further
    ``SearchMigratableResources`` requests whenever a response carries a
    ``next_page_token``.

    Attribute access is proxied to the most recent response, so all of
    the usual response attributes remain available on the pager itself.
    """

    def __init__(
        self,
        method: Callable[
            ..., Awaitable[migration_service.SearchMigratableResourcesResponse]
        ],
        request: migration_service.SearchMigratableResourcesRequest,
        response: migration_service.SearchMigratableResourcesResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = migration_service.SearchMigratableResourcesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response object.
        return getattr(self._response, name)

    @property
    async def pages(
        self,
    ) -> AsyncIterable[migration_service.SearchMigratableResourcesResponse]:
        """Asynchronously yield each response page, fetching on demand."""
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterable[migratable_resource.MigratableResource]:
        async def _flatten():
            async for page in self.pages:
                for resource in page.migratable_resources:
                    yield resource

        return _flatten()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
| 39.385093 | 93 | 0.69642 |
b5955adfa00e438d8f28a200af7320151f6fcba2 | 9,805 | py | Python | ml-agents/mlagents/trainers/learn.py | symbiote-research/ml-agents | 37ac2062e13b9a4371d25db56cef423371975ea7 | [
"Apache-2.0"
] | 1 | 2020-12-23T06:38:13.000Z | 2020-12-23T06:38:13.000Z | ml-agents/mlagents/trainers/learn.py | symbiote-research/ml-agents | 37ac2062e13b9a4371d25db56cef423371975ea7 | [
"Apache-2.0"
] | null | null | null | ml-agents/mlagents/trainers/learn.py | symbiote-research/ml-agents | 37ac2062e13b9a4371d25db56cef423371975ea7 | [
"Apache-2.0"
] | null | null | null | # # Unity ML-Agents Toolkit
from mlagents import torch_utils
import yaml
import os
import numpy as np
import json
from typing import Callable, Optional, List
import mlagents.trainers
import mlagents_envs
from mlagents.trainers.trainer_controller import TrainerController
from mlagents.trainers.environment_parameter_manager import EnvironmentParameterManager
from mlagents.trainers.trainer import TrainerFactory
from mlagents.trainers.directory_utils import validate_existing_directories
from mlagents.trainers.stats import (
TensorboardWriter,
StatsReporter,
GaugeWriter,
ConsoleWriter,
)
from mlagents.trainers.cli_utils import parser
from mlagents_envs.environment import UnityEnvironment
from mlagents.trainers.settings import RunOptions
from mlagents.trainers.training_status import GlobalTrainingStatus
from mlagents_envs.base_env import BaseEnv
from mlagents.trainers.subprocess_env_manager import SubprocessEnvManager
from mlagents_envs.side_channel.side_channel import SideChannel
from mlagents_envs.side_channel.engine_configuration_channel import EngineConfig
from mlagents_envs.timers import (
hierarchical_timer,
get_timer_tree,
add_metadata as add_timer_metadata,
)
from mlagents_envs import logging_util
# Module-level logger for this file.
logger = logging_util.get_logger(__name__)
# Name of the JSON file (written under the run's log directory) used to
# save/restore global training status between runs.
TRAINING_STATUS_FILE_NAME = "training_status.json"
def get_version_string() -> str:
    """Return a human-readable, multi-line summary of the relevant package versions."""
    # pylint: disable=no-member
    return f""" Version information:
  ml-agents: {mlagents.trainers.__version__},
  ml-agents-envs: {mlagents_envs.__version__},
  Communicator API: {UnityEnvironment.API_VERSION},
  PyTorch: {torch_utils.torch.__version__}"""
def parse_command_line(argv: Optional[List[str]] = None) -> RunOptions:
    """Parse CLI arguments (defaults to ``sys.argv`` when *argv* is None) into RunOptions."""
    return RunOptions.from_argparse(parser.parse_args(argv))
def run_training(run_seed: int, options: RunOptions) -> None:
    """
    Launches a training session.
    :param run_seed: Random seed used for training.
    :param options: Parsed command line arguments for training.
    """
    with hierarchical_timer("run_training.setup"):
        checkpoint_settings = options.checkpoint_settings
        env_settings = options.env_settings
        engine_settings = options.engine_settings
        base_path = "results"
        write_path = os.path.join(base_path, checkpoint_settings.run_id)
        maybe_init_path = (
            os.path.join(base_path, checkpoint_settings.initialize_from)
            if checkpoint_settings.initialize_from is not None
            else None
        )
        run_logs_dir = os.path.join(write_path, "run_logs")
        port: Optional[int] = env_settings.base_port
        # Check if directory exists
        validate_existing_directories(
            write_path,
            checkpoint_settings.resume,
            checkpoint_settings.force,
            maybe_init_path,
        )
        # Make run logs directory
        os.makedirs(run_logs_dir, exist_ok=True)
        # Load any needed states. Use the shared module constant so the file
        # name stays consistent with write_training_status().
        if checkpoint_settings.resume:
            GlobalTrainingStatus.load_state(
                os.path.join(run_logs_dir, TRAINING_STATUS_FILE_NAME)
            )
        # Configure Tensorboard Writers and StatsReporter
        tb_writer = TensorboardWriter(
            write_path, clear_past_data=not checkpoint_settings.resume
        )
        gauge_write = GaugeWriter()
        console_writer = ConsoleWriter()
        StatsReporter.add_writer(tb_writer)
        StatsReporter.add_writer(gauge_write)
        StatsReporter.add_writer(console_writer)
        # No executable given: we will connect to an already-running editor,
        # which uses the default port, so no explicit base port is needed.
        if env_settings.env_path is None:
            port = None
        env_factory = create_environment_factory(
            env_settings.env_path,
            engine_settings.no_graphics,
            run_seed,
            port,
            env_settings.env_args,
            os.path.abspath(run_logs_dir),  # Unity environment requires absolute path
        )
        engine_config = EngineConfig(
            width=engine_settings.width,
            height=engine_settings.height,
            quality_level=engine_settings.quality_level,
            time_scale=engine_settings.time_scale,
            target_frame_rate=engine_settings.target_frame_rate,
            capture_frame_rate=engine_settings.capture_frame_rate,
        )
        env_manager = SubprocessEnvManager(
            env_factory, engine_config, env_settings.num_envs
        )
        env_parameter_manager = EnvironmentParameterManager(
            options.environment_parameters, run_seed, restore=checkpoint_settings.resume
        )
        trainer_factory = TrainerFactory(
            trainer_config=options.behaviors,
            output_path=write_path,
            train_model=not checkpoint_settings.inference,
            load_model=checkpoint_settings.resume,
            seed=run_seed,
            param_manager=env_parameter_manager,
            init_path=maybe_init_path,
            multi_gpu=False,
        )
        # Create controller and begin training.
        tc = TrainerController(
            trainer_factory,
            write_path,
            checkpoint_settings.run_id,
            env_parameter_manager,
            not checkpoint_settings.inference,
            run_seed,
        )
    # Begin training
    try:
        tc.start_learning(env_manager)
    finally:
        # Always tear down subprocess environments and persist run artifacts,
        # even if training raised.
        env_manager.close()
        write_run_options(write_path, options)
        write_timing_tree(run_logs_dir)
        write_training_status(run_logs_dir)
def write_run_options(output_dir: str, run_options: RunOptions) -> None:
    """Serialize the run configuration to ``<output_dir>/configuration.yaml``."""
    run_options_path = os.path.join(output_dir, "configuration.yaml")
    try:
        with open(run_options_path, "w") as f:
            try:
                yaml.dump(run_options.as_dict(), f, sort_keys=False)
            except TypeError:
                # Older versions of pyyaml don't support sort_keys
                yaml.dump(run_options.as_dict(), f)
    except FileNotFoundError:
        logger.warning(
            f"Unable to save configuration to {run_options_path}. Make sure the directory exists"
        )
def write_training_status(output_dir: str) -> None:
    """Persist the global training status JSON under *output_dir*."""
    GlobalTrainingStatus.save_state(os.path.join(output_dir, TRAINING_STATUS_FILE_NAME))
def write_timing_tree(output_dir: str) -> None:
    """Write the hierarchical timer tree as JSON to ``<output_dir>/timers.json``."""
    timing_path = os.path.join(output_dir, "timers.json")
    try:
        with open(timing_path, "w") as handle:
            handle.write(json.dumps(get_timer_tree(), indent=4))
    except FileNotFoundError:
        logger.warning(
            f"Unable to save to {timing_path}. Make sure the directory exists"
        )
def create_environment_factory(
    env_path: Optional[str],
    no_graphics: bool,
    seed: int,
    start_port: Optional[int],
    env_args: Optional[List[str]],
    log_folder: str,
) -> Callable[[int, List[SideChannel]], BaseEnv]:
    """Build a factory callable that creates one UnityEnvironment per worker."""

    def create_unity_environment(
        worker_id: int, side_channels: List[SideChannel]
    ) -> UnityEnvironment:
        """Create the environment for a single worker, with a worker-unique seed."""
        return UnityEnvironment(
            file_name=env_path,
            worker_id=worker_id,
            # Offset by the worker id so each environment gets a different seed.
            seed=seed + worker_id,
            no_graphics=no_graphics,
            base_port=start_port,
            additional_args=env_args,
            side_channels=side_channels,
            log_folder=log_folder,
        )

    return create_unity_environment
def run_cli(options: RunOptions) -> None:
    """Top-level CLI entry: print banner/version info, configure logging, seed, and train."""
    # The banner uses characters some consoles cannot render; fall back to a
    # plain-text banner on any failure.
    try:
        print(
            """
                        ▄▄▄▓▓▓▓
                   ╓▓▓▓▓▓▓█▓▓▓▓▓
              ,▄▄▄m▀▀▀'  ,▓▓▓▀▓▓▄                           ▓▓▓  ▓▓▌
            ▄▓▓▓▀'      ▄▓▓▀  ▓▓▓      ▄▄     ▄▄ ,▄▄ ▄▄▄▄   ,▄▄ ▄▓▓▌▄ ▄▄▄    ,▄▄
          ▄▓▓▓▀        ▄▓▓▀   ▐▓▓▌     ▓▓▌   ▐▓▓ ▐▓▓▓▀▀▀▓▓▌ ▓▓▓ ▀▓▓▌▀ ^▓▓▌  ╒▓▓▌
        ▄▓▓▓▓▓▄▄▄▄▄▄▄▄▓▓▓      ▓▀      ▓▓▌   ▐▓▓ ▐▓▓    ▓▓▓ ▓▓▓  ▓▓▌   ▐▓▓▄ ▓▓▌
        ▀▓▓▓▓▀▀▀▀▀▀▀▀▀▀▓▓▄     ▓▓      ▓▓▌   ▐▓▓ ▐▓▓    ▓▓▓ ▓▓▓  ▓▓▌    ▐▓▓▐▓▓
          ^█▓▓▓        ▀▓▓▄   ▐▓▓▌     ▓▓▓▓▄▓▓▓▓ ▐▓▓    ▓▓▓ ▓▓▓  ▓▓▓▄    ▓▓▓▓`
            '▀▓▓▓▄      ^▓▓▓  ▓▓▓       └▀▀▀▀ ▀▀ ^▀▀    `▀▀ `▀▀   '▀▀    ▐▓▓▌
               ▀▀▀▀▓▄▄▄   ▓▓▓▓▓▓,                                      ▓▓▓▓▀
                   `▀█▓▓▓▓▓▓▓▓▓▌
                        ¬`▀▀▀█▓
        """
        )
    except Exception:
        print("\n\n\tUnity Technologies\n")
    print(get_version_string())
    if options.debug:
        log_level = logging_util.DEBUG
    else:
        log_level = logging_util.INFO
    logging_util.set_log_level(log_level)
    logger.debug("Configuration for this run:")
    logger.debug(json.dumps(options.as_dict(), indent=4))
    # Options deprecation warnings
    if options.checkpoint_settings.load_model:
        logger.warning(
            "The --load option has been deprecated. Please use the --resume option instead."
        )
    if options.checkpoint_settings.train_model:
        logger.warning(
            "The --train option has been deprecated. Train mode is now the default. Use "
            "--inference to run in inference mode."
        )
    run_seed = options.env_settings.seed
    # Add some timer metadata
    add_timer_metadata("mlagents_version", mlagents.trainers.__version__)
    add_timer_metadata("mlagents_envs_version", mlagents_envs.__version__)
    add_timer_metadata("communication_protocol_version", UnityEnvironment.API_VERSION)
    add_timer_metadata("pytorch_version", torch_utils.torch.__version__)
    add_timer_metadata("numpy_version", np.__version__)
    # Seed of -1 means "pick a random seed for this run".
    if options.env_settings.seed == -1:
        run_seed = np.random.randint(0, 10000)
        logger.info(f"run_seed set to {run_seed}")
    run_training(run_seed, options)
def main():
    """Parse the command line and run the CLI entry point."""
    run_cli(parse_command_line())
# For python debugger to directly run this script
if __name__ == "__main__":
    main()
| 34.893238 | 97 | 0.63641 |
6ae927e2f5cba6fcb30e0a3a14383dad807a96bc | 8,236 | py | Python | lib/rucio/api/request.py | faluchet/rucio | b8f3ebdc0748aeed022d8b789e7ef6e0f36e6dae | [
"Apache-2.0"
] | null | null | null | lib/rucio/api/request.py | faluchet/rucio | b8f3ebdc0748aeed022d8b789e7ef6e0f36e6dae | [
"Apache-2.0"
] | null | null | null | lib/rucio/api/request.py | faluchet/rucio | b8f3ebdc0748aeed022d8b789e7ef6e0f36e6dae | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Interface for the requests abstraction layer
"""
from rucio.api import permission
from rucio.common import exception
from rucio.common.types import InternalAccount, InternalScope
from rucio.common.utils import api_update_return_dict
from rucio.core import request
from rucio.core.rse import get_rse_id
def queue_requests(requests, issuer, vo='def'):
    """
    Submit transfer or deletion requests on destination RSEs for data identifiers.
    :param requests: List of dictionaries containing 'scope', 'name', 'dest_rse_id', 'request_type', 'attributes'
    :param issuer: Issuing account as a string.
    :param vo: The VO to act on.
    :returns: List of Request-IDs as 32 character hex strings
    """
    kwargs = {'requests': requests, 'issuer': issuer}
    if not permission.has_permission(issuer=issuer, vo=vo, action='queue_requests', kwargs=kwargs):
        raise exception.AccessDenied('%s can not queue request' % issuer)
    # Convert external scope/account names to their internal multi-VO
    # representations, in place.
    for req in requests:
        req['scope'] = InternalScope(req['scope'], vo=vo)
        if 'account' in req:
            req['account'] = InternalAccount(req['account'], vo=vo)
    return [api_update_return_dict(queued) for queued in request.queue_requests(requests)]
def cancel_request(request_id, issuer, account, vo='def'):
    """
    Cancel a request.
    :param request_id: Request Identifier as a 32 character hex string.
    :param issuer: Issuing account as a string.
    :param account: Account identifier as a string.
    :param vo: The VO to act on.
    :raises NotImplementedError: always — cancellation by request id is not
        implemented; only the permission check is performed.
    """
    kwargs = {'account': account, 'issuer': issuer, 'request_id': request_id}
    if not permission.has_permission(issuer=issuer, vo=vo, action='cancel_request_', kwargs=kwargs):
        raise exception.AccessDenied('%s cannot cancel request %s' % (account, request_id))
    # Permission check passes, but the core operation does not exist yet.
    raise NotImplementedError
def cancel_request_did(scope, name, dest_rse, request_type, issuer, account, vo='def'):
    """
    Cancel a request based on a DID and request type.
    :param scope: Data identifier scope as a string.
    :param name: Data identifier name as a string.
    :param dest_rse: RSE name as a string.
    :param request_type: Type of the request as a string.
    :param issuer: Issuing account as a string.
    :param account: Account identifier as a string.
    :param vo: The VO to act on.
    """
    dest_rse_id = get_rse_id(rse=dest_rse, vo=vo)
    kwargs = {'account': account, 'issuer': issuer}
    if not permission.has_permission(issuer=issuer, vo=vo, action='cancel_request_did', kwargs=kwargs):
        raise exception.AccessDenied(
            '%s cannot cancel %s request for %s:%s' % (account, request_type, scope, name)
        )
    internal_scope = InternalScope(scope, vo=vo)
    return request.cancel_request_did(internal_scope, name, dest_rse_id, request_type)
def get_next(request_type, state, issuer, account, vo='def'):
    """
    Retrieve the next request matching the request type and state.
    :param request_type: Type of the request as a string.
    :param state: State of the request as a string.
    :param issuer: Issuing account as a string.
    :param account: Account identifier as a string.
    :param vo: The VO to act on.
    :returns: Request as a dictionary.
    """
    kwargs = {'account': account, 'issuer': issuer, 'request_type': request_type, 'state': state}
    if not permission.has_permission(issuer=issuer, vo=vo, action='get_next', kwargs=kwargs):
        raise exception.AccessDenied(
            '%s cannot get the next request of type %s in state %s' % (account, request_type, state)
        )
    return [api_update_return_dict(req) for req in request.get_next(request_type, state)]
def get_request_by_did(scope, name, rse, issuer, vo='def'):
    """
    Retrieve a request by its DID for a destination RSE.
    :param scope: The scope of the data identifier as a string.
    :param name: The name of the data identifier as a string.
    :param rse: The destination RSE of the request as a string.
    :param issuer: Issuing account as a string.
    :param vo: The VO to act on.
    :returns: Request as a dictionary.
    """
    rse_id = get_rse_id(rse=rse, vo=vo)
    kwargs = {'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, 'issuer': issuer}
    if not permission.has_permission(issuer=issuer, vo=vo, action='get_request_by_did', kwargs=kwargs):
        raise exception.AccessDenied(
            '%s cannot retrieve the request DID %s:%s to RSE %s' % (issuer, scope, name, rse)
        )
    internal_scope = InternalScope(scope, vo=vo)
    return api_update_return_dict(request.get_request_by_did(internal_scope, name, rse_id))
def get_request_history_by_did(scope, name, rse, issuer, vo='def'):
    """
    Retrieve a historical request by its DID for a destination RSE.
    Mirrors get_request_by_did but reads from the request history.
    :param scope: The scope of the data identifier as a string.
    :param name: The name of the data identifier as a string.
    :param rse: The destination RSE of the request as a string.
    :param issuer: Issuing account as a string.
    :param vo: The VO to act on.
    :returns: Request as a dictionary.
    """
    rse_id = get_rse_id(rse=rse, vo=vo)
    kwargs = {'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, 'issuer': issuer}
    if not permission.has_permission(issuer=issuer, vo=vo, action='get_request_history_by_did', kwargs=kwargs):
        raise exception.AccessDenied('%(issuer)s cannot retrieve the request DID %(scope)s:%(name)s to RSE %(rse)s' % locals())
    # Convert the external scope only after the permission check, so the
    # error message above shows the caller-supplied value.
    scope = InternalScope(scope, vo=vo)
    req = request.get_request_history_by_did(scope, name, rse_id)
    return api_update_return_dict(req)
def list_requests(src_rses, dst_rses, states, issuer, vo='def'):
    """
    List all requests in a specific state from a source RSE to a destination RSE.
    :param src_rses: source RSEs.
    :param dst_rses: destination RSEs.
    :param states: list of request states.
    :param issuer: Issuing account as a string.
    :param vo: The VO to act on.
    """
    src_rse_ids = [get_rse_id(rse=rse, vo=vo) for rse in src_rses]
    dst_rse_ids = [get_rse_id(rse=rse, vo=vo) for rse in dst_rses]
    kwargs = {'src_rse_id': src_rse_ids, 'dst_rse_id': dst_rse_ids, 'issuer': issuer}
    if not permission.has_permission(issuer=issuer, vo=vo, action='list_requests', kwargs=kwargs):
        # Bug fix: the original used "% locals()" with %(src_rse)s/%(dst_rse)s
        # keys that do not exist in this scope (the variables are src_rses /
        # dst_rses), so the permission-denied path raised KeyError instead of
        # AccessDenied. Format with the variables that actually exist.
        raise exception.AccessDenied(
            '%s cannot list requests from RSE %s to RSE %s' % (issuer, src_rses, dst_rses)
        )
    for req in request.list_requests(src_rse_ids, dst_rse_ids, states):
        req = req.to_dict()
        yield api_update_return_dict(req)
def list_requests_history(src_rses, dst_rses, states, issuer, vo='def', offset=None, limit=None):
    """
    List all historical requests in a specific state from a source RSE to a destination RSE.
    :param src_rses: source RSEs.
    :param dst_rses: destination RSEs.
    :param states: list of request states.
    :param issuer: Issuing account as a string.
    :param vo: The VO to act on.
    :param offset: offset (for paging).
    :param limit: limit number of results.
    """
    src_rse_ids = [get_rse_id(rse=rse, vo=vo) for rse in src_rses]
    dst_rse_ids = [get_rse_id(rse=rse, vo=vo) for rse in dst_rses]
    kwargs = {'src_rse_id': src_rse_ids, 'dst_rse_id': dst_rse_ids, 'issuer': issuer}
    if not permission.has_permission(issuer=issuer, vo=vo, action='list_requests_history', kwargs=kwargs):
        # Bug fix: the original used "% locals()" with %(src_rse)s/%(dst_rse)s
        # keys that do not exist in this scope, raising KeyError instead of
        # AccessDenied on permission failure.
        raise exception.AccessDenied(
            '%s cannot list requests from RSE %s to RSE %s' % (issuer, src_rses, dst_rses)
        )
    for req in request.list_requests_history(src_rse_ids, dst_rse_ids, states, offset, limit):
        req = req.to_dict()
        yield api_update_return_dict(req)
| 41.386935 | 134 | 0.705682 |
4df90e02d7e6deb0ce92eadf2415dc5a8b1726c5 | 9,804 | py | Python | synapse/federation/federation_base.py | Fr3shTea/synapse | cbd82d0b2db069400b5d43373838817d8a0209e7 | [
"Apache-2.0"
] | null | null | null | synapse/federation/federation_base.py | Fr3shTea/synapse | cbd82d0b2db069400b5d43373838817d8a0209e7 | [
"Apache-2.0"
] | null | null | null | synapse/federation/federation_base.py | Fr3shTea/synapse | cbd82d0b2db069400b5d43373838817d8a0209e7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING
from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import EventFormatVersions, RoomVersion
from synapse.crypto.event_signing import check_event_content_hash
from synapse.crypto.keyring import Keyring
from synapse.events import EventBase, make_event_from_dict
from synapse.events.utils import prune_event, validate_canonicaljson
from synapse.http.servlet import assert_params_in_dict
from synapse.types import JsonDict, get_domain_from_id
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class FederationBase:
    """Base class for federation handlers: validates the signatures, content
    hash and spam status of PDUs received over federation."""
    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.server_name = hs.hostname
        self.keyring = hs.get_keyring()
        self.spam_checker = hs.get_spam_checker()
        self.store = hs.get_datastore()
        self._clock = hs.get_clock()
    async def _check_sigs_and_hash(
        self, room_version: RoomVersion, pdu: EventBase
    ) -> EventBase:
        """Checks that event is correctly signed by the sending server.
        Args:
            room_version: The room version of the PDU
            pdu: the event to be checked
        Returns:
              * the original event if the checks pass
              * a redacted version of the event (if the signature
                matched but the hash did not)
              * throws a SynapseError if the signature check failed."""
        # Signature failure is fatal: re-raise after logging.
        try:
            await _check_sigs_on_pdu(self.keyring, room_version, pdu)
        except SynapseError as e:
            logger.warning(
                "Signature check failed for %s: %s",
                pdu.event_id,
                e,
            )
            raise
        if not check_event_content_hash(pdu):
            # let's try to distinguish between failures because the event was
            # redacted (which are somewhat expected) vs actual ball-tampering
            # incidents.
            #
            # This is just a heuristic, so we just assume that if the keys are
            # about the same between the redacted and received events, then the
            # received event was probably a redacted copy (but we then use our
            # *actual* redacted copy to be on the safe side.)
            redacted_event = prune_event(pdu)
            if set(redacted_event.keys()) == set(pdu.keys()) and set(
                redacted_event.content.keys()
            ) == set(pdu.content.keys()):
                logger.info(
                    "Event %s seems to have been redacted; using our redacted copy",
                    pdu.event_id,
                )
            else:
                logger.warning(
                    "Event %s content has been tampered, redacting",
                    pdu.event_id,
                )
            # Either way, only our locally redacted copy is safe to use.
            return redacted_event
        result = await self.spam_checker.check_event_for_spam(pdu)
        if result:
            logger.warning("Event contains spam, soft-failing %s", pdu.event_id)
            # we redact (to save disk space) as well as soft-failing (to stop
            # using the event in prev_events).
            redacted_event = prune_event(pdu)
            redacted_event.internal_metadata.soft_failed = True
            return redacted_event
        return pdu
async def _check_sigs_on_pdu(
    keyring: Keyring, room_version: RoomVersion, pdu: EventBase
) -> None:
    """Check that the given events are correctly signed
    Raise a SynapseError if the event wasn't correctly signed.
    Args:
        keyring: keyring object to do the checks
        room_version: the room version of the PDUs
        pdu: the event to be checked
    """
    # we want to check that the event is signed by:
    #
    # (a) the sender's server
    #
    # - except in the case of invites created from a 3pid invite, which are exempt
    # from this check, because the sender has to match that of the original 3pid
    # invite, but the event may come from a different HS, for reasons that I don't
    # entirely grok (why do the senders have to match? and if they do, why doesn't the
    # joining server ask the inviting server to do the switcheroo with
    # exchange_third_party_invite?).
    #
    # That's pretty awful, since redacting such an invite will render it invalid
    # (because it will then look like a regular invite without a valid signature),
    # and signatures are *supposed* to be valid whether or not an event has been
    # redacted. But this isn't the worst of the ways that 3pid invites are broken.
    #
    # (b) for V1 and V2 rooms, the server which created the event_id
    #
    # let's start by getting the domain for each pdu, and flattening the event back
    # to JSON.
    # First we check that the sender event is signed by the sender's domain
    # (except if its a 3pid invite, in which case it may be sent by any server)
    if not _is_invite_via_3pid(pdu):
        try:
            await keyring.verify_event_for_server(
                get_domain_from_id(pdu.sender),
                pdu,
                pdu.origin_server_ts if room_version.enforce_key_validity else 0,
            )
        except Exception as e:
            errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
                pdu.event_id,
                get_domain_from_id(pdu.sender),
                e,
            )
            raise SynapseError(403, errmsg, Codes.FORBIDDEN)
    # now let's look for events where the sender's domain is different to the
    # event id's domain (normally only the case for joins/leaves), and add additional
    # checks. Only do this if the room version has a concept of event ID domain
    # (ie, the room version uses old-style non-hash event IDs).
    if room_version.event_format == EventFormatVersions.V1 and get_domain_from_id(
        pdu.event_id
    ) != get_domain_from_id(pdu.sender):
        try:
            await keyring.verify_event_for_server(
                get_domain_from_id(pdu.event_id),
                pdu,
                pdu.origin_server_ts if room_version.enforce_key_validity else 0,
            )
        except Exception as e:
            errmsg = (
                "event id %s: unable to verify signature for event id domain %s: %s"
                % (
                    pdu.event_id,
                    get_domain_from_id(pdu.event_id),
                    e,
                )
            )
            raise SynapseError(403, errmsg, Codes.FORBIDDEN)
    # If this is a join event for a restricted room it may have been authorised
    # via a different server from the sending server. Check those signatures.
    if (
        room_version.msc3083_join_rules
        and pdu.type == EventTypes.Member
        and pdu.membership == Membership.JOIN
        and EventContentFields.AUTHORISING_USER in pdu.content
    ):
        authorising_server = get_domain_from_id(
            pdu.content[EventContentFields.AUTHORISING_USER]
        )
        try:
            await keyring.verify_event_for_server(
                authorising_server,
                pdu,
                pdu.origin_server_ts if room_version.enforce_key_validity else 0,
            )
        except Exception as e:
            errmsg = (
                "event id %s: unable to verify signature for authorising server %s: %s"
                % (
                    pdu.event_id,
                    authorising_server,
                    e,
                )
            )
            raise SynapseError(403, errmsg, Codes.FORBIDDEN)
def _is_invite_via_3pid(event: EventBase) -> bool:
    """Return True if *event* is a membership invite created from a 3pid invite."""
    if event.type != EventTypes.Member:
        return False
    if event.membership != Membership.INVITE:
        return False
    return "third_party_invite" in event.content
def event_from_pdu_json(
    pdu_json: JsonDict, room_version: RoomVersion, outlier: bool = False
) -> EventBase:
    """Construct an EventBase from an event json received over federation
    Args:
        pdu_json: pdu as received over federation
        room_version: The version of the room this event belongs to
        outlier: True to mark this event as an outlier
    Raises:
        SynapseError: if the pdu is missing required fields or is otherwise
            not a valid matrix event
    """
    # we could probably enforce a bunch of other fields here (room_id, sender,
    # origin, etc etc)
    assert_params_in_dict(pdu_json, ("type", "depth"))
    # Depth must be an integer in [0, MAX_DEPTH].
    depth = pdu_json["depth"]
    if not isinstance(depth, int):
        # Fixed typo in the error message ("intger" -> "integer").
        raise SynapseError(400, "Depth %r not an integer" % (depth,), Codes.BAD_JSON)
    if depth < 0:
        raise SynapseError(400, "Depth too small", Codes.BAD_JSON)
    elif depth > MAX_DEPTH:
        raise SynapseError(400, "Depth too large", Codes.BAD_JSON)
    # Validate that the JSON conforms to the specification.
    if room_version.strict_canonicaljson:
        validate_canonicaljson(pdu_json)
    event = make_event_from_dict(pdu_json, room_version)
    event.internal_metadata.outlier = outlier
    return event
| 38.750988 | 90 | 0.637291 |
eff8e05d89b9b55cad3224b2378d4d8bde86e0be | 522 | py | Python | Pr_2.py | EndMad/L6 | a17d9a859b4659e40df390576f566277809161e9 | [
"MIT"
] | null | null | null | Pr_2.py | EndMad/L6 | a17d9a859b4659e40df390576f566277809161e9 | [
"MIT"
] | null | null | null | Pr_2.py | EndMad/L6 | a17d9a859b4659e40df390576f566277809161e9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# создайте словарь, где ключами являются числа, а значениями – строки.
# Примените к нему метод items(), c с помощью полученного объекта dict_items создайте
# новый словарь, "обратный" исходному, т. е. ключами являются строки, а значениями –
# числа.
from datetime import date
import sys
if __name__ == '__main__':
list_1 = {11: 'eleven', 12: 'twelve', 20: 'twenty'}
list_2 = dict(map(reversed, list_1.items()))
print(list_1)
print(list_2)
| 30.705882 | 86 | 0.672414 |
4a28639b1ebbd1d4fceb93a8d6d7ad651e4204f8 | 363 | py | Python | fdk_client/platform/models/EventSubscriptionTemplateEmail.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/platform/models/EventSubscriptionTemplateEmail.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/platform/models/EventSubscriptionTemplateEmail.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | """Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class EventSubscriptionTemplateEmail(BaseSchema):
    """Marshmallow schema for the email part of an event-subscription template."""
    # Communication swagger.json
    # Whether the email template is subscribed (optional boolean).
    subscribed = fields.Boolean(required=False)
    # Template value as a string (optional).
    template = fields.Str(required=False)
b801350fdb686c85c408a15f7dc5e21a0facccc9 | 86 | py | Python | fNb-end/src/backend/main.py | kauereblin/ifc | 071103c4b87a158754f1fe6751984ed0b1760fed | [
"MIT"
] | 4 | 2020-07-23T18:20:00.000Z | 2020-11-17T02:38:31.000Z | fNb-end/src/backend/main.py | kauereblin/ifc | 071103c4b87a158754f1fe6751984ed0b1760fed | [
"MIT"
] | null | null | null | fNb-end/src/backend/main.py | kauereblin/ifc | 071103c4b87a158754f1fe6751984ed0b1760fed | [
"MIT"
] | null | null | null | from routes import app
if __name__ == "__main__":
    # Start the Flask development server on port 5000 with debug mode enabled.
    app.run(port=5000, debug=True)
d724a4a3ccb822832934b18fdcad7492717b8223 | 1,317 | py | Python | app/main/forms.py | Brayooh/pitches | 8b1b6abd7b966097cc0ac97fe4439e75dc0401d6 | [
"MIT"
] | null | null | null | app/main/forms.py | Brayooh/pitches | 8b1b6abd7b966097cc0ac97fe4439e75dc0401d6 | [
"MIT"
] | null | null | null | app/main/forms.py | Brayooh/pitches | 8b1b6abd7b966097cc0ac97fe4439e75dc0401d6 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, TextAreaField, SubmitField
from wtforms.validators import Required
class PostForm(FlaskForm):
    """Form for creating a pitch: a title, the pitch text and a category."""
    title = StringField('Title', validators=[Required()])
    post = TextAreaField('Pitch', validators=[Required()])
    # Category is restricted to the three supported pitch types.
    category = SelectField('Category', choices=[('product', 'product'), ('idea', 'idea'), ('business', 'business')],
                           validators=[Required()])
    submit = SubmitField('Post')
class CommentForm(FlaskForm):
    """Form for posting a comment on a pitch."""
    # NOTE(review): an identical CommentForm is re-defined later in this file;
    # the later definition shadows this one. Consider removing one copy.
    comment = TextAreaField('Comment', validators=[Required()])
    submit = SubmitField('Post')
class Vote(FlaskForm):
    """Form exposing a single 'Like' select control.
    NOTE(review): an identical Vote class is re-defined later in this file and
    shadows this one. Consider removing one copy.
    """
    submit = SelectField('Like')
class UpdateProfile(FlaskForm):
    """Form for updating a user's profile bio (with a category selection).
    Fix: the original assigned ``submit`` twice — the first assignment was
    dead and immediately overwritten by the second; only one SubmitField is
    kept. NOTE(review): this class is shadowed by a later re-definition of
    ``UpdateProfile`` in this file.
    """
    bio = TextAreaField('bio', validators=[Required()])
    category = SelectField('Category', choices=[('product', 'product'), ('idea', 'idea'), ('business', 'business')],
                           validators=[Required()])
    submit = SubmitField('Post')
class CommentForm(FlaskForm):
    """Form for posting a comment on a pitch.
    NOTE(review): duplicate of the CommentForm defined earlier in this file;
    this definition is the one that takes effect.
    """
    comment = TextAreaField('Comment', validators=[Required()])
    submit = SubmitField('Post')
class Vote(FlaskForm):
    """Form exposing a single 'Like' select control.
    NOTE(review): duplicate of the Vote class defined earlier in this file;
    this definition is the one that takes effect.
    """
    submit = SelectField('Like')
class UpdateProfile(FlaskForm):
    """Form for updating a user's profile bio.
    NOTE(review): re-definition that shadows the earlier UpdateProfile (which
    also had a category field); this simpler definition takes effect.
    """
    bio = TextAreaField('bio', validators=[Required()])
    submit = SubmitField('Post')
| 29.266667 | 116 | 0.667426 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.