hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b5425155b264243a2c541eb54beb0002facd6762 | 10,688 | py | Python | ass3-airplane_det/DOTA_devkit/dota-v1.5_evaluation_task1.py | Rooooyy/BUAA_PR | 5b4d12dc786c3fdc469ae59e0b099e8095aee550 | [
"BSD-2-Clause"
] | 2 | 2021-06-09T16:21:53.000Z | 2021-08-30T02:31:56.000Z | DOTA_devkit/dota-v1.5_evaluation_task1.py | jedibobo/S2ANet-custom-dataset | 869b196d4c33713a5c61bd80064d10a453fb76ef | [
"Apache-2.0"
] | null | null | null | DOTA_devkit/dota-v1.5_evaluation_task1.py | jedibobo/S2ANet-custom-dataset | 869b196d4c33713a5c61bd80064d10a453fb76ef | [
"Apache-2.0"
] | null | null | null | # --------------------------------------------------------
# dota_evaluation_task1
# Licensed under The MIT License [see LICENSE for details]
# Written by Jian Ding, based on code from Bharath Hariharan
# --------------------------------------------------------
"""
To use the code, users should to config detpath, annopath and imagesetfile
detpath is the path for 15 result files, for the format, you can refer to "http://captain.whu.edu.cn/DOTAweb/tasks.html"
search for PATH_TO_BE_CONFIGURED to config the paths
Note, the evaluation is on the large scale images
"""
import numpy as np
try:
from polyiou import polyiou
except:
from DOTA_devkit.polyiou import polyiou
def parse_gt(filename):
    """
    Parse a DOTA ground-truth annotation file.

    :param filename: ground truth file to parse
    :return: list of instance dicts for one picture, each holding
             'name', 'difficult' and an 8-float polygon 'bbox'
    """
    objects = []
    with open(filename, 'r') as f:
        for line in f:
            tokens = line.strip().split(' ')
            # A valid record needs 8 polygon coordinates plus a class name.
            if len(tokens) < 9:
                continue
            record = {}
            record['name'] = tokens[8]
            # The optional difficulty flag (token 9) is ignored here; every
            # instance is treated as not difficult.
            record['difficult'] = 0
            record['bbox'] = [float(coord) for coord in tokens[:8]]
            objects.append(record)
    return objects
def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default:False).
    """
    if use_07_metric:
        # VOC07: average the max precision at 11 evenly spaced recall levels.
        ap = 0.
        for threshold in np.arange(0., 1.1, 0.1):
            mask = rec >= threshold
            p = np.max(prec[mask]) if np.sum(mask) > 0 else 0
            ap += p / 11.
        return ap

    # Area-under-curve AP: pad both ends with sentinel values first.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # Make the precision envelope monotonically non-increasing, sweeping
    # right to left.
    for idx in range(mpre.size - 1, 0, -1):
        mpre[idx - 1] = np.maximum(mpre[idx - 1], mpre[idx])

    # AP is the sum of (delta recall) * precision at the points where the
    # recall value actually changes.
    change = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[change + 1] - mrec[change]) * mpre[change + 1])
def voc_eval(detpath,
             annopath,
             imagesetfile,
             classname,
             # cachedir,
             ovthresh=0.5,
             use_07_metric=False):
    """rec, prec, ap = voc_eval(detpath,
                                annopath,
                                imagesetfile,
                                classname,
                                [ovthresh],
                                [use_07_metric])

    Top level function that does the PASCAL VOC evaluation for oriented
    (polygon) detections.

    detpath: Path to detections;
        detpath.format(classname) should produce the detection results file.
    annopath: Path to annotations;
        annopath.format(imagename) should be the annotation file.
    imagesetfile: Text file containing the list of images, one image per line.
    classname: Category name to evaluate.
    [ovthresh]: Overlap threshold (default = 0.5)
    [use_07_metric]: Whether to use VOC07's 11 point AP computation
        (default False)
    """
    # read list of images
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]

    # load ground-truth annotations, one parse per image
    recs = {}
    for i, imagename in enumerate(imagenames):
        recs[imagename] = parse_gt(annopath.format(imagename))

    # extract gt objects for this class
    class_recs = {}
    npos = 0
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the equivalent dtype.
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
        det = [False] * len(R)
        # Difficult ground truths are excluded from the positive count.
        npos = npos + sum(~difficult)
        class_recs[imagename] = {'bbox': bbox,
                                 'difficult': difficult,
                                 'det': det}

    # read dets from Task1* files
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()

    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    BB = np.array([[float(z) for z in x[2:]] for x in splitlines])

    # sort detections by descending confidence
    ## note the usage only in numpy not for list
    sorted_ind = np.argsort(-confidence)
    if BB.shape[0] > 0:
        # FIX: guard against an empty detection file, where BB would be a
        # 1-D empty array and the 2-D fancy indexing below would raise.
        BB = BB[sorted_ind, :]
        image_ids = [image_ids[x] for x in sorted_ind]

    # go down dets and mark TPs and FPs
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        R = class_recs[image_ids[d]]
        bb = BB[d, :].astype(float)
        ovmax = -np.inf
        BBGT = R['bbox'].astype(float)

        ## compute det bb with each BBGT
        if BBGT.size > 0:
            # 1. calculate the overlaps between hbbs (axis-aligned hulls of
            # the polygons): if the iou between hbbs is 0, the iou between
            # obbs is 0 too, so the expensive polygon iou is only computed
            # for candidates whose hulls overlap.
            BBGT_xmin = np.min(BBGT[:, 0::2], axis=1)
            BBGT_ymin = np.min(BBGT[:, 1::2], axis=1)
            BBGT_xmax = np.max(BBGT[:, 0::2], axis=1)
            BBGT_ymax = np.max(BBGT[:, 1::2], axis=1)
            bb_xmin = np.min(bb[0::2])
            bb_ymin = np.min(bb[1::2])
            bb_xmax = np.max(bb[0::2])
            bb_ymax = np.max(bb[1::2])

            ixmin = np.maximum(BBGT_xmin, bb_xmin)
            iymin = np.maximum(BBGT_ymin, bb_ymin)
            ixmax = np.minimum(BBGT_xmax, bb_xmax)
            iymax = np.minimum(BBGT_ymax, bb_ymax)
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih

            # union
            uni = ((bb_xmax - bb_xmin + 1.) * (bb_ymax - bb_ymin + 1.) +
                   (BBGT_xmax - BBGT_xmin + 1.) *
                   (BBGT_ymax - BBGT_ymin + 1.) - inters)

            overlaps = inters / uni
            BBGT_keep_mask = overlaps > 0
            BBGT_keep = BBGT[BBGT_keep_mask, :]
            BBGT_keep_index = np.where(overlaps > 0)[0]

            def calcoverlaps(BBGT_keep, bb):
                # Exact polygon iou for the surviving candidates.
                overlaps = []
                for index, GT in enumerate(BBGT_keep):
                    overlap = polyiou.iou_poly(polyiou.VectorDouble(BBGT_keep[index]), polyiou.VectorDouble(bb))
                    overlaps.append(overlap)
                return overlaps

            if len(BBGT_keep) > 0:
                overlaps = calcoverlaps(BBGT_keep, bb)
                ovmax = np.max(overlaps)
                jmax = np.argmax(overlaps)
                # Map the best index back into the full ground-truth array.
                jmax = BBGT_keep_index[jmax]

        if ovmax > ovthresh:
            if not R['difficult'][jmax]:
                if not R['det'][jmax]:
                    # First match of this ground-truth box: true positive.
                    tp[d] = 1.
                    R['det'][jmax] = 1
                else:
                    # Duplicate detection of an already-matched box.
                    fp[d] = 1.
        else:
            fp[d] = 1.

    # compute precision recall
    print('check fp:', fp)
    print('check tp', tp)
    print('npos num:', npos)
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)

    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)

    return rec, prec, ap
def main():
    """Evaluate Task1 (OBB) detections for every DOTA-v1.5 category and
    print per-class AP plus the mean AP over all categories."""
    detpath = r'work_dirs/temp/result_merge/Task1_{:s}.txt'
    annopath = r'data/dota15/test/dota1_5_labelTxt/{:s}.txt'
    imagesetfile = r'data/dota15/test/testset.txt'

    # For DOTA-v1.5
    classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship',
                  'tennis-court',
                  'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool',
                  'helicopter', 'container-crane']
    classaps = []
    # FIX: renamed from `map`, which shadowed the builtin of the same name.
    mean_ap = 0
    for classname in classnames:
        print('classname:', classname)
        rec, prec, ap = voc_eval(detpath,
                                 annopath,
                                 imagesetfile,
                                 classname,
                                 ovthresh=0.5,
                                 use_07_metric=True)
        mean_ap = mean_ap + ap
        # print('rec: ', rec, 'prec: ', prec, 'ap: ', ap)
        print('ap: ', ap)
        classaps.append(ap)
        # uncomment to show p-r curve of each category
        # plt.figure(figsize=(8,4))
        # plt.xlabel('recall')
        # plt.ylabel('precision')
        # plt.plot(rec, prec)
        # plt.show()
    mean_ap = mean_ap / len(classnames)
    print('map:', mean_ap)
    classaps = 100 * np.array(classaps)
    print('classaps: ', classaps)
# Run the full DOTA-v1.5 Task1 evaluation when invoked as a script.
if __name__ == '__main__':
    main()
| 35.273927 | 124 | 0.52601 |
da187b2f97a4dc4bc1d14e1bb0d40ffc22c104d8 | 544 | py | Python | pyomnisci/_loaders.py | jp-harvey/pyomnisci-1 | bedaa7002299055a172a3de12ca49129d0a51e5d | [
"Apache-2.0"
] | 7 | 2021-02-05T17:00:21.000Z | 2022-02-04T20:55:14.000Z | pyomnisci/_loaders.py | jp-harvey/pyomnisci-1 | bedaa7002299055a172a3de12ca49129d0a51e5d | [
"Apache-2.0"
] | 19 | 2021-01-14T18:48:13.000Z | 2022-01-13T00:26:22.000Z | pyomnisci/_loaders.py | jp-harvey/pyomnisci-1 | bedaa7002299055a172a3de12ca49129d0a51e5d | [
"Apache-2.0"
] | 8 | 2020-11-18T01:58:36.000Z | 2022-01-27T19:45:50.000Z | """
Internal helpers for loading data
"""
from omnisci.thrift.ttypes import TStringRow, TStringValue
import collections
def _build_input_rows(data):
    """Convert an iterable of value rows into Thrift TStringRow objects.

    Sequence values (other than strings) are rendered with OmniSci's
    curly-brace array-literal syntax; everything else is stringified as-is.
    """
    rows = []
    for record in data:
        cols = []
        for value in record:
            is_array = (isinstance(value, collections.abc.Sequence)
                        and not isinstance(value, str))
            if is_array:
                literal = "{" + ",".join(str(item) for item in value) + "}"
                cols.append(TStringValue(literal))
            else:
                cols.append(TStringValue(str(value)))
        row = TStringRow()
        row.cols = cols
        rows.append(row)
    return rows
| 25.904762 | 65 | 0.601103 |
18572fa3240b473fbe11ce48b79a0d0263d3ccdb | 1,085 | py | Python | setup.py | sujitmandal/scrape-search-engine | b1102e0daf472cb1fb248e075f37217ac60d59b1 | [
"MIT"
] | 11 | 2021-02-23T19:30:19.000Z | 2022-02-28T16:54:29.000Z | setup.py | sujitmandal/scrape-search-engine | b1102e0daf472cb1fb248e075f37217ac60d59b1 | [
"MIT"
] | 3 | 2021-07-05T09:55:53.000Z | 2022-03-03T13:45:12.000Z | setup.py | sujitmandal/scrape-search-engine | b1102e0daf472cb1fb248e075f37217ac60d59b1 | [
"MIT"
] | 3 | 2020-10-22T01:18:53.000Z | 2021-04-28T09:45:28.000Z | __author__ = 'Sujit Mandal'
#Date : 22-08-2020
from setuptools import setup
def readme():
    """Return the contents of README.md, used as the long description."""
    with open('README.md') as handle:
        return handle.read()
# Package metadata for PyPI distribution of scrape-search-engine.
setup(
    name = 'scrape-search-engine',
    version = '0.2.0',
    description = "Search anything on the different Search Engine's it will collect all the links.",
    # README.md is rendered as the project page on PyPI.
    long_description = readme(),
    long_description_content_type = 'text/markdown',
    url = 'https://github.com/sujitmandal/scrape-search-engine',
    author = 'Sujit Mandal',
    author_email = 'mandals974@gmail.com',
    license = 'MIT',
    # Trove classifiers: license plus the Python versions the package targets.
    classifiers = [
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.0',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    packages = ['ScrapeSearchEngine'],
    # Ship non-Python files declared in MANIFEST.in along with the package.
    include_package_data = True,
)
| 31 | 100 | 0.621198 |
04ff1d796a6705485276317d8554e717ca9c2baa | 3,869 | py | Python | test/mongo_test.py | julianespinel/stockreader | ad197c70346ce6c26c38ec5a979f58c06f122bfc | [
"MIT"
] | 36 | 2017-01-04T02:12:29.000Z | 2021-12-25T05:11:27.000Z | test/mongo_test.py | julianespinel/stockreader | ad197c70346ce6c26c38ec5a979f58c06f122bfc | [
"MIT"
] | 24 | 2016-08-03T01:13:01.000Z | 2022-01-27T02:01:24.000Z | test/mongo_test.py | julianespinel/stockreader | ad197c70346ce6c26c38ec5a979f58c06f122bfc | [
"MIT"
] | 14 | 2016-12-04T19:56:28.000Z | 2022-01-14T03:18:19.000Z | import unittest
import pymongo
import test.factories as factories
from src.stocks import mongo
from src.infrastructure import json
class MongoTest(unittest.TestCase):
    """Integration tests for the stocks Mongo wrapper, run against a
    disposable local test database that is dropped after every test."""

    DB_HOST = "localhost"
    DB_PORT = 27017
    TEST_DB_NAME = "test_stockreader_db"

    def setUp(self):
        # Fresh wrapper per test; tearDown drops the whole database.
        self.mongo = mongo.Mongo(self.DB_HOST, self.DB_PORT, self.TEST_DB_NAME)

    def tearDown(self):
        pymongo.MongoClient().drop_database(self.TEST_DB_NAME)

    def _count_stocks(self):
        # Helper: number of stocks currently stored in the stock list.
        return len(self.mongo.read_stocks_from_stock_list())

    def test_save_stock_list_OK(self):
        self.assertEqual(0, self._count_stocks())
        stocks = factories.get_stock_list()
        self.mongo.save_stock_list(stocks)
        self.assertEqual(len(stocks), self._count_stocks())

    def test_save_stock_list_NOK_duplicate_stock(self):
        self.assertEqual(0, self._count_stocks())
        stocks = factories.get_stock_list()
        # Append the first stock again so the list contains a duplicate.
        stocks.append(stocks[0])
        self.mongo.save_stock_list(stocks)
        # The duplicate must only be stored once.
        self.assertEqual(len(stocks) - 1, self._count_stocks())

    def test_stock_exists_OK(self):
        self.assertEqual(0, self._count_stocks())
        stocks = factories.get_stock_list()
        self.mongo.save_stock_list(stocks)
        self.assertEqual(len(stocks), self._count_stocks())
        self.assertTrue(self.mongo.stock_exists("FB"))

    def test_stock_exists_NOK_empty_stock_list(self):
        self.assertEqual(0, self._count_stocks())
        self.mongo.save_stock_list([])
        self.assertEqual(0, self._count_stocks())
        self.assertFalse(self.mongo.stock_exists("FB"))

    def test_get_stock_by_quote_OK(self):
        self.assertEqual(0, self._count_stocks())
        stocks = factories.get_stock_list()
        self.mongo.save_stock_list(stocks)
        self.assertEqual(len(stocks), self._count_stocks())
        expected = stocks[0]
        self.assertEqual(expected, self.mongo.get_stock_by_quote(expected["symbol"]))

    def test_save_stock_historical_data_OK(self):
        quote = "BAC"
        self.assertEqual(0, len(self.mongo.get_stock_historical_data(quote)))
        entries = factories.get_stock_historical_data_array()
        entries = json.json_keys_to_lower_and_snake_case(entries)
        self.mongo.save_stock_historical_data(quote, entries)
        self.assertEqual(len(entries), len(self.mongo.get_stock_historical_data(quote)))

    def test_upsert_stock_current_data_OK(self):
        quote = "BAC"
        self.assertIsNone(self.mongo.get_stock_current_data(quote))
        current = factories.get_stock_current_data()
        current = json.json_keys_to_lower_and_snake_case(current)
        self.mongo.upsert_stock_current_data(quote, current)
        stored = self.mongo.get_stock_current_data(quote)
        stored.pop("_id")  # drop the MongoDB-generated id before comparing
        self.assertEqual(current, stored)
| 44.471264 | 105 | 0.733006 |
88128ee7d1aff7978aa6cf754ab0f1cdf45aef69 | 53 | py | Python | api/v1/models/__init__.py | nothink/meguchi | 3f57d56183a946b68a38a6592aaae64b0dd8c09b | [
"MIT"
] | null | null | null | api/v1/models/__init__.py | nothink/meguchi | 3f57d56183a946b68a38a6592aaae64b0dd8c09b | [
"MIT"
] | null | null | null | api/v1/models/__init__.py | nothink/meguchi | 3f57d56183a946b68a38a6592aaae64b0dd8c09b | [
"MIT"
] | null | null | null | from .resource import Resource
__all__ = [Resource]
| 13.25 | 30 | 0.773585 |
34cf786c59a387892f76147e4ae599b83ceccee8 | 8,378 | py | Python | propheto/project/configuration/configuration.py | anthonymichaelclark/propheto | 5347a74c0e3fb2698abfef2933585158d445f6a7 | [
"Apache-2.0"
] | null | null | null | propheto/project/configuration/configuration.py | anthonymichaelclark/propheto | 5347a74c0e3fb2698abfef2933585158d445f6a7 | [
"Apache-2.0"
] | null | null | null | propheto/project/configuration/configuration.py | anthonymichaelclark/propheto | 5347a74c0e3fb2698abfef2933585158d445f6a7 | [
"Apache-2.0"
] | null | null | null | import json
import copy
from time import time
from typing import Optional, List
from datetime import datetime, date
from .iteration import Iteration
from propheto.utilities import unique_id
class Configuration:
    """
    Base class for configuration of remote project resources.

    Holds project metadata plus a dictionary of Iteration objects keyed by
    iteration id, and supports serialization to dict, JSON and a local
    config file.
    """

    def __init__(
        self,
        id: Optional[str] = "",
        name: Optional[str] = "",
        version: Optional[str] = "",
        description: Optional[str] = "",
        current_iteration_id: Optional[str] = "",
        iterations: Optional[dict] = None,
        status: Optional[str] = "inactive",
        service_api_url: Optional[str] = None,
        *args,
        **kwargs,
    ) -> None:
        # FIX: `iterations` previously defaulted to a shared mutable `{}`
        # (mutable-default-argument pitfall); None behaves identically for
        # callers while avoiding the shared object.
        iterations = iterations if iterations is not None else {}
        self.id = id
        self.name = name
        self.version = version
        self.description = description
        self.current_iteration_id = current_iteration_id
        # Iterations are always stored as Iteration objects keyed by id;
        # raw dicts passed in are converted via add_iteration() below.
        self.iterations = {}
        self.status = status
        self.service_api_url = service_api_url
        for iteration_id, item in iterations.items():
            item["id"] = iteration_id
            self.add_iteration(**item)

    def __repr__(self) -> str:
        return f"Configuration(id={self.id}, name={self.name}, version={self.version})"

    def __str__(self) -> str:
        return f"id={self.id}, name={self.name}, version={self.version}"

    def to_dict(self) -> dict:
        """
        Convert the object to a plain dictionary.

        Iteration objects are expanded via their own to_dict(); the private
        `_session` attribute (when present) is skipped.

        Returns
        -------
        output_dict : dict
            Dictionary of the object resources.
        """
        output_dict = {}
        config_dict = vars(self)
        for key, attribute in config_dict.items():
            if key == "_session":
                pass
            elif key != "iterations":
                output_dict[key] = copy.deepcopy(attribute)
            else:
                _iterations_dict = {}
                for i_key, _iteration in attribute.items():
                    _iterations_dict[i_key] = copy.deepcopy(_iteration.to_dict())
                output_dict["iterations"] = _iterations_dict
        return output_dict

    @staticmethod
    def dict_converter(obj):
        """
        JSON serializer hook: convert datetime/date values to ISO format.

        Raises
        ------
        TypeError
            If `obj` is not a datetime or date.
        """
        if isinstance(obj, (datetime, date)):
            return obj.isoformat()
        raise TypeError(f"{type(obj)} not datetime")

    def to_json(self) -> str:
        """
        Convert the object into a JSON string.

        Returns
        -------
        _config_json : str
            JSON string representing the configuration.
        """
        _config = self.to_dict().copy()
        # Remove non-serializable / internal attributes.
        for attribute in ["_config_path", "_session"]:
            if attribute in _config:
                del _config[attribute]
        _config_json = json.dumps(_config, default=self.dict_converter)
        return _config_json

    def write_config(self, output_file: str = "propheto.config") -> None:
        """
        Write the configuration to a local file as JSON.

        Parameters
        ----------
        output_file : str, optional
            Target file path (default "propheto.config").
        """
        _config = self.to_dict()
        # Remove non-serializable / internal attributes.
        for attribute in ["_config_path", "_session"]:
            if attribute in _config:
                _config.pop(attribute)
        with open(output_file, "w") as config_file:
            config_file.write(json.dumps(_config, default=self.dict_converter))

    def add_iteration(
        self,
        iteration_name: str,
        id: str = "",
        version: str = "",
        resources: Optional[dict] = None,
        status: str = "inactivate",
        set_current: bool = False,
        *args,
        **kwargs,
    ) -> object:
        """
        Create an Iteration, register it, and optionally make it current.

        Parameters
        ----------
        iteration_name : str
            Name for the iteration/experiment.
        id : str, optional
            Unique id for the iteration; generated when empty.
        version : str, optional
            Version for the iteration/API endpoint; defaults to the
            project version when empty.
        resources : dict, optional
            Raw resource dicts (as produced by to_dict()) to attach.
        status : str, optional
            Status of the iteration.
            NOTE(review): the default "inactivate" looks like a typo for
            "inactive" (used by __init__); kept as-is since persisted
            values may depend on it — confirm before changing.
        set_current : bool, optional
            Whether this iteration becomes the current deployment.
        """
        # FIX: avoid a shared mutable default for `resources`.
        resources = resources if resources is not None else {}
        iteration = Iteration(
            id=id if id != "" else unique_id(length=8),
            iteration_name=iteration_name,
            version=version if version != "" else self.version,
            resources={},
            status=status,
        )
        self.iterations[iteration.id] = iteration
        # Convert any raw resource dicts into Resource objects on the
        # iteration (timestamps arrive as ISO strings from serialization).
        for resource_id, resource in resources.items():
            resource["id"] = resource_id
            resource["iteration_id"] = iteration.id
            resource["created_at"] = datetime.fromisoformat(resource["created_at"])
            resource["updated_at"] = datetime.fromisoformat(resource["updated_at"])
            self.add_resource(**resource)
        if set_current:
            self.set_current_iteration(iteration_id=iteration.id)
        return iteration

    def set_current_iteration(self, iteration_id: str):
        """
        Set the current iteration id.

        Parameters
        ----------
        iteration_id : str
            Unique id for the iteration.
        """
        self.current_iteration_id = iteration_id

    def add_resource(
        self,
        remote_object: object,
        iteration_id: str = "",
        id: str = "",
        name: str = "",
        pickle_object: Optional[bytes] = b"",
        created_at: Optional[datetime] = None,
        updated_at: Optional[datetime] = None,
    ) -> None:
        """
        Add a resource to an iteration (the current one by default).

        Parameters
        ----------
        remote_object : object
            Object resource.
        iteration_id : str, optional
            Target iteration id; defaults to the current iteration.
        id : str, optional
            Id for the specific resource.
        name : str, optional
            Name of the resource.
        pickle_object : bytes, optional
            Pickled form of the object.
        created_at : datetime, optional
            Creation timestamp; defaults to now.
        updated_at : datetime, optional
            Update timestamp; defaults to now.
        """
        # FIX: the timestamps previously defaulted to
        # datetime.fromtimestamp(time()) evaluated once at class-definition
        # time, so every call without explicit values shared the module
        # import time. None now means "now, at call time".
        now = datetime.fromtimestamp(time())
        created_at = created_at if created_at is not None else now
        updated_at = updated_at if updated_at is not None else now
        iteration_id = iteration_id if iteration_id != "" else self.current_iteration_id
        resource = self.iterations[iteration_id].add_resource(
            id=id,
            remote_object=remote_object,
            pickle_object=pickle_object,
            name=name,
            created_at=created_at,
            updated_at=updated_at,
        )
        return resource

    def loads(
        self,
        iteration_id: Optional[str] = None,
        profile_name: Optional[str] = "default",
    ) -> None:
        """
        Load pickled resources for an iteration.

        Parameters
        ----------
        iteration_id : str, optional
            Iteration whose resources are loaded; defaults to the current
            iteration.
        profile_name : str, optional
            The profile name used to load the resources.
        """
        if self.iterations != {}:
            iteration_id = iteration_id if iteration_id else self.current_iteration_id
            iteration_resources = self.iterations[iteration_id].resources
            for resource_id in list(iteration_resources.keys()):
                self.iterations[iteration_id].resources[resource_id].loads(profile_name=profile_name)

    def destroy(
        self,
        iteration_id: Optional[str] = None,
        excludes: Optional[List[str]] = [],
        includes: Optional[List[str]] = [],
    ) -> None:
        """
        Destroy project resources for an iteration.

        Parameters
        ----------
        iteration_id : str, optional
            Iteration to destroy; defaults to the current iteration.
        excludes : List[str], optional
            Specific resources to exclude from destruction.
        includes : List[str], optional
            Specific resources that should be destroyed.
        """
        iteration_id = iteration_id if iteration_id else self.current_iteration_id
        self.iterations[iteration_id].destroy(excludes=excludes, includes=includes)
| 33.378486 | 99 | 0.568155 |
fc95cbf2287e23afd125708b0d63c11af1a72fb5 | 1,473 | py | Python | examples/v1/check_permissions.py | samkim/authzed-py | a74642e126ca84a4ef93d9c7fc64941cab79a204 | [
"Apache-2.0"
] | 13 | 2021-02-17T02:05:51.000Z | 2022-02-10T01:52:32.000Z | examples/v1/check_permissions.py | samkim/authzed-py | a74642e126ca84a4ef93d9c7fc64941cab79a204 | [
"Apache-2.0"
] | 6 | 2021-07-17T15:49:10.000Z | 2022-03-04T13:01:11.000Z | examples/v1/check_permissions.py | samkim/authzed-py | a74642e126ca84a4ef93d9c7fc64941cab79a204 | [
"Apache-2.0"
] | 6 | 2021-03-15T04:35:03.000Z | 2022-03-04T11:12:10.000Z | from authzed.api.v1 import (
CheckPermissionRequest,
CheckPermissionResponse,
Client,
ObjectReference,
SubjectReference,
)
from grpcutil import bearer_token_credentials
# Subject references identify the users whose permissions are checked below.
emilia = SubjectReference(
    object=ObjectReference(
        object_type="blog/user",
        object_id="emilia",
    )
)
beatrice = SubjectReference(
    object=ObjectReference(
        object_type="blog/user",
        object_id="beatrice",
    )
)
# The resource being protected: blog post with id "1".
post_one = ObjectReference(object_type="blog/post", object_id="1")
# gRPC client authenticated against the hosted Authzed service with a
# placeholder bearer token.
client = Client(
    "grpc.authzed.com:443",
    bearer_token_credentials("t_your_token_here_1234567deadbeef"),
)
# emilia is expected to be able to read post 1.
resp = client.CheckPermission(
    CheckPermissionRequest(
        resource=post_one,
        permission="reader",
        subject=emilia,
    )
)
assert resp.permissionship == CheckPermissionResponse.PERMISSIONSHIP_HAS_PERMISSION
# emilia is expected to be able to write post 1.
resp = client.CheckPermission(
    CheckPermissionRequest(
        resource=post_one,
        permission="writer",
        subject=emilia,
    )
)
assert resp.permissionship == CheckPermissionResponse.PERMISSIONSHIP_HAS_PERMISSION
# beatrice is expected to be able to read post 1.
resp = client.CheckPermission(
    CheckPermissionRequest(
        resource=post_one,
        permission="reader",
        subject=beatrice,
    )
)
assert resp.permissionship == CheckPermissionResponse.PERMISSIONSHIP_HAS_PERMISSION
# NOTE(review): this asserts beatrice also has 'writer' on post 1; in typical
# blog examples beatrice is reader-only, so HAS_PERMISSION here looks
# suspicious — confirm against the actual schema before relying on it.
resp = client.CheckPermission(
    CheckPermissionRequest(
        resource=post_one,
        permission="writer",
        subject=beatrice,
    )
)
| 23.015625 | 83 | 0.7074 |
da212b4119c094e0e2aec4ce3fd5eb31e4b9e130 | 3,743 | py | Python | server.py | grahampfau/ftp-to-s3 | eada6dc67ffcc6ab7a1d921f9dcd3493bc8c5416 | [
"MIT"
] | 1 | 2018-02-13T05:43:20.000Z | 2018-02-13T05:43:20.000Z | server.py | grahampfau/ftp-to-s3 | eada6dc67ffcc6ab7a1d921f9dcd3493bc8c5416 | [
"MIT"
] | null | null | null | server.py | grahampfau/ftp-to-s3 | eada6dc67ffcc6ab7a1d921f9dcd3493bc8c5416 | [
"MIT"
] | 1 | 2019-02-07T14:51:23.000Z | 2019-02-07T14:51:23.000Z | import logging
import os
import threading
from queue import Queue
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
from konfig import Konfig
# --- Module-level setup: logging, configuration, S3 connection, work queue ---
log_format = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(format=log_format)
log= logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
# Konfig reads settings (credentials, ports, log level) from the environment.
konf = Konfig()
# logging.getLevelName maps a level-name string back to its numeric value.
root_log_level = logging.getLevelName(konf.root_log_level)
root_log= logging.getLogger()
root_log.setLevel(root_log_level)
ftp_port = int(konf.ftp_port)
passive_port_lower = int(konf.passive_port_lower)
# +1 so the configured upper bound is included in the range below.
passive_port_upper = int(konf.passive_port_upper) + 1
passive_range = range(passive_port_lower, passive_port_upper)
s3_connection = S3Connection(konf.aws_access_key_id,
                             konf.aws_secret_access_key)
s3_bucket = s3_connection.get_bucket(konf.aws_bucket_name)
# Filenames of completed FTP uploads, consumed by FTPWorker threads.
job_queue = Queue()
def process_file(filename):
    """Upload a received file to S3 as a public object, then delete the
    local copy."""
    s3_key = Key(s3_bucket)
    # The S3 key is the file path relative to the local 'ftp' root.
    s3_key.key = filename.split(os.getcwd() + '/ftp')[-1]
    s3_key.set_contents_from_filename(filename)
    s3_key.set_acl('public-read')
    url = s3_key.generate_url(expires_in=86400)  # signed link valid 1 day
    log.debug("File now in S3 at: {}".format(url))
    os.unlink(filename)
    log.debug("Deleted file: {}".format(filename))
class FTPWorker(threading.Thread):
    """Worker thread that drains the shared job queue, uploading each
    completed FTP transfer to S3 via process_file()."""

    def __init__(self, q):
        threading.Thread.__init__(self)
        self.q = q

    def run(self):
        log.debug("Worker online")
        while True:
            log.debug("Worker waiting for job ... %s" % str(job_queue.qsize()))
            filename = job_queue.get()
            log.debug("Worker got job: %s, qsize: %s" % (filename, str(job_queue.qsize())))
            try:
                process_file(filename)
                log.debug("Task done, qsize: %s" % str(job_queue.qsize()))
            except Exception as exc:
                # Best-effort: a failed upload is logged, never fatal to
                # the worker loop.
                log.error("Task failed with error: %s" % str(exc))
            finally:
                job_queue.task_done()
# Subclasses (and deliberately rebinds the name of) pyftpdlib's FTPHandler so
# that main() picks up the S3-queueing behavior under the same name.
class FTPHandler(FTPHandler):
    def on_file_received(self, filename):
        # pyftpdlib callback fired once a file has been fully received;
        # hand it off to an FTPWorker thread for upload.
        job_queue.put(filename)
def main():
    """Configure and run the FTP server; blocks forever serving requests."""
    # Single virtual user with full read/write permissions rooted at ftp/.
    authorizer = DummyAuthorizer()
    authorizer.add_user(konf.ftp_username,
                        konf.ftp_password,
                        'ftp/',
                        perm='elradfmwM')

    handler = FTPHandler
    handler.permit_foreign_addresses = True
    handler.passive_ports = passive_range
    handler.authorizer = authorizer
    handler.banner = "pyftpdlib based ftpd ready."

    # Listen on all interfaces at the configured port.
    server = FTPServer(('', ftp_port), handler)

    # Each connection needs its own passive port, so cap connections at the
    # size of the passive range.
    max_connections = len(passive_range)
    log.debug('Max number of connections: ' + str(max_connections))
    server.max_cons = max_connections
    server.max_cons_per_ip = max_connections

    server.serve_forever()
if __name__ == '__main__':
    # Recreate the local directory tree mirroring the S3 bucket contents so
    # uploads land in the right place.
    for item in s3_bucket.list():
        directory = 'ftp/' + item.name.rsplit('/', 1)[0]
        if not os.path.exists(directory):
            log.debug('Restoring directory: ' + directory)
            os.makedirs(directory, exist_ok=True)
    # Start four daemon uploader threads draining the job queue.
    for _ in range(4):
        worker = FTPWorker(job_queue)
        worker.daemon = True
        worker.start()
        log.debug("Started worker")
    main()
| 31.191667 | 74 | 0.656692 |
2815f5fbd5026672eb23f36959294840c19234b5 | 27,382 | py | Python | django/http/__init__.py | kcunning/django | 1b05546bd50d1bf99e3d0e7def1238edf8d44065 | [
"BSD-3-Clause"
] | 1 | 2017-12-05T18:37:01.000Z | 2017-12-05T18:37:01.000Z | django/http/__init__.py | kcunning/django | 1b05546bd50d1bf99e3d0e7def1238edf8d44065 | [
"BSD-3-Clause"
] | null | null | null | django/http/__init__.py | kcunning/django | 1b05546bd50d1bf99e3d0e7def1238edf8d44065 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import datetime
import os
import re
import sys
import time
import warnings
from io import BytesIO
from pprint import pformat
from urllib import urlencode, quote
from urlparse import urljoin, parse_qsl
import Cookie
# Feature-detect quirks of the stdlib Cookie module at import time so the
# workaround SimpleCookie subclass below is only defined when needed.
# Some versions of Python 2.7 and later won't need this encoding bug fix:
_cookie_encodes_correctly = Cookie.SimpleCookie().value_encode(';') == (';', '"\\073"')
# See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256
# Probe whether the cookie parser tolerates ':' inside cookie names.
_tc = Cookie.SimpleCookie()
try:
    _tc.load('foo:bar=1')
    _cookie_allows_colon_in_names = True
except Cookie.CookieError:
    _cookie_allows_colon_in_names = False
# Use the stdlib cookie class unmodified when it behaves correctly;
# otherwise define a subclass that patches only the broken features.
if _cookie_encodes_correctly and _cookie_allows_colon_in_names:
    SimpleCookie = Cookie.SimpleCookie
else:
    Morsel = Cookie.Morsel

    class SimpleCookie(Cookie.SimpleCookie):
        # Methods are defined conditionally at class-build time: each
        # override exists only when the detected stdlib bug requires it.
        if not _cookie_encodes_correctly:
            def value_encode(self, val):
                # Some browsers do not support quoted-string from RFC 2109,
                # including some versions of Safari and Internet Explorer.
                # These browsers split on ';', and some versions of Safari
                # are known to split on ', '. Therefore, we encode ';' and ','

                # SimpleCookie already does the hard work of encoding and decoding.
                # It uses octal sequences like '\\012' for newline etc.
                # and non-ASCII chars.  We just make use of this mechanism, to
                # avoid introducing two encoding schemes which would be confusing
                # and especially awkward for javascript.

                # NB, contrary to Python docs, value_encode returns a tuple containing
                # (real val, encoded_val)
                val, encoded = super(SimpleCookie, self).value_encode(val)

                encoded = encoded.replace(";", "\\073").replace(",","\\054")
                # If encoded now contains any quoted chars, we need double quotes
                # around the whole string.
                if "\\" in encoded and not encoded.startswith('"'):
                    encoded = '"' + encoded + '"'

                return val, encoded

        if not _cookie_allows_colon_in_names:
            def load(self, rawdata):
                # Parse normally, collecting names the patched __set below
                # rejected, then drop those entries afterwards.
                self.bad_cookies = set()
                super(SimpleCookie, self).load(rawdata)
                for key in self.bad_cookies:
                    del self[key]

            # override private __set() method:
            # (needed for using our Morsel, and for laxness with CookieError
            # — note the name-mangled form targets BaseCookie.__set)
            def _BaseCookie__set(self, key, real_value, coded_value):
                try:
                    M = self.get(key, Morsel())
                    M.set(key, real_value, coded_value)
                    dict.__setitem__(self, key, M)
                except Cookie.CookieError:
                    # Remember the bad name; a placeholder Morsel keeps the
                    # parser going until load() removes it.
                    self.bad_cookies.add(key)
                    dict.__setitem__(self, key, Cookie.Morsel())
from django.conf import settings
from django.core import signing
from django.core.exceptions import ImproperlyConfigured
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser
from django.http.utils import *
from django.utils.datastructures import MultiValueDict, ImmutableList
from django.utils.encoding import smart_str, iri_to_uri, force_unicode
from django.utils.http import cookie_date
from django.utils import timezone
# URI-reserved characters (see RFC 3986).
RESERVED_CHARS="!*'();:@&=+$,/?%#[]"
# Matches absolute http(s) URLs, case-insensitive scheme.
absolute_http_url_re = re.compile(r"^https?://", re.I)
class Http404(Exception):
    """Exception signalling that a resource was not found (HTTP 404)."""
    pass
# Sentinel default for get_signed_cookie(): distinguishes "no default given"
# (re-raise on failure) from an explicit default of any value, including None.
RAISE_ERROR = object()
def build_request_repr(request, path_override=None, GET_override=None,
                       POST_override=None, COOKIES_override=None,
                       META_override=None):
    """
    Builds and returns the request's representation string. The request's
    attributes may be overridden by pre-processed values.
    """
    # Since this is called as part of error handling, we need to be very
    # robust against potentially malformed input.  Each section is rendered
    # independently so one broken attribute cannot hide the others.  Only
    # ``Exception`` is caught (instead of the previous bare ``except:``) so
    # KeyboardInterrupt/SystemExit still propagate.
    try:
        get = (pformat(GET_override)
               if GET_override is not None
               else pformat(request.GET))
    except Exception:
        get = '<could not parse>'
    if request._post_parse_error:
        # POST parsing already failed earlier; don't try to render it again.
        post = '<could not parse>'
    else:
        try:
            post = (pformat(POST_override)
                    if POST_override is not None
                    else pformat(request.POST))
        except Exception:
            post = '<could not parse>'
    try:
        cookies = (pformat(COOKIES_override)
                   if COOKIES_override is not None
                   else pformat(request.COOKIES))
    except Exception:
        cookies = '<could not parse>'
    try:
        meta = (pformat(META_override)
                if META_override is not None
                else pformat(request.META))
    except Exception:
        meta = '<could not parse>'
    path = path_override if path_override is not None else request.path
    return smart_str(u'<%s\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
                     (request.__class__.__name__,
                      path,
                      unicode(get),
                      unicode(post),
                      unicode(cookies),
                      unicode(meta)))
class UnreadablePostError(IOError):
    """IOError raised when reading the request body fails (see HttpRequest.body)."""
    pass
class HttpRequest(object):
    """A basic HTTP request."""
    # The encoding used in GET/POST dicts. None means use default setting.
    _encoding = None
    # Class-level default; per-request handlers are created lazily from
    # settings.FILE_UPLOAD_HANDLERS (see _initialize_handlers).
    _upload_handlers = []
    def __init__(self):
        # Start with plain empty containers; request subclasses populate them.
        self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {}
        self.path = ''
        self.path_info = ''
        self.method = None
        # Set to True when POST parsing fails so __repr__ can report it.
        self._post_parse_error = False
    def __repr__(self):
        """Debug representation including path, GET, POST, COOKIES and META."""
        return build_request_repr(self)
    def get_host(self):
        """Returns the HTTP host using the environment or request headers."""
        # We try three options, in order of decreasing preference.
        if settings.USE_X_FORWARDED_HOST and (
            'HTTP_X_FORWARDED_HOST' in self.META):
            host = self.META['HTTP_X_FORWARDED_HOST']
        elif 'HTTP_HOST' in self.META:
            host = self.META['HTTP_HOST']
        else:
            # Reconstruct the host using the algorithm from PEP 333.
            host = self.META['SERVER_NAME']
            server_port = str(self.META['SERVER_PORT'])
            # Only append the port when it isn't the default for the scheme.
            if server_port != (self.is_secure() and '443' or '80'):
                host = '%s:%s' % (host, server_port)
        return host
    def get_full_path(self):
        """Returns the request path plus the query string, if present."""
        # RFC 3986 requires query string arguments to be in the ASCII range.
        # Rather than crash if this doesn't happen, we encode defensively.
        return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '')
    def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
        """
        Attempts to return a signed cookie. If the signature fails or the
        cookie has expired, raises an exception... unless you provide the
        default argument in which case that value will be returned instead.
        """
        try:
            cookie_value = self.COOKIES[key].encode('utf-8')
        except KeyError:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        try:
            value = signing.get_cookie_signer(salt=key + salt).unsign(
                cookie_value, max_age=max_age)
        except signing.BadSignature:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        return value
    def build_absolute_uri(self, location=None):
        """
        Builds an absolute URI from the location and the variables available in
        this request. If no location is specified, the absolute URI is built on
        ``request.get_full_path()``.
        """
        if not location:
            location = self.get_full_path()
        # Prepend scheme/host only when `location` isn't already absolute.
        if not absolute_http_url_re.match(location):
            current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http',
                                         self.get_host(), self.path)
            location = urljoin(current_uri, location)
        return iri_to_uri(location)
    def _is_secure(self):
        # Default hook implementation: checks the HTTPS environment variable.
        return os.environ.get("HTTPS") == "on"
    def is_secure(self):
        """Returns True if the request was made over a secure connection."""
        # First, check the SECURE_PROXY_SSL_HEADER setting.
        if settings.SECURE_PROXY_SSL_HEADER:
            try:
                header, value = settings.SECURE_PROXY_SSL_HEADER
            except ValueError:
                raise ImproperlyConfigured('The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.')
            if self.META.get(header, None) == value:
                return True
        # Failing that, fall back to _is_secure(), which is a hook for
        # subclasses to implement.
        return self._is_secure()
    def is_ajax(self):
        """Returns True if the client marked the request as XMLHttpRequest."""
        return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
    def _set_encoding(self, val):
        """
        Sets the encoding used for GET/POST accesses. If the GET or POST
        dictionary has already been created, it is removed and recreated on the
        next access (so that it is decoded correctly).
        """
        self._encoding = val
        if hasattr(self, '_get'):
            del self._get
        if hasattr(self, '_post'):
            del self._post
    def _get_encoding(self):
        return self._encoding
    encoding = property(_get_encoding, _set_encoding)
    def _initialize_handlers(self):
        # Instantiate upload handlers from the FILE_UPLOAD_HANDLERS setting.
        self._upload_handlers = [uploadhandler.load_handler(handler, self)
                                 for handler in settings.FILE_UPLOAD_HANDLERS]
    def _set_upload_handlers(self, upload_handlers):
        # Handlers are frozen once an upload has been parsed (_files exists).
        if hasattr(self, '_files'):
            raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
        self._upload_handlers = upload_handlers
    def _get_upload_handlers(self):
        if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
            self._initialize_handlers()
        return self._upload_handlers
    upload_handlers = property(_get_upload_handlers, _set_upload_handlers)
    def parse_file_upload(self, META, post_data):
        """Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
        # Freeze the handler list so it can't be mutated mid-parse.
        self.upload_handlers = ImmutableList(
            self.upload_handlers,
            warning = "You cannot alter upload handlers after the upload has been processed."
        )
        parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
        return parser.parse()
    @property
    def body(self):
        """Raw request body as a bytestring; cached after the first access."""
        if not hasattr(self, '_body'):
            if self._read_started:
                raise Exception("You cannot access body after reading from request's data stream")
            try:
                self._body = self.read()
            except IOError as e:
                # Python 2 three-expression raise: keeps the original traceback.
                raise UnreadablePostError, e, sys.exc_traceback
            # Reset the stream so later consumers (e.g. POST parsing) can
            # re-read the cached body.
            self._stream = BytesIO(self._body)
        return self._body
    @property
    def raw_post_data(self):
        """Deprecated alias for ``body``."""
        warnings.warn('HttpRequest.raw_post_data has been deprecated. Use HttpRequest.body instead.', DeprecationWarning)
        return self.body
    def _mark_post_parse_error(self):
        # Record a parse failure with empty POST/FILES placeholders.
        self._post = QueryDict('')
        self._files = MultiValueDict()
        self._post_parse_error = True
    def _load_post_and_files(self):
        # Populates self._post and self._files
        if self.method != 'POST':
            self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
            return
        if self._read_started and not hasattr(self, '_body'):
            # The stream was partially consumed without caching the body, so
            # the POST data can no longer be parsed reliably.
            self._mark_post_parse_error()
            return
        if self.META.get('CONTENT_TYPE', '').startswith('multipart'):
            if hasattr(self, '_body'):
                # Use already read data
                data = BytesIO(self._body)
            else:
                data = self
            try:
                self._post, self._files = self.parse_file_upload(self.META, data)
            except:
                # An error occured while parsing POST data. Since when
                # formatting the error the request handler might access
                # self.POST, set self._post and self._file to prevent
                # attempts to parse POST data again.
                # Mark that an error occured. This allows self.__repr__ to
                # be explicit about it instead of simply representing an
                # empty POST
                self._mark_post_parse_error()
                raise
        else:
            # Non-multipart: treat the body as a urlencoded query string.
            self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
    ## File-like and iterator interface.
    ##
    ## Expects self._stream to be set to an appropriate source of bytes by
    ## a corresponding request subclass (e.g. WSGIRequest).
    ## Also when request data has already been read by request.POST or
    ## request.body, self._stream points to a BytesIO instance
    ## containing that data.
    def read(self, *args, **kwargs):
        # Mark the stream as consumed so body/POST access can detect it.
        self._read_started = True
        return self._stream.read(*args, **kwargs)
    def readline(self, *args, **kwargs):
        self._read_started = True
        return self._stream.readline(*args, **kwargs)
    def xreadlines(self):
        """Yields the request body line by line."""
        while True:
            buf = self.readline()
            if not buf:
                break
            yield buf
    __iter__ = xreadlines
    def readlines(self):
        return list(iter(self))
class QueryDict(MultiValueDict):
    """
    A specialized MultiValueDict that takes a query string when initialized.
    This is immutable unless you create a copy of it.
    Values retrieved from this class are converted from the given encoding
    (DEFAULT_CHARSET by default) to unicode.
    """
    # These are both reset in __init__, but is specified here at the class
    # level so that unpickling will have valid values
    _mutable = True
    _encoding = None
    def __init__(self, query_string, mutable=False, encoding=None):
        MultiValueDict.__init__(self)
        if not encoding:
            encoding = settings.DEFAULT_CHARSET
        self.encoding = encoding
        # Decode keys and values defensively; undecodable bytes become U+FFFD.
        for key, value in parse_qsl((query_string or ''), True): # keep_blank_values=True
            self.appendlist(force_unicode(key, encoding, errors='replace'),
                            force_unicode(value, encoding, errors='replace'))
        self._mutable = mutable
    def _get_encoding(self):
        # Lazy default so unpickled instances still get a usable encoding.
        if self._encoding is None:
            self._encoding = settings.DEFAULT_CHARSET
        return self._encoding
    def _set_encoding(self, value):
        self._encoding = value
    encoding = property(_get_encoding, _set_encoding)
    def _assert_mutable(self):
        # Guard called by every mutating method below.
        if not self._mutable:
            raise AttributeError("This QueryDict instance is immutable")
    def __setitem__(self, key, value):
        self._assert_mutable()
        key = str_to_unicode(key, self.encoding)
        value = str_to_unicode(value, self.encoding)
        MultiValueDict.__setitem__(self, key, value)
    def __delitem__(self, key):
        self._assert_mutable()
        super(QueryDict, self).__delitem__(key)
    def __copy__(self):
        # Copies are always mutable, regardless of the source's mutability.
        result = self.__class__('', mutable=True, encoding=self.encoding)
        for key, value in dict.items(self):
            dict.__setitem__(result, key, value)
        return result
    def __deepcopy__(self, memo):
        import copy
        result = self.__class__('', mutable=True, encoding=self.encoding)
        memo[id(self)] = result
        for key, value in dict.items(self):
            dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo))
        return result
    def setlist(self, key, list_):
        self._assert_mutable()
        key = str_to_unicode(key, self.encoding)
        list_ = [str_to_unicode(elt, self.encoding) for elt in list_]
        MultiValueDict.setlist(self, key, list_)
    def setlistdefault(self, key, default_list=()):
        self._assert_mutable()
        if key not in self:
            self.setlist(key, default_list)
        return MultiValueDict.getlist(self, key)
    def appendlist(self, key, value):
        self._assert_mutable()
        key = str_to_unicode(key, self.encoding)
        value = str_to_unicode(value, self.encoding)
        MultiValueDict.appendlist(self, key, value)
    def update(self, other_dict):
        self._assert_mutable()
        f = lambda s: str_to_unicode(s, self.encoding)
        if hasattr(other_dict, 'lists'):
            # MultiValueDict-like source: preserve every value for each key.
            for key, valuelist in other_dict.lists():
                for value in valuelist:
                    MultiValueDict.update(self, {f(key): f(value)})
        else:
            d = dict([(f(k), f(v)) for k, v in other_dict.items()])
            MultiValueDict.update(self, d)
    def pop(self, key, *args):
        self._assert_mutable()
        return MultiValueDict.pop(self, key, *args)
    def popitem(self):
        self._assert_mutable()
        return MultiValueDict.popitem(self)
    def clear(self):
        self._assert_mutable()
        MultiValueDict.clear(self)
    def setdefault(self, key, default=None):
        self._assert_mutable()
        key = str_to_unicode(key, self.encoding)
        default = str_to_unicode(default, self.encoding)
        return MultiValueDict.setdefault(self, key, default)
    def copy(self):
        """Returns a mutable copy of this object."""
        return self.__deepcopy__({})
    def urlencode(self, safe=None):
        """
        Returns an encoded string of all query string arguments.
        :arg safe: Used to specify characters which do not require quoting, for
        example::
            >>> q = QueryDict('', mutable=True)
            >>> q['next'] = '/a&b/'
            >>> q.urlencode()
            'next=%2Fa%26b%2F'
            >>> q.urlencode(safe='/')
            'next=/a%26b/'
        """
        output = []
        if safe:
            encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
        else:
            encode = lambda k, v: urlencode({k: v})
        # Emit one `key=value` pair per stored value, in list order.
        for k, list_ in self.lists():
            k = smart_str(k, self.encoding)
            output.extend([encode(k, smart_str(v, self.encoding))
                           for v in list_])
        return '&'.join(output)
def parse_cookie(cookie):
    """Return a plain dict mapping cookie names to their values.

    Accepts either a raw cookie-header string or an already-built
    ``Cookie.BaseCookie`` instance; an empty or unparseable string
    yields an empty dict.
    """
    if cookie == '':
        return {}
    if isinstance(cookie, Cookie.BaseCookie):
        parsed = cookie
    else:
        try:
            parsed = SimpleCookie()
            parsed.load(cookie)
        except Cookie.CookieError:
            # The string could not be parsed as a cookie at all.
            return {}
    return dict((name, parsed.get(name).value) for name in parsed.keys())
class BadHeaderError(ValueError):
    """Raised when a header value contains a newline (see HttpResponse._convert_to_ascii)."""
    pass
class HttpResponse(object):
    """A basic HTTP response, with content and dictionary-accessed headers."""
    status_code = 200
    def __init__(self, content='', mimetype=None, status=None,
            content_type=None):
        # _headers is a mapping of the lower-case name to the original case of
        # the header (required for working with legacy systems) and the header
        # value. Both the name of the header and its value are ASCII strings.
        self._headers = {}
        self._charset = settings.DEFAULT_CHARSET
        if mimetype: # For backwards compatibility.
            content_type = mimetype
        if not content_type:
            content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE,
                    self._charset)
        self.content = content
        self.cookies = SimpleCookie()
        if status:
            self.status_code = status
        self['Content-Type'] = content_type
    def __str__(self):
        """Full HTTP message, including headers."""
        # Header order is whatever the dict yields; no status line included.
        return '\n'.join(['%s: %s' % (key, value)
            for key, value in self._headers.values()]) \
            + '\n\n' + self.content
    def _convert_to_ascii(self, *values):
        """Converts all values to ascii strings."""
        for value in values:
            if isinstance(value, unicode):
                try:
                    value = value.encode('us-ascii')
                except UnicodeError as e:
                    # Enrich the error so callers see why ASCII is required.
                    e.reason += ', HTTP response headers must be in US-ASCII format'
                    raise
            else:
                value = str(value)
            # Embedded newlines would enable header injection; refuse them.
            if '\n' in value or '\r' in value:
                raise BadHeaderError("Header values can't contain newlines (got %r)" % (value))
            yield value
    def __setitem__(self, header, value):
        # Store under the lower-cased name; remember the original casing.
        header, value = self._convert_to_ascii(header, value)
        self._headers[header.lower()] = (header, value)
    def __delitem__(self, header):
        """Removes a header; missing headers are silently ignored."""
        try:
            del self._headers[header.lower()]
        except KeyError:
            pass
    def __getitem__(self, header):
        """Case-insensitive header lookup; raises KeyError when absent."""
        return self._headers[header.lower()][1]
    def __getstate__(self):
        # SimpleCookie is not pickeable with pickle.HIGHEST_PROTOCOL, so we
        # serialise to a string instead
        state = self.__dict__.copy()
        state['cookies'] = str(state['cookies'])
        return state
    def __setstate__(self, state):
        # Rebuild the SimpleCookie from its string form (see __getstate__).
        self.__dict__.update(state)
        self.cookies = SimpleCookie(self.cookies)
    def has_header(self, header):
        """Case-insensitive check for a header."""
        return header.lower() in self._headers
    __contains__ = has_header
    def items(self):
        """Returns (name, value) pairs for all headers, in original casing."""
        return self._headers.values()
    def get(self, header, alternate=None):
        """Case-insensitive header lookup with a default."""
        return self._headers.get(header.lower(), (None, alternate))[1]
    def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
                   domain=None, secure=False, httponly=False):
        """
        Sets a cookie.
        ``expires`` can be:
        - a string in the correct format,
        - a naive ``datetime.datetime`` object in UTC,
        - an aware ``datetime.datetime`` object in any time zone.
        If it is a ``datetime.datetime`` object then ``max_age`` will be calculated.
        """
        self.cookies[key] = value
        if expires is not None:
            if isinstance(expires, datetime.datetime):
                if timezone.is_aware(expires):
                    expires = timezone.make_naive(expires, timezone.utc)
                # expires.utcnow() reaches datetime.datetime.utcnow() via the
                # instance; delta is the cookie's remaining lifetime in UTC.
                delta = expires - expires.utcnow()
                # Add one second so the date matches exactly (a fraction of
                # time gets lost between converting to a timedelta and
                # then the date string).
                delta = delta + datetime.timedelta(seconds=1)
                # Just set max_age - the max_age logic will set expires.
                expires = None
                max_age = max(0, delta.days * 86400 + delta.seconds)
            else:
                self.cookies[key]['expires'] = expires
        if max_age is not None:
            self.cookies[key]['max-age'] = max_age
            # IE requires expires, so set it if hasn't been already.
            if not expires:
                self.cookies[key]['expires'] = cookie_date(time.time() +
                                                           max_age)
        if path is not None:
            self.cookies[key]['path'] = path
        if domain is not None:
            self.cookies[key]['domain'] = domain
        if secure:
            self.cookies[key]['secure'] = True
        if httponly:
            self.cookies[key]['httponly'] = True
    def set_signed_cookie(self, key, value, salt='', **kwargs):
        """Like set_cookie(), but cryptographically signs the value first."""
        value = signing.get_cookie_signer(salt=key + salt).sign(value)
        return self.set_cookie(key, value, **kwargs)
    def delete_cookie(self, key, path='/', domain=None):
        """Expires a cookie by setting it to the epoch with max-age 0."""
        self.set_cookie(key, max_age=0, path=path, domain=domain,
                        expires='Thu, 01-Jan-1970 00:00:00 GMT')
    def _get_content(self):
        # With Content-Encoding set, the container holds raw (already encoded)
        # chunks; otherwise chunks are coerced to the response charset.
        if self.has_header('Content-Encoding'):
            return ''.join([str(e) for e in self._container])
        return ''.join([smart_str(e, self._charset) for e in self._container])
    def _set_content(self, value):
        # Iterables are stored as-is (streaming); other values are wrapped
        # in a single-element list so write() can append to it.
        if hasattr(value, '__iter__'):
            self._container = value
            self._base_content_is_iter = True
        else:
            self._container = [value]
            self._base_content_is_iter = False
    content = property(_get_content, _set_content)
    def __iter__(self):
        self._iterator = iter(self._container)
        return self
    def next(self):
        # Python 2 iterator protocol; unicode chunks are encoded on the fly.
        chunk = self._iterator.next()
        if isinstance(chunk, unicode):
            chunk = chunk.encode(self._charset)
        return str(chunk)
    def close(self):
        if hasattr(self._container, 'close'):
            self._container.close()
    # The remaining methods partially implement the file-like object interface.
    # See http://docs.python.org/lib/bltin-file-objects.html
    def write(self, content):
        if self._base_content_is_iter:
            raise Exception("This %s instance is not writable" % self.__class__)
        self._container.append(content)
    def flush(self):
        # File-interface no-op: content is buffered in memory anyway.
        pass
    def tell(self):
        if self._base_content_is_iter:
            raise Exception("This %s instance cannot tell its position" % self.__class__)
        return sum([len(str(chunk)) for chunk in self._container])
class HttpResponseRedirect(HttpResponse):
    # 302 Found: temporary redirect.
    status_code = 302
    def __init__(self, redirect_to):
        super(HttpResponseRedirect, self).__init__()
        self['Location'] = iri_to_uri(redirect_to)
class HttpResponsePermanentRedirect(HttpResponse):
    # 301 Moved Permanently.
    status_code = 301
    def __init__(self, redirect_to):
        super(HttpResponsePermanentRedirect, self).__init__()
        self['Location'] = iri_to_uri(redirect_to)
class HttpResponseNotModified(HttpResponse):
    # 304 Not Modified.
    status_code = 304
class HttpResponseBadRequest(HttpResponse):
    # 400 Bad Request.
    status_code = 400
class HttpResponseNotFound(HttpResponse):
    # 404 Not Found.
    status_code = 404
class HttpResponseForbidden(HttpResponse):
    # 403 Forbidden.
    status_code = 403
class HttpResponseNotAllowed(HttpResponse):
    # 405 Method Not Allowed; the Allow header lists permitted methods.
    status_code = 405
    def __init__(self, permitted_methods):
        super(HttpResponseNotAllowed, self).__init__()
        self['Allow'] = ', '.join(permitted_methods)
class HttpResponseGone(HttpResponse):
    # 410 Gone.
    status_code = 410
class HttpResponseServerError(HttpResponse):
    # 500 Internal Server Error.
    status_code = 500
# A backwards compatible alias for HttpRequest.get_host.
def get_host(request):
    """Module-level alias: delegates to ``request.get_host()``."""
    return request.get_host()
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_unicode for parsing URLs and form inputs. Thus,
# this slightly more restricted function.
def str_to_unicode(s, encoding):
    """
    Convert a bytestring to unicode using the given encoding.

    Characters that cannot be decoded are replaced with the Unicode
    replacement character (U+FFFD). Any non-bytestring object is
    returned unchanged.
    """
    if not isinstance(s, str):
        return s
    return unicode(s, encoding, 'replace')
| 36.31565 | 134 | 0.610474 |
33835c3c385c670bc1575e7c0a19257ad81b9a85 | 900 | py | Python | section1/graphs_and_sessions.py | joyjeni/-Learn-Artificial-Intelligence-with-TensorFlow | 8ae05456241a3ead3dcb83dd315797380d7acacf | [
"MIT"
] | 16 | 2018-07-06T14:41:49.000Z | 2021-11-08T13:12:03.000Z | section1/graphs_and_sessions.py | joyjeni/-Learn-Artificial-Intelligence-with-TensorFlow | 8ae05456241a3ead3dcb83dd315797380d7acacf | [
"MIT"
] | null | null | null | section1/graphs_and_sessions.py | joyjeni/-Learn-Artificial-Intelligence-with-TensorFlow | 8ae05456241a3ead3dcb83dd315797380d7acacf | [
"MIT"
] | 15 | 2018-07-11T06:14:20.000Z | 2021-02-09T21:38:24.000Z | """
File: section1/graphs_and_sessions.py
Author: Brandon McKinzie
Description: snippets from the slides in video 1.4.
"""
import tensorflow as tf
# -----------------------------------
# Graphs.
# -----------------------------------
# Ops created outside any explicit graph context attach to the default graph.
c = tf.constant(4.0, name='c')
assert c.graph is tf.get_default_graph()
initial_default_graph = tf.get_default_graph()
# A freshly constructed Graph only becomes the default inside its
# as_default() context manager.
g = tf.Graph()
with g.as_default():
    c = tf.constant(4.0, name='c')
    assert c.graph is g
assert c.graph is not initial_default_graph
# -----------------------------------
# Sessions.
# -----------------------------------
# 1. Define some computations.
x = tf.get_variable('x', shape=[2, 3])
y = tf.get_variable('y', shape=[3, 1])
# z is the [2, 1] matrix product of x and y.
z = tf.matmul(x, y, name='z')
with tf.Session() as sess:
    # 2. Initialize our variables.
    sess.run(tf.global_variables_initializer())
    # 3. Execute computations.
    output = sess.run(z)
5fb87f2712f0447521f54290b91728d25a9c5476 | 1,870 | py | Python | lib/build_test.py | pchaigno/grr | 69c81624c281216a45c4bb88a9d4e4b0613a3556 | [
"Apache-2.0"
] | null | null | null | lib/build_test.py | pchaigno/grr | 69c81624c281216a45c4bb88a9d4e4b0613a3556 | [
"Apache-2.0"
] | null | null | null | lib/build_test.py | pchaigno/grr | 69c81624c281216a45c4bb88a9d4e4b0613a3556 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Tests for building and repacking clients."""
import os
import shutil
import stat
from grr.lib import build
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import maintenance_utils
from grr.lib import test_lib
from grr.lib import utils
class BuildTests(test_lib.GRRBaseTest):
  """Tests for building and repacking functionality."""
  def setUp(self):
    super(BuildTests, self).setUp()
    # Client template binaries live under <srcdir>/grr/executables.
    self.executables_dir = os.path.join(config_lib.CONFIG["Test.srcdir"],
                                        "grr", "executables")
  def testRepackAll(self):
    """Testing repacking all binaries."""
    with utils.TempDirectory() as tmp_dir:
      new_dir = os.path.join(tmp_dir, "grr", "executables")
      # Copy templates and ensure our resulting directory is writeable.
      shutil.copytree(self.executables_dir, new_dir)
      for root, dirs, _ in os.walk(new_dir):
        for this_dir in dirs:
          os.chmod(os.path.join(root, this_dir),
                   stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
      # Point the builder at the writeable copy instead of the source tree.
      config_lib.CONFIG.Set("ClientBuilder.source", tmp_dir)
      # If this doesn't raise, it means that there were either no templates,
      # or all of them were repacked successfully.
      maintenance_utils.RepackAllBinaries()
  def testGenClientConfig(self):
    """Generated client config embeds build time and configured plugins."""
    plugins = ["plugin1", "plugin2"]
    config_lib.CONFIG.Set("Client.plugins", plugins)
    deployer = build.ClientDeployer()
    data = deployer.GetClientConfig(["Client Context"], validate=True)
    parser = config_lib.YamlParser(data=data)
    raw_data = parser.RawData()
    self.assertIn("Client.build_time", raw_data)
    self.assertIn("Client.plugins", raw_data)
    self.assertEqual(raw_data["Client.plugins"], plugins)
def main(argv):
  # Delegate to the GRR test runner so shared flags/setup apply.
  test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
  flags.StartMain(main)
| 29.68254 | 76 | 0.694652 |
2b27627202fcf16a1bd8bdf2adc4734f515109a8 | 45 | py | Python | 01_hello/hello02_comment.py | rebeckaflynn/tiny_python_projects | 692f24dd00769438e7aaa1c45223b701b20a1192 | [
"MIT"
] | null | null | null | 01_hello/hello02_comment.py | rebeckaflynn/tiny_python_projects | 692f24dd00769438e7aaa1c45223b701b20a1192 | [
"MIT"
] | null | null | null | 01_hello/hello02_comment.py | rebeckaflynn/tiny_python_projects | 692f24dd00769438e7aaa1c45223b701b20a1192 | [
"MIT"
] | null | null | null | # Purpose: Say hello
print('Hello, World!')
| 11.25 | 22 | 0.666667 |
7955caf0722c623102ee8ce749e23bb9de7fff64 | 1,434 | py | Python | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/mrp/models/stock_picking.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | 1 | 2019-12-19T01:53:13.000Z | 2019-12-19T01:53:13.000Z | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/mrp/models/stock_picking.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/mrp/models/stock_picking.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class StockPickingType(models.Model):
    _inherit = 'stock.picking.type'
    # Extend the picking-type code selection with manufacturing operations.
    code = fields.Selection(selection_add=[('mrp_operation', 'Manufacturing Operation')])
    # Counters of manufacturing orders, all computed by _get_mo_count below.
    count_mo_todo = fields.Integer(compute='_get_mo_count')
    count_mo_waiting = fields.Integer(compute='_get_mo_count')
    count_mo_late = fields.Integer(compute='_get_mo_count')
    def _get_mo_count(self):
        """Compute open manufacturing-order counts per MRP picking type."""
        mrp_picking_types = self.filtered(lambda picking: picking.code == 'mrp_operation')
        if not mrp_picking_types:
            # NOTE(review): records outside mrp_picking_types never get their
            # computed fields assigned here -- confirm the framework tolerates
            # unset computed values for non-MRP picking types.
            return
        domains = {
            'count_mo_waiting': [('availability', '=', 'waiting')],
            'count_mo_todo': [('state', 'in', ('confirmed', 'planned', 'progress'))],
            'count_mo_late': [('date_planned_start', '<', fields.Date.today()), ('state', '=', 'confirmed')],
        }
        for field in domains:
            # One grouped query per counter, restricted to non-finished orders
            # belonging to the picking types in this recordset.
            data = self.env['mrp.production'].read_group(domains[field] +
                [('state', 'not in', ('done', 'cancel')), ('picking_type_id', 'in', self.ids)],
                ['picking_type_id'], ['picking_type_id'])
            count = dict(map(lambda x: (x['picking_type_id'] and x['picking_type_id'][0], x['picking_type_id_count']), data))
            for record in mrp_picking_types:
                record[field] = count.get(record.id, 0)
f331957308ba036570cb7d866470dcdf522e0fcf | 461 | py | Python | app/migrations/0007_auto_20200323_0246.py | taikhoangg003/django-dashboard-coreui | f0e8acac2428f0eb493b71c7e4010da35b4389f7 | [
"MIT"
] | null | null | null | app/migrations/0007_auto_20200323_0246.py | taikhoangg003/django-dashboard-coreui | f0e8acac2428f0eb493b71c7e4010da35b4389f7 | [
"MIT"
] | null | null | null | app/migrations/0007_auto_20200323_0246.py | taikhoangg003/django-dashboard-coreui | f0e8acac2428f0eb493b71c7e4010da35b4389f7 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-23 02:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0006_auto_20200323_0229'),
]
operations = [
migrations.AlterField(
model_name='padata',
name='future_ptc_date',
field=models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Fureture PTC date'),
),
]
| 24.263158 | 120 | 0.624729 |
dcdc4b3a8c7612fb7eae30bb099f4e6b5a84a1ef | 790 | py | Python | old/dropbox_scripts/file-manipulation/change-extension-of-all-files/change-extension-of-all-files.py | jolitp/automation_scripts | ba0c94e5212f0069b89f75a48fe2e2aafb5c921c | [
"MIT"
] | null | null | null | old/dropbox_scripts/file-manipulation/change-extension-of-all-files/change-extension-of-all-files.py | jolitp/automation_scripts | ba0c94e5212f0069b89f75a48fe2e2aafb5c921c | [
"MIT"
] | null | null | null | old/dropbox_scripts/file-manipulation/change-extension-of-all-files/change-extension-of-all-files.py | jolitp/automation_scripts | ba0c94e5212f0069b89f75a48fe2e2aafb5c921c | [
"MIT"
] | null | null | null | #! /usr/bin/python3
import pathlib
import os
import sys
import json
import shutil
from colorama import Fore
from colorama import Style
import subprocess
DEBUG = False
DEBUG = True
path_to_current_directory = pathlib.Path(__file__).parent.absolute()
current_directory_name = os.path.basename(path_to_current_directory)
all_files_and_folders = os.listdir(path_to_current_directory)
for video in all_files_and_folders:
source = str(path_to_current_directory) + '/' + video
print(source)
ext = sys.argv[1]
# print(ext)
video_without_ext = os.path.splitext(video)[0]
# print(video_without_ext)
destination = str(path_to_current_directory) + '/' + video_without_ext + '.' + ext
print(destination)
shutil.move(source,destination)
| 14.90566 | 86 | 0.734177 |
842f133aff6ddc574033a2f7da9fcfc04e218ec7 | 5,189 | py | Python | nicos_mlz/reseda/commands.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_mlz/reseda/commands.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 4 | 2019-11-08T10:18:16.000Z | 2021-01-13T13:07:29.000Z | nicos_mlz/reseda/commands.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Christian Franz <christian.franz@frm2.tum.de>
#
# *****************************************************************************
"""Module for RESEDA specific commands."""
import scipy.constants as co
from nicos import session
from nicos.commands import helparglist, usercommand
from nicos.commands.device import maw, move, stop, wait
from nicos.commands.measure import count
from nicos.commands.scan import manualscan
__all__ = ['zero', 'setecho', 'set_cascade', 'pol', 'miezescan', 'miezetau']
@helparglist('wavelength, deltaFreq, distance')
@usercommand
def miezetau(wavelength, deltaFreq, distance):
    """Calculate the MIEZE time.

    Computed from the wavelength (A), the difference frequency of the coils
    (Hz) and the sample-detector distance (m).  ``deltaFreq`` is the single
    difference, not the double difference.
    """
    # co.m_n: neutron mass, co.h: Planck constant (SI units).
    prefactor = 2 * co.m_n**2 / co.h**2
    wavelength_cubed = (wavelength * 1e-10)**3
    # Factor order matches the original expression; 1e9 scales the result
    # (presumably seconds -> nanoseconds -- TODO confirm).
    return prefactor * wavelength_cubed * deltaFreq * distance * 1e9
@usercommand
def zero():
    """Shut down all (static) power supplies."""
    # Static field/coil/NSE supplies; they are driven close to zero (0.001)
    # rather than exactly 0.
    ps = ['hrf_0a', 'hrf_0b', 'hrf_1a', 'hrf_1b', 'hsf_0a', 'hsf_0b', 'hsf_1',
          'sf_0a', 'sf_0b', 'sf_1', 'gf1', 'gf2', 'gf4', 'gf5', 'gf6', 'gf7',
          'gf8', 'gf9', 'gf10', 'nse0', 'nse1']
    for powersupply in ps:
        # Rebind the loop name from device-name string to device object.
        powersupply = session.getDevice(powersupply)
        move(powersupply, 0.001)
    wait()
    # Stop regulation and turn fg_amp off
    stop('cbox_0a_reg_amp', 'cbox_0b_reg_amp', 'cbox_1_reg_amp')
    maw('cbox_0a_fg_amp', 0.001, 'cbox_0b_fg_amp', 0.001,
        'cbox_1_fg_amp', 0.001)
@usercommand
def set_flipper_off():
    """Shut down the flipper supplies.

    Afterwards the neutrons are guided through the instrument for
    image mode (MIEZE).
    """
    # Drive the spin-flipper and function-generator amplitude supplies
    # close to zero.
    for supply_name in ('sf_0a', 'sf_0b', 'cbox_0a_fg_amp', 'cbox_0b_fg_amp'):
        move(session.getDevice(supply_name), 0.01)
    # Then halt the amplitude regulators.
    for regulator_name in ('cbox_0a_reg_amp', 'cbox_0b_reg_amp'):
        stop(session.getDevice(regulator_name))
@helparglist('time')
@usercommand
def setecho(time):
    """Wrap setting of an echotime."""
    # Move the echotime device, reconfigure the Cascade detector frequencies
    # for the new target, then wait for the echotime move to finish.
    echotime = session.getDevice('echotime')
    move(echotime, time)
    set_cascade()
    wait(echotime)
@usercommand
def set_cascade():
    """Set Cascade Frequency Generator Freqs and Trigger."""
    echotime = session.getDevice('echotime')
    psd_chop_freq = session.getDevice('psd_chop_freq')
    psd_timebin_freq = session.getDevice('psd_timebin_freq')
    fg_burst = session.getDevice('fg_burst')
    # Look up the two coil frequencies for the targeted echotime.
    tau = echotime.target
    f1 = echotime.currenttable[tau]['cbox_0a_fg_freq']
    f2 = echotime.currenttable[tau]['cbox_0b_fg_freq']
    # Chopper runs at twice the difference frequency, time binning at 32x.
    move(psd_chop_freq, 2 * (f2 - f1))
    move(psd_timebin_freq, 32 * (f2 - f1))
    # Re-arm and trigger the burst generator so the settings take effect.
    move(fg_burst, 'arm')
    move(fg_burst, 'trigger')
    wait()
@helparglist('echolist, counttime')
@usercommand
def miezescan(echolist, counttime):
    """Measure one S(Q,t) curve by stepping through a list of echo times.

    :param echolist: echo times to visit
    :param counttime: counting time (identical for every list entry)
    """
    echo_dev = session.getDevice('echotime')
    with manualscan(echo_dev, counttime):
        for echo in echolist:
            setecho(echo)
            count(counttime)
@helparglist('up, down')
@usercommand
def pol(up, down):
    """Return the polarisation (contrast): (up - down) / (up + down)."""
    total = up + down
    difference = up - down
    return difference / total
@helparglist('device, start, step, numsteps')
@usercommand
def freqscan(device, start, step, numsteps):
    """Scan a frequency device to find a resonance.

    The detector must be set to the matching device
    (e.g. ``cbox_0a_coil_rms``).

    :param device: frequency device, e.g. ``cbox_0a_fg_freq``
    :param start: starting frequency in Hz
    :param step: step width in Hz
    :param numsteps: number of points to measure
    """
    with manualscan(device):
        for idx in range(numsteps):
            maw(device, start + step * idx)
            session.delay(0.2)   # brief settling time before counting
            count(1)
@usercommand
def img():
    """Switch the Cascade detector to image mode."""
    detector = session.getDevice('psd_channel')
    detector.mode = 'image'
@usercommand
def tof():
    """Switch the Cascade detector to time-of-flight (tof) mode."""
    detector = session.getDevice('psd_channel')
    detector.mode = 'tof'
| 31.259036 | 80 | 0.659279 |
85bec717f5f09183fd6835e9758eb2afac3f6a00 | 590 | py | Python | Day01-15/code/Day07/findmax.py | EngrSaad2/Python-100-Days | ab0b26714b1df50d02a1433dc82f2a3fb025be5c | [
"Apache-2.0"
] | 6 | 2020-04-22T14:07:51.000Z | 2021-09-07T12:55:23.000Z | Day01-15/code/Day07/findmax.py | 2462612540/Python-Language | a676d1274a04ff03f1aea0de9c58019d6ef8f5fe | [
"Apache-2.0"
] | null | null | null | Day01-15/code/Day07/findmax.py | 2462612540/Python-Language | a676d1274a04ff03f1aea0de9c58019d6ef8f5fe | [
"Apache-2.0"
] | 4 | 2019-08-25T05:51:00.000Z | 2021-04-16T08:14:16.000Z | """
找出列表中最大或最小的元素
Version: 0.1
Author: 骆昊
Date: 2018-03-06
"""
def main():
    """Find the largest and smallest element of a list with a manual scan."""
    fruits = ['grape', 'apple', 'strawberry', 'waxberry', 'pitaya']
    # The built-ins would do this directly:
    # print(max(fruits))
    # print(min(fruits))
    largest = smallest = fruits[0]
    for fruit in fruits[1:]:
        if fruit > largest:
            largest = fruit
        elif fruit < smallest:
            smallest = fruit
    print('Max:', largest)
    print('Min:', smallest)
# Run the demo only when this file is executed directly, not when imported.
if __name__ == '__main__':
    main()
# 想一想如果最大的元素有两个要找出第二大的又该怎么做
| 21.071429 | 67 | 0.613559 |
103b363b2977f80ebaa7e579a56f098e42387e9e | 6,612 | py | Python | sdk/consumption/azure-mgmt-consumption/azure/mgmt/consumption/operations/_reservation_recommendation_details_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/consumption/azure-mgmt-consumption/azure/mgmt/consumption/operations/_reservation_recommendation_details_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/consumption/azure-mgmt-consumption/azure/mgmt/consumption/operations/_reservation_recommendation_details_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ReservationRecommendationDetailsOperations(object):
    """ReservationRecommendationDetailsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.consumption.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Alias so callers can reach the model classes through the operation group.
    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def get(
        self,
        billing_scope,  # type: str
        scope,  # type: Union[str, "_models.Scope"]
        region,  # type: str
        term,  # type: Union[str, "_models.Term"]
        look_back_period,  # type: Union[str, "_models.LookBackPeriod"]
        product,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.ReservationRecommendationDetailsModel"]
        """Details of a reservation recommendation for what-if analysis of reserved instances.

        :param billing_scope: The scope associated with reservation recommendation details operations.
         This includes '/subscriptions/{subscriptionId}/' for subscription scope,
         '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resource group scope,
         /providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for BillingAccount scope, and
         '/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'
         for billingProfile scope.
        :type billing_scope: str
        :param scope: Scope of the reservation.
        :type scope: str or ~azure.mgmt.consumption.models.Scope
        :param region: Used to select the region the recommendation should be generated for.
        :type region: str
        :param term: Specify length of reservation recommendation term.
        :type term: str or ~azure.mgmt.consumption.models.Term
        :param look_back_period: Filter the time period on which reservation recommendation results are
         based.
        :type look_back_period: str or ~azure.mgmt.consumption.models.LookBackPeriod
        :param product: Filter the products for which reservation recommendation results are generated.
         Examples: Standard_DS1_v2 (for VM), Premium_SSD_Managed_Disks_P30 (for Managed Disks).
        :type product: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ReservationRecommendationDetailsModel, or the result of cls(response)
        :rtype: ~azure.mgmt.consumption.models.ReservationRecommendationDetailsModel or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ReservationRecommendationDetailsModel"]]
        # Map well-known HTTP error codes to typed azure-core exceptions;
        # callers may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-10-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            # skip_quote=True: billing_scope is a full ARM scope path whose
            # '/' separators must not be percent-encoded.
            'billingScope': self._serialize.url("billing_scope", billing_scope, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        query_parameters['scope'] = self._serialize.query("scope", scope, 'str')
        query_parameters['region'] = self._serialize.query("region", region, 'str')
        query_parameters['term'] = self._serialize.query("term", term, 'str')
        query_parameters['lookBackPeriod'] = self._serialize.query("look_back_period", look_back_period, 'str')
        query_parameters['product'] = self._serialize.query("product", product, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # 204 means no recommendation details are available: 'deserialized'
        # stays None and the method returns None.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ReservationRecommendationDetailsModel', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/{billingScope}/providers/Microsoft.Consumption/reservationRecommendationDetails'}  # type: ignore
| 50.861538 | 133 | 0.69268 |
60006f0fae0d4c2ff720dbbd99f829518b617ee6 | 148 | py | Python | src/ros/src/ros_homebot/nodes/test_node.py | chrisspen/homebot | cd1a5a2bddcb90d50a00e67b94297942827fbc61 | [
"MIT"
] | 8 | 2017-11-19T01:42:59.000Z | 2021-02-08T09:18:58.000Z | src/ros/src/ros_homebot/nodes/test_node.py | chrisspen/homebot | cd1a5a2bddcb90d50a00e67b94297942827fbc61 | [
"MIT"
] | 24 | 2017-02-14T16:50:02.000Z | 2017-06-07T18:38:20.000Z | src/ros/src/ros_homebot/nodes/test_node.py | chrisspen/homebot | cd1a5a2bddcb90d50a00e67b94297942827fbc61 | [
"MIT"
] | 6 | 2017-05-17T03:07:05.000Z | 2019-11-14T09:33:06.000Z | #!/usr/bin/env python
import time
import rospy
# Initialize the ROS node before any logging/communication calls.
rospy.init_node('test_node')

# Heartbeat loop: log once per second until the process is killed.
# (`while True:` is the idiomatic spelling of the original `while 1:`.)
while True:
    rospy.loginfo("Waiting for 1 second...")
    time.sleep(1)
| 18.5 | 44 | 0.702703 |
e147cad696762a1e7a637cf470175445cad287f9 | 28,095 | py | Python | utils/bipartite_graph_motif.py | pige99/TME | b069ed159c6ae8835591af13b3fb2bc418e2fef9 | [
"MIT"
] | 3 | 2021-07-26T04:52:25.000Z | 2022-02-10T04:33:44.000Z | utils/bipartite_graph_motif.py | pige99/TME | b069ed159c6ae8835591af13b3fb2bc418e2fef9 | [
"MIT"
] | null | null | null | utils/bipartite_graph_motif.py | pige99/TME | b069ed159c6ae8835591af13b3fb2bc418e2fef9 | [
"MIT"
] | null | null | null | import time
import bisect
import dgl
import torch
import numpy as np
from tqdm import tqdm
# Count motif 1
def count_motif_1_1(g, threshold_time, eid):
    """Count temporal motif 1-1 occurrences anchored at edge ``eid``.

    For the anchor edge (src, dst) at time t, every ordered pair of further
    src->dst edges (t2, t3) with t < t2 < t3 <= t + threshold_time is
    counted into ``g.edata['motif_count'][eid][0]``.
    """
    src, dst = g.find_edges(eid)
    out_ts = g.edata['timestamp'][g.out_edges(src, form='eid')]
    out_dst = g.out_edges(src)[1]
    anchor_ts = g.edata['timestamp'][eid]
    window_end = anchor_ts + threshold_time
    # Second edge: src->dst strictly after the anchor, inside the window.
    second = (out_ts > anchor_ts) & (out_ts <= window_end) & (out_dst == dst)
    for t2 in out_ts[second]:
        # Third edge: src->dst strictly after the second, inside the window.
        third = (out_ts > t2) & (out_ts <= window_end) & (out_dst == dst)
        g.edata['motif_count'][eid][0] += torch.sum(third)
def count_motif_1_2(g, threshold_time, eid):
    """Count temporal motif 1-2 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][1]``.  The anchor
    edge is (src, dst) at time ``pivot``; the comparison operators below
    encode the exact open/closed window bounds of length ``threshold_time``.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First edge: src->dst in [pivot - threshold_time, pivot).
    left_margin_1, right_margin_1 = pivot - threshold_time, pivot
    mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
    mask_1 = torch.logical_and(mask_1, src_out_ngr == dst)
    src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
    for i, node_i in enumerate(src_out_ngr_1):
        # Third edge: src->dst in (pivot, t1 + threshold_time].
        left_margin_3, right_margin_3 = pivot, src_out_timestamp_1[i] + threshold_time
        mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
        mask_3 = torch.logical_and(mask_3, src_out_ngr == dst)
        g.edata['motif_count'][eid][1] += torch.sum(mask_3)
def count_motif_1_3(g, threshold_time, eid):
    """Count temporal motif 1-3 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][2]``.  The anchor
    edge is (src, dst) at time ``pivot``; the comparison operators below
    encode the exact open/closed window bounds of length ``threshold_time``.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First edge: src->dst in [pivot - threshold_time, pivot).
    left_margin_1, right_margin_1 = pivot - threshold_time, pivot
    mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
    mask_1 = torch.logical_and(mask_1, src_out_ngr == dst)
    src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
    for i, node_i in enumerate(src_out_ngr_1):
        # Second edge: src->dst strictly between t1 and the anchor time.
        left_margin_2, right_margin_2 = src_out_timestamp_1[i], pivot
        mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp < right_margin_2)
        mask_2 = torch.logical_and(mask_2, src_out_ngr == dst)
        g.edata['motif_count'][eid][2] += torch.sum(mask_2)
# Count motif 2
def count_motif_2_1(g, threshold_time, eid):
    """Count temporal motif 2-1 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][3]``.  The anchor
    edge is (src, dst) at time ``pivot``; the comparison operators below
    encode the exact open/closed window bounds of length ``threshold_time``.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # Second edge: src->dst in (pivot, pivot + threshold_time].
    left_margin_2, right_margin_2 = pivot, pivot + threshold_time
    mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp <= right_margin_2)
    mask_2 = torch.logical_and(mask_2, src_out_ngr == dst)
    src_out_ngr_2, src_out_timestamp_2 = src_out_ngr[mask_2], src_out_timestamp[mask_2]
    for i, node_i in enumerate(src_out_ngr_2):
        # Third edge: src to some node other than node_i (node_i is always
        # dst here, since mask_2 required src_out_ngr == dst).
        left_margin_3, right_margin_3 = src_out_timestamp_2[i], pivot + threshold_time
        mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
        mask_3 = torch.logical_and(mask_3, src_out_ngr != node_i)
        g.edata['motif_count'][eid][3] += torch.sum(mask_3)
def count_motif_2_2(g, threshold_time, eid):
    """Count temporal motif 2-2 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][4]``.  The anchor
    edge is (src, dst) at time ``pivot``; the comparison operators below
    encode the exact open/closed window bounds of length ``threshold_time``.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First edge: src->dst in [pivot - threshold_time, pivot).
    left_margin_1, right_margin_1 = pivot - threshold_time, pivot
    mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
    mask_1 = torch.logical_and(mask_1, src_out_ngr == dst)
    src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
    for i, node_i in enumerate(src_out_ngr_1):
        # Third edge: src to some node other than node_i (node_i is always
        # dst here, since mask_1 required src_out_ngr == dst).
        left_margin_3, right_margin_3 = pivot, src_out_timestamp_1[i] + threshold_time
        mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
        mask_3 = torch.logical_and(mask_3, src_out_ngr != node_i)
        g.edata['motif_count'][eid][4] += torch.sum(mask_3)
def count_motif_2_3(g, threshold_time, eid):
    """Count temporal motif 2-3 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][5]``.  The anchor
    edge is (src, dst) at time ``pivot``; the comparison operators below
    encode the exact open/closed window bounds of length ``threshold_time``.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First edge: src to some node other than dst, before the anchor.
    left_margin_1, right_margin_1 = pivot - threshold_time, pivot
    mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
    mask_1 = torch.logical_and(mask_1, src_out_ngr != dst)
    src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
    for i, node_i in enumerate(src_out_ngr_1):
        # Second edge: another src->node_i edge strictly between t1 and pivot.
        left_margin_2, right_margin_2 = src_out_timestamp_1[i], pivot
        mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp < right_margin_2)
        mask_2 = torch.logical_and(mask_2, src_out_ngr == node_i)
        g.edata['motif_count'][eid][5] += torch.sum(mask_2)
# Count motif 3
def count_motif_3_1(g, threshold_time, eid):
    """Count temporal motif 3-1 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][6]``.  The anchor
    edge is (src, dst) at time ``pivot``; the comparison operators below
    encode the exact open/closed window bounds of length ``threshold_time``.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # Third edge: src->dst in (pivot, pivot + threshold_time].
    left_margin_3, right_margin_3 = pivot, pivot + threshold_time
    mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
    mask_3 = torch.logical_and(mask_3, src_out_ngr == dst)
    src_out_ngr_3, src_out_timestamp_3 = src_out_ngr[mask_3], src_out_timestamp[mask_3]
    for i, node_i in enumerate(src_out_ngr_3):
        # Second edge: src to some node other than node_i (node_i is dst),
        # strictly between the anchor and the third edge.
        left_margin_2, right_margin_2 = pivot, src_out_timestamp_3[i]
        mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp < right_margin_2)
        mask_2 = torch.logical_and(mask_2, src_out_ngr != node_i)
        g.edata['motif_count'][eid][6] += torch.sum(mask_2)
def count_motif_3_2(g, threshold_time, eid):
    """Count temporal motif 3-2 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][7]``.  The anchor
    edge is (src, dst) at time ``pivot``; the comparison operators below
    encode the exact open/closed window bounds of length ``threshold_time``.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First edge: src to some node other than dst, before the anchor.
    left_margin_1, right_margin_1 = pivot - threshold_time, pivot
    mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
    mask_1 = torch.logical_and(mask_1, src_out_ngr != dst)
    src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
    for i, node_i in enumerate(src_out_ngr_1):
        # Third edge: another src->node_i edge in (pivot, t1 + threshold_time].
        left_margin_3, right_margin_3 = pivot, src_out_timestamp_1[i] + threshold_time
        mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
        mask_3 = torch.logical_and(mask_3, src_out_ngr == node_i)
        g.edata['motif_count'][eid][7] += torch.sum(mask_3)
def count_motif_3_3(g, threshold_time, eid):
    """Count temporal motif 3-3 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][8]``.  The anchor
    edge is (src, dst) at time ``pivot``; the comparison operators below
    encode the exact open/closed window bounds of length ``threshold_time``.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First edge: src->dst in [pivot - threshold_time, pivot).
    left_margin_1, right_margin_1 = pivot - threshold_time, pivot
    mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
    mask_1 = torch.logical_and(mask_1, src_out_ngr == dst)
    src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
    for i, node_i in enumerate(src_out_ngr_1):
        # Second edge: src to some node other than node_i (node_i is dst),
        # strictly between t1 and the anchor.
        left_margin_2, right_margin_2 = src_out_timestamp_1[i], pivot
        mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp < right_margin_2)
        mask_2 = torch.logical_and(mask_2, src_out_ngr != node_i)
        g.edata['motif_count'][eid][8] += torch.sum(mask_2)
# Count motif 4
def count_motif_4_1(g, threshold_time, eid):
    """Count temporal motif 4-1 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][9]``.  The anchor
    edge is (src, dst) at time ``pivot``; the comparison operators below
    encode the exact open/closed window bounds of length ``threshold_time``.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # Second edge: src to some node other than dst, after the anchor.
    left_margin_2, right_margin_2 = pivot, pivot + threshold_time
    mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp <= right_margin_2)
    mask_2 = torch.logical_and(mask_2, src_out_ngr != dst)
    src_out_ngr_2, src_out_timestamp_2 = src_out_ngr[mask_2], src_out_timestamp[mask_2]
    for i, node_i in enumerate(src_out_ngr_2):
        # Third edge: another src->node_i edge in (t2, pivot + threshold_time].
        left_margin_3, right_margin_3 = src_out_timestamp_2[i], pivot + threshold_time
        mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
        mask_3 = torch.logical_and(mask_3, src_out_ngr == node_i)
        g.edata['motif_count'][eid][9] += torch.sum(mask_3)
def count_motif_4_2(g, threshold_time, eid):
    """Count temporal motif 4-2 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][10]``.  The anchor
    edge is (src, dst) at time ``pivot``; the comparison operators below
    encode the exact open/closed window bounds of length ``threshold_time``.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First edge: src to some node other than dst, before the anchor.
    left_margin_1, right_margin_1 = pivot - threshold_time, pivot
    mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
    mask_1 = torch.logical_and(mask_1, src_out_ngr != dst)
    src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
    for i, node_i in enumerate(src_out_ngr_1):
        # Third edge: src->dst in (pivot, t1 + threshold_time].
        left_margin_3, right_margin_3 = pivot, src_out_timestamp_1[i] + threshold_time
        mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
        mask_3 = torch.logical_and(mask_3, src_out_ngr == dst)
        g.edata['motif_count'][eid][10] += torch.sum(mask_3)
def count_motif_4_3(g, threshold_time, eid):
    """Count temporal motif 4-3 occurrences anchored at edge ``eid``.

    For the anchor edge (src, dst) at time ``pivot``: for every src->x edge
    (x != dst) at t1 in [pivot - threshold_time, pivot), count the src->dst
    edges strictly between t1 and pivot.  Counts accumulate into
    ``g.edata['motif_count'][eid][11]``.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First edge: src to some node other than dst, before the anchor.
    left_margin_1, right_margin_1 = pivot - threshold_time, pivot
    mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
    mask_1 = torch.logical_and(mask_1, src_out_ngr != dst)
    src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
    for i, node_i in enumerate(src_out_ngr_1):
        # Second edge: src->dst strictly between t1 and the anchor.
        left_margin_2, right_margin_2 = src_out_timestamp_1[i], pivot
        mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp < right_margin_2)
        mask_2 = torch.logical_and(mask_2, src_out_ngr == dst)
        # NOTE: a dead `src_out_ngr_2 = src_out_ngr[mask_2]` assignment was
        # removed here; its value was never used (the sibling variants do
        # not have it either).
        g.edata['motif_count'][eid][11] += torch.sum(mask_2)
# Count motif 5
def count_motif_5_1(g, threshold_time, eid):
    """Count temporal motif 5-1 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][12]``.  This
    three-hop variant also walks the in-edges of intermediate nodes; the
    comparison operators below encode the exact window bounds.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First hop: src to some node x != dst, after the anchor.
    left_margin, right_margin = pivot, pivot + threshold_time
    mask = torch.logical_and(src_out_timestamp > left_margin, src_out_timestamp <= right_margin)
    mask = torch.logical_and(mask, src_out_ngr != dst)
    src_out_ngr, src_out_timestamp = src_out_ngr[mask], src_out_timestamp[mask]
    for i, node_i in enumerate(src_out_ngr):
        # Second hop: in-edges of node_i from some node w != src.
        ngr2, _, eids2 = g.in_edges(node_i, form='all')
        ngr2_timestamp = g.edata['timestamp'][eids2]
        left_margin_2, right_margin_2 = src_out_timestamp[i], pivot + threshold_time
        mask_2 = torch.logical_and(ngr2_timestamp > left_margin_2, ngr2_timestamp <= right_margin_2)
        mask_2 = torch.logical_and(mask_2, ngr2 != src)
        ngr2, ngr2_timestamp = ngr2[mask_2], ngr2_timestamp[mask_2]
        for j, node_j in enumerate(ngr2):
            # Third hop: node_j -> dst inside the remaining window.
            ngr1, _, eids1 = g.in_edges(dst, form='all')
            ngr1_timestamp = g.edata['timestamp'][eids1]
            # NOTE(review): "mragin" typo preserved; used consistently below.
            left_mragin_1, right_margin_1 = ngr2_timestamp[j], pivot + threshold_time
            mask_1 = torch.logical_and(ngr1_timestamp > left_mragin_1, ngr1_timestamp <= right_margin_1)
            mask_1 = torch.logical_and(mask_1, ngr1 == node_j)
            g.edata['motif_count'][eid][12] += torch.sum(mask_1)
def count_motif_5_2(g, threshold_time, eid):
    """Count temporal motif 5-2 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][13]``.  This
    three-hop variant also walks the in-edges of intermediate nodes; the
    comparison operators below encode the exact window bounds.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First hop: src to some node x != dst, before the anchor.
    left_margin, right_margin = pivot - threshold_time, pivot
    mask = torch.logical_and(src_out_timestamp >= left_margin, src_out_timestamp < right_margin)
    mask = torch.logical_and(mask, src_out_ngr != dst)
    src_out_ngr, src_out_timestamp = src_out_ngr[mask], src_out_timestamp[mask]
    for i, node_i in enumerate(src_out_ngr):
        # Second hop: in-edges of dst from some node w != src.
        ngr2, _, eids2 = g.in_edges(dst, form='all')
        ngr2_timestamp = g.edata['timestamp'][eids2]
        left_margin_2, right_margin_2 = pivot, src_out_timestamp[i] + threshold_time
        mask_2 = torch.logical_and(ngr2_timestamp > left_margin_2, ngr2_timestamp <= right_margin_2)
        mask_2 = torch.logical_and(mask_2, ngr2 != src)
        ngr2, ngr2_timestamp = ngr2[mask_2], ngr2_timestamp[mask_2]
        for j, node_j in enumerate(ngr2):
            # Third hop: node_j -> node_i inside the remaining window.
            ngr1, _, eids1 = g.in_edges(node_i, form='all')
            ngr1_timestamp = g.edata['timestamp'][eids1]
            left_mragin_1, right_margin_1 = ngr2_timestamp[j], src_out_timestamp[i] + threshold_time
            mask_1 = torch.logical_and(ngr1_timestamp > left_mragin_1, ngr1_timestamp <= right_margin_1)
            mask_1 = torch.logical_and(mask_1, ngr1 == node_j)
            g.edata['motif_count'][eid][13] += torch.sum(mask_1)
def count_motif_5_3(g, threshold_time, eid):
    """Count temporal motif 5-3 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][14]``.  This
    three-hop variant also walks the in-edges of intermediate nodes; the
    comparison operators below encode the exact window bounds.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First hop: src to some node x != dst, after the anchor.
    left_margin, right_margin = pivot, pivot + threshold_time
    mask = torch.logical_and(src_out_timestamp > left_margin, src_out_timestamp <= right_margin)
    mask = torch.logical_and(mask, src_out_ngr != dst)
    src_out_ngr, src_out_timestamp = src_out_ngr[mask], src_out_timestamp[mask]
    for i, node_i in enumerate(src_out_ngr):
        # Second hop: in-edges of dst from some node w != src, before pivot.
        ngr3, _, eids3 = g.in_edges(dst, form='all')
        ngr3_timestamp = g.edata['timestamp'][eids3]
        left_margin_3, right_margin_3 = src_out_timestamp[i] - threshold_time, pivot
        mask_3 = torch.logical_and(ngr3_timestamp >= left_margin_3, ngr3_timestamp < right_margin_3)
        mask_3 = torch.logical_and(mask_3, ngr3 != src)
        ngr3, ngr3_timestamp = ngr3[mask_3], ngr3_timestamp[mask_3]
        for j, node_j in enumerate(ngr3):
            # Third hop: node_j -> node_i still earlier in the window.
            ngr4, _, eids4 = g.in_edges(node_i, form='all')
            ngr4_timestamp = g.edata['timestamp'][eids4]
            left_mragin_4, right_margin_4 = src_out_timestamp[i] - threshold_time, ngr3_timestamp[j]
            mask_4 = torch.logical_and(ngr4_timestamp >= left_mragin_4, ngr4_timestamp < right_margin_4)
            mask_4 = torch.logical_and(mask_4, ngr4 == node_j)
            g.edata['motif_count'][eid][14] += torch.sum(mask_4)
def count_motif_5_4(g, threshold_time, eid):
    """Count temporal motif 5-4 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][15]``.  This
    three-hop variant also walks the in-edges of intermediate nodes; the
    comparison operators below encode the exact window bounds.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First hop: src to some node x != dst, before the anchor.
    left_margin, right_margin = pivot - threshold_time, pivot
    mask = torch.logical_and(src_out_timestamp >= left_margin, src_out_timestamp < right_margin)
    mask = torch.logical_and(mask, src_out_ngr != dst)
    src_out_ngr, src_out_timestamp = src_out_ngr[mask], src_out_timestamp[mask]
    for i, node_i in enumerate(src_out_ngr):
        # Second hop: in-edges of node_i from some node w != src, earlier still.
        ngr3, _, eids3 = g.in_edges(node_i, form='all')
        ngr3_timestamp = g.edata['timestamp'][eids3]
        left_margin_3, right_margin_3 = pivot - threshold_time, src_out_timestamp[i]
        mask_3 = torch.logical_and(ngr3_timestamp >= left_margin_3, ngr3_timestamp < right_margin_3)
        mask_3 = torch.logical_and(mask_3, ngr3 != src)
        ngr3, ngr3_timestamp = ngr3[mask_3], ngr3_timestamp[mask_3]
        for j, node_j in enumerate(ngr3):
            # Third hop: node_j -> dst before the second hop edge.
            ngr4, _, eids4 = g.in_edges(dst, form='all')
            ngr4_timestamp = g.edata['timestamp'][eids4]
            left_mragin_4, right_margin_4 = pivot - threshold_time, ngr3_timestamp[j]
            mask_4 = torch.logical_and(ngr4_timestamp >= left_mragin_4, ngr4_timestamp < right_margin_4)
            mask_4 = torch.logical_and(mask_4, ngr4 == node_j)
            g.edata['motif_count'][eid][15] += torch.sum(mask_4)
# Count motif 6
def count_motif_6_1(g, threshold_time, eid):
    """Count temporal motif 6-1 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][16]``.  This
    three-hop variant also walks the in-edges of intermediate nodes; the
    comparison operators below encode the exact window bounds.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First hop: src to some node x != dst, after the anchor.
    left_margin, right_margin = pivot, pivot + threshold_time
    mask = torch.logical_and(src_out_timestamp > left_margin, src_out_timestamp <= right_margin)
    mask = torch.logical_and(mask, src_out_ngr != dst)
    src_out_ngr, src_out_timestamp = src_out_ngr[mask], src_out_timestamp[mask]
    for i, node_i in enumerate(src_out_ngr):
        # Second hop: in-edges of node_i from some node w != src,
        # strictly between the anchor and the first hop edge.
        ngr3, _, eids3 = g.in_edges(node_i, form='all')
        ngr3_timestamp = g.edata['timestamp'][eids3]
        left_margin_3, right_margin_3 = pivot, src_out_timestamp[i]
        mask_3 = torch.logical_and(ngr3_timestamp > left_margin_3, ngr3_timestamp < right_margin_3)
        mask_3 = torch.logical_and(mask_3, ngr3 != src)
        ngr3, ngr3_timestamp = ngr3[mask_3], ngr3_timestamp[mask_3]
        for j, node_j in enumerate(ngr3):
            # Third hop: node_j -> dst after the first hop edge.
            ngr1, _, eids1 = g.in_edges(dst, form='all')
            ngr1_timestamp = g.edata['timestamp'][eids1]
            left_mragin_1, right_margin_1 = src_out_timestamp[i], pivot + threshold_time
            mask_1 = torch.logical_and(ngr1_timestamp > left_mragin_1, ngr1_timestamp <= right_margin_1)
            mask_1 = torch.logical_and(mask_1, ngr1 == node_j)
            g.edata['motif_count'][eid][16] += torch.sum(mask_1)
def count_motif_6_2(g, threshold_time, eid):
    """Count temporal motif 6-2 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][17]``.  This
    three-hop variant also walks the in-edges of intermediate nodes; the
    comparison operators below encode the exact window bounds.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First hop: src to some node x != dst, after the anchor.
    left_margin, right_margin = pivot, pivot + threshold_time
    mask = torch.logical_and(src_out_timestamp > left_margin, src_out_timestamp <= right_margin)
    mask = torch.logical_and(mask, src_out_ngr != dst)
    src_out_ngr, src_out_timestamp = src_out_ngr[mask], src_out_timestamp[mask]
    for i, node_i in enumerate(src_out_ngr):
        # Second hop: in-edges of dst from some node w != src,
        # strictly between the anchor and the first hop edge.
        ngr2, _, eids2 = g.in_edges(dst, form='all')
        ngr2_timestamp = g.edata['timestamp'][eids2]
        left_margin_2, right_margin_2 = pivot, src_out_timestamp[i]
        mask_2 = torch.logical_and(ngr2_timestamp > left_margin_2, ngr2_timestamp < right_margin_2)
        mask_2 = torch.logical_and(mask_2, ngr2 != src)
        ngr2, ngr2_timestamp = ngr2[mask_2], ngr2_timestamp[mask_2]
        for j, node_j in enumerate(ngr2):
            # Third hop: node_j -> node_i before the anchor.
            ngr4, _, eids4 = g.in_edges(node_i, form='all')
            ngr4_timestamp = g.edata['timestamp'][eids4]
            left_mragin_4, right_margin_4 = src_out_timestamp[i] - threshold_time, pivot
            mask_4 = torch.logical_and(ngr4_timestamp >= left_mragin_4, ngr4_timestamp < right_margin_4)
            mask_4 = torch.logical_and(mask_4, ngr4 == node_j)
            g.edata['motif_count'][eid][17] += torch.sum(mask_4)
def count_motif_6_3(g, threshold_time, eid):
    """Count temporal motif 6-3 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][18]``.  This
    three-hop variant also walks the in-edges of intermediate nodes; the
    comparison operators below encode the exact window bounds.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First hop: src to some node x != dst, before the anchor.
    left_margin, right_margin = pivot - threshold_time, pivot
    mask = torch.logical_and(src_out_timestamp >= left_margin, src_out_timestamp < right_margin)
    mask = torch.logical_and(mask, src_out_ngr != dst)
    src_out_ngr, src_out_timestamp = src_out_ngr[mask], src_out_timestamp[mask]
    for i, node_i in enumerate(src_out_ngr):
        # Second hop: in-edges of dst from some node w != src,
        # strictly between the first hop edge and the anchor.
        ngr3, _, eids3 = g.in_edges(dst, form='all')
        ngr3_timestamp = g.edata['timestamp'][eids3]
        left_margin_3, right_margin_3 = src_out_timestamp[i], pivot
        mask_3 = torch.logical_and(ngr3_timestamp > left_margin_3, ngr3_timestamp < right_margin_3)
        mask_3 = torch.logical_and(mask_3, ngr3 != src)
        ngr3, ngr3_timestamp = ngr3[mask_3], ngr3_timestamp[mask_3]
        for j, node_j in enumerate(ngr3):
            # Third hop: node_j -> node_i after the anchor.
            ngr1, _, eids1 = g.in_edges(node_i, form='all')
            ngr1_timestamp = g.edata['timestamp'][eids1]
            left_mragin_1, right_margin_1 = pivot, src_out_timestamp[i] + threshold_time
            mask_1 = torch.logical_and(ngr1_timestamp > left_mragin_1, ngr1_timestamp <= right_margin_1)
            mask_1 = torch.logical_and(mask_1, ngr1 == node_j)
            g.edata['motif_count'][eid][18] += torch.sum(mask_1)
def count_motif_6_4(g, threshold_time, eid):
    """Count temporal motif 6-4 occurrences anchored at edge ``eid``.

    Counts accumulate into ``g.edata['motif_count'][eid][19]``.  This
    three-hop variant also walks the in-edges of intermediate nodes; the
    comparison operators below encode the exact window bounds.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First hop: src to some node x != dst, before the anchor.
    left_margin, right_margin = pivot - threshold_time, pivot
    mask = torch.logical_and(src_out_timestamp >= left_margin, src_out_timestamp < right_margin)
    mask = torch.logical_and(mask, src_out_ngr != dst)
    src_out_ngr, src_out_timestamp = src_out_ngr[mask], src_out_timestamp[mask]
    for i, node_i in enumerate(src_out_ngr):
        # Second hop: in-edges of node_i from some node w != src,
        # strictly between the first hop edge and the anchor.
        ngr2, _, eids2 = g.in_edges(node_i, form='all')
        ngr2_timestamp = g.edata['timestamp'][eids2]
        left_margin_2, right_margin_2 = src_out_timestamp[i], pivot
        mask_2 = torch.logical_and(ngr2_timestamp > left_margin_2, ngr2_timestamp < right_margin_2)
        mask_2 = torch.logical_and(mask_2, ngr2 != src)
        ngr2, ngr2_timestamp = ngr2[mask_2], ngr2_timestamp[mask_2]
        for j, node_j in enumerate(ngr2):
            # Third hop: node_j -> dst before the first hop edge.
            ngr4, _, eids4 = g.in_edges(dst, form='all')
            ngr4_timestamp = g.edata['timestamp'][eids4]
            left_mragin_4, right_margin_4 = pivot - threshold_time, src_out_timestamp[i]
            mask_4 = torch.logical_and(ngr4_timestamp >= left_mragin_4, ngr4_timestamp < right_margin_4)
            mask_4 = torch.logical_and(mask_4, ngr4 == node_j)
            g.edata['motif_count'][eid][19] += torch.sum(mask_4)
# Count motif 7
def count_motif_7_1(g, threshold_time, eid):
    """Accumulate motif 7-1 occurrences for edge ``eid`` into slot 20 of
    ``g.edata['motif_count']``.

    Args:
        g: graph exposing ``find_edges``/``in_edges``/``out_edges`` and
           ``edata`` (presumably a DGL graph -- verify against caller).
        threshold_time: width of the temporal window around the pivot edge.
        eid: id of the pivot edge whose counter is updated in place.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First hop: out-edges of src (excluding dst) in (pivot, pivot + T].
    left_margin, right_margin = pivot, pivot + threshold_time
    mask = torch.logical_and(src_out_timestamp > left_margin, src_out_timestamp <= right_margin)
    mask = torch.logical_and(mask, src_out_ngr != dst)
    src_out_ngr, src_out_timestamp = src_out_ngr[mask], src_out_timestamp[mask]
    # dst's in-neighbourhood is invariant across both loops; fetch it once.
    ngr1, _, eids1 = g.in_edges(dst, form='all')
    ngr1_timestamp = g.edata['timestamp'][eids1]
    for i, node_i in enumerate(src_out_ngr):
        # Second hop: in-edges of node_i (excluding src) in (t_i, pivot + T].
        ngr3, _, eids3 = g.in_edges(node_i, form='all')
        ngr3_timestamp = g.edata['timestamp'][eids3]
        left_margin_3, right_margin_3 = src_out_timestamp[i], pivot + threshold_time
        mask_3 = torch.logical_and(ngr3_timestamp > left_margin_3, ngr3_timestamp <= right_margin_3)
        mask_3 = torch.logical_and(mask_3, ngr3 != src)
        ngr3, ngr3_timestamp = ngr3[mask_3], ngr3_timestamp[mask_3]
        # Third-hop time window depends only on i, not on node_j.  (Also
        # fixes the original 'left_mragin_1' variable-name typo.)
        left_margin_1, right_margin_1 = pivot, src_out_timestamp[i]
        window_1 = torch.logical_and(ngr1_timestamp > left_margin_1, ngr1_timestamp < right_margin_1)
        for node_j in ngr3:
            mask_1 = torch.logical_and(window_1, ngr1 == node_j)
            g.edata['motif_count'][eid][20] += torch.sum(mask_1)
def count_motif_7_2(g, threshold_time, eid):
    """Accumulate motif 7-2 occurrences for edge ``eid`` into slot 21 of
    ``g.edata['motif_count']``.

    Args:
        g: graph exposing ``find_edges``/``in_edges``/``out_edges`` and
           ``edata`` (presumably a DGL graph -- verify against caller).
        threshold_time: width of the temporal window around the pivot edge.
        eid: id of the pivot edge whose counter is updated in place.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First hop: out-edges of src (excluding dst) in (pivot, pivot + T].
    left_margin, right_margin = pivot, pivot + threshold_time
    mask = torch.logical_and(src_out_timestamp > left_margin, src_out_timestamp <= right_margin)
    mask = torch.logical_and(mask, src_out_ngr != dst)
    src_out_ngr, src_out_timestamp = src_out_ngr[mask], src_out_timestamp[mask]
    # dst's in-neighbourhood is invariant across both loops; fetch it once.
    ngr2_all, _, eids2 = g.in_edges(dst, form='all')
    ngr2_all_timestamp = g.edata['timestamp'][eids2]
    for i, node_i in enumerate(src_out_ngr):
        # Second hop: in-edges of dst (excluding src) in [t_i - T, pivot).
        left_margin_2, right_margin_2 = src_out_timestamp[i] - threshold_time, pivot
        mask_2 = torch.logical_and(ngr2_all_timestamp >= left_margin_2, ngr2_all_timestamp < right_margin_2)
        mask_2 = torch.logical_and(mask_2, ngr2_all != src)
        ngr2 = ngr2_all[mask_2]
        # node_i's in-neighbourhood and the third-hop time window depend
        # only on i, not on node_j; compute them once per i.  (Also fixes
        # the original 'left_mragin_4' variable-name typo.)
        ngr4, _, eids4 = g.in_edges(node_i, form='all')
        ngr4_timestamp = g.edata['timestamp'][eids4]
        left_margin_4, right_margin_4 = pivot, src_out_timestamp[i]
        window_4 = torch.logical_and(ngr4_timestamp > left_margin_4, ngr4_timestamp < right_margin_4)
        for node_j in ngr2:
            mask_4 = torch.logical_and(window_4, ngr4 == node_j)
            g.edata['motif_count'][eid][21] += torch.sum(mask_4)
def count_motif_7_3(g, threshold_time, eid):
    """Accumulate motif 7-3 occurrences for edge ``eid`` into slot 22 of
    ``g.edata['motif_count']``.

    Args:
        g: graph exposing ``find_edges``/``in_edges``/``out_edges`` and
           ``edata`` (presumably a DGL graph -- verify against caller).
        threshold_time: width of the temporal window around the pivot edge.
        eid: id of the pivot edge whose counter is updated in place.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First hop: out-edges of src (excluding dst) in [pivot - T, pivot).
    left_margin, right_margin = pivot - threshold_time, pivot
    mask = torch.logical_and(src_out_timestamp >= left_margin, src_out_timestamp < right_margin)
    mask = torch.logical_and(mask, src_out_ngr != dst)
    src_out_ngr, src_out_timestamp = src_out_ngr[mask], src_out_timestamp[mask]
    # dst's in-neighbourhood is invariant across both loops; fetch it once.
    ngr3_all, _, eids3 = g.in_edges(dst, form='all')
    ngr3_all_timestamp = g.edata['timestamp'][eids3]
    for i, node_i in enumerate(src_out_ngr):
        # Second hop: in-edges of dst (excluding src) in (pivot, t_i + T).
        left_margin_3, right_margin_3 = pivot, src_out_timestamp[i] + threshold_time
        mask_3 = torch.logical_and(ngr3_all_timestamp > left_margin_3, ngr3_all_timestamp < right_margin_3)
        mask_3 = torch.logical_and(mask_3, ngr3_all != src)
        ngr3 = ngr3_all[mask_3]
        # node_i's in-neighbourhood and the third-hop time window depend
        # only on i, not on node_j; compute them once per i.  (Also fixes
        # the original 'left_mragin_1' variable-name typo.)
        ngr1, _, eids1 = g.in_edges(node_i, form='all')
        ngr1_timestamp = g.edata['timestamp'][eids1]
        left_margin_1, right_margin_1 = src_out_timestamp[i], pivot
        window_1 = torch.logical_and(ngr1_timestamp > left_margin_1, ngr1_timestamp < right_margin_1)
        for node_j in ngr3:
            mask_1 = torch.logical_and(window_1, ngr1 == node_j)
            g.edata['motif_count'][eid][22] += torch.sum(mask_1)
def count_motif_7_4(g, threshold_time, eid):
    """Accumulate motif 7-4 occurrences for edge ``eid`` into slot 23 of
    ``g.edata['motif_count']``.

    Args:
        g: graph exposing ``find_edges``/``in_edges``/``out_edges`` and
           ``edata`` (presumably a DGL graph -- verify against caller).
        threshold_time: width of the temporal window around the pivot edge.
        eid: id of the pivot edge whose counter is updated in place.
    """
    src, dst = g.find_edges(eid)
    src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
    src_out_ngr = g.out_edges(src)[1]
    pivot = g.edata['timestamp'][eid]
    # First hop: out-edges of src (excluding dst) in [pivot - T, pivot).
    left_margin, right_margin = pivot - threshold_time, pivot
    mask = torch.logical_and(src_out_timestamp >= left_margin, src_out_timestamp < right_margin)
    mask = torch.logical_and(mask, src_out_ngr != dst)
    src_out_ngr, src_out_timestamp = src_out_ngr[mask], src_out_timestamp[mask]
    # dst's in-neighbourhood is invariant across both loops; fetch it once.
    ngr4, _, eids4 = g.in_edges(dst, form='all')
    ngr4_timestamp = g.edata['timestamp'][eids4]
    for i, node_i in enumerate(src_out_ngr):
        # Second hop: in-edges of node_i (excluding src) in [pivot - T, t_i).
        ngr2, _, eids2 = g.in_edges(node_i, form='all')
        ngr2_timestamp = g.edata['timestamp'][eids2]
        left_margin_2, right_margin_2 = pivot - threshold_time, src_out_timestamp[i]
        mask_2 = torch.logical_and(ngr2_timestamp >= left_margin_2, ngr2_timestamp < right_margin_2)
        mask_2 = torch.logical_and(mask_2, ngr2 != src)
        ngr2, ngr2_timestamp = ngr2[mask_2], ngr2_timestamp[mask_2]
        # Third-hop time window depends only on i, not on node_j.  (Also
        # fixes the original 'left_mragin_4' variable-name typo.)
        left_margin_4, right_margin_4 = src_out_timestamp[i], pivot
        window_4 = torch.logical_and(ngr4_timestamp > left_margin_4, ngr4_timestamp < right_margin_4)
        for node_j in ngr2:
            mask_4 = torch.logical_and(window_4, ngr4 == node_j)
            g.edata['motif_count'][eid][23] += torch.sum(mask_4)
def count_bipartite_graph(g, threshold_time, device='cpu'):
    """Compute all 24 temporal-motif counters for every edge of ``g``.

    Initialises ``g.edata['motif_count']`` to a (num_edges, 24) zero tensor
    and lets each ``count_motif_*`` helper accumulate into its own slot.
    """
    counters = (count_motif_1_1, count_motif_1_2, count_motif_1_3,
                count_motif_2_1, count_motif_2_2, count_motif_2_3,
                count_motif_3_1, count_motif_3_2, count_motif_3_3,
                count_motif_4_1, count_motif_4_2, count_motif_4_3,
                count_motif_5_1, count_motif_5_2, count_motif_5_3, count_motif_5_4,
                count_motif_6_1, count_motif_6_2, count_motif_6_3, count_motif_6_4,
                count_motif_7_1, count_motif_7_2, count_motif_7_3, count_motif_7_4)
    g.edata['motif_count'] = torch.zeros(g.number_of_edges(), 24)
    for edge_id in tqdm(range(g.num_edges())):
        for count_fn in counters:
            count_fn(g, threshold_time, edge_id)
| 49.376098 | 102 | 0.735362 |
dc39e41f29f5b5ef727dbbee5187d55a01b13d45 | 2,725 | py | Python | segmenter/jobs/BaseJob.py | brandongk/segmenter | dbc042d31dc74f1abdc87ae10a6be78ba38ddb91 | [
"Unlicense"
] | null | null | null | segmenter/jobs/BaseJob.py | brandongk/segmenter | dbc042d31dc74f1abdc87ae10a6be78ba38ddb91 | [
"Unlicense"
] | null | null | null | segmenter/jobs/BaseJob.py | brandongk/segmenter | dbc042d31dc74f1abdc87ae10a6be78ba38ddb91 | [
"Unlicense"
] | null | null | null | from launcher import Task
import argparse
from typing import Dict
import os
import itertools
import sys
import pprint
import json
class BaseJob(Task):
    """Base class for segmenter CLI jobs.

    Registers the shared ``dataset`` / ``--job-config`` command-line
    arguments, resolves the dataset's data and output directories, and in
    :meth:`execute` loads the class list and (optionally) a job
    configuration that defines the classes and cross-validation folds.
    """

    @staticmethod
    def arguments(parser: argparse.ArgumentParser) -> None:
        """Adds the command-line arguments shared by all jobs to ``parser``."""
        parser.add_argument(
            "dataset",
            type=str,
            help='the dataset to use when running the command.',
            nargs='?',
            default="")
        parser.add_argument(
            "--job-config",
            type=str,
            default='',
            help=
            'the configuration on which to run the command. Can also be set through the JOB_CONFIG environment variable.',
            required=False)

    @staticmethod
    def arguments_to_cli(args) -> str:
        """Renders the parsed arguments back into a CLI argument string."""
        return args["dataset"]

    def __init__(self, args):
        """Stores the parsed arguments and resolves per-dataset directories.

        Side effect: exports ``JOB_CONFIG`` into the process environment so
        downstream code (and child processes) can pick it up.
        """
        self.args = args
        self.data_dir = os.path.join(os.path.abspath(args["data_dir"]),
                                     args["dataset"])
        self.output_dir = os.path.join(os.path.abspath(args["output_dir"]),
                                       args["dataset"])
        # NOTE(review): argparse defaults job_config to '' (never None), so
        # this also exports an empty value; execute() below tolerates that
        # by checking for a non-empty JOB_CONFIG.
        if args["job_config"] is not None:
            os.environ["JOB_CONFIG"] = args["job_config"]

    def execute(self):
        """Loads classes/folds, then configures TensorFlow if available."""
        # Imported lazily -- presumably to avoid import cycles or to defer
        # heavy imports until a job actually runs; confirm before moving.
        from segmenter.config import config_from_dir, validate_config
        self.job_config = None
        self.job_hash = None
        # Default class order comes from the dataset's classes.json; a job
        # config (below) may override it.
        with open(os.path.join(self.data_dir, "classes.json"),
                  "r") as json_file:
            data = json.load(json_file)
            self.classes = data["class_order"]
        if "JOB_CONFIG" in os.environ and os.environ["JOB_CONFIG"]:
            self.job_config, self.job_hash = config_from_dir(
                os.path.join(self.output_dir, os.environ["JOB_CONFIG"]))
            # validate_config(self.job_config)
            pprint.pprint(self.job_config)
            self.classes = self.job_config["CLASSES"]
            # FOLDS == 0 means no cross-validation: a single "all" fold.
            self.folds = ["all"] if self.job_config["FOLDS"] == 0 else [
                "fold{}".format(o) for o in range(self.job_config["FOLDS"])
            ]
            if self.job_config["BOOST_FOLDS"] > 0:
                # Expand each fold into fold/boost-fold combinations, e.g.
                # "fold0b0", "fold0b1", ...
                boost_folds = [
                    "b{}".format(o)
                    for o in list(range(0, self.job_config["BOOST_FOLDS"] + 1))
                ]
                self.folds = [
                    "".join(o)
                    for o in itertools.product(*[self.folds, boost_folds])
                ]
        # TensorFlow is optional at this stage; jobs that don't need it
        # still run if it is not installed.
        try:
            import tensorflow as tf
            if os.environ.get("DEBUG", "false").lower() == "true":
                tf.config.experimental_run_functions_eagerly(True)
            else:
                tf.get_logger().setLevel("ERROR")
        except ModuleNotFoundError:
            pass
20de3445013590c3e600cfd231c598bf84d115a1 | 5,406 | py | Python | binc19/binc.py | david-deboer/covid_binned_data | 4ed672974af17614ae559188428a843227f3f5b8 | [
"BSD-2-Clause"
] | null | null | null | binc19/binc.py | david-deboer/covid_binned_data | 4ed672974af17614ae559188428a843227f3f5b8 | [
"BSD-2-Clause"
] | null | null | null | binc19/binc.py | david-deboer/covid_binned_data | 4ed672974af17614ae559188428a843227f3f5b8 | [
"BSD-2-Clause"
] | null | null | null | import csv
import os.path
import numpy as np
import matplotlib.pyplot as plt
from . import stats, binc_util
from my_utils import state_variable
"""
This handles covid_binned_data files.
"""
# Matplotlib colour names cycled through by Binc.plot(); entries are reused
# modulo len(color_list) when more series than colours are drawn.
color_list = ['b', 'g', 'r', 'c', 'm', 'k', 'tab:blue', 'tab:orange', 'tab:brown', 'tab:olive',
              'tab:pink', 'bisque', 'lightcoral', 'goldenrod', 'lightgrey', 'lime', 'lightseagreen']
class Binc(state_variable.StateVar):
    """Reader/plotter for covid_binned_data CSV files.

    Loads a ``Bin_*.csv`` file into metadata columns (everything up to and
    including 'Latitude') plus a 2-D numpy array of per-date values, and can
    compute statistics and plot them per row.
    """
    def __init__(self, filename=None):
        # Keyword arguments forwarded to matplotlib's plot(); held in a
        # separate StateVar so plot() can update them from **kwargs.
        plt_args = {'alpha': None,
                    'color': None,
                    'linestyle': None,
                    'linewidth': None,
                    'marker': None,
                    'markersize': None,
                    'label': None
                    }
        super().__init__(label='BINC state variables', verbose=False, enforce='loose')
        self.pltpar = state_variable.StateVar(label='Plot kwargs', verbose=False, enforce='nominal')
        self.pltpar.sv_load(plt_args, use_to_init=True, var_type=None)
        self.filename = filename
        self.header = []     # raw first CSV row
        self.dates = []      # list of datetime objects parsed from the header
        self.data = []       # becomes an (Ndata, len(dates)) numpy array after load()
        self.stats = stats.Stat(stat_type=None)
        self.st_date = {}    # stat_type -> dates returned by stats.calc
        self.st_data = {}    # stat_type -> {row ID -> computed values}
        if filename is not None:
            self.load()
    def load(self, filename=None,
             dir='/Users/ddeboer/Documents/ubase/Projects/COVID_analysis/covid_binned_data'):
        """
        Load csv files.

        The filename is normalized to 'Bin_<name>.csv' and joined onto
        ``dir`` (a hard-coded local path by default).  The header row's
        columns up to and including 'Latitude' become per-row metadata
        attributes; the remaining columns are parsed as dates/values.

        Parameters
        ----------
        filename : str or None
            filename of csv; falls back to self.filename when None.
        """
        if filename is None:
            filename = self.filename
        if not filename.endswith('.csv'):
            filename = '{}.csv'.format(filename)
        if not filename.startswith('Bin_'):
            filename = 'Bin_{}'.format(filename)
        if dir is not None:
            filename = os.path.join(dir, filename)
        self.filename = filename
        with open(filename, 'r') as fp:
            reader = csv.reader(fp)
            for i, row in enumerate(reader):
                if not i:
                    # Header row: split into metadata columns and date columns.
                    self.header = row
                    self.dtype = [x for x in row[:row.index('Latitude')+1]]
                    for i, _d in enumerate(self.dtype):
                        setattr(self, _d, [])
                    self.ID = []  # This is set to the first col, which should be a unique ID
                    dataslice = slice(len(self.dtype), len(row))
                    self.dates = [binc_util.string_to_date(x) for x in row[dataslice]]
                else:
                    this_row = [float(x) for x in row[dataslice]]
                    # Skip malformed rows whose width doesn't match the header.
                    if len(this_row) != len(self.dates):
                        continue
                    self.data.append(this_row)
                    for i, _d in enumerate(self.dtype):
                        getattr(self, _d).append(row[i])
                    self.ID.append(row[0])
        self.data = np.asarray(self.data)
        self.Longitude = [float(x) for x in self.Longitude]
        self.Latitude = [float(x) for x in self.Latitude]
        self.Ndata = len(self.data)
    def meta(self, val, key, colname):
        """Return metadata attribute ``val`` for the row where column
        ``colname`` equals ``key``."""
        ind = self.rowind(key, colname)
        return getattr(self, val)[ind]
    def row(self, key, colname='ID'):
        """Return the data row where column ``colname`` equals ``key``,
        or None if not found."""
        col4ind = getattr(self, colname)
        try:
            col = self.data[col4ind.index(key)]
        except ValueError:
            col = None
        return col
    def rowind(self, key, colname='ID'):
        """Return the row index where column ``colname`` equals ``key``,
        or None if not found."""
        col4ind = getattr(self, colname)
        try:
            col = col4ind.index(key)
        except ValueError:
            col = None
        return col
    def calc(self, stat_type, **kwargs):
        """Compute ``stat_type`` for every row, caching the results in
        self.st_date / self.st_data keyed by stat_type and row ID."""
        self.stats.set_stat(stat_type, **kwargs)
        self.st_data[stat_type] = {}
        for i in range(self.Ndata):
            key = self.ID[i]
            self.st_date[stat_type], self.st_data[stat_type][key] = self.stats.calc(self.dates,
                                                                                    self.data[i])
    def plot(self, stat_type, key, colname='ID', figname='ID', **kwargs):
        """Plot ``stat_type`` over time for one or more rows.

        ``key`` may be a list or a comma-separated string of values looked
        up in column ``colname``.  ``kwargs`` update both the plot-style
        StateVar (pltpar) and this object's state (e.g. ``label``).
        """
        self.pltpar.state(**kwargs)
        self.state(**kwargs)
        self.stats.set_stat(stat_type, **kwargs)
        fig = plt.figure(figname)
        if not isinstance(key, list):
            key = key.split(',')
        # Validate that every requested legend-label column actually exists.
        if self.label is not None:
            if not isinstance(self.label, list):
                self.label = self.label.split(',')
            for lc in self.label:
                try:
                    x = getattr(self, lc)[0]
                except TypeError:
                    raise TypeError("binc.plot: "
                                    "label must be a column header name [{}]".format(lc))
        for ik, k in enumerate(key):
            ind = self.rowind(k, colname=colname)
            if ind is None:
                continue
            x, y = self.stats.calc(self.dates, self.data[ind])
            if isinstance(self.label, list):
                lbl = []
                for lc in self.label:
                    if lc is not None:
                        lbl.append(getattr(self, lc)[ind])
                self.pltpar.label = ','.join(lbl)
            # Cycle colours when more series than colour entries.
            cik = ik % len(color_list)
            self.pltpar.color = color_list[cik]
            plt.plot(x, y, **self.pltpar.sv_todict())
        fig.autofmt_xdate()
        plt.title(colname)
b3e08123cdf4b6311bb7f5c4be0f638bbc456e9a | 2,920 | py | Python | synergy/db/model/daemon_process_entry.py | eggsandbeer/scheduler | 18ad32bd7b824ca334e2c5a1bbd10f599dfc2c82 | [
"BSD-3-Clause"
] | null | null | null | synergy/db/model/daemon_process_entry.py | eggsandbeer/scheduler | 18ad32bd7b824ca334e2c5a1bbd10f599dfc2c82 | [
"BSD-3-Clause"
] | null | null | null | synergy/db/model/daemon_process_entry.py | eggsandbeer/scheduler | 18ad32bd7b824ca334e2c5a1bbd10f599dfc2c82 | [
"BSD-3-Clause"
] | null | null | null | __author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, DictField, ListField
from synergy.scheduler.scheduler_constants import TYPE_MANAGED, TYPE_FREERUN, TYPE_GARBAGE_COLLECTOR, EXCHANGE_UTILS, \
TYPE_DAEMON
# Document field names for DaemonProcessEntry below; kept as module-level
# constants so other modules can reference the fields without hard-coding
# string literals.
PROCESS_NAME = 'process_name'
CLASSNAME = 'classname'
MQ_QUEUE = 'mq_queue'
MQ_EXCHANGE = 'mq_exchange'
MQ_ROUTING_KEY = 'mq_routing_key'
ARGUMENTS = 'arguments'
TOKEN = 'token'
PROCESS_TYPE = 'process_type'
LOG_FILENAME = 'log_filename'
LOG_TAG = 'log_tag'
PID_FILENAME = 'pid_filename'
PRESENT_ON_BOXES = 'present_on_boxes'  # list of boxes where this process is monitored by the Supervisor
class DaemonProcessEntry(BaseDocument):
    """ Non-persistent model. This class presents Process Context Entry record """
    # Identity of the process and the class implementing it.
    process_name = StringField(PROCESS_NAME)
    classname = StringField(CLASSNAME)
    token = StringField(TOKEN)
    # Message-queue wiring for the process.
    mq_queue = StringField(MQ_QUEUE)
    mq_exchange = StringField(MQ_EXCHANGE)
    mq_routing_key = StringField(MQ_ROUTING_KEY)
    # Free-form launch arguments.
    arguments = DictField(ARGUMENTS)
    process_type = StringField(PROCESS_TYPE, choices=[TYPE_MANAGED, TYPE_FREERUN, TYPE_DAEMON, TYPE_GARBAGE_COLLECTOR])
    # Boxes on which the Supervisor monitors this process.
    present_on_boxes = ListField(PRESENT_ON_BOXES)
    # Runtime artefact locations.
    pid_filename = StringField(PID_FILENAME)
    log_filename = StringField(LOG_FILENAME)

    @BaseDocument.key.getter
    def key(self):
        """ :return: name of the process, which doubles as the document key """
        return self.process_name

    @key.setter
    def key(self, value):
        """ :param value: name of the process """
        self.process_name = value
def daemon_context_entry(process_name,
                         classname,
                         token,
                         exchange=EXCHANGE_UTILS,
                         present_on_boxes=None,
                         arguments=None,
                         queue=None,
                         routing=None,
                         process_type=TYPE_DAEMON,
                         pid_file=None,
                         log_file=None):
    """ forms process context entry

    Queue, routing key, pid and log file names are derived from *token*
    (suffixed with '_daemon') whenever they are not supplied explicitly.
    """
    suffixed_token = token + '_daemon'
    if queue is None:
        queue = 'queue_' + suffixed_token
    if routing is None:
        routing = 'routing_' + suffixed_token
    if pid_file is None:
        pid_file = suffixed_token + '.pid'
    if log_file is None:
        log_file = suffixed_token + '.log'
    if arguments is None:
        arguments = {}
    else:
        assert isinstance(arguments, dict)

    return DaemonProcessEntry(
        process_name=process_name,
        classname=classname,
        token=token,
        mq_queue=queue,
        mq_routing_key=routing,
        mq_exchange=exchange,
        present_on_boxes=present_on_boxes,
        arguments=arguments,
        process_type=process_type,
        log_filename=log_file,
        pid_filename=pid_file)
| 32.087912 | 119 | 0.658562 |
13021d885ba3e5d590db561dd4b99dc5b14a61ce | 18,146 | py | Python | tensorflow/python/framework/importer.py | topsun888/tensorflow | bad7c50b9dc9789ad7dd0a62daca40b7269841ed | [
"Apache-2.0"
] | 2 | 2019-07-05T15:17:01.000Z | 2020-04-16T07:25:56.000Z | tensorflow/python/framework/importer.py | kiliczsh/tensorflow | f49aca4532c155597c669cf2189f211cafbebf96 | [
"Apache-2.0"
] | 1 | 2021-04-12T03:51:59.000Z | 2021-04-12T03:51:59.000Z | tensorflow/python/framework/importer.py | kiliczsh/tensorflow | f49aca4532c155597c669cf2189f211cafbebf96 | [
"Apache-2.0"
] | 5 | 2018-02-27T00:34:23.000Z | 2022-02-28T16:38:08.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A utility function for importing TensorFlow graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
# TODO(josh11b): SWIG the code from node_def_util instead of duplicating
# the logic here.
def _GetNodeAttr(node_def, attr_name):
  """Returns the attr of `node_def` named `attr_name`, raising if absent."""
  # Membership must be tested explicitly: indexing a protobuf map would
  # silently insert a default-valued entry for a missing key.
  if attr_name in node_def.attr:
    return node_def.attr[attr_name]
  raise ValueError('Expected one attr with name %r in %s.'
                   % (attr_name, str(node_def)))
def _ArgToTypesNoRef(node_def, arg_def):
  """Returns the list of dtypes for `arg_def`, ignoring any ref-ness."""
  # A numbered arg repeats a single dtype `number_attr` times.
  if arg_def.number_attr:
    repeats = _GetNodeAttr(node_def, arg_def.number_attr).i
    if arg_def.type_attr:
      dtype = _GetNodeAttr(node_def, arg_def.type_attr).type
    else:
      assert arg_def.type != types_pb2.DT_INVALID
      dtype = arg_def.type
    return [dtype] * repeats
  # Single dtype taken from an attr of the node.
  if arg_def.type_attr:
    return [_GetNodeAttr(node_def, arg_def.type_attr).type]
  # A list of dtypes taken from an attr of the node.
  if arg_def.type_list_attr:
    return _GetNodeAttr(node_def, arg_def.type_list_attr).list.type
  # Otherwise the dtype is fixed in the OpDef itself.
  assert arg_def.type != types_pb2.DT_INVALID
  return [arg_def.type]
def _SingleArgToTypes(node_def, arg_def):
  """Returns the dtypes for `arg_def`, converted to ref types if needed."""
  base_types = _ArgToTypesNoRef(node_def, arg_def)
  if not arg_def.is_ref:
    return base_types
  return [dtypes.as_dtype(t).as_ref.as_datatype_enum for t in base_types]
def _ArgsToTypes(node_def, arg_list):
  """Concatenates the dtypes of every arg in `arg_list` for `node_def`."""
  return [dtype
          for arg_def in arg_list
          for dtype in _SingleArgToTypes(node_def, arg_def)]
def _InputTypes(node_def, op_dict):
  """Returns the flattened input dtypes of `node_def` per its OpDef."""
  return _ArgsToTypes(node_def, op_dict[node_def.op].input_arg)
def _OutputTypes(node_def, op_dict):
  """Returns the flattened output dtypes of `node_def` per its OpDef."""
  return _ArgsToTypes(node_def, op_dict[node_def.op].output_arg)
def _IsControlInput(input_name):
  """Returns True if `input_name` denotes a control input ('^op_name')."""
  return input_name[:1] == '^'
def _ParseTensorName(tensor_name):
  """Parses a tensor name into an operation name and output index.

  This function will canonicalize tensor names as follows:

  * "foo:0"       -> ("foo", 0)
  * "foo:7"       -> ("foo", 7)
  * "foo"         -> ("foo", 0)
  * "foo:bar:baz" -> ValueError

  Args:
    tensor_name: The name of a tensor.

  Returns:
    A tuple containing the operation name, and the output index.

  Raises:
    ValueError: If `tensor_name' cannot be interpreted as the name of a tensor.
  """
  op_name, sep, index_str = tensor_name.partition(':')
  if not sep:
    # Expected format: 'operation_name' (implicit 0th output).
    return tensor_name, 0
  # Expected format: 'operation_name:output_index'; anything with extra
  # colons or a non-numeric index fails the int() conversion below.
  try:
    output_index = int(index_str)
  except ValueError:
    raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))
  return op_name, output_index
def _CanonicalInputName(input_name):
  """Returns `input_name` normalized to 'op_name:output_index' form.

  Control inputs ('^op_name') are returned unchanged.
  """
  input_name = compat.as_str(input_name)
  if _IsControlInput(input_name):
    return input_name
  op_name, output_index = _ParseTensorName(input_name)
  return '{}:{}'.format(op_name, output_index)
def _InvalidNodeMessage(node, message):
  """Formats an import error message that names the offending node."""
  return 'graph_def is invalid at node {!r}: {}.'.format(node.name, message)
@contextlib.contextmanager
def _MaybeDevice(device):
  """Applies the given device only if device is not None or empty."""
  if not device:
    yield
    return
  with ops.device(device):
    yield
def _FindAttrInOpDef(attr_name, op_def):
  """Returns the attr_def in `op_def` named `attr_name`, or None."""
  matches = (attr_def for attr_def in op_def.attr if attr_def.name == attr_name)
  return next(matches, None)
def import_graph_def(graph_def, input_map=None, return_elements=None,
                     name=None, op_dict=None, producer_op_list=None):
  """Imports the TensorFlow graph in `graph_def` into the Python `Graph`.

  This function provides a way to import a serialized TensorFlow
  [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
  protocol buffer, and extract individual objects in the `GraphDef` as
  [`Tensor`](#Tensor) and [`Operation`](#Operation) objects. See
  [`Graph.as_graph_def()`](#Graph.as_graph_def) for a way to create a
  `GraphDef` proto.

  Args:
    graph_def: A `GraphDef` proto containing operations to be imported into
      the default graph.
    input_map: A dictionary mapping input names (as strings) in `graph_def`
      to `Tensor` objects. The values of the named input tensors in the
      imported graph will be re-mapped to the respective `Tensor` values.
    return_elements: A list of strings containing operation names in
      `graph_def` that will be returned as `Operation` objects; and/or
      tensor names in `graph_def` that will be returned as `Tensor` objects.
    name: (Optional.) A prefix that will be prepended to the names in
      `graph_def`. Defaults to `"import"`.
    op_dict: (Optional.) A dictionary mapping op type names to `OpDef` protos.
      Must contain an `OpDef` proto for each op type named in `graph_def`.
      If omitted, uses the `OpDef` protos registered in the global registry.
    producer_op_list: (Optional.) An `OpList` proto with the (possibly stripped)
      list of `OpDef`s used by the producer of the graph. If provided, attrs
      for ops in `graph_def` that are not in `op_dict` that have their default
      value according to `producer_op_list` will be removed. This will allow
      some more `GraphDef`s produced by later binaries to be accepted by
      earlier binaries.

  Returns:
    A list of `Operation` and/or `Tensor` objects from the imported graph,
    corresponding to the names in `return_elements`.

  Raises:
    TypeError: If `graph_def` is not a `GraphDef` proto,
      `input_map` is not a dictionary mapping strings to `Tensor` objects,
      or `return_elements` is not a list of strings.
    ValueError: If `input_map`, or `return_elements` contains names that
      do not appear in `graph_def`, or `graph_def` is not well-formed (e.g.
      it refers to an unknown tensor).
  """
  # Type checks for inputs.
  if not isinstance(graph_def, graph_pb2.GraphDef):
    # `graph_def` could be a dynamically-created message, so try a duck-typed
    # approach
    try:
      old_graph_def = graph_def
      graph_def = graph_pb2.GraphDef()
      graph_def.MergeFrom(old_graph_def)
    except TypeError:
      raise TypeError('graph_def must be a GraphDef proto.')
  if input_map is None:
    input_map = {}
  else:
    if not (isinstance(input_map, dict)
            and all(isinstance(k, compat.bytes_or_text_types)
                    for k in input_map.keys())):
      raise TypeError('input_map must be a dictionary mapping strings to '
                      'Tensor objects.')
  if return_elements is not None:
    return_elements = tuple(return_elements)
    if not all(isinstance(x, compat.bytes_or_text_types)
               for x in return_elements):
      raise TypeError('return_elements must be a list of strings.')

  # Use a canonical representation for all tensor names.
  input_map = {_CanonicalInputName(k): v for k, v in input_map.items()}
  # Keys of `input_map` that were actually consumed; any leftovers are
  # reported as an error at the end (likely typos).
  used_input_keys = set()

  # Maps the original op name in `graph_def` to the created Operation.
  name_to_op = {}

  if op_dict is None:
    op_dict = op_def_registry.get_registered_ops()

  if producer_op_list is None:
    producer_op_dict = None
  else:
    producer_op_dict = {op.name: op for op in producer_op_list.op}

  # LINT.IfChange
  with ops.name_scope(name, 'import', input_map.values()) as scope:
    g = ops.get_default_graph()
    # TODO(ashankar): Should this just copy over or should it do some
    # more nuanced merging? For example, the graph may already have some
    # marked "bad versions" and we don't want to lose those because of
    # what's in graph_def.versions? The C++ ImporGraphDef does something
    # more nuanced.
    g.graph_def_versions.CopyFrom(graph_def.versions)

    if input_map:
      if not scope:
        # The caller must have passed `name=''`.
        raise ValueError('tf.import_graph_def() requires a non-empty `name` '
                         'if `input_map` is used.')
      with ops.name_scope('_inputs'):
        input_map = {k: ops.convert_to_tensor(v) for k, v in input_map.items()}

    # NOTE(mrry): We do this in two passes, because there may be a cycle in
    # `graph_def`.

    # 1. Add operations without their inputs.
    for node in graph_def.node:
      # Set any default attr values that aren't present.
      op_def = op_dict[node.op]
      for attr_def in op_def.attr:
        key = attr_def.name
        if attr_def.HasField('default_value'):
          value = node.attr[key]
          if value is None or value.WhichOneof('value') is None:
            node.attr[key].CopyFrom(attr_def.default_value)
      if producer_op_dict:
        # Remove any default attr values that aren't in op_def.
        if node.op in producer_op_dict:
          producer_op_def = producer_op_dict[node.op]
          # We make a copy of node.attr to iterate through since we
          # may modify node.attr inside the loop.
          for key in list(node.attr):
            if _FindAttrInOpDef(key, op_def) is None:
              # No attr_def in consumer, look in producer.
              attr_def = _FindAttrInOpDef(key, producer_op_def)
              if (attr_def and attr_def.HasField('default_value') and
                  node.attr[key] == attr_def.default_value):
                # Unknown attr had default value in producer, delete it
                # so it can be understood by consumer.
                del node.attr[key]

      # Create the op with no inputs yet; they are wired up in pass 2.
      output_types = _OutputTypes(node, op_dict)
      name_to_op[node.name] = g.create_op(
          node.op, [], output_types, name=node.name, attrs=node.attr,
          compute_shapes=False, compute_device=False,
          op_def=op_def)

    # 2. Add inputs to the operations.
    for node in graph_def.node:
      op = name_to_op[node.name]
      input_types = _InputTypes(node, op_dict)

      # Rewrite the colocation attributes in the graph, since the
      # names of new ops may have changed.
      for key, value in op.node_def.attr.items():
        if key == '_class':
          class_values = value.list
          new_class_values = []
          for class_value in class_values.s:
            if class_value.startswith(b'loc:@'):
              op_to_bind_to = class_value[5:].decode()
              # Find the op by its original name.
              if op_to_bind_to not in name_to_op:
                raise ValueError('Specified colocation to an op that '
                                 'does not exist during import: %s in %s' % (
                                     op_to_bind_to, node.name))
              original_op = name_to_op[op_to_bind_to]
              new_class_values.append(compat.as_bytes(
                  'loc:@' + original_op.name))
            else:
              new_class_values.append(class_value)
          value.list.CopyFrom(attr_value_pb2.AttrValue.ListValue(
              s=new_class_values))

      # NOTE(mrry): We cannot use zip here because control inputs do not appear
      # in the list of input_types.
      for i, input_name in enumerate(
          [_CanonicalInputName(x) for x in node.input]):

        if _IsControlInput(input_name):
          # (a) Input is a control input that should be taken from an op
          # in "graph_def".
          try:
            source_op = name_to_op[input_name[1:]]
          except KeyError:
            raise ValueError(
                _InvalidNodeMessage(
                    node,
                    'Control input %r not found in graph_def.' % (input_name,)))
          # pylint: disable=protected-access
          op._add_control_input(source_op)
          # pylint: enable=protected-access

        else:
          try:
            input_type = input_types[i]
          except IndexError:
            raise ValueError(_InvalidNodeMessage(
                node, 'More inputs specified (%r) than the op expects.'
                % (input_name,)))

          if input_name in input_map:
            # (b) Input should be replaced by a tensor from the caller.
            source_tensor = input_map[input_name]
            used_input_keys.add(input_name)

          else:
            # (c) Input should be taken from an op in `graph_def`.
            operation_name, output_index = _ParseTensorName(input_name)
            try:
              source_op = name_to_op[operation_name]
              source_tensor = list(source_op.values())[output_index]
            except (KeyError, IndexError):
              raise ValueError(
                  _InvalidNodeMessage(
                      node,
                      'Input tensor %r not found in graph_def.'
                      % (input_name,)))

          try:
            # pylint: disable=protected-access
            op._add_input(source_tensor, dtype=input_type)
            # pylint: enable=protected-access
          except TypeError as te:
            raise ValueError(_InvalidNodeMessage(
                node, 'Input tensor %r %s' % (input_name, te)))

      # pylint: disable=protected_access
      if op._input_dtypes != input_types:
        raise ValueError(
            _InvalidNodeMessage(
                node,
                'Input types mismatch (expected %r but got %r)'
                % (', '.join(dtypes.as_dtype(x).name for x in input_types),
                   ', '.join(x.name for x in op._input_dtypes))))
      # pylint: enable=protected_access

      # Execute shape inference for this op.
      # NOTE(mrry): If the graph contains a cycle, the full shape information
      # may not be available for this op's inputs.
      ops.set_shapes_for_outputs(op)
      # For nodes with _output_shapes set, set the output shapes.
      if '_output_shapes' in op.node_def.attr:
        for i, output in enumerate(op.outputs):
          dims = op.node_def.attr['_output_shapes'].list.shape[i]
          output_shape = tensor_shape.TensorShape(
              None if dims.unknown_rank else
              [dim.size if dim.size >= 0 else None for dim in dims.dim])

          try:
            output.set_shape(output_shape)
          except ValueError as e:
            # If the output shape is incompatible with what is inferred
            # by the graph for a very specific whitelist of ops, then we
            # ignore this output shape.  This can happen if there is a
            # bug in the shape function for some operation, and the
            # serialized graph def has the incorrect shape set when
            # running on a newer binary with the fixed shape function.
            # This is an escape hatch that allows us to correct shape
            # functions that are not critical to correct execution but
            # would cause graphs to fail if imported after correcting.
            #
            # This can be removed after 2017/03/08.
            if op.type not in ['RandomShuffleQueue', 'PaddingFIFOQueue',
                               'FIFOQueue', 'PriorityQueue', 'QueueSize',
                               'Stack', 'Barrier', 'BarrierReadySize',
                               'BarrierIncompleteSize', 'HashTable',
                               'MutableHashTable',
                               'MutableHashTableOfTensors', 'Mutex',
                               'CuckooTable', 'IndexTable',
                               'WholeFileReader', 'TextLineReader',
                               'FixedLengthRecordReader',
                               'TFRecordReader', 'IdentityReader',
                               'RefSwitch', 'RefEnter', 'RefNextIteration',
                               'RefMerge', 'RefIdentity']:
              raise e

        del op.node_def.attr['_output_shapes']

      # Apply device functions for this op.
      # NOTE(mrry): We do this after configuring the inputs, because
      # the result of the device functions may depend on the inputs.
      with _MaybeDevice(node.device):
        g._apply_device_functions(op)  # pylint: disable=protected-access

    # Treat unused input mappings as an error, because they are likely to be
    # due to a typo.
    unused_input_keys = frozenset(input_map.keys()).difference(used_input_keys)
    if unused_input_keys:
      raise ValueError(
          'Attempted to map inputs that were not found in graph_def: [%s]'
          % ', '.join(unused_input_keys))

    if return_elements is None:
      return None
    else:
      # Resolve each requested element: a name containing ':' is a tensor,
      # otherwise it is an operation.
      ret = []
      for name in return_elements:
        name = compat.as_str(name)
        if ':' in name:
          try:
            operation_name, output_index = _ParseTensorName(name)
            ret.append(name_to_op[operation_name].outputs[output_index])
          except (ValueError, KeyError, IndexError):
            raise ValueError(
                'Requested return_element %r not found in graph_def.' % name)
        else:
          try:
            ret.append(name_to_op[name])
          except KeyError:
            raise ValueError(
                'Requested return_element %r not found in graph_def.' % name)
      return ret
| 39.969163 | 85 | 0.646313 |
0f9e5cda66c64a8c70566706fa6bd291dce725bd | 1,142 | py | Python | dongmingram/notifications/migrations/0001_initial.py | ldm0408/Dongmingram | 9d745734df8f4f4a0ce64be0adc725cbfa1ea63c | [
"MIT"
] | null | null | null | dongmingram/notifications/migrations/0001_initial.py | ldm0408/Dongmingram | 9d745734df8f4f4a0ce64be0adc725cbfa1ea63c | [
"MIT"
] | 18 | 2018-05-21T10:17:39.000Z | 2022-03-08T22:53:38.000Z | dongmingram/notifications/migrations/0001_initial.py | ldm0408/Dongmingram | 9d745734df8f4f4a0ce64be0adc725cbfa1ea63c | [
"MIT"
] | null | null | null | # Generated by Django 2.0.5 on 2018-07-05 10:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Notification model for
    # the notifications app.
    initial = True
    dependencies = [
        # Requires the Image model (FK target) and the swappable user model.
        ('images', '0007_image_tags'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Closed set of notification kinds.
                ('notification_type', models.CharField(choices=[('like', 'Like'), ('comment', 'Comment'), ('follow', 'Follow')], max_length=20)),
                # User who triggered the notification.
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='creator', to=settings.AUTH_USER_MODEL)),
                ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='images.Image')),
                # User who receives the notification.
                ('to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 39.37931 | 145 | 0.647986 |
67bacfbe31a274d23ff36a17b6e2e8b67be1419a | 9,829 | py | Python | reviewboard/admin/security_checks.py | klpyang/reviewboard | d7dabf36e5b492f18048dd7084026bf99d6933c5 | [
"MIT"
] | 1 | 2018-08-23T09:19:02.000Z | 2018-08-23T09:19:02.000Z | reviewboard/admin/security_checks.py | klpyang/reviewboard | d7dabf36e5b492f18048dd7084026bf99d6933c5 | [
"MIT"
] | null | null | null | reviewboard/admin/security_checks.py | klpyang/reviewboard | d7dabf36e5b492f18048dd7084026bf99d6933c5 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import logging
import os
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.utils import six
from django.utils.six.moves.urllib.error import HTTPError
from django.utils.six.moves.urllib.request import urlopen
from django.utils.translation import ngettext
from django.utils.translation import ugettext_lazy as _
from djblets.siteconfig.models import SiteConfiguration
_security_checks = {}
class BaseSecurityCheck(object):
    """Abstract base class for a single security check.

    Subclasses provide ``name``, ``desc`` and ``fix_info`` and implement
    ``execute()``, which must return a ``(passed, error_msg)`` tuple.
    ``setUp()``/``tearDown()`` are optional hooks run around ``execute()``.
    """

    name = None        # Short, user-facing title of the check.
    desc = None        # Longer description of what the check verifies.
    fix_info = None    # Instructions shown to the user when the check fails.

    def setUp(self):
        # Optional hook: prepare any fixtures the check needs.
        pass

    def execute(self):
        # Must be overridden; returns (passed: bool, error_msg: str).
        raise NotImplementedError

    def tearDown(self):
        # Optional hook: clean up anything created in setUp().
        pass
class ExecutableCodeCheck(BaseSecurityCheck):
    """Verify that uploaded file attachments are served inert.

    The check writes small "Hello, World" payloads with a variety of
    executable/scripted extensions into the uploaded-files storage, then
    downloads each one over HTTP and compares the response with the stored
    bytes. If the server executed (or rewrote) the file, the bytes differ
    and the check fails. An HTTP 403 is also treated as safe.
    """

    name = _("Checking that uploaded files won't be executed by the server")
    desc = _('A misconfiguration in the web server can cause files attached '
             'to review requests to be executed as code. The file types '
             'checked in this test are: .html, .htm, .shtml, .php, .php3, '
             '.php4, .php5, .phps, .asp, .pl, .py, .fcgi, .cgi, .phtml, '
             '.phtm, .pht, .jsp, .sh, and .rb.')
    fix_info = _('For instructions on how to fix this problem, please visit '
                 '<a href="http://support.beanbaginc.com/support/solutions/'
                 'articles/110173-securing-file-attachments">'
                 'http://support.beanbaginc.com/support/solutions/articles/'
                 '110173-securing-file-attachments</a>')

    def __init__(self):
        loc = os.path.join(settings.MEDIA_ROOT, 'uploaded', 'files')
        self.storage = FileSystemStorage(location=loc)
        self.directory = settings.MEDIA_URL + 'uploaded/files/'

        # Each entry is (extensions, payload); every extension in the group
        # is saved with the same payload in setUp().
        self.file_checks = [
            (
                ['.php', '.php3', '.php4', '.php5', '.phps', '.phtml',
                 '.phtm'],
                '<?php echo "Hello, World!"; ?>'
            ),
            (
                ['.pl', '.py'],
                'print "Hello, World!"'
            ),
            (
                ['.html', '.htm', '.shtml', '.pht'],
                ('<HTML>\n'
                 '<HEAD>\n'
                 '<TITLE>Hello, world!</TITLE>\n'
                 '</HEAD>\n'
                 '<BODY>\n'
                 '<H1>Hello, world!</H1>\n'
                 '<!--#echo var="LAST_MODIFIED" -->\n'
                 '<!--#exec cmd="echo HI!" -->\n'
                 '</BODY>\n'
                 '</HTML>')
            ),
            (
                ['.jsp'],
                '<%= new String("Hello!") %>'
            ),
            (
                ['.asp'],
                '<%="Hello World!"%>'
            ),
            (
                ['.fcgi', '.cgi', '.sh'],
                ('#!/bin/sh\n'
                 'echo "Hello World!"')
            ),
            (
                ['.rb'],
                'puts "Hello world!"'
            )
        ]

    def setUp(self):
        """Save one probe file per extension into the uploads storage."""
        if self._using_default_storage():
            for i, file_check in enumerate(self.file_checks):
                extensions_list, content = file_check
                bad_extensions = []

                for ext in extensions_list:
                    try:
                        self.storage.save('exec_check' + ext,
                                          ContentFile(content))
                    except OSError:
                        # Some web server configurations prevent even saving
                        # files with certain extensions. In this case, things
                        # will definitely succeed.
                        bad_extensions.append(ext)

                # Filter out any extensions that we failed to save, because we
                # don't need to check that they downloaded properly.
                extensions_list = [ext for ext in extensions_list
                                   if ext not in bad_extensions]

                self.file_checks[i] = extensions_list, content

    def execute(self):
        """Download each probe file and flag extensions that were executed.

        Returns:
            tuple: ``(passed, error_msg)``.
        """
        error_msg = ''
        ext_result = True
        final_result = True
        failed_exts = []

        if self._using_default_storage():
            for extensions_list, content in self.file_checks:
                for ext in extensions_list:
                    try:
                        ext_result = self.download_and_compare(
                            'exec_check' + ext)
                        if final_result and not ext_result:
                            final_result = False
                    except Exception as e:
                        return (False,
                                _('Uncaught exception during test: %s') % e)

                    if not ext_result:
                        failed_exts.append(ext)

        if not final_result:
            # Note: ngettext takes (singular, plural, count). The singular
            # message must come first; previously the two were swapped.
            error_msg = _(
                ngettext(
                    'The web server incorrectly executed this file type: %s',
                    'The web server incorrectly executed these file types: %s',
                    len(failed_exts))
                % ', '.join(failed_exts))

        return final_result, error_msg

    def tearDown(self):
        """Delete all probe files created in setUp()."""
        if self._using_default_storage():
            for extensions_list, content in self.file_checks:
                for ext in extensions_list:
                    self.storage.delete('exec_check' + ext)

    def download_and_compare(self, to_download):
        """Fetch *to_download* over HTTP and compare it to the stored file.

        Returns True when the served bytes match the stored bytes (or the
        server refuses access with a 403), False otherwise.
        """
        try:
            data = urlopen(_get_url(self.directory) + to_download).read()
        except HTTPError as e:
            # An HTTP 403 is also an acceptable response
            if e.code == 403:
                return True
            else:
                raise e

        # Open in binary mode so the comparison against the bytes returned
        # by urlopen() is meaningful on both Python 2 and 3; text mode would
        # yield str on Python 3 and never equal the downloaded bytes.
        with self.storage.open(to_download, 'rb') as f:
            return data == f.read()

    def _using_default_storage(self):
        # The probe only makes sense for local filesystem storage, where the
        # uploads are served directly by the web server.
        return (settings.DEFAULT_FILE_STORAGE ==
                'django.core.files.storage.FileSystemStorage')
class AllowedHostsCheck(BaseSecurityCheck):
    """Check that Django's ALLOWED_HOSTS setting is configured sensibly.

    Fails when the setting is empty or contains the wildcard ``'*'``.
    """

    name = _('Checking ALLOWED_HOSTS setting')
    desc = _('ALLOWED_HOSTS is a list containing the host/domain names that '
             'Review Board will consider valid for this server to serve. '
             'This is a security measure to prevent an attacker from '
             'poisoning cache and password reset e-mails with links to '
             'malicious hosts by submitting requests with a fake HTTP Host '
             'header, which is possible even under many seemingly-safe web '
             'server configurations.')
    fix_info = _("To fix this, edit the settings_local.py in the site's conf "
                 "directory and add a line like this with your site's URL: "
                 "<pre>ALLOWED_HOSTS = ['example.com']</pre>")

    def execute(self):
        """Return (passed, error_msg) for the ALLOWED_HOSTS configuration."""
        allowed_hosts = settings.ALLOWED_HOSTS
        passed = True
        message = ''

        if not allowed_hosts:
            passed = False
            message = _('ALLOWED_HOSTS is empty.')

        # Checked independently; if both problems exist, the wildcard
        # message wins (it is the more serious misconfiguration).
        if '*' in allowed_hosts:
            passed = False
            message = _("ALLOWED_HOSTS contains '*', which means that the "
                        "server will respond to any host.")

        return passed, message
class SecurityCheckRunner(object):
    """Run every registered security check and collect the results.

    Each check contributes one result dictionary with the keys:

    - ``name``: user-friendly name of the check.
    - ``desc``: detailed description of what the check does.
    - ``result``: True if the check passed; False if it failed or raised
      an exception during execution.
    - ``error_msg``: description of the failure (empty on success).
    - ``fix_info``: instructions for the user when the check fails.
    """

    def __init__(self):
        pass

    def run(self):
        """Execute all registered checks and return a list of result dicts."""
        results = []

        for check_cls in six.itervalues(get_security_checks()):
            check = check_cls()
            check.setUp()
            passed, error_msg = check.execute()
            check.tearDown()

            results.append({
                'name': check.name,
                'desc': check.desc,
                'result': passed,
                'error_msg': error_msg,
                'fix_info': check.fix_info,
            })

        return results
def _populate_security_checks():
    """Lazily register the built-in security checks on first use."""
    if _security_checks:
        return

    _security_checks['executable_check'] = ExecutableCodeCheck
    _security_checks['hosts_check'] = AllowedHostsCheck
def get_security_checks():
    """Return the dict of registered security checks (name -> check class),
    populating the built-in checks first if needed."""
    _populate_security_checks()
    return _security_checks
def register_security_check(name, cls):
    """Register a custom security check class under *name*.

    Raises KeyError if *name* is already registered.
    """
    _populate_security_checks()

    if name in _security_checks:
        raise KeyError('"%s" is already a registered security check' % name)

    _security_checks[name] = cls
def unregister_security_check(name):
    """Unregister a previously registered security check.

    Raises:
        KeyError: If *name* is not a registered security check.
    """
    _populate_security_checks()

    try:
        del _security_checks[name]
    except KeyError:
        # Pass the argument lazily so the logging framework formats the
        # message only when the record is actually emitted.
        logging.error('Failed to unregister unknown security check "%s"',
                      name)
        raise KeyError('"%s" is not a registered security check' % name)
def _get_url(root):
    """Build an absolute URL for *root* on the current site.

    The scheme comes from the siteconfig "site_domain_method" setting
    (presumably 'http' or 'https' -- TODO confirm) and the host from the
    current Django Site.
    """
    protocol = SiteConfiguration.objects.get_current().get(
        "site_domain_method")
    domain = Site.objects.get_current().domain
    return '%s://%s%s' % (protocol, domain, root)
99aa19b872d5816ef270b436bdc3d31232d06435 | 1,396 | py | Python | functions/gcp/callbacks.py | Katolus/functions | c4aff37231432ce6ef4ed6b37c8b5baaede5975a | [
"MIT"
] | 4 | 2022-03-08T08:46:44.000Z | 2022-03-19T07:52:11.000Z | functions/gcp/callbacks.py | Katolus/functions | c4aff37231432ce6ef4ed6b37c8b5baaede5975a | [
"MIT"
] | 114 | 2021-10-30T05:48:54.000Z | 2022-03-06T10:57:00.000Z | functions/gcp/callbacks.py | Katolus/functions | c4aff37231432ce6ef4ed6b37c8b5baaede5975a | [
"MIT"
] | null | null | null | """Stores methods for validating GCP commands using typer's callback method"""
import typer
from functions.config.files import FunctionRegistry
from functions.helpers import is_function_in_registry
from functions.helpers import is_function_source_valid
def check_if_function_name_in_registry(
    ctx: typer.Context, param: typer.CallbackParam, function_name: str
) -> str:
    """Typer callback: reject function names missing from the registry.

    ``ctx`` and ``param`` are required by the typer callback signature but
    are not used here.
    """
    known = FunctionRegistry.check_if_function_name_in_registry(function_name)
    if not known:
        raise typer.BadParameter(
            f"Function '{function_name}' not found in registry. "
            "Please check the function name and try again."
        )
    return function_name
def check_if_function_can_be_deployed(
    ctx: typer.Context, param: typer.CallbackParam, f_name: str
) -> str:
    """Typer callback: validate that a function is registered and has a
    valid source before allowing deployment to GCP.

    ``ctx`` and ``param`` are required by the typer callback signature but
    are not used here.
    """
    registered = is_function_in_registry(f_name)
    if not registered:
        raise typer.BadParameter(
            f"Function '{f_name}' not found in registry. "
            "Please check the function name and try again."
        )

    if is_function_source_valid(f_name):
        return f_name

    raise typer.BadParameter(
        f"Function '{f_name}' source is not valid. Please fix it and try again."
    )
bcedcba739de7d9104c9d4372a83989dd681357e | 281 | py | Python | src/dynamicCode/bidders.py | Jpfonseca/Blockchain_auction_management | 99ea44e1f92b656c19e609eb9f7447b9323970a0 | [
"MIT"
] | null | null | null | src/dynamicCode/bidders.py | Jpfonseca/Blockchain_auction_management | 99ea44e1f92b656c19e609eb9f7447b9323970a0 | [
"MIT"
] | null | null | null | src/dynamicCode/bidders.py | Jpfonseca/Blockchain_auction_management | 99ea44e1f92b656c19e609eb9f7447b9323970a0 | [
"MIT"
] | 1 | 2020-11-17T06:39:03.000Z | 2020-11-17T06:39:03.000Z | # this works
def foo(id_client, num_bids):
    """Return True if *id_client* is one of the known valid client ids.

    ``num_bids`` is accepted for interface compatibility with the caller
    but is not used by this check.
    """
    valid_clients = ['WVzMbdOi9f+xgWZ5+jJ7TQ==', 'WuIaYf+KjvlGyJdCkGP7fA==']
    # Membership test replaces the manual index loop over the list.
    return id_client in valid_clients
# NOTE(review): `id_client` and `num_bids` are not defined in this module;
# presumably they are injected into the namespace by the dynamic-code
# execution harness before this runs -- confirm against the caller.
valid = foo(id_client, num_bids)
| 25.545455 | 76 | 0.658363 |
c36a50f6adff604dd44f961d0360540469d503e5 | 2,538 | py | Python | research/steve/worldmodel_learner.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 153 | 2020-10-25T13:58:04.000Z | 2022-03-07T06:01:54.000Z | research/steve/worldmodel_learner.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 11 | 2020-07-13T08:29:00.000Z | 2022-03-24T07:21:09.000Z | research/steve/worldmodel_learner.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 43 | 2018-12-28T15:01:44.000Z | 2022-02-15T06:23:05.000Z | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
from learner import Learner
from worldmodel import DeterministicWorldModel
class WorldmodelLearner(Learner):
    """
    Worldmodel-specific training loop details.

    Provides the placeholder definitions and the core training graph for a
    DeterministicWorldModel, plugged into the generic Learner loop.
    """
    def learner_name(self): return "worldmodel"

    def make_loader_placeholders(self):
        # One placeholder per transition component; all leading dims are the
        # configured batch size. Observations are flattened to a vector of
        # prod(obs_dims) floats.
        self.obs_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"], np.prod(self.env_config["obs_dims"])])
        self.next_obs_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"], np.prod(self.env_config["obs_dims"])])
        self.action_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"], self.env_config["action_dim"]])
        self.reward_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"]])
        self.done_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"]])
        # Scalar dataset-size input (float64, no shape).
        self.datasize_loader = tf.placeholder(tf.float64, [])
        return [self.obs_loader, self.next_obs_loader, self.action_loader, self.reward_loader, self.done_loader, self.datasize_loader]

    def make_core_model(self):
        # Build the world model's training loss over the current batch and an
        # Adam train op on the model parameters only.
        worldmodel = DeterministicWorldModel(self.config["name"], self.env_config, self.learner_config)
        worldmodel_loss, inspect_losses = worldmodel.build_training_graph(*self.current_batch)
        model_optimizer = tf.train.AdamOptimizer(3e-4)
        model_gvs = model_optimizer.compute_gradients(worldmodel_loss, var_list=worldmodel.model_params)
        # Despite the name, gradients are applied unclipped here.
        capped_model_gvs = model_gvs
        worldmodel_train_op = model_optimizer.apply_gradients(capped_model_gvs)
        return worldmodel, (worldmodel_loss,), (worldmodel_train_op,), inspect_losses

    ## Optional functions to override
    def initialize(self): pass
    def resume_from_checkpoint(self, epoch): pass
    def checkpoint(self): pass
    def backup(self): pass
858727a80a00d770f193a113a48e7717d6b5ffc6 | 602 | py | Python | Templates/06.String/String-BF.py | AlgorithmAndLeetCode/itcharge-LeetCode-Py | 2266b6e9add5bd306a1e1eb59d54e9447e641fd3 | [
"MIT"
] | 1 | 2022-01-22T15:52:47.000Z | 2022-01-22T15:52:47.000Z | Templates/06.String/String-BF.py | AlgorithmAndLeetCode/itcharge-LeetCode-Py | 2266b6e9add5bd306a1e1eb59d54e9447e641fd3 | [
"MIT"
] | null | null | null | Templates/06.String/String-BF.py | AlgorithmAndLeetCode/itcharge-LeetCode-Py | 2266b6e9add5bd306a1e1eb59d54e9447e641fd3 | [
"MIT"
] | null | null | null | def bruteForce(T: str, p: str) -> int:
n, m = len(T), len(p)
i, j = 0, 0 # i 表示文本串 T 的当前位置,j 表示模式串 p 的当前位置
while i < n and j < m: # i 或 j 其中一个到达尾部时停止搜索
if T[i] == p[j]: # 如果相等,则继续进行下一个字符匹配
i += 1
j += 1
else:
i = i - (j - 1) # 如果匹配失败则将 i 移动到上次匹配开始位置的下一个位置
j = 0 # 匹配失败 j 回退到模式串开始位置
if j == m:
return i - j # 匹配成功,返回匹配的开始位置
else:
return -1 # 匹配失败,返回 -1
# Demo: "bcd" first occurs at index 1 of "abcdeabc", so this prints 1.
print(bruteForce("abcdeabc", "bcd"))
9d5e5d4d8970d2e11a09ba42017a5d0392a4d861 | 1,041 | py | Python | theano/gans/BGAN/old/DISC-MNIST-PFAKE/plot_control.py | dendisuhubdy/ccw1_trainstats | 0105bddd23366db6b55421e9b3bccd6da2f9aa51 | [
"MIT"
] | 4 | 2017-10-29T17:41:17.000Z | 2020-04-07T12:51:44.000Z | theano/gans/BGAN/old/DISC-MNIST-PFAKE/plot_control.py | dendisuhubdy/ccw1_trainstats | 0105bddd23366db6b55421e9b3bccd6da2f9aa51 | [
"MIT"
] | null | null | null | theano/gans/BGAN/old/DISC-MNIST-PFAKE/plot_control.py | dendisuhubdy/ccw1_trainstats | 0105bddd23366db6b55421e9b3bccd6da2f9aa51 | [
"MIT"
] | 4 | 2017-11-26T21:53:28.000Z | 2020-04-07T12:51:45.000Z | import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from gumbel_mnist import train as gumbel_train
from rw_mnist import train as rw_train
gumbel_hard_arr = ["True", "False"]
optimGD_arr = ['adam', 'rmsprop', 'sgd']
learning_rate_arr = [1e-2, 1e-3, 1e-4, 1e-5]
anneal_rate_arr = [0.01, 0.001, 0.0001]
anneal_interval_arr = [200, 300, 500]
def main():
    """Train each GAN variant for one epoch and save a comparison plot of
    the generator losses to gen_plots/p_fake_comparison.png."""
    plt.title("Discriminator misclassification of generated samples")
    plt.xlabel("Training batches")
    plt.ylabel("Generator loss")
    # Positional args presumably follow the parameter grids declared above:
    # (gumbel_hard, optimizer, learning_rate, anneal_rate, anneal_interval)
    # -- TODO confirm against gumbel_mnist.train's signature. Note the grids
    # above use the strings "True"/"False" while booleans are passed here.
    gumbel_train(False, 'adam', 1e-4, 1e-3, 500, num_epochs=1, plot_colour="-b")
    gumbel_train(True, 'rmsprop', 1e-4, 1e-4, 200, num_epochs=1, plot_colour="-y")
    rw_train(num_epochs=1, n_samples=20, initial_eta=1e-4, plot_colour="-g")
    plt.grid()
    art = []
    # Place the legend below the axes and pass it as an extra artist so it
    # is not clipped out of the saved figure.
    lgd = plt.legend(loc=9, bbox_to_anchor=(0.5, -0.1))
    art.append(lgd)
    plt.draw()
    plt.savefig('gen_plots/p_fake_comparison.png',
                additional_artists=art,
                bbox_inches="tight")
if __name__ == '__main__':
main()
| 34.7 | 82 | 0.676273 |
216f622cd5f3ca7e64c18704204910fdf5480c95 | 3,442 | py | Python | scripts/example-scripts/GeostreamDatapointPlotter.py | julianpistorius/computing-pipeline | 708a861c0011ee20cb1d08179bba122362b69180 | [
"BSD-3-Clause"
] | 21 | 2016-09-21T15:51:10.000Z | 2022-01-25T23:28:05.000Z | scripts/example-scripts/GeostreamDatapointPlotter.py | julianpistorius/computing-pipeline | 708a861c0011ee20cb1d08179bba122362b69180 | [
"BSD-3-Clause"
] | 569 | 2015-10-01T21:49:01.000Z | 2021-03-22T22:52:32.000Z | scripts/example-scripts/GeostreamDatapointPlotter.py | julianpistorius/computing-pipeline | 708a861c0011ee20cb1d08179bba122362b69180 | [
"BSD-3-Clause"
] | 13 | 2016-04-13T05:34:49.000Z | 2019-11-05T19:17:40.000Z | import requests
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
# Fetch all datapoints for Geostreams stream 300 and plot each recorded
# weather property against time, one figure per property.
# SECURITY NOTE(review): the API key is hard-coded in this URL; move it to
# an environment variable or config file before sharing this script.
url = "https://terraref.ncsa.illinois.edu/clowder/api/geostreams/datapoints?key=Pb3AUSqnUw&stream_id=300"
response = requests.get(url)
if response.status_code == 200:
    # NOTE: this name shadows the stdlib `json` module name (the module is
    # not imported here, but the choice is confusing).
    json = response.json()
    times = []
    wind_speeds = []
    precipitation_rate = []
    surface_downwelling_shortwave_flux_in_air = []
    northward_wind = []
    relative_humidity = []
    air_temperature = []
    eastward_wind = []
    surface_downwelling_photosynthetic_photon_flux_in_air = []
    for datapoint in json:
        """Example datapoint:
        { 'sensor_id': '303',
          'created': '2017-02-03T14:33:11Z',
          'geometry': {
            'type': 'Point',
            'coordinates': [33.0745666667, -68.0249166667, 0]
          },
          'start_time': '2016-08-10T13:50:29Z',
          'id': 36185,
          'stream_id': '300',
          'sensor_name': 'Full Field',
          'end_time': '2016-08-10T13:55:00Z',
          'type': 'Feature',
          'properties': {
            'wind_speed': 1.0890774907749077,
            'precipitation_rate': 0.0,
            'surface_downwelling_shortwave_flux_in_air': 43.60608856088568,
            'northward_wind': -0.9997966833626167,
            'relative_humidity': 60.41579335793356,
            'source': u'https://terraref.ncsa.illinois.edu/clowder/datasets/5893a72c4f0c06726b1b0cda',
            'source_file': u'5893a72f4f0c06726b1b0d20',
            'air_temperature': 301.13597785977885,
            'eastward_wind': -0.3659132309673836,
            'surface_downwelling_photosynthetic_photon_flux_in_air': 152.14981549815525
        }}"""
        # Parse the interval start into a naive datetime (timestamps carry a
        # trailing 'Z', presumably UTC -- confirm with the API docs).
        start_time = datapoint['start_time']
        times.append(datetime.strptime(start_time, "%Y-%m-%dT%H:%M:%SZ"))
        p = datapoint['properties']
        wind_speeds.append(p['wind_speed'])
        precipitation_rate.append(p['precipitation_rate'])
        surface_downwelling_shortwave_flux_in_air.append(p['surface_downwelling_shortwave_flux_in_air'])
        northward_wind.append(p['northward_wind'])
        relative_humidity.append(p['relative_humidity'])
        air_temperature.append(p['air_temperature'])
        eastward_wind.append(p['eastward_wind'])
        surface_downwelling_photosynthetic_photon_flux_in_air.append(p['surface_downwelling_photosynthetic_photon_flux_in_air'])
    # One time-series figure per property.
    plt.figure(1)
    plt.title("Wind Speed")
    plt.plot(times, wind_speeds, 'b-')
    plt.figure(2)
    plt.title("Precipitation Rate")
    plt.plot(times, precipitation_rate, 'r-')
    plt.figure(3)
    plt.title("Surface Downwelling Shortwave Flux in Air")
    plt.plot(times, surface_downwelling_shortwave_flux_in_air, 'g-')
    plt.figure(4)
    plt.title("Northward Wind")
    plt.plot(times, northward_wind, 'c-')
    plt.figure(5)
    plt.title("Relative Humidity")
    plt.plot(times, relative_humidity, 'm-')
    plt.figure(6)
    plt.title("Air Temperature, K")
    plt.plot(times, air_temperature, 'y-')
    plt.figure(7)
    plt.title("Eastward Wind")
    plt.plot(times, eastward_wind, 'k-')
    plt.figure(8)
    plt.title("Surface Downwelling Photosynthetic Photon Flux in Air")
    plt.plot(times, surface_downwelling_photosynthetic_photon_flux_in_air, 'b-')
    plt.show()
else:
    print("no response")
| 35.484536 | 128 | 0.63713 |
f4ec8954080e34450fa52a61d61052d9f27457b9 | 54,095 | py | Python | custom/icds_reports/reports/awc_reports.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | custom/icds_reports/reports/awc_reports.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | custom/icds_reports/reports/awc_reports.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | from collections import OrderedDict
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta
from dateutil.rrule import MONTHLY, rrule, DAILY, WEEKLY, MO
from django.db.models import F
from django.db.models.aggregates import Sum, Avg
from django.utils.translation import ugettext as _
from corehq.util.view_utils import absolute_reverse
from custom.icds_reports.cache import icds_quickcache
from custom.icds_reports.messages import wasting_help_text, stunting_help_text, \
early_initiation_breastfeeding_help_text, exclusive_breastfeeding_help_text, \
children_initiated_appropriate_complementary_feeding_help_text, institutional_deliveries_help_text, \
percent_children_enrolled_help_text
from custom.icds_reports.models import AggAwcMonthly, DailyAttendanceView, \
AggChildHealthMonthly, AggAwcDailyView, AggCcsRecordMonthly, ChildHealthMonthlyView
from custom.icds_reports.models.views import CcsRecordMonthlyView
from custom.icds_reports.utils import apply_exclude, percent_diff, get_value, percent_increase, \
match_age, current_age, exclude_records_by_age_for_column, calculate_date_for_age, \
person_has_aadhaar_column, person_is_beneficiary_column, get_status, wasting_moderate_column, \
wasting_severe_column, stunting_moderate_column, stunting_severe_column, current_month_stunting_column, \
current_month_wasting_column, hfa_recorded_in_month_column, wfh_recorded_in_month_column, \
chosen_filters_to_labels, default_age_interval, get_anemic_status, get_symptoms, get_counseling, \
get_tt_dates, is_anemic, format_decimal, DATA_NOT_ENTERED, get_delivery_nature, get_color_with_green_positive,\
get_color_with_red_positive
from custom.icds_reports.const import MapColors
from custom.icds_reports.messages import new_born_with_low_weight_help_text
@icds_quickcache(['domain', 'config', 'month', 'prev_month', 'two_before', 'loc_level', 'show_test'], timeout=30 * 60)
def get_awc_reports_system_usage(domain, config, month, prev_month, two_before, loc_level, show_test=False):
    """Build the "System Usage" data for the AWC report.

    ``month``, ``prev_month`` and ``two_before`` are date component tuples
    (passed to ``datetime(*...)``). Returns a dict with ``kpi`` cards for
    the selected month (with deltas vs. the previous month) and two daily
    ``charts`` covering the window from ``two_before`` to ``month``.
    """
    def get_data_for(filters, date):
        # Aggregate AWC open days and weighing counts for one month.
        queryset = AggAwcMonthly.objects.filter(
            month=datetime(*date), **filters
        ).values(
            loc_level
        ).annotate(
            awc_open=Sum('awc_days_open'),
            weighed=Sum('wer_weighed'),
            all=Sum('wer_eligible'),
        )
        if not show_test:
            queryset = apply_exclude(domain, queryset)
        return queryset

    # Daily open-count/attendance series across the two-month chart window.
    chart_data = DailyAttendanceView.objects.filter(
        pse_date__range=(datetime(*two_before), datetime(*month)), **config
    ).values(
        'pse_date', 'aggregation_level'
    ).annotate(
        awc_count=Sum('awc_open_count'),
        attended_children=Avg('attended_children_percent')
    ).order_by('pse_date')

    if not show_test:
        chart_data = apply_exclude(domain, chart_data)

    awc_count_chart = []
    attended_children_chart = []
    for row in chart_data:
        date = row['pse_date']
        # strftime("%s") is a platform (glibc) extension yielding epoch
        # seconds from the naive local time; scaled to ms for the JS charts.
        date_in_milliseconds = int(date.strftime("%s")) * 1000
        awc_count_chart.append([date_in_milliseconds, row['awc_count']])
        attended_children_chart.append([date_in_milliseconds, row['attended_children'] or 0])

    this_month_data = get_data_for(config, month)
    prev_month_data = get_data_for(config, prev_month)

    return {
        'kpi': [
            [
                {
                    'label': _('AWC Days Open'),
                    'help_text': _((
                        "The total number of days the AWC is open in the given month. The AWC is expected to "
                        "be open 6 days a week (Not on Sundays and public holidays)")
                    ),
                    'percent': percent_increase(
                        'awc_open',
                        this_month_data,
                        prev_month_data,
                    ),
                    'value': get_value(this_month_data, 'awc_open'),
                    'all': '',
                    'format': 'number',
                    'frequency': 'month'
                },
                {
                    'label': _((
                        "Percentage of eligible children (ICDS beneficiaries between 0-6 years) "
                        "who have been weighed in the current month")
                    ),
                    # NOTE(review): this help text mentions functional
                    # toilets, which does not match the weighing label above
                    # -- looks copy-pasted; confirm with product/UI.
                    'help_text': _('Percentage of AWCs with a functional toilet'),
                    'percent': percent_diff(
                        'weighed',
                        this_month_data,
                        prev_month_data,
                        'all'
                    ),
                    'value': get_value(this_month_data, 'weighed'),
                    'all': get_value(this_month_data, 'all'),
                    'format': 'percent_and_div',
                    'frequency': 'month'
                }
            ]
        ],
        'charts': [
            [
                {
                    'key': 'AWC Days Open Per Week',
                    'values': awc_count_chart,
                    "classed": "dashed",
                }
            ],
            [
                {
                    'key': 'PSE- Average Weekly Attendance',
                    'values': attended_children_chart,
                    "classed": "dashed",
                }
            ]
        ],
    }
@icds_quickcache(['config', 'month', 'domain', 'show_test'], timeout=30 * 60)
def get_awc_reports_pse(config, month, domain, show_test=False):
    """Build the pre-school education (PSE) data for the AWC report.

    ``month`` is a date component tuple (passed to ``datetime(*month)``).
    Returns a dict with a days-open KPI (vs. the previous month), weekly
    open-count and daily attendance charts for the selected month, map
    markers for geotagged daily-attendance forms, and the month's PSE
    photos grouped into rows of four (one slot per calendar day).
    """
    selected_month = datetime(*month)
    last_months = (selected_month - relativedelta(months=1))
    last_day_of_selected_month = (selected_month + relativedelta(months=1)) - relativedelta(days=1)

    # Daily-attendance rows carrying form geolocation and photo references;
    # newest first so the latest form wins per day in the maps below.
    map_image_data = DailyAttendanceView.objects.filter(
        pse_date__range=(selected_month, last_day_of_selected_month), **config
    ).values(
        'awc_name', 'form_location_lat', 'form_location_long', 'image_name', 'doc_id', 'pse_date'
    ).order_by('-pse_date')

    # Days-open totals for the selected and the previous month (KPI delta).
    kpi_data_tm = AggAwcMonthly.objects.filter(
        month=selected_month, **config
    ).values('awc_name').annotate(
        days_open=Sum('awc_days_open')
    )
    kpi_data_lm = AggAwcMonthly.objects.filter(
        month=last_months, **config
    ).values('awc_name').annotate(
        days_open=Sum('awc_days_open')
    )

    open_count_data = DailyAttendanceView.objects.filter(
        pse_date__range=(selected_month, last_day_of_selected_month), **config
    ).values('awc_name', 'pse_date').annotate(
        open_count=Sum('awc_open_count'),
    ).order_by('pse_date')

    daily_attendance = DailyAttendanceView.objects.filter(
        pse_date__range=(selected_month, last_day_of_selected_month), **config
    ).values('awc_name', 'pse_date').annotate(
        avg_percent=Avg('attended_children_percent'),
        attended=Sum('attended_children'),
        eligible=Sum('eligible_children')
    )

    if not show_test:
        map_image_data = apply_exclude(domain, map_image_data)
        kpi_data_tm = apply_exclude(domain, kpi_data_tm)
        kpi_data_lm = apply_exclude(domain, kpi_data_lm)
        open_count_data = apply_exclude(domain, open_count_data)
        daily_attendance = apply_exclude(domain, daily_attendance)

    # Pre-seed one zeroed entry per day of the month (keys are epoch ms via
    # the glibc strftime("%s") extension) so days without data still plot.
    attended_children_chart = {}
    dates = [dt for dt in rrule(DAILY, dtstart=selected_month, until=last_day_of_selected_month)]
    for date in dates:
        attended_children_chart[int(date.strftime("%s")) * 1000] = {
            'avg_percent': 0,
            'attended': 0,
            'eligible': 0
        }

    # Pre-seed one zeroed bucket per week, keyed by the Monday of the week.
    open_count_chart = {}
    open_count_dates = [
        dt for dt in rrule(WEEKLY, dtstart=selected_month, until=last_day_of_selected_month, byweekday=MO)
    ]
    for date in open_count_dates:
        first_day_of_week = date - timedelta(days=date.isoweekday() - 1)
        milliseconds = int(first_day_of_week.strftime("%s")) * 1000
        open_count_chart[milliseconds] = 0

    # Accumulate per-day open counts into their week's bucket.
    for chart_row in open_count_data:
        first_day_of_week = chart_row['pse_date'] - timedelta(days=chart_row['pse_date'].isoweekday() - 1)
        pse_week = int(first_day_of_week.strftime("%s")) * 1000
        if pse_week in open_count_chart:
            open_count_chart[pse_week] += (chart_row['open_count'] or 0)
        else:
            open_count_chart[pse_week] = (chart_row['open_count'] or 0)

    for daily_attendance_row in daily_attendance:
        pse_day = int(daily_attendance_row['pse_date'].strftime("%s")) * 1000
        attended_children_chart[pse_day] = {
            'avg_percent': daily_attendance_row['avg_percent'] or 0,
            'attended': daily_attendance_row['attended'] or 0,
            'eligible': daily_attendance_row['eligible'] or 0
        }

    map_data = {}

    date_to_image_data = {}

    # Rows arrive newest-first, so for any given day the oldest form's photo
    # ends up stored last (later assignments overwrite earlier ones).
    for map_row in map_image_data:
        lat = map_row['form_location_lat']
        longitude = map_row['form_location_long']
        awc_name = map_row['awc_name']
        image_name = map_row['image_name']
        doc_id = map_row['doc_id']
        pse_date = map_row['pse_date']
        if lat and longitude:
            key = doc_id.replace('-', '')
            map_data.update({
                key: {
                    'lat': float(lat),
                    'lng': float(longitude),
                    'focus': 'true',
                    'message': awc_name,
                }
            })
        if image_name:
            date_str = pse_date.strftime("%d/%m/%Y")
            date_to_image_data[date_str] = map_row

    # Build one image slot per calendar day, grouped into rows of four.
    images = []
    tmp_image = []

    for idx, date in enumerate(rrule(DAILY, dtstart=selected_month, until=last_day_of_selected_month)):
        date_str = date.strftime("%d/%m/%Y")
        image_data = date_to_image_data.get(date_str)
        if image_data:
            image_name = image_data['image_name']
            doc_id = image_data['doc_id']
            tmp_image.append({
                'id': idx,
                'image': absolute_reverse('icds_image_accessor', args=(domain, doc_id, image_name)),
                'date': date_str
            })
        else:
            tmp_image.append({
                'id': idx,
                'image': None,
                'date': date_str
            })

        if (idx + 1) % 4 == 0:
            images.append(tmp_image)
            tmp_image = []

    if tmp_image:
        images.append(tmp_image)

    return {
        'kpi': [
            [
                {
                    'label': _('AWC Days Open'),
                    'help_text': _((
                        """
                        Total number of days the AWC is open in the given month.
                        The AWC is expected to be open 6 days a week (Not on Sundays and public holidays)
                        """
                    )),
                    'percent': percent_increase(
                        'days_open',
                        kpi_data_tm,
                        kpi_data_lm,
                    ),
                    'value': get_value(kpi_data_tm, 'days_open'),
                    'all': '',
                    'format': 'number',
                    'frequency': 'month',
                    'color': get_color_with_green_positive(percent_increase(
                        'days_open',
                        kpi_data_tm,
                        kpi_data_lm,
                    )),
                }
            ]
        ],
        'charts': [
            [
                {
                    'key': 'AWC Days Open per week',
                    'values': sorted([
                        dict(
                            x=x_val,
                            y=y_val
                        ) for x_val, y_val in open_count_chart.items()
                    ], key=lambda d: d['x']),
                    "strokeWidth": 2,
                    "classed": "dashed",
                    "color": MapColors.BLUE
                }
            ],
            [
                {
                    'key': 'PSE - Daily Attendance',
                    'values': sorted([
                        dict(
                            x=x_val,
                            y=y_val['avg_percent'],
                            attended=y_val['attended'],
                            eligible=y_val['eligible']
                        ) for x_val, y_val in attended_children_chart.items()
                    ], key=lambda d: d['x']),
                    "strokeWidth": 2,
                    "classed": "dashed",
                    "color": MapColors.BLUE
                },
            ]
        ],
        'map': {
            'markers': map_data,
        },
        'images': images
    }
@icds_quickcache(['domain', 'config', 'month', 'prev_month', 'show_test', 'icds_feature_flag'], timeout=30 * 60)
def get_awc_reports_maternal_child(domain, config, month, prev_month, show_test=False, icds_feature_flag=False):
def get_data_for(date):
age_filters = {'age_tranche': 72} if icds_feature_flag else {'age_tranche__in': [0, 6, 72]}
moderately_underweight = exclude_records_by_age_for_column(
{'age_tranche': 72},
'nutrition_status_moderately_underweight'
)
severely_underweight = exclude_records_by_age_for_column(
{'age_tranche': 72},
'nutrition_status_severely_underweight'
)
wasting_moderate = exclude_records_by_age_for_column(
age_filters,
wasting_moderate_column(icds_feature_flag)
)
wasting_severe = exclude_records_by_age_for_column(
age_filters,
wasting_severe_column(icds_feature_flag)
)
stunting_moderate = exclude_records_by_age_for_column(
age_filters,
stunting_moderate_column(icds_feature_flag)
)
stunting_severe = exclude_records_by_age_for_column(
age_filters,
stunting_severe_column(icds_feature_flag)
)
nutrition_status_weighed = exclude_records_by_age_for_column(
{'age_tranche': 72},
'nutrition_status_weighed'
)
height_measured_in_month = exclude_records_by_age_for_column(
age_filters,
hfa_recorded_in_month_column(icds_feature_flag)
)
weighed_and_height_measured_in_month = exclude_records_by_age_for_column(
age_filters,
wfh_recorded_in_month_column(icds_feature_flag)
)
queryset = AggChildHealthMonthly.objects.filter(
month=date, **config
).values(
'month', 'aggregation_level'
).annotate(
underweight=(
Sum(moderately_underweight) + Sum(severely_underweight)
),
valid_weighed=Sum(nutrition_status_weighed),
immunized=(
Sum('fully_immunized_on_time') + Sum('fully_immunized_late')
),
eligible=Sum('fully_immunized_eligible'),
wasting=Sum(wasting_moderate) + Sum(wasting_severe),
height_measured_in_month=Sum(height_measured_in_month),
weighed_and_height_measured_in_month=Sum(weighed_and_height_measured_in_month),
stunting=Sum(stunting_moderate) + Sum(stunting_severe),
low_birth=Sum('low_birth_weight_in_month'),
birth=Sum('bf_at_birth'),
born=Sum('born_in_month'),
weighed_and_born_in_month=Sum('weighed_and_born_in_month'),
month_ebf=Sum('ebf_in_month'),
ebf=Sum('ebf_eligible'),
month_cf=Sum('cf_initiation_in_month'),
cf=Sum('cf_initiation_eligible')
)
if not show_test:
queryset = apply_exclude(domain, queryset)
return queryset
def get_weight_efficiency(date):
queryset = AggAwcMonthly.objects.filter(
month=date, **config
).values(
'month', 'aggregation_level', 'awc_name'
).annotate(
wer_weight=Sum('wer_weighed'),
wer_eli=Sum('wer_eligible')
)
if not show_test:
queryset = apply_exclude(domain, queryset)
return queryset
def get_institutional_delivery_data(date):
queryset = AggCcsRecordMonthly.objects.filter(
month=date, **config
).values(
'month', 'aggregation_level', 'awc_name'
).annotate(
institutional_delivery_in_month_sum=Sum('institutional_delivery_in_month'),
delivered_in_month_sum=Sum('delivered_in_month')
)
if not show_test:
queryset = apply_exclude(domain, queryset)
return queryset
this_month_data = get_data_for(datetime(*month))
prev_month_data = get_data_for(datetime(*prev_month))
this_month_data_we = get_weight_efficiency(datetime(*month))
prev_month_data_we = get_weight_efficiency(datetime(*prev_month))
this_month_institutional_delivery_data = get_institutional_delivery_data(datetime(*month))
prev_month_institutional_delivery_data = get_institutional_delivery_data(datetime(*prev_month))
gender_label, age_label, chosen_filters = chosen_filters_to_labels(
config,
default_interval=default_age_interval(icds_feature_flag)
)
return {
'kpi': [
[
{
'label': _('Underweight (Weight-for-Age)'),
'help_text': _((
"Of the total children weighed, the percentage of children between 0-5 years who were "
"moderately/severely underweight in the current month. Children who are moderately or "
"severely underweight have a higher risk of mortality. "
)),
'percent': percent_diff(
'underweight',
this_month_data,
prev_month_data,
'valid_weighed'
),
'color': get_color_with_red_positive(percent_diff(
'underweight',
this_month_data,
prev_month_data,
'valid_weighed'
)),
'value': get_value(this_month_data, 'underweight'),
'all': get_value(this_month_data, 'valid_weighed'),
'format': 'percent_and_div',
'frequency': 'month'
},
{
'label': _('Wasting (Weight-for-Height)'),
'help_text': wasting_help_text(age_label),
'percent': percent_diff(
'wasting',
this_month_data,
prev_month_data,
'weighed_and_height_measured_in_month'
),
'color': get_color_with_red_positive(percent_diff(
'wasting',
this_month_data,
prev_month_data,
'weighed_and_height_measured_in_month'
)),
'value': get_value(this_month_data, 'wasting'),
'all': get_value(this_month_data, 'weighed_and_height_measured_in_month'),
'format': 'percent_and_div',
'frequency': 'month'
},
],
[
{
'label': _('Stunting (Height-for-Age)'),
'help_text': stunting_help_text(age_label),
'percent': percent_diff(
'stunting',
this_month_data,
prev_month_data,
'height_measured_in_month'
),
'color': get_color_with_red_positive(percent_diff(
'stunting',
this_month_data,
prev_month_data,
'height_measured_in_month'
)),
'value': get_value(this_month_data, 'stunting'),
'all': get_value(this_month_data, 'height_measured_in_month'),
'format': 'percent_and_div',
'frequency': 'month'
},
{
'label': _('Weighing Efficiency'),
'help_text': _(
"Of the children between the ages of 0-5 years who are enrolled for Anganwadi Services, "
"the percentage who were weighed in the given month. "
),
'percent': percent_diff(
'wer_weight',
this_month_data_we,
prev_month_data_we,
'wer_eli'
),
'color': get_color_with_green_positive(percent_diff(
'wer_weight',
this_month_data_we,
prev_month_data_we,
'wer_eli'
)),
'value': get_value(this_month_data_we, 'wer_weight'),
'all': get_value(this_month_data_we, 'wer_eli'),
'format': 'percent_and_div',
'frequency': 'month'
},
],
[
{
'label': _('Newborns with Low Birth Weight'),
'help_text': _(
new_born_with_low_weight_help_text(html=False)
),
'percent': percent_diff(
'low_birth',
this_month_data,
prev_month_data,
'weighed_and_born_in_month'
),
'color': get_color_with_red_positive(percent_diff(
'low_birth',
this_month_data,
prev_month_data,
'weighed_and_born_in_month'
)),
'value': get_value(this_month_data, 'low_birth'),
'all': get_value(this_month_data, 'weighed_and_born_in_month'),
'format': 'percent_and_div',
'frequency': 'month'
},
{
'label': _('Early Initiation of Breastfeeding'),
'help_text': early_initiation_breastfeeding_help_text(),
'percent': percent_diff(
'birth',
this_month_data,
prev_month_data,
'born'
),
'color': get_color_with_green_positive(percent_diff(
'birth',
this_month_data,
prev_month_data,
'born'
)),
'value': get_value(this_month_data, 'birth'),
'all': get_value(this_month_data, 'born'),
'format': 'percent_and_div',
'frequency': 'month'
},
],
[
{
'label': _('Exclusive breastfeeding'),
'help_text': exclusive_breastfeeding_help_text(),
'percent': percent_diff(
'month_ebf',
this_month_data,
prev_month_data,
'ebf'
),
'color': get_color_with_green_positive(percent_diff(
'month_ebf',
this_month_data,
prev_month_data,
'ebf'
)),
'value': get_value(this_month_data, 'month_ebf'),
'all': get_value(this_month_data, 'ebf'),
'format': 'percent_and_div',
'frequency': 'month'
},
{
'label': _('Children initiated appropriate Complementary Feeding'),
'help_text': children_initiated_appropriate_complementary_feeding_help_text(),
'percent': percent_diff(
'month_cf',
this_month_data,
prev_month_data,
'cf'
),
'color': get_color_with_green_positive(percent_diff(
'month_cf',
this_month_data,
prev_month_data,
'cf'
)),
'value': get_value(this_month_data, 'month_cf'),
'all': get_value(this_month_data, 'cf'),
'format': 'percent_and_div',
'frequency': 'month'
},
],
[
{
'label': _('Immunization Coverage (at age 1 year)'),
'help_text': _((
"Of the total number of children enrolled for Anganwadi Services who are over a year old, "
"the percentage of children who have received the complete immunization as per the "
"National Immunization Schedule of India that is required by age 1."
"<br/><br/> "
"This includes the following immunizations:<br/> "
"If Pentavalent path: Penta1/2/3, OPV1/2/3, BCG, Measles, VitA1<br/> "
"If DPT/HepB path: DPT1/2/3, HepB1/2/3, OPV1/2/3, BCG, Measles, VitA1"
)),
'percent': percent_diff(
'immunized',
this_month_data,
prev_month_data,
'eligible'
),
'color': get_color_with_green_positive(percent_diff(
'immunized',
this_month_data,
prev_month_data,
'eligible'
)),
'value': get_value(this_month_data, 'immunized'),
'all': get_value(this_month_data, 'eligible'),
'format': 'percent_and_div',
'frequency': 'month'
},
{
'label': _('Institutional Deliveries'),
'help_text': institutional_deliveries_help_text(),
'percent': percent_diff(
'institutional_delivery_in_month_sum',
this_month_institutional_delivery_data,
prev_month_institutional_delivery_data,
'delivered_in_month_sum'
),
'color': get_color_with_green_positive(percent_diff(
'institutional_delivery_in_month_sum',
this_month_institutional_delivery_data,
prev_month_institutional_delivery_data,
'delivered_in_month_sum'
)),
'value': get_value(
this_month_institutional_delivery_data,
'institutional_delivery_in_month_sum'
),
'all': get_value(this_month_institutional_delivery_data, 'delivered_in_month_sum'),
'format': 'percent_and_div',
'frequency': 'month'
},
]
]
}
@icds_quickcache(['domain', 'config', 'now_date', 'month', 'show_test', 'beta'], timeout=30 * 60)
def get_awc_report_demographics(domain, config, now_date, month, show_test=False, beta=False):
    """Build the Demographics tab of the AWC report.

    Returns a dict with two keys:
      * ``chart`` -- counts of valid children (0-6 years) bucketed into
        fixed age bands for the selected month.
      * ``kpi`` -- a 3x2 grid of demographic indicators, each compared
        against the previous period.

    :param domain: domain name, used by ``apply_exclude`` to drop test
        locations from the aggregates.
    :param config: location filters.  NOTE: mutated in place below (a
        ``date`` or ``month`` key is added) -- callers must not reuse it.
    :param now_date: "today" as a ``(year, month, day)`` tuple.
    :param month: selected month as a ``(year, month, day)`` tuple.
    :param show_test: when True, keep test locations in the results.
    :param beta: selects which aadhaar/beneficiary column names to sum.
    """
    selected_month = datetime(*month)
    now_date = datetime(*now_date)

    # Age distribution of valid children, grouped by age tranche (months).
    chart = AggChildHealthMonthly.objects.filter(
        month=selected_month, **config
    ).values(
        'age_tranche', 'aggregation_level'
    ).annotate(
        valid=Sum('valid_in_month')
    ).order_by('age_tranche')
    if not show_test:
        chart = apply_exclude(domain, chart)

    # Fixed, ordered chart buckets; fine-grained age tranches are folded
    # into these coarser human-readable bands via match_age().
    chart_data = OrderedDict()
    chart_data.update({'0-1 month': 0})
    chart_data.update({'1-6 months': 0})
    chart_data.update({'6-12 months': 0})
    chart_data.update({'1-3 years': 0})
    chart_data.update({'3-6 years': 0})
    for chart_row in chart:
        if chart_row['age_tranche']:
            age = int(chart_row['age_tranche'])
            valid = chart_row['valid']
            chart_data[match_age(age)] += valid

    def get_data_for(query_class, filters):
        # Sum the demographic case counts from either the daily or the
        # monthly aggregate table (query_class) under the given filters.
        queryset = query_class.objects.filter(
            **filters
        ).values(
            'aggregation_level'
        ).annotate(
            household=Sum('cases_household'),
            child_health=Sum('cases_child_health'),
            child_health_all=Sum('cases_child_health_all'),
            ccs_pregnant=Sum('cases_ccs_pregnant'),
            ccs_pregnant_all=Sum('cases_ccs_pregnant_all'),
            css_lactating=Sum('cases_ccs_lactating'),
            css_lactating_all=Sum('cases_ccs_lactating_all'),
            person_adolescent=Sum('cases_person_adolescent_girls_11_14'),
            person_adolescent_all=Sum('cases_person_adolescent_girls_11_14_all'),
            person_aadhaar=Sum(person_has_aadhaar_column(beta)),
            all_persons=Sum(person_is_beneficiary_column(beta))
        )
        if not show_test:
            queryset = apply_exclude(domain, queryset)
        return queryset

    previous_month = selected_month - relativedelta(months=1)
    if selected_month.month == now_date.month and selected_month.year == now_date.year:
        # The selected month is still in progress: monthly aggregates are
        # incomplete, so use the daily aggregates instead, walking back one
        # day at a time until a day with data is found (never past the 1st).
        config['date'] = now_date.date()
        data = None
        # keep the record in searched - current - month
        while data is None or (not data and config['date'].day != 1):
            config['date'] -= relativedelta(days=1)
            data = get_data_for(AggAwcDailyView, config)
        prev_data = None
        while prev_data is None or (not prev_data and config['date'].day != 1):
            config['date'] -= relativedelta(days=1)
            prev_data = get_data_for(AggAwcDailyView, config)
        frequency = 'day'
    else:
        # A past month was selected: the monthly aggregates are complete.
        config['month'] = selected_month
        data = get_data_for(AggAwcMonthly, config)
        config['month'] = previous_month
        prev_data = get_data_for(AggAwcMonthly, config)
        frequency = 'month'

    return {
        'chart': [
            {
                'key': 'Children (0-6 years)',
                'values': [[key, value] for key, value in chart_data.items()],
                "classed": "dashed",
            }
        ],
        'kpi': [
            [
                {
                    'label': _('Registered Households'),
                    'help_text': _("Total number of households registered"),
                    'percent': percent_increase(
                        'household',
                        data,
                        prev_data,
                    ),
                    'color': get_color_with_green_positive(percent_increase(
                        'household',
                        data,
                        prev_data)),
                    'value': get_value(data, 'household'),
                    'all': '',
                    'format': 'number',
                    'frequency': frequency
                },
                {
                    'label': _('Percent Aadhaar-seeded Beneficiaries'),
                    'help_text': _(
                        'Of the total number of ICDS beneficiaries, the percentage whose Adhaar identification '
                        'has been captured. '
                    ),
                    'percent': percent_diff(
                        'person_aadhaar',
                        data,
                        prev_data,
                        'all_persons'
                    ),
                    'color': get_color_with_green_positive(percent_diff(
                        'person_aadhaar',
                        data,
                        prev_data,
                        'all_persons'
                    )),
                    'value': get_value(data, 'person_aadhaar'),
                    'all': get_value(data, 'all_persons'),
                    'format': 'percent_and_div',
                    'frequency': frequency
                }
            ],
            [
                {
                    'label': _('Percent children (0-6 years) enrolled for Anganwadi Services'),
                    'help_text': percent_children_enrolled_help_text(),
                    'percent': percent_diff('child_health', data, prev_data, 'child_health_all'),
                    # BUGFIX: the color previously compared 'child_health_all'
                    # against itself (always a 0% change); use 'child_health'
                    # so color matches the displayed percent, consistent with
                    # every other KPI in this grid.
                    'color': get_color_with_green_positive(percent_diff(
                        'child_health',
                        data,
                        prev_data, 'child_health_all')),
                    'value': get_value(data, 'child_health'),
                    'all': get_value(data, 'child_health_all'),
                    'format': 'percent_and_div',
                    'frequency': frequency,
                },
                {
                    'label': _('Percent pregnant women enrolled for Anganwadi Services'),
                    'help_text': _('Of the total number of pregnant women, the percentage of pregnant '
                                   'women enrolled for Anganwadi Services'),
                    'percent': percent_diff('ccs_pregnant', data, prev_data, 'ccs_pregnant_all'),
                    'color': get_color_with_green_positive(percent_diff(
                        'ccs_pregnant',
                        data,
                        prev_data,
                        'ccs_pregnant_all'
                    )),
                    'value': get_value(data, 'ccs_pregnant'),
                    'all': get_value(data, 'ccs_pregnant_all'),
                    'format': 'percent_and_div',
                    'frequency': frequency
                }
            ],
            [
                {
                    'label': _('Percent lactating women enrolled for Anganwadi Services'),
                    'help_text': _('Of the total number of lactating women, the percentage of '
                                   'lactating women enrolled for Anganwadi Services'),
                    'percent': percent_diff('css_lactating', data, prev_data, 'css_lactating_all'),
                    'color': get_color_with_green_positive(percent_diff(
                        'css_lactating',
                        data,
                        prev_data,
                        'css_lactating_all'
                    )),
                    'value': get_value(data, 'css_lactating'),
                    'all': get_value(data, 'css_lactating_all'),
                    'format': 'percent_and_div',
                    'frequency': frequency
                },
                {
                    'label': _('Percent adolescent girls (11-14 years) enrolled for Anganwadi Services'),
                    'help_text': _((
                        "Of the total number of adolescent girls (aged 11-14 years), the percentage "
                        "of girls enrolled for Anganwadi Services"
                    )),
                    'percent': percent_diff(
                        'person_adolescent',
                        data,
                        prev_data,
                        'person_adolescent_all'
                    ),
                    'color': get_color_with_green_positive(percent_diff(
                        'person_adolescent',
                        data,
                        prev_data,
                        'person_adolescent_all'
                    )),
                    'value': get_value(data, 'person_adolescent'),
                    'all': get_value(data, 'person_adolescent_all'),
                    'format': 'percent_and_div',
                    'frequency': frequency
                }
            ]
        ]
    }
@icds_quickcache(['domain', 'config', 'month', 'show_test', 'beta'], timeout=30 * 60)
def get_awc_report_infrastructure(domain, config, month, show_test=False, beta=False):
    """Build the Infrastructure tab of the AWC report.

    Returns a dict with a single ``kpi`` key: a grid of availability labels
    ("Available" / "Not Available" / data-not-entered) for the AWC's
    infrastructure items in the selected month.

    :param domain: domain name, used by ``apply_exclude`` to drop test
        locations when ``show_test`` is False.
    :param config: location filters applied to the monthly aggregate.
    :param month: selected month as a ``(year, month, day)`` tuple.
    :param show_test: when True, keep test locations in the results.
    :param beta: switches how "data not entered" is detected (see
        ``get_infa_value`` below).
    """
    selected_month = datetime(*month)

    def get_data_for_kpi(filters, date):
        # Sum each infrastructure flag for the given month; for a single AWC
        # the sums behave as 0/1 indicator values.
        queryset = AggAwcMonthly.objects.filter(
            month=date, **filters
        ).values(
            'aggregation_level'
        ).annotate(
            clean_water=Sum('infra_clean_water'),
            functional_toilet=Sum('infra_functional_toilet'),
            medicine_kits=Sum('infra_medicine_kits'),
            infant_weighing_scale=Sum('infra_infant_weighing_scale'),
            adult_weighing_scale=Sum('infra_adult_weighing_scale'),
            num_awc_infra_last_update=Sum('num_awc_infra_last_update'),
        )
        if not show_test:
            queryset = apply_exclude(domain, queryset)
        return queryset

    def get_infa_value(data, prop):
        # Translate the aggregated flag into a display label.
        if beta:
            # Beta path: only trust the flag if the AWC actually submitted an
            # infrastructure update (num_awc_infra_last_update is truthy).
            value = data[0][prop] if data and data[0]['num_awc_infra_last_update'] else None
        else:
            # Legacy path: a 0/None flag is folded into None here, so a
            # recorded "0" also renders as DATA_NOT_ENTERED rather than
            # "Not Available" -- presumably intentional; confirm before changing.
            value = (data[0][prop] or None) if data else None
        if value is not None:
            if value == 1:
                return _("Available")
            else:
                return _("Not Available")
        else:
            return _(DATA_NOT_ENTERED)

    kpi_data = get_data_for_kpi(config, selected_month.date())
    return {
        'kpi': [
            [
                {
                    'label': _('Clean Drinking Water'),
                    'help_text': None,
                    'value': get_infa_value(kpi_data, 'clean_water'),
                    'all': '',
                    'format': 'string',
                    'show_percent': False,
                    'frequency': 'month'
                },
                {
                    'label': _('Functional Toilet'),
                    'help_text': None,
                    'value': get_infa_value(kpi_data, 'functional_toilet'),
                    'all': '',
                    'format': 'string',
                    'show_percent': False,
                    'frequency': 'month'
                }
            ],
            [
                {
                    'label': _('Weighing Scale: Infants'),
                    'help_text': None,
                    'value': get_infa_value(kpi_data, 'infant_weighing_scale'),
                    'all': '',
                    'format': 'string',
                    'show_percent': False,
                    'frequency': 'month'
                },
                {
                    'label': _('AWCs with Weighing Scale: Mother and Child'),
                    'help_text': None,
                    'value': get_infa_value(kpi_data, 'adult_weighing_scale'),
                    'all': '',
                    'format': 'string',
                    'show_percent': False,
                    'frequency': 'month'
                }
            ],
            [
                {
                    'label': _('Medicine Kit'),
                    'help_text': None,
                    'value': get_infa_value(kpi_data, 'medicine_kits'),
                    'all': '',
                    'format': 'string',
                    'show_percent': False,
                    'frequency': 'month'
                }
            ],
        ]
    }
@icds_quickcache([
    'start', 'length', 'draw', 'order', 'filters', 'month', 'two_before', 'icds_features_flag'
], timeout=30 * 60)
def get_awc_report_beneficiary(start, length, draw, order, filters, month, two_before,
                               icds_features_flag):
    """Return one page of the AWC child-beneficiary table (DataTables shape).

    :param start: zero-based offset of the page.
    :param length: page size.
    :param draw: DataTables draw counter, echoed back unchanged.
    :param order: field name (optionally '-' prefixed) to order by.
    :param filters: base queryset filters; NOTE: mutated in place below.
    :param month: selected month as a (year, month, day) tuple.
    :param two_before: (year, month, day) tuple two months before `month`;
        used to build the list of month labels.
    :param icds_features_flag: selects which stunting/wasting columns to read.
    """
    # Restrict to children open and valid in the selected month.
    filters['month'] = datetime(*month)
    filters['open_in_month'] = 1
    filters['valid_in_month'] = 1
    # Default age cap of 72 months unless an explicit age range was given.
    if filters.get('age_in_months__range') is None:
        filters['age_in_months__lte'] = 72
    data = ChildHealthMonthlyView.objects.filter(
        **filters
    ).order_by(order)
    # Count before slicing so the totals reflect the full result set.
    data_count = data.count()
    data = data[start:(start + length)]
    config = {
        'data': [],
        # Month labels from `two_before` up to `month`, newest first.
        'months': [
            dt.strftime("%b %Y") for dt in rrule(
                MONTHLY,
                dtstart=datetime(*two_before),
                until=datetime(*month)
            )
        ][::-1],
        'last_month': datetime(*month).strftime("%b %Y"),
    }

    def base_data(row_data):
        # Flatten one ChildHealthMonthlyView row into the table-row dict.
        return dict(
            case_id=row_data.case_id,
            person_name=row_data.person_name,
            dob=row_data.dob,
            age=calculate_date_for_age(row_data.dob, datetime(*month).date()),
            fully_immunized='Yes' if row_data.fully_immunized else 'No',
            age_in_months=row_data.age_in_months,
            current_month_nutrition_status=get_status(
                row_data.current_month_nutrition_status,
                'underweight',
                'Normal weight for age'
            ),
            recorded_weight=row_data.recorded_weight or 0,
            recorded_height=row_data.recorded_height or 0,
            current_month_stunting=get_status(
                getattr(row_data, current_month_stunting_column(icds_features_flag)),
                'stunted',
                'Normal height for age',
                # Stunting needs a height measurement to be meaningful.
                data_entered=True if row_data.recorded_height else False
            ),
            current_month_wasting=get_status(
                getattr(row_data, current_month_wasting_column(icds_features_flag)),
                'wasted',
                'Normal weight for height',
                # Wasting needs both height and weight measurements.
                data_entered=True if row_data.recorded_height and row_data.recorded_weight else False
            ),
            pse_days_attended=row_data.pse_days_attended,
            mother_phone_number=row_data.mother_phone_number,
            aww_phone_number=row_data.aww_phone_number
        )
    for row in data:
        config['data'].append(base_data(row))
    # DataTables protocol fields.
    config["draw"] = draw
    config["recordsTotal"] = data_count
    config["recordsFiltered"] = data_count
    return config
@icds_quickcache(['case_id', 'awc_id', 'selected_month'], timeout=30 * 60)
def get_beneficiary_details(case_id, awc_id, selected_month):
    """Return growth-chart data for one child over the last six months.

    Builds three point series for children up to 60 months old:
      * ``weight`` -- (age in months, recorded weight)
      * ``height`` -- (age in months, recorded height)
      * ``wfl``    -- weight-for-length: (recorded height, recorded weight),
        only for heights within the plot range [35, 120] cm
    plus the child's identity fields from the most recent row seen.

    :param case_id: child_health case id.
    :param awc_id: AWC the case belongs to.
    :param selected_month: (year, month, day) tuple of the selected month.
    """
    selected_month = datetime(*selected_month)
    six_month_before = selected_month - relativedelta(months=6)
    data = ChildHealthMonthlyView.objects.filter(
        case_id=case_id,
        awc_id=awc_id,
        month__range=(six_month_before, selected_month)
    ).order_by('month')
    # Height bounds of the weight-for-length chart, in cm.
    min_height = 35
    max_height = 120.0
    beneficiary = {
        'weight': [],
        'height': [],
        'wfl': []
    }
    for row in data:
        age_in_months = row.age_in_months
        recorded_weight = row.recorded_weight
        recorded_height = row.recorded_height
        # Identity fields are overwritten each iteration, so the latest
        # (most recent month) values win.
        beneficiary.update({
            'person_name': row.person_name,
            'mother_name': row.mother_name,
            'dob': row.dob,
            'age': current_age(row.dob, datetime.now().date()),
            'sex': row.sex,
            'age_in_months': age_in_months,
        })
        # Growth charts only cover children up to 5 years (60 months).
        if age_in_months <= 60:
            if recorded_weight:
                beneficiary['weight'].append({
                    'x': int(age_in_months),
                    'y': float(recorded_weight)
                })
            if recorded_height:
                beneficiary['height'].append({
                    'x': int(age_in_months),
                    'y': float(recorded_height)
                })
            if recorded_height and min_height <= recorded_height <= max_height:
                beneficiary['wfl'].append({
                    'x': float(recorded_height),
                    # BUGFIX: the guard previously re-tested recorded_height
                    # (always truthy in this branch), so a row with a height
                    # but no weight crashed on float(None).  Guard on the
                    # weight itself and fall back to 0 as intended.
                    'y': float(recorded_weight) if recorded_weight else 0
                })
    return beneficiary
@icds_quickcache([
    'start', 'length', 'order', 'reversed_order', 'awc_id'
], timeout=30 * 60)
def get_awc_report_pregnant(start, length, order, reversed_order, awc_id):
    """Return one page of the AWC pregnant-women table.

    Uses the latest fully-available month (yesterday's month, day 1) and
    sorts/pages in Python after flattening the rows.

    :param start: zero-based offset of the page.
    :param length: page size.
    :param order: key of the flattened row dict to sort by.
    :param reversed_order: True for descending sort.
    :param awc_id: the AWC whose cases are listed.
    """
    # Data is available up to yesterday; normalise to that month's 1st.
    latest_available_month = date.today() - timedelta(days=1)
    query_month = latest_available_month.replace(day=1)
    # One row per case (latest month first within each case).
    data = CcsRecordMonthlyView.objects.filter(
        awc_id=awc_id,
        month=query_month,
        pregnant_all=1,
    ).order_by('case_id', '-month').distinct('case_id').values(
        'case_id', 'person_name', 'age_in_months', 'opened_on', 'edd', 'trimester', 'anemic_severe',
        'anemic_moderate', 'anemic_normal', 'anemic_unknown', 'num_anc_complete', 'pregnant_all',
        'num_rations_distributed', 'last_date_thr', 'month', 'closed', 'open_in_month', 'pregnant'
    ).exclude(open_in_month=False)
    data_count = data.count()
    config = {
        'data': [],
    }

    def base_data(row_data):
        # Flatten one queryset row into the table-row dict.
        return dict(
            case_id=row_data['case_id'],
            person_name=row_data['person_name'],
            # age_in_months -> whole years; falsy values pass through as-is.
            age=row_data['age_in_months'] // 12 if row_data['age_in_months'] else row_data['age_in_months'],
            closed=row_data['closed'],
            opened_on=row_data['opened_on'],
            edd=row_data['edd'],
            trimester=row_data['trimester'],
            anemic=is_anemic(row_data),
            num_anc_complete=row_data['num_anc_complete'],
            beneficiary='Yes' if row_data['pregnant'] else 'No',
            number_of_thrs_given=row_data['num_rations_distributed'],
            last_date_thr=row_data['last_date_thr'],
        )
    for row in data:
        config['data'].append(base_data(row))

    def ordering_format(record):
        # Sort key that substitutes a type-appropriate default for missing
        # values so mixed None/value columns still sort without a TypeError.
        if record[order]:
            return record[order]
        numeric_fields = ['age', 'closed', 'trimester', 'num_anc_complete', 'number_of_thrs_given']
        if any([field in order for field in numeric_fields]):
            return 0
        date_fields = ['opened_on', 'edd', 'last_date_thr']
        if any([field in order for field in date_fields]):
            return date.today()
        return ""
    # Sort and page in Python (the whole result set was materialised above).
    config['data'].sort(key=ordering_format, reverse=reversed_order)
    config['data'] = config['data'][start:(start + length)]
    config["recordsTotal"] = data_count
    config["recordsFiltered"] = data_count
    return config
@icds_quickcache(['case_id', 'awc_id'], timeout=30 * 60)
def get_pregnant_details(case_id, awc_id):
    """Return home-visit details for one pregnancy, grouped by trimester.

    ``config['data']`` is a 3-element list (one bucket per trimester) of
    per-visit dicts; ``config['pregnant']`` holds the woman's summary
    fields, taken from the first visit row seen or, if there were no visit
    rows at all, from a fallback query on the case itself.

    :param case_id: ccs_record case id.
    :param awc_id: AWC the case belongs to.
    """
    ten_months_ago = date.today() - relativedelta(months=10, day=1)
    # One row per distinct home visit in the last ten months; the visit
    # must fall within (roughly) the month it was recorded against.
    data = CcsRecordMonthlyView.objects.filter(
        case_id=case_id,
        awc_id=awc_id,
        month__gte=ten_months_ago,
        home_visit_date__lte=F('month') + timedelta(days=31),
    ).order_by('home_visit_date', '-month').distinct('home_visit_date').values(
        'case_id', 'trimester', 'person_name', 'age_in_months', 'mobile_number', 'edd', 'opened_on', 'preg_order',
        'home_visit_date', 'bp_sys', 'bp_dia', 'anc_weight', 'anc_hemoglobin', 'anemic_severe', 'anemic_moderate',
        'anemic_normal', 'anemic_unknown', 'bleeding', 'swelling', 'blurred_vision', 'convulsions', 'rupture',
        'eating_extra', 'resting', 'immediate_breastfeeding', 'using_ifa',
        'ifa_consumed_last_seven_days', 'tt_1', 'tt_2', 'month', 'anc_abnormalities'
    )
    config = {
        'data': [
            [],  # trimester 1
            [],  # trimester 2
            [],  # trimester 3
        ],
    }
    for row_data in data:
        # Bucket the visit under its trimester (1-based -> 0-based index).
        config['data'][row_data['trimester'] - 1].append(
            dict(
                case_id=row_data['case_id'],
                trimester=row_data['trimester'] if row_data['trimester'] else DATA_NOT_ENTERED,
                person_name=row_data['person_name'] if row_data['person_name'] else DATA_NOT_ENTERED,
                # age_in_months -> whole years; falsy values pass through.
                age=row_data['age_in_months'] // 12 if row_data['age_in_months'] else row_data['age_in_months'],
                mobile_number=row_data['mobile_number'] if row_data['mobile_number'] else DATA_NOT_ENTERED,
                edd=row_data['edd'] if row_data['edd'] else DATA_NOT_ENTERED,
                opened_on=row_data['opened_on'] if row_data['opened_on'] else DATA_NOT_ENTERED,
                preg_order=row_data['preg_order'] if row_data['preg_order'] else DATA_NOT_ENTERED,
                home_visit_date=row_data['home_visit_date'] if row_data['home_visit_date'] else DATA_NOT_ENTERED,
                # Blood pressure rendered as "sys / dia", with per-part fallback.
                bp=DATA_NOT_ENTERED if not row_data['bp_sys'] and not row_data['bp_dia'] else '{} / {}'.format(
                    row_data['bp_sys'] if row_data['bp_sys'] else DATA_NOT_ENTERED,
                    row_data['bp_dia'] if row_data['bp_dia'] else DATA_NOT_ENTERED,
                ),
                anc_weight=row_data['anc_weight'] if row_data['anc_weight'] else DATA_NOT_ENTERED,
                anc_hemoglobin=format_decimal(
                    row_data['anc_hemoglobin']
                ) if row_data['anc_hemoglobin'] else DATA_NOT_ENTERED,
                anc_abnormalities='Yes' if row_data['anc_abnormalities'] else 'None',
                anemic=get_anemic_status(row_data),
                symptoms=get_symptoms(row_data),
                counseling=get_counseling(row_data),
                using_ifa='Y' if row_data['using_ifa'] else 'N',
                ifa_consumed_last_seven_days='Y' if row_data['ifa_consumed_last_seven_days'] else 'N',
                tt_taken='Y' if get_tt_dates(row_data) != 'None' else 'N',
                tt_date=get_tt_dates(row_data),
            )
        )
        # Capture the summary block once, from the first visit row.
        if not config.get('pregnant', None):
            config['pregnant'] = {
                'person_name': row_data['person_name'] if row_data['person_name'] else DATA_NOT_ENTERED,
                'age': row_data['age_in_months'] // 12 if row_data['age_in_months'] else row_data['age_in_months'],
                'mobile_number': row_data['mobile_number'] if row_data['mobile_number'] else DATA_NOT_ENTERED,
                'edd': row_data['edd'] if row_data['edd'] else DATA_NOT_ENTERED,
                'opened_on': row_data['opened_on'] if row_data['opened_on'] else DATA_NOT_ENTERED,
                'trimester': row_data['trimester'] if row_data['trimester'] else DATA_NOT_ENTERED,
                'preg_order': row_data['preg_order'] if row_data['preg_order'] else DATA_NOT_ENTERED,
            }
    # Fallback: no home-visit rows at all, so fetch the summary straight
    # from the case's latest monthly record instead.
    if not config.get('pregnant', None):
        row_data = CcsRecordMonthlyView.objects.filter(
            case_id=case_id,
            awc_id=awc_id,
            month__gte=ten_months_ago,
        ).order_by('case_id', '-month').distinct('case_id').values(
            'case_id', 'trimester', 'person_name', 'age_in_months', 'mobile_number', 'edd', 'opened_on',
            'preg_order', 'home_visit_date'
        ).first()
        # NOTE(review): .first() may return None if the case has no rows in
        # range at all, which would raise here -- confirm callers guarantee
        # at least one monthly record.
        config['pregnant'] = {
            'person_name': row_data['person_name'] if row_data['person_name'] else DATA_NOT_ENTERED,
            'age': row_data['age_in_months'] // 12 if row_data['age_in_months'] else row_data['age_in_months'],
            'mobile_number': row_data['mobile_number'] if row_data['mobile_number'] else DATA_NOT_ENTERED,
            'edd': row_data['edd'] if row_data['edd'] else DATA_NOT_ENTERED,
            'opened_on': row_data['opened_on'] if row_data['opened_on'] else DATA_NOT_ENTERED,
            'trimester': row_data['trimester'] if row_data['trimester'] else DATA_NOT_ENTERED,
            'preg_order': row_data['preg_order'] if row_data['preg_order'] else DATA_NOT_ENTERED,
        }
    return config
@icds_quickcache([
    'start', 'length', 'order', 'reversed_order', 'awc_id'
], timeout=30 * 60)
def get_awc_report_lactating(start, length, order, reversed_order, awc_id):
    """Return one page of the AWC lactating-women table.

    Two-step query: first find case ids that are lactating, alive and open
    for the latest available month, then fetch the display columns for just
    those cases.  Sorting and paging happen in Python.

    :param start: zero-based offset of the page.
    :param length: page size.
    :param order: key of the flattened row dict to sort by.
    :param reversed_order: True for descending sort.
    :param awc_id: the AWC whose cases are listed.
    """
    # Data is available up to yesterday; normalise to that month's 1st.
    latest_available_month = date.today() - timedelta(days=1)
    first_day_month = latest_available_month.replace(day=1)
    # Step 1: identify eligible cases (lactating, alive, open).
    data = CcsRecordMonthlyView.objects.filter(
        awc_id=awc_id,
        month=first_day_month,
    ).order_by('case_id', '-month').distinct('case_id').values(
        'case_id', 'lactating', 'open_in_month', 'date_death'
    ).filter(lactating=1, date_death=None).exclude(open_in_month=False)
    case_ids = [case['case_id'] for case in data]
    if case_ids:
        # Step 2: fetch the display columns for the eligible cases only.
        data = CcsRecordMonthlyView.objects.filter(
            awc_id=awc_id,
            month=first_day_month,
            date_death=None,
            case_id__in=case_ids,
        ).order_by('case_id', '-month').distinct('case_id').values(
            'case_id', 'person_name', 'age_in_months', 'add', 'delivery_nature', 'institutional_delivery',
            'num_pnc_visits', 'breastfed_at_birth', 'is_ebf', 'num_rations_distributed', 'month'
        )
        data_count = data.count()
    else:
        data = []
        data_count = 0
    config = {
        'data': [],
    }

    def base_data(row_data):
        # Flatten one queryset row into the table-row dict.
        return dict(
            case_id=row_data['case_id'],
            person_name=row_data['person_name'],
            # age_in_months -> whole years; falsy values pass through as-is.
            age=row_data['age_in_months'] // 12 if row_data['age_in_months'] else row_data['age_in_months'],
            add=row_data['add'],
            delivery_nature=get_delivery_nature(row_data),
            institutional_delivery='Y' if row_data['institutional_delivery'] else 'N',
            num_pnc_visits=row_data['num_pnc_visits'],
            breastfed_at_birth='Y' if row_data['breastfed_at_birth'] else 'N',
            is_ebf='Y' if row_data['is_ebf'] else 'N',
            num_rations_distributed=row_data['num_rations_distributed'],
        )
    for row in data:
        config['data'].append(base_data(row))

    def ordering_format(record):
        # Sort key that substitutes a type-appropriate default for missing
        # values so mixed None/value columns still sort without a TypeError.
        if record[order]:
            return record[order]
        numeric_fields = ['age', 'delivery_nature', 'num_pnc_visits', 'num_rations_distributed']
        if any([field in order for field in numeric_fields]):
            return 0
        date_fields = ['add']
        if any([field in order for field in date_fields]):
            return date.today()
        return ""
    # Sort and page in Python (the whole result set was materialised above).
    config['data'].sort(key=ordering_format, reverse=reversed_order)
    config['data'] = config['data'][start:(start + length)]
    config["recordsTotal"] = data_count
    config["recordsFiltered"] = data_count
    return config
| 41.547619 | 118 | 0.537295 |
b2a5b47b7c28d6c43a2653586924b7b33261b6a8 | 2,204 | py | Python | scintilla/scripts/splitbins.py | zufuliu/notepad2 | c55d0b02f6910b84ab8a3dd735afbffb1d45d0d4 | [
"MIT"
] | 1,238 | 2017-06-26T01:23:11.000Z | 2022-03-31T03:04:04.000Z | scintilla/scripts/splitbins.py | zufuliu/notepad2 | c55d0b02f6910b84ab8a3dd735afbffb1d45d0d4 | [
"MIT"
] | 401 | 2017-08-04T11:26:06.000Z | 2022-03-31T23:48:47.000Z | scintilla/scripts/splitbins.py | zufuliu/notepad2 | c55d0b02f6910b84ab8a3dd735afbffb1d45d0d4 | [
"MIT"
] | 140 | 2018-02-10T13:19:01.000Z | 2022-03-29T04:15:17.000Z | # splitbins() is based on Python source
# https://github.com/python/cpython/blob/main/Tools/unicode/makeunicodedata.py
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019 Python Software Foundation;
# All Rights Reserved
# see Python-LICENSE.txt or https://www.python.org/psf/license/ for license details.
import sys
import math
from collections.abc import Iterable
def getsize(data):
    """Return the smallest C integer width, in bytes (1, 2 or 4), that can
    hold *data* -- either a single int or every int in an iterable."""
    largest = max(data) if isinstance(data, Iterable) else data
    if largest < 256:
        return 1
    if largest < 65536:
        return 2
    return 4
def splitbins(t, second=False):
    """t -> (t1, t2, shift).  Split a table to save space.

    t is a sequence of ints.  This function can be useful to save space if
    many of the ints are the same.  t1 and t2 are lists of ints, and shift
    is an int, chosen to minimize the combined size of t1 and t2 (in C
    code), and where for each i in range(len(t)),

        t[i] == t2[(t1[i >> shift] << shift) | (i & mask)]

    where mask is a bitmask isolating the last "shift" bits.

    When *second* is true, the second-level table t2 is itself split once
    more and the result is (t1, t3, t4, shift, shift2) instead.
    """
    # the most we can shift n and still have something left
    maxshift = math.floor(math.log2(len(t)))
    total = sys.maxsize  # smallest total size so far
    t = tuple(t)  # so slices can be dict keys
    for shift in range(maxshift + 1):
        t1 = []
        t2 = []
        size = 2**shift
        bincache = {}
        # Cut t into bins of 2**shift entries; identical bins are stored
        # only once in t2, and t1 records each bin's offset (pre-shifted).
        for i in range(0, len(t), size):
            part = t[i:i+size]
            index = bincache.get(part)
            if index is None:
                # First occurrence of this bin: append it to t2 and remember
                # where it starts so later duplicates can share it.
                index = len(t2)
                bincache[part] = index
                t2.extend(part)
            t1.append(index >> shift)
        # determine memory size
        b = len(t1)*getsize(t1)
        if second:
            t3, t4, shift2 = splitbins(t2, False)
            b += len(t3)*getsize(t3) + len(t4)*getsize(t4)
        else:
            b += len(t2)*getsize(t2)
        # Strict "<" keeps the smallest shift among equally-sized splits.
        if b < total:
            if second:
                best = t1, t3, t4, shift, shift2
            else:
                best = t1, t2, shift
            total = b
    return best
def preshift(index, shift):
    """Fold as much of *shift* as possible into the stored *index* values.

    Entries of *index* are multiplied (in place) by 2**n for the largest n
    (capped at *shift*) that still fits in the entries' current C integer
    width, and the remaining shift amount is returned.  This lets the table
    lookup skip part of the run-time "<< shift" at no storage cost.
    """
    largest = max(index)
    width = getsize(largest)
    # How many doublings fit into the headroom of the current width.
    headroom = (2 ** (width * 8) - 1) // largest
    headroom = math.floor(math.log2(headroom))
    if headroom > 0:
        headroom = min(headroom, shift)
        for pos, value in enumerate(index):
            index[pos] = value << headroom
        return shift - headroom
    return shift
| 27.55 | 84 | 0.655172 |
a0acecbf310f3f0905d5819c02e4587922ec8875 | 1,987 | py | Python | Text-Adventure/text-adventure_original.py | leisheng1234567890/Python_game | 5aeab477a4b21c6a6d0f25305172e2d4d7d6c6ba | [
"MIT"
] | 10 | 2019-02-27T07:57:32.000Z | 2021-06-04T07:15:49.000Z | Text-Adventure/text-adventure_original.py | leisheng1234567890/Python_game | 5aeab477a4b21c6a6d0f25305172e2d4d7d6c6ba | [
"MIT"
] | null | null | null | Text-Adventure/text-adventure_original.py | leisheng1234567890/Python_game | 5aeab477a4b21c6a6d0f25305172e2d4d7d6c6ba | [
"MIT"
] | 7 | 2019-02-28T08:51:57.000Z | 2022-03-25T12:43:59.000Z | directions = ['north','south','east','west']
# Data structure to store details of each location in the game
class Location:
    """One place in the game world, with links to neighbouring places."""

    def __init__(self, name, description):
        """Store the display name and the description shown on arrival."""
        self.name = name
        self.description = description
        # Maps a compass direction to the ID of the location it leads to;
        # starts empty and is populated via addLink().
        self.linkedLocations = {}

    def addLink(self, direction, destination):
        """Record that travelling *direction* from here reaches *destination*.

        Both arguments are validated against the module-level ``directions``
        list and ``locations`` dictionary; a bad value raises ValueError.
        """
        if direction not in directions:
            raise ValueError('Invalid direction')
        if destination not in locations:
            raise ValueError('Invalid destination')
        self.linkedLocations[direction] = destination
# Dictionary with location ID strings as keys and Location objects as the values
locations = { 'woods':Location('The woods', 'You are in the woods. There are lots of trees.'),
              'lake':Location('The lake', 'You are by the lake. It is very watery.') }

# Join the two locations together
locations['woods'].addLink('north','lake')
locations['lake'].addLink('south','woods')

# Player will start in the woods
currentLocation = locations['woods']

# Main game loop: describe the current location, list exits, then apply the
# player's command.  Runs forever (no quit command is defined).
while True:
    # Display description of current location
    print(currentLocation.description)
    # Display neighbouring locations
    for linkDirection,linkedLocation in currentLocation.linkedLocations.items():
        print(linkDirection + ': ' + locations[linkedLocation].name)
    # Read player input (case-insensitive)
    command = input('>').lower()
    if command in directions:
        if command not in currentLocation.linkedLocations:
            print('You cannot go that way')
        else:
            # Follow the link: look up the destination ID, then move there.
            newLocationID = currentLocation.linkedLocations[command]
            currentLocation = locations[newLocationID]
    else:
        print('Try one of: ' + ', '.join(directions)) # Show list of directions, separated by commas
| 39.74 | 119 | 0.685959 |
42ba21a1228f1def1864cb88feaf5fe1ed9ae817 | 31,967 | py | Python | openerp/addons/base/tests/test_base.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 3 | 2016-01-29T14:39:49.000Z | 2018-12-29T22:42:00.000Z | openerp/addons/base/tests/test_base.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 2 | 2016-03-23T14:29:41.000Z | 2017-02-20T17:11:30.000Z | openerp/addons/base/tests/test_base.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | null | null | null | import unittest2
import openerp.tests.common as common
from openerp.osv.orm import except_orm
class test_base(common.TransactionCase):
def setUp(self):
super(test_base,self).setUp()
self.res_partner = self.registry('res.partner')
self.res_users = self.registry('res.users')
self.res_partner_title = self.registry('res.partner.title')
# samples use effective TLDs from the Mozilla public suffix
# list at http://publicsuffix.org
self.samples = [
('"Raoul Grosbedon" <raoul@chirurgiens-dentistes.fr> ', 'Raoul Grosbedon', 'raoul@chirurgiens-dentistes.fr'),
('ryu+giga-Sushi@aizubange.fukushima.jp', '', 'ryu+giga-Sushi@aizubange.fukushima.jp'),
('Raoul chirurgiens-dentistes.fr', 'Raoul chirurgiens-dentistes.fr', ''),
(" Raoul O'hara <!@historicalsociety.museum>", "Raoul O'hara", '!@historicalsociety.museum')
]
def test_00_res_partner_name_create(self):
cr, uid = self.cr, self.uid
parse = self.res_partner._parse_partner_name
for text, name, mail in self.samples:
self.assertEqual((name,mail), parse(text), 'Partner name parsing failed')
partner_id, dummy = self.res_partner.name_create(cr, uid, text)
partner = self.res_partner.browse(cr, uid, partner_id)
self.assertEqual(name or mail, partner.name, 'Partner name incorrect')
self.assertEqual(mail or False, partner.email, 'Partner email incorrect')
def test_10_res_partner_find_or_create(self):
cr,uid = self.cr, self.uid
email = self.samples[0][0]
partner_id, dummy = self.res_partner.name_create(cr, uid, email)
found_id = self.res_partner.find_or_create(cr, uid, email)
self.assertEqual(partner_id, found_id, 'find_or_create failed')
new_id = self.res_partner.find_or_create(cr, uid, self.samples[1][0])
self.assertTrue(new_id > partner_id, 'find_or_create failed - should have created new one')
new_id2 = self.res_partner.find_or_create(cr, uid, self.samples[2][0])
self.assertTrue(new_id2 > new_id, 'find_or_create failed - should have created new one again')
def test_15_res_partner_name_search(self):
cr,uid = self.cr, self.uid
for name, active in [
('"A Raoul Grosbedon" <raoul@chirurgiens-dentistes.fr>', False),
('B Raoul chirurgiens-dentistes.fr', True),
("C Raoul O'hara <!@historicalsociety.museum>", True),
('ryu+giga-Sushi@aizubange.fukushima.jp', True),
]:
partner_id, dummy = self.res_partner.name_create(cr, uid, name, context={'default_active': active})
partners = self.res_partner.name_search(cr, uid, 'Raoul')
self.assertEqual(len(partners), 2, 'Incorrect search number result for name_search')
partners = self.res_partner.name_search(cr, uid, 'Raoul', limit=1)
self.assertEqual(len(partners), 1, 'Incorrect search number result for name_search with a limit')
self.assertEqual(partners[0][1], 'B Raoul chirurgiens-dentistes.fr', 'Incorrect partner returned, should be the first active')
def test_20_res_partner_address_sync(self):
cr, uid = self.cr, self.uid
ghoststep = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'GhostStep',
'is_company': True,
'street': 'Main Street, 10',
'phone': '123456789',
'email': 'info@ghoststep.com',
'vat': 'BE0477472701',
'type': 'default'}))
p1 = self.res_partner.browse(cr, uid, self.res_partner.name_create(cr, uid, 'Denis Bladesmith <denis.bladesmith@ghoststep.com>')[0])
self.assertEqual(p1.type, 'contact', 'Default type must be "contact"')
p1phone = '123456789#34'
p1.write({'phone': p1phone,
'parent_id': ghoststep.id,
'use_parent_address': True})
p1.refresh()
self.assertEqual(p1.street, ghoststep.street, 'Address fields must be synced')
self.assertEqual(p1.phone, p1phone, 'Phone should be preserved after address sync')
self.assertEqual(p1.type, 'contact', 'Type should be preserved after address sync')
self.assertEqual(p1.email, 'denis.bladesmith@ghoststep.com', 'Email should be preserved after sync')
# turn off sync
p1street = 'Different street, 42'
p1.write({'street': p1street,
'use_parent_address': False})
p1.refresh(), ghoststep.refresh()
self.assertEqual(p1.street, p1street, 'Address fields must not be synced after turning sync off')
self.assertNotEqual(ghoststep.street, p1street, 'Parent address must never be touched')
# turn on sync again
p1.write({'use_parent_address': True})
p1.refresh()
self.assertEqual(p1.street, ghoststep.street, 'Address fields must be synced again')
self.assertEqual(p1.phone, p1phone, 'Phone should be preserved after address sync')
self.assertEqual(p1.type, 'contact', 'Type should be preserved after address sync')
self.assertEqual(p1.email, 'denis.bladesmith@ghoststep.com', 'Email should be preserved after sync')
# Modify parent, sync to children
ghoststreet = 'South Street, 25'
ghoststep.write({'street': ghoststreet})
p1.refresh()
self.assertEqual(p1.street, ghoststreet, 'Address fields must be synced automatically')
self.assertEqual(p1.phone, p1phone, 'Phone should not be synced')
self.assertEqual(p1.email, 'denis.bladesmith@ghoststep.com', 'Email should be preserved after sync')
p1street = 'My Street, 11'
p1.write({'street': p1street})
ghoststep.refresh()
self.assertEqual(ghoststep.street, ghoststreet, 'Touching contact should never alter parent')
def test_30_res_partner_first_contact_sync(self):
""" Test initial creation of company/contact pair where contact address gets copied to
company """
cr, uid = self.cr, self.uid
ironshield = self.res_partner.browse(cr, uid, self.res_partner.name_create(cr, uid, 'IronShield')[0])
self.assertFalse(ironshield.is_company, 'Partners are not companies by default')
self.assertFalse(ironshield.use_parent_address, 'use_parent_address defaults to False')
self.assertEqual(ironshield.type, 'contact', 'Default type must be "contact"')
ironshield.write({'type': 'default'}) # force default type to double-check sync
p1 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Isen Hardearth',
'street': 'Strongarm Avenue, 12',
'parent_id': ironshield.id}))
self.assertEquals(p1.type, 'contact', 'Default type must be "contact", not the copied parent type')
ironshield.refresh()
self.assertEqual(ironshield.street, p1.street, 'Address fields should be copied to company')
self.assertTrue(ironshield.is_company, 'Company flag should be turned on after first contact creation')
def test_40_res_partner_address_getc(self):
""" Test address_get address resolution mechanism: it should first go down through descendants,
stopping when encountering another is_copmany entity, then go up, stopping again at the first
is_company entity or the root ancestor and if nothing matches, it should use the provided partner
itself """
cr, uid = self.cr, self.uid
elmtree = self.res_partner.browse(cr, uid, self.res_partner.name_create(cr, uid, 'Elmtree')[0])
branch1 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Branch 1',
'parent_id': elmtree.id,
'is_company': True}))
leaf10 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 10',
'parent_id': branch1.id,
'type': 'invoice'}))
branch11 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Branch 11',
'parent_id': branch1.id,
'type': 'other'}))
leaf111 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 111',
'parent_id': branch11.id,
'type': 'delivery'}))
branch11.write({'is_company': False}) # force is_company after creating 1rst child
branch2 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Branch 2',
'parent_id': elmtree.id,
'is_company': True}))
leaf21 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 21',
'parent_id': branch2.id,
'type': 'delivery'}))
leaf22 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 22',
'parent_id': branch2.id}))
leaf23 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 23',
'parent_id': branch2.id,
'type': 'default'}))
# go up, stop at branch1
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf111.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf111.id,
'invoice': leaf10.id,
'contact': branch1.id,
'other': branch11.id,
'default': leaf111.id}, 'Invalid address resolution')
self.assertEqual(self.res_partner.address_get(cr, uid, [branch11.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf111.id,
'invoice': leaf10.id,
'contact': branch1.id,
'other': branch11.id,
'default': branch11.id}, 'Invalid address resolution')
# go down, stop at at all child companies
self.assertEqual(self.res_partner.address_get(cr, uid, [elmtree.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': elmtree.id,
'invoice': elmtree.id,
'contact': elmtree.id,
'other': elmtree.id,
'default': elmtree.id}, 'Invalid address resolution')
# go down through children
self.assertEqual(self.res_partner.address_get(cr, uid, [branch1.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf111.id,
'invoice': leaf10.id,
'contact': branch1.id,
'other': branch11.id,
'default': branch1.id}, 'Invalid address resolution')
self.assertEqual(self.res_partner.address_get(cr, uid, [branch2.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf21.id,
'invoice': leaf23.id,
'contact': branch2.id,
'other': leaf23.id,
'default': leaf23.id}, 'Invalid address resolution')
# go up then down through siblings
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf21.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf21.id,
'invoice': leaf23.id,
'contact': branch2.id,
'other': leaf23.id,
'default': leaf23.id
}, 'Invalid address resolution, should scan commercial entity ancestor and its descendants')
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf22.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf21.id,
'invoice': leaf23.id,
'contact': leaf22.id,
'other': leaf23.id,
'default': leaf23.id}, 'Invalid address resolution, should scan commercial entity ancestor and its descendants')
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf23.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf21.id,
'invoice': leaf23.id,
'contact': branch2.id,
'other': leaf23.id,
'default': leaf23.id}, 'Invalid address resolution, `default` should only override if no partner with specific type exists')
# empty adr_pref means only 'default'
self.assertEqual(self.res_partner.address_get(cr, uid, [elmtree.id], []),
{'default': elmtree.id}, 'Invalid address resolution, no default means commercial entity ancestor')
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf111.id], []),
{'default': leaf111.id}, 'Invalid address resolution, no default means contact itself')
branch11.write({'type': 'default'})
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf111.id], []),
{'default': branch11.id}, 'Invalid address resolution, branch11 should now be default')
def test_50_res_partner_commercial_sync(self):
cr, uid = self.cr, self.uid
p0 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Sigurd Sunknife',
'email': 'ssunknife@gmail.com'}))
sunhelm = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Sunhelm',
'is_company': True,
'street': 'Rainbow Street, 13',
'phone': '1122334455',
'email': 'info@sunhelm.com',
'vat': 'BE0477472701',
'child_ids': [(4, p0.id),
(0, 0, {'name': 'Alrik Greenthorn',
'email': 'agr@sunhelm.com'})],
}))
p1 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Otto Blackwood',
'email': 'otto.blackwood@sunhelm.com',
'parent_id': sunhelm.id}))
p11 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Gini Graywool',
'email': 'ggr@sunhelm.com',
'parent_id': p1.id}))
p2 = self.res_partner.browse(cr, uid, self.res_partner.search(cr, uid,
[('email', '=', 'agr@sunhelm.com')])[0])
for p in (p0, p1, p11, p2):
p.refresh()
self.assertEquals(p.commercial_partner_id, sunhelm, 'Incorrect commercial entity resolution')
self.assertEquals(p.vat, sunhelm.vat, 'Commercial fields must be automatically synced')
sunhelmvat = 'BE0123456789'
sunhelm.write({'vat': sunhelmvat})
for p in (p0, p1, p11, p2):
p.refresh()
self.assertEquals(p.vat, sunhelmvat, 'Commercial fields must be automatically and recursively synced')
p1vat = 'BE0987654321'
p1.write({'vat': p1vat})
for p in (sunhelm, p0, p11, p2):
p.refresh()
self.assertEquals(p.vat, sunhelmvat, 'Sync to children should only work downstream and on commercial entities')
# promote p1 to commercial entity
vals = p1.onchange_type(is_company=True)['value']
p1.write(dict(vals, parent_id=sunhelm.id,
is_company=True,
name='Sunhelm Subsidiary'))
p1.refresh()
self.assertEquals(p1.vat, p1vat, 'Setting is_company should stop auto-sync of commercial fields')
self.assertEquals(p1.commercial_partner_id, p1, 'Incorrect commercial entity resolution after setting is_company')
# writing on parent should not touch child commercial entities
sunhelmvat2 = 'BE0112233445'
sunhelm.write({'vat': sunhelmvat2})
p1.refresh()
self.assertEquals(p1.vat, p1vat, 'Setting is_company should stop auto-sync of commercial fields')
p0.refresh()
self.assertEquals(p0.vat, sunhelmvat2, 'Commercial fields must be automatically synced')
def test_60_read_group(self):
cr, uid = self.cr, self.uid
title_sir = self.res_partner_title.create(cr, uid, {'name': 'Sir', 'domain': 'contact'})
title_lady = self.res_partner_title.create(cr, uid, {'name': 'Lady', 'domain': 'contact'})
test_users = [
{'name': 'Alice', 'login': 'alice', 'color': 1, 'function': 'Friend', 'date': '2015-03-28', 'title': title_lady},
{'name': 'Alice', 'login': 'alice2', 'color': 0, 'function': 'Friend', 'date': '2015-01-28', 'title': title_lady},
{'name': 'Bob', 'login': 'bob', 'color': 2, 'function': 'Friend', 'date': '2015-03-02', 'title': title_sir},
{'name': 'Eve', 'login': 'eve', 'color': 3, 'function': 'Eavesdropper', 'date': '2015-03-20', 'title': title_lady},
{'name': 'Nab', 'login': 'nab', 'color': -3, 'function': '5$ Wrench', 'date': '2014-09-10', 'title': title_sir},
{'name': 'Nab', 'login': 'nab-she', 'color': 6, 'function': '5$ Wrench', 'date': '2014-01-02', 'title': title_lady},
]
ids = [self.res_users.create(cr, uid, u) for u in test_users]
domain = [('id', 'in', ids)]
# group on local char field without domain and without active_test (-> empty WHERE clause)
groups_data = self.res_users.read_group(cr, uid, [], fields=['login'], groupby=['login'], orderby='login DESC', context={'active_test': False})
self.assertGreater(len(groups_data), 6, "Incorrect number of results when grouping on a field")
# group on local char field with limit
groups_data = self.res_users.read_group(cr, uid, domain, fields=['login'], groupby=['login'], orderby='login DESC', limit=3, offset=3)
self.assertEqual(len(groups_data), 3, "Incorrect number of results when grouping on a field with limit")
self.assertEqual(['bob', 'alice2', 'alice'], [g['login'] for g in groups_data], 'Result mismatch')
# group on inherited char field, aggregate on int field (second groupby ignored on purpose)
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color', 'function'], groupby=['function', 'login'])
self.assertEqual(len(groups_data), 3, "Incorrect number of results when grouping on a field")
self.assertEqual(['5$ Wrench', 'Eavesdropper', 'Friend'], [g['function'] for g in groups_data], 'incorrect read_group order')
for group_data in groups_data:
self.assertIn('color', group_data, "Aggregated data for the column 'color' is not present in read_group return values")
self.assertEqual(group_data['color'], 3, "Incorrect sum for aggregated data for the column 'color'")
# group on inherited char field, reverse order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby='name', orderby='name DESC')
self.assertEqual(['Nab', 'Eve', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
# group on int field, default ordering
groups_data = self.res_users.read_group(cr, uid, domain, fields=['color'], groupby='color')
self.assertEqual([-3, 0, 1, 2, 3, 6], [g['color'] for g in groups_data], 'Incorrect ordering of the list')
# multi group, second level is int field, should still be summed in first level grouping
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby=['name', 'color'], orderby='name DESC')
self.assertEqual(['Nab', 'Eve', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([3, 3, 2, 1], [g['color'] for g in groups_data], 'Incorrect ordering of the list')
# group on inherited char field, multiple orders with directions
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby='name', orderby='color DESC, name')
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['Eve', 'Nab', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([1, 2, 1, 2], [g['name_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited date column (res_partner.date) -> Year-Month, default ordering
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'date'], groupby=['date'])
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['January 2014', 'September 2014', 'January 2015', 'March 2015'], [g['date'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([1, 1, 1, 3], [g['date_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited date column (res_partner.date) -> Year-Month, custom order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'date'], groupby=['date'], orderby='date DESC')
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['March 2015', 'January 2015', 'September 2014', 'January 2014'], [g['date'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([3, 1, 1, 1], [g['date_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited many2one (res_partner.title), default order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'])
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_lady, 'Lady'), (title_sir, 'Sir')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([4, 2], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([10, -1], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), reversed natural order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby="title desc")
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_sir, 'Sir'), (title_lady, 'Lady')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([2, 4], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([-1, 10], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), multiple orders with m2o in second position
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby="color desc, title desc")
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_lady, 'Lady'), (title_sir, 'Sir')], [g['title'] for g in groups_data], 'Incorrect ordering of the result')
self.assertEqual([4, 2], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([10, -1], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), ordered by other inherited field (color)
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby='color')
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_sir, 'Sir'), (title_lady, 'Lady')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([2, 4], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([-1, 10], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
class test_partner_recursion(common.TransactionCase):
    """Ensure parent_id cycles in the partner hierarchy are rejected, without
    false positives on legitimate multi-record writes."""
    def setUp(self):
        super(test_partner_recursion,self).setUp()
        self.res_partner = self.registry('res.partner')
        cr, uid = self.cr, self.uid
        # Three-level chain: p1 -> p2 -> p3
        self.p1 = self.res_partner.name_create(cr, uid, 'Elmtree')[0]
        self.p2 = self.res_partner.create(cr, uid, {'name': 'Elmtree Child 1', 'parent_id': self.p1})
        self.p3 = self.res_partner.create(cr, uid, {'name': 'Elmtree Grand-Child 1.1', 'parent_id': self.p2})
    # split 101, 102, 103 tests to force SQL rollback between them
    def test_101_res_partner_recursion(self):
        """Root cannot become a child of its grand-child."""
        cr, uid, p1, p3 = self.cr, self.uid, self.p1, self.p3
        self.assertRaises(except_orm, self.res_partner.write, cr, uid, [p1], {'parent_id': p3})
    def test_102_res_partner_recursion(self):
        """A node cannot become a child of its own child."""
        cr, uid, p2, p3 = self.cr, self.uid, self.p2, self.p3
        self.assertRaises(except_orm, self.res_partner.write, cr, uid, [p2], {'parent_id': p3})
    def test_103_res_partner_recursion(self):
        """A node cannot be its own parent."""
        cr, uid, p3 = self.cr, self.uid, self.p3
        self.assertRaises(except_orm, self.res_partner.write, cr, uid, [p3], {'parent_id': p3})
    def test_104_res_partner_recursion_indirect_cycle(self):
        """ Indirect hacky write to create cycle in children """
        cr, uid, p2, p3 = self.cr, self.uid, self.p2, self.p3
        p3b = self.res_partner.create(cr, uid, {'name': 'Elmtree Grand-Child 1.2', 'parent_id': self.p2})
        self.assertRaises(except_orm, self.res_partner.write, cr, uid, [p2],
                          {'child_ids': [(1, p3, {'parent_id': p3b}), (1, p3b, {'parent_id': p3})]})
    def test_110_res_partner_recursion_multi_update(self):
        """ multi-write on several partners in same hierarchy must not trigger a false cycle detection """
        cr, uid, p1, p2, p3 = self.cr, self.uid, self.p1, self.p2, self.p3
        self.assertTrue(self.res_partner.write(cr, uid, [p1,p2,p3], {'phone': '123456'}))
class test_translation(common.TransactionCase):
    """Check that ir.translation values survive record duplication, both with
    and without a translated default provided at copy time."""
    def setUp(self):
        super(test_translation, self).setUp()
        self.res_category = self.registry('res.partner.category')
        self.ir_translation = self.registry('ir.translation')
        cr, uid = self.cr, self.uid
        # Install French so that lang='fr_FR' contexts resolve translations.
        self.registry('ir.translation').load(cr, ['base'], ['fr_FR'])
        self.cat_id = self.res_category.create(cr, uid, {'name': 'Customers'})
        self.ir_translation.create(cr, uid, {'name': 'res.partner.category,name', 'module':'base',
                                             'value': 'Clients', 'res_id': self.cat_id, 'lang':'fr_FR', 'state':'translated', 'type': 'model'})
    def test_101_create_translated_record(self):
        """Reading with/without a lang context returns the matching value."""
        cr, uid = self.cr, self.uid
        no_context_cat = self.res_category.browse(cr, uid, self.cat_id)
        self.assertEqual(no_context_cat.name, 'Customers', "Error in basic name_get")
        fr_context_cat = self.res_category.browse(cr, uid, self.cat_id, context={'lang':'fr_FR'})
        self.assertEqual(fr_context_cat.name, 'Clients', "Translation not found")
    def test_102_duplicate_record(self):
        """copy() in a fr_FR context must carry both source and translated names."""
        cr, uid = self.cr, self.uid
        self.new_cat_id = self.res_category.copy(cr, uid, self.cat_id, context={'lang':'fr_FR'})
        no_context_cat = self.res_category.browse(cr, uid, self.new_cat_id)
        self.assertEqual(no_context_cat.name, 'Customers', "Duplication did not set untranslated value")
        fr_context_cat = self.res_category.browse(cr, uid, self.new_cat_id, context={'lang':'fr_FR'})
        self.assertEqual(fr_context_cat.name, 'Clients', "Did not found translation for initial value")
    def test_103_duplicate_record_fr(self):
        """copy() with an explicit French default must not clobber the source value."""
        cr, uid = self.cr, self.uid
        self.new_fr_cat_id = self.res_category.copy(cr, uid, self.cat_id, default={'name': 'Clients (copie)'}, context={'lang':'fr_FR'})
        no_context_cat = self.res_category.browse(cr, uid, self.new_fr_cat_id)
        self.assertEqual(no_context_cat.name, 'Customers', "Duplication erased original untranslated value")
        fr_context_cat = self.res_category.browse(cr, uid, self.new_fr_cat_id, context={'lang':'fr_FR'})
        self.assertEqual(fr_context_cat.name, 'Clients (copie)', "Did not used default value for translated value")
# Allow running this test module directly via unittest2's CLI runner.
if __name__ == '__main__':
    unittest2.main()
| 69.043197 | 158 | 0.57015 |
051473fa1324fd22f12bda78680b2634132572f7 | 3,230 | py | Python | discordbot/stocks/government/contracts.py | DidierRLopes/GST-discordbot | 8ff7f7557f5db62ea33d63cfc11ee7ae5f9de56c | [
"MIT"
] | 1 | 2021-12-31T04:10:42.000Z | 2021-12-31T04:10:42.000Z | discordbot/stocks/government/contracts.py | DidierRLopes/GST-discordbot | 8ff7f7557f5db62ea33d63cfc11ee7ae5f9de56c | [
"MIT"
] | null | null | null | discordbot/stocks/government/contracts.py | DidierRLopes/GST-discordbot | 8ff7f7557f5db62ea33d63cfc11ee7ae5f9de56c | [
"MIT"
] | null | null | null | import os
import discord
from matplotlib import pyplot as plt
import pandas as pd
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal.stocks.government import quiverquant_model
from gamestonk_terminal.helper_funcs import plot_autoscale
import discordbot.config_discordbot as cfg
from discordbot.run_discordbot import gst_imgur
async def contracts_command(ctx, ticker="", past_transaction_days="", raw=""):
    """Displays contracts associated with tickers [quiverquant.com]

    Parameters
    ----------
    ctx : discord context used to reply to the invoking channel
    ticker : str
        Stock ticker to look up (required).
    past_transaction_days : str
        Number of most recent contract dates to keep (default: 10).
    raw : str
        "true"/"false" - when true, the raw data table is embedded as text.
    """
    try:
        # Debug user input
        if cfg.DEBUG:
            print(f"!stocks.gov.contracts {ticker} {past_transaction_days} {raw}")
        # Parse the optional lookback window; default to the last 10 dates.
        if past_transaction_days == "":
            past_transaction_days = 10
        else:
            if not past_transaction_days.lstrip("-").isnumeric():
                raise Exception("Number has to be an integer")
            past_transaction_days = int(past_transaction_days)
        # Normalize the raw flag from its string form to a bool.
        if raw in ["false", "False", "FALSE", ""]:
            raw = False
        if raw in ["true", "True", "TRUE"]:
            raw = True
        if raw not in [True, False]:
            raise Exception("raw argument has to be true or false")
        if ticker == "":
            raise Exception("A ticker is required")
        # Retrieve Data
        df_contracts = quiverquant_model.get_government_trading("contracts", ticker)
        if df_contracts.empty:
            raise Exception("No government contracts found")
        # Keep only the rows belonging to the most recent distinct dates.
        df_contracts["Date"] = pd.to_datetime(df_contracts["Date"]).dt.date
        df_contracts = df_contracts[
            df_contracts["Date"].isin(
                df_contracts["Date"].unique()[:past_transaction_days]
            )
        ]
        df_contracts.drop_duplicates(inplace=True)
        # Plot the contract amounts (in $1k) summed per date.
        fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        df_contracts.groupby("Date").sum().div(1000).plot(kind="bar", rot=0, ax=ax)
        ax.set_ylabel("Amount ($1k)")
        ax.set_title(f"Sum of latest government contracts to {ticker}")
        fig.tight_layout()
        plt.savefig("gov_contracts.png")
        # Close the figure so repeated bot commands do not leak matplotlib state.
        plt.close(fig)
        uploaded_image = gst_imgur.upload_image("gov_contracts.png", title="something")
        image_link = uploaded_image.link
        if cfg.DEBUG:
            print(f"Image URL: {image_link}")
        title = f"Stocks: [quiverquant.com] Contracts by {ticker}"
        if raw:
            # Raw mode: embed the dataframe as preformatted text.
            description = df_contracts.to_string()
            embed = discord.Embed(
                title=title, description=description, colour=cfg.COLOR
            )
        else:
            embed = discord.Embed(title=title, colour=cfg.COLOR)
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )
        embed.set_image(url=image_link)
        # Remove the temporary plot file once it has been uploaded.
        os.remove("gov_contracts.png")

        await ctx.send(embed=embed)
    except Exception as e:
        # Report the failure back to the channel; Embed descriptions must be
        # strings, so convert the exception explicitly.
        embed = discord.Embed(
            title=f"ERROR Stocks: [quiverquant.com] Contracts by {ticker}",
            colour=cfg.COLOR,
            description=str(e),
        )
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )
        await ctx.send(embed=embed)
| 32.959184 | 87 | 0.619814 |
650ec0ed16cfa407c59933e234915de5b02a71ea | 166 | py | Python | notebooks/2018.11.09 Quasiquotes.py | costrouc/uarray | c3c42147181a88265942ad5f9cf439467f746782 | [
"BSD-3-Clause"
] | null | null | null | notebooks/2018.11.09 Quasiquotes.py | costrouc/uarray | c3c42147181a88265942ad5f9cf439467f746782 | [
"BSD-3-Clause"
] | null | null | null | notebooks/2018.11.09 Quasiquotes.py | costrouc/uarray | c3c42147181a88265942ad5f9cf439467f746782 | [
"BSD-3-Clause"
] | null | null | null | #%%
from ast import parse
from astunparse import dump
#%%
print(dump(parse("1 + 1").body[0]))
#%%
import inspect
#%%
f = lambda: 1 + 1
print(inspect.getsource(f))
| 11.857143 | 35 | 0.644578 |
5a583aac7dd65b431dffa0b1bd81c4defa5ba413 | 5,644 | py | Python | libs/numpy/compat/py3k.py | rocketbot-cl/recognition | cca8a87070ccaca3a26e37345c36ab1bf836e258 | [
"MIT"
] | 353 | 2020-12-10T10:47:17.000Z | 2022-03-31T23:08:29.000Z | libs/numpy/compat/py3k.py | rocketbot-cl/recognition | cca8a87070ccaca3a26e37345c36ab1bf836e258 | [
"MIT"
] | 80 | 2020-12-10T09:54:22.000Z | 2022-03-30T22:08:45.000Z | libs/numpy/compat/py3k.py | rocketbot-cl/recognition | cca8a87070ccaca3a26e37345c36ab1bf836e258 | [
"MIT"
] | 63 | 2020-12-10T17:10:34.000Z | 2022-03-28T16:27:07.000Z | """
Python 3.X compatibility tools.
While this file was originally intended for Python 2 -> 3 transition,
it is now used to create a compatibility layer between different
minor versions of Python 3.
While the active version of numpy may not support a given version of python, we
allow downstream libraries to continue to use these shims for forward
compatibility with numpy while they transition their code to newer versions of
Python.
"""
# Public names re-exported by this compatibility shim.
__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
           'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
           'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
           'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path',
           'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike']
import sys
import os
from pathlib import Path, PurePath
import io
import abc
from abc import ABC as abc_ABC
try:
import pickle5 as pickle
except ImportError:
import pickle
# Python 2 compatibility aliases: on Python 3 these legacy names all map
# onto the corresponding native built-in types.
long = int
integer_types = (int,)
basestring = str
unicode = str
bytes = bytes
def asunicode(s):
    """Coerce *s* to ``str``; ``bytes`` input is decoded as latin1."""
    return s.decode('latin1') if isinstance(s, bytes) else str(s)
def asbytes(s):
    """Coerce *s* to ``bytes``; non-bytes input is str()-ed then latin1-encoded."""
    if not isinstance(s, bytes):
        s = str(s).encode('latin1')
    return s
def asstr(s):
    """Coerce *s* to ``str``, decoding ``bytes`` input as latin1."""
    if not isinstance(s, bytes):
        return str(s)
    return s.decode('latin1')
def isfileobj(f):
    """True when *f* is a real OS-backed :mod:`io` file object (raw or buffered)."""
    file_types = (io.FileIO, io.BufferedReader, io.BufferedWriter)
    return isinstance(f, file_types)
def open_latin1(filename, mode='r'):
    """Open *filename* in text mode using the ISO-8859-1 (latin-1) encoding."""
    return open(filename, encoding='iso-8859-1', mode=mode)
def sixu(s):
    """Return *s* unchanged; on Python 3 every str is already unicode."""
    return s
# Array-protocol type character for the native (unicode) string dtype.
strchar = 'U'
def getexception():
    """Return the exception instance currently being handled."""
    _, exc_value, _ = sys.exc_info()
    return exc_value
def asbytes_nested(x):
    """Recursively convert every string in a nested iterable to ``bytes``."""
    # `unicode` is an alias for str in this module, so test against str directly.
    if hasattr(x, '__iter__') and not isinstance(x, (bytes, str)):
        return [asbytes_nested(item) for item in x]
    return asbytes(x)
def asunicode_nested(x):
    """Recursively convert every string in a nested iterable to ``str``."""
    # `unicode` is an alias for str in this module, so test against str directly.
    if hasattr(x, '__iter__') and not isinstance(x, (bytes, str)):
        return [asunicode_nested(item) for item in x]
    return asunicode(x)
def is_pathlib_path(obj):
    """
    Check whether obj is a pathlib.Path object.

    Prefer using `isinstance(obj, os_PathLike)` instead of this function.
    """
    if Path is None:
        return False
    return isinstance(obj, Path)
# from Python 3.7
class contextlib_nullcontext:
    """Context manager that does no additional processing.

    Used as a stand-in for a normal context manager, when a particular
    block of code is only sometimes used with a normal context manager:

    cm = optional_cm if condition else nullcontext()
    with cm:
        # Perform operation, using optional_cm if condition is True
    """

    def __init__(self, enter_result=None):
        # Value that __enter__ will hand back to the `with ... as` target.
        self.enter_result = enter_result

    def __enter__(self):
        return self.enter_result

    def __exit__(self, *excinfo):
        # Nothing to clean up; returning a falsy value lets exceptions propagate.
        return None
def npy_load_module(name, fn, info=None):
    """
    Load a module.

    .. versionadded:: 1.11.2

    Parameters
    ----------
    name : str
        Full module name.
    fn : str
        Path to module file.
    info : tuple, optional
        Only here for backward compatibility with Python 2.*.

    Returns
    -------
    mod : module

    """
    # Explicitly lazy import this to avoid paying the cost
    # of importing importlib at startup
    from importlib.machinery import SourceFileLoader
    # NOTE(review): SourceFileLoader.load_module() is deprecated in favour of
    # exec_module(); kept as-is here for backward compatibility.
    return SourceFileLoader(name, fn).load_module()
# Backport os.fs_path, os.PathLike, and PurePath.__fspath__
if sys.version_info[:2] >= (3, 6):
    # Python >= 3.6 ships these natively (PEP 519); just alias them.
    os_fspath = os.fspath
    os_PathLike = os.PathLike
else:
    def _PurePath__fspath__(self):
        # Minimal __fspath__ stand-in for pathlib.PurePath on older Pythons.
        return str(self)
    class os_PathLike(abc_ABC):
        """Abstract base class for implementing the file system path protocol."""
        @abc.abstractmethod
        def __fspath__(self):
            """Return the file system path representation of the object."""
            raise NotImplementedError
        @classmethod
        def __subclasshook__(cls, subclass):
            # PurePath predates __fspath__, so accept it explicitly as well
            # as any class that actually defines __fspath__.
            if PurePath is not None and issubclass(subclass, PurePath):
                return True
            return hasattr(subclass, '__fspath__')
    def os_fspath(path):
        """Return the path representation of a path-like object.
        If str or bytes is passed in, it is returned unchanged. Otherwise the
        os.PathLike interface is used to get the path representation. If the
        path representation is not str or bytes, TypeError is raised. If the
        provided path is not str, bytes, or os.PathLike, TypeError is raised.
        """
        if isinstance(path, (str, bytes)):
            return path
        # Work from the object's type to match method resolution of other magic
        # methods.
        path_type = type(path)
        try:
            path_repr = path_type.__fspath__(path)
        except AttributeError:
            if hasattr(path_type, '__fspath__'):
                raise
            elif PurePath is not None and issubclass(path_type, PurePath):
                return _PurePath__fspath__(path)
            else:
                raise TypeError("expected str, bytes or os.PathLike object, "
                                "not " + path_type.__name__)
        if isinstance(path_repr, (str, bytes)):
            return path_repr
        else:
            raise TypeError("expected {}.__fspath__() to return str or bytes, "
                            "not {}".format(path_type.__name__,
                                            type(path_repr).__name__))
| 30.181818 | 82 | 0.622962 |
144de06b7610854871123976df8f66b8c55e559b | 1,088 | py | Python | vyper/tests/parser/syntax/test_functions_call.py | ssteiger/Vyper-Contract-GUI-Depreciated | 3ba9df1b7f7cf871beaddd3ae2af06b6a94ef7fe | [
"MIT"
] | null | null | null | vyper/tests/parser/syntax/test_functions_call.py | ssteiger/Vyper-Contract-GUI-Depreciated | 3ba9df1b7f7cf871beaddd3ae2af06b6a94ef7fe | [
"MIT"
] | null | null | null | vyper/tests/parser/syntax/test_functions_call.py | ssteiger/Vyper-Contract-GUI-Depreciated | 3ba9df1b7f7cf871beaddd3ae2af06b6a94ef7fe | [
"MIT"
] | null | null | null | import pytest
from pytest import (
raises,
)
from vyper import (
compiler,
)
from vyper.exceptions import (
ParserException,
StructureException,
)
fail_list = [
"""
@public
def foo() -> uint256:
doesnotexist(2, uint256)
return convert(2, uint256)
""",
"""
@public
def foo() -> uint256:
convert(2, uint256)
return convert(2, uint256)
""",
("""
@private
def test(a : uint256):
pass
@public
def burn(_value: uint256):
self.test(msg.sender._value)
""", ParserException)
]
@pytest.mark.parametrize('bad_code', fail_list)
def test_functions_call_fail(bad_code):
    """Every snippet in fail_list must be rejected by the compiler."""
    # Tuple entries carry their own expected exception type; bare strings
    # default to StructureException.
    if isinstance(bad_code, tuple):
        source, expected_exc = bad_code[0], bad_code[1]
    else:
        source, expected_exc = bad_code, StructureException
    with raises(expected_exc):
        compiler.compile_code(source)
valid_list = [
"""
@public
def foo() -> uint256:
return convert(2, uint256)
"""
]
@pytest.mark.parametrize('good_code', valid_list)
def test_functions_call_success(good_code):
    """Every snippet in valid_list must compile to a non-None artifact."""
    result = compiler.compile_code(good_code)
    assert result is not None
| 17 | 55 | 0.650735 |
da34e2dfd6b2ba932485cb3c82858eec766fc405 | 31,446 | py | Python | pyLib/plotTools.py | jpkeskinen/P4UL | 28b96c0d7292385e3a8ea237bf62a85fbd051556 | [
"MIT"
] | null | null | null | pyLib/plotTools.py | jpkeskinen/P4UL | 28b96c0d7292385e3a8ea237bf62a85fbd051556 | [
"MIT"
] | null | null | null | pyLib/plotTools.py | jpkeskinen/P4UL | 28b96c0d7292385e3a8ea237bf62a85fbd051556 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import glob
import numpy as np
import matplotlib.pyplot as plt
from utilities import dataFromDict
from matplotlib.ticker import FormatStrFormatter
# 08.04.2016: Mona added an option for colorbar bounds to addImagePlot
plt.rc('xtick', labelsize=24); #plt.rc('ytick.major', size=10)
plt.rc('ytick', labelsize=24); #plt.rc('ytick.minor', size=6)
plt.rcParams["font.family"] = "serif"
plt.rcParams["legend.fontsize"] = "large"
#plt.rcParams["font.serif"] = "Utopia"
#plt.rcParams["font.family"] = "monospace"
#plt.rcParams["font.monospace"] = "Courier"
#plt.rcParams["legend.labelspacing"] = 1.
iCg = 0 # Global integer for color
iMg = 0 # Global integer for markers
iLg = 0 # Global integer for line styles
gxI = -1 # Global x-index for csv animations
gyLst = [] # Global y-value list for csv animations
# The available color maps:
cmaps = { 1:'rainbow', 2:'jet', 3:'hot', 4:'gist_earth', 5:'nipy_spectral',\
6:'coolwarm', 7:'gist_rainbow', 8:'Spectral', 9:'CMRmap', 10:'cubehelix',\
11:'seismic', 12:'bwr', 13:'terrain', 14:'gist_ncar', 15:'gnuplot2', \
16:'BuPu', 17:'GnBu', 18:'RdPu', 19:'YlGnBu', 20:'YlOrRd',\
21:'Oranges', 22:'Reds', 23:'Purples', 24:'Blues'}
# NOTE! Some good ones: 2, 5, 12, 14
# The available color maps in the new version of matplotlib:
cmaps_new = { 1:'viridis', 2:'inferno', 3:'plasma', 4:'magma', 5:'Blues',
6:'BuGn', 7:'BuPu', 8:'GnBu', 9:'Greens', 10:'Greys',
11:'Oranges', 12:'OrRd', 13:'PuBu', 14:'PuBuGn', 15:'PuRd',
16:'Purples', 17:'RdPu', 18:'afmhot', 19:'autumn',
20:'bone', 22:'cool', 23:'copper', 24:'gist_heat',
25:'gray', 26:'hot', 27:'pink', 28:'spring', 29:'summer',
30:'winter', 31:'Reds', 32:'YlGn', 33:'YlGnBu', 34:'YlOrBr',
35:'YlOrRd', 36:'BrBG', 37:'bwr', 38:'coolwarm', 39:'PiYG',
40:'PRGn', 41:'PuOr', 42:'RdBu', 43:'RdGy', 44:'RdYlBu',
45:'RdYlGn', 46:'Spectral', 47:'seismic', 48:'Accent', 49:'Dark2',
50:'Paired', 51:'Pastel1', 52:'Pastel2', 53:'Set1', 54:'Set2',
55:'Set3', 56:'gist_earth',57:'terrain', 58:'ocean', 59:'gist_stern',
60:'brg', 61:'CMRmap', 62:'cubehelix', 63:'gnuplot', 64:'gnuplot2',
65:'gist_ncar',66:'nipy_spectral', 67:'jet', 68:'rainbow', 69:'gist_rainbow',
70:'hsv', 71:'flag', 72:'prism'}
# =*=*=*=* FUNCTION DEFINITIONS *=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addFigAxes( fig ):
  """Return the figure's first axes, creating one if the figure has none."""
  axes = fig.get_axes()
  if( not axes ):
    # [left, bottom, width, height] in figure-fraction coordinates.
    return fig.add_axes( [0.115, 0.09 , 0.85 , 0.81] )
  return axes[0]
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def printDict( D , ncols=3 ):
  """Print the key/value pairs of D, ncols entries per printed row."""
  row = str()
  count = 0
  for key, val in D.items():
    count += 1
    # pad each value to at least 13 characters so the columns line up
    row += ' {}: {:13s} \t'.format(key, val)
    if( count % ncols == 0 ):
      print(row)
      row = str()
  # flush whatever remains of the last partial row, plus a blank line
  print(row + '\n')
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def setColormap( img ):
  """Interactively pick a colormap for image `img` (falls back to default)."""
  # NOTE(review): declares `global cmaps` but actually reads the module-level
  # cmaps_new dict below; the global statement is harmless but misleading.
  global cmaps
  # Select the desired colormap
  try:
    printDict( cmaps_new, 3 )
    icmap = int(input(' Enter integer key for the colormap = '))
    # Optional number of discrete colors; empty/bad input -> continuous map.
    try: nc = int(input(' Number of discrete colors in colormap = '))
    except: nc = None
    cm = plt.get_cmap( cmaps_new[icmap], nc )
    img.set_cmap(cm)
  except:
    # Any bad/empty entry leaves the image's current colormap untouched.
    print(' Using default colormap.')
    pass
  return img
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def setColorbarLims( img, lMax=None, lMin=None ):
  """Set color limits on `img`; prompts the user when limits are not given."""
  # Specify the bounds in the colorbar
  if( (lMax is None) or (lMin is None) ):
    try:
      mm = input(' Enter limits for colorbar: <min> <max> =')
      lMin,lMax = list( map(float, mm.split()) )
      img.set_clim([lMin,lMax])
    except:
      # Empty/malformed input: keep the automatic limits.
      pass
  else:
    try:
      lMin = float(lMin); lMax = float(lMax)
      img.set_clim([lMin,lMax])
    except:
      pass
  return img
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def userColormapSettings( fig, im, Rmax=None, Rmin=None ):
  """Interactively configure limits/colormap/ticks and attach a colorbar."""
  uticks =None # User-defined ticks. <None> leads to default setting.
  eformat=None
  im = setColorbarLims( im )
  im = setColormap( im )
  try:
    uticks=list( map(float, input(' Enter ticks separated by comma (empty=default):').split(',')) )
  except:
    uticks=None
  # Use exponent notation for very small data ranges.
  if(Rmax is not None):
    if(Rmax<1.e-3):
      eformat='%.2e'
  cb = fig.colorbar(im, ticks=uticks, format=eformat)
  return cb
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def random_marker():
  """Return one matplotlib marker symbol chosen uniformly at random.

  Returns
  -------
  str : a single-character marker code, e.g. 'x', 's' or '+'.
  """
  markerList = ['x','s','p','h','d','*','o','+']
  # BUGFIX: np.random.random_integers() was deprecated in NumPy 1.11 and has
  # since been removed, so this function raised AttributeError on modern
  # NumPy. randint(n) draws from [0, n), which maps directly to a list index.
  im = np.random.randint( len(markerList) )
  return markerList[im]
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def marker_stack():
  """Return the next marker symbol from a fixed sequence (module state iMg)."""
  global iMg
  markerList = ['+','s','D','o','h','p','*','x']
  mrk = markerList[ iMg ]
  # NOTE(review): saturates at the last marker instead of wrapping around,
  # unlike linestyle_stack/color_stack which cycle — presumably intentional.
  iMg = min( ( iMg + 1 ), ( len(markerList)-1 ) )
  return mrk
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def linestyle_stack(lm=1, il=None):
  """Cycle through line styles (module state iLg); il resets the position.

  lm=1 cycles four styles; any other lm alternates solid/dashed (useful
  for plotting pairs of related curves).
  """
  global iLg
  # '-' : solid line style, '--': dashed line style
  # '-.' : dash-dot line style, ':' : dotted line style
  if( lm == 1 ):
    lstyleList = ['-','--','-.',':']
  else:
    lstyleList = ['-','--'] # ['x','+'] # ['-','--'] #
  nlinestyles = len(lstyleList)
  if( il is not None and np.isscalar(il) ):
    iLg = min( int(il) , (nlinestyles-1) )
  lstyle = lstyleList[iLg]
  iLg += 1
  if( iLg > (nlinestyles-1) ):
    iLg = 0   # wrap around to the first style
  return lstyle
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def color_stack(lm=1, ic=None):
  """Cycle through plot colors (module state iCg); ic resets the position.

  lm=1 cycles distinct colors; any other lm repeats each color twice so
  consecutive calls (e.g. line + confidence band) share a color.
  """
  global iCg
  '''
  Brown '#A52A2A',
  DeepPink '#FF1493',
  BlueViolet '#8A2BE2',
  DarkCyan '#008B8B',
  DarkOrange '#FF8C00',
  DarkMagenta '#8B008B',
  GoldenRod '#DAA520',
  SeaGreen '#2E8B57',
  OrangeRed '#FF4500',
  SlateBlue '#6A5ACD'
  '''
  if( lm == 1 ):
    colorList = ['b','r','c','k','#FF8C00','g','#8B008B',\
        '#FF1493','#8A2BE2','#008B8B','m',\
        '#2E8B57','#FF4500','#6A5ACD',\
        '#A52A2A','#DAA520']
  else:
    colorList = ['b','b','r','r','c','c','k','k','#FF8C00','#FF8C00','g','g','#8B008B','#8B008B',\
        '#FF1493','#FF1493','#8A2BE2','#8A2BE2','#008B8B','#008B8B','m','m',\
        '#2E8B57','#2E8B57','#FF4500','#FF4500','#6A5ACD','#6A5ACD',\
        '#A52A2A','#A52A2A','#DAA520','#DAA520']
  ncolors = len(colorList)
  if( ic is not None and np.isscalar(ic) ):
    iCg = min( int(ic) , ( ncolors-1 ) )
  clr = colorList[iCg]
  iCg += 1
  if( iCg > (ncolors-1) ):
    iCg = 0   # wrap around to the first color
  return clr
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def plotBar(fig, xb, yb, labelStr, plotStr=["","",""], wb=0.6, errb=0):
  """Draw a labeled bar plot (with optional red error bars) onto fig.

  plotStr = [title, xlabel, ylabel]; returns fig for chaining.
  """
  ax = addFigAxes( fig )
  ax.bar( xb, yb, width=wb, label=labelStr, yerr=errb, ecolor='r' )
  ax.set_title( plotStr[0], fontsize=22)
  ax.set_xlabel(plotStr[1], fontsize=22)
  ax.set_ylabel(plotStr[2], fontsize=22)
  ax.grid(True)
  return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addImagePlot( fig, R, titleStr, gridOn=False, limsOn=False ):
  """Render 2D array R onto fig via imshow; interactive colorbar if limsOn."""
  global cmaps
  ax = addFigAxes( fig )
  # Only the real part is shown (R may be complex, e.g. from an FFT).
  im = ax.imshow(np.real(R), aspect='auto')
  ax.set_title(titleStr)
  ax.grid(gridOn)
  if(limsOn):
    # Ask the user for limits/colormap/ticks interactively.
    cbar = userColormapSettings( fig, im, np.nanmax(R) )
  else:
    # Automatic limits: when max/min magnitudes are within a factor of 1.5,
    # symmetrize the range around zero; otherwise keep the signed extrema.
    minval = np.nanmin(R); maxval = np.nanmax(R)
    minSign = np.sign( minval )
    maxSign = np.sign( maxval )
    vmin = min( np.abs(minval), np.abs(maxval) )
    vmax = max( np.abs(minval), np.abs(maxval) )
    if( vmax/(vmin+1.E-5) < 1.5 ):
      vmax *= maxSign; vmin = minSign * vmax
    else:
      vmax *= maxSign; vmin *= minSign
    im = setColorbarLims( im, vmax, vmin )
    cbar = fig.colorbar(im)
  return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addImagePlotDict(fig, RDict ):
  """Like addImagePlot, but all options come from dict RDict.

  Required keys: 'R', 'gridOn', 'limsOn'. Optional: 'extent', 'title',
  'xlabel', 'ylabel', 'cmap', 'origin'.
  """
  global cmaps
  R = dataFromDict('R', RDict, allowNone=False)
  ex = dataFromDict('extent', RDict, allowNone=True)
  ttl = dataFromDict('title', RDict, allowNone=True)
  xlbl = dataFromDict('xlabel', RDict, allowNone=True)
  ylbl = dataFromDict('ylabel', RDict, allowNone=True)
  gOn = dataFromDict('gridOn', RDict, allowNone=False)
  lOn = dataFromDict('limsOn', RDict, allowNone=False)
  cm = dataFromDict('cmap', RDict, allowNone=True)
  orig = dataFromDict('origin', RDict, allowNone=True)
  ax = addFigAxes( fig )
  # Only the real part is shown (R may be complex).
  im = ax.imshow(np.real(R), origin=orig, extent=ex, aspect='auto', cmap=cm)
  ax.set_title(ttl); ax.set_xlabel(xlbl); ax.set_ylabel(ylbl)
  ax.grid(gOn)
  if(lOn):
    # Interactive limits/colormap/ticks.
    cbar = userColormapSettings( fig, im, np.nanmax(R), np.nanmin(R) )
  else:
    cbar = fig.colorbar(im)
  return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addToPlot(fig, x,y,labelStr, plotStr=["","",""], logOn=False):
  '''
  Add curve(s) x,y to the first axes of fig.

  Parameters
  ----------
  fig      : target matplotlib figure.
  x        : 1D abscissa values.
  y        : 1D array, or 2D array whose columns are plotted as separate
             curves (each labeled labelStr+'[i]').
  plotStr  : [title, xlabel, ylabel] strings.
  logOn    : if True, use a logarithmic y axis (semilogy).

  Returns
  -------
  fig : the same figure, for chaining.
  '''
  ax = addFigAxes( fig )
  y = np.asarray(y)
  # BUGFIX: the old code looped over np.size(np.shape(y)) — the number of
  # DIMENSIONS (at most 2) — instead of the number of columns, so for a 2D
  # y only the first two columns were ever drawn; additionally the label
  # suffix accumulated across iterations ('[0]', '[0][1]', ...).
  ncols = 1 if ( y.ndim == 1 ) else y.shape[1]
  for i in range(ncols):
    if( y.ndim == 1 ):
      yt = y; labelXX = labelStr
    else:
      yt = y[:,i]; labelXX = labelStr+'['+str(i)+']'
    if(logOn):
      lines=ax.semilogy(x,yt,linestyle_stack(1, None), linewidth=2.0, label=labelXX)
    else:
      lines=ax.plot(x,yt,linestyle_stack(1, None), linewidth=2.0, label=labelXX)
  ax.set_title( plotStr[0], fontsize=22)
  ax.set_xlabel(plotStr[1], fontsize=28)
  ax.set_ylabel(plotStr[2], fontsize=28); ax.grid(True)
  return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def plotXX( fig, pDict ):
  """Plot columns 1.. of a text file vs. column 0; options come from pDict.

  pDict keys: 'filename', 'logOn', 'Cx'/'Cy' (scale factors), 'revAxes'
  (swap x/y), 'lm' (line mode), 'lw' (line width), 'xlims'/'ylims'.
  """
  fileStr = dataFromDict('filename', pDict, allowNone=False)
  logOn = dataFromDict('logOn', pDict, allowNone=False)
  Cx = dataFromDict('Cx', pDict, allowNone=False)
  Cy = dataFromDict('Cy', pDict, allowNone=False)
  revAxes = dataFromDict('revAxes', pDict, allowNone=False)
  linemode = dataFromDict('lm', pDict, allowNone=False)
  linewidth= dataFromDict('lw', pDict, allowNone=False)
  ylims = dataFromDict('ylims', pDict, allowNone=True)
  xlims = dataFromDict('xlims', pDict, allowNone=True)
  # Whitespace-delimited first, comma-delimited as fallback.
  try: x = np.loadtxt(fileStr)
  except: x = np.loadtxt(fileStr,delimiter=',')
  ax = addFigAxes( fig )
  labelStr = labelString( fileStr )
  #lStr = fileStr.rsplit(".", 1)[0] # Remove the ".dat"
  #rStr = lStr.rsplit("_")[-1]
  #tStr = lStr.split("/", 2)
  #if( tStr[0] is "." ):
  # lStr = tStr[1]
  #else:
  # lStr = tStr[0]
  #labelStr = lStr+"_"+rStr
  # Print each column separately
  amax = 0.
  Ny = (x.shape[1]-1)
  for i in range(Ny):
    if( Ny == 1 ):
      labelXX = labelStr
    else:
      labelXX = labelStr+'['+str(i)+']'
    # revAxes swaps the roles of the first column and the data columns.
    if( revAxes ):
      yp = Cy*x[:,0]; xp = Cx*x[:,i+1]; dp = xp
    else:
      xp = Cx*x[:,0]; yp = Cy*x[:,i+1]; dp = yp
    if( logOn ):
      # Log scale on the data axis requires non-negative values.
      if( revAxes ):
        xp = np.abs( xp )
        plotf = ax.semilogx
      else:
        yp = np.abs( yp )
        plotf = ax.semilogy
    else:
      plotf = ax.plot
    lines = plotf( xp, yp, \
      linestyle_stack(lm=linemode), linewidth=linewidth, \
      label=labelXX, color=color_stack(lm=linemode))
    lmax = np.abs(np.max(dp)) # Local maximum
    if( lmax > amax ): amax = lmax
  #if( amax <5.e-4 and revAxes):
  # if( revAxes ): ax.xaxis.set_major_formatter(FormatStrFormatter('%.2e'))
  # else: ax.yaxis.set_major_formatter(FormatStrFormatter('%.2e'))
  ax.set_ybound(lower=ylims[0], upper=ylims[1] )
  ax.set_xbound(lower=xlims[0], upper=xlims[1] )
  ax.set_xlabel(" X ")
  ax.set_ylabel(" Y ")
  return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def ciDataFromFile( filename ):
  """Read confidence-interval data columns from a text file.

  The file must contain [x, v, v_lower, v_upper, ...] (4+ columns) or the
  bare [x, v] form (2 columns). Whitespace-delimited files are tried first,
  comma-delimited as a fallback. Returns (d, v, v_lower, v_upper); with only
  two columns the bounds simply alias the value column. Exits the program on
  an unsupported column count.
  """
  try:
    dat = np.loadtxt(filename)
  except:
    dat = np.loadtxt(filename, delimiter=',')
  nrows, ncols = dat.shape
  #print(' nrows, ncols = {}, {}'.format(nrows,ncols))
  if( ncols > 3 ):
    d = dat[:,0]; v = dat[:,1]; vl = dat[:,2]; vu = dat[:,3]
  elif( ncols == 2 ):
    # No interval columns present: bounds are views of the value column.
    d = dat[:,0]; v = dat[:,1]; vl = dat[:,1]; vu = dat[:,1]
  else:
    msg = '''
    Error! ncols has a strange value {}.
    The data must be in [x, v, v_lower, v_upper, (possibly something else)] format.
    Or alternatively [x,v] format in which case no confidence intervals will be present.
    Exiting...'''.format( ncols )
    sys.exit(msg)
  return d, v, vl, vu
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def ciScaleVals( d, v, vl, vu, Cx, Cy, revAxes ):
  """Scale CI data by Cx/Cy; returns (d, xx, yy, vl, vu).

  NOTE: d, vl and vu are scaled IN PLACE (via *=), so the caller's arrays
  are modified; xx/yy designate which axis carries the values (revAxes
  puts the values on x and the distance d on y).
  """
  if( revAxes ):
    xx = Cx*v
    vl *= Cx; vu *= Cx
    d *= Cy
    yy = d
  else:
    yy = Cy*v
    vl *= Cy; vu *= Cy
    d *= Cx
    xx = d
  return d, xx, yy, vl, vu
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def ciDiffVals( x1, y1, v1l, v1u, x2, y2, v2l, v2u, revAxes ):
  """Compute the signed gap between two confidence intervals.

  Where interval 2 lies above interval 1 the gap is max(v2l-v1u, 0);
  where it lies below, min(v2u-v1l, 0); zero means the intervals overlap.
  Modifies x1/y1 in place and returns (x1, y1, dm) with dm the mean |gap|.
  """
  # Because it is possible that all v2u > v1u (or v2u < v1u) we have to prepare for that.
  id2 = (v2u>v1u)
  id1 = ~id2
  id2On=False; id1On=False
  if( np.count_nonzero(id2) > 0 ):
    id2On = True
    v1mu = np.abs( np.mean(v1u[id2]) )
  if( np.count_nonzero(id1) > 0 ):
    id1On = True
    v1ml = np.abs( np.mean(v1l[id1]) )
  if( revAxes ):
    #if( id2On ): x1[id2] =np.maximum( ((v2l[id2]-v1u[id2])/v1mu)*100., 0.) # If diff is pos, there's overlap
    #if( id1On ): x1[id1] =np.minimum( ((v2u[id1]-v1l[id1])/v1ml)*100., 0.) # If diff is neg, -- " --
    if( id2On ): x1[id2] =np.maximum( (v2l[id2]-v1u[id2]), 0.) # If diff is pos, there's overlap
    if( id1On ): x1[id1] =np.minimum( (v2u[id1]-v1l[id1]), 0.) # If diff is neg, -- " --
    y1 = 0.5*( y1 + y2 )
    dm = np.mean( np.abs(x1) )
  else:
    #if( id2On ): y1[id2] =np.maximum( ((v2l[id2]-v1u[id2])/v1mu)*100., 0.) # If diff is pos, there's overlap
    #if( id1On ): y1[id1] =np.minimum( ((v2u[id1]-v1l[id1])/v1ml)*100., 0.) # If diff is neg, -- " --
    if( id2On ): y1[id2] =np.maximum( (v2l[id2]-v1u[id2]), 0.) # If diff is pos, there's overlap
    if( id1On ): y1[id1] =np.minimum( (v2u[id1]-v1l[id1]), 0.) # If diff is neg, -- " --
    x1 = 0.5*( x1 + x2 )
    dm = np.mean( np.abs(y1) )
  return x1, y1, dm
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def labelString( fname ):
  """Derive a short legend label from a file path.

  The extension is stripped first; then, for a path with components, the
  first component longer than one character containing no dot is preferred
  (so './dir/file.dat' -> 'dir'), falling back to the last component.
  """
  label = fname
  if( "." in label ):
    label = label.rsplit(".", 1)[0]   # drop the extension
  if( "/" in label ):
    parts = label.split('/')
    if( len(parts) > 1 ):
      lengths = list( map(len, parts) )
      if( (lengths[0] > 1) and ("." not in parts[0]) ):
        label = parts[0]
      elif( (lengths[1] > 1) and ("." not in parts[1]) ):
        label = parts[1]
      else:
        label = parts[-1]
  return label
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def plotCiXY( fig, pDict ):
  """Plot a value curve with its confidence band from a CI data file.

  pDict keys: 'filename', 'Cx'/'Cy' (scale factors, default 1), 'lm',
  'logOn', 'revAxes', 'xlims'/'ylims'.
  """
  fn = dataFromDict('filename', pDict, allowNone=False)
  Cx = dataFromDict('Cx', pDict, allowNone=True)
  Cy = dataFromDict('Cy', pDict, allowNone=True)
  linemode= dataFromDict('lm', pDict, allowNone=False)
  logOn = dataFromDict('logOn', pDict, allowNone=True)
  revAxes = dataFromDict('revAxes', pDict, allowNone=True)
  ylims = dataFromDict('ylims', pDict, allowNone=True)
  xlims = dataFromDict('xlims', pDict, allowNone=True)
  labelStr = labelString( fn )
  if( Cx is None ): Cx = 1.
  if( Cy is None ): Cy = 1.
  d, v, v_l, v_u = ciDataFromFile( fn )
  ax = addFigAxes( fig )
  d, xp, yp, v_l, v_u = ciScaleVals( d, v, v_l, v_u, Cx, Cy, revAxes )
  if( revAxes ): xlb = 'V(d)'; ylb = 'd'
  else: ylb = 'V(d)'; xlb = 'd'
  # Pick plotting/band functions matching axis orientation and log scale.
  if( logOn ):
    if( revAxes ):
      plotf = ax.semilogx
      fillbf = ax.fill_betweenx
    else:
      plotf = ax.semilogy
      fillbf= ax.fill_between
  else:
    plotf = ax.plot
    if( revAxes ):
      fillbf = ax.fill_betweenx
    else:
      fillbf = ax.fill_between
  lines = plotf( xp, yp, linestyle_stack(lm=linemode), lw=3., \
    label=labelStr, color=color_stack(lm=linemode))
  # White band between the lower/upper CI bounds.
  linef = fillbf( d, v_u, v_l, facecolor='white', edgecolor='white', alpha=0.25)
  ax.set_ybound(lower=ylims[0], upper=ylims[1] )
  ax.set_xbound(lower=xlims[0], upper=xlims[1] )
  ax.set_xlabel(xlb)
  ax.set_ylabel(ylb)
  return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def plotCiDiffXY( fig, pDict ):
  """Plot the confidence-interval gap between a file and a reference file.

  pDict keys: 'fileref' (reference CI file), 'filename' (compared CI file),
  plus the plotCiXY options. Resolutions are crudely matched by decimation
  when the two files cover different ranges.
  """
  f1 = dataFromDict('fileref', pDict, allowNone=False)
  fn = dataFromDict('filename', pDict, allowNone=False)
  Cx = dataFromDict('Cx', pDict, allowNone=True)
  Cy = dataFromDict('Cy', pDict, allowNone=True)
  linemode= dataFromDict('lm', pDict, allowNone=False)
  logOn = dataFromDict('logOn', pDict, allowNone=True)
  revAxes = dataFromDict('revAxes', pDict, allowNone=True)
  ylims = dataFromDict('ylims', pDict, allowNone=True)
  xlims = dataFromDict('xlims', pDict, allowNone=True)
  labelStr = labelString( fn )
  if( Cx is None ): Cx = 1.
  if( Cy is None ): Cy = 1.
  d1, v1, v1_l, v1_u = ciDataFromFile( f1 )
  d2, v2, v2_l, v2_u = ciDataFromFile( fn )
  if( d2[-1] != d1[-1] ):
    if( d2[-1] > d1[-1] ): # Quick and dirty handling for cases when d2[-1] > d1[-1]
      idx = ( d2 <= d1[-1] ) # Take the terms where values match
      d2 = d2[idx]; v2 = v2[idx]; v2_l = v2_l[idx]; v2_u = v2_u[idx] # Shorten
    # Compute the ratio to match the resolutions (roughly)
    r = np.round( (d2[1]-d2[0])/(d1[1]-d1[0]) ).astype(int)
    # Use the matching indecies only
    idm = ( np.mod((np.arange(len(d1))+1) , r) == 0 )
    d1 = d1[idm]; v1 = v1[idm]; v1_l = v1_l[idm]; v1_u = v1_u[idm]
  # Truncate both datasets to the common length.
  Lm = min( len(v2), len(v1) )
  d2 = d2[:Lm]; v2 = v2[:Lm]; v2_l = v2_l[:Lm]; v2_u = v2_u[:Lm]
  d1 = d1[:Lm]; v1 = v1[:Lm]; v1_l = v1_l[:Lm]; v1_u = v1_u[:Lm]
  d1, x1, y1, v1_l, v1_u = ciScaleVals( d1, v1, v1_l, v1_u, Cx, Cy, revAxes )
  d2, x2, y2, v2_l, v2_u = ciScaleVals( d2, v2, v2_l, v2_u, Cx, Cy, revAxes )
  xp, yp, dm = ciDiffVals( x1, y1, v1_l, v1_u, x2, y2, v2_l, v2_u, revAxes )
  if( revAxes ): xlb = 'D(d)'; ylb = 'd'
  else: ylb = 'D(d)'; xlb = 'd'
  ax = addFigAxes( fig )
  # Pick plotting/band functions matching axis orientation and log scale.
  if( logOn ):
    if( revAxes ):
      plotf = ax.semilogx
      fillbf = ax.fill_betweenx
    else:
      plotf = ax.semilogy
      fillbf= ax.fill_between
  else:
    plotf = ax.plot
    if( revAxes ):
      fillbf = ax.fill_betweenx
    else:
      fillbf = ax.fill_between
  # Legend reports the mean absolute interval gap.
  lines = plotf( xp, yp, linestyle_stack(lm=linemode), lw=3., \
    label=labelStr+r': $\left< | \Delta | \right>$={:.2g}'.format(dm) , color=color_stack(lm=linemode))
    #label=r': $\left< | \Delta | \right>$={:.2f}'.format(dm) , color=color_stack(lm=linemode))
  #linef = fillbf( d, v_u, v_l, facecolor='gray', alpha=0.25)
  ax.set_ybound(lower=ylims[0], upper=ylims[1] )
  ax.set_xbound(lower=xlims[0], upper=xlims[1] )
  ax.set_xlabel(xlb)
  ax.set_ylabel(ylb)
  return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def plotDY( fig, fileStr, dim=3, revAxes=False ):
  """Plot data columns vs. distance computed from the first `dim` columns.

  The distance is Euclidean, measured from the per-column minima of the
  first dim (1..3) coordinate columns. Remaining columns are plotted as
  unconnected markers.
  """
  dim = min( dim, 3 ); dim=max(dim , 1)
  x = np.loadtxt(fileStr)
  r = np.zeros( len(x[:,0]), float )
  for i in range(dim):
    x0 = np.min( x[:,i] )
    r += (x[:,i]-x0)**2
  d = np.sqrt(r)
  ax = addFigAxes( fig )
  labelStr = labelString( fileStr )
  # Print each column separately
  for i in range((x.shape[1]-dim)):
    if( revAxes ):
      lines=ax.plot(x[:,i+dim],d[:],marker=marker_stack(),
        color=color_stack(), fillstyle='none', ls='None' , label=labelStr+'['+str(i)+']' )
    else:
      lines=ax.plot(d[:],x[:,i+dim],marker=marker_stack(), mew=1.7,
        color=color_stack(), fillstyle='none', ls='None', label=labelStr+'['+str(i)+']')
  if( revAxes ):
    ax.set_ylabel(" D(X,Y,Z) "); ax.set_xlabel(" F(D) ")
  else:
    ax.set_xlabel(" D(X,Y,Z) "); ax.set_ylabel(" F(D) ")
  return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def plotYX( fig, fileStr, logOn ):
  """Plot columns 3.. of a data file against its column 1 ('Y' on the y axis)."""
  x = np.loadtxt(fileStr)
  y = x[:,1]
  ax = addFigAxes( fig )
  # Print each column separately
  for i in range((x.shape[1]-3)):
    if( logOn ):
      # Log x axis requires non-negative values, hence abs().
      lines=ax.semilogy(np.abs(x[:,i+3]), y[:] , linewidth=1.1 , label=fileStr+'_'+str(i))
    else:
      lines=ax.plot(x[:,i+3], y[:], linewidth=1.1, label=fileStr+'_'+str(i) )
  ax.set_xlabel(" F(Y) ")
  ax.set_ylabel(" Y ")
  return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def fullPlotXY(fig,fileStr,figStr,xlabelStr,ylabelStr,lwidth=1.2,fsize=16,logOn=False):
  """Like plotYX, but with caller-supplied labels, line width and font size.

  Parameters
  ----------
  fig        : target matplotlib figure.
  fileStr    : path of a whitespace-delimited data file.
  figStr     : base string for the per-column legend labels.
  xlabelStr, ylabelStr : axis label strings.
  lwidth     : line width;  fsize : axis-label font size.
  logOn      : if True, use semilogy with |column| values.
  """
  x = np.loadtxt(fileStr)
  y = x[:,1]
  ax = addFigAxes( fig )
  # Plot each data column (from the 4th column onwards) separately.
  for i in range((x.shape[1]-3)):
    if( logOn ):
      # BUGFIX: this branch referenced an undefined name 'lw' and raised
      # NameError whenever logOn=True; the parameter is 'lwidth'.
      lines=ax.semilogy(np.abs(x[:,i+3]), y[:] , linewidth=lwidth , label=figStr+'_'+str(i))
    else:
      lines=ax.plot(x[:,i+3], y[:], linewidth=lwidth, label=figStr+'_'+str(i) )
  ax.set_xlabel(xlabelStr, fontsize=fsize)
  ax.set_ylabel(ylabelStr, fontsize=fsize)
  return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def plotCSV( fig, fileStr, revAxes=False, magY=False, globalValues=False ):
  """Interactively plot selected columns of a CSV file with a header row.

  The user is prompted for the x column index and a list of y column
  indices (remembered in module globals gxI/gyLst when globalValues=True,
  for animating over many files). magY plots the magnitude sqrt(sum y^2)
  of the selected columns instead of individual curves; revAxes swaps axes.
  """
  global gxI
  global gyLst
  fl = open( fileStr, 'r' )
  line = fl.readline() # Read first line which contains all variable names as str.
  fl.close()
  varList = line.split(',')
  for i in range(len(varList)):
    varList[i]=varList[i].strip("\"")
  x = np.loadtxt(fileStr, delimiter=',', skiprows=1)
  if( not globalValues or (globalValues and gxI == -1) ):
    n = 0
    for v in varList:
      print(" => ["+str(n)+"]: "+ v)
      n+=1
    try:
      xI = int(input(" X [index] = "))
    except:
      sys.exit(' No selection. Exiting program. ')
    e = input(" Y [List] = ")
    if( e == ''):
      select=input(" Select All? [1-9]=> Yes, [Empty]=> No: ")
      if( select == ''):
        sys.exit(' Exiting program.')
      else:
        # BUGFIX: referenced an undefined name 'fileList' (NameError);
        # "select all" means every column in this file's header.
        yLst = list(range(len(varList)))
    else:
      try: yLst = list( map( int, e.split(',') ) )
      except: sys.exit(' Bad entry. Exiting program.')
    if( globalValues and gxI == -1 ):
      gxI = xI # Store the global values
      gyLst = yLst
  else: # (globalValues and gxI /= -1)
    #print ' Utilizing global values '
    xI = gxI # Extract the global values
    yLst = gyLst
  labelStr = fileStr.split(".")[0]
  ax = addFigAxes( fig )
  if( not magY ):
    yLbl = "" # Start with empty label
    for yJ in yLst:
      yLbl = yLbl+varList[yJ]+"; " # Compile label
      if( revAxes ):
        lines=ax.plot(x[:,yJ],x[:,xI],'-', markersize=6, linewidth=1.5, label=labelStr+": "+varList[yJ])
      else:
        lines=ax.plot(x[:,xI],x[:,yJ],'o-', markersize=6, linewidth=1.5, label=labelStr+": "+varList[yJ])
      #writeXY( x[:,xI],x[:,yJ], 'out.dat' )
  else:
    yt = np.zeros(len(x[:,0]))
    yLbl = " Mag(y[:]) " # Set fixed label
    for yJ in yLst:
      yt += x[:,yJ]**2
    if( revAxes ):
      lines=ax.plot(np.sqrt(yt),x[:,xI],'-', markersize=6, linewidth=1.5, label=labelStr)
    else:
      lines=ax.plot(x[:,xI],np.sqrt(yt),'o-', markersize=6, linewidth=1.5, label=labelStr)
  if( revAxes ):
    ax.set_ylabel(varList[xI]); ax.set_xlabel(yLbl)
  else:
    ax.set_xlabel(varList[xI]); ax.set_ylabel(yLbl)
  return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def extractFromCSV( csvFile, varNames ):
  """Extract named columns from a CSV file with a quoted header row.

  Returns a 2D np.array with one row per requested variable (in varNames
  order; names not present in the header are silently skipped). Exits the
  program when none of the requested names are found.
  """
  with open( csvFile, 'r' ) as fl:
    # The first line holds the quoted, comma-separated variable names.
    header = fl.readline()
  varList = header.split(',')
  for i in range(len(varList)):
    varList[i] = varList[i].strip("\"")
    varList[i] = varList[i].strip("\""+"\n") # the last entry carries a newline
  Ix = []
  for varStr in varNames:
    try: Ix.append( varList.index(varStr) )
    except: None
  if( len(Ix) == 0 ):
    print("None of the variables in {0} were found in {1}".format(varNames,varList))
    print("Exiting program. ")
    sys.exit(1)
  dat = np.loadtxt(csvFile, delimiter=',', skiprows=1)
  cols = [ np.array(dat[:,jcol]) for jcol in Ix ]
  return np.array(cols)
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def extractFromRAW( rawFile, varNames ):
  """Extract named columns from a raw data file with a '# x ...' header.

  The header is the comment line that lists the variable names, e.g.
  '# x y z p'. Returns a list of 1D arrays, one per found variable name;
  exits the program if none of the requested names are found.
  """
  fl = open( rawFile, 'r' )
  # Scan for the header line which contains the variable names.
  while 1:
    line = fl.readline()
    if( line == '' ):
      # BUGFIX: readline() returns '' forever at EOF, so the old loop spun
      # indefinitely on files without a matching header line.
      sys.exit(' Error in extractFromRAW: no header line found in {}.'.format(rawFile))
    # BUGFIX: the original test ('#' and 'x' in line) evaluated as just
    # ('x' in line) because the literal '#' is always truthy; require both
    # the comment marker and the 'x' coordinate name.
    if( ('#' in line) and ('x' in line) ):
      break
  fl.close()
  varList = line.split(); varList.remove('#')
  #print varList
  Ix = []
  for varStr in varNames:
    try: Ix.append( varList.index(varStr) )#; print "Index List= {}".format(Ix)
    except: None
  #print Ix
  if (len(Ix) == 0):
    print("None of the variables in {0} were found in {1}".format(varNames,varList))
    print("Exiting program. ")
    sys.exit(1)
  x = np.loadtxt(rawFile)
  data = []
  for jcol in Ix:
    data.append(x[:,jcol])
  return data
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addQuiver( X, Y, Ux, Uy , fc, labelStr, titleStr=" " ):
  """Draw a quiver (vector) plot of (Ux,Uy) on a NEW figure, subsampled by fc.

  fc is the decimation factor applied to both array axes; labelStr is
  currently unused (the quiverkey call below is commented out).
  """
  plt.figure()
  Q = plt.quiver(X[::fc, ::fc],Y[::fc, ::fc],Ux[::fc, ::fc],Uy[::fc, ::fc],\
    pivot='tail', color='b', units='xy', scale=1.5 )
  #qk = plt.quiverkey(Q, 0.9, 1.05, 1, labelStr, labelpos='E',fontproperties={'weight': 'bold'})
  plt.title(titleStr)
  return Q
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addContourf( X, Y, Q, CfDict=None ):
  """Create a new figure with a filled contour plot of Q(X,Y).

  Optional CfDict keys: 'title', 'label' (required together when the dict
  is given), 'cmap', 'N' (number of levels), 'vmin', 'vmax', 'levels'.
  Returns the contour set.
  """
  # Figure size scaled to the data aspect ratio (longest side = 12 in).
  Xdims = np.array(X.shape)
  figDims = 12.*(Xdims[::-1].astype(float)/np.max(Xdims))
  #figDims = (11,11)
  #figDims = (9,11)
  fig = plt.figure(figsize=figDims)
  #fig, ax = plt.subplots()
  ax = addFigAxes( fig )
  # Default values
  labelStr = ' Q(X,Y) '
  titleStr = ' Title: Q(X,Y) '
  cm = None
  vx = None
  vn = None
  levels = None
  N = 12
  if( CfDict is not None ):
    titleStr = dataFromDict('title', CfDict, allowNone=False)
    labelStr = dataFromDict('label', CfDict, allowNone=False)
    cm = dataFromDict('cmap', CfDict, allowNone=True )
    N = dataFromDict('N', CfDict, allowNone=True )
    vn = dataFromDict('vmin', CfDict, allowNone=True )
    vx = dataFromDict('vmax', CfDict, allowNone=True )
    levels = dataFromDict('levels', CfDict, allowNone=True )
    if( N is None ): N = 12
  #print(' vmax = {}, vmin = {} '.format(vx,vn))
  #levels = [-1e-6, -1e-7, 0, 1e-7, 1e-6]
  #CO = plt.contourf(X,Y,Q, levels )
  # Explicit levels take precedence over the level count N.
  if( levels is not None ): CO = ax.contourf(X,Y,Q, levels, cmap=cm, vmin=vn, vmax=vx )
  else: CO = ax.contourf(X,Y,Q, N , cmap=cm, vmin=vn, vmax=vx )
  ax.set_title( titleStr )
  cbar = fig.colorbar(CO)
  if( vx is not None ): cbar.vmax = vx
  if( vn is not None ): cbar.vmin = vn
  cbar.ax.set_ylabel(labelStr, fontsize=20, fontstyle='normal', fontweight='book', fontname='serif')
  return CO
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addScatterPlot(fig, X, Y, C, fc=4 ):
  """Scatter-plot X vs Y colored by C, decimating both array axes by fc."""
  ax = addFigAxes( fig )
  dims = np.array(np.shape(X))//fc # NOTE: integer division necessary
  N = np.prod(dims)
  # Flatten the decimated 2D arrays into 1D point lists for scatter().
  ax.scatter(X[::fc,::fc].reshape(N), Y[::fc,::fc].reshape(N), s=10, \
    c=C[::fc,::fc].reshape(N), marker=',', cmap=plt.cm.rainbow)
  return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def arrow2DPlot( fig, fileStr , scale=1.0, ic=0, fillOn=True ):
  """Draw one arrow per data row of a file with columns [x, y, dx, dy]."""
  d = np.loadtxt(fileStr)
  labelStr = fileStr.split(".")[0]
  try:
    x = d[:,0]; y =d[:,1]; dx = d[:,2]; dy =d[:,3]
  except:
    print(' The file must contain (at least) 4 columns: x, y, dx, dy ')
    sys.exit(1)
  ax = addFigAxes( fig )
  # Arrow shaft width scales with `scale` but is clamped to a sane range.
  lx = max(scale, 0.825 )*0.0008
  lx = min( lx, 0.0016 )
  for i in range( len(x) ):
    # NOTE(review): color_stack(ic) passes ic positionally as the 'lm'
    # argument (signature is color_stack(lm=1, ic=None)); it looks like
    # color_stack(ic=ic) was intended — confirm before changing.
    ax.arrow( x[i], y[i], scale*dx[i], scale*dy[i], color=color_stack(ic) , width=lx, \
      head_width=5.85*lx, head_length=2.85*lx, overhang=0.25, fill=fillOn )
  return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def writeXY( x , y , fileName ):
  """Write paired x/y values to fileName, one '%13.7e \\t %13.7e' row each."""
  with open( fileName, 'w' ) as f:
    for i in range(len(x)):
      f.write("%13.7e \t %13.7e \n" %(x[i], y[i]) )
    print('Writing file '+fileName)
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def wavePlot( fig, fileStr, logOn ):
  """Plot columns 1.. of a text file vs. column 0 (markers, or semilog line)."""
  x = np.loadtxt(fileStr)
  ax = addFigAxes( fig )
  labelStr = fileStr.split(".")[0]
  # Print each column separately
  Ny = (x.shape[1]-1)
  for i in range(Ny):
    if( Ny == 1 ):
      labelXX = labelStr
    else:
      labelXX = labelStr+'['+str(i)+']'
    if( logOn ):
      # Log y axis requires non-negative values, hence abs().
      #lines=ax.loglog(x[:,0],np.abs(x[:,i+1]),'o-', linewidth=1.3 , label=labelXX)
      lines=ax.semilogy(x[:,0],np.abs(x[:,i+1]),'-', linewidth=1.1 , label=labelXX)
    else:
      lines=ax.plot(x[:,0],x[:,i+1],'o', linewidth=1.1, label=labelXX)
  ax.set_xlabel(" X ")
  ax.set_ylabel(" Y ")
  return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def userLabels( pFig ):
  """Interactively prompt for the title and axis labels of a figure."""
  #print('A) pFig.get_axes()[] ')
  ax=pFig.get_axes()[0] # Get a handle on the first axes
  #print('B) pFig.get_axes()[] ')
  #pl.rc('text', usetex=True )
  #pl.rc('xtick', labelsize=24)
  #pl.rc('ytick', labelsize=24)
  titleStr = strEntry( " Plot Title = " , " " )
  yLbl = strEntry( " Y Label = " , " Y " )
  xLbl = strEntry( " X Label = " , " X " )
  """
  fontname: [ FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
  fontsize: [ size in points ]
  fontweight: [ a numeric value in range 0-1000 | 'ultralight' | 'light' | 'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' |
  'demibold' | 'demi' | 'bold' | 'heavy' | 'extra bold' | 'black' ]
  fontstyle: [ 'normal' | 'italic' | 'oblique']
  """
  ax.set_title(titleStr, fontsize=20, fontstyle='normal', fontweight='demibold', fontname='serif')
  ax.set_ylabel(yLbl, fontsize=20, fontstyle='normal', fontweight='book', fontname='serif')
  ax.set_xlabel(xLbl, fontsize=20, fontstyle='normal', fontweight='book', fontname='serif')
  return pFig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def strEntry( questionStr , defaultStr ):
  """Prompt for a string; fall back to defaultStr if input() raises (EOF).

  NOTE(review): an empty response returns '' rather than the default,
  because input() only raises on EOF/interrupt — confirm this is intended.
  """
  try:
    oStr = input(str(questionStr))
  except:
    oStr = str(defaultStr)
  return oStr
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def numEntry( questionStr , defaultValue ):
  """Prompt for a number; fall back to float(defaultValue) on bad/empty input.

  BUGFIX: under Python 3, input() returns a string, so the old success path
  handed back raw text while the fallback path returned a float. The
  response is now converted with float(); any conversion failure (including
  an empty entry) triggers the default value instead.
  """
  try:
    value = float( input(str(questionStr)) )
  except:
    value = float(defaultValue)
  return value
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def maxValues( fileStr ):
  """Return the column-wise maxima of a whitespace-delimited data file."""
  dat = np.loadtxt(fileStr)
  return [ np.max(dat[:,j]) for j in range(dat.shape[1]) ]
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ADDED MY MONA KURPPA, 2016:
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addToPlot_marker(fig, x,y,labelStr, plotStr=["","",""], logOn=False, marker='-'):
  '''
  Add curve(s) x,y to a given plot using the given marker/line style,
  e.g. marker = '-' or '--' or 'v-'.

  y may be 1D, or 2D with one curve per column (each labeled labelStr+'[i]').
  logOn selects loglog axes. Returns fig for chaining.
  '''
  ax = addFigAxes( fig )
  y = np.asarray(y)
  # BUGFIX: the old code looped over np.size(np.shape(y)) — the number of
  # DIMENSIONS (at most 2) — instead of the number of columns, so for a 2D
  # y only the first two columns were ever drawn; additionally the label
  # suffix accumulated across iterations ('[0]', '[0][1]', ...).
  ncols = 1 if ( y.ndim == 1 ) else y.shape[1]
  for i in range(ncols):
    if( y.ndim == 1 ):
      yt = y; labelXX = labelStr
    else:
      yt = y[:,i]; labelXX = labelStr+'['+str(i)+']'
    if(logOn):
      lines=ax.loglog(x,yt,marker, linewidth=1.3, label=labelXX)
    else:
      lines=ax.plot(x,yt,marker, linewidth=1.6, label=labelXX)
  ax.set_title( plotStr[0], fontsize=22)
  ax.set_xlabel(plotStr[1], fontsize=22)
  ax.set_ylabel(plotStr[2], fontsize=22); ax.grid(True)
  return fig
| 30.500485 | 140 | 0.53603 |
5e0eb57105efe69723d39209b3a32df4b28f1ad9 | 2,404 | py | Python | legal-api/src/legal_api/resources/business/business_aliases.py | argush3/lear | 804820ea93a9ca44d1a474ce7a903bb0a808aacb | [
"Apache-2.0"
] | 8 | 2019-06-19T16:16:15.000Z | 2021-08-28T23:56:40.000Z | legal-api/src/legal_api/resources/business/business_aliases.py | argush3/lear | 804820ea93a9ca44d1a474ce7a903bb0a808aacb | [
"Apache-2.0"
] | 796 | 2019-03-07T19:25:50.000Z | 2022-03-31T20:32:57.000Z | legal-api/src/legal_api/resources/business/business_aliases.py | argush3/lear | 804820ea93a9ca44d1a474ce7a903bb0a808aacb | [
"Apache-2.0"
] | 82 | 2019-01-30T20:06:14.000Z | 2022-03-29T20:38:31.000Z | # Copyright © 2020 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retrieve the aliases for the entity."""
from http import HTTPStatus
from flask import jsonify, request
from flask_restx import Resource, cors
from legal_api.models import Alias, Business
from legal_api.utils.util import cors_preflight
from .api_namespace import API
@cors_preflight('GET,')
@API.route('/<string:identifier>/aliases', methods=['GET', 'OPTIONS'])
@API.route('/<string:identifier>/aliases/<int:alias_id>', methods=['GET', 'OPTIONS'])
class AliasResource(Resource):
    """Business Aliases service."""

    @staticmethod
    @cors.crossdomain(origin='*')
    def get(identifier, alias_id=None):
        """Return a JSON of the aliases."""
        business = Business.find_by_identifier(identifier)
        if not business:
            return jsonify({'message': f'{identifier} not found'}), HTTPStatus.NOT_FOUND

        # A single alias was requested by id.
        if alias_id:
            alias, msg, code = AliasResource._get_alias(business, alias_id)
            return jsonify(alias or msg), code

        # Otherwise list all aliases, optionally filtered by ?type=<alias type>.
        requested_type = request.args.get('type')
        if requested_type:
            matching_aliases = Alias.find_by_type(business.id, requested_type.upper())
        else:
            matching_aliases = business.aliases.all()
        return jsonify(aliases=[item.json for item in matching_aliases])

    @staticmethod
    def _get_alias(business, alias_id=None):
        """Look up one alias by id; return (payload, error message, HTTP status)."""
        if alias_id:
            found = Alias.find_by_id(alias_id=alias_id)
            if found:
                return {'alias': found.json}, None, HTTPStatus.OK
        return None, {'message': f'{business.identifier} alias not found'}, HTTPStatus.NOT_FOUND
| 32.931507 | 100 | 0.670965 |
51f419a1b371ad569e930024b72cf52eb4dc5700 | 3,004 | py | Python | hpccm/templates/downloader.py | hcheung01/hpc-container-maker | 107cd9e6a6930f67b1a261bec739fe9de85c8a49 | [
"Apache-2.0"
] | 1 | 2020-07-19T15:47:32.000Z | 2020-07-19T15:47:32.000Z | hpccm/templates/downloader.py | hcheung01/hpc-container-maker | 107cd9e6a6930f67b1a261bec739fe9de85c8a49 | [
"Apache-2.0"
] | null | null | null | hpccm/templates/downloader.py | hcheung01/hpc-container-maker | 107cd9e6a6930f67b1a261bec739fe9de85c8a49 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""downloader template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import re
import hpccm.base_object
class downloader(hpccm.base_object):
    """Template for downloading source code"""

    def __init__(self, **kwargs):
        """Initialize template"""
        # Either `repository` (git) or `url` (tarball) must be supplied,
        # but not both; validated in download_step().
        self.branch = kwargs.get('branch', None)
        self.commit = kwargs.get('commit', None)
        self.repository = kwargs.get('repository', None)
        self.src_directory = None
        self.url = kwargs.get('url', None)
        super(downloader, self).__init__(**kwargs)

    def download_step(self, recursive=False, unpack=True, wd='/var/tmp'):
        """Return the shell command string that fetches (and optionally
        unpacks) the source, and record the resulting source directory
        in ``self.src_directory``."""
        if not self.repository and not self.url:
            raise RuntimeError('must specify a repository or a URL')
        if self.repository and self.url:
            raise RuntimeError('cannot specify both a repository and a URL')

        steps = []
        if self.url:
            # Download tarball
            steps.append(hpccm.templates.wget().download_step(
                url=self.url, directory=wd))
            if unpack:
                # Unpack tarball
                tarball = posixpath.join(wd, posixpath.basename(self.url))
                steps.append(hpccm.templates.tar().untar_step(
                    tarball, directory=wd))
                match = re.search(r'(.*)(?:(?:\.tar)|(?:\.tar\.gz)'
                                  r'|(?:\.tgz)|(?:\.tar\.bz2)|(?:\.tar\.xz))$',
                                  tarball)
                if not match:
                    raise RuntimeError('unrecognized package format')
                # Set directory where to find source
                self.src_directory = posixpath.join(wd, match.group(1))
        else:
            # Clone git repository
            steps.append(hpccm.templates.git().clone_step(
                branch=self.branch, commit=self.commit, path=wd,
                recursive=recursive, repository=self.repository))
            # Set directory where to find source
            self.src_directory = posixpath.join(wd, posixpath.splitext(
                posixpath.basename(self.repository))[0])
        return ' && \\\n '.join(steps)
| 35.341176 | 79 | 0.61518 |
8e360ec4f00a160ac2568950a890e191def38b0e | 802 | py | Python | pure_contracts.py | simon-graham/pure_interface | da7bf05151c1c906c753987fbf7e3251905b4ba0 | [
"MIT"
] | 10 | 2018-08-27T04:15:53.000Z | 2021-08-18T09:45:35.000Z | pure_contracts.py | simon-graham/pure_interface | da7bf05151c1c906c753987fbf7e3251905b4ba0 | [
"MIT"
] | 35 | 2018-08-27T04:17:44.000Z | 2021-09-22T05:39:57.000Z | pure_contracts.py | tim-mitchell/pure_interface | 46a2de2574f4543980303cafd89cfcbdb643fbbb | [
"MIT"
] | 3 | 2018-09-19T21:32:01.000Z | 2020-11-17T00:58:55.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import warnings
import pure_interface
from pure_interface import InterfaceError # alias for convenience
# Build the metaclass for contract-aware interfaces.  When PyContracts is
# installed, ContractType combines pure_interface's checks with contract
# enforcement; otherwise it degrades gracefully to pure_interface alone.
try:
    import contracts  # https://pypi.python.org/pypi/PyContracts
    class ContractType(pure_interface.InterfaceType, contracts.ContractsMeta):
        # we need to unwrap the decorators because otherwise we fail the empty function body test
        # inspecting the wrapper.
        _pi_unwrap_decorators = True
        pass
except ImportError:
    warnings.warn('PyContracts not found')
    class ContractType(pure_interface.InterfaceType):
        # Same flag as the PyContracts variant so behaviour is consistent
        # whether or not the optional dependency is installed.
        _pi_unwrap_decorators = True
        pass
class ContractInterface(pure_interface.Interface, metaclass=ContractType):
    # Base class for user-defined interfaces that want contract support
    # (when available) on top of pure_interface semantics.
    pass
2887e70579375037d6d1af7a742897b79f8f3160 | 2,223 | py | Python | mopidy/utils/process.py | woutervanwijk/mopidy | 135379efafed41e9537bb0747ece42547c0f0e3d | [
"Apache-2.0"
] | 2 | 2015-06-07T19:05:30.000Z | 2021-11-17T12:01:05.000Z | mopidy/utils/process.py | woutervanwijk/mopidy | 135379efafed41e9537bb0747ece42547c0f0e3d | [
"Apache-2.0"
] | null | null | null | mopidy/utils/process.py | woutervanwijk/mopidy | 135379efafed41e9537bb0747ece42547c0f0e3d | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
import logging
import signal
try:
import _thread as thread # Python 3
except ImportError:
import thread # Python 2
import threading
from pykka import ActorDeadError
from pykka.registry import ActorRegistry
logger = logging.getLogger(__name__)
# Map signal numbers to their names, e.g. {2: 'SIGINT', ...}.  Handler
# constants such as SIG_DFL/SIG_IGN are excluded by the SIG_ filter.
# NOTE: uses dict.iteritems(), so this module is Python 2 only.
SIGNALS = dict(
    (k, v) for v, k in signal.__dict__.iteritems()
    if v.startswith('SIG') and not v.startswith('SIG_'))
def exit_process():
    """Raise KeyboardInterrupt in the main thread to shut the process down."""
    logger.debug('Interrupting main...')
    thread.interrupt_main()
    logger.debug('Interrupted main')
def exit_handler(signum, frame):
    """A :mod:`signal` handler which will exit the program on signal."""
    # `frame` is required by the signal-handler calling convention but unused.
    logger.info('Got %s signal', SIGNALS[signum])
    exit_process()
def stop_actors_by_class(klass):
    """Stop every registered actor that is an instance of ``klass``."""
    matching = ActorRegistry.get_by_class(klass)
    logger.debug('Stopping %d instance(s) of %s', len(matching), klass.__name__)
    for running_actor in matching:
        running_actor.stop()
def stop_remaining_actors():
    """Repeatedly stop all registered actors until none remain, logging
    any stragglers (which would indicate a bug)."""
    remaining = len(ActorRegistry.get_all())
    while remaining:
        logger.error(
            'There are actor threads still running, this is probably a bug')
        logger.debug(
            'Seeing %d actor and %d non-actor thread(s): %s',
            remaining, threading.active_count() - remaining,
            ', '.join(t.name for t in threading.enumerate()))
        logger.debug('Stopping %d actor(s)...', remaining)
        ActorRegistry.stop_all()
        remaining = len(ActorRegistry.get_all())
    logger.debug('All actors stopped.')
class BaseThread(threading.Thread):
    """Daemon thread base class with uniform top-level exception handling.

    Subclasses implement :meth:`run_inside_try`; any exception it raises is
    logged rather than propagated, with severity depending on the type.
    """

    def __init__(self):
        super(BaseThread, self).__init__()
        # No thread should block process from exiting
        self.daemon = True

    def run(self):
        logger.debug('%s: Starting thread', self.name)
        try:
            self.run_inside_try()
        except KeyboardInterrupt:
            # Expected during user-initiated shutdown; informational only.
            logger.info('Interrupted by user')
        except ImportError as e:
            logger.error(e)
        except ActorDeadError as e:
            logger.warning(e)
        except Exception as e:
            # Catch-all so a crashing thread is logged with traceback
            # instead of dying silently.
            logger.exception(e)
        logger.debug('%s: Exiting thread', self.name)

    def run_inside_try(self):
        # Subclasses must override with the thread's actual work.
        raise NotImplementedError
| 28.87013 | 78 | 0.654071 |
b6a88fe4a371365bfcb1d9ff677bf70cb5bcfc29 | 491 | py | Python | pyot/utils/sync.py | paaksing/Pyot | 0af13ff453ff3065427b93fadae0b92935b791c8 | [
"MIT"
] | 60 | 2020-08-29T02:04:31.000Z | 2022-03-20T14:51:24.000Z | pyot/utils/sync.py | paaksing/Pyot | 0af13ff453ff3065427b93fadae0b92935b791c8 | [
"MIT"
] | 2 | 2021-04-08T03:06:01.000Z | 2022-02-04T15:27:16.000Z | pyot/utils/sync.py | paaksing/Pyot | 0af13ff453ff3065427b93fadae0b92935b791c8 | [
"MIT"
] | 15 | 2020-11-30T23:31:11.000Z | 2022-02-11T13:31:13.000Z | from typing import Awaitable, Callable, TypeVar
from functools import wraps
import asyncio
R = TypeVar("R")
def async_to_sync(func: Callable[..., Awaitable[R]]) -> Callable[..., R]:
'''Wraps `asyncio.run` on an async function making it sync callable.'''
if not asyncio.iscoroutinefunction(func):
raise TypeError(f"{func} is not a coroutine function")
@wraps(func)
def wrapper(*args, **kwargs):
return asyncio.run(func(*args, **kwargs))
return wrapper
| 28.882353 | 75 | 0.680244 |
54a652bdf5cbd824ecd3ff319964e93cde2e191c | 4,351 | py | Python | experiments/vitchyr/end2end_sac/pointmass_from_state.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | 1 | 2020-10-23T14:40:09.000Z | 2020-10-23T14:40:09.000Z | experiments/vitchyr/end2end_sac/pointmass_from_state.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/vitchyr/end2end_sac/pointmass_from_state.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | 1 | 2021-05-27T20:38:45.000Z | 2021-05-27T20:38:45.000Z | import numpy as np
import rlkit.misc.hyperparameter as hyp
import rlkit.torch.pytorch_util as ptu
from multiworld.core.flat_goal_env import FlatGoalEnv
from multiworld.core.image_env import ImageEnv
from multiworld.envs.pygame.point2d import Point2DEnv
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.launchers.launcher_util import run_experiment
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.networks import MergedCNN, ConcatMlp
from rlkit.torch.sac.policies import MakeDeterministic, TanhGaussianPolicy
from rlkit.torch.sac.policies import TanhCNNGaussianPolicy
from rlkit.torch.sac.twin_sac import TwinSACTrainer
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
def experiment(variant):
    """Train twin-SAC on a flattened, action-normalized Point2D reach task.

    Args:
        variant: dict with keys 'env_kwargs', 'qf_kwargs', 'policy_kwargs',
            'trainer_kwargs', 'algo_kwargs' and 'replay_buffer_size'
            (see the __main__ block for an example).

    Side effects: builds the env, networks, collectors and replay buffer,
    moves the algorithm to the configured device, and runs training.
    """
    env = Point2DEnv(
        **variant['env_kwargs']
    )
    env = FlatGoalEnv(env)       # flatten obs + goal into a single vector
    env = NormalizedBoxEnv(env)  # normalize the action space
    action_dim = int(np.prod(env.action_space.shape))
    obs_dim = int(np.prod(env.observation_space.shape))

    def _make_qf():
        # Four structurally identical Q-networks are needed (two live, two
        # target); build them from one place instead of repeating the ctor.
        return ConcatMlp(
            input_size=obs_dim + action_dim,
            output_size=1,
            **variant['qf_kwargs']
        )

    qf1 = _make_qf()
    qf2 = _make_qf()
    target_qf1 = _make_qf()
    target_qf2 = _make_qf()
    policy = TanhGaussianPolicy(
        obs_dim=obs_dim,
        action_dim=action_dim,
        **variant['policy_kwargs']
    )
    # Exploration and evaluation share the same env instance, as before.
    eval_env = expl_env = env
    eval_policy = MakeDeterministic(policy)
    eval_path_collector = MdpPathCollector(
        eval_env,
        eval_policy,
    )
    expl_path_collector = MdpPathCollector(
        expl_env,
        policy,
    )
    replay_buffer = EnvReplayBuffer(
        variant['replay_buffer_size'],
        expl_env,
    )
    trainer = TwinSACTrainer(
        env=eval_env,
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        target_qf1=target_qf1,
        target_qf2=target_qf2,
        **variant['trainer_kwargs']
    )
    algorithm = TorchBatchRLAlgorithm(
        trainer=trainer,
        exploration_env=expl_env,
        evaluation_env=eval_env,
        exploration_data_collector=expl_path_collector,
        evaluation_data_collector=eval_path_collector,
        data_buffer=replay_buffer,
        **variant['algo_kwargs']
    )
    algorithm.to(ptu.device)
    algorithm.train()
if __name__ == "__main__":
    # Default hyperparameters for the point-mass twin-SAC experiment; this
    # dict is the `variant` argument consumed by experiment() above.
    variant = dict(
        env_kwargs=dict(
            fixed_goal=(0, 4),
            images_are_rgb=True,
            render_onscreen=True,
            show_goal=True,
            ball_radius=4,
        ),
        trainer_kwargs=dict(
            discount=0.99,
            soft_target_tau=5e-3,
            target_update_period=1,
            policy_lr=3E-4,
            qf_lr=3E-4,
            reward_scale=1,
            use_automatic_entropy_tuning=True,
            target_entropy=-1,
        ),
        algo_kwargs=dict(
            max_path_length=100,
            batch_size=128,
            num_epochs=100,
            num_eval_steps_per_epoch=1000,
            num_expl_steps_per_train_loop=1000,
            num_trains_per_train_loop=10,
            min_num_steps_before_training=1000,
        ),
        imsize=64,
        qf_kwargs=dict(
            hidden_sizes=[128, 128],
        ),
        policy_kwargs=dict(
            hidden_sizes=[128, 128],
        ),
        replay_buffer_size=int(1E6),
    )
    # Local single-seed run by default; the commented block below switches
    # to a 3-seed EC2 sweep.
    n_seeds = 1
    mode = 'local'
    exp_prefix = 'dev-{}'.format(
        __file__.replace('/', '-').replace('_', '-').split('.')[0]
    )
    # n_seeds = 3
    # mode = 'ec2'
    # exp_prefix = 'pointmass-state'
    # Empty search space: the sweeper yields the default variant once.
    search_space = {
    }
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
        for _ in range(n_seeds):
            run_experiment(
                experiment,
                exp_prefix=exp_prefix,
                mode=mode,
                variant=variant,
                exp_id=exp_id,
                use_gpu=True,
            )
bd5348c2a4c048215e3dbd59e2515b291356e8b7 | 4,380 | py | Python | msgraph-cli-extensions/v1_0/usersfunctions_v1_0/azext_usersfunctions_v1_0/vendored_sdks/usersfunctions/aio/operations/_users_contact_folders_child_folders_operations.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | null | null | null | msgraph-cli-extensions/v1_0/usersfunctions_v1_0/azext_usersfunctions_v1_0/vendored_sdks/usersfunctions/aio/operations/_users_contact_folders_child_folders_operations.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | null | null | null | msgraph-cli-extensions/v1_0/usersfunctions_v1_0/azext_usersfunctions_v1_0/vendored_sdks/usersfunctions/aio/operations/_users_contact_folders_child_folders_operations.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated client code — regenerate rather than hand-edit.
class UsersContactFoldersChildFoldersOperations:
    """UsersContactFoldersChildFoldersOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~users_functions.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def delta(
        self,
        user_id: str,
        contact_folder_id: str,
        **kwargs
    ) -> List["models.MicrosoftGraphContactFolder"]:
        """Invoke function delta.

        Invoke function delta.

        :param user_id: key: id of user.
        :type user_id: str
        :param contact_folder_id: key: id of contactFolder.
        :type contact_folder_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of MicrosoftGraphContactFolder, or the result of cls(response)
        :rtype: list[~users_functions.models.MicrosoftGraphContactFolder]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["models.MicrosoftGraphContactFolder"]]
        # Map auth/404/409 status codes to specific exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"

        # Construct URL
        url = self.delta.metadata['url']  # type: ignore
        path_format_arguments = {
            'user-id': self._serialize.url("user_id", user_id, 'str'),
            'contactFolder-id': self._serialize.url("contact_folder_id", contact_folder_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Issue the GET through the client pipeline (non-streaming).
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('[MicrosoftGraphContactFolder]', pipeline_response)

        if cls:
            # Custom response hook supplied by the caller via `cls=`.
            return cls(pipeline_response, deserialized, {})

        return deserialized
    delta.metadata = {'url': '/users/{user-id}/contactFolders/{contactFolder-id}/childFolders/microsoft.graph.delta()'}  # type: ignore
| 43.8 | 135 | 0.679452 |
f617dc5c18f9d6599f8f76b1879575d72a7c4d14 | 4,825 | py | Python | src/testing/gtest/test/gtest_color_test.py | zhaohaibin/BreakpadForWindows | 61cc4d4b626579766f42a3135a90c1e83adb18bf | [
"BSD-3-Clause"
] | 33 | 2015-02-19T17:58:15.000Z | 2022-03-30T03:18:40.000Z | deps/breakpad/src/testing/gtest/test/gtest_color_test.py | kans/birgo | d9aca7356933c4bb95f5649353acbc95e3083a57 | [
"Apache-2.0"
] | 5 | 2015-08-21T14:49:35.000Z | 2021-08-25T06:53:21.000Z | deps/breakpad/src/testing/gtest/test/gtest_color_test.py | kans/birgo | d9aca7356933c4bb95f5649353acbc95e3083a57 | [
"Apache-2.0"
] | 22 | 2015-01-04T10:37:36.000Z | 2021-08-16T11:46:32.000Z | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# BUG FIX: the original read `IS_WINDOWS = os.name = 'nt'` — a *chained
# assignment* that overwrote os.name with 'nt' on every platform and made
# IS_WINDOWS the (always truthy) string 'nt', so the POSIX-only branches
# in the tests below never ran.  Use comparison instead.
IS_WINDOWS = os.name == 'nt'

COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Removing an unset variable is a no-op.
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code."""
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)
  if color_flag is None:
    command = COMMAND
  else:
    command = '%s --%s=%s' % (COMMAND, COLOR_FLAG, color_flag)
  return gtest_test_utils.GetExitStatus(os.system(command))
class GTestColorTest(gtest_test_utils.TestCase):
  """Checks Google Test's color decision for every combination of TERM,
  the GTEST_COLOR environment variable, and the --gtest_color flag, by
  inspecting the helper binary's exit status via UsesColor()."""

  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      # On POSIX the decision depends solely on the terminal type.
      self.assert_(not UsesColor('dumb', None, None))
      self.assert_(not UsesColor('emacs', None, None))
      self.assert_(not UsesColor('xterm-mono', None, None))
      self.assert_(not UsesColor('unknown', None, None))
      self.assert_(not UsesColor(None, None, None))
    self.assert_(UsesColor('cygwin', None, None))
    self.assert_(UsesColor('xterm', None, None))
    self.assert_(UsesColor('xterm-color', None, None))
    self.assert_(UsesColor('xterm-256color', None, None))

  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      # 'auto' defers to the terminal type on POSIX.
      self.assert_(not UsesColor('emacs', None, 'auto'))
      self.assert_(UsesColor('xterm', None, 'auto'))
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))

  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
      self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))

  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The flag takes precedence over the environment variable.
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))

  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assert_(UsesColor('dumb', None, 'true'))
    self.assert_(UsesColor('dumb', None, 'YES'))
    self.assert_(UsesColor('dumb', None, 'T'))
    self.assert_(UsesColor('dumb', None, '1'))
    self.assert_(not UsesColor('xterm', None, 'f'))
    self.assert_(not UsesColor('xterm', None, 'false'))
    self.assert_(not UsesColor('xterm', None, '0'))
    self.assert_(not UsesColor('xterm', None, 'unknown'))
# Entry point: run the suite through Google Test's Python test harness.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 37.992126 | 76 | 0.713782 |
9fb97d257441a856cdbcb1f2f2d31c69b9b84f32 | 31 | py | Python | homeassistant/components/gstreamer/__init__.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/gstreamer/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/gstreamer/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """The gstreamer component."""
| 15.5 | 30 | 0.677419 |
66a4a2fe43f622f839275db66f1064478c14aad3 | 1,489 | py | Python | MLX90640/visualizer_test.py | Nekostone/activity-levels-monitoring | 9197924586425f3f881846742d05c48a242169ac | [
"MIT"
] | null | null | null | MLX90640/visualizer_test.py | Nekostone/activity-levels-monitoring | 9197924586425f3f881846742d05c48a242169ac | [
"MIT"
] | 5 | 2020-07-21T13:42:23.000Z | 2022-03-12T00:44:32.000Z | MLX90640/visualizer_test.py | Nekostone/activity-levels-monitoring | 9197924586425f3f881846742d05c48a242169ac | [
"MIT"
] | 1 | 2020-03-05T12:11:27.000Z | 2020-03-05T12:11:27.000Z | import matplotlib.pyplot as plt
import numpy as np
from file_utils import create_folder_if_absent, get_all_files, get_frame
from visualizer import init_heatmap, update_heatmap, write_gif_from_npy
"""
Test Visualizer
"""
def test_plot_random():
    """Continuously feed random frames in [30, 40) into the heatmap.

    Note: loops forever; intended for interactive/visual inspection only.
    """
    shape = (24, 32)
    heatmap = init_heatmap("Heatmap", shape, 30, 40)
    while True:
        random_frame = np.around(np.random.random(shape) * 10 + 30, decimals=2)
        update_heatmap(random_frame, heatmap)
def test_plot(files):
    """Replay the frames stored in `files` on a debug heatmap."""
    shape = (24, 32)
    heatmap = init_heatmap("Heatmap", shape, 30, 40, debug=True)
    for frame_file in files:
        update_heatmap(get_frame(frame_file), heatmap)
def test_plot_without_labels(files):
    """Replay frames on a label-free heatmap and save each as testpics/<i>.png.

    Fix: the output folder was (re)checked/created inside the loop on every
    iteration; the check is loop-invariant, so it is now done once up front.
    """
    array_shape = (24, 32)
    min_value = 25
    max_value = 40
    plot = init_heatmap("Heatmap", array_shape, min_value, max_value, debug=False)
    create_folder_if_absent("testpics")  # hoisted out of the loop
    for i, frame_file in enumerate(files):
        frame = get_frame(frame_file)
        update_heatmap(frame, plot)
        plt.savefig("testpics/{}.png".format(i))
def test_create_folder_if_absent(folder):
    """Thin wrapper: exercise file_utils.create_folder_if_absent on `folder`."""
    create_folder_if_absent(folder)
def test_write_gif_from_npy(files, name, start_index=0, end_index=0, fps=5):
    """Thin wrapper: render files[start_index:end_index] to a GIF named `name`."""
    write_gif_from_npy(files, name, start_index, end_index, fps)
# Script body: replay the recorded walk-in/walk-out capture on import/run.
data_path = "data/teck_walk_out_and_in"
files = get_all_files(data_path)
test_plot_without_labels(files)
# test_write_gif_from_npy(files, data_path+".gif", end_index=len(files), fps=60)
| 28.09434 | 80 | 0.752183 |
56071edbb7a2f2c878f37cc00d1d3e9f1a997317 | 11,912 | py | Python | trax/layers/combinators_test.py | rlanka4/trax | 26850af6c43180b6499d26b62dc0c324a146c7fa | [
"Apache-2.0"
] | null | null | null | trax/layers/combinators_test.py | rlanka4/trax | 26850af6c43180b6499d26b62dc0c324a146c7fa | [
"Apache-2.0"
] | null | null | null | trax/layers/combinators_test.py | rlanka4/trax | 26850af6c43180b6499d26b62dc0c324a146c7fa | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for combinator layers."""
from absl.testing import absltest
from trax.layers import activation_fns
from trax.layers import base
from trax.layers import combinators as cb
from trax.layers import core
from trax.layers import normalization
from trax.math import numpy as np
from trax.shapes import ShapeDtype
def divide_by(val):
  """Returns a simple division layer with n_in == 1 and n_out == 1."""
  # base.Fn wraps the lambda into a trax Layer dividing its input by `val`.
  return base.Fn(lambda x: x / val)
class CombinatorLayerTest(absltest.TestCase):
def test_serial_no_op(self):
layer = cb.Serial(None)
input_signature = (ShapeDtype((3, 2)), ShapeDtype((4, 7)))
expected_shape = ((3, 2), (4, 7))
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_serial_no_op_list(self):
layer = cb.Serial([])
input_signature = (ShapeDtype((3, 2)), ShapeDtype((4, 7)))
expected_shape = ((3, 2), (4, 7))
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_serial_one_in_one_out(self):
layer = cb.Serial(divide_by(3.0))
input_signature = ShapeDtype((3, 2))
expected_shape = (3, 2)
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_serial_div_div(self):
layer = cb.Serial(divide_by(2.0), divide_by(5.0))
input_signature = ShapeDtype((3, 2))
expected_shape = (3, 2)
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_serial_dup_dup(self):
layer = cb.Serial(cb.Dup(), cb.Dup())
input_signature = ShapeDtype((3, 2))
expected_shape = ((3, 2), (3, 2), (3, 2))
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_serial_with_side_outputs_div_div(self):
def some_layer():
return cb.Parallel(divide_by(2.0), divide_by(5.0))
layer = cb.SerialWithSideOutputs([some_layer(), some_layer()])
input_signature = (ShapeDtype((3, 2)), ShapeDtype((4, 2)),
ShapeDtype((5, 2)))
expected_shape = ((3, 2), (4, 2), (5, 2))
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_branch_noop_dup(self):
layer = cb.Branch([], cb.Dup())
input_signature = ShapeDtype((3, 2))
expected_shape = ((3, 2), (3, 2), (3, 2))
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_branch_add_div(self):
layer = cb.Branch(cb.Add(), divide_by(0.5))
input_signature = (ShapeDtype((3, 2)), ShapeDtype((3, 2)))
expected_shape = ((3, 2), (3, 2))
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_branch_one_layer(self):
layer = cb.Branch(divide_by(0.5))
input_signature = ShapeDtype((3, 2))
expected_shape = (3, 2)
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_select_computes_n_in(self):
layer = cb.Select([0, 0])
self.assertEqual(layer.n_in, 1)
layer = cb.Select([1, 0])
self.assertEqual(layer.n_in, 2)
layer = cb.Select([2])
self.assertEqual(layer.n_in, 3)
def test_select_given_n_in(self):
layer = cb.Select([0], n_in=2)
self.assertEqual(layer.n_in, 2)
layer = cb.Select([0], n_in=3)
self.assertEqual(layer.n_in, 3)
def test_select_first_of_3(self):
layer = cb.Select([0], n_in=3)
input_signature = (
ShapeDtype((3, 2)), ShapeDtype((4, 7)), ShapeDtype((11, 13)))
expected_shape = (3, 2)
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_select_second_of_3(self):
layer = cb.Select([1], n_in=3)
input_signature = (
ShapeDtype((3, 2)), ShapeDtype((4, 7)), ShapeDtype((11, 13)))
expected_shape = (4, 7)
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_parallel_dup_dup(self):
layer = cb.Parallel(cb.Dup(), cb.Dup())
input_signature = (ShapeDtype((3, 2)), ShapeDtype((4, 7)))
expected_shape = ((3, 2), (3, 2), (4, 7), (4, 7))
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_parallel_div_div(self):
layer = cb.Parallel(divide_by(0.5), divide_by(3.0))
input_signature = (ShapeDtype((3, 2)), ShapeDtype((4, 7)))
expected_shape = ((3, 2), (4, 7))
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_parallel_no_ops(self):
layer = cb.Parallel([], None)
input_signature = (ShapeDtype((3, 2)), ShapeDtype((4, 7)))
expected_shape = ((3, 2), (4, 7))
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_drop(self):
layer = cb.Drop()
input_signature = ShapeDtype((3, 2))
expected_shape = ()
output_shape = base.check_shape_agreement(layer, input_signature)
self.assertEqual(output_shape, expected_shape)
def test_dup(self):
    """Dup turns one input into two outputs of the same shape."""
    shape = base.check_shape_agreement(cb.Dup(), ShapeDtype((3, 2)))
    self.assertEqual(shape, ((3, 2), (3, 2)))
def test_swap(self):
    """Swap exchanges the order of its two inputs."""
    signature = (ShapeDtype((3, 2)), ShapeDtype((4, 7)))
    shape = base.check_shape_agreement(cb.Swap(), signature)
    self.assertEqual(shape, ((4, 7), (3, 2)))
def test_scan_basic(self):
    """Scan threads a carry along axis 0, producing running sums."""

    @base.layer(n_in=2, n_out=2)
    def add(x, **unused_kwargs):
        # Emit the running sum both as the step output and as the new carry.
        res = x[0] + x[1]
        return res, res
    scan_layer = cb.Scan(add())  # pylint: disable=no-value-for-parameter
    # First signature is the scanned sequence, second is the carry.
    input_signature = (ShapeDtype((3, 2, 7)), ShapeDtype((2, 7)))
    expected_shape = ((3, 2, 7), (2, 7))
    output_shape = base.check_shape_agreement(scan_layer, input_signature)
    self.assertEqual(output_shape, expected_shape)
    # Concrete run: outputs are prefix sums, final carry is the total.
    inp = (np.array([1, 2, 3]), np.array(0))
    o, v = scan_layer(inp)
    self.assertEqual(int(v), 6)
    self.assertEqual([int(x) for x in o], [1, 3, 6])
def test_scan_axis1(self):
    """Scan can run along a non-leading axis (here axis 1)."""

    @base.layer(n_in=2, n_out=2)
    def add(x, **unused_kwargs):
        # Running sum emitted as both output and new carry.
        res = x[0] + x[1]
        return res, res
    scan = cb.Scan(add(), axis=1)  # pylint: disable=no-value-for-parameter
    # Carry shape (3, 7) is the input shape with the scan axis removed.
    input_signature = (ShapeDtype((3, 2, 7)), ShapeDtype((3, 7)))
    expected_shape = ((3, 2, 7), (3, 7))
    output_shape = base.check_shape_agreement(scan, input_signature)
    self.assertEqual(output_shape, expected_shape)
def test_scan_multiinput(self):
    """Scan over a layer with two scanned inputs plus one carry."""

    @base.layer(n_in=3, n_out=2)
    def foo(x, **unused_kwargs):
        a, b, carry = x
        # Two per-step outputs; the carry is incremented each step.
        return a + b, b, carry + 1
    scan = cb.Scan(foo(), axis=1)  # pylint: disable=no-value-for-parameter
    input_signature = (ShapeDtype((3, 2, 7)), ShapeDtype((3, 2, 7)),
                       ShapeDtype((3, 7)))
    expected_shape = ((3, 2, 7), (3, 2, 7), (3, 7))
    output_shape = base.check_shape_agreement(scan, input_signature)
    self.assertEqual(output_shape, expected_shape)
def test_scan_nocarry(self):
    """With n_carry=0, Scan degenerates to an elementwise map over the axis."""

    @base.layer(n_in=1, n_out=1)
    def addone(x, **unused_kwargs):
        return x + 1
    scan_layer = cb.Scan(addone(), n_carry=0)  # pylint: disable=no-value-for-parameter
    input_signature = ShapeDtype((3, 2, 7))
    expected_shape = (3, 2, 7)
    output_shape = base.check_shape_agreement(scan_layer, input_signature)
    self.assertEqual(output_shape, expected_shape)
    # Concrete check: every element incremented, no carry in the result.
    inp = np.array([1, 2, 3])
    o = scan_layer(inp)
    self.assertEqual([int(x) for x in o], [2, 3, 4])
def test_input_signatures_serial(self):
    """_set_input_signature_recursive propagates the signature to sublayers."""
    layer = cb.Serial(divide_by(2.0), divide_by(5.0))
    # Before propagation no signature is recorded.
    self.assertIsNone(layer.input_signature)
    layer._set_input_signature_recursive(ShapeDtype((3, 2)))
    self.assertEqual(layer.input_signature, ShapeDtype((3, 2)))
    self.assertLen(layer.sublayers, 2)
    # Each elementwise sublayer receives the same (unchanged) signature.
    for sublayer in layer.sublayers:
        self.assertEqual(sublayer.input_signature, ShapeDtype((3, 2)))
def test_input_signatures_serial_batch_norm(self):
    """Signature propagation also works through a stateful layer."""
    # Include a layer that actively uses state.
    input_signature = ShapeDtype((3, 28, 28))
    batch_norm = normalization.BatchNorm()
    relu = activation_fns.Relu()
    batch_norm_and_relu = cb.Serial(batch_norm, relu)
    batch_norm_and_relu.init(input_signature)
    # Check for correct shapes entering and exiting the batch_norm layer.
    # And the code should run without errors.
    batch_norm_and_relu._set_input_signature_recursive(input_signature)
    self.assertEqual(batch_norm.input_signature, input_signature)
    self.assertEqual(relu.input_signature, input_signature)
def test_input_signatures_parallel(self):
    """Parallel splits a tuple signature across its sublayers."""
    layer = cb.Parallel(divide_by(0.5), divide_by(3.0))
    self.assertIsNone(layer.input_signature)
    layer._set_input_signature_recursive((ShapeDtype((3, 2)),
                                          ShapeDtype((4, 7))))
    self.assertEqual(layer.input_signature,
                     (ShapeDtype((3, 2)), ShapeDtype((4, 7))))
    self.assertLen(layer.sublayers, 2)
    # Each sublayer sees only its own slice of the tuple signature.
    sublayer_0, sublayer_1 = layer.sublayers
    self.assertEqual(sublayer_0.input_signature, ShapeDtype((3, 2)))
    self.assertEqual(sublayer_1.input_signature, ShapeDtype((4, 7)))
def test_state_parallel(self):
    """Parallel exposes one state entry per sublayer, as a tuple."""
    state = cb.Parallel(core.Dense(3), core.Dense(5)).state
    self.assertIsInstance(state, tuple)
    self.assertLen(state, 2)
def test_state_serial(self):
    """Serial exposes one state entry per sublayer, as a tuple."""
    state = cb.Serial(core.Dense(4), core.Dense(5), core.Dense(7)).state
    self.assertIsInstance(state, tuple)
    self.assertLen(state, 3)
def test_weights_parallel(self):
    """Parallel exposes one weights entry per sublayer, as a tuple."""
    weights = cb.Parallel(core.Dense(3), core.Dense(5)).weights
    self.assertIsInstance(weights, tuple)
    self.assertLen(weights, 2)
def test_weights_serial(self):
    """Serial exposes one weights entry per sublayer, as a tuple."""
    weights = cb.Serial(core.Dense(4), core.Dense(5), core.Dense(7)).weights
    self.assertIsInstance(weights, tuple)
    self.assertLen(weights, 3)
def test_set_rng_serial_recurse_two_levels(self):
    """Nested Serials must hand a distinct RNG stream to every sublayer."""
    dense_00 = core.Dense(2)
    dense_01 = core.Dense(2)
    dense_10 = core.Dense(2)
    dense_11 = core.Dense(2)
    layer = cb.Serial(
        cb.Serial(dense_00, dense_01),
        cb.Serial(dense_10, dense_11),
    )
    input_signature = ShapeDtype((1, 2))
    _, _ = layer.init(input_signature)
    # weights nest as (outer index, inner index); unpack each Dense's (w, b).
    weights = layer.weights
    dense_00_w, dense_00_b = weights[0][0]
    dense_01_w, dense_01_b = weights[0][1]
    dense_10_w, dense_10_b = weights[1][0]
    dense_11_w, dense_11_b = weights[1][1]
    # Setting rng's recursively during init should yield differing weights.
    self.assertFalse(np.array_equal(dense_00_w, dense_01_w))
    self.assertFalse(np.array_equal(dense_00_b, dense_01_b))
    self.assertFalse(np.array_equal(dense_10_w, dense_11_w))
    self.assertFalse(np.array_equal(dense_10_b, dense_11_b))
# Allow running this test module directly.
if __name__ == '__main__':
    absltest.main()
| 38.550162 | 87 | 0.693251 |
d891b37cfb446ea422f18499be5b3161f5520d65 | 20,315 | py | Python | sdk/python/pulumi_aws/neptune/subnet_group.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/neptune/subnet_group.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/neptune/subnet_group.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SubnetGroupArgs', 'SubnetGroup']
@pulumi.input_type
class SubnetGroupArgs:
    # NOTE: this class is generated by the Pulumi Terraform bridge (tfgen);
    # restrict hand edits to comments/docstrings so regeneration stays clean.
    def __init__(__self__, *,
                 subnet_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 name_prefix: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a SubnetGroup resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subnet_ids: A list of VPC subnet IDs.
        :param pulumi.Input[str] description: The description of the neptune subnet group. Defaults to "Managed by Pulumi".
        :param pulumi.Input[str] name: The name of the neptune subnet group. If omitted, this provider will assign a random, unique name.
        :param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `name`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider .
        """
        pulumi.set(__self__, "subnet_ids", subnet_ids)
        # A None description is replaced by the provider default before being set.
        if description is None:
            description = 'Managed by Pulumi'
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if name_prefix is not None:
            pulumi.set(__self__, "name_prefix", name_prefix)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)

    @property
    @pulumi.getter(name="subnetIds")
    def subnet_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        A list of VPC subnet IDs.
        """
        return pulumi.get(self, "subnet_ids")

    @subnet_ids.setter
    def subnet_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "subnet_ids", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the neptune subnet group. Defaults to "Managed by Pulumi".
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the neptune subnet group. If omitted, this provider will assign a random, unique name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="namePrefix")
    def name_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        Creates a unique name beginning with the specified prefix. Conflicts with `name`.
        """
        return pulumi.get(self, "name_prefix")

    @name_prefix.setter
    def name_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name_prefix", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider .
        """
        return pulumi.get(self, "tags_all")

    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)
@pulumi.input_type
class _SubnetGroupState:
    # NOTE: tfgen-generated lookup/filter state for SubnetGroup; unlike the
    # args class, every field (including the provider-computed `arn`) is
    # optional here. Hand edits limited to comments/docstrings.
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 name_prefix: Optional[pulumi.Input[str]] = None,
                 subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering SubnetGroup resources.
        :param pulumi.Input[str] arn: The ARN of the neptune subnet group.
        :param pulumi.Input[str] description: The description of the neptune subnet group. Defaults to "Managed by Pulumi".
        :param pulumi.Input[str] name: The name of the neptune subnet group. If omitted, this provider will assign a random, unique name.
        :param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `name`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subnet_ids: A list of VPC subnet IDs.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider .
        """
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        # A None description is replaced by the provider default before being set.
        if description is None:
            description = 'Managed by Pulumi'
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if name_prefix is not None:
            pulumi.set(__self__, "name_prefix", name_prefix)
        if subnet_ids is not None:
            pulumi.set(__self__, "subnet_ids", subnet_ids)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The ARN of the neptune subnet group.
        """
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the neptune subnet group. Defaults to "Managed by Pulumi".
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the neptune subnet group. If omitted, this provider will assign a random, unique name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="namePrefix")
    def name_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        Creates a unique name beginning with the specified prefix. Conflicts with `name`.
        """
        return pulumi.get(self, "name_prefix")

    @name_prefix.setter
    def name_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name_prefix", value)

    @property
    @pulumi.getter(name="subnetIds")
    def subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of VPC subnet IDs.
        """
        return pulumi.get(self, "subnet_ids")

    @subnet_ids.setter
    def subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "subnet_ids", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider .
        """
        return pulumi.get(self, "tags_all")

    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)
class SubnetGroup(pulumi.CustomResource):
    # NOTE: tfgen-generated resource class. The two @overload __init__ stubs
    # exist only for type checkers; the real dispatcher below routes to
    # _internal_init. Hand edits limited to comments/docstrings.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 name_prefix: Optional[pulumi.Input[str]] = None,
                 subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Provides an Neptune subnet group resource.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_aws as aws
        default = aws.neptune.SubnetGroup("default",
            subnet_ids=[
                aws_subnet["frontend"]["id"],
                aws_subnet["backend"]["id"],
            ],
            tags={
                "Name": "My neptune subnet group",
            })
        ```
        ## Import
        Neptune Subnet groups can be imported using the `name`, e.g.
        ```sh
        $ pulumi import aws:neptune/subnetGroup:SubnetGroup default production-subnet-group
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: The description of the neptune subnet group. Defaults to "Managed by Pulumi".
        :param pulumi.Input[str] name: The name of the neptune subnet group. If omitted, this provider will assign a random, unique name.
        :param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `name`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subnet_ids: A list of VPC subnet IDs.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider .
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SubnetGroupArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides an Neptune subnet group resource.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_aws as aws
        default = aws.neptune.SubnetGroup("default",
            subnet_ids=[
                aws_subnet["frontend"]["id"],
                aws_subnet["backend"]["id"],
            ],
            tags={
                "Name": "My neptune subnet group",
            })
        ```
        ## Import
        Neptune Subnet groups can be imported using the `name`, e.g.
        ```sh
        $ pulumi import aws:neptune/subnetGroup:SubnetGroup default production-subnet-group
        ```
        :param str resource_name: The name of the resource.
        :param SubnetGroupArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatcher: decides whether the caller used the args-object or the
        # keyword-argument overload, then forwards to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(SubnetGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       name_prefix: Optional[pulumi.Input[str]] = None,
                       subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means we are adopting an existing resource (via get());
        # only a fresh create builds the props bag below.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SubnetGroupArgs.__new__(SubnetGroupArgs)

            if description is None:
                description = 'Managed by Pulumi'
            __props__.__dict__["description"] = description
            __props__.__dict__["name"] = name
            __props__.__dict__["name_prefix"] = name_prefix
            if subnet_ids is None and not opts.urn:
                raise TypeError("Missing required property 'subnet_ids'")
            __props__.__dict__["subnet_ids"] = subnet_ids
            __props__.__dict__["tags"] = tags
            __props__.__dict__["tags_all"] = tags_all
            # arn is provider-computed; always starts unset.
            __props__.__dict__["arn"] = None
        super(SubnetGroup, __self__).__init__(
            'aws:neptune/subnetGroup:SubnetGroup',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            arn: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            name_prefix: Optional[pulumi.Input[str]] = None,
            subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'SubnetGroup':
        """
        Get an existing SubnetGroup resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] arn: The ARN of the neptune subnet group.
        :param pulumi.Input[str] description: The description of the neptune subnet group. Defaults to "Managed by Pulumi".
        :param pulumi.Input[str] name: The name of the neptune subnet group. If omitted, this provider will assign a random, unique name.
        :param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `name`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subnet_ids: A list of VPC subnet IDs.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider .
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _SubnetGroupState.__new__(_SubnetGroupState)

        __props__.__dict__["arn"] = arn
        __props__.__dict__["description"] = description
        __props__.__dict__["name"] = name
        __props__.__dict__["name_prefix"] = name_prefix
        __props__.__dict__["subnet_ids"] = subnet_ids
        __props__.__dict__["tags"] = tags
        __props__.__dict__["tags_all"] = tags_all
        return SubnetGroup(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        The ARN of the neptune subnet group.
        """
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[str]:
        """
        The description of the neptune subnet group. Defaults to "Managed by Pulumi".
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the neptune subnet group. If omitted, this provider will assign a random, unique name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="namePrefix")
    def name_prefix(self) -> pulumi.Output[str]:
        """
        Creates a unique name beginning with the specified prefix. Conflicts with `name`.
        """
        return pulumi.get(self, "name_prefix")

    @property
    @pulumi.getter(name="subnetIds")
    def subnet_ids(self) -> pulumi.Output[Sequence[str]]:
        """
        A list of VPC subnet IDs.
        """
        return pulumi.get(self, "subnet_ids")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider .
        """
        return pulumi.get(self, "tags_all")
| 43.594421 | 258 | 0.635491 |
051422514af7adcad5b056787032d72afd2b8c46 | 7,472 | py | Python | main.py | xabarass/cil-tweeter | cf6c09879ef4cd431a61b6573a5b0f9e03ea3309 | [
"MIT"
] | null | null | null | main.py | xabarass/cil-tweeter | cf6c09879ef4cd431a61b6573a5b0f9e03ea3309 | [
"MIT"
] | null | null | null | main.py | xabarass/cil-tweeter | cf6c09879ef4cd431a61b6573a5b0f9e03ea3309 | [
"MIT"
] | null | null | null | import logging
import config
from TwitterDataset import TwitterDataSet
from Vocabulary import read_vocabulary_from_file, ouput_vocabulary_statistics, RegularizingPreprocessor, LexicalPreprocessor, StemmingPreprocessor, CharacterBasedPreprocessor
from NeuralNetwork import Network, AdaBoostModel, StaticAdaBoostModel, AdaptiveAdaBoostModel
# Configure the root logger once at import time (timestamped INFO output).
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def keras_model():
    """End-to-end Keras pipeline: load tweets, build a stemming preprocessor,
    train the network, then dump misclassified samples and test predictions.

    All inputs/outputs come from the module-level `config`.
    """
    print("Starting keras_model...")

    print("Loading tweets...")
    twitter_dataset = TwitterDataSet(positive_tweets=config.positive_tweets,
                                     negative_tweets=config.negative_tweets,
                                     test_data=config.test_data)

    print("Creating vocabulary...")
    word_to_occurrence_full = read_vocabulary_from_file(**config.vocab_path_opt)
    # Alternative preprocessors kept (commented) for experimentation.
    # preprocessor = RegularizingPreprocessor(word_to_occurrence_full,**config.preprocessor_opt)
    preprocessor = LexicalPreprocessor(word_to_occurrence_full,**config.preprocessor_opt)
    # Stemming is layered on top of the lexical preprocessor.
    preprocessor = StemmingPreprocessor(preprocessor,
                                        stemming_vocabulary_filter=config.preprocessor_opt['final_vocabulary_filter'],
                                        remove_unknown_words=config.preprocessor_opt['remove_unknown_words'])
    # preprocessor=CharacterBasedPreprocessor(word_to_occurrence_full)

    print("Preprocessing training data set...")
    preprocessed_dataset = twitter_dataset.create_preprocessed_dataset(preprocessor, config.validation_split_ratio)

    print("Create keras model...")
    model = Network.create_model(
        preprocessed_dataset=preprocessed_dataset,
        word_embeddings_opt=config.word_embeddings_opt,
        model_builder=config.model_builder)

    print("Train keras model...")
    Network.train(model=model,
                  preprocessed_dataset=preprocessed_dataset,
                  training_opt=config.training_opt,
                  model_save_path=config.model_save_path,
                  result_epoch_file=config.result_epoch_file)

    print("Output misclassified samples...")
    Network.output_misclassified_samples(model=model,
                                         preprocessed_dataset=preprocessed_dataset, preprocessor=preprocessor,
                                         misclassified_samples_file=config.misclassified_samples_file)

    print("Output predictions...")
    print("\tWriting to: {}".format(config.result_file))
    Network.predict(model=model,
                    preprocessed_dataset=preprocessed_dataset,
                    prediction_file=config.result_file)
def static_adaboost_model():
    """AdaBoost pipeline with a fixed (static) ensemble configuration.

    Mirrors keras_model() but builds/trains a StaticAdaBoostModel; all
    inputs/outputs come from the module-level `config`.
    """
    print("Starting static AdaBoost sklearn_model...")

    print("Loading tweets...")
    twitter_dataset = TwitterDataSet(positive_tweets=config.positive_tweets,
                                     negative_tweets=config.negative_tweets,
                                     test_data=config.test_data)

    print("Creating vocabulary...")
    word_to_occurrence_full = read_vocabulary_from_file(**config.vocab_path_opt)
    preprocessor = RegularizingPreprocessor(word_to_occurrence_full,**config.preprocessor_opt)
    #preprocessor = LexicalPreprocessor(word_to_occurrence_full,**config.preprocessor_opt)

    print("Preprocessing training data set...")
    preprocessed_dataset = twitter_dataset.create_preprocessed_dataset(preprocessor, config.validation_split_ratio)

    print("Create static sklearn AdaBoost model...")
    model = StaticAdaBoostModel.create_model(
        preprocessed_dataset=preprocessed_dataset,
        word_embeddings_opt=config.word_embeddings_opt,
        training_opt=config.training_opt,
        adaboost_opt=config.adaboost_opt,
        model_builder=config.ensemble_model_builder)

    print("Train sklearn model...")
    AdaBoostModel.train(model=model,
                        preprocessed_dataset=preprocessed_dataset)

    # Misclassified-sample reporting is disabled for the ensemble path.
    # print("Output misclassified samples...")
    # Network.output_misclassified_samples(model=model,
    #                                      preprocessed_dataset=trivially_preprocessed_dataset,
    #                                      preprocessor=trivial_preprocessor,
    #                                      misclassified_samples_file=config.misclassified_samples_file)

    print("Output predictions...")
    print("\tWriting to: {}".format(config.result_file))
    AdaBoostModel.predict(model=model,
                          preprocessed_dataset=preprocessed_dataset,
                          prediction_file=config.result_file)
def adaptive_adaboost_model():
    """AdaBoost pipeline where each boosting round can rebuild its own
    preprocessor via `preprocessor_factory`.

    The dataset itself is only trivially preprocessed (pass-through filter);
    all inputs/outputs come from the module-level `config`.
    """
    print("Starting adaptive AdaBoost sklearn_model...")

    print("Loading tweets...")
    # deduplicate_train_tweets=False: duplicate tweets are kept here
    # (presumably so boosting sample weights see them; confirm intent).
    twitter_dataset = TwitterDataSet(positive_tweets=config.positive_tweets,
                                     negative_tweets=config.negative_tweets,
                                     test_data=config.test_data,
                                     deduplicate_train_tweets=False)

    print("Creating vocabulary...")
    word_to_occurrence_full = read_vocabulary_from_file(**config.vocab_path_opt)
    # Factory so each boosting round can construct a fresh preprocessor.
    preprocessor_factory = lambda word_to_occurrence: RegularizingPreprocessor(word_to_occurrence,
                                                                               **config.preprocessor_opt)
    # Pass-through preprocessor: keep every word, drop nothing.
    trivial_preprocessor = LexicalPreprocessor(word_to_occurrence_full=word_to_occurrence_full,
                                               final_vocabulary_filter=lambda word, occurrence: True,
                                               remove_unknown_words=False)

    print("Preprocessing training data set...")
    trivially_preprocessed_dataset = twitter_dataset.create_preprocessed_dataset(trivial_preprocessor, config.validation_split_ratio)

    print("Create sklearn model...")
    model = AdaptiveAdaBoostModel.create_model(
        twitter_dataset=twitter_dataset,
        trivially_preprocessed_dataset=trivially_preprocessed_dataset,
        preprocessor_factory=preprocessor_factory,
        word_embeddings_opt=config.word_embeddings_opt,
        training_opt = config.training_opt,
        adaboost_opt=config.adaboost_opt)

    print("Train sklearn model...")
    AdaBoostModel.train(model=model,
                        preprocessed_dataset=trivially_preprocessed_dataset,
                        model_save_path=config.model_save_path) #TODO: use boost-id to in result_epoch_file

    # Misclassified-sample reporting is disabled for the ensemble path.
    # print("Output misclassified samples...")
    # Network.output_misclassified_samples(model=model,
    #                                      preprocessed_dataset=trivially_preprocessed_dataset,
    #                                      preprocessor=trivial_preprocessor,
    #                                      misclassified_samples_file=config.misclassified_samples_file)

    print("Output predictions...")
    print("\tWriting to: {}".format(config.result_file))
    AdaBoostModel.predict(model=model,
                         preprocessed_dataset=trivially_preprocessed_dataset,
                         prediction_file=config.result_file)
def print_vocabulary_statistics():
    """Load the configured vocabulary file and dump its statistics."""
    print("Creating vocabulary...")
    vocabulary = read_vocabulary_from_file(**config.vocab_path_opt)
    ouput_vocabulary_statistics(vocabulary)
# Entry point: runs the plain Keras pipeline by default; swap in one of the
# AdaBoost variants manually to experiment.
if __name__ == '__main__':
    keras_model()
| 47.291139 | 174 | 0.662607 |
0ace26c5ee311051835f4298cda233ba855dabb7 | 2,158 | py | Python | handler_factory.py | weenerberg/guildtracker | 1957018cdaa93678b6591248905804dc4d892d04 | [
"MIT"
] | null | null | null | handler_factory.py | weenerberg/guildtracker | 1957018cdaa93678b6591248905804dc4d892d04 | [
"MIT"
] | null | null | null | handler_factory.py | weenerberg/guildtracker | 1957018cdaa93678b6591248905804dc4d892d04 | [
"MIT"
] | null | null | null | import logging
from zetas_handler import ZetasHandler
from arena_ranks_handler import ArenaRanksHandler, TestArenaRanksHandler
from units_handler import UnitsHandler
from unit_mappings_handler import UnitMappingsHandler
from zeta_reviews_handler import ZetaReviewsHandler
# Module-level logger for this factory module.
logger = logging.getLogger(__name__)
class HandlerFactory(object):
    """Builds the concrete datasource handler for a given module type.

    Every handler shares the same constructor signature apart from its class,
    so the per-guild configuration is captured once here and replayed for
    each handler that gets created.
    """

    def __init__(self, guild, ws_base_path, dbx_base_path, datasource_folder,
                 archive_folder, dbx_token, webhook, is_test):
        # Configuration shared by every handler this factory creates.
        self.__guild = guild
        self.__ws_base_path = ws_base_path
        self.__dbx_base_path = dbx_base_path
        self.__datasource_folder = datasource_folder
        self.__archive_folder = archive_folder
        self.__dbx_token = dbx_token
        self.__webhook = webhook
        self.__is_test = is_test

    def __create(self, handler_cls, url):
        # Single place that spells out the long shared constructor call,
        # instead of duplicating it across every branch of get_handler.
        return handler_cls(url, self.__guild, self.__ws_base_path,
                           self.__dbx_base_path, self.__datasource_folder,
                           self.__archive_folder, self.__dbx_token,
                           self.__webhook, self.__is_test)

    def get_handler(self, type, url):
        """Return a handler instance for `type`, or None if unknown.

        NOTE: `type` shadows the builtin but is kept for caller compatibility.
        """
        if type == ZetasHandler.MODULE_NAME:
            return self.__create(ZetasHandler, url)
        elif type == ArenaRanksHandler.MODULE_NAME:
            # Arena ranks has a dedicated test double when running in test mode.
            arena_cls = TestArenaRanksHandler if self.__is_test else ArenaRanksHandler
            return self.__create(arena_cls, url)
        elif type == UnitsHandler.MODULE_NAME:
            return self.__create(UnitsHandler, url)
        elif type == UnitMappingsHandler.MODULE_NAME:
            return self.__create(UnitMappingsHandler, url)
        elif type == ZetaReviewsHandler.MODULE_NAME:
            return self.__create(ZetaReviewsHandler, url)
        # Previously an unknown type fell through silently; keep the implicit
        # None contract for callers, but log it so misconfigurations surface.
        logger.warning("Unknown handler type requested: %s", type)
        return None
edaea9475365311fcccd10a8b914f5db0a1bc8f9 | 3,436 | py | Python | python/oneflow/compatible/single_client/test/xrt/test_scalar_op.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 3,285 | 2020-07-31T05:51:22.000Z | 2022-03-31T15:20:16.000Z | python/oneflow/compatible/single_client/test/xrt/test_scalar_op.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 2,417 | 2020-07-31T06:28:58.000Z | 2022-03-31T23:04:14.000Z | python/oneflow/compatible/single_client/test/xrt/test_scalar_op.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 520 | 2020-07-31T05:52:42.000Z | 2022-03-29T02:38:11.000Z | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
# Shared, module-level function configuration. Each job builder below mutates
# it in place (XLA / TensorRT flags) immediately before decorating its job.
config = flow.function_config()
class TestScalarOp(unittest.TestCase):
    """Base case comparing a scalar op's output with and without XLA.

    Subclasses set ``run_test = True`` and supply ``make_job`` /
    ``make_xla_job`` builders for the operator under test; this base
    class itself runs nothing.
    """
    run_test = False
    def _test_body(self, x, scalar, dtype=np.float32):
        # The base class defines no jobs, so bail out early.
        if not self.run_test:
            return
        plain_job = self.make_job(x.shape, scalar, dtype=flow.float32)
        xla_job = self.make_xla_job(x.shape, scalar, dtype=flow.float32)
        a = plain_job(x).get()
        b = xla_job(x).get()
        print("without xla: ", a)
        print("with xla", b)
        self.assertTrue(np.allclose(a.numpy(), b.numpy(), rtol=0.001, atol=1e-05))
        flow.clear_default_session()
    def _test_ones_body(self, x_shape, scalar, dtype=np.float32):
        # All-ones input of the requested shape.
        self._test_body(np.ones(x_shape, dtype=dtype), scalar, dtype=dtype)
    def _test_random_body(self, x_shape, scalar, dtype=np.float32):
        # Uniform random input of the requested shape.
        self._test_body(np.random.random(x_shape).astype(dtype), scalar, dtype=dtype)
    def test_ones_input(self):
        for shape in ((1, 10), (2, 10, 2), (2, 5, 2, 2)):
            self._test_ones_body(shape, 2.0)
    def test_random_input(self):
        for shape in ((1, 10), (2, 10, 2), (2, 5, 2, 2)):
            self._test_random_body(shape, 2.0)
class TestScalarAddOp(TestScalarOp):
    # Compares flow.math.add(x, scalar) with and without the XLA JIT.
    run_test = True
    def make_job(self, x_shape, scalar, dtype=flow.float32):
        """Build the reference job (XLA and TensorRT disabled)."""
        # NOTE(review): mutates the shared module-level ``config`` right
        # before the decorator captures it — order matters here.
        config.use_xla_jit(False)
        config.use_tensorrt(False)
        @flow.global_function(config)
        def scalar_add_job(x=flow.FixedTensorDef(x_shape, dtype=dtype)):
            return flow.math.add(x, scalar)
        return scalar_add_job
    def make_xla_job(self, x_shape, scalar, dtype=flow.float32):
        """Build the same add job with the XLA JIT enabled."""
        config.use_xla_jit(True)
        config.use_tensorrt(False)
        @flow.global_function(config)
        def xla_scalar_add_job(x=flow.FixedTensorDef(x_shape, dtype=dtype)):
            return flow.math.add(x, scalar)
        return xla_scalar_add_job
class TestScalarMulOp(TestScalarOp):
    # Compares flow.math.multiply(x, scalar) with and without the XLA JIT.
    run_test = True
    def make_job(self, x_shape, scalar, dtype=flow.float32):
        """Build the reference job (XLA and TensorRT disabled)."""
        config.use_xla_jit(False)
        config.use_tensorrt(False)
        @flow.global_function(config)
        def scalar_mul_job(x=flow.FixedTensorDef(x_shape, dtype=dtype)):
            return flow.math.multiply(x, scalar)
        return scalar_mul_job
    def make_xla_job(self, x_shape, scalar, dtype=flow.float32):
        """Build the same multiply job with the XLA JIT enabled."""
        config.use_xla_jit(True)
        config.use_tensorrt(False)
        @flow.global_function(config)
        def xla_scalar_mul_job(x=flow.FixedTensorDef(x_shape, dtype=dtype)):
            return flow.math.multiply(x, scalar)
        return xla_scalar_mul_job
# Run all test cases in this module when executed directly.
if __name__ == "__main__":
    unittest.main()
| 30.954955 | 82 | 0.679569 |
1c9c13b966ed1d8642bc1dbe94e7db88f1cf22b7 | 37,257 | py | Python | src/werkzeug/serving.py | Saif807380/werkzeug | 8ca0b2e3dc2101b95585518d85448d08b3b63568 | [
"BSD-3-Clause"
] | 1 | 2021-06-09T16:16:01.000Z | 2021-06-09T16:16:01.000Z | src/werkzeug/serving.py | Saif807380/werkzeug | 8ca0b2e3dc2101b95585518d85448d08b3b63568 | [
"BSD-3-Clause"
] | null | null | null | src/werkzeug/serving.py | Saif807380/werkzeug | 8ca0b2e3dc2101b95585518d85448d08b3b63568 | [
"BSD-3-Clause"
] | null | null | null | """A WSGI and HTTP server for use **during development only**. This
server is convenient to use, but is not designed to be particularly
stable, secure, or efficient. Use a dedicate WSGI server and HTTP
server when deploying to production.
It provides features like interactive debugging and code reloading. Use
``run_simple`` to start the server. Put this in a ``run.py`` script:
.. code-block:: python
from myapp import create_app
from werkzeug import run_simple
"""
import io
import os
import platform
import signal
import socket
import socketserver
import sys
import typing as t
import warnings
from datetime import datetime as dt
from datetime import timedelta
from datetime import timezone
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from ._internal import _log
from ._internal import _wsgi_encoding_dance
from .exceptions import InternalServerError
from .urls import uri_to_iri
from .urls import url_parse
from .urls import url_unquote
# ``ssl`` is optional; fall back to a stub that raises lazily on first
# attribute access so the rest of the server still works without TLS.
try:
    import ssl
except ImportError:
    class _SslDummy:
        def __getattr__(self, name):
            raise RuntimeError("SSL support unavailable")
    ssl = _SslDummy()  # type: ignore
# ANSI colors in log output are only safe on Windows when colorama is
# installed (it patches the console to understand escape codes).
_log_add_style = True
if os.name == "nt":
    try:
        __import__("colorama")
    except ImportError:
        _log_add_style = False
# ``os.fork`` is unavailable on some platforms (e.g. Windows); provide a
# no-op stand-in so ForkingWSGIServer can still be defined there.
can_fork = hasattr(os, "fork")
if can_fork:
    ForkingMixIn = socketserver.ForkingMixIn
else:
    class ForkingMixIn:  # type: ignore
        pass
# AF_UNIX only exists on POSIX systems.
try:
    af_unix = socket.AF_UNIX
except AttributeError:
    af_unix = None  # type: ignore
# Backlog passed to socket.listen() / request_queue_size.
LISTEN_QUEUE = 128
can_open_by_fd = not platform.system() == "Windows" and hasattr(socket, "fromfd")
# Accepted forms of the ``ssl_context`` argument used throughout this module.
_TSSLContextArg = t.Optional[
    t.Union["ssl.SSLContext", t.Tuple[str, t.Optional[str]], "te.Literal['adhoc']"]
]
if t.TYPE_CHECKING:
    import typing_extensions as te  # noqa: F401
    from wsgiref.types import WSGIApplication
    from wsgiref.types import WSGIEnvironment
    from cryptography.hazmat.primitives.asymmetric.rsa import (
        RSAPrivateKeyWithSerialization,
    )
    from cryptography.x509 import Certificate
class DechunkedInput(io.RawIOBase):
    """An input stream that decodes Transfer-Encoding 'chunked' on the fly.

    Wraps the raw request ``rfile`` and exposes the de-chunked payload
    through the standard :class:`io.RawIOBase` read interface.
    """
    def __init__(self, rfile: t.BinaryIO) -> None:
        self._rfile = rfile
        self._done = False  # True once the terminating 0-sized chunk was seen
        self._len = 0  # bytes still unread in the current chunk
    def readable(self) -> bool:
        return True
    def read_chunk_len(self) -> int:
        """Parse the next chunk-size line and return the chunk length.

        :raises OSError: if the header is malformed or the size is negative.
        """
        try:
            line = self._rfile.readline().decode("latin1")
            # RFC 7230 section 4.1.1 allows optional chunk extensions
            # after a ";" — ignore them and parse only the hex size.
            _len = int(line.split(";", 1)[0].strip(), 16)
        except ValueError:
            raise OSError("Invalid chunk header")
        if _len < 0:
            raise OSError("Negative chunk length not allowed")
        return _len
    def readinto(self, buf: bytearray) -> int:  # type: ignore
        """Fill ``buf`` with de-chunked data and return the byte count."""
        read = 0
        while not self._done and read < len(buf):
            if self._len == 0:
                # This is the first chunk or we fully consumed the previous
                # one. Read the next length of the next chunk
                self._len = self.read_chunk_len()
            if self._len == 0:
                # Found the final chunk of size 0. The stream is now exhausted,
                # but there is still a final newline that should be consumed
                self._done = True
            if self._len > 0:
                # There is data (left) in this chunk, so append it to the
                # buffer. If this operation fully consumes the chunk, this will
                # reset self._len to 0. Cap the copy at the *remaining*
                # buffer space (len(buf) - read); using len(buf) here would
                # overrun/resize the caller's buffer on a later iteration.
                n = min(len(buf) - read, self._len)
                buf[read : read + n] = self._rfile.read(n)
                self._len -= n
                read += n
            if self._len == 0:
                # Skip the terminating newline of a chunk that has been fully
                # consumed. This also applies to the 0-sized final chunk
                terminator = self._rfile.readline()
                if terminator not in (b"\n", b"\r\n", b"\r"):
                    raise OSError("Missing chunk terminating newline")
        return read
class WSGIRequestHandler(BaseHTTPRequestHandler):
    """A request handler that implements WSGI dispatching."""
    # Set by the server for each connection; used for config lookups below.
    server: "BaseWSGIServer"
    @property
    def server_version(self) -> str:  # type: ignore
        # Reported in the "Server" response header and in log lines.
        from . import __version__
        return f"Werkzeug/{__version__}"
    def make_environ(self) -> "WSGIEnvironment":
        """Build the WSGI environ dict for the current request."""
        request_url = url_parse(self.path)
        def shutdown_server():
            warnings.warn(
                "The 'environ['werkzeug.server.shutdown']' function is"
                " deprecated and will be removed in Werkzeug 2.1.",
                stacklevel=2,
            )
            self.server.shutdown_signal = True
        url_scheme = "http" if self.server.ssl_context is None else "https"
        # Normalize client_address to a (host, port) tuple; it can be empty
        # or a bare path string for Unix-socket connections.
        if not self.client_address:
            self.client_address = ("<local>", 0)
        elif isinstance(self.client_address, str):
            self.client_address = (self.client_address, 0)
        # If there was no scheme but the path started with two slashes,
        # the first segment may have been incorrectly parsed as the
        # netloc, prepend it to the path again.
        if not request_url.scheme and request_url.netloc:
            path_info = f"/{request_url.netloc}{request_url.path}"
        else:
            path_info = request_url.path
        path_info = url_unquote(path_info)
        environ: "WSGIEnvironment" = {
            "wsgi.version": (1, 0),
            "wsgi.url_scheme": url_scheme,
            "wsgi.input": self.rfile,
            "wsgi.errors": sys.stderr,
            "wsgi.multithread": self.server.multithread,
            "wsgi.multiprocess": self.server.multiprocess,
            "wsgi.run_once": False,
            "werkzeug.server.shutdown": shutdown_server,
            "werkzeug.socket": self.connection,
            "SERVER_SOFTWARE": self.server_version,
            "REQUEST_METHOD": self.command,
            "SCRIPT_NAME": "",
            "PATH_INFO": _wsgi_encoding_dance(path_info),
            "QUERY_STRING": _wsgi_encoding_dance(request_url.query),
            # Non-standard, added by mod_wsgi, uWSGI
            "REQUEST_URI": _wsgi_encoding_dance(self.path),
            # Non-standard, added by gunicorn
            "RAW_URI": _wsgi_encoding_dance(self.path),
            "REMOTE_ADDR": self.address_string(),
            "REMOTE_PORT": self.port_integer(),
            "SERVER_NAME": self.server.server_address[0],
            "SERVER_PORT": str(self.server.server_address[1]),
            "SERVER_PROTOCOL": self.request_version,
        }
        # Translate HTTP headers to CGI-style environ keys; repeated
        # headers are folded into a comma-separated value.
        for key, value in self.headers.items():
            key = key.upper().replace("-", "_")
            value = value.replace("\r\n", "")
            if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"):
                key = f"HTTP_{key}"
                if key in environ:
                    value = f"{environ[key]},{value}"
            environ[key] = value
        # Transparently de-chunk a chunked request body for the application.
        if environ.get("HTTP_TRANSFER_ENCODING", "").strip().lower() == "chunked":
            environ["wsgi.input_terminated"] = True
            environ["wsgi.input"] = DechunkedInput(environ["wsgi.input"])
        # Per RFC 2616, if the URL is absolute, use that as the host.
        # We're using "has a scheme" to indicate an absolute URL.
        if request_url.scheme and request_url.netloc:
            environ["HTTP_HOST"] = request_url.netloc
        try:
            # binary_form=False gives nicer information, but wouldn't be compatible with
            # what Nginx or Apache could return.
            peer_cert = self.connection.getpeercert(binary_form=True)
            if peer_cert is not None:
                # Nginx and Apache use PEM format.
                environ["SSL_CLIENT_CERT"] = ssl.DER_cert_to_PEM_cert(peer_cert)
        except ValueError:
            # SSL handshake hasn't finished.
            self.server.log("error", "Cannot fetch SSL peer certificate info")
        except AttributeError:
            # Not using TLS, the socket will not have getpeercert().
            pass
        return environ
    def run_wsgi(self) -> None:
        """Dispatch the current request to the WSGI application."""
        if self.headers.get("Expect", "").lower().strip() == "100-continue":
            self.wfile.write(b"HTTP/1.1 100 Continue\r\n\r\n")
        self.environ = environ = self.make_environ()
        # *_set hold what start_response() recorded; *_sent what was
        # actually written to the socket (headers go out lazily on the
        # first write so an error can still replace them).
        status_set: t.Optional[str] = None
        headers_set: t.Optional[t.List[t.Tuple[str, str]]] = None
        status_sent: t.Optional[str] = None
        headers_sent: t.Optional[t.List[t.Tuple[str, str]]] = None
        def write(data: bytes) -> None:
            nonlocal status_sent, headers_sent
            assert status_set is not None, "write() before start_response"
            assert headers_set is not None, "write() before start_response"
            if status_sent is None:
                status_sent = status_set
                headers_sent = headers_set
                try:
                    code_str, msg = status_sent.split(None, 1)
                except ValueError:
                    code_str, msg = status_sent, ""
                code = int(code_str)
                self.send_response(code, msg)
                header_keys = set()
                for key, value in headers_sent:
                    self.send_header(key, value)
                    key = key.lower()
                    header_keys.add(key)
                # Without a Content-Length the client can only detect the
                # end of the body by the connection closing.
                if not (
                    "content-length" in header_keys
                    or environ["REQUEST_METHOD"] == "HEAD"
                    or code < 200
                    or code in (204, 304)
                ):
                    self.close_connection = True
                    self.send_header("Connection", "close")
                if "server" not in header_keys:
                    self.send_header("Server", self.version_string())
                if "date" not in header_keys:
                    self.send_header("Date", self.date_time_string())
                self.end_headers()
            assert isinstance(data, bytes), "applications must write bytes"
            self.wfile.write(data)
            self.wfile.flush()
        def start_response(status, headers, exc_info=None):
            nonlocal status_set, headers_set
            if exc_info:
                try:
                    # Per PEP 3333: re-raise if headers already went out.
                    if headers_sent:
                        raise exc_info[1].with_traceback(exc_info[2])
                finally:
                    exc_info = None
            elif headers_set:
                raise AssertionError("Headers already set")
            status_set = status
            headers_set = headers
            return write
        def execute(app: "WSGIApplication") -> None:
            application_iter = app(environ, start_response)
            try:
                for data in application_iter:
                    write(data)
                # Force headers out even for an empty response body.
                if not headers_sent:
                    write(b"")
            finally:
                if hasattr(application_iter, "close"):
                    application_iter.close()  # type: ignore
        try:
            execute(self.server.app)
        except (ConnectionError, socket.timeout) as e:
            self.connection_dropped(e, environ)
        except Exception:
            if self.server.passthrough_errors:
                raise
            from .debug.tbtools import get_current_traceback
            traceback = get_current_traceback(ignore_system_exceptions=True)
            try:
                # if we haven't yet sent the headers but they are set
                # we roll back to be able to set them again.
                if status_sent is None:
                    status_set = None
                    headers_set = None
                execute(InternalServerError())
            except Exception:
                pass
            self.server.log("error", "Error on request:\n%s", traceback.plaintext)
    def handle(self) -> None:
        """Handles a request ignoring dropped connections."""
        try:
            BaseHTTPRequestHandler.handle(self)
        except (ConnectionError, socket.timeout) as e:
            self.connection_dropped(e)
        except Exception as e:
            if self.server.ssl_context is not None and is_ssl_error(e):
                self.log_error("SSL error occurred: %s", e)
            else:
                raise
        if self.server.shutdown_signal:
            self.initiate_shutdown()
    def initiate_shutdown(self) -> None:
        """Stop the server, killing the reloader parent if necessary."""
        if is_running_from_reloader():
            # Windows does not provide SIGKILL, go with SIGTERM then.
            sig = getattr(signal, "SIGKILL", signal.SIGTERM)
            os.kill(os.getpid(), sig)
        # Name-mangled access to BaseServer.__shutdown_request.
        self.server._BaseServer__shutdown_request = True  # type: ignore
    def connection_dropped(
        self, error: BaseException, environ: t.Optional["WSGIEnvironment"] = None
    ) -> None:
        """Called if the connection was closed by the client. By default
        nothing happens.
        """
    def handle_one_request(self) -> None:
        """Handle a single HTTP request."""
        self.raw_requestline = self.rfile.readline()
        if not self.raw_requestline:
            self.close_connection = True
        elif self.parse_request():
            self.run_wsgi()
    def send_response(self, code: int, message: t.Optional[str] = None) -> None:
        """Send the response header and log the response code."""
        self.log_request(code)
        if message is None:
            message = self.responses[code][0] if code in self.responses else ""
        if self.request_version != "HTTP/0.9":
            hdr = f"{self.protocol_version} {code} {message}\r\n"
            self.wfile.write(hdr.encode("ascii"))
    def version_string(self) -> str:
        # Strip the trailing space the base implementation leaves behind.
        return super().version_string().strip()
    def address_string(self) -> str:
        """Return the client address for logging."""
        if getattr(self, "environ", None):
            return self.environ["REMOTE_ADDR"]
        if not self.client_address:
            return "<local>"
        return self.client_address[0]
    def port_integer(self) -> int:
        """Return the client port as an integer."""
        return self.client_address[1]
    def log_request(self, code: t.Union[int, str] = "-", size: t.Union[int, str] = "-"):
        """Log the request line, colorized by status-code class."""
        try:
            path = uri_to_iri(self.path)
            msg = f"{self.command} {path} {self.request_version}"
        except AttributeError:
            # path isn't set if the requestline was bad
            msg = self.requestline
        code = str(code)
        if _log_add_style:
            if code[0] == "1":  # 1xx - Informational
                msg = _ansi_style(msg, "bold")
            elif code == "200":  # 2xx - Success
                pass
            elif code == "304":  # 304 - Resource Not Modified
                msg = _ansi_style(msg, "cyan")
            elif code[0] == "3":  # 3xx - Redirection
                msg = _ansi_style(msg, "green")
            elif code == "404":  # 404 - Resource Not Found
                msg = _ansi_style(msg, "yellow")
            elif code[0] == "4":  # 4xx - Client Error
                msg = _ansi_style(msg, "bold", "red")
            else:  # 5xx, or any other response
                msg = _ansi_style(msg, "bold", "magenta")
        self.log("info", '"%s" %s %s', msg, code, size)
    def log_error(self, *args) -> None:
        self.log("error", *args)
    def log_message(self, format: str, *args) -> None:
        self.log("info", format, *args)
    def log(self, type: str, message: str, *args) -> None:
        """Write a log line prefixed with the client address and time."""
        _log(
            type,
            f"{self.address_string()} - - [{self.log_date_time_string()}] {message}\n",
            *args,
        )
def _ansi_style(value, *styles):
codes = {
"bold": 1,
"red": 31,
"green": 32,
"yellow": 33,
"magenta": 35,
"cyan": 36,
}
for style in styles:
value = f"\x1b[{codes[style]}m{value}"
return f"{value}\x1b[0m"
def generate_adhoc_ssl_pair(
    cn: t.Optional[str] = None,
) -> t.Tuple["Certificate", "RSAPrivateKeyWithSerialization"]:
    """Generate a throwaway self-signed certificate and RSA key.

    :param cn: common name to embed; defaults to ``"*"``.
    :raises TypeError: if the ``cryptography`` package is not installed.
    """
    try:
        from cryptography import x509
        from cryptography.x509.oid import NameOID
        from cryptography.hazmat.backends import default_backend
        from cryptography.hazmat.primitives import hashes
        from cryptography.hazmat.primitives.asymmetric import rsa
    except ImportError:
        raise TypeError("Using ad-hoc certificates requires the cryptography library.")
    pkey = rsa.generate_private_key(
        public_exponent=65537, key_size=2048, backend=default_backend()
    )
    # A wildcard-only CN is not a valid hostname match for real clients;
    # this cert is strictly for local development use.
    if cn is None:
        cn = "*"
    subject = x509.Name(
        [
            x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Dummy Certificate"),
            x509.NameAttribute(NameOID.COMMON_NAME, cn),
        ]
    )
    # Self-signed: subject and issuer are identical; valid for one year.
    cert = (
        x509.CertificateBuilder()
        .subject_name(subject)
        .issuer_name(subject)
        .public_key(pkey.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(dt.now(timezone.utc))
        .not_valid_after(dt.now(timezone.utc) + timedelta(days=365))
        .add_extension(x509.ExtendedKeyUsage([x509.OID_SERVER_AUTH]), critical=False)
        .add_extension(x509.SubjectAlternativeName([x509.DNSName("*")]), critical=False)
        .sign(pkey, hashes.SHA256(), default_backend())
    )
    return cert, pkey
def make_ssl_devcert(
    base_path: str, host: t.Optional[str] = None, cn: t.Optional[str] = None
) -> t.Tuple[str, str]:
    """Creates an SSL key for development. This should be used instead of
    the ``'adhoc'`` key which generates a new cert on each server start.
    It accepts a path for where it should store the key and cert and
    either a host or CN. If a host is given it will use the CN
    ``*.host/CN=host``.
    For more information see :func:`run_simple`.
    .. versionadded:: 0.9
    :param base_path: the path to the certificate and key. The extension
                      ``.crt`` is added for the certificate, ``.key`` is
                      added for the key.
    :param host: the name of the host. This can be used as an alternative
                 for the `cn`.
    :param cn: the `CN` to use.
    """
    if host is not None:
        cn = f"*.{host}/CN={host}"
    cert, pkey = generate_adhoc_ssl_pair(cn=cn)
    from cryptography.hazmat.primitives import serialization
    cert_file = f"{base_path}.crt"
    pkey_file = f"{base_path}.key"
    # Write both artifacts in PEM format next to each other.
    with open(cert_file, "wb") as f:
        f.write(cert.public_bytes(serialization.Encoding.PEM))
    with open(pkey_file, "wb") as f:
        f.write(
            pkey.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.TraditionalOpenSSL,
                encryption_algorithm=serialization.NoEncryption(),
            )
        )
    return cert_file, pkey_file
def generate_adhoc_ssl_context() -> "ssl.SSLContext":
    """Generates an adhoc SSL context for the development server."""
    import tempfile
    import atexit
    cert, pkey = generate_adhoc_ssl_pair()
    from cryptography.hazmat.primitives import serialization
    # The ssl module only loads certificates from files, so write the
    # generated pair to temp files that are cleaned up at exit.
    cert_handle, cert_file = tempfile.mkstemp()
    pkey_handle, pkey_file = tempfile.mkstemp()
    atexit.register(os.remove, pkey_file)
    atexit.register(os.remove, cert_file)
    os.write(cert_handle, cert.public_bytes(serialization.Encoding.PEM))
    os.write(
        pkey_handle,
        pkey.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption(),
        ),
    )
    os.close(cert_handle)
    os.close(pkey_handle)
    ctx = load_ssl_context(cert_file, pkey_file)
    return ctx
def load_ssl_context(
    cert_file: str, pkey_file: t.Optional[str] = None, protocol: t.Optional[int] = None
) -> "ssl.SSLContext":
    """Load an SSL context from certificate and private key files.

    :param cert_file: Path of the certificate to use.
    :param pkey_file: Path of the private key to use. If not given, the
        key is obtained from the certificate file.
    :param protocol: A ``PROTOCOL`` constant from the :mod:`ssl` module.
        Defaults to :data:`ssl.PROTOCOL_TLS_SERVER`.
    """
    chosen_protocol = ssl.PROTOCOL_TLS_SERVER if protocol is None else protocol
    context = ssl.SSLContext(chosen_protocol)
    context.load_cert_chain(cert_file, pkey_file)
    return context
def is_ssl_error(error=None):
    """Checks whether the given exception (or the one currently being
    handled, if *error* is ``None``) is an SSL error."""
    exc = sys.exc_info()[1] if error is None else error
    return isinstance(exc, ssl.SSLError)
def select_address_family(host: str, port: int) -> socket.AddressFamily:
    """Pick ``AF_UNIX``, ``AF_INET6`` or ``AF_INET`` based on the
    requested host: ``unix://`` prefixes select a Unix socket, hosts
    containing ``:`` select IPv6 where available."""
    if host.startswith("unix://"):
        return socket.AF_UNIX
    looks_ipv6 = ":" in host
    if looks_ipv6 and hasattr(socket, "AF_INET6"):
        return socket.AF_INET6
    return socket.AF_INET
def get_sockaddr(
    host: str, port: int, family: socket.AddressFamily
) -> t.Union[t.Tuple[str, int], str]:
    """Return a fully qualified socket address that can be passed to
    :func:`socket.bind`."""
    if family == af_unix:
        # "unix://<path>" -> bare filesystem path for AF_UNIX binds.
        return host.split("://", 1)[1]
    try:
        info = socket.getaddrinfo(
            host, port, family, socket.SOCK_STREAM, socket.IPPROTO_TCP
        )
    except socket.gaierror:
        # Name resolution failed; hand the raw pair to bind() and let it
        # surface the error.
        return host, port
    return info[0][4]  # type: ignore
def get_interface_ip(family: socket.AddressFamily):
    """Get the IP address of an external interface. Used when binding to
    0.0.0.0 or ::1 to show a more useful URL.

    :meta private:
    """
    # Arbitrary private probe addresses; a UDP connect() never sends a
    # packet, it only resolves the outgoing interface.
    if family == socket.AF_INET6:
        probe_host, fallback = "fd31:f903:5ab5:1::1", "::1"
    else:
        probe_host, fallback = "10.253.155.219", "127.0.0.1"
    with socket.socket(family, socket.SOCK_DGRAM) as s:
        try:
            s.connect((probe_host, 58162))
        except OSError:
            return fallback
        return s.getsockname()[0]
class BaseWSGIServer(HTTPServer):
    """Simple single-threaded, single-process WSGI server."""
    multithread = False
    multiprocess = False
    request_queue_size = LISTEN_QUEUE
    def __init__(
        self,
        host: str,
        port: int,
        app: "WSGIApplication",
        handler: t.Optional[t.Type[WSGIRequestHandler]] = None,
        passthrough_errors: bool = False,
        ssl_context: t.Optional[_TSSLContextArg] = None,
        fd: t.Optional[int] = None,
    ) -> None:
        """Bind the server socket (or adopt an inherited one via *fd*)
        and optionally wrap it with TLS."""
        if handler is None:
            handler = WSGIRequestHandler
        self.address_family = select_address_family(host, port)
        if fd is not None:
            # Adopt an already-bound socket (reloader restarts); bind the
            # placeholder to port 0 and swap the real socket in below.
            real_sock = socket.fromfd(fd, self.address_family, socket.SOCK_STREAM)
            port = 0
        server_address = get_sockaddr(host, int(port), self.address_family)
        # remove socket file if it already exists
        if self.address_family == af_unix:
            server_address = t.cast(str, server_address)
            if os.path.exists(server_address):
                os.unlink(server_address)
        super().__init__(server_address, handler)  # type: ignore
        self.app = app
        self.passthrough_errors = passthrough_errors
        self.shutdown_signal = False
        self.host = host
        self.port = self.socket.getsockname()[1]
        # Patch in the original socket.
        if fd is not None:
            self.socket.close()
            self.socket = real_sock
            self.server_address = self.socket.getsockname()
        if ssl_context is not None:
            # Accept (cert, key) tuples and the "adhoc" shorthand too.
            if isinstance(ssl_context, tuple):
                ssl_context = load_ssl_context(*ssl_context)
            if ssl_context == "adhoc":
                ssl_context = generate_adhoc_ssl_context()
            self.socket = ssl_context.wrap_socket(self.socket, server_side=True)
            self.ssl_context: t.Optional["ssl.SSLContext"] = ssl_context
        else:
            self.ssl_context = None
    def log(self, type: str, message: str, *args) -> None:
        _log(type, message, *args)
    def serve_forever(self, poll_interval=0.5) -> None:
        """Serve until interrupted, always closing the socket on exit."""
        self.shutdown_signal = False
        try:
            super().serve_forever(poll_interval=poll_interval)
        except KeyboardInterrupt:
            pass
        finally:
            self.server_close()
    def handle_error(self, request: t.Any, client_address: t.Tuple[str, int]) -> None:
        # With passthrough_errors enabled, re-raise so debuggers see the
        # original exception instead of the default traceback print.
        if self.passthrough_errors:
            raise
        return super().handle_error(request, client_address)
class ThreadedWSGIServer(socketserver.ThreadingMixIn, BaseWSGIServer):
    """A WSGI server that does threading."""
    multithread = True
    # Worker threads must not block interpreter shutdown.
    daemon_threads = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
    """A WSGI server that does forking."""
    multiprocess = True
    def __init__(
        self,
        host: str,
        port: int,
        app: "WSGIApplication",
        processes: int = 40,
        handler: t.Optional[t.Type[WSGIRequestHandler]] = None,
        passthrough_errors: bool = False,
        ssl_context: t.Optional[_TSSLContextArg] = None,
        fd: t.Optional[int] = None,
    ) -> None:
        """Like :class:`BaseWSGIServer` but forks up to *processes*
        children, one per request.

        :raises ValueError: on platforms without ``os.fork``.
        """
        if not can_fork:
            raise ValueError("Your platform does not support forking.")
        BaseWSGIServer.__init__(
            self, host, port, app, handler, passthrough_errors, ssl_context, fd
        )
        # Upper bound on concurrent children (ForkingMixIn setting).
        self.max_children = processes
def make_server(
    host: str,
    port: int,
    app: "WSGIApplication",
    threaded: bool = False,
    processes: int = 1,
    request_handler: t.Optional[t.Type[WSGIRequestHandler]] = None,
    passthrough_errors: bool = False,
    ssl_context: t.Optional[_TSSLContextArg] = None,
    fd: t.Optional[int] = None,
) -> BaseWSGIServer:
    """Create a new server instance that is either threaded, or forks
    or just processes one request after another.
    """
    # The two concurrency models are mutually exclusive.
    if threaded and processes > 1:
        raise ValueError("cannot have a multithreaded and multi process server.")
    if threaded:
        return ThreadedWSGIServer(
            host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd
        )
    if processes > 1:
        return ForkingWSGIServer(
            host,
            port,
            app,
            processes,
            request_handler,
            passthrough_errors,
            ssl_context,
            fd=fd,
        )
    return BaseWSGIServer(
        host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd
    )
def is_running_from_reloader() -> bool:
    """Checks if the application is running from within the Werkzeug
    reloader subprocess.

    .. versionadded:: 0.10
    """
    # The reloader parent sets this marker before spawning the child.
    return os.environ.get("WERKZEUG_RUN_MAIN", "") == "true"
def run_simple(
    hostname: str,
    port: int,
    application: "WSGIApplication",
    use_reloader: bool = False,
    use_debugger: bool = False,
    use_evalex: bool = True,
    extra_files: t.Optional[t.Iterable[str]] = None,
    exclude_patterns: t.Optional[t.Iterable[str]] = None,
    reloader_interval: int = 1,
    reloader_type: str = "auto",
    threaded: bool = False,
    processes: int = 1,
    request_handler: t.Optional[t.Type[WSGIRequestHandler]] = None,
    static_files: t.Optional[t.Dict[str, t.Union[str, t.Tuple[str, str]]]] = None,
    passthrough_errors: bool = False,
    ssl_context: t.Optional[_TSSLContextArg] = None,
) -> None:
    """Start a WSGI application. Optional features include a reloader,
    multithreading and fork support.
    This function has a command-line interface too::
        python -m werkzeug.serving --help
    .. versionchanged:: 2.0
        Added ``exclude_patterns`` parameter.
    .. versionadded:: 0.5
       `static_files` was added to simplify serving of static files as well
       as `passthrough_errors`.
    .. versionadded:: 0.6
       support for SSL was added.
    .. versionadded:: 0.8
       Added support for automatically loading a SSL context from certificate
       file and private key.
    .. versionadded:: 0.9
       Added command-line interface.
    .. versionadded:: 0.10
       Improved the reloader and added support for changing the backend
       through the `reloader_type` parameter. See :ref:`reloader`
       for more information.
    .. versionchanged:: 0.15
        Bind to a Unix socket by passing a path that starts with
        ``unix://`` as the ``hostname``.
    :param hostname: The host to bind to, for example ``'localhost'``.
        If the value is a path that starts with ``unix://`` it will bind
        to a Unix socket instead of a TCP socket..
    :param port: The port for the server.  eg: ``8080``
    :param application: the WSGI application to execute
    :param use_reloader: should the server automatically restart the python
                         process if modules were changed?
    :param use_debugger: should the werkzeug debugging system be used?
    :param use_evalex: should the exception evaluation feature be enabled?
    :param extra_files: a list of files the reloader should watch
                        additionally to the modules. For example configuration
                        files.
    :param exclude_patterns: List of :mod:`fnmatch` patterns to ignore
        when running the reloader. For example, ignore cache files that
        shouldn't reload when updated.
    :param reloader_interval: the interval for the reloader in seconds.
    :param reloader_type: the type of reloader to use. The default is
                          auto detection. Valid values are ``'stat'`` and
                          ``'watchdog'``. See :ref:`reloader` for more
                          information.
    :param threaded: should the process handle each request in a separate
                     thread?
    :param processes: if greater than 1 then handle each request in a new process
                      up to this maximum number of concurrent processes.
    :param request_handler: optional parameter that can be used to replace
                            the default one. You can use this to replace it
                            with a different
                            :class:`~BaseHTTPServer.BaseHTTPRequestHandler`
                            subclass.
    :param static_files: a list or dict of paths for static files. This works
                         exactly like :class:`SharedDataMiddleware`, it's actually
                         just wrapping the application in that middleware before
                         serving.
    :param passthrough_errors: set this to `True` to disable the error catching.
                               This means that the server will die on errors but
                               it can be useful to hook debuggers in (pdb etc.)
    :param ssl_context: an SSL context for the connection. Either an
                        :class:`ssl.SSLContext`, a tuple in the form
                        ``(cert_file, pkey_file)``, the string ``'adhoc'`` if
                        the server should automatically create one, or ``None``
                        to disable SSL (which is the default).
    """
    if not isinstance(port, int):
        raise TypeError("port must be an integer")
    # Wrap the application with the requested middlewares before serving.
    if use_debugger:
        from .debug import DebuggedApplication
        application = DebuggedApplication(application, use_evalex)
    if static_files:
        from .middleware.shared_data import SharedDataMiddleware
        application = SharedDataMiddleware(application, static_files)
    def log_startup(sock):
        # Print the effective listen address once the socket is bound.
        all_addresses_message = (
            " * Running on all addresses.\n"
            " WARNING: This is a development server. Do not use it in"
            " a production deployment."
        )
        if sock.family == af_unix:
            _log("info", " * Running on %s (Press CTRL+C to quit)", hostname)
        else:
            if hostname == "0.0.0.0":
                _log("warning", all_addresses_message)
                display_hostname = get_interface_ip(socket.AF_INET)
            elif hostname == "::":
                _log("warning", all_addresses_message)
                display_hostname = get_interface_ip(socket.AF_INET6)
            else:
                display_hostname = hostname
            if ":" in display_hostname:
                display_hostname = f"[{display_hostname}]"
            _log(
                "info",
                " * Running on %s://%s:%d/ (Press CTRL+C to quit)",
                "http" if ssl_context is None else "https",
                display_hostname,
                sock.getsockname()[1],
            )
    def inner():
        # Runs in the (possibly restarted) serving process; an inherited
        # fd from the reloader parent is reused across restarts.
        try:
            fd = int(os.environ["WERKZEUG_SERVER_FD"])
        except (LookupError, ValueError):
            fd = None
        srv = make_server(
            hostname,
            port,
            application,
            threaded,
            processes,
            request_handler,
            passthrough_errors,
            ssl_context,
            fd=fd,
        )
        if fd is None:
            log_startup(srv.socket)
        srv.serve_forever()
    if use_reloader:
        # If we're not running already in the subprocess that is the
        # reloader we want to open up a socket early to make sure the
        # port is actually available.
        if not is_running_from_reloader():
            if port == 0 and not can_open_by_fd:
                raise ValueError(
                    "Cannot bind to a random port with enabled "
                    "reloader if the Python interpreter does "
                    "not support socket opening by fd."
                )
            # Create and destroy a socket so that any exceptions are
            # raised before we spawn a separate Python interpreter and
            # lose this ability.
            address_family = select_address_family(hostname, port)
            server_address = get_sockaddr(hostname, port, address_family)
            s = socket.socket(address_family, socket.SOCK_STREAM)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.bind(server_address)
            s.set_inheritable(True)
            # If we can open the socket by file descriptor, then we can just
            # reuse this one and our socket will survive the restarts.
            if can_open_by_fd:
                os.environ["WERKZEUG_SERVER_FD"] = str(s.fileno())
                s.listen(LISTEN_QUEUE)
                log_startup(s)
            else:
                s.close()
                if address_family == af_unix:
                    server_address = t.cast(str, server_address)
                    _log("info", "Unlinking %s", server_address)
                    os.unlink(server_address)
        from ._reloader import run_with_reloader as _rwr
        _rwr(
            inner,
            extra_files=extra_files,
            exclude_patterns=exclude_patterns,
            interval=reloader_interval,
            reloader_type=reloader_type,
        )
    else:
        inner()
def run_with_reloader(*args, **kwargs) -> None:
    """Run a process with the reloader. This is not a public API, do
    not use this function.

    .. deprecated:: 2.0
        This function will be removed in version 2.1.
    """
    # Import first so a missing module fails before the warning fires.
    from ._reloader import run_with_reloader as _rwr
    warnings.warn(
        (
            "'run_with_reloader' is a private API, it will no longer be"
            " accessible in version 2.1. Use 'run_simple' instead."
        ),
        DeprecationWarning,
        stacklevel=2,
    )
    _rwr(*args, **kwargs)
def main() -> None:
    """A simple command-line interface for :py:func:`run_simple`."""
    import argparse
    from .utils import import_string
    _log("warning", "This CLI is deprecated and will be removed in version 2.1.")
    parser = argparse.ArgumentParser(
        description="Run the given WSGI application with the development server.",
        allow_abbrev=False,
    )
    parser.add_argument(
        "-b",
        "--bind",
        dest="address",
        help="The hostname:port the app should listen on.",
    )
    parser.add_argument(
        "-d",
        "--debug",
        action="store_true",
        help="Show the interactive debugger for unhandled exceptions.",
    )
    parser.add_argument(
        "-r",
        "--reload",
        action="store_true",
        help="Reload the process if modules change.",
    )
    parser.add_argument(
        "application", help="Application to import and serve, in the form module:app."
    )
    args = parser.parse_args()
    hostname, port = None, None
    if args.address:
        # "host:port" — either side may be omitted; defaults applied below.
        hostname, _, port = args.address.partition(":")
    run_simple(
        hostname=hostname or "127.0.0.1",
        port=int(port or 5000),
        application=import_string(args.application),
        use_reloader=args.reload,
        use_debugger=args.debug,
    )
# Deprecated CLI entry point: python -m werkzeug.serving module:app
if __name__ == "__main__":
    main()
| 34.950281 | 88 | 0.599941 |
b1238b392db5b6ed0b6b276db07671ce34097a41 | 485 | py | Python | catalog/migrations/0062_auto_20210408_0026.py | Novel-Public-Health/Novel-Public-Health | fbb7dd0da64ae4fc9641097ca8056152129bd83b | [
"CC0-1.0"
] | null | null | null | catalog/migrations/0062_auto_20210408_0026.py | Novel-Public-Health/Novel-Public-Health | fbb7dd0da64ae4fc9641097ca8056152129bd83b | [
"CC0-1.0"
] | null | null | null | catalog/migrations/0062_auto_20210408_0026.py | Novel-Public-Health/Novel-Public-Health | fbb7dd0da64ae4fc9641097ca8056152129bd83b | [
"CC0-1.0"
] | null | null | null | # Generated by Django 3.1.2 on 2021-04-08 04:26
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: narrows/updates the subscription-tier
    # choices on Profile.user_type. Avoid hand-editing generated migrations.

    dependencies = [
        ('catalog', '0061_profile'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='user_type',
            # Integer-coded tiers: 1 free ... 4 high subscription, 5 admin.
            field=models.IntegerField(choices=[(1, 'free-user'), (2, 'low-subscription'), (3, 'mid-subscription'), (4, 'high-subscription'), (5, 'admin')]),
        ),
    ]
| 25.526316 | 156 | 0.589691 |
5c8f74e629743595efa08883b7ae4f2884f56205 | 6,860 | py | Python | attackgraph/testing/testENV.py | wyz2368/deepRL | b92c7dc9c6dbec5ff217162c4fcce35695eabcbb | [
"MIT"
] | null | null | null | attackgraph/testing/testENV.py | wyz2368/deepRL | b92c7dc9c6dbec5ff217162c4fcce35695eabcbb | [
"MIT"
] | null | null | null | attackgraph/testing/testENV.py | wyz2368/deepRL | b92c7dc9c6dbec5ff217162c4fcce35695eabcbb | [
"MIT"
] | null | null | null | from attackgraph import DagGenerator as dag
import random
import numpy as np
import time
import os
from attackgraph import rand_strategies_payoff as rp
from attackgraph.sample_strategy import rand_att_str_generator, rand_def_str_generator
from attackgraph import game_data
from attackgraph.util import set_global_seed
from baselines import deepq
import tensorflow as tf
from baselines.common import models
from baselines.deepq import load_action
from attackgraph.sample_strategy import sample_strategy_from_mixed
from attackgraph.parallel_sim import parallel_sim
from attackgraph import file_op as fp
from attackgraph import training
import copy
# Build a tiny 5-node / 4-edge attack-graph environment with 2 roots and 1 goal.
env = dag.Environment(numNodes=5, numEdges=4, numRoot=2, numGoals=1)

nodeset = [1,2,3,4,5]
edgeset = [(1,2),(2,3),(2,4),(5,2)]

# Per-node / per-edge attributes; list index i corresponds to node i+1
# (or edge i+1 for the E* keys). TODO confirm ordering against DagGenerator.
attr = {}
attr['nodes'] = nodeset
attr['edges'] = edgeset
attr['Nroots'] = [1,0,0,0,1]            # nodes 1 and 5 are roots
attr['Ntypes'] = [0,0,0,1,0]            # node 4 is the goal/target type
attr['NeTypes'] = [1,1,0,0,1]
attr['Nstates'] = [0,0,0,0,0]           # all nodes start inactive
attr['NaRewards'] = [0,0,0,3,0]         # attacker reward only at the goal node
attr['NdPenalties'] = [0,0,0,-3,0]      # defender penalty mirrors the goal reward
attr['NdCosts'] = [-1,-1,-1,-1,-1]
attr['NaCosts'] = [-1,-1,-1,-1,-1]
attr['NposActiveProbs'] = [0.6,0.6,0.6,0.6,0.6]
attr['NposInactiveProbs'] =[0.2,0.2,0.2,0.2,0.2]
attr['NactProbs'] = [0.8,0.8,0.8,0.8,0.8]
attr['Eeids'] = [1,2,3,4]
attr['Etypes'] = [0,0,0,0]
attr['actProb'] = [0,0.9,0.9,0]
attr['Ecosts'] = [0,-1,-1,0]

# nodeset = [1,2]
# edgeset = [(1,2)]

# Materialize the DAG, attach the attributes above, and keep a pristine copy.
env.daggenerator_wo_attrs(nodeset, edgeset)
env.specifiedDAG(attr)
env.save_graph_copy()
# env.visualize()
# print(env.G.nodes.data()[4])
# print(env.G.edges.data())

# set_global_seed(5)

env.create_players()
# Game wrapper over the env; the numeric args are presumably horizon /
# network sizes / training params — TODO confirm against Game_data signature.
game = game_data.Game_data(env,4,256,[256,256],400,0.1)
def co(game):
    """Demo that deep-copying ``game.env`` decouples it from the original.

    Prints the cloned flag (6), the original flag (7), and ``False`` because
    the two flags are distinct objects after the deepcopy.
    """
    cloned_env = copy.deepcopy(game.env)
    cloned_env.set_training_flag(6)
    game.env.set_training_flag(7)
    for flag in (cloned_env.training_flag, game.env.training_flag):
        print(flag)
    print(cloned_env.training_flag is game.env.training_flag)
co(game)  # demo run: expected output is 6, 7, False (deepcopy decouples the env)
#test attacker
# print(env.attacker.ORedges)
# print(env.attacker.ANDnodes)
# print(env.attacker.actionspace)
# print(env.attacker.get_att_canAttack_inAttackSet(env.G))
# print(env.attacker.uniform_strategy(env.G,1))
# env.attacker.update_canAttack(env.attacker.get_att_canAttack(env.G))
# print(env.attacker.canAttack)
# env.attacker.reset_att()
# print(env.attacker.canAttack)
#test defender
# print(env.defender.num_nodes)
# print(env.defender.observation)
# print(env.defender.history)
# print(env.defender.prev_obs)
# print(env.defender.defact)
# print(env.defender.prev_defact)
# print(env.defender.rand_limit)
# env.defender.defact.add(2)
# env.defender.defact.add(3)
# env.defender.defact.add(5)
#
# print(env.defender.get_def_wasDefended(env.G))
# print(env.defender.get_def_inDefenseSet(env.G))
# print(env.defender.get_def_actionspace(env.G))
# print(env.defender.uniform_strategy(env.G))
#
# env.defender.update_obs([0,0,0,0,1])
# env.defender.update_obs([0,0,0,1,1])
# print(env.defender.observation)
#
# env.defender.save_defact2prev()
#
# print('*******')
# print(env.defender.observation)
# print(env.defender.prev_obs)
# print(env.defender.defact)
# print(env.defender.prev_defact)
#
# print(env.defender.def_obs_constructor(env.G,9))
#test the environment
# a = [(1,2),(5,9),(4,3),(1,9),(2,3)]
# print(env.sortEdge(a))
# print(env.getHorizon_G())
# print(env.G.nodes.data())
# print(env.isOrType_N(5))
# print(env.G.nodes)
# for i in env.G.nodes:
# print(env.getState_N(i))
# print(env.getType_N(i))
# print(env.getActivationType_N(i))
# print(env.getAReward_N(i))
# print(env.getDPenalty_N(i))
# print(env.getDCost_N(i))
# print(env.getACost_N(i))
# print(env.getActProb_N(i))
# print(env.getposActiveProb_N(i))
# print(env.getposInactiveProb_N(i))
# print(env.G.edges)
# for i in env.G.edges:
# # print(env.getid_E(i))
# print(env.getActProb_E(i))
# env.print_N(1)
# env.print_E((2,3))
# print(env.getNumNodes())
# print(env.getNumEdges())
# for i in env.G.nodes:
# print(env.inDegree(i))
# print(env.outDegree(i))
# print(env.predecessors(i))
# print(env.successors(i))
# print(env.isDAG())
# print(env.getEdges())
# print(env.get_ANDnodes())
# print(env.get_ORnodes())
# print(env.get_ORedges())
# print(env.get_Targets())
# print(env.get_Roots())
# print(env.get_NormalEdges())
# print(env.get_att_isActive())
# print(env.get_def_hadAlert())
# print(env.get_att_actionspace())
# print(env.get_def_actionspace())
# a = [1,2,3]
# print(env.check_nodes_sorted(a))
# test mask
# def mask_generator_att(env, obses):
# batch_size = np.shape(obses)[0]
# num_nodes = env.G.number_of_nodes()
# mask = []
# for i in np.arange(batch_size):
# state = obses[i][:num_nodes]
# G_cur = env.G_reserved.copy()
#
# for j in G_cur.nodes:
# G_cur.nodes[j]['state'] = state[j-1]
#
# _mask = env.attacker.get_att_canAttack_mask(G_cur)
#
# mask.append(_mask)
# return np.array(mask)
#
# obses = np.array([[1,0,0,0,0],[0,0,0,0,1],[1,0,0,0,1]])
#
# mask = mask_generator_att(env, obses)
# print(mask)
# Test sim using random strategies
# t1 = time.time()
# # payoff_att, payoff_def, ta, tb, tc = rp.parallel_sim(env,1000)
# a,b = rp.rand_parallel_sim(env,1000)
# t2 = time.time()
#
# t3 = time.time()
# payoff_att, payoff_def, tz, tx = rp.rand_strategies_payoff(env,1000)
# t4 = time.time()
#
# # print(payoff_def,payoff_att)
#
# # print(t2-t1,t4-t3, ta, tb, tc)
#
# # print(tz,tx)
#
# print(t2-t1,t4-t3)
# # print(a,b)
#Test creating new random strategies
# rand_att_str_generator(env,game)
# rand_def_str_generator(env,game)
# Test load action
# path = os.getcwd() + "/attacker_strategies/att_str_epoch1.pkl"
# training_flag = 1
# act = load_action.load_action(path,game,training_flag)
# print(type(act))
# Test sample mixed strategy
# str_set = ['1.pkl', '2.pkl', '3.pkl']
# mix_str = np.array([0.3,0.3,0.4])
# identity = 0
# sample_strategy_from_mixed(env, str_set, mix_str, identity)
# Test sim using two networks.
# path = os.getcwd() + "/attacker_strategies/att_str_epoch1.pkl"
# training_flag = 1
# act_att = load_action.load_action(path,game,training_flag)
#
# env.attacker.att_greedy_action_builder_single(env.G,timeleft=8,nn_att=act_att)
# print(env.attacker.attact)
#
# path = os.getcwd() + "/defender_strategies/def_str_epoch1.pkl"
# training_flag = 0
# act_def = load_action.load_action(path,game,training_flag)
# env.defender.def_greedy_action_builder_single(env.G,timeleft=8,nn_def=act_def)
# print(env.defender.defact)
# act_att = 'att_str_epoch1.pkl'
# act_def = 'def_str_epoch1.pkl'
#
# out = parallel_sim(env, game, nn_att=act_att, nn_def=act_def, num_episodes=2)
# print(out)
# num_actions_def = env.act_dim_def()
# num_actions_att = env.act_dim_att()
# print(num_actions_def)
# print(num_actions_att)
# obs = env.attacker.att_obs_constructor(env.G, 8)
# print(len(obs))
# Test Training
| 26.796875 | 86 | 0.701458 |
f7920e29528988094a3f163b43956f8350a2e1c6 | 3,125 | py | Python | bspider/commands/__init__.py | littlebai3618/bspider | ff4d003cd0825247db4efe62db95f9245c0a303c | [
"BSD-3-Clause"
] | 3 | 2020-06-19T03:52:29.000Z | 2021-05-21T05:50:46.000Z | bspider/commands/__init__.py | littlebai3618/bspider | ff4d003cd0825247db4efe62db95f9245c0a303c | [
"BSD-3-Clause"
] | 2 | 2021-03-31T19:39:03.000Z | 2021-05-12T02:10:26.000Z | bspider/commands/__init__.py | littlebai3618/bspider | ff4d003cd0825247db4efe62db95f9245c0a303c | [
"BSD-3-Clause"
] | null | null | null | """
The whole command module is modelled on Scrapy's commands module.
Code referenced from Scrapy: https://github.com/scrapy/scrapy/blob/master/scrapy/commands/__init__.py
"""
import os
import re
from optparse import OptionGroup
from os.path import join
from shutil import ignore_patterns, copy2, copystat
import bspider
# Default ignore callable for template copying: skips bytecode, VCS and cache dirs.
IGNORE = ignore_patterns('*.pyc', '.svn', '__pycache__')
class BSpiderCommand(object):
    """Base class for bspider CLI commands (modelled on Scrapy's ScrapyCommand)."""

    # default settings to be used for this command instead of global defaults
    frame_settings = {}
    exitcode = 0

    def __init__(self):
        # Populated by the command-line dispatcher before run() is invoked.
        self.settings = {}  # set in scrapy.cmdline

    def _is_valid_name(self, name):
        """Return True when *name* is a legal, not-yet-existing module name."""

        def _module_dir_exists(candidate):
            # A module "exists" if a same-named directory sits in the cwd.
            existing = [
                entry
                for entry in os.listdir('.')
                if os.path.isdir(os.path.join('.', entry))
            ]
            return candidate in existing

        if re.search(r'^[_a-zA-Z]\w*$', name) is None:
            print(
                'Error: names must begin with a letter and contain only\n'
                'letters, numbers and underscores'
            )
            return False
        if _module_dir_exists(name):
            print('Error: Module %r already exists' % name)
            return False
        return True

    def _copytree(self, src, dst):
        """
        copy platform template
        """
        entries = os.listdir(src)
        skipped = IGNORE(src, entries)
        if not os.path.exists(dst):
            os.makedirs(dst)
        for entry in entries:
            if entry in skipped:
                continue
            src_path = os.path.join(src, entry)
            dst_path = os.path.join(dst, entry)
            # Recurse into directories; plain files are copied with metadata.
            if os.path.isdir(src_path):
                self._copytree(src_path, dst_path)
            else:
                copy2(src_path, dst_path)
        copystat(src, dst)

    def syntax(self):
        """
        Command syntax (preferably one-line). Do not include command name.
        """
        return ""

    def short_desc(self):
        """
        A short description of the command
        """
        return ""

    def long_desc(self):
        """A long description of the command. Return short description when not
        available. It cannot contain newlines, since contents will be formatted
        by optparser which removes newlines and wraps text.
        """
        return self.short_desc()

    def help(self):
        """An extensive help for the command. It will be shown when using the
        "help" command. It can contain newlines, since not post-formatting will
        be applied to its contents.
        """
        return self.long_desc()

    def add_options(self, parser):
        """
        Populate option parse with options available for this command
        """
        shared_group = OptionGroup(parser, "Global Options")
        parser.add_option_group(shared_group)

    def process_options(self, args, opts):
        pass

    def run(self, args, opts):
        """
        Entry point for running commands
        """
        raise NotImplementedError

    @property
    def templates_dir(self):
        # Prefer an explicitly configured template directory; otherwise fall
        # back to the templates bundled with the installed bspider package.
        try:
            return self.settings['TEMPLATES_DIR']
        except KeyError:
            return join(bspider.__path__[0], 'templates')
bcfcf0da80e009527a63eafa041c5e7d9a60327f | 11,248 | py | Python | src/oci/opsi/models/macs_managed_external_host_insight.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/opsi/models/macs_managed_external_host_insight.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/opsi/models/macs_managed_external_host_insight.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .host_insight import HostInsight
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class MacsManagedExternalHostInsight(HostInsight):
    """
    MACS-managed external host insight resource.

    Generated OCI SDK model: ``swagger_types`` and ``attribute_map`` define the
    wire (JSON) contract and must stay in sync with the service API.
    """

    #: A constant which can be used with the platform_type property of a MacsManagedExternalHostInsight.
    #: This constant has a value of "LINUX"
    PLATFORM_TYPE_LINUX = "LINUX"

    def __init__(self, **kwargs):
        """
        Initializes a new MacsManagedExternalHostInsight object with values from keyword arguments. The default value of the :py:attr:`~oci.opsi.models.MacsManagedExternalHostInsight.entity_source` attribute
        of this class is ``MACS_MANAGED_EXTERNAL_HOST`` and it should not be changed.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param entity_source:
            The value to assign to the entity_source property of this MacsManagedExternalHostInsight.
            Allowed values for this property are: "MACS_MANAGED_EXTERNAL_HOST", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type entity_source: str

        :param id:
            The value to assign to the id property of this MacsManagedExternalHostInsight.
        :type id: str

        :param compartment_id:
            The value to assign to the compartment_id property of this MacsManagedExternalHostInsight.
        :type compartment_id: str

        :param host_name:
            The value to assign to the host_name property of this MacsManagedExternalHostInsight.
        :type host_name: str

        :param host_display_name:
            The value to assign to the host_display_name property of this MacsManagedExternalHostInsight.
        :type host_display_name: str

        :param host_type:
            The value to assign to the host_type property of this MacsManagedExternalHostInsight.
        :type host_type: str

        :param processor_count:
            The value to assign to the processor_count property of this MacsManagedExternalHostInsight.
        :type processor_count: int

        :param freeform_tags:
            The value to assign to the freeform_tags property of this MacsManagedExternalHostInsight.
        :type freeform_tags: dict(str, str)

        :param defined_tags:
            The value to assign to the defined_tags property of this MacsManagedExternalHostInsight.
        :type defined_tags: dict(str, dict(str, object))

        :param system_tags:
            The value to assign to the system_tags property of this MacsManagedExternalHostInsight.
        :type system_tags: dict(str, dict(str, object))

        :param status:
            The value to assign to the status property of this MacsManagedExternalHostInsight.
            Allowed values for this property are: "DISABLED", "ENABLED", "TERMINATED", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type status: str

        :param time_created:
            The value to assign to the time_created property of this MacsManagedExternalHostInsight.
        :type time_created: datetime

        :param time_updated:
            The value to assign to the time_updated property of this MacsManagedExternalHostInsight.
        :type time_updated: datetime

        :param lifecycle_state:
            The value to assign to the lifecycle_state property of this MacsManagedExternalHostInsight.
            Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED", "NEEDS_ATTENTION", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type lifecycle_state: str

        :param lifecycle_details:
            The value to assign to the lifecycle_details property of this MacsManagedExternalHostInsight.
        :type lifecycle_details: str

        :param management_agent_id:
            The value to assign to the management_agent_id property of this MacsManagedExternalHostInsight.
        :type management_agent_id: str

        :param platform_name:
            The value to assign to the platform_name property of this MacsManagedExternalHostInsight.
        :type platform_name: str

        :param platform_type:
            The value to assign to the platform_type property of this MacsManagedExternalHostInsight.
            Allowed values for this property are: "LINUX", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type platform_type: str

        :param platform_version:
            The value to assign to the platform_version property of this MacsManagedExternalHostInsight.
        :type platform_version: str

        """
        # Declared wire types for each attribute, consumed by the SDK (de)serializer.
        self.swagger_types = {
            'entity_source': 'str',
            'id': 'str',
            'compartment_id': 'str',
            'host_name': 'str',
            'host_display_name': 'str',
            'host_type': 'str',
            'processor_count': 'int',
            'freeform_tags': 'dict(str, str)',
            'defined_tags': 'dict(str, dict(str, object))',
            'system_tags': 'dict(str, dict(str, object))',
            'status': 'str',
            'time_created': 'datetime',
            'time_updated': 'datetime',
            'lifecycle_state': 'str',
            'lifecycle_details': 'str',
            'management_agent_id': 'str',
            'platform_name': 'str',
            'platform_type': 'str',
            'platform_version': 'str'
        }

        # Mapping from Python attribute names to the JSON keys used on the wire.
        self.attribute_map = {
            'entity_source': 'entitySource',
            'id': 'id',
            'compartment_id': 'compartmentId',
            'host_name': 'hostName',
            'host_display_name': 'hostDisplayName',
            'host_type': 'hostType',
            'processor_count': 'processorCount',
            'freeform_tags': 'freeformTags',
            'defined_tags': 'definedTags',
            'system_tags': 'systemTags',
            'status': 'status',
            'time_created': 'timeCreated',
            'time_updated': 'timeUpdated',
            'lifecycle_state': 'lifecycleState',
            'lifecycle_details': 'lifecycleDetails',
            'management_agent_id': 'managementAgentId',
            'platform_name': 'platformName',
            'platform_type': 'platformType',
            'platform_version': 'platformVersion'
        }

        self._entity_source = None
        self._id = None
        self._compartment_id = None
        self._host_name = None
        self._host_display_name = None
        self._host_type = None
        self._processor_count = None
        self._freeform_tags = None
        self._defined_tags = None
        self._system_tags = None
        self._status = None
        self._time_created = None
        self._time_updated = None
        self._lifecycle_state = None
        self._lifecycle_details = None
        self._management_agent_id = None
        self._platform_name = None
        self._platform_type = None
        self._platform_version = None

        # Discriminator value is fixed for this subtype and must not be changed.
        self._entity_source = 'MACS_MANAGED_EXTERNAL_HOST'

    @property
    def management_agent_id(self):
        """
        **[Required]** Gets the management_agent_id of this MacsManagedExternalHostInsight.
        The `OCID`__ of the Management Agent

        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm


        :return: The management_agent_id of this MacsManagedExternalHostInsight.
        :rtype: str
        """
        return self._management_agent_id

    @management_agent_id.setter
    def management_agent_id(self, management_agent_id):
        """
        Sets the management_agent_id of this MacsManagedExternalHostInsight.
        The `OCID`__ of the Management Agent

        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm


        :param management_agent_id: The management_agent_id of this MacsManagedExternalHostInsight.
        :type: str
        """
        self._management_agent_id = management_agent_id

    @property
    def platform_name(self):
        """
        Gets the platform_name of this MacsManagedExternalHostInsight.
        Platform name.


        :return: The platform_name of this MacsManagedExternalHostInsight.
        :rtype: str
        """
        return self._platform_name

    @platform_name.setter
    def platform_name(self, platform_name):
        """
        Sets the platform_name of this MacsManagedExternalHostInsight.
        Platform name.


        :param platform_name: The platform_name of this MacsManagedExternalHostInsight.
        :type: str
        """
        self._platform_name = platform_name

    @property
    def platform_type(self):
        """
        Gets the platform_type of this MacsManagedExternalHostInsight.
        Platform type.

        Allowed values for this property are: "LINUX", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.


        :return: The platform_type of this MacsManagedExternalHostInsight.
        :rtype: str
        """
        return self._platform_type

    @platform_type.setter
    def platform_type(self, platform_type):
        """
        Sets the platform_type of this MacsManagedExternalHostInsight.
        Platform type.


        :param platform_type: The platform_type of this MacsManagedExternalHostInsight.
        :type: str
        """
        allowed_values = ["LINUX"]
        # Forward compatibility: unknown service-returned values are coerced
        # rather than rejected, so newer platforms don't break older SDKs.
        if not value_allowed_none_or_none_sentinel(platform_type, allowed_values):
            platform_type = 'UNKNOWN_ENUM_VALUE'
        self._platform_type = platform_type

    @property
    def platform_version(self):
        """
        Gets the platform_version of this MacsManagedExternalHostInsight.
        Platform version.


        :return: The platform_version of this MacsManagedExternalHostInsight.
        :rtype: str
        """
        return self._platform_version

    @platform_version.setter
    def platform_version(self, platform_version):
        """
        Sets the platform_version of this MacsManagedExternalHostInsight.
        Platform version.


        :param platform_version: The platform_version of this MacsManagedExternalHostInsight.
        :type: str
        """
        self._platform_version = platform_version

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Structural equality: all attributes (via __dict__) must match.
        if other is None:
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 38.389078 | 245 | 0.664207 |
1bccce4af3d315ed6234c93304e1346f81c1e070 | 192 | py | Python | tokenwiser/extension/__init__.py | Btibert3/tokenwiser | 64f78be285d24ebc53bcc6991466517aed633888 | [
"Apache-2.0"
] | 50 | 2020-11-21T04:29:34.000Z | 2022-02-12T11:16:52.000Z | tokenwiser/extension/__init__.py | Btibert3/tokenwiser | 64f78be285d24ebc53bcc6991466517aed633888 | [
"Apache-2.0"
] | 33 | 2020-11-26T11:03:52.000Z | 2021-12-04T20:27:44.000Z | tokenwiser/extension/__init__.py | Btibert3/tokenwiser | 64f78be285d24ebc53bcc6991466517aed633888 | [
"Apache-2.0"
] | 7 | 2021-04-07T08:54:34.000Z | 2021-11-11T00:18:17.000Z | from ._extension import (
attach_hyphen_extension,
attach_sklearn_extension,
sklearn_method,
)
__all__ = ["attach_hyphen_extension", "attach_sklearn_extension", "sklearn_method"]
| 24 | 83 | 0.776042 |
c43e3f52ce4dfe4d855ef2f2639b10ae02b033d5 | 16,706 | py | Python | posthog/api/signup.py | brave-care/posthog | 8edd14a16ad936fb241dcf856925e9f2ea87cba4 | [
"MIT"
] | 58 | 2020-08-26T16:26:18.000Z | 2022-03-30T05:32:23.000Z | posthog/api/signup.py | brave-care/posthog | 8edd14a16ad936fb241dcf856925e9f2ea87cba4 | [
"MIT"
] | 15 | 2021-11-09T10:49:34.000Z | 2021-11-09T16:11:01.000Z | posthog/api/signup.py | brave-care/posthog | 8edd14a16ad936fb241dcf856925e9f2ea87cba4 | [
"MIT"
] | 13 | 2020-09-08T13:27:07.000Z | 2022-03-19T17:27:10.000Z | from typing import Any, Dict, Optional, Union, cast
import posthoganalytics
from django import forms
from django.conf import settings
from django.contrib.auth import login, password_validation
from django.core.exceptions import ValidationError
from django.db import IntegrityError, transaction
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.urls.base import reverse
from rest_framework import exceptions, generics, permissions, response, serializers, validators
from sentry_sdk import capture_exception
from social_core.backends.base import BaseAuth
from social_core.pipeline.partial import partial
from social_django.strategy import DjangoStrategy
from posthog.api.shared import UserBasicSerializer
from posthog.demo import create_demo_team
from posthog.event_usage import report_user_joined_organization, report_user_signed_up
from posthog.models import Organization, Team, User
from posthog.models.organization import OrganizationInvite
from posthog.permissions import CanCreateOrg
from posthog.tasks import user_identify
from posthog.utils import get_can_create_org, mask_email_address
class SignupSerializer(serializers.Serializer):
    """Handles fresh account signup: bootstraps user + organization (+ team),
    logs the new user in, and reports signup analytics."""

    first_name: serializers.Field = serializers.CharField(max_length=128)
    email: serializers.Field = serializers.EmailField(
        validators=[
            validators.UniqueValidator(
                queryset=User.objects.all(), message="There is already an account with this email address."
            )
        ]
    )
    # Nullable to support passwordless flows (e.g. SSO-backed signups).
    password: serializers.Field = serializers.CharField(allow_null=True)
    organization_name: serializers.Field = serializers.CharField(max_length=128, required=False, allow_blank=True)
    email_opt_in: serializers.Field = serializers.BooleanField(default=True)

    def validate_password(self, value):
        """Run Django's password validators; None passes through (passwordless signup)."""
        if value is not None:
            password_validation.validate_password(value)
        return value

    def create(self, validated_data, **kwargs):
        """Bootstrap organization/team/user, log the user in, report analytics.

        NOTE: statement order matters — the "first user" check must run before
        bootstrap creates the new user.
        """
        is_instance_first_user: bool = not User.objects.exists()

        # Default the organization name to the signer-upper's first name.
        organization_name = validated_data.pop("organization_name", validated_data["first_name"])

        self._organization, self._team, self._user = User.objects.bootstrap(
            organization_name=organization_name, create_team=self.create_team, **validated_data,
        )
        user = self._user

        # Temp (due to FF-release [`new-onboarding-2822`]): Activate the setup/onboarding process if applicable
        if self.enable_new_onboarding(user):
            self._organization.setup_section_2_completed = False
            self._organization.save()

        login(
            self.context["request"], user, backend="django.contrib.auth.backends.ModelBackend",
        )

        report_user_signed_up(
            user.distinct_id,
            is_instance_first_user=is_instance_first_user,
            is_organization_first_user=True,
            new_onboarding_enabled=(not self._organization.setup_section_2_completed),
            backend_processor="OrganizationSignupSerializer",
            user_analytics_metadata=user.get_analytics_metadata(),
            org_analytics_metadata=user.organization.get_analytics_metadata() if user.organization else None,
        )

        return user

    def create_team(self, organization: Organization, user: User) -> Team:
        """Create the org's first team: a demo team under the new onboarding flag,
        otherwise a regular team with default data."""
        if self.enable_new_onboarding(user):
            return create_demo_team(organization=organization)
        else:
            return Team.objects.create_with_data(user=user, organization=organization)

    def to_representation(self, instance) -> Dict:
        """Serialize the new user plus the post-signup redirect destination."""
        data = UserBasicSerializer(instance=instance).data
        data["redirect_url"] = "/personalization" if self.enable_new_onboarding() else "/ingestion"
        return data

    def enable_new_onboarding(self, user: Optional[User] = None) -> bool:
        """Feature-flag check for the new onboarding flow; defaults to the just-created user."""
        if user is None:
            user = self._user
        return posthoganalytics.feature_enabled("new-onboarding-2822", user.distinct_id)
class SignupViewset(generics.CreateAPIView):
    """POST endpoint for fresh account signup (delegates to SignupSerializer)."""

    serializer_class = SignupSerializer
    # Enables E2E testing of signup flow
    permission_classes = (permissions.AllowAny,) if settings.E2E_TESTING else (CanCreateOrg,)
class InviteSignupSerializer(serializers.Serializer):
    """Handles signup (or organization join) via an organization invite.

    Creates a new user when the request is unauthenticated; otherwise joins
    the authenticated user to the invite's organization.
    """

    first_name: serializers.Field = serializers.CharField(max_length=128, required=False)
    password: serializers.Field = serializers.CharField(required=False)
    email_opt_in: serializers.Field = serializers.BooleanField(default=True)

    def validate_password(self, value):
        """Run Django's password validators on the proposed password."""
        password_validation.validate_password(value)
        return value

    def to_representation(self, instance):
        """Serialize the resulting user with the shared basic user serializer."""
        serializer = UserBasicSerializer(instance=instance)
        return serializer.data

    def validate(self, data: Dict[str, Any]) -> Dict[str, Any]:
        if "request" not in self.context or not self.context["request"].user.is_authenticated:
            # If there's no authenticated user and we're creating a new one, attributes are required.
            for attr in ["first_name", "password"]:
                if not data.get(attr):
                    raise serializers.ValidationError({attr: "This field is required."}, code="required")

        return data

    def create(self, validated_data, **kwargs):
        """Consume the invite: create-and-login a new user, or join the current one."""
        if "view" not in self.context or not self.context["view"].kwargs.get("invite_id"):
            raise serializers.ValidationError("Please provide an invite ID to continue.")

        user: Optional[User] = None
        is_new_user: bool = False

        if self.context["request"].user.is_authenticated:
            user = cast(User, self.context["request"].user)

        invite_id = self.context["view"].kwargs.get("invite_id")

        try:
            invite: OrganizationInvite = OrganizationInvite.objects.select_related("organization").get(id=invite_id)
        # Also catch ValidationError: a malformed (non-UUID) invite_id makes the
        # UUID field lookup raise django.core.exceptions.ValidationError, which
        # previously escaped as a 500. Mirrors InviteSignupViewset.get and
        # process_social_invite_signup.
        except (OrganizationInvite.DoesNotExist, ValidationError):
            raise serializers.ValidationError("The provided invite ID is not valid.")

        with transaction.atomic():
            if not user:
                is_new_user = True
                try:
                    user = User.objects.create_user(
                        invite.target_email,
                        validated_data.pop("password"),
                        validated_data.pop("first_name"),
                        **validated_data,
                    )
                except IntegrityError:
                    raise serializers.ValidationError(
                        f"There already exists an account with email address {invite.target_email}. Please log in instead."
                    )

            try:
                invite.use(user)
            except ValueError as e:
                raise serializers.ValidationError(str(e))

        if is_new_user:
            login(
                self.context["request"], user, backend="django.contrib.auth.backends.ModelBackend",
            )

            report_user_signed_up(
                user.distinct_id,
                is_instance_first_user=False,
                is_organization_first_user=False,
                new_onboarding_enabled=(not invite.organization.setup_section_2_completed),
                backend_processor="OrganizationInviteSignupSerializer",
                user_analytics_metadata=user.get_analytics_metadata(),
                org_analytics_metadata=user.organization.get_analytics_metadata() if user.organization else None,
            )

        else:
            report_user_joined_organization(organization=invite.organization, current_user=user)

        # Update user props
        user_identify.identify_task.delay(user_id=user.id)

        return user
class InviteSignupViewset(generics.CreateAPIView):
    """Endpoints for invite-based signup: GET pre-validates an invite,
    POST (via InviteSignupSerializer) consumes it."""

    serializer_class = InviteSignupSerializer
    permission_classes = (permissions.AllowAny,)

    def get(self, request, *args, **kwargs):
        """
        Pre-validates an invite code.
        """
        invite_id = kwargs.get("invite_id")

        if not invite_id:
            raise exceptions.ValidationError("Please provide an invite ID to continue.")

        try:
            invite: OrganizationInvite = OrganizationInvite.objects.get(id=invite_id)
        # ValidationError covers malformed (non-UUID) invite IDs.
        except (OrganizationInvite.DoesNotExist, ValidationError):
            raise serializers.ValidationError("The provided invite ID is not valid.")

        user = request.user if request.user.is_authenticated else None

        invite.validate(user=user)

        return response.Response(
            {
                "id": str(invite.id),
                # Masked so the full address isn't exposed to unauthenticated callers.
                "target_email": mask_email_address(invite.target_email),
                "first_name": invite.first_name,
                "organization_name": invite.organization.name,
            }
        )
## Social Signup
## views & serializers
class SocialSignupSerializer(serializers.Serializer):
    """
    Signup serializer when the account is created using social authentication.
    Pre-processes information not obtained from SSO provider to create organization.
    """

    organization_name: serializers.Field = serializers.CharField(max_length=128)
    email_opt_in: serializers.Field = serializers.BooleanField(default=True)

    def create(self, validated_data, **kwargs):
        """Stash org details in the session and hand off to the social-auth pipeline."""
        request = self.context["request"]

        if not request.session.get("backend"):
            raise serializers.ValidationError(
                "Inactive social login session. Go to /login and log in before continuing.",
            )

        # The social-auth pipeline picks these up later (see social_create_user).
        request.session["organization_name"] = validated_data["organization_name"]
        request.session["email_opt_in"] = validated_data["email_opt_in"]
        request.session.set_expiry(3600)  # 1 hour to complete process

        return {"continue_url": reverse("social:complete", args=[request.session["backend"]])}

    def to_representation(self, instance: Any) -> Any:
        # `instance` is the plain dict returned by create(); pass it through as-is.
        return self.instance
class SocialSignupViewset(generics.CreateAPIView):
    """POST endpoint completing org details for SSO-initiated signups."""

    serializer_class = SocialSignupSerializer
    permission_classes = (CanCreateOrg,)
class TeamInviteSurrogate:
    """This reimplements parts of OrganizationInvite that enable compatibility with the old Team.signup_token."""

    def __init__(self, signup_token: str):
        # Resolve the legacy token to its team and keep only the organization.
        matching_team = Team.objects.select_related("organization").get(signup_token=signup_token)
        self.organization = matching_team.organization

    def validate(*args, **kwargs) -> bool:
        # Legacy team tokens carry no per-user restrictions; always valid.
        return True

    def use(self, user: Any, *args, **kwargs) -> None:
        # Joining the organization is the only effect; the token is reusable.
        user.join(organization=self.organization)
class CompanyNameForm(forms.Form):
    """Form for the (deprecated) social-signup company page; camelCase names
    match the template's input fields."""

    companyName = forms.CharField(max_length=64)
    emailOptIn = forms.BooleanField(required=False)
def finish_social_signup(request):
    """
    TODO: DEPRECATED in favor of posthog.api.signup.SocialSignupSerializer
    """
    if not get_can_create_org():
        return redirect("/login?error=no_new_organizations")

    if request.method == "POST":
        form = CompanyNameForm(request.POST)
        if form.is_valid():
            # Stash org details in the session for the social-auth pipeline,
            # then resume the pipeline for the backend recorded at login.
            request.session["organization_name"] = form.cleaned_data["companyName"]
            request.session["email_opt_in"] = bool(form.cleaned_data["emailOptIn"])
            return redirect(reverse("social:complete", args=[request.session["backend"]]))
    else:
        form = CompanyNameForm()
    # GET, or a POST with an invalid form, re-renders the company-name page.
    return render(request, "signup_to_organization_company.html", {"user_name": request.session["user_name"]})
def process_social_invite_signup(
    strategy: DjangoStrategy, invite_id: str, email: str, full_name: str
) -> Union[HttpResponse, User]:
    """Handle a social-auth signup driven by an invite ID.

    Resolves the ID as an OrganizationInvite (falling back to a legacy team
    signup token), validates it, creates the user, and consumes the invite.
    Returns the new User on success, or a redirect to the signup page with an
    error code on failure.
    """
    try:
        invite: Union[OrganizationInvite, TeamInviteSurrogate] = OrganizationInvite.objects.select_related(
            "organization",
        ).get(id=invite_id)
    # ValidationError covers malformed (non-UUID) invite IDs.
    except (OrganizationInvite.DoesNotExist, ValidationError):
        try:
            invite = TeamInviteSurrogate(invite_id)
        except Team.DoesNotExist:
            return redirect(f"/signup/{invite_id}?error_code=invalid_invite&source=social_create_user")

    try:
        invite.validate(user=None, email=email)
    except exceptions.ValidationError as e:
        return redirect(
            f"/signup/{invite_id}?error_code={e.get_codes()[0]}&error_detail={e.args[0]}&source=social_create_user"
        )

    try:
        user = strategy.create_user(email=email, first_name=full_name, password=None)
    except Exception as e:
        capture_exception(e)
        # BUGFIX: the second string segment used to sit on its own line as a
        # dead expression statement, silently truncating the message. The
        # parentheses make it a single implicitly-concatenated literal.
        message = (
            "Account unable to be created. This account may already exist. Please try again"
            " or use different credentials."
        )
        return redirect(f"/signup/{invite_id}?error_code=unknown&error_detail={message}&source=social_create_user")

    invite.use(user, prevalidated=True)

    return user
def process_social_domain_whitelist_signup(email: str, full_name: str) -> Optional[User]:
    """Auto-provision a user when their email domain is whitelisted by an
    organization. Returns ``None`` when the feature does not apply or no
    organization whitelists the domain."""
    # TODO: This feature is currently available only in self-hosted
    if settings.MULTI_TENANCY:
        return None

    candidate_domain = email.rsplit("@", 1)[-1]
    # TODO: Handle multiple organizations with the same whitelisted domain
    matching_org: Optional[Organization] = Organization.objects.filter(
        domain_whitelist__contains=[candidate_domain]
    ).first()
    if matching_org is None:
        return None

    return User.objects.create_and_join(
        organization=matching_org, email=email, password=None, first_name=full_name
    )
def process_social_saml_signup(backend: BaseAuth, email: str, full_name: str) -> Optional[User]:
    """
    With SAML we have automatic provisioning because the IdP should already handle the logic of which users to allow to
    login.
    """
    if backend.name != "saml":
        return None

    # Join the oldest non-internal organization on the instance.
    default_org = (
        Organization.objects.filter(for_internal_metrics=False).order_by("created_at").first()
    )
    return User.objects.create_and_join(
        organization=default_org,  # type: ignore
        email=email,
        password=None,
        first_name=full_name,
    )
@partial
def social_create_user(strategy: DjangoStrategy, details, backend, request, user=None, *args, **kwargs):
    """python-social-auth pipeline step: resolve or create the User for a social login.

    Resolution order: already-authenticated user -> organization invite ->
    domain whitelist -> SAML auto-provisioning -> interactive org-creation signup.
    Returns the pipeline dict ``{"is_new": ..., "user": ...}``; helpers may
    instead yield an ``HttpResponse`` redirect which flows back through ``user``.
    """
    if user:
        return {"is_new": False}
    backend_processor = "social_create_user"
    # Some IdPs deliver the email as a list/tuple; normalize to a single address.
    user_email = details["email"][0] if isinstance(details["email"], (list, tuple)) else details["email"]
    # Prefer the full name, then first+last, then the raw username.
    user_name = (
        details["fullname"]
        or f"{details['first_name'] or ''} {details['last_name'] or ''}".strip()
        or details["username"]
    )
    # Stash identity bits for finish_social_signup / the completion redirect.
    strategy.session_set("user_name", user_name)
    strategy.session_set("backend", backend.name)
    from_invite = False
    invite_id = strategy.session_get("invite_id")
    if not user_email or not user_name:
        missing_attr = "email" if not user_email else "name"
        raise ValidationError(
            {missing_attr: "This field is required and was not provided by the IdP."}, code="required"
        )
    if invite_id:
        from_invite = True
        user = process_social_invite_signup(strategy, invite_id, user_email, user_name)
    else:
        # Domain whitelist?
        user = process_social_domain_whitelist_signup(user_email, user_name)
        if user:
            backend_processor = "domain_whitelist"
        # SAML
        if not user:
            # Auto-provision via SAML (no-op unless backend.name == "saml").
            user = process_social_saml_signup(backend, user_email, user_name)
            if user:
                backend_processor = "saml"
        if not user:
            # Nothing auto-provisioned: fall back to the interactive signup flow,
            # which requires the org name / opt-in collected by finish_social_signup.
            organization_name = strategy.session_get("organization_name", None)
            email_opt_in = strategy.session_get("email_opt_in", None)
            if not organization_name or email_opt_in is None:
                return redirect(finish_social_signup)
            serializer = SignupSerializer(
                data={
                    "organization_name": organization_name,
                    "email_opt_in": email_opt_in,
                    "first_name": user_name,
                    "email": user_email,
                    "password": None,
                },
                context={"request": request},
            )
            serializer.is_valid(raise_exception=True)
            user = serializer.save()
    report_user_signed_up(
        distinct_id=user.distinct_id,
        is_instance_first_user=User.objects.count() == 1,
        is_organization_first_user=not from_invite,
        new_onboarding_enabled=False,
        backend_processor=backend_processor,
        social_provider=backend.name,
        user_analytics_metadata=user.get_analytics_metadata(),
        org_analytics_metadata=user.organization.get_analytics_metadata() if user.organization else None,
    )
    return {"is_new": True, "user": user}
| 39.03271 | 124 | 0.678499 |
0eed7f2c8ea6a73085f41a2f5b5b18624afa32db | 31,208 | py | Python | tests/unit/test_image.py | wuchih-amazon/sagemaker-python-sdk | 716b2591d7c050a1ecb26d91cda1733302dd38a5 | [
"Apache-2.0"
] | 1 | 2020-09-22T19:13:36.000Z | 2020-09-22T19:13:36.000Z | tests/unit/test_image.py | wuchih-amazon/sagemaker-python-sdk | 716b2591d7c050a1ecb26d91cda1733302dd38a5 | [
"Apache-2.0"
] | 1 | 2019-04-23T19:32:17.000Z | 2019-04-23T19:32:17.000Z | tests/unit/test_image.py | wuchih-amazon/sagemaker-python-sdk | 716b2591d7c050a1ecb26d91cda1733302dd38a5 | [
"Apache-2.0"
] | 1 | 2020-04-30T07:43:57.000Z | 2020-04-30T07:43:57.000Z | # Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import random
import string
from botocore.credentials import Credentials
import base64
import json
import os
import subprocess
import tarfile
import pytest
import yaml
from mock import patch, Mock, MagicMock
import sagemaker
from sagemaker.local.image import _SageMakerContainer, _aws_credentials
# --- Shared constants for the local-image tests ---
REGION = "us-west-2"
BUCKET_NAME = "mybucket"
EXPANDED_ROLE = "arn:aws:iam::111111111111:role/ExpandedRole"
TRAINING_JOB_NAME = "my-job"
# Two training channels: a local file source ("a") and an S3 prefix source ("b").
INPUT_DATA_CONFIG = [
    {
        "ChannelName": "a",
        "DataUri": "file:///tmp/source1",
        "DataSource": {
            "FileDataSource": {
                "FileDataDistributionType": "FullyReplicated",
                "FileUri": "file:///tmp/source1",
            }
        },
    },
    {
        "ChannelName": "b",
        "DataUri": "s3://my-own-bucket/prefix",
        "DataSource": {
            "S3DataSource": {
                "S3DataDistributionType": "FullyReplicated",
                "S3DataType": "S3Prefix",
                "S3Uri": "s3://my-own-bucket/prefix",
            }
        },
    },
]
OUTPUT_DATA_CONFIG = {"S3OutputPath": ""}
# `sagemaker_submit_directory` points at S3 here and at a local path below.
HYPERPARAMETERS = {
    "a": 1,
    "b": json.dumps("bee"),
    "sagemaker_submit_directory": json.dumps("s3://my_bucket/code"),
}
LOCAL_CODE_HYPERPARAMETERS = {
    "a": 1,
    "b": 2,
    "sagemaker_submit_directory": json.dumps("file:///tmp/code"),
}
@pytest.fixture()
def sagemaker_session():
    """sagemaker.Session backed entirely by mocks (no AWS calls)."""
    boto_mock = MagicMock(name="boto_session", region_name=REGION)
    boto_mock.client("sts").get_caller_identity.return_value = {"Account": "123"}
    # No pre-existing objects in the default bucket.
    boto_mock.resource("s3").Bucket(BUCKET_NAME).objects.filter.return_value = []
    sms = sagemaker.Session(boto_session=boto_mock, sagemaker_client=MagicMock())
    sms.default_bucket = Mock(name="default_bucket", return_value=BUCKET_NAME)
    sms.expand_role = Mock(return_value=EXPANDED_ROLE)
    return sms
def test_sagemaker_container_hosts_should_have_lowercase_names():
    """Generated host names must be lowercase regardless of cluster size."""
    random.seed(a=42)  # host names include random suffixes; pin the RNG

    def assert_all_lowercase(hosts):
        assert all(host == host.lower() for host in hosts)

    for instance_count in (2, 10, 1):
        container = _SageMakerContainer(
            "local", instance_count, "my-image", sagemaker_session=Mock()
        )
        assert_all_lowercase(container.hosts)
@patch("sagemaker.local.local_session.LocalSession")
def test_write_config_file(LocalSession, tmpdir):
    """write_config_files must emit hyperparameters/resourceconfig/inputdataconfig JSON."""
    sagemaker_container = _SageMakerContainer("local", 2, "my-image")
    sagemaker_container.container_root = str(tmpdir.mkdir("container-root"))
    host = "algo-1"
    sagemaker.local.image._create_config_file_directories(sagemaker_container.container_root, host)
    container_root = sagemaker_container.container_root
    config_file_root = os.path.join(container_root, host, "input", "config")
    hyperparameters_file = os.path.join(config_file_root, "hyperparameters.json")
    resource_config_file = os.path.join(config_file_root, "resourceconfig.json")
    input_data_config_file = os.path.join(config_file_root, "inputdataconfig.json")
    # write the config files, and then lets check they exist and have the right content.
    sagemaker_container.write_config_files(host, HYPERPARAMETERS, INPUT_DATA_CONFIG)
    assert os.path.exists(hyperparameters_file)
    assert os.path.exists(resource_config_file)
    assert os.path.exists(input_data_config_file)
    # FIX: json.load(open(...)) leaked file handles (ResourceWarning, locked
    # files on Windows); read via context managers instead.
    with open(hyperparameters_file) as f:
        hyperparameters_data = json.load(f)
    with open(resource_config_file) as f:
        resource_config_data = json.load(f)
    with open(input_data_config_file) as f:
        input_data_config_data = json.load(f)
    # Validate HyperParameters
    for k, v in HYPERPARAMETERS.items():
        assert k in hyperparameters_data
        assert hyperparameters_data[k] == v
    # Validate Resource Config
    assert resource_config_data["current_host"] == host
    assert resource_config_data["hosts"] == sagemaker_container.hosts
    # Validate Input Data Config
    for channel in INPUT_DATA_CONFIG:
        assert channel["ChannelName"] in input_data_config_data
@patch("sagemaker.local.local_session.LocalSession")
def test_write_config_files_input_content_type(LocalSession, tmpdir):
    """ContentType must appear in inputdataconfig.json only for channels that define it."""
    sagemaker_container = _SageMakerContainer("local", 1, "my-image")
    sagemaker_container.container_root = str(tmpdir.mkdir("container-root"))
    host = "algo-1"
    sagemaker.local.image._create_config_file_directories(sagemaker_container.container_root, host)
    container_root = sagemaker_container.container_root
    config_file_root = os.path.join(container_root, host, "input", "config")
    input_data_config_file = os.path.join(config_file_root, "inputdataconfig.json")
    # write the config files, and then lets check they exist and have the right content.
    input_data_config = [
        {
            "ChannelName": "channel_a",
            "DataUri": "file:///tmp/source1",
            "ContentType": "text/csv",
            "DataSource": {
                "FileDataSource": {
                    "FileDataDistributionType": "FullyReplicated",
                    "FileUri": "file:///tmp/source1",
                }
            },
        },
        {
            "ChannelName": "channel_b",
            "DataUri": "s3://my-own-bucket/prefix",
            "DataSource": {
                "S3DataSource": {
                    "S3DataDistributionType": "FullyReplicated",
                    "S3DataType": "S3Prefix",
                    "S3Uri": "s3://my-own-bucket/prefix",
                }
            },
        },
    ]
    sagemaker_container.write_config_files(host, HYPERPARAMETERS, input_data_config)
    assert os.path.exists(input_data_config_file)
    # FIX: json.load(open(...)) leaked the file handle; use a context manager.
    with open(input_data_config_file) as f:
        parsed_input_config = json.load(f)
    # Validate Input Data Config
    for channel in input_data_config:
        assert channel["ChannelName"] in parsed_input_config
    # Channel A has a content type
    assert "ContentType" in parsed_input_config["channel_a"]
    assert parsed_input_config["channel_a"]["ContentType"] == "text/csv"
    # Channel B does not have content type
    assert "ContentType" not in parsed_input_config["channel_b"]
@patch("sagemaker.local.local_session.LocalSession")
def test_retrieve_artifacts(LocalSession, tmpdir, sagemaker_session):
    """retrieve_artifacts must merge per-host model/output dirs into tarballs.

    BUG FIX: the test previously declared no ``sagemaker_session`` parameter,
    so the name below resolved to the module-level *fixture function* and that
    function object was handed to ``retrieve_artifacts``. Declaring the
    parameter makes pytest inject the fixture's value instead.
    """
    sagemaker_container = _SageMakerContainer("local", 2, "my-image")
    sagemaker_container.hosts = ["algo-1", "algo-2"]  # avoid any randomness
    sagemaker_container.container_root = str(tmpdir.mkdir("container-root"))
    volume1 = os.path.join(sagemaker_container.container_root, "algo-1")
    volume2 = os.path.join(sagemaker_container.container_root, "algo-2")
    os.mkdir(volume1)
    os.mkdir(volume2)
    compose_data = {
        "services": {
            "algo-1": {
                "volumes": [
                    "%s:/opt/ml/model" % os.path.join(volume1, "model"),
                    "%s:/opt/ml/output" % os.path.join(volume1, "output"),
                ]
            },
            "algo-2": {
                "volumes": [
                    "%s:/opt/ml/model" % os.path.join(volume2, "model"),
                    "%s:/opt/ml/output" % os.path.join(volume2, "output"),
                ]
            },
        }
    }
    dirs = [
        ("model", volume1),
        ("model/data", volume1),
        ("model", volume2),
        ("model/data", volume2),
        ("model/tmp", volume2),
        ("output", volume1),
        ("output/data", volume1),
        ("output", volume2),
        ("output/data", volume2),
        ("output/log", volume2),
    ]
    files = [
        ("model/data/model.json", volume1),
        ("model/data/variables.csv", volume1),
        ("model/data/model.json", volume2),
        ("model/data/variables2.csv", volume2),
        ("model/tmp/something-else.json", volume2),
        ("output/data/loss.json", volume1),
        ("output/data/accuracy.json", volume1),
        ("output/data/loss.json", volume2),
        ("output/data/accuracy2.json", volume2),
        ("output/log/warnings.txt", volume2),
    ]
    expected_model = [
        "data",
        "data/model.json",
        "data/variables.csv",
        "data/variables2.csv",
        "tmp/something-else.json",
    ]
    expected_output = [
        "data",
        "log",
        "data/loss.json",
        "data/accuracy.json",
        "data/accuracy2.json",
        "log/warnings.txt",
    ]
    for d, volume in dirs:
        os.mkdir(os.path.join(volume, d))
    # create all the files
    for f, volume in files:
        open(os.path.join(volume, f), "a").close()
    output_path = str(tmpdir.mkdir("exported_files"))
    output_data_config = {"S3OutputPath": "file://%s" % output_path}
    model_artifacts = sagemaker_container.retrieve_artifacts(
        compose_data, output_data_config, sagemaker_session
    ).replace("file://", "")
    artifacts = os.path.dirname(model_artifacts)
    # we have both the tar files
    assert set(os.listdir(artifacts)) == {"model.tar.gz", "output.tar.gz"}
    # check that the tar files contain what we expect
    # FIX: close the tar archives via context managers (they were leaked before).
    with tarfile.open(os.path.join(output_path, "model.tar.gz")) as tar:
        model_tar_files = [m.name for m in tar.getmembers()]
    for f in expected_model:
        assert f in model_tar_files
    with tarfile.open(os.path.join(output_path, "output.tar.gz")) as tar:
        output_tar_files = [m.name for m in tar.getmembers()]
    for f in expected_output:
        assert f in output_tar_files
def test_stream_output():
    """_stream_output raises on a failing process and returns 0 on success."""
    failing = subprocess.Popen(
        ["ls", "/some/unknown/path"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    # it should raise an exception if the command fails
    with pytest.raises(RuntimeError):
        sagemaker.local.image._stream_output(failing)

    succeeding = subprocess.Popen(
        ["echo", "hello"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    assert sagemaker.local.image._stream_output(succeeding) == 0
def test_check_output():
    """_check_output raises on failure and returns command output on success."""
    with pytest.raises(Exception):
        sagemaker.local.image._check_output(["ls", "/some/unknown/path"])

    msg = "hello!"
    # Both list-form and string-form commands are supported.
    assert sagemaker.local.image._check_output(["echo", msg]).strip() == msg
    assert sagemaker.local.image._check_output("echo %s" % msg).strip() == msg
@patch("sagemaker.local.local_session.LocalSession", Mock())
@patch("sagemaker.local.image._stream_output", Mock())
@patch("sagemaker.local.image._SageMakerContainer._cleanup")
@patch("sagemaker.local.image._SageMakerContainer.retrieve_artifacts")
@patch("sagemaker.local.data.get_data_source_instance")
@patch("subprocess.Popen")
def test_train(
popen, get_data_source_instance, retrieve_artifacts, cleanup, tmpdir, sagemaker_session
):
data_source = Mock()
data_source.get_root_dir.return_value = "foo"
get_data_source_instance.return_value = data_source
directories = [str(tmpdir.mkdir("container-root")), str(tmpdir.mkdir("data"))]
with patch(
"sagemaker.local.image._SageMakerContainer._create_tmp_folder", side_effect=directories
):
instance_count = 2
image = "my-image"
sagemaker_container = _SageMakerContainer(
"local", instance_count, image, sagemaker_session=sagemaker_session
)
sagemaker_container.train(
INPUT_DATA_CONFIG, OUTPUT_DATA_CONFIG, HYPERPARAMETERS, TRAINING_JOB_NAME
)
docker_compose_file = os.path.join(
sagemaker_container.container_root, "docker-compose.yaml"
)
call_args = popen.call_args[0][0]
assert call_args is not None
expected = [
"docker-compose",
"-f",
docker_compose_file,
"up",
"--build",
"--abort-on-container-exit",
]
for i, v in enumerate(expected):
assert call_args[i] == v
with open(docker_compose_file, "r") as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
assert len(config["services"]) == instance_count
for h in sagemaker_container.hosts:
assert config["services"][h]["image"] == image
assert config["services"][h]["command"] == "train"
# TODO-reinvent-2019 [akarpur]: uncomment the below assert statement
# assert "AWS_REGION={}".format(REGION) in config["services"][h]["environment"]
assert (
"TRAINING_JOB_NAME={}".format(TRAINING_JOB_NAME)
in config["services"][h]["environment"]
)
# assert that expected by sagemaker container output directories exist
assert os.path.exists(os.path.join(sagemaker_container.container_root, "output"))
assert os.path.exists(os.path.join(sagemaker_container.container_root, "output/data"))
retrieve_artifacts.assert_called_once()
cleanup.assert_called_once()
@patch("sagemaker.local.local_session.LocalSession", Mock())
@patch("sagemaker.local.image._stream_output", Mock())
@patch("sagemaker.local.image._SageMakerContainer._cleanup", Mock())
@patch("sagemaker.local.data.get_data_source_instance")
def test_train_with_hyperparameters_without_job_name(
get_data_source_instance, tmpdir, sagemaker_session
):
data_source = Mock()
data_source.get_root_dir.return_value = "foo"
get_data_source_instance.return_value = data_source
directories = [str(tmpdir.mkdir("container-root")), str(tmpdir.mkdir("data"))]
with patch(
"sagemaker.local.image._SageMakerContainer._create_tmp_folder", side_effect=directories
):
instance_count = 2
image = "my-image"
sagemaker_container = _SageMakerContainer(
"local", instance_count, image, sagemaker_session=sagemaker_session
)
sagemaker_container.train(
INPUT_DATA_CONFIG, OUTPUT_DATA_CONFIG, HYPERPARAMETERS, TRAINING_JOB_NAME
)
docker_compose_file = os.path.join(
sagemaker_container.container_root, "docker-compose.yaml"
)
with open(docker_compose_file, "r") as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
for h in sagemaker_container.hosts:
assert (
"TRAINING_JOB_NAME={}".format(TRAINING_JOB_NAME)
in config["services"][h]["environment"]
)
@patch("sagemaker.local.local_session.LocalSession", Mock())
@patch("sagemaker.local.image._stream_output", side_effect=RuntimeError("this is expected"))
@patch("sagemaker.local.image._SageMakerContainer._cleanup")
@patch("sagemaker.local.image._SageMakerContainer.retrieve_artifacts")
@patch("sagemaker.local.data.get_data_source_instance")
@patch("subprocess.Popen", Mock())
def test_train_error(
get_data_source_instance, retrieve_artifacts, cleanup, _stream_output, tmpdir, sagemaker_session
):
data_source = Mock()
data_source.get_root_dir.return_value = "foo"
get_data_source_instance.return_value = data_source
directories = [str(tmpdir.mkdir("container-root")), str(tmpdir.mkdir("data"))]
with patch(
"sagemaker.local.image._SageMakerContainer._create_tmp_folder", side_effect=directories
):
instance_count = 2
image = "my-image"
sagemaker_container = _SageMakerContainer(
"local", instance_count, image, sagemaker_session=sagemaker_session
)
with pytest.raises(RuntimeError) as e:
sagemaker_container.train(
INPUT_DATA_CONFIG, OUTPUT_DATA_CONFIG, HYPERPARAMETERS, TRAINING_JOB_NAME
)
assert "this is expected" in str(e)
retrieve_artifacts.assert_called_once()
cleanup.assert_called_once()
@patch("sagemaker.local.local_session.LocalSession", Mock())
@patch("sagemaker.local.image._stream_output", Mock())
@patch("sagemaker.local.image._SageMakerContainer._cleanup", Mock())
@patch("sagemaker.local.data.get_data_source_instance")
@patch("subprocess.Popen", Mock())
def test_train_local_code(get_data_source_instance, tmpdir, sagemaker_session):
data_source = Mock()
data_source.get_root_dir.return_value = "foo"
get_data_source_instance.return_value = data_source
directories = [str(tmpdir.mkdir("container-root")), str(tmpdir.mkdir("data"))]
with patch(
"sagemaker.local.image._SageMakerContainer._create_tmp_folder", side_effect=directories
):
instance_count = 2
image = "my-image"
sagemaker_container = _SageMakerContainer(
"local", instance_count, image, sagemaker_session=sagemaker_session
)
sagemaker_container.train(
INPUT_DATA_CONFIG, OUTPUT_DATA_CONFIG, LOCAL_CODE_HYPERPARAMETERS, TRAINING_JOB_NAME
)
docker_compose_file = os.path.join(
sagemaker_container.container_root, "docker-compose.yaml"
)
shared_folder_path = os.path.join(sagemaker_container.container_root, "shared")
with open(docker_compose_file, "r") as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
assert len(config["services"]) == instance_count
for h in sagemaker_container.hosts:
assert config["services"][h]["image"] == image
assert config["services"][h]["command"] == "train"
volumes = config["services"][h]["volumes"]
assert "%s:/opt/ml/code" % "/tmp/code" in volumes
assert "%s:/opt/ml/shared" % shared_folder_path in volumes
config_file_root = os.path.join(
sagemaker_container.container_root, h, "input", "config"
)
hyperparameters_file = os.path.join(config_file_root, "hyperparameters.json")
hyperparameters_data = json.load(open(hyperparameters_file))
assert hyperparameters_data["sagemaker_submit_directory"] == json.dumps("/opt/ml/code")
@patch("sagemaker.local.local_session.LocalSession", Mock())
@patch("sagemaker.local.image._stream_output", Mock())
@patch("sagemaker.local.image._SageMakerContainer._cleanup", Mock())
@patch("sagemaker.local.data.get_data_source_instance")
@patch("subprocess.Popen", Mock())
def test_train_local_intermediate_output(get_data_source_instance, tmpdir, sagemaker_session):
data_source = Mock()
data_source.get_root_dir.return_value = "foo"
get_data_source_instance.return_value = data_source
directories = [str(tmpdir.mkdir("container-root")), str(tmpdir.mkdir("data"))]
with patch(
"sagemaker.local.image._SageMakerContainer._create_tmp_folder", side_effect=directories
):
instance_count = 2
image = "my-image"
sagemaker_container = _SageMakerContainer(
"local", instance_count, image, sagemaker_session=sagemaker_session
)
output_path = str(tmpdir.mkdir("customer_intermediate_output"))
output_data_config = {"S3OutputPath": "file://%s" % output_path}
hyperparameters = {"sagemaker_s3_output": output_path}
sagemaker_container.train(
INPUT_DATA_CONFIG, output_data_config, hyperparameters, TRAINING_JOB_NAME
)
docker_compose_file = os.path.join(
sagemaker_container.container_root, "docker-compose.yaml"
)
intermediate_folder_path = os.path.join(output_path, "output/intermediate")
with open(docker_compose_file, "r") as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
assert len(config["services"]) == instance_count
for h in sagemaker_container.hosts:
assert config["services"][h]["image"] == image
assert config["services"][h]["command"] == "train"
volumes = config["services"][h]["volumes"]
assert "%s:/opt/ml/output/intermediate" % intermediate_folder_path in volumes
def test_container_has_gpu_support(tmpdir, sagemaker_session):
    """local_gpu containers must request the nvidia docker runtime."""
    container = _SageMakerContainer(
        "local_gpu", 1, "my-image", sagemaker_session=sagemaker_session
    )
    host_config = container._create_docker_host("host-1", {}, set(), "train", [])
    assert "runtime" in host_config
    assert host_config["runtime"] == "nvidia"
def test_container_does_not_enable_nvidia_docker_for_cpu_containers(sagemaker_session):
    """Plain (CPU) local containers must not set any docker runtime."""
    container = _SageMakerContainer(
        "local", 1, "my-image", sagemaker_session=sagemaker_session
    )
    host_config = container._create_docker_host("host-1", {}, set(), "train", [])
    assert "runtime" not in host_config
@patch("sagemaker.local.image._HostingContainer.run", Mock())
@patch("sagemaker.local.image._SageMakerContainer._prepare_serving_volumes", Mock(return_value=[]))
@patch("shutil.copy", Mock())
@patch("shutil.copytree", Mock())
def test_serve(tmpdir, sagemaker_session):
with patch(
"sagemaker.local.image._SageMakerContainer._create_tmp_folder",
return_value=str(tmpdir.mkdir("container-root")),
):
image = "my-image"
sagemaker_container = _SageMakerContainer(
"local", 1, image, sagemaker_session=sagemaker_session
)
environment = {"env1": 1, "env2": "b", "SAGEMAKER_SUBMIT_DIRECTORY": "s3://some/path"}
sagemaker_container.serve("/some/model/path", environment)
docker_compose_file = os.path.join(
sagemaker_container.container_root, "docker-compose.yaml"
)
with open(docker_compose_file, "r") as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
for h in sagemaker_container.hosts:
assert config["services"][h]["image"] == image
assert config["services"][h]["command"] == "serve"
@patch("sagemaker.local.image._HostingContainer.run", Mock())
@patch("sagemaker.local.image._SageMakerContainer._prepare_serving_volumes", Mock(return_value=[]))
@patch("shutil.copy", Mock())
@patch("shutil.copytree", Mock())
def test_serve_local_code(tmpdir, sagemaker_session):
with patch(
"sagemaker.local.image._SageMakerContainer._create_tmp_folder",
return_value=str(tmpdir.mkdir("container-root")),
):
image = "my-image"
sagemaker_container = _SageMakerContainer(
"local", 1, image, sagemaker_session=sagemaker_session
)
environment = {"env1": 1, "env2": "b", "SAGEMAKER_SUBMIT_DIRECTORY": "file:///tmp/code"}
sagemaker_container.serve("/some/model/path", environment)
docker_compose_file = os.path.join(
sagemaker_container.container_root, "docker-compose.yaml"
)
with open(docker_compose_file, "r") as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
for h in sagemaker_container.hosts:
assert config["services"][h]["image"] == image
assert config["services"][h]["command"] == "serve"
volumes = config["services"][h]["volumes"]
assert "%s:/opt/ml/code" % "/tmp/code" in volumes
assert (
"SAGEMAKER_SUBMIT_DIRECTORY=/opt/ml/code"
in config["services"][h]["environment"]
)
@patch("sagemaker.local.image._HostingContainer.run", Mock())
@patch("sagemaker.local.image._SageMakerContainer._prepare_serving_volumes", Mock(return_value=[]))
@patch("shutil.copy", Mock())
@patch("shutil.copytree", Mock())
def test_serve_local_code_no_env(tmpdir, sagemaker_session):
with patch(
"sagemaker.local.image._SageMakerContainer._create_tmp_folder",
return_value=str(tmpdir.mkdir("container-root")),
):
image = "my-image"
sagemaker_container = _SageMakerContainer(
"local", 1, image, sagemaker_session=sagemaker_session
)
sagemaker_container.serve("/some/model/path", {})
docker_compose_file = os.path.join(
sagemaker_container.container_root, "docker-compose.yaml"
)
with open(docker_compose_file, "r") as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
for h in sagemaker_container.hosts:
assert config["services"][h]["image"] == image
assert config["services"][h]["command"] == "serve"
@patch("sagemaker.local.data.get_data_source_instance")
@patch("tarfile.is_tarfile")
@patch("tarfile.open", MagicMock())
@patch("os.makedirs", Mock())
def test_prepare_serving_volumes_with_s3_model(
    is_tarfile, get_data_source_instance, sagemaker_session
):
    """An s3 model tarball is downloaded, recognized as a tar, and its dir mounted."""
    container = _SageMakerContainer(
        "local", 1, "some-image", sagemaker_session=sagemaker_session
    )
    container.container_root = "/tmp/container_root"

    downloaded = Mock()
    downloaded.get_root_dir.return_value = "/tmp/downloaded/data/"
    downloaded.get_file_list.return_value = ["/tmp/downloaded/data/my_model.tar.gz"]
    get_data_source_instance.return_value = downloaded
    is_tarfile.return_value = True

    volumes = container._prepare_serving_volumes("s3://bucket/my_model.tar.gz")

    is_tarfile.assert_called_with("/tmp/downloaded/data/my_model.tar.gz")
    assert len(volumes) == 1
    assert volumes[0].container_dir == "/opt/ml/model"
    assert volumes[0].host_dir == "/tmp/downloaded/data/"
@patch("sagemaker.local.data.get_data_source_instance")
@patch("tarfile.is_tarfile", Mock(return_value=False))
@patch("os.makedirs", Mock())
def test_prepare_serving_volumes_with_local_model(get_data_source_instance, sagemaker_session):
    """A local (non-tar) model directory is mounted directly at /opt/ml/model."""
    container = _SageMakerContainer(
        "local", 1, "some-image", sagemaker_session=sagemaker_session
    )
    container.container_root = "/tmp/container_root"

    local_source = Mock()
    local_source.get_root_dir.return_value = "/path/to/my_model"
    local_source.get_file_list.return_value = ["/path/to/my_model/model"]
    get_data_source_instance.return_value = local_source

    volumes = container._prepare_serving_volumes("file:///path/to/my_model")

    assert len(volumes) == 1
    assert volumes[0].container_dir == "/opt/ml/model"
    assert volumes[0].host_dir == "/path/to/my_model"
def test_ecr_login_non_ecr():
    """Non-ECR images (e.g. docker hub's `ubuntu`) require no ECR login."""
    boto_session = Mock()
    assert sagemaker.local.image._ecr_login_if_needed(boto_session, "ubuntu") is False
    boto_session.assert_not_called()
@patch("sagemaker.local.image._check_output", return_value="123451324")
@pytest.mark.parametrize(
    "image",
    [
        "520713654638.dkr.ecr.us-east-1.amazonaws.com/image-i-have:1.0",
        "520713654638.dkr.ecr.us-iso-east-1.c2s.ic.gov/image-i-have:1.0",
    ],
)
def test_ecr_login_image_exists(_check_output, image):
    """No ECR login is attempted when the image is already available locally."""
    boto_session = Mock()
    assert sagemaker.local.image._ecr_login_if_needed(boto_session, image) is False
    boto_session.assert_not_called()
    _check_output.assert_called()
@patch("subprocess.check_output", return_value="".encode("utf-8"))
def test_ecr_login_needed(check_output):
session_mock = Mock()
token = "very-secure-token"
token_response = "AWS:%s" % token
b64_token = base64.b64encode(token_response.encode("utf-8"))
response = {
u"authorizationData": [
{
u"authorizationToken": b64_token,
u"proxyEndpoint": u"https://520713654638.dkr.ecr.us-east-1.amazonaws.com",
}
],
"ResponseMetadata": {
"RetryAttempts": 0,
"HTTPStatusCode": 200,
"RequestId": "25b2ac63-36bf-11e8-ab6a-e5dc597d2ad9",
},
}
session_mock.client("ecr").get_authorization_token.return_value = response
image = "520713654638.dkr.ecr.us-east-1.amazonaws.com/image-i-need:1.1"
result = sagemaker.local.image._ecr_login_if_needed(session_mock, image)
expected_command = (
"docker login -u AWS -p %s https://520713654638.dkr.ecr.us-east-1.amazonaws.com" % token
)
check_output.assert_called_with(expected_command, shell=True)
session_mock.client("ecr").get_authorization_token.assert_called_with(
registryIds=["520713654638"]
)
assert result is True
@patch("subprocess.check_output", return_value="".encode("utf-8"))
def test_pull_image(check_output):
    """_pull_image shells out to `docker pull <image>`."""
    image_uri = "520713654638.dkr.ecr.us-east-1.amazonaws.com/image-i-need:1.1"
    sagemaker.local.image._pull_image(image_uri)
    check_output.assert_called_once_with("docker pull %s" % image_uri, shell=True)
def test__aws_credentials_with_long_lived_credentials():
    """Long-lived (token-less) credentials map to exactly two env-var entries."""
    creds = Credentials(access_key=_random_string(), secret_key=_random_string(), token=None)
    boto_session = Mock()
    boto_session.get_credentials.return_value = creds

    assert _aws_credentials(boto_session) == [
        "AWS_ACCESS_KEY_ID=%s" % creds.access_key,
        "AWS_SECRET_ACCESS_KEY=%s" % creds.secret_key,
    ]
@patch("sagemaker.local.image._aws_credentials_available_in_metadata_service")
def test__aws_credentials_with_short_lived_credentials_and_ec2_metadata_service_having_credentials(
    mock,
):
    """When the metadata service can vend credentials, none are injected into the container."""
    mock.return_value = True
    boto_session = Mock()
    boto_session.get_credentials.return_value = Credentials(
        access_key=_random_string(), secret_key=_random_string(), token=_random_string()
    )
    assert _aws_credentials(boto_session) is None
@patch("sagemaker.local.image._aws_credentials_available_in_metadata_service")
def test__aws_credentials_with_short_lived_credentials_and_ec2_metadata_service_having_no_credentials(
    mock,
):
    """Without a metadata service, the session token is injected alongside the keys."""
    mock.return_value = False
    creds = Credentials(
        access_key=_random_string(), secret_key=_random_string(), token=_random_string()
    )
    boto_session = Mock()
    boto_session.get_credentials.return_value = creds

    assert _aws_credentials(boto_session) == [
        "AWS_ACCESS_KEY_ID=%s" % creds.access_key,
        "AWS_SECRET_ACCESS_KEY=%s" % creds.secret_key,
        "AWS_SESSION_TOKEN=%s" % creds.token,
    ]
def _random_string(size=6, chars=string.ascii_uppercase):
return "".join(random.choice(chars) for x in range(size))
| 37.330144 | 102 | 0.676878 |
528f70a603776fba21d1b2867bba2294a8d8b171 | 4,588 | py | Python | src/helow/reporter/serializers.py | BuildForSDG/team-079 | 817fd17f566be07c4b225c769ba6576462f080b8 | [
"MIT"
] | null | null | null | src/helow/reporter/serializers.py | BuildForSDG/team-079 | 817fd17f566be07c4b225c769ba6576462f080b8 | [
"MIT"
] | null | null | null | src/helow/reporter/serializers.py | BuildForSDG/team-079 | 817fd17f566be07c4b225c769ba6576462f080b8 | [
"MIT"
] | null | null | null | """The serializer class for all endpoints."""
from rest_framework import serializers
from django.contrib.auth import get_user_model
from reporter.models import IncidentReport, Place, IncidentType
from config import Config as config
class UserSerializer(serializers.Serializer):
    """User serializer class."""

    # `id` is the only mandatory field; username/email are optional passthroughs.
    id = serializers.IntegerField()
    username = serializers.CharField(required=False)
    email = serializers.CharField(required=False)
class IncidentLocationSerializer(serializers.ModelSerializer):
    """Serializer for location model."""

    class Meta:
        # Expose every field of the Place model.
        model = Place
        fields = '__all__'
class IncidentTypeSerializer(serializers.Serializer):
    """IncidentType Serializer."""

    id = serializers.IntegerField()
    label = serializers.CharField(required=False)
    frequency = serializers.IntegerField(required=False)

    def create(self, validated_data):
        """Persist a new IncidentType from the validated payload."""
        return IncidentType.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Apply any provided fields to `instance`, keeping current values otherwise."""
        for field_name in ("id", "label", "frequency"):
            setattr(
                instance,
                field_name,
                validated_data.get(field_name, getattr(instance, field_name)),
            )
        instance.save()
        return instance
class CreateIncidentReportSerializer(serializers.Serializer):
    """Model serializer for `IncidentReport`."""
    # Class-body import; presumably placed here to avoid a circular import
    # with responder.serializers — TODO confirm, a module-level import may work.
    import responder.serializers as se
    id = serializers.PrimaryKeyRelatedField(queryset=IncidentReport.objects.all(), required=False)
    title = serializers.CharField(max_length=50)
    description = serializers.CharField(max_length=200)
    reported_by = UserSerializer(required=False)
    reported_at = serializers.DateTimeField()
    incident_type = IncidentTypeSerializer()
    responder = se.ResponderSerializer(required=False)
    status = serializers.CharField(max_length=100, required=False)
    location = IncidentLocationSerializer()
    def create(self, validated_data):
        """Create and return a new `IncidentReport` instance, given the validated data."""
        # get user object if user is not anonymous
        user_data = validated_data.get('reported_by')
        if user_data:
            user = get_user_model().objects.get(**user_data)
            validated_data['reported_by'] = user
        # get the incident type object with the provided id
        incident_type_data = validated_data.get('incident_type')
        if incident_type_data:
            incident_type = IncidentType.objects.get(id=incident_type_data.get('id'))
            validated_data['incident_type'] = incident_type
        # create a location for this incident; Place rows are tagged with a
        # fixed owner value taken from the app configuration
        location_data = validated_data.pop('location')
        location = Place.objects.create(owner=config.REPORTER_LOCATION, **location_data)
        # create this incident
        return IncidentReport.objects.create(location=location, **validated_data)
class ReportSerializer(serializers.Serializer):
    """Model serializer for `IncidentReport`."""
    # NOTE(review): the declared fields below (business_status, icon, ...)
    # look like a Google-Places result shape, yet create() reads keys such as
    # 'reported_by', 'incident_type' and 'location' that are NOT declared
    # here — the method appears copy-pasted from
    # CreateIncidentReportSerializer; confirm which contract is intended.
    business_status = serializers.CharField(max_length=20)
    icon = serializers.CharField(max_length=200)
    id = serializers.CharField(max_length=100)
    name = serializers.CharField(max_length=200)
    place_id = serializers.CharField(max_length=200)
    rating = serializers.FloatField()
    user_ratings_total = serializers.IntegerField()
    vicinity = serializers.CharField(max_length=200)
    def create(self, validated_data):
        """Create and return a new `IncidentReport` instance, given the validated data."""
        # get user object if user is not anonymous
        user_data = validated_data.get('reported_by')
        if user_data:
            user = get_user_model().objects.get(**user_data)
            validated_data['reported_by'] = user
        # get the incident type object with the provided id
        incident_type_data = validated_data.get('incident_type')
        if incident_type_data:
            incident_type = IncidentType.objects.get(id=incident_type_data.get('id'))
            validated_data['incident_type'] = incident_type
        # create a location for this incident
        location_data = validated_data.pop('location')
        location = Place.objects.create(owner=config.REPORTER_LOCATION, **location_data)
        # create this incident
        incident = IncidentReport.objects.create(location=location, **validated_data)
        # call find_responders to scan for available responders
        from django.shortcuts import redirect, reverse
        # NOTE(review): returning an HttpResponse (redirect) from
        # Serializer.create is unconventional — DRF expects the created
        # instance here; verify against the calling view.
        return redirect(reverse('find_responder') + f'?incident={incident.id}')
| 38.881356 | 98 | 0.717306 |
a7348497853933b78d8b132874bbe54747fe8345 | 634 | py | Python | recipes/Python/578176_Click_counter_for_Windows/recipe-578176.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/578176_Click_counter_for_Windows/recipe-578176.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/578176_Click_counter_for_Windows/recipe-578176.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | # -*- coding: utf-8 -*-
'''
Click counter
Left click: +1
Right click: -1
Middle click: Reset
'''
import pyHook
import pythoncom
# Print the usage banner stored in the module docstring.
print(__doc__)
# Global running tally, mutated by the mouse-event handlers below.
click = 0
def left_down(event):
    """Left button pressed: bump the shared counter up by one and echo it."""
    global click
    click = click + 1
    print(click)
    # Returning True lets the event propagate to other applications.
    return True
def right_down(event):
    """Right button pressed: drop the shared counter by one and echo it."""
    global click
    click = click - 1
    print(click)
    # Returning True lets the event propagate to other applications.
    return True
def middle_down(event):
    """Middle button pressed: reset the shared counter to zero and echo it."""
    global click
    click = 0
    print(click)
    # Returning True lets the event propagate to other applications.
    return True
# Install a global low-level mouse hook and dispatch the three button
# handlers defined above.
hm = pyHook.HookManager()
hm.SubscribeMouseLeftDown(left_down)
hm.SubscribeMouseRightDown(right_down)
hm.SubscribeMouseMiddleDown(middle_down)
hm.HookMouse()
# Blocks here pumping Windows messages; UnhookMouse only runs once the
# message loop exits (e.g. the process is being shut down).
pythoncom.PumpMessages()
hm.UnhookMouse()
f07486c75788e245f7d0afd6c99ade614c8b1c1e | 13,764 | py | Python | sdk/lusid_asyncio/models/interest_rate_swap.py | finbourne/lusid-sdk-python-asyncio-preview | 290f93590ab5485661216c8622d3de9f7af0ed60 | [
"MIT"
] | null | null | null | sdk/lusid_asyncio/models/interest_rate_swap.py | finbourne/lusid-sdk-python-asyncio-preview | 290f93590ab5485661216c8622d3de9f7af0ed60 | [
"MIT"
] | null | null | null | sdk/lusid_asyncio/models/interest_rate_swap.py | finbourne/lusid-sdk-python-asyncio-preview | 290f93590ab5485661216c8622d3de9f7af0ed60 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3923
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid_asyncio.configuration import Configuration
class InterestRateSwap(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    # Python attribute name -> declared OpenAPI type.
    openapi_types = {
        'start_date': 'datetime',
        'maturity_date': 'datetime',
        'is_non_deliverable': 'bool',
        'legs': 'list[InstrumentLeg]',
        'settlement_ccy': 'str',
        'instrument_type': 'str'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'start_date': 'startDate',
        'maturity_date': 'maturityDate',
        'is_non_deliverable': 'isNonDeliverable',
        'legs': 'legs',
        'settlement_ccy': 'settlementCcy',
        'instrument_type': 'instrumentType'
    }

    # Python attribute name -> whether the API requires the field.
    required_map = {
        'start_date': 'required',
        'maturity_date': 'required',
        'is_non_deliverable': 'optional',
        'legs': 'required',
        'settlement_ccy': 'optional',
        'instrument_type': 'required'
    }

    def __init__(self, start_date=None, maturity_date=None, is_non_deliverable=None, legs=None, settlement_ccy=None, instrument_type=None, local_vars_configuration=None):  # noqa: E501
        """InterestRateSwap - a model defined in OpenAPI

        :param start_date:  The start date of the instrument. This is normally synonymous with the trade-date. (required)
        :type start_date: datetime
        :param maturity_date:  The final maturity date of the instrument. This means the last date on which the instruments makes a payment of any amount. For the avoidance of doubt, that is not necessarily prior to its last sensitivity date for the purposes of risk; e.g. instruments such as Constant Maturity Swaps (CMS) often have sensitivities to rates beyond their last payment date. (required)
        :type maturity_date: datetime
        :param is_non_deliverable:  Is the contract an IRS of \"Non-Deliverable\" type, meaning a single payment in the settlement currency based on the difference between the fixed and floating rates.
        :type is_non_deliverable: bool
        :param legs:  The set of instrument legs that define the swap instrument. (required)
        :type legs: list[lusid_asyncio.InstrumentLeg]
        :param settlement_ccy:  Settlement currency if IRS is non-deliverable.
        :type settlement_ccy: str
        :param instrument_type:  The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashFlowsLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CapFloor, CashSettled, CdsIndex, Basket, FundingLeg, CrossCurrencySwap, FxSwap, ForwardRateAgreement, SimpleInstrument, Repo, Equity, ExchangeTradedOption (required)
        :type instrument_type: str
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration

        self._start_date = None
        self._maturity_date = None
        self._is_non_deliverable = None
        self._legs = None
        self._settlement_ccy = None
        self._instrument_type = None
        self.discriminator = None

        self.start_date = start_date
        self.maturity_date = maturity_date
        # Optional fields are only assigned when supplied so validation in
        # the property setters is not triggered for absent values.
        if is_non_deliverable is not None:
            self.is_non_deliverable = is_non_deliverable
        self.legs = legs
        self.settlement_ccy = settlement_ccy
        self.instrument_type = instrument_type

    @property
    def start_date(self):
        """Gets the start_date of this InterestRateSwap.  # noqa: E501

        The start date of the instrument. This is normally synonymous with the trade-date.  # noqa: E501

        :return: The start_date of this InterestRateSwap.  # noqa: E501
        :rtype: datetime
        """
        return self._start_date

    @start_date.setter
    def start_date(self, start_date):
        """Sets the start_date of this InterestRateSwap.

        The start date of the instrument. This is normally synonymous with the trade-date.  # noqa: E501

        :param start_date: The start_date of this InterestRateSwap.  # noqa: E501
        :type start_date: datetime
        """
        if self.local_vars_configuration.client_side_validation and start_date is None:  # noqa: E501
            raise ValueError("Invalid value for `start_date`, must not be `None`")  # noqa: E501

        self._start_date = start_date

    @property
    def maturity_date(self):
        """Gets the maturity_date of this InterestRateSwap.  # noqa: E501

        The final maturity date of the instrument. This means the last date on which the instruments makes a payment of any amount. For the avoidance of doubt, that is not necessarily prior to its last sensitivity date for the purposes of risk; e.g. instruments such as Constant Maturity Swaps (CMS) often have sensitivities to rates beyond their last payment date.  # noqa: E501

        :return: The maturity_date of this InterestRateSwap.  # noqa: E501
        :rtype: datetime
        """
        return self._maturity_date

    @maturity_date.setter
    def maturity_date(self, maturity_date):
        """Sets the maturity_date of this InterestRateSwap.

        The final maturity date of the instrument. This means the last date on which the instruments makes a payment of any amount. For the avoidance of doubt, that is not necessarily prior to its last sensitivity date for the purposes of risk; e.g. instruments such as Constant Maturity Swaps (CMS) often have sensitivities to rates beyond their last payment date.  # noqa: E501

        :param maturity_date: The maturity_date of this InterestRateSwap.  # noqa: E501
        :type maturity_date: datetime
        """
        if self.local_vars_configuration.client_side_validation and maturity_date is None:  # noqa: E501
            raise ValueError("Invalid value for `maturity_date`, must not be `None`")  # noqa: E501

        self._maturity_date = maturity_date

    @property
    def is_non_deliverable(self):
        """Gets the is_non_deliverable of this InterestRateSwap.  # noqa: E501

        Is the contract an IRS of \"Non-Deliverable\" type, meaning a single payment in the settlement currency based on the difference between the fixed and floating rates.  # noqa: E501

        :return: The is_non_deliverable of this InterestRateSwap.  # noqa: E501
        :rtype: bool
        """
        return self._is_non_deliverable

    @is_non_deliverable.setter
    def is_non_deliverable(self, is_non_deliverable):
        """Sets the is_non_deliverable of this InterestRateSwap.

        Is the contract an IRS of \"Non-Deliverable\" type, meaning a single payment in the settlement currency based on the difference between the fixed and floating rates.  # noqa: E501

        :param is_non_deliverable: The is_non_deliverable of this InterestRateSwap.  # noqa: E501
        :type is_non_deliverable: bool
        """

        self._is_non_deliverable = is_non_deliverable

    @property
    def legs(self):
        """Gets the legs of this InterestRateSwap.  # noqa: E501

        The set of instrument legs that define the swap instrument.  # noqa: E501

        :return: The legs of this InterestRateSwap.  # noqa: E501
        :rtype: list[lusid_asyncio.InstrumentLeg]
        """
        return self._legs

    @legs.setter
    def legs(self, legs):
        """Sets the legs of this InterestRateSwap.

        The set of instrument legs that define the swap instrument.  # noqa: E501

        :param legs: The legs of this InterestRateSwap.  # noqa: E501
        :type legs: list[lusid_asyncio.InstrumentLeg]
        """
        if self.local_vars_configuration.client_side_validation and legs is None:  # noqa: E501
            raise ValueError("Invalid value for `legs`, must not be `None`")  # noqa: E501

        self._legs = legs

    @property
    def settlement_ccy(self):
        """Gets the settlement_ccy of this InterestRateSwap.  # noqa: E501

        Settlement currency if IRS is non-deliverable.  # noqa: E501

        :return: The settlement_ccy of this InterestRateSwap.  # noqa: E501
        :rtype: str
        """
        return self._settlement_ccy

    @settlement_ccy.setter
    def settlement_ccy(self, settlement_ccy):
        """Sets the settlement_ccy of this InterestRateSwap.

        Settlement currency if IRS is non-deliverable.  # noqa: E501

        :param settlement_ccy: The settlement_ccy of this InterestRateSwap.  # noqa: E501
        :type settlement_ccy: str
        """

        self._settlement_ccy = settlement_ccy

    @property
    def instrument_type(self):
        """Gets the instrument_type of this InterestRateSwap.  # noqa: E501

        The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashFlowsLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CapFloor, CashSettled, CdsIndex, Basket, FundingLeg, CrossCurrencySwap, FxSwap, ForwardRateAgreement, SimpleInstrument, Repo, Equity, ExchangeTradedOption  # noqa: E501

        :return: The instrument_type of this InterestRateSwap.  # noqa: E501
        :rtype: str
        """
        return self._instrument_type

    @instrument_type.setter
    def instrument_type(self, instrument_type):
        """Sets the instrument_type of this InterestRateSwap.

        The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashFlowsLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CapFloor, CashSettled, CdsIndex, Basket, FundingLeg, CrossCurrencySwap, FxSwap, ForwardRateAgreement, SimpleInstrument, Repo, Equity, ExchangeTradedOption  # noqa: E501

        :param instrument_type: The instrument_type of this InterestRateSwap.  # noqa: E501
        :type instrument_type: str
        """
        if self.local_vars_configuration.client_side_validation and instrument_type is None:  # noqa: E501
            raise ValueError("Invalid value for `instrument_type`, must not be `None`")  # noqa: E501
        allowed_values = ["QuotedSecurity", "InterestRateSwap", "FxForward", "Future", "ExoticInstrument", "FxOption", "CreditDefaultSwap", "InterestRateSwaption", "Bond", "EquityOption", "FixedLeg", "FloatingLeg", "BespokeCashFlowsLeg", "Unknown", "TermDeposit", "ContractForDifference", "EquitySwap", "CashPerpetual", "CapFloor", "CashSettled", "CdsIndex", "Basket", "FundingLeg", "CrossCurrencySwap", "FxSwap", "ForwardRateAgreement", "SimpleInstrument", "Repo", "Equity", "ExchangeTradedOption"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and instrument_type not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `instrument_type` ({0}), must be one of {1}"  # noqa: E501
                .format(instrument_type, allowed_values)
            )

        self._instrument_type = instrument_type

    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialise nested models; pass ``serialize`` through to
        # to_dict implementations that accept it.
        def convert(x):
            if hasattr(x, "to_dict"):
                args = getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # When serialising for the wire, use the JSON key instead of the
            # python attribute name.
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, InterestRateSwap):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, InterestRateSwap):
            return True

        return self.to_dict() != other.to_dict()
| 44.4 | 513 | 0.672406 |
a0717de16734fc5fca1cb26d5cf5e133d88b4bfc | 23,775 | py | Python | aiida/backends/djsite/querybuilder_django/querybuilder_django.py | tomzhang/aiida_core | 949810e9f3daff0f748c5c9aa1dde4f5222bb49b | [
"BSD-2-Clause"
] | 1 | 2019-04-29T12:39:31.000Z | 2019-04-29T12:39:31.000Z | aiida/backends/djsite/querybuilder_django/querybuilder_django.py | tomzhang/aiida_core | 949810e9f3daff0f748c5c9aa1dde4f5222bb49b | [
"BSD-2-Clause"
] | null | null | null | aiida/backends/djsite/querybuilder_django/querybuilder_django.py | tomzhang/aiida_core | 949810e9f3daff0f748c5c9aa1dde4f5222bb49b | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import absolute_import
import datetime
from datetime import datetime
from json import loads as json_loads
import six
# ~ import aiida.backends.djsite.querybuilder_django.dummy_model as dummy_model
from . import dummy_model
from aiida.backends.djsite.db.models import DbAttribute, DbExtra, ObjectDoesNotExist
from sqlalchemy import and_, or_, not_, exists, select, exists, case
from sqlalchemy.types import Float, String
from sqlalchemy.orm import aliased
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.sql.expression import cast, ColumnClause
from sqlalchemy.sql.elements import Cast, Label
from aiida.common.exceptions import InputValidationError
from aiida.backends.general.querybuilder_interface import QueryBuilderInterface
from aiida.backends.utils import _get_column
from aiida.common.exceptions import (
InputValidationError, DbContentError,
MissingPluginError, ConfigurationError
)
class QueryBuilderImplDjango(QueryBuilderInterface):
    def __init__(self, *args, **kwargs):
        """Initialise the Django query-builder backend.

        All arguments are forwarded unchanged to the generic
        ``QueryBuilderInterface`` base class; the ORM entry points
        (Node, Group, ...) are exposed lazily via the properties below.
        """
        super(QueryBuilderImplDjango, self).__init__(*args, **kwargs)
    @property
    def Node(self):
        """SQLAlchemy-mapped (aldjemy dummy model) class for DbNode."""
        return dummy_model.DbNode

    @property
    def Link(self):
        """SQLAlchemy-mapped (aldjemy dummy model) class for DbLink."""
        return dummy_model.DbLink

    @property
    def Computer(self):
        """SQLAlchemy-mapped (aldjemy dummy model) class for DbComputer."""
        return dummy_model.DbComputer

    @property
    def User(self):
        """SQLAlchemy-mapped (aldjemy dummy model) class for DbUser."""
        return dummy_model.DbUser

    @property
    def Group(self):
        """SQLAlchemy-mapped (aldjemy dummy model) class for DbGroup."""
        return dummy_model.DbGroup

    @property
    def table_groups_nodes(self):
        """Association table linking groups and nodes."""
        return dummy_model.table_groups_nodes

    @property
    def AiidaNode(self):
        """Frontend ORM Node class of the Django implementation."""
        # Imported lazily to avoid import cycles at module load time.
        import aiida.orm.implementation.django.node
        return aiida.orm.implementation.django.node.Node

    @property
    def AiidaGroup(self):
        """Frontend ORM Group class of the Django implementation."""
        import aiida.orm.implementation.django.group
        return aiida.orm.implementation.django.group.Group

    @property
    def AiidaUser(self):
        """Frontend ORM User class."""
        import aiida.orm
        return aiida.orm.User

    @property
    def AiidaComputer(self):
        """Frontend ORM Computer class."""
        import aiida.orm
        return aiida.orm.Computer
def get_filter_expr_from_column(self, operator, value, column):
# Label is used because it is what is returned for the
# 'state' column by the hybrid_column construct
if not isinstance(column, (Cast, InstrumentedAttribute, Label, ColumnClause)):
raise TypeError(
'column ({}) {} is not a valid column'.format(
type(column), column
)
)
database_entity = column
if operator == '==':
expr = database_entity == value
elif operator == '>':
expr = database_entity > value
elif operator == '<':
expr = database_entity < value
elif operator == '>=':
expr = database_entity >= value
elif operator == '<=':
expr = database_entity <= value
elif operator == 'like':
expr = database_entity.like(value)
elif operator == 'ilike':
expr = database_entity.ilike(value)
elif operator == 'in':
expr = database_entity.in_(value)
else:
raise InputValidationError(
'Unknown operator {} for filters on columns'.format(operator)
)
return expr
    def get_filter_expr(
            self, operator, value, attr_key, is_attribute,
            alias=None, column=None, column_name=None
    ):
        """
        Applies a filter on the alias given.
        Expects the alias of the ORM-class on which to filter, and filter_spec.
        Filter_spec contains the specification on the filter.
        Expects:

        :param operator: The operator to apply, see below for further details
        :param value:
            The value for the right side of the expression,
            the value you want to compare with.

        :param path: The path leading to the value

        :param attr_key: Boolean, whether the value is in a json-column,
            or in an attribute like table.


        Implemented and valid operators:

        *   for any type:
            *   ==  (compare single value, eg: '==':5.0)
            *   in    (compare whether in list, eg: 'in':[5, 6, 34]
        *  for floats and integers:
            *   >
            *   <
            *   <=
            *   >=
        *  for strings:
            *   like  (case - sensitive), for example
                'like':'node.calc.%'  will match node.calc.relax and
                node.calc.RELAX and node.calc. but
                not node.CALC.relax
            *   ilike (case - unsensitive)
                will also match node.CaLc.relax in the above example

            .. note::
                The character % is a reserved special character in SQL,
                and acts as a wildcard. If you specifically
                want to capture a ``%`` in the string, use: ``_%``

        *   for arrays and dictionaries (only for the
            SQLAlchemy implementation):

            *   contains: pass a list with all the items that
                the array should contain, or that should be among
                the keys, eg: 'contains': ['N', 'H'])
            *   has_key: pass an element that the list has to contain
                or that has to be a key, eg: 'has_key':'N')

        *  for arrays only (SQLAlchemy version):
            *   of_length
            *   longer
            *   shorter

        All the above filters invoke a negation of the
        expression if preceded by **~**::

            # first example:
            filter_spec = {
                'name' : {
                    '~in':[
                        'halle',
                        'lujah'
                    ]
                } # Name not 'halle' or 'lujah'
            }

            # second example:
            filter_spec =  {
                'id' : {
                    '~==': 2
                }
            } # id is not 2
        """
        expr = None
        # A leading '~' or '!' inverts the whole expression at the end.
        if operator.startswith('~'):
            negation = True
            operator = operator.lstrip('~')
        elif operator.startswith('!'):
            negation = True
            operator = operator.lstrip('!')
        else:
            negation = False
        # Up-front validation of value types for the value-sensitive operators.
        if operator in ('longer', 'shorter', 'of_length'):
            if not isinstance(value, int):
                raise InputValidationError(
                    "You have to give an integer when comparing to a length"
                )
        elif operator in ('like', 'ilike'):
            if not isinstance(value, six.string_types):
                raise InputValidationError(
                    "Value for operator {} has to be a string (you gave {})"
                    "".format(operator, value)
                )
        elif operator == 'in':
            value_type_set = set([type(i) for i in value])
            if len(value_type_set) > 1:
                raise InputValidationError(
                    '{} contains more than one type'.format(value)
                )
            elif len(value_type_set) == 0:
                raise InputValidationError(
                    '{} contains is an empty list'.format(value)
                )
        elif operator in ('and', 'or'):
            # Logical combinators recurse into each sub-filter and join the
            # resulting expressions with AND / OR.
            expressions_for_this_path = []
            for filter_operation_dict in value:
                for newoperator, newvalue in filter_operation_dict.items():
                    expressions_for_this_path.append(
                        self.get_filter_expr(
                            newoperator, newvalue,
                            attr_key=attr_key, is_attribute=is_attribute,
                            alias=alias, column=column,
                            column_name=column_name
                        )
                    )
            if operator == 'and':
                expr = and_(*expressions_for_this_path)
            elif operator == 'or':
                expr = or_(*expressions_for_this_path)

        if expr is None:
            # Not a combinator: build a leaf expression, dispatching on
            # whether the value lives in the attribute/extra tables or in a
            # plain column.
            if is_attribute:
                expr = self.get_filter_expr_from_attributes(
                    operator, value, attr_key,
                    column=column, column_name=column_name, alias=alias
                )
            else:
                if column is None:
                    if (alias is None) and (column_name is None):
                        raise Exception(
                            "I need to get the column but do not know \n"
                            "the alias and the column name"
                        )
                    column = _get_column(column_name, alias)
                expr = self.get_filter_expr_from_column(operator, value, column)
        if negation:
            return not_(expr)
        return expr
    def get_session(self):
        """Return a SQLAlchemy session bound to the aldjemy engine."""
        return dummy_model.get_aldjemy_session()
def modify_expansions(self, alias, expansions):
"""
For the Django schema, we have as additioanl expansions 'attributes'
and 'extras'
"""
if issubclass(alias._sa_class_manager.class_, self.Node):
expansions.append("attributes")
expansions.append("extras")
elif issubclass(alias._sa_class_manager.class_, self.Computer):
try:
expansions.remove('metadata')
expansions.append('_metadata')
except KeyError:
pass
return expansions
    def get_filter_expr_from_attributes(
            self, operator, value, attr_key,
            column=None, column_name=None,
            alias=None):
        """Build a filter expression on a DbAttribute/DbExtra value.

        Django stores attribute values in typed columns (tval, bval, fval,
        ival, dval) of the attribute/extra tables; the right column(s) are
        inferred from the python type of *value*.

        :param operator: comparison operator (see ``get_filter_expr``).
        :param value: value to compare against.
        :param attr_key: list of keys leading to the attribute.
        :raises NotImplementedError: for operators the Django backend does
            not support (length checks, type checks, 'contains').
        """

        def get_attribute_db_column(mapped_class, dtype, castas=None):
            # Map the dtype flag to the typed value column of the
            # attribute table, with an optional cast to text or float.
            if dtype == 't':
                mapped_entity = mapped_class.tval
            elif dtype == 'b':
                mapped_entity = mapped_class.bval
            elif dtype == 'f':
                mapped_entity = mapped_class.fval
            elif dtype == 'i':
                mapped_entity = mapped_class.ival
            elif dtype == 'd':
                mapped_entity = mapped_class.dval
            else:
                raise InputValidationError(
                    "I don't know what to do with dtype {}".format(dtype)
                )
            if castas == 't':
                mapped_entity = cast(mapped_entity, String)
            elif castas == 'f':
                mapped_entity = cast(mapped_entity, Float)

            return mapped_entity

        if column:
            mapped_class = column.prop.mapper.class_
        else:
            column = getattr(alias, column_name)
            mapped_class = column.prop.mapper.class_
        # Ok, so we have an attribute key here.
        # Unless cast is specified, will try to infer my self where the value
        # is stored
        # Datetime -> dval
        # bool -> bval
        # string -> tval
        # integer -> ival, fval (cast ival to float)
        # float -> ival, fval (cast ival to float)
        # If the user specified of_type ??
        # That is basically a query for where the value is sitting
        #   (which db_column in the dbattribtues)
        # If the user specified in what to cast, he wants an operation to
        #   be performed to cast the value to a different type
        if isinstance(value, (list, tuple)):
            value_type_set = set([type(i) for i in value])
            if len(value_type_set) > 1:
                raise InputValidationError('{} contains more than one type'.format(value))
            elif len(value_type_set) == 0:
                raise InputValidationError('Given list is empty, cannot determine type')
            else:
                value_to_consider = value[0]
        else:
            value_to_consider = value

        # First cases, I maybe need not do anything but just count the
        # number of entries
        if operator in ('of_length', 'shorter', 'longer'):
            raise NotImplementedError(
                "Filtering by lengths of arrays or lists is not implemented\n"
                "in the Django-Backend"
            )
        elif operator == 'of_type':
            raise NotImplementedError(
                "Filtering by type is not implemented\n"
                "in the Django-Backend"
            )
        elif operator == 'contains':
            raise NotImplementedError(
                "Contains is not implemented in the Django-backend"
            )
        elif operator == 'has_key':
            # A sub-key exists iff there is a row whose dotted key path is
            # the parent path plus the requested key.
            if issubclass(mapped_class, dummy_model.DbAttribute):
                expr = alias.attributes.any(mapped_class.key == '.'.join(attr_key + [value]))
            elif issubclass(mapped_class, dummy_model.DbExtra):
                expr = alias.extras.any(mapped_class.key == '.'.join(attr_key + [value]))
            else:
                raise Exception("I was given {} as an attribute base class".format(mapped_class))
        else:
            # Infer the candidate (dtype, cast) column pairs from the
            # python type of the value; numbers probe both fval and ival.
            types_n_casts = []
            if isinstance(value_to_consider, six.string_types):
                types_n_casts.append(('t', None))
            elif isinstance(value_to_consider, bool):
                types_n_casts.append(('b', None))
            elif isinstance(value_to_consider, (int, float)):
                types_n_casts.append(('f', None))
                types_n_casts.append(('i', 'f'))
            elif isinstance(value_to_consider, datetime):
                types_n_casts.append(('d', None))

            expressions = []
            for dtype, castas in types_n_casts:
                try:
                    expressions.append(
                        self.get_filter_expr(
                            operator, value, attr_key=[],
                            column=get_attribute_db_column(mapped_class, dtype, castas=castas),
                            is_attribute=False
                        )
                    )
                except InputValidationError as e:
                    raise e

            actual_attr_key = '.'.join(attr_key)
            expr = column.any(and_(
                mapped_class.key == actual_attr_key,
                or_(*expressions)
            )
            )
        return expr
def get_projectable_attribute(
self, alias, column_name, attrpath,
cast=None, **kwargs
):
if cast is not None:
raise NotImplementedError(
"Casting is not implemented in the Django backend"
)
if not attrpath:
# If the user with Django backend wants all the attributes or all
# the extras, I will select as entity the ID of the node.
# in get_aiida_res, this is transformed to the dictionary of attributes.
if column_name in ('attributes', 'extras'):
entity = alias.id
else:
raise NotImplementedError(
"Whatever you asked for "
"({}) is not implemented"
"".format(column_name)
)
else:
aliased_attributes = aliased(getattr(alias, column_name).prop.mapper.class_)
if not issubclass(alias._aliased_insp.class_, self.Node):
NotImplementedError(
"Other classes than Nodes are not implemented yet"
)
attrkey = '.'.join(attrpath)
exists_stmt = exists(select([1], correlate=True).select_from(
aliased_attributes
).where(and_(
aliased_attributes.key == attrkey,
aliased_attributes.dbnode_id == alias.id
)))
select_stmt = select(
[aliased_attributes.id], correlate=True
).select_from(aliased_attributes).where(and_(
aliased_attributes.key == attrkey,
aliased_attributes.dbnode_id == alias.id
)).label('miao')
entity = case([(exists_stmt, select_stmt), ], else_=None)
return entity
    def get_aiida_res(self, key, res):
        """
        Some instance returned by ORM (django or SA) need to be converted
        to Aiida instances (eg nodes)

        :param res: the result returned by the query
        :param key: the key that this entry would be return with

        :returns: an aiida-compatible instance
        """
        # NOTE: the branch order matters — the dotted forms
        # ('attributes.x', 'extras.x') must be checked before the bare
        # 'attributes'/'extras' keys.
        if key.startswith('attributes.'):
            # If you want a specific attributes, that key was stored in res.
            # So I call the getvalue method to expand into a dictionary
            try:
                returnval = DbAttribute.objects.get(id=res).getvalue()
            except ObjectDoesNotExist:
                # If the object does not exist, return None. This is consistent
                # with SQLAlchemy inside the JSON
                returnval = None
        elif key.startswith('extras.'):
            # Same as attributes
            try:
                returnval = DbExtra.objects.get(id=res).getvalue()
            except ObjectDoesNotExist:
                returnval = None
        elif key == 'attributes':
            # If you asked for all attributes, the QB return the ID of the node
            # I use DbAttribute.get_all_values_for_nodepk
            # to get the dictionary
            return DbAttribute.get_all_values_for_nodepk(res)
        elif key == 'extras':
            # same as attributes
            return DbExtra.get_all_values_for_nodepk(res)
        elif key in ('_metadata', 'transport_params') and res is not None:
            # Metadata and transport_params are stored as json strings in the DB:
            return json_loads(res)
        elif isinstance(res, (self.Group, self.Node, self.Computer, self.User)):
            # Backend rows are wrapped into their frontend AiiDA classes.
            returnval = res.get_aiida_class()
        else:
            returnval = res
        return returnval
    def yield_per(self, query, batch_size):
        """
        :param count: Number of rows to yield per step

        Yields *count* rows at a time

        :returns: a generator
        """
        from django.db import transaction
        # Wrapped in an atomic transaction so the cursor stays consistent
        # while batches are fetched.
        with transaction.atomic():
            return query.yield_per(batch_size)
    def count(self, query):
        """Return the number of rows the query matches.

        :param query: a SQLAlchemy query object.
        :returns: the row count as an integer.
        """
        from django.db import transaction
        with transaction.atomic():
            return query.count()
    def first(self, query):
        """
        Executes query in the backend asking for one instance.

        :returns: One row of aiida results (or None if the query is empty)
        """
        from django.db import transaction
        with transaction.atomic():
            return query.first()
def iterall(self, query, batch_size, tag_to_index_dict):
from django.db import transaction
if not tag_to_index_dict:
raise Exception("Got an empty dictionary: {}".format(tag_to_index_dict))
with transaction.atomic():
results = query.yield_per(batch_size)
if len(tag_to_index_dict) == 1:
# Sqlalchemy, for some strange reason, does not return a list of lsits
# if you have provided an ormclass
if list(tag_to_index_dict.values()) == ['*']:
for rowitem in results:
yield [self.get_aiida_res(tag_to_index_dict[0], rowitem)]
else:
for rowitem, in results:
yield [self.get_aiida_res(tag_to_index_dict[0], rowitem)]
elif len(tag_to_index_dict) > 1:
for resultrow in results:
yield [
self.get_aiida_res(tag_to_index_dict[colindex], rowitem)
for colindex, rowitem
in enumerate(resultrow)
]
    def iterdict(self, query, batch_size, tag_to_projected_entity_dict):
        """
        Iterate over the query results, yielding one nested dictionary per
        row: {tag: {projection-key: aiida-compatible value}}.

        :param query: the query to execute
        :param batch_size: rows fetched per step (passed to yield_per)
        :param tag_to_projected_entity_dict: maps each tag to a dictionary
            of {projection-key: column index in the SQL result}
        """
        from django.db import transaction
        # Total number of projected columns across all tags.
        nr_items = sum(len(v) for v in tag_to_projected_entity_dict.values())
        if not nr_items:
            raise Exception("Got an empty dictionary")
        # Wrapping everything in an atomic transaction:
        with transaction.atomic():
            results = query.yield_per(batch_size)
            # If more than one column was asked, the database returns a matrix of rows * columns:
            if nr_items > 1:
                for this_result in results:
                    yield {
                        tag: {
                            attrkey: self.get_aiida_res(
                                attrkey, this_result[index_in_sql_result]
                            )
                            for attrkey, index_in_sql_result
                            in projected_entities_dict.items()
                        }
                        for tag, projected_entities_dict
                        in tag_to_projected_entity_dict.items()
                    }
            elif nr_items == 1:
                # In this case, sql returns a list, where each listitem is the result
                # for one row. Here I am converting it to a list of lists (of length 1)
                if [v for entityd in tag_to_projected_entity_dict.values() for v in entityd.keys()] == ['*']:
                    # A single full entity was projected: each row IS the entity.
                    for this_result in results:
                        yield {
                            tag: {
                                attrkey: self.get_aiida_res(attrkey, this_result)
                                for attrkey, position in projected_entities_dict.items()
                            }
                            for tag, projected_entities_dict in tag_to_projected_entity_dict.items()
                        }
                else:
                    # A single scalar column: each row is a 1-tuple, unpack it.
                    for this_result, in results:
                        yield {
                            tag: {
                                attrkey: self.get_aiida_res(attrkey, this_result)
                                for attrkey, position in projected_entities_dict.items()
                            }
                            for tag, projected_entities_dict in tag_to_projected_entity_dict.items()
                        }
| 38.658537 | 109 | 0.543596 |
754efeb117c966ab4ec29a10d93113198c0c187b | 1,253 | py | Python | nlp_utils.py | UKPLab/acl2017-theta_evaluation_summarization | e96868622c81cfe752be3795639b2fb415a5ee27 | [
"Apache-2.0"
] | 1 | 2021-08-24T13:06:09.000Z | 2021-08-24T13:06:09.000Z | nlp_utils.py | Zhujunnan/acl2017-theta_evaluation_summarization | e96868622c81cfe752be3795639b2fb415a5ee27 | [
"Apache-2.0"
] | null | null | null | nlp_utils.py | Zhujunnan/acl2017-theta_evaluation_summarization | e96868622c81cfe752be3795639b2fb415a5ee27 | [
"Apache-2.0"
] | 4 | 2017-08-04T10:08:01.000Z | 2018-04-04T10:53:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, unicode_literals
# Get the python version (used to try to decode an unknown instance to unicode)
from sys import version_info
PY3 = version_info[0] == 3
# Use classical Snowball stemmer for english
import nltk
from nltk.util import ngrams
from nltk.stem.snowball import SnowballStemmer
# Module-level stemmer instance, shared by all callers.
stemmer = SnowballStemmer("english")
from nltk.tokenize import RegexpTokenizer
# Tokenizer that keeps only word characters (drops punctuation).
tokenizer = RegexpTokenizer(r'\w+')
from nltk.corpus import stopwords
# Frozen set of English stopwords, for fast membership tests.
stopset = frozenset(stopwords.words('english'))
# Convert an object to its unicode representation (if possible)
def to_unicode(object):
    """Return the unicode (text) representation of ``object``.

    bytes are decoded as UTF-8, text is returned unchanged, and any other
    object is converted through its ``__str__``/``__unicode__``/``__bytes__``
    hooks, depending on the interpreter version.

    Fixes over the previous version: the fallback branches referenced an
    undefined name ``instance`` instead of the parameter, a leftover debug
    ``print`` was removed, and the PY3 branch called ``unicode`` which does
    not exist on Python 3.

    (The parameter deliberately keeps its historical name ``object`` even
    though it shadows the builtin, to stay keyword-compatible with callers.)
    """
    # Check bytes first: this branch then also works on Python 3, where the
    # name ``unicode`` does not exist. bytes and unicode are disjoint types,
    # so the reordering cannot change the result.
    if isinstance(object, bytes):
        return object.decode("utf8")
    if PY3:
        if isinstance(object, str):
            # Already text on Python 3.
            return object
        if hasattr(object, "__str__"):
            return str(object)
        if hasattr(object, "__bytes__"):
            return bytes(object).decode("utf8")
    else:
        if isinstance(object, unicode):
            return object
        if hasattr(object, "__unicode__"):
            return unicode(object)
        if hasattr(object, "__str__"):
            # On Python 2, bytes is str; decode the str form to unicode.
            return bytes(object).decode("utf8")
# convert to unicode and convert to lower case
def normalize_word(word):
    """Lower-case the unicode form of *word*."""
    text = to_unicode(word)
    return text.lower()
70bd01d586dd7f8eea86542a16620ff09dd532e5 | 1,913 | py | Python | Solutions/context_managers/raises.py | UWPCE-PythonCert/InstructorResources | 13b1cfa4cad6a8c3491f8a602c8afda5400c9ac7 | [
"Unlicense"
] | null | null | null | Solutions/context_managers/raises.py | UWPCE-PythonCert/InstructorResources | 13b1cfa4cad6a8c3491f8a602c8afda5400c9ac7 | [
"Unlicense"
] | 1 | 2020-12-20T17:07:17.000Z | 2020-12-20T17:07:17.000Z | Solutions/context_managers/raises.py | UWPCE-PythonCert/InstructorResources | 13b1cfa4cad6a8c3491f8a602c8afda5400c9ac7 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env
"""
A couple nifty context managers
"""
import pytest
class Failed(AssertionError):
    """Raised by ``Raises`` when the expected exception did not occur."""
class Raises:
    """Context manager asserting that its body raises one of the given
    exception types (compared by exact type, not by subclass)."""

    def __init__(self, *args):
        print("initializing:", args)
        self.exceptions = args

    def __enter__(self):
        # Nothing to set up.
        return None

    def __exit__(self, exc_type, exc_value, traceback):
        """Check which exception (if any) came out of the body."""
        if exc_type in self.exceptions:
            # Expected exception: swallow it so the test passes.
            return True
        expected = ", ".join(e.__name__ for e in self.exceptions)
        if exc_type is None:
            raise Failed("No error was raised -- expected {}".format(expected))
        raise Failed("{} raised -- expected {}".format(exc_type.__name__,
                                                       expected))
# putting the tests for raises
# Four are expected to fail
def test_one_exp_pass():
    """Passes: the body raises the one expected ZeroDivisionError."""
    with Raises(ZeroDivisionError):
        45 / 0
def test_multiple_exp_pass():
    """Passes: ZeroDivisionError is among the several expected exceptions."""
    with Raises(ZeroDivisionError, AttributeError, RuntimeError):
        45 / 0
def test_multiple_exp_pass2():
    """Passes: the attribute access raises AttributeError, which is expected."""
    with Raises(ZeroDivisionError, AttributeError, RuntimeError):
        x = 5
        x.something_not_there
def test_one_exp_fail():
    """Fails (intentionally): 45 / 5 raises nothing, so Raises raises Failed."""
    with Raises(ZeroDivisionError):
        45 / 5
def test_one_exp_fail_diff_exp():
    """Fails (intentionally): 45 / 0.0 raises ZeroDivisionError,
    not the expected AttributeError."""
    with Raises(AttributeError):
        45 / 0.0
def test_multiple_exp_fail():
    """Fails (intentionally): 45 / 5 raises none of the expected exceptions."""
    with Raises(ZeroDivisionError, AttributeError, RuntimeError):
        45 / 5
def test_multiple_exp_fail_diff_exp():
    """Fails (intentionally): float() raises ValueError, which is not expected."""
    with Raises(ZeroDivisionError, AttributeError, RuntimeError):
        float("not a number")
| 23.9125 | 75 | 0.604286 |
3e07f925e00bc7234535c433585744e3e5fad517 | 6,842 | py | Python | synapse/federation/sender/transaction_manager.py | littlebenlittle/synapse | 0eccf531466d762ede0dd365284a8465bfb18d0f | [
"Apache-2.0"
] | 1 | 2021-04-27T19:04:56.000Z | 2021-04-27T19:04:56.000Z | synapse/federation/sender/transaction_manager.py | littlebenlittle/synapse | 0eccf531466d762ede0dd365284a8465bfb18d0f | [
"Apache-2.0"
] | null | null | null | synapse/federation/sender/transaction_manager.py | littlebenlittle/synapse | 0eccf531466d762ede0dd365284a8465bfb18d0f | [
"Apache-2.0"
] | 1 | 2021-09-27T14:45:52.000Z | 2021-09-27T14:45:52.000Z | # -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, List
from prometheus_client import Gauge
from synapse.api.errors import HttpResponseException
from synapse.events import EventBase
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.logging.opentracing import (
extract_text_map,
set_tag,
start_active_span_follows_from,
tags,
whitelisted_homeserver,
)
from synapse.util import json_decoder
from synapse.util.metrics import measure_func
if TYPE_CHECKING:
import synapse.server
logger = logging.getLogger(__name__)
# Prometheus gauge: age (in seconds) of the last PDU successfully sent,
# labelled by destination. Only updated for destinations listed in
# federation_metrics_domains (see TransactionManager.send_new_transaction).
last_pdu_age_metric = Gauge(
    "synapse_federation_last_sent_pdu_age",
    "The age (in seconds) of the last PDU successfully sent to the given domain",
    labelnames=("server_name",),
)
class TransactionManager:
    """Helper class which handles building and sending transactions

    shared between PerDestinationQueue objects
    """

    def __init__(self, hs: "synapse.server.HomeServer"):
        self._server_name = hs.hostname
        self.clock = hs.get_clock()  # nb must be called this for @measure_func
        self._store = hs.get_datastore()
        self._transaction_actions = TransactionActions(self._store)
        self._transport_layer = hs.get_federation_transport_client()
        # Destinations for which we report per-domain PDU-age metrics.
        self._federation_metrics_domains = (
            hs.get_config().federation.federation_metrics_domains
        )
        # HACK to get unique tx id
        self._next_txn_id = int(self.clock.time_msec())

    @measure_func("_send_new_transaction")
    async def send_new_transaction(
        self, destination: str, pdus: List[EventBase], edus: List[Edu],
    ) -> bool:
        """
        Build a single federation transaction from the given PDUs/EDUs and
        send it over the transport layer.

        Args:
            destination: The destination to send to (e.g. 'example.org')
            pdus: In-order list of PDUs to send
            edus: List of EDUs to send

        Returns:
            True iff the transaction was successful
        """

        # Make a transaction-sending opentracing span. This span follows on from
        # all the edus in that transaction. This needs to be done since there is
        # no active span here, so if the edus were not received by the remote the
        # span would have no causality and it would be forgotten.
        span_contexts = []
        keep_destination = whitelisted_homeserver(destination)
        for edu in edus:
            context = edu.get_context()
            if context:
                span_contexts.append(extract_text_map(json_decoder.decode(context)))
            if keep_destination:
                edu.strip_context()
        with start_active_span_follows_from("send_transaction", span_contexts):
            success = True
            logger.debug("TX [%s] _attempt_new_transaction", destination)
            txn_id = str(self._next_txn_id)
            logger.debug(
                "TX [%s] {%s} Attempting new transaction (pdus: %d, edus: %d)",
                destination,
                txn_id,
                len(pdus),
                len(edus),
            )
            transaction = Transaction.create_new(
                origin_server_ts=int(self.clock.time_msec()),
                transaction_id=txn_id,
                origin=self._server_name,
                destination=destination,
                pdus=pdus,
                edus=edus,
            )
            # Bump the counter so the next transaction gets a fresh id.
            self._next_txn_id += 1
            logger.info(
                "TX [%s] {%s} Sending transaction [%s], (PDUs: %d, EDUs: %d)",
                destination,
                txn_id,
                transaction.transaction_id,
                len(pdus),
                len(edus),
            )
            # Actually send the transaction

            # FIXME (erikj): This is a bit of a hack to make the Pdu age
            # keys work
            # FIXME (richardv): I also believe it no longer works. We (now?) store
            # "age_ts" in "unsigned" rather than at the top level. See
            # https://github.com/matrix-org/synapse/issues/8429.
            def json_data_cb():
                # Rewrite each PDU's absolute "age_ts" into a relative
                # "unsigned.age" just before serialisation.
                data = transaction.get_dict()
                now = int(self.clock.time_msec())
                if "pdus" in data:
                    for p in data["pdus"]:
                        if "age_ts" in p:
                            unsigned = p.setdefault("unsigned", {})
                            unsigned["age"] = now - int(p["age_ts"])
                            del p["age_ts"]
                return data

            try:
                response = await self._transport_layer.send_transaction(
                    transaction, json_data_cb
                )
                code = 200
            except HttpResponseException as e:
                code = e.code
                response = e.response
                # NOTE(review): for these status codes the exception is
                # re-raised to the caller instead of being treated as a
                # normal failure response -- presumably so the sending
                # queue backs off; confirm against PerDestinationQueue.
                if e.code in (401, 404, 429) or 500 <= e.code:
                    logger.info(
                        "TX [%s] {%s} got %d response", destination, txn_id, code
                    )
                    raise e
            logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
            if code == 200:
                # The remote accepted the transaction; log any per-PDU errors.
                for e_id, r in response.get("pdus", {}).items():
                    if "error" in r:
                        logger.warning(
                            "TX [%s] {%s} Remote returned error for %s: %s",
                            destination,
                            txn_id,
                            e_id,
                            r,
                        )
            else:
                for p in pdus:
                    logger.warning(
                        "TX [%s] {%s} Failed to send event %s",
                        destination,
                        txn_id,
                        p.event_id,
                    )
                success = False
            # Record the age of the newest PDU we managed to deliver, but
            # only for domains we export per-domain metrics for.
            if success and pdus and destination in self._federation_metrics_domains:
                last_pdu = pdus[-1]
                last_pdu_age = self.clock.time_msec() - last_pdu.origin_server_ts
                last_pdu_age_metric.labels(server_name=destination).set(
                    last_pdu_age / 1000
                )
            set_tag(tags.ERROR, not success)
            return success
| 35.268041 | 84 | 0.560216 |
ce565923d8ea1d443ea14a76e71d9fd48b1a5863 | 1,991 | py | Python | sdk/python/feast/templates/spark/example.py | jbvaningen/feast | 70d4a1335312747521fc57e7742abf7eb85c12a6 | [
"Apache-2.0"
] | null | null | null | sdk/python/feast/templates/spark/example.py | jbvaningen/feast | 70d4a1335312747521fc57e7742abf7eb85c12a6 | [
"Apache-2.0"
] | null | null | null | sdk/python/feast/templates/spark/example.py | jbvaningen/feast | 70d4a1335312747521fc57e7742abf7eb85c12a6 | [
"Apache-2.0"
] | null | null | null | # # # # # # # # # # # # # # # # # # # # # # # #
# This is an example feature definition file #
# # # # # # # # # # # # # # # # # # # # # # # #
from datetime import timedelta
from pathlib import Path
from feast import Entity, Feature, FeatureView, ValueType
from feast.infra.offline_stores.contrib.spark_offline_store.spark_source import (
SparkSource,
)
# Constants related to the generated data sets
CURRENT_DIR = Path(__file__).parent
# Entity definitions
driver = Entity(name="driver_id", value_type=ValueType.INT64, description="driver id",)
customer = Entity(
    name="customer_id", value_type=ValueType.INT64, description="customer id",
)
# Sources: parquet files read through the Spark offline store.
driver_hourly_stats = SparkSource(
    name="driver_hourly_stats",
    path=f"{CURRENT_DIR}/data/driver_hourly_stats.parquet",
    file_format="parquet",
    timestamp_field="event_timestamp",
    created_timestamp_column="created",
)
customer_daily_profile = SparkSource(
    name="customer_daily_profile",
    path=f"{CURRENT_DIR}/data/customer_daily_profile.parquet",
    file_format="parquet",
    timestamp_field="event_timestamp",
    created_timestamp_column="created",
)
# Feature Views: features kept for 7 days and served online.
driver_hourly_stats_view = FeatureView(
    name="driver_hourly_stats",
    entities=["driver_id"],
    ttl=timedelta(days=7),
    features=[
        Feature(name="conv_rate", dtype=ValueType.FLOAT),
        Feature(name="acc_rate", dtype=ValueType.FLOAT),
        Feature(name="avg_daily_trips", dtype=ValueType.INT64),
    ],
    online=True,
    batch_source=driver_hourly_stats,
    tags={},
)
customer_daily_profile_view = FeatureView(
    name="customer_daily_profile",
    entities=["customer_id"],
    ttl=timedelta(days=7),
    features=[
        Feature(name="current_balance", dtype=ValueType.FLOAT),
        Feature(name="avg_passenger_count", dtype=ValueType.FLOAT),
        Feature(name="lifetime_trip_count", dtype=ValueType.INT64),
    ],
    online=True,
    batch_source=customer_daily_profile,
    tags={},
)
| 30.166667 | 87 | 0.698644 |
11ec79710f89b06deafd91461a3224c19335cfba | 1,319 | py | Python | exercicios/ex112/utilidadescev/moeda/__init__.py | gabrielaraujo3/exercicios-python | 37a431854205e489cb7cede8fcae459bfef75a39 | [
"MIT"
] | null | null | null | exercicios/ex112/utilidadescev/moeda/__init__.py | gabrielaraujo3/exercicios-python | 37a431854205e489cb7cede8fcae459bfef75a39 | [
"MIT"
] | null | null | null | exercicios/ex112/utilidadescev/moeda/__init__.py | gabrielaraujo3/exercicios-python | 37a431854205e489cb7cede8fcae459bfef75a39 | [
"MIT"
] | null | null | null | def aumentar(preco=0, taxa=0, formato=False):
"""
-> Calcula o aumentoo de um determinado preço,
retornando o resultado com ou sem formatação.
:param preco: o preço que se quer reajustar.
:param taxa: qual é a porcentagem do aumento.
:param formato: quer a saída formatada ou não?
:return: o valor reajustado, com ou sem formato.
"""
res = preco + (preco * taxa/100)
return res if formato is False else moeda(res)
def diminuir(preco=0, taxa=0, formato=False):
    """Apply a percentage discount to *preco*; format the result as
    currency when *formato* is anything other than the literal False."""
    res = preco - (preco * taxa / 100)
    if formato is False:
        return res
    return moeda(res)
def dobro(preco=0, formato=False):
    """Return twice *preco*, optionally formatted as currency."""
    res = preco * 2
    return moeda(res) if formato else res
def metade(preco=0, formato=False):
    """Return half of *preco*, optionally formatted as currency."""
    res = preco / 2
    return moeda(res) if formato else res
def moeda(preco=0, moedaa='R$'):
    """Format *preco* with two decimals and the given currency symbol,
    using a comma as the decimal separator (Brazilian style)."""
    texto = f'{moedaa}{preco:>.2f}'
    return texto.replace('.', ',')
def resumo(preco=0, taxaa=10, taxar=5):
    """Print a summary table for *preco*: its double, its half, and the
    values after applying *taxaa*% increase and *taxar*% reduction.
    All monetary values are printed already formatted as currency."""
    print('-' * 30)
    print('RESUMO DO VALOR'.center(30))
    print('-' * 30)
    print(f'Preço analisado: \t{moeda(preco)}')
    print(f'Dobro do preco: \t{dobro(preco, True)}')
    print(f'Metade do preço: \t{metade(preco, True)}')
    print(f'{taxaa}% de aumento: \t{aumentar(preco, taxaa, True)}')
    print(f'{taxar}% de redução: \t{diminuir(preco, taxar, True)}')
    print('-' * 30)
| 30.674419 | 67 | 0.639121 |
f90e9035d79693f4c0777851555345f59deb6fa9 | 719 | py | Python | WORDPLEASE/src/users/permissions.py | agdwm/08_KC_Python-Django_Entrega | cb28879624e7a8634cef703774894ff128557055 | [
"MIT"
] | null | null | null | WORDPLEASE/src/users/permissions.py | agdwm/08_KC_Python-Django_Entrega | cb28879624e7a8634cef703774894ff128557055 | [
"MIT"
] | 2 | 2020-06-05T17:23:57.000Z | 2021-03-19T21:53:45.000Z | WORDPLEASE/src/users/permissions.py | agdwm/08_KC_Python_Django_Entrega | cb28879624e7a8634cef703774894ff128557055 | [
"MIT"
] | null | null | null | from rest_framework.permissions import BasePermission
class UsersPermission(BasePermission):
    """DRF permission for the users API: anyone may POST (register),
    superusers may do anything, authenticated users may GET the detail
    endpoint or modify/delete; everything else is denied."""

    def has_permission(self, request, view):
        # Imported locally to avoid a circular dependency with users.api
        from users.api import UserDetailAPI

        method = request.method
        user = request.user
        if method == "POST" or user.is_superuser:
            return True
        if method == "GET" and user.is_authenticated and isinstance(view, UserDetailAPI):
            return True
        return user.is_authenticated and method in ("PUT", "DELETE")

    def has_object_permission(self, request, view, obj):
        """Only the user themselves or a superuser may act on a user object."""
        user = request.user
        return user == obj or user.is_superuser
e0e32c682a4277921e8ec96e569bcc383c9962aa | 3,750 | py | Python | ecs_container_exporter/io_metrics.py | chroto/ecs-container-exporter | ea18a9f72b03f8184a443a86bc46b13cc3be7a99 | [
"MIT"
] | null | null | null | ecs_container_exporter/io_metrics.py | chroto/ecs-container-exporter | ea18a9f72b03f8184a443a86bc46b13cc3be7a99 | [
"MIT"
] | null | null | null | ecs_container_exporter/io_metrics.py | chroto/ecs-container-exporter | ea18a9f72b03f8184a443a86bc46b13cc3be7a99 | [
"MIT"
] | null | null | null | import logging
from collections import defaultdict
from ecs_container_exporter.utils import create_metric, create_task_metrics, TASK_CONTAINER_NAME_TAG
log = logging.getLogger()
def calculate_io_metrics(stats, task_container_tags):
"""
Calculate IO metrics from the below data:
"blkio_stats": {
"io_merged_recursive": [],
"io_queue_recursive": [],
"io_service_bytes_recursive": [
{
"major": 259,
"minor": 0,
"op": "Read",
"value": 10653696
},
{
"major": 259,
"minor": 0,
"op": "Write",
"value": 0
},
{
"major": 259,
"minor": 0,
"op": "Sync",
"value": 10653696
},
{
"major": 259,
"minor": 0,
"op": "Async",
"value": 0
},
{
"major": 259,
"minor": 0,
"op": "Total",
"value": 10653696
}
],
"io_service_time_recursive": [],
"io_serviced_recursive": [
{
"major": 259,
"minor": 0,
"op": "Read",
"value": 164
},
{
"major": 259,
"minor": 0,
"op": "Write",
"value": 0
},
{
"major": 259,
"minor": 0,
"op": "Sync",
"value": 164
},
{
"major": 259,
"minor": 0,
"op": "Async",
"value": 0
},
{
"major": 259,
"minor": 0,
"op": "Total",
"value": 164
}
],
"io_time_recursive": [],
"io_wait_time_recursive": [],
"sectors_recursive": []
},
"""
metrics_by_container = {}
# task level metrics
task_metrics = defaultdict(int)
# if this changes, the task metrics logic will change below
metric_type = 'counter'
for container_id, container_stats in stats.items():
metrics = []
blkio_stats = container_stats.get('blkio_stats')
iostats = {'io_service_bytes_recursive': 'bytes', 'io_serviced_recursive': 'iops'}
for blk_key, blk_type in iostats.items():
tags = task_container_tags[container_id]
read_counter = write_counter = 0
for blk_stat in blkio_stats.get(blk_key):
if blk_stat['op'] == 'Read' and 'value' in blk_stat:
read_counter += blk_stat['value']
elif blk_stat['op'] == 'Write' and 'value' in blk_stat:
write_counter += blk_stat['value']
metrics.append(
create_metric('disk_read_' + blk_type + '_total', read_counter, tags,
metric_type, 'Total disk read ' + blk_type)
)
metrics.append(
create_metric('disk_written_' + blk_type + '_total', write_counter, tags,
metric_type, 'Total disk written ' + blk_type)
)
task_metrics['disk_read_' + blk_type + '_total'] += read_counter
task_metrics['disk_written_' + blk_type + '_total'] += write_counter
metrics_by_container[container_id] = metrics
# task level metrics
metrics_by_container[TASK_CONTAINER_NAME_TAG] = create_task_metrics(task_metrics, metric_type)
return metrics_by_container
| 30.991736 | 100 | 0.454133 |
80c1fdfe9eeea2166f0d717ef71ee3a1b2ef9725 | 15,834 | py | Python | cse547/data.py | kspathak/cse547 | 2379c6435c871720aa7da53d3c8066a628e81830 | [
"MIT"
] | null | null | null | cse547/data.py | kspathak/cse547 | 2379c6435c871720aa7da53d3c8066a628e81830 | [
"MIT"
] | null | null | null | cse547/data.py | kspathak/cse547 | 2379c6435c871720aa7da53d3c8066a628e81830 | [
"MIT"
] | 1 | 2021-02-18T01:39:20.000Z | 2021-02-18T01:39:20.000Z | from collections import OrderedDict, defaultdict
import cv2
from math import ceil, floor
import os
import pickle
import logging
from typing import Callable, Dict, List, Optional, Set
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from pycocotools.coco import COCO
_ANNOTATIONS_FILE_PATH = {
'train': os.path.join('annotations', 'instances_train2014.json'),
'test': os.path.join('annotations', 'instances_test2014.json'),
'validation': os.path.join('annotations', 'instances_val2014.json'),
}
_FEATURES_FILE_PATH = {
'small': {
'train': os.path.join('features_small', 'train2014.p'),
'test': os.path.join('features_small', 'test2014.p'),
'validation': os.path.join('features_small', 'val2014.p'),
},
'tiny': {
'train': os.path.join('features_tiny', 'train2014.p'),
'test': os.path.join('features_tiny', 'test2014.p'),
'validation': os.path.join('features_tiny', 'val2014.p'),
},
}
_FEATURES2_FILE_PATH = {
'small': {
'train': os.path.join('features2_small', 'train2014.p'),
'test': os.path.join('features2_small', 'test2014.p'),
'validation': os.path.join('features2_small', 'val2014.p'),
},
'tiny': {
'train': os.path.join('features2_tiny', 'train2014.p'),
'test': os.path.join('features2_tiny', 'test2014.p'),
'validation': os.path.join('features2_tiny', 'val2014.p'),
},
}
_IMG_DIR_PATH = {
'train': os.path.join('train2014_2'),
'test': os.path.join('test2014_2'),
'validation': os.path.join('val2014_2'),
}
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
class CocoSingleLabelFeaturesDataset(Dataset):
def __init__(self, data_dir: str, mode: str, size: str,
transform: Optional[Callable] = None) -> None:
self._transform = transform
annotations_file_path: str = os.path.join(
data_dir, _ANNOTATIONS_FILE_PATH[mode])
coco = COCO(annotations_file_path)
features_file_path = os.path.join(data_dir, _FEATURES_FILE_PATH[size][mode])
with open(features_file_path, 'rb') as f:
img_ids, features = pickle.load(f, encoding='bytes')
self._features = features
imgs = coco.loadImgs(img_ids)
categories = coco.loadCats(coco.getCatIds())
category_id_to_supercategory = {c['id']: c['supercategory'] for c in categories}
category_tree = defaultdict(list)
for c in categories:
category_tree[c['supercategory']].append(c['name'])
self._labels = []
for img_id in img_ids:
annotation_ids = coco.getAnnIds(imgIds=[img_id], iscrowd=None)
annotations = coco.loadAnns(annotation_ids)
img_category_ids = frozenset([a['category_id'] for a in annotations])
img_supercategories = set([category_id_to_supercategory[c]
for c in img_category_ids])
assert len(img_supercategories) == 1, 'Each image should only have one label.'
self._labels.append(img_supercategories.pop())
_logger.info(
'Loaded the features and labels for %d images.', self.__len__())
_logger.debug('Manually inspect a random sample of images as a sanity check.')
for i in np.random.choice(np.arange(len(self._labels)), 10):
_logger.debug("{'index': %d, 'id': %d, 'url': '%s', 'label': '%s'}",
i, imgs[i]['id'], imgs[i]['coco_url'], self._labels[i])
def __len__(self) -> int:
return len(self._labels)
def __getitem__(self, i: int):
sample = {'features': self._features[i], 'label': self._labels[i]}
if self._transform:
return self._transform(sample)
return sample
class CocoMultiLabelFeaturesDataset(Dataset):
def __init__(self, data_dir: str, mode: str, size: str,
transform: Optional[Callable] = None) -> None:
self._transform = transform
annotations_file_path: str = os.path.join(
data_dir, _ANNOTATIONS_FILE_PATH[mode])
coco = COCO(annotations_file_path)
features_file_path = os.path.join(data_dir, _FEATURES2_FILE_PATH[size][mode])
with open(features_file_path, 'rb') as f:
img_ids, features = pickle.load(f, encoding="bytes")
self._features = features
self._label_names = []
categories = {}
category_ids = []
for category in filter(
lambda category: category['supercategory'] == 'animal' or category['supercategory'] == 'vehicle',
coco.loadCats(coco.getCatIds())):
categories[category['id']] = {'name': category['name'], 'index': len(self._label_names)}
category_ids.append(category['id'])
self._label_names.append(category['name'])
self._labels = []
for img_id in img_ids:
annotation_ids = coco.getAnnIds(imgIds=[img_id], iscrowd=None)
annotations = coco.loadAnns(annotation_ids)
img_category_ids = frozenset([a['category_id'] for a in annotations])
indices = [categories[i]['index'] for i in img_category_ids if i in categories]
self._labels.append(torch.sparse.FloatTensor(
torch.LongTensor([indices]),
torch.ones(len(indices)),
torch.Size([len(categories)])).to_dense())
_logger.info(
'Loaded the features and labels for %d images.', self.__len__())
imgs = coco.loadImgs(img_ids)
_logger.debug('Manually inspect a random sample of images as a sanity check.')
for i in np.random.choice(np.arange(len(self._labels)), 10):
labels = [categories[category_ids[j[0]]]['name']
for j in np.argwhere(self._labels[i].numpy())]
_logger.debug("{'index': %d, 'id': %d, 'url': '%s', 'label': '%s'}",
i, imgs[i]['id'], imgs[i]['coco_url'], labels)
@property
def label_names(self):
return self._label_names
def __len__(self) -> int:
return len(self._labels)
def __getitem__(self, i: int):
sample = {'features': self._features[i], 'label': self._labels[i]}
if self._transform:
return self._transform(sample)
return sample
class FlattenTensorTransform(Callable):
def __call__(self, sample):
return {
'features': torch.from_numpy(sample['features'].reshape(-1)),
'label': sample['label'],
}
class TensorTransform(Callable):
def __call__(self, sample):
return {
'features': torch.from_numpy(sample['features'].reshape(-1)),
'label': (1.0 if sample['label'] == 'vehicle' else -1.0),
}
class OneShotDataLoader(DataLoader):
"""Used to evaluate small datasets.
"""
def __init__(self, dataset: Dataset) -> None:
super().__init__(dataset, batch_size=len(dataset), shuffle=False)
class CocoPatchesDataset(Dataset):
def __init__(self, categories, features, labels) -> None:
self._categories = categories
self._features = features
self._labels = labels
@staticmethod
def from_images(data_dir: str, mode: str, size: str,
iou_threshold: float = 0.5,
supercategories: Optional[Set[str]] = None,
bbox_file_path = None,
negative_sampling: float = 0) -> 'CocoPatchesDataset':
"""
Args:
data_dir: The path to the images directory.
mode: Choose between training, validation, or test datasets.
size: Which size data to use.
iou_threshold: Restrict positive bounding boxes based on intersection over union.
supercategories: Restrict positive bounding boxes to certain categories.
bbox_file_path: Pickled pre-computed bounding box candidates.
negative_sampling: A number in the range [0,1]. If greater than 0, contains negative patches.
Returns:
Patches
"""
img_dir_path = os.path.join(data_dir, _IMG_DIR_PATH[mode])
annotations_file_path: str = os.path.join(
data_dir, _ANNOTATIONS_FILE_PATH[mode])
coco = COCO(annotations_file_path)
features_file_path = os.path.join(data_dir, _FEATURES2_FILE_PATH[size][mode])
with open(features_file_path, 'rb') as f:
img_ids, img_features = pickle.load(f, encoding='bytes')
# We'll encode each category as a one-hot vector.
categories = [
category for category in coco.loadCats(coco.getCatIds())
if supercategories is None or category['supercategory'] in supercategories
]
category_index = {
category['id']: i
for i, category in enumerate(categories)
}
features = []
labels = []
featurizer = _Featurizer()
imgs = coco.loadImgs(img_ids)
bbox_dict = _read_bboxes(bbox_file_path) if bbox_file_path else {}
for img_index, img in enumerate(imgs):
if img_index % 10 == 0 and img_index > 0:
_logger.info('Patch features extracted for %d images.', img_index)
try:
bboxes = (
bbox_dict[img['id']] if img['id'] in bbox_dict else
_get_bboxes(cv2.imread(os.path.join(img_dir_path, img['file_name'])), num_rects = 2048))
except Exception as e:
_logger.warn('%s raised when getting bounding boxes for image: %s',
e, img)
continue
ann_ids = coco.getAnnIds(imgIds=img['id'], iscrowd=None)
annotations = [
annotation for annotation in coco.loadAnns(ann_ids)
if annotation['category_id'] in category_index
]
img_pil = Image.open(os.path.join(img_dir_path, img['file_name']))
for bbox in bboxes:
bbox_category_indices: Set[int] = set()
for annotation in annotations:
if _iou(bbox, annotation['bbox']) > iou_threshold:
bbox_category_indices.add(category_index[annotation['category_id']])
projected_bbox = _project_onto_feature_space(bbox, img_pil.size)
bbox_features = featurizer(projected_bbox, img_features[img_index])
label = (torch.sparse.FloatTensor(
torch.LongTensor([list(bbox_category_indices)]),
torch.ones(len(bbox_category_indices)),
torch.Size([len(categories)])).to_dense()
if len(bbox_category_indices) > 0
else torch.zeros(len(categories)))
if ((len(bbox_category_indices) > 0 and negative_sampling == 0) or
(len(bbox_category_indices) == 0 and np.random.random() <= negative_sampling)):
features.append(bbox_features)
labels.append(label)
return CocoPatchesDataset(categories, features, labels)
@staticmethod
def from_state_dict(state_dict) -> 'CocoPatchesDataset':
return CocoPatchesDataset(
state_dict['categories'], state_dict['features'], state_dict['labels'])
@staticmethod
def from_state_dict_files(files: List[str]) -> 'CocoPatchesDataset':
def read_state_dict(filename: str):
with open(filename, 'rb') as f:
return pickle.load(f)
state_dicts = [read_state_dict(filename) for filename in files]
assert len(state_dicts) > 0, 'The list of files is empty.'
categories = state_dicts[0]['categories']
features = [f for state_dict in state_dicts for f in state_dict['features']]
labels = [l for state_dict in state_dicts for l in state_dict['labels']]
return CocoPatchesDataset(categories, features, labels)
def state_dict(self):
return OrderedDict([
('categories', self._categories),
('features', self._features),
('labels', self._labels),
])
def __len__(self) -> int:
return len(self._labels)
def __getitem__(self, i: int):
return {'features': self._features[i], 'label': self._labels[i]}
@property
def categories(self):
return self._categories
cv2.setNumThreads(4)
_SELECTIVE_SEARCHER = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
def _get_bboxes(img: np.array, num_rects: Optional[int] = None) -> np.array:
_SELECTIVE_SEARCHER.setBaseImage(img)
_SELECTIVE_SEARCHER.switchToSelectiveSearchQuality()
# _SELECTIVE_SEARCHER.switchToSelectiveSearchFast()
bboxes = _SELECTIVE_SEARCHER.process()
return bboxes if num_rects is None else bboxes[:num_rects]
def _read_bboxes(bbox_file_path: str) -> Dict[int, np.array]:
with open(bbox_file_path, 'rb') as f:
img_ids, bboxes = pickle.load(f, encoding='bytes')
return {img_id: bboxes[i] for i, img_id in enumerate(img_ids) if bboxes[i] is not None}
def _iou(rect1, rect2) -> float: # rect = [x, y, w, h]
"""Computes intersection over union.
"""
x1, y1, w1, h1 = rect1
X1, Y1 = x1+w1, y1 + h1
x2, y2, w2, h2 = rect2
X2, Y2 = x2+w2, y2 + h2
a1 = (X1 - x1 + 1) * (Y1 - y1 + 1)
a2 = (X2 - x2 + 1) * (Y2 - y2 + 1)
x_int = max(x1, x2)
X_int = min(X1, X2)
y_int = max(y1, y2)
Y_int = min(Y1, Y2)
a_int = (X_int - x_int + 1) * (Y_int - y_int + 1) * 1.0
if x_int > X_int or y_int > Y_int:
a_int = 0.0
return a_int / (a1 + a2 - a_int)
# nearest neighbor in 1-based indexing
def _nnb_1(x):
x1 = int(floor((x + 8) / 16.0))
x1 = max(1, min(x1, 13))
return x1
def _project_onto_feature_space(rect, image_dims):
    """Projects bounding box onto convolutional network.

    Args:
        rect: Bounding box (x, y, w, h).
        image_dims: Image size, (imgx, imgy).

    Returns:
        Projected coordinates, (x, y, x'+1, y'+1) where the box is x:x', y:y'.
    """
    # For conv 5, center of receptive field of i is i_0 = 16 i for 1-based indexing
    imgx, imgy = image_dims
    x, y, w, h = rect
    # scale to 224 x 224, standard input size.
    # floor on the top-left and ceil on the bottom-right round outward,
    # so the projected box never shrinks below the original box.
    x1, y1 = ceil((x + w) * 224 / imgx), ceil((y + h) * 224 / imgy)
    x, y = floor(x * 224 / imgx), floor(y * 224 / imgy)
    px = _nnb_1(x + 1) - 1  # inclusive
    py = _nnb_1(y + 1) - 1  # inclusive
    px1 = _nnb_1(x1 + 1)  # exclusive
    py1 = _nnb_1(y1 + 1)  # exclusive
    return [px, py, px1, py1]
class _Featurizer(Callable):
    """Turns a projected bounding box plus a conv feature map into a fixed-size
    vector via spatial pyramid max-pooling (grids of 1x1, 3x3 and 6x6)."""
    dim = 11776  # for small features

    def __init__(self):
        # pyramidal pooling of sizes 1, 3, 6
        self.pool1 = nn.AdaptiveMaxPool2d(1)
        self.pool3 = nn.AdaptiveMaxPool2d(3)
        self.pool6 = nn.AdaptiveMaxPool2d(6)
        self.lst = [self.pool1, self.pool3, self.pool6]

    def __call__(self, projected_bbox, image_features):
        # projected_bbox: bbox projected onto final layer, as (x, y, x1, y1)
        # image_features: C x W x H tensor : output of conv net
        full_image_features = torch.from_numpy(image_features)
        x, y, x1, y1 = projected_bbox
        # Crop the spatial window for the box, keeping all channels.
        crop = full_image_features[:, x:x1, y:y1]
        # Concatenate the three pooled grids into one flat vector.
        return torch.cat([self.pool1(crop).view(-1), self.pool3(crop).view(-1),
                          self.pool6(crop).view(-1)], dim=0).data.numpy()  # returns numpy array
| 40.809278 | 141 | 0.598459 |
7a612c9601367a245fd3d51de7e25d66c0b71194 | 2,538 | py | Python | libpermian/events/test_events.py | velezd/permian | b52189f44c3112ad933a6b1e303a6b30c272651a | [
"MIT"
] | null | null | null | libpermian/events/test_events.py | velezd/permian | b52189f44c3112ad933a6b1e303a6b30c272651a | [
"MIT"
] | 9 | 2022-02-07T14:14:10.000Z | 2022-03-22T09:17:16.000Z | libpermian/events/test_events.py | velezd/permian | b52189f44c3112ad933a6b1e303a6b30c272651a | [
"MIT"
] | 3 | 2022-01-20T09:17:39.000Z | 2022-03-08T00:35:58.000Z | import unittest
from libpermian.events.factory import EventFactory
from libpermian.events.base import Event
from libpermian.events.builtin import UnknownEvent
class TestEvent(Event):
    """Minimal Event subclass registered under type 'test' in the tests below."""
    pass
class Test2Event(Event):
    """Minimal Event subclass registered under type 'test2' in the tests below."""
    pass
class Test2FooEvent(Event):
    """Minimal Event subclass registered under the dotted type 'test2.foo'."""
    pass
class TestEventFactory(unittest.TestCase):
    """Checks that a registered event type is built by EventFactory.make and
    exposes its payload and branch-spec templating."""

    OLD_EVENT_TYPES = []

    @classmethod
    def setUpClass(cls):
        # Snapshot the global registry so the temporary registration can be undone.
        cls.OLD_EVENT_TYPES = EventFactory.EVENT_TYPES.copy()
        EventFactory.register('test')(TestEvent)

    @classmethod
    def tearDownClass(cls):
        # Restore the registry untouched for other test modules.
        EventFactory.EVENT_TYPES = cls.OLD_EVENT_TYPES

    def setUp(self):
        event_string = '''{"type" : "test",
"other" : {"value" : "42"}}'''
        self.event = EventFactory.make(None, event_string)

    def test_registered(self):
        self.assertIs(TestEvent, EventFactory.EVENT_TYPES['test'])

    def test_structure(self):
        # Payload keys become attributes holding the parsed JSON values.
        self.assertEqual(self.event.other['value'], "42")

    def test_format_branch_spec(self):
        rendered = self.event.format_branch_spec('Answer is {{event.other["value"]}}')
        self.assertEqual(rendered, 'Answer is 42')
class TestEventFactoryTypes(unittest.TestCase):
    """Checks dotted-type resolution: exact matches, fallback to the most
    specific registered prefix, and the UnknownEvent catch-all."""

    OLD_EVENT_TYPES = []

    @classmethod
    def setUpClass(cls):
        cls.OLD_EVENT_TYPES = EventFactory.EVENT_TYPES.copy()
        # Register the three fixture types in the same order as before.
        for type_name, event_cls in (('test', TestEvent),
                                     ('test2', Test2Event),
                                     ('test2.foo', Test2FooEvent)):
            EventFactory.register(type_name)(event_cls)

    @classmethod
    def tearDownClass(cls):
        EventFactory.EVENT_TYPES = cls.OLD_EVENT_TYPES

    def test_correct_event(self):
        # Exact type names resolve to their registered classes.
        self.assertIsInstance(EventFactory.make(None, '{"type": "test"}'), TestEvent)
        self.assertIsInstance(EventFactory.make(None, '{"type": "test2"}'), Test2Event)
        self.assertIsInstance(EventFactory.make(None, '{"type": "test2.foo"}'), Test2FooEvent)

    def test_more_specific_event(self):
        # Unknown suffix falls back to the longest registered prefix.
        made = EventFactory.make(None, '{"type": "test2.foo.bar"}')
        self.assertIsInstance(made, Test2FooEvent)

    def test_more_specific_event_fallback(self):
        self.assertIsInstance(EventFactory.make(None, '{"type": "test.bar"}'), TestEvent)
        self.assertIsInstance(EventFactory.make(None, '{"type": "test2.bar"}'), Test2Event)

    def test_unknown_event(self):
        # No registered prefix at all yields the UnknownEvent placeholder.
        self.assertIsInstance(EventFactory.make(None, '{"type": "foo"}'), UnknownEvent)
| 32.961039 | 84 | 0.674547 |
a568335adb3437d803de6bf3ae7786a08f00dc82 | 2,363 | py | Python | desuwa/cli.py | megagonlabs/desuwa | f29ab3149f47852cfbe4634f7e440abac66ddf10 | [
"Apache-2.0"
] | 5 | 2021-04-20T08:30:40.000Z | 2021-05-22T15:45:14.000Z | desuwa/cli.py | megagonlabs/desuwa | f29ab3149f47852cfbe4634f7e440abac66ddf10 | [
"Apache-2.0"
] | 21 | 2021-05-11T00:44:46.000Z | 2022-03-31T01:12:32.000Z | desuwa/cli.py | megagonlabs/desuwa | f29ab3149f47852cfbe4634f7e440abac66ddf10 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import argparse
from pathlib import Path
import desuwa.predicate
from desuwa.rule import loads_tags
from desuwa.rule.rule_mrph import RulerMrph, RulesLevelMrph
from desuwa.rule.rule_tag import RulerTag, RulesLevelTag
from desuwa.util import get_mlist
# Rule files shipped with the package, resolved relative to this module.
DEFAULT_RULE_BASE = Path(__file__).parent.joinpath("knp_rules")
# Morpheme-level rules, applied in order: filtering first, then basic rules.
DEFAULT_MRPH_RULES = [
    DEFAULT_RULE_BASE.joinpath("mrph_filter.rule"),
    DEFAULT_RULE_BASE.joinpath("mrph_basic.rule"),
]
# Tag-level (bunsetsu) rules.
DEFAULT_TAG_RULES = [
    DEFAULT_RULE_BASE.joinpath("bnst_basic.rule"),
]
def get_opts() -> argparse.Namespace:
    """Parse command-line options for the desuwa CLI.

    Defaults to the bundled rule files and stdin/stdout streams.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--mrphrule", "-m", type=Path, action="append", default=DEFAULT_MRPH_RULES)
    parser.add_argument("--tagrule", "-t", type=Path, action="append", default=DEFAULT_TAG_RULES)
    parser.add_argument("--input", "-i", type=argparse.FileType("r"), default="-")
    parser.add_argument("--output", "-o", type=argparse.FileType("w"), default="-")
    # Boolean mode switches, registered in the same order as before.
    for flag in ("--dump", "--segment", "--predicate"):
        parser.add_argument(flag, action="store_true")
    return parser.parse_args()
def main() -> None:
    """CLI entry point: build rulers from the rule files, then run one of the
    three modes (--dump rules, --predicate extraction, or tagging)."""
    opts = get_opts()
    ruler = RulerMrph([RulesLevelMrph(r_path) for r_path in opts.mrphrule])
    tag_ruler = None
    if opts.tagrule:
        tag_ruler = RulerTag([RulesLevelTag(r_path) for r_path in opts.tagrule])

    if opts.dump:
        # Print every loaded morpheme rule, one blank line between rule groups.
        for rules in ruler.rules_list:
            for rule in rules:
                print(rule)
            print()
        return

    if opts.predicate:
        with opts.input as inf, opts.output as outf:
            # Accumulate lines until an EOS sentinel, then parse the sentence.
            buf: str = ""
            for line in inf:
                buf += line
                if line != "EOS\n":
                    continue
                tags = loads_tags(buf)
                buf = ""
                for r in desuwa.predicate.parse(tags):
                    outf.write(f"{r}\n")
        return

    # Default mode: tag each morpheme list and emit segments or full dumps.
    with opts.input as inf, opts.output as outf:
        for mlist in get_mlist(inf):
            tags = ruler.get_tags(mlist)
            if tag_ruler:
                tag_ruler.apply(tags)
            if opts.segment:
                outf.write(f"{tags}\n")
            else:
                outf.write(tags.dumps())
# Script entry point guard.
if __name__ == "__main__":
    main()
| 30.688312 | 100 | 0.619975 |
d0c45401766b143f8f665b24856f87379f6819dd | 3,395 | py | Python | 2020/day23_sol.py | vladan-stojnic/AdventOfCode | f694179a5d62c42112a0f8e50a6f75c18760e8eb | [
"MIT"
] | null | null | null | 2020/day23_sol.py | vladan-stojnic/AdventOfCode | f694179a5d62c42112a0f8e50a6f75c18760e8eb | [
"MIT"
] | null | null | null | 2020/day23_sol.py | vladan-stojnic/AdventOfCode | f694179a5d62c42112a0f8e50a6f75c18760e8eb | [
"MIT"
] | null | null | null | import math
class Node:
    """A single cup in the circular linked list."""

    def __init__(self, value=None, next_node=None):
        self.value = value
        self.next_node = next_node


class CircularList:
    """Circular singly-linked list with O(1) value -> node lookup via a dict.

    Tracks min/max of the inserted values so the game can wrap the
    destination label cheaply.
    """

    def __init__(self, values=None):
        self.head = None
        self.length = 0
        self.min = None
        self.max = None
        self.elements = {}
        if not values:
            return
        self.length = len(values)
        # Build the chain back-to-front so each node can link to its successor.
        successor = None
        tail = None
        for val in reversed(values):
            successor = Node(val, successor)
            self.elements[val] = successor
            if tail is None:
                tail = successor
            # Falsy check mirrors the original first-assignment logic.
            if not self.min or val < self.min:
                self.min = val
            if not self.max or val > self.max:
                self.max = val
        self.head = successor
        # Close the circle: last node points back at the head.
        tail.next_node = self.head

    def __str__(self):
        parts = []
        node = self.head
        for _ in range(self.length):
            parts.append(str(node.value))
            node = node.next_node
        return ','.join(parts)

    def get_element(self, value):
        """Return the node holding ``value``, or None if absent."""
        return self.elements.get(value)
def play_game(cups, num_turns=100):
    """Run ``num_turns`` rounds of the crab-cups game (AoC 2020 day 23).

    ``cups`` is a CircularList of cup labels; it is mutated in place and
    returned for convenience.
    """
    current = cups.head
    min_value = cups.min
    max_value = cups.max
    for i in range(num_turns):
        removed_start = current.next_node
        tmp = current
        removed_values = []
        # Pick up the three cups clockwise of the current cup.
        for i in range(3):  # NOTE(review): shadows the outer loop variable 'i'
            tmp = tmp.next_node
            removed_values.append(tmp.value)
        removed_end = tmp
        destination = current.value
        while True:
            # Count down (wrapping at the minimum) until we hit a cup that
            # was not just picked up.
            destination -= 1
            if destination < min_value:
                destination = max_value
            destination_node = cups.get_element(destination)
            if destination_node and destination_node.value not in removed_values:
                # Splice the removed run of three after the destination cup,
                # then advance the current cup one step clockwise.
                old_nxt = destination_node.next_node
                old_removed = removed_end.next_node
                destination_node.next_node = removed_start
                removed_end.next_node = old_nxt
                current.next_node = old_removed
                current = current.next_node
                break
    return cups
def part1(start_order, num_turns=100):
    """Play the game and return the cup labels after cup 1, as a string."""
    labels = [int(ch) for ch in start_order]
    cups = play_game(CircularList(labels), num_turns=num_turns)
    node = cups.get_element(1)
    pieces = []
    # Walk the circle starting just after cup 1, skipping cup 1 itself.
    for _ in range(cups.length - 1):
        node = node.next_node
        pieces.append(str(node.value))
    return ''.join(pieces)
def part2(start_order, num_turns=10000000):
    """Extend the cups to one million, play, and return the product of the
    two cup labels immediately clockwise of cup 1."""
    labels = [int(ch) for ch in start_order]
    # Fill up with consecutive labels after the current maximum.
    biggest = max(labels)
    labels.extend(range(biggest + 1, 1000001))
    cups = play_game(CircularList(labels), num_turns=num_turns)
    node = cups.get_element(1)
    following = []
    for _ in range(2):
        node = node.next_node
        following.append(node.value)
    return math.prod(following)
# Part 1
assert part1('389125467', 10) == '92658374'  # worked example from the puzzle
assert part1('389125467') == '67384529'
print(part1('916438275'))
# Part 2 (ten million turns over one million cups — takes a while)
assert part2('389125467') == 149245887792
print(part2('916438275'))
8ef9827c220417cb2b711f39b3cd1d6d65ae945c | 1,939 | py | Python | setup.py | myhro/bootstrap-vz-deb | 16cdb8bbea979f0c084d5b9adbb9ee572e343065 | [
"Apache-2.0"
] | null | null | null | setup.py | myhro/bootstrap-vz-deb | 16cdb8bbea979f0c084d5b9adbb9ee572e343065 | [
"Apache-2.0"
] | null | null | null | setup.py | myhro/bootstrap-vz-deb | 16cdb8bbea979f0c084d5b9adbb9ee572e343065 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
from setuptools import find_packages
import os.path
def find_version(path):
    """Return the package version string from a Python source file.

    Scans ``path`` for a line of the form ``__version__ = '...'`` (single or
    double quotes) and returns the quoted value.

    Raises:
        RuntimeError: if no version assignment is found.
    """
    import re
    # Use a context manager so the file handle is closed promptly;
    # the previous open(path).read() left it open until garbage collection.
    with open(path) as f:
        version_file = f.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
# Package metadata; the version is read from bootstrapvz/__init__.py so it
# is defined in exactly one place.
setup(name='bootstrap-vz',
      version=find_version(os.path.join(os.path.dirname(__file__), 'bootstrapvz/__init__.py')),
      packages=find_packages(exclude=['docs']),
      include_package_data=True,
      # Three console entry points: local run, remote client, remote server.
      entry_points={'console_scripts': ['bootstrap-vz = bootstrapvz.base:main',
                                        'bootstrap-vz-remote = bootstrapvz.remote.main:main',
                                        'bootstrap-vz-server = bootstrapvz.remote.server:main',
                                        ]},
      install_requires=['termcolor >= 1.1.0',
                        'fysom >= 1.0.15',
                        'jsonschema >= 2.3.0',
                        'pyyaml >= 3.10',
                        'boto >= 2.14.0',
                        'docopt >= 0.6.1',
                        'pyrfc3339 >= 1.0',
                        'requests >= 2.4.3',
                        ],
      license='Apache License, Version 2.0',
      description='Bootstrap Debian images for virtualized environments',
      long_description='''bootstrap-vz is a bootstrapping framework for Debian.
It is is specifically targeted at bootstrapping systems for virtualized environments.
bootstrap-vz runs without any user intervention and generates ready-to-boot images for
a number of virtualization platforms.
Its aim is to provide a reproducible bootstrapping process using manifests
as well as supporting a high degree of customizability through plugins.''',
      author='Anders Ingemann',
      author_email='anders@ingemann.de',
      url='http://www.github.com/andsens/bootstrap-vz',
      )
| 45.093023 | 95 | 0.605982 |
7a5982e8c6f9251868f58353d2d70e70fb87959c | 283 | py | Python | assingment/student_score.py | thepros847/python_programiing | d177f79d0d1f21df434bf3f8663ae6469fcf8357 | [
"MIT"
] | null | null | null | assingment/student_score.py | thepros847/python_programiing | d177f79d0d1f21df434bf3f8663ae6469fcf8357 | [
"MIT"
] | null | null | null | assingment/student_score.py | thepros847/python_programiing | d177f79d0d1f21df434bf3f8663ae6469fcf8357 | [
"MIT"
] | null | null | null | student_score = float(input("Enter student exams score:"))
if student_score >= 90:
print("student gets a laptop")
elif student_score >= 59:
print("student gets a tablet")
elif student_score >= 0:
print("student gets a nothing")
else:
print (" student not specify")
| 25.727273 | 58 | 0.689046 |
69603c1f843a39570903fedad24f73314c691f8a | 111,028 | py | Python | youtube_dl/extractor/youtube.py | rajkotraja/YTDownloader | 01a0c511ebfa56699c1f58164c679b24f7972681 | [
"Unlicense"
] | null | null | null | youtube_dl/extractor/youtube.py | rajkotraja/YTDownloader | 01a0c511ebfa56699c1f58164c679b24f7972681 | [
"Unlicense"
] | null | null | null | youtube_dl/extractor/youtube.py | rajkotraja/YTDownloader | 01a0c511ebfa56699c1f58164c679b24f7972681 | [
"Unlicense"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import random
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_attribute,
get_element_by_id,
int_or_none,
mimetype2ext,
orderedSet,
parse_duration,
remove_quotes,
remove_start,
sanitized_Request,
smuggle_url,
str_to_int,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
urlencode_postdata,
ISO3166Utils,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    def _set_language(self):
        # Force an English UI via the PREF cookie so regex-based scraping
        # sees stable strings.
        self._set_cookie(
            '.youtube.com', 'PREF', 'f1=50000000&hl=en',
            # YouTube sets the expire time to about two months
            expire_time=time.time() + 2 * 30 * 24 * 3600)

    def _ids_to_results(self, ids):
        # Wrap bare video ids into url_result dicts handled by YoutubeIE.
        return [
            self.url_result(vid_id, 'Youtube', video_id=vid_id)
            for vid_id in ids]

    def _login(self):
        """
        Attempt to log in to YouTube.
        True is returned if successful or skipped.
        False is returned if login failed.

        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
        """
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return True

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='unable to fetch login page', fatal=False)
        if login_page is False:
            return

        # GALX is an anti-CSRF token embedded in the login form.
        galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
                                  login_page, 'Login GALX parameter')

        # Log in
        login_form_strs = {
            'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
            'Email': username,
            'GALX': galx,
            'Passwd': password,
            'PersistentCookie': 'yes',
            '_utf8': '霱',
            'bgresponse': 'js_disabled',
            'checkConnection': '',
            'checkedDomains': 'youtube',
            'dnConn': '',
            'pstMsg': '0',
            'rmShown': '1',
            'secTok': '',
            'signIn': 'Sign in',
            'timeStmp': '',
            'service': 'youtube',
            'uilel': '3',
            'hl': 'en_US',
        }

        login_data = urlencode_postdata(login_form_strs)

        req = sanitized_Request(self._LOGIN_URL, login_data)
        login_results = self._download_webpage(
            req, None,
            note='Logging in', errnote='unable to log in', fatal=False)
        if login_results is False:
            return False

        error_msg = self._html_search_regex(
            r'<[^>]+id="errormsg_0_Passwd"[^>]*>([^<]+)<',
            login_results, 'error message', default=None)
        if error_msg:
            raise ExtractorError('Unable to login: %s' % error_msg, expected=True)

        if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
            raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)

        # Two-Factor
        # TODO add SMS and phone call support - these require making a request and then prompting the user
        if re.search(r'(?i)<form[^>]* id="challenge"', login_results) is not None:
            tfa_code = self._get_tfa_info('2-step verification code')

            if not tfa_code:
                self._downloader.report_warning(
                    'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
                    '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                return False

            tfa_code = remove_start(tfa_code, 'G-')

            # Resubmit the challenge form with the TOTP code filled in.
            tfa_form_strs = self._form_hidden_inputs('challenge', login_results)

            tfa_form_strs.update({
                'Pin': tfa_code,
                'TrustDevice': 'on',
            })

            tfa_data = urlencode_postdata(tfa_form_strs)

            tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
            tfa_results = self._download_webpage(
                tfa_req, None,
                note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)

            if tfa_results is False:
                return False

            if re.search(r'(?i)<form[^>]* id="challenge"', tfa_results) is not None:
                self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
                return False
            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
                self._downloader.report_warning('unable to log in - did the page structure change?')
                return False
            if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
                self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
                return False

        # The login form being present again means credentials were rejected.
        if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
            self._downloader.report_warning('unable to log in: bad username or password')
            return False
        return True

    def _real_initialize(self):
        if self._downloader is None:
            return
        self._set_language()
        if not self._login():
            return
class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
    # Extract entries from page with "Load more" button
    def _entries(self, page, playlist_id):
        """Yield entries from ``page``, following the AJAX "Load more"
        pagination until no more-button href is found or a page is empty."""
        more_widget_html = content_html = page
        for page_num in itertools.count(1):
            for entry in self._process_page(content_html):
                yield entry

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), playlist_id,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            if not content_html.strip():
                # Some webpages show a "Load more" button but they don't
                # have more videos
                break
            more_widget_html = more['load_more_widget_html']
class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
    def _process_page(self, content):
        # _VIDEO_RE is expected to be supplied by concrete subclasses.
        for video_id, video_title in self.extract_videos_from_page(content):
            yield self.url_result(video_id, 'Youtube', video_id, video_title)

    def extract_videos_from_page(self, page):
        """Return (video_id, title) pairs in page order, de-duplicated by id;
        a later occurrence may backfill a missing title."""
        ids_in_page = []
        titles_in_page = []
        for mobj in re.finditer(self._VIDEO_RE, page):
            # The link with index 0 is not the first video of the playlist (not sure if still actual)
            if 'index' in mobj.groupdict() and mobj.group('id') == '0':
                continue
            video_id = mobj.group('id')
            video_title = unescapeHTML(mobj.group('title'))
            if video_title:
                video_title = video_title.strip()
            try:
                # Duplicate id: keep the first position, backfill its title.
                idx = ids_in_page.index(video_id)
                if video_title and not titles_in_page[idx]:
                    titles_in_page[idx] = video_title
            except ValueError:
                ids_in_page.append(video_id)
                titles_in_page.append(video_title)
        return zip(ids_in_page, titles_in_page)
class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
    def _process_page(self, content):
        # Yield one playlist url_result per unique playlist id on the page.
        for playlist_id in orderedSet(re.findall(
                r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
                content)):
            yield self.url_result(
                'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')

    def _real_extract(self, url):
        """Download the page and return a playlist of all playlists found on it."""
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        title = self._og_search_title(webpage, fatal=False)
        return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?&list=) # combined list/video URLs are handled by the playlist IE
(?(1).+)? # if we found the ID, everything can follow
$"""
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'preference': -50, 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'preference': -50, 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9', 'preference': -40},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
}
_SUBTITLE_FORMATS = ('ttml', 'vtt')
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'license': 'Standard YouTube License',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
'note': 'Test generic use_cipher_signature video (#897)',
'info_dict': {
'id': 'UxxajLWwzqY',
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
'alt_title': 'I Love It (feat. Charli XCX)',
'description': 'md5:f3ceb5ef83a08d95b9d146f973157cc8',
'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
'iconic ep', 'iconic', 'love', 'it'],
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/IconaPop',
'license': 'Standard YouTube License',
'creator': 'Icona Pop',
}
},
{
'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
'note': 'Test VEVO video with age protection (#956)',
'info_dict': {
'id': '07FYdnEawAQ',
'ext': 'mp4',
'upload_date': '20130703',
'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
'alt_title': 'Tunnel Vision',
'description': 'md5:64249768eec3bc4276236606ea996373',
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
'license': 'Standard YouTube License',
'creator': 'Justin Timberlake',
'age_limit': 18,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/setindia',
'license': 'Standard YouTube License',
'age_limit': 18,
}
},
{
'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'license': 'Standard YouTube License',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'license': 'Standard YouTube License',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
'license': 'Standard YouTube License',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# JS player signature function name containing $
{
'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
'info_dict': {
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
'alt_title': 'Shake It Off',
'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
'license': 'Standard YouTube License',
'creator': 'Taylor Swift',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'upload_date': '20100909',
'uploader': 'The Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
'license': 'Standard YouTube License',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'license': 'Standard YouTube License',
'age_limit': 18,
},
},
# Age-gate video with encrypted signature
{
'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
'info_dict': {
'id': '6kLq3WMV1nU',
'ext': 'mp4',
'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
'uploader': 'LloydVEVO',
'uploader_id': 'LloydVEVO',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
'upload_date': '20110629',
'license': 'Standard YouTube License',
'age_limit': 18,
},
},
# video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'license': 'Standard YouTube License',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'Some Chords',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/rg3/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/olympic',
'license': 'Standard YouTube License',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympic',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫艾倫',
'license': 'Standard YouTube License',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'mp4',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'license': 'Standard YouTube License',
'formats': 'mincount:32',
},
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'license': 'Standard YouTube License',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
'info_dict': {
'id': 'jqWvoWXjCVs',
'title': 'teamPGP: Rocket League Noob Stream',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
},
'playlist': [{
'info_dict': {
'id': 'jqWvoWXjCVs',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': '6h8e8xoXJzg',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'PUOgX5z9xZw',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'teuwxikvS5k',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (zim)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}],
'params': {
'skip_download': True,
},
},
{
# Multifeed video with comma in title (see https://github.com/rg3/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'http://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'http://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/rg3/youtube-dl/issues/1892,
# https://github.com/rg3/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'license': 'Standard YouTube License',
'creator': 'Todd Haberman, Daniel Law Heath & Aaron Kaplan',
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'upload_date': '20150127',
'uploader_id': 'BerkmanCenter',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'BerkmanCenter',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:dda0d780d5a6e120758d1711d062a867',
'upload_date': '20151119',
'uploader': 'Bernie 2016',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': 're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
}
]
    def __init__(self, *args, **kwargs):
        super(YoutubeIE, self).__init__(*args, **kwargs)
        # Memoizes signature-decryption functions per player (see
        # _decrypt_signature) so each player script is analysed only once.
        self._player_cache = {}
    def report_video_info_webpage_download(self, video_id):
        """Report attempt to download the video info webpage for *video_id*."""
        self.to_screen('%s: Downloading video info webpage' % video_id)
    def report_information_extraction(self, video_id):
        """Report attempt to extract video information for *video_id*."""
        self.to_screen('%s: Extracting video information' % video_id)
    def report_unavailable_format(self, video_id, format):
        """Report that the requested format is not available for *video_id*."""
        self.to_screen('%s: Format %s not available' % (video_id, format))
    def report_rtmp_download(self):
        """Indicate that the download will use the RTMP protocol."""
        self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
    def _extract_signature_function(self, video_id, player_url, example_sig):
        """Build (or load from the filesystem cache) the signature decryptor.

        Returns a callable mapping an encrypted signature string to its
        decrypted form.  Raises ExtractorError when *player_url* cannot be
        parsed into a player type/id.
        """
        id_m = re.match(
            r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
            player_url)
        if not id_m:
            raise ExtractorError('Cannot identify player %r' % player_url)
        player_type = id_m.group('ext')  # 'js' or 'swf' (asserted below)
        player_id = id_m.group('id')
        # Read from filesystem cache
        func_id = '%s_%s_%s' % (
            player_type, player_id, self._signature_cache_id(example_sig))
        # func_id doubles as a cache file name: it must not contain path parts.
        assert os.path.basename(func_id) == func_id
        cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
        if cache_spec is not None:
            # Cached spec is a list of input indices: decryption is just a
            # re-ordering/selection of the signature's characters.
            return lambda s: ''.join(s[i] for i in cache_spec)
        download_note = (
            'Downloading player %s' % player_url
            if self._downloader.params.get('verbose') else
            'Downloading %s player %s' % (player_type, player_id)
        )
        if player_type == 'js':
            code = self._download_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            res = self._parse_sig_js(code)
        elif player_type == 'swf':
            urlh = self._request_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            code = urlh.read()
            res = self._parse_sig_swf(code)
        else:
            assert False, 'Invalid player type %r' % player_type
        # Probe with the string chr(0)chr(1)... to record which input index
        # lands at each output position, then persist that permutation.
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = res(test_string)
        cache_spec = [ord(c) for c in cache_res]
        self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
        return res
    def _print_sig_code(self, func, example_sig):
        """Print Python source equivalent to the signature function *func*.

        Probes *func* the same way as _extract_signature_function, then
        renders the resulting index permutation as slice/index expressions
        (used with the youtube_print_sig_code option).
        """
        def gen_sig_code(idxs):
            def _genslice(start, end, step):
                # Render a run of indices with common step as a slice literal.
                starts = '' if start == 0 else str(start)
                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[%s%s%s]' % (starts, ends, steps)
            step = None
            # Quelch pyflakes warnings - start will be set when step is set
            start = '(Never used)'
            # Walk consecutive index pairs, coalescing +1/-1 runs into slices
            # and emitting isolated indices as single s[i] terms.
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    if i - prev == step:
                        continue
                    # Run ended at prev: flush it as a slice.
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    # Two adjacent indices open a new run.
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield 's[%d]' % prev
            # Flush the final element (or the run it terminates).
            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                '    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
'Initial JS player signature function name')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
sub_lang_list = {}
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
'name': track.attrib['name'].encode('utf-8'),
})
sub_formats.append({
'url': 'https://www.youtube.com/api/timedtext?' + params,
'ext': ext,
})
sub_lang_list[lang] = sub_formats
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
def _get_ytplayer_config(self, video_id, webpage):
patterns = (
# User data may contain arbitrary character sequences that may affect
# JSON extraction with regex, e.g. when '};' is contained the second
# regex won't capture the whole JSON. Yet working around by trying more
# concrete regex first keeping in mind proper quoted string handling
# to be implemented in future that will replace this workaround (see
# https://github.com/rg3/youtube-dl/issues/7468,
# https://github.com/rg3/youtube-dl/pull/7599)
r';ytplayer\.config\s*=\s*({.+?});ytplayer',
r';ytplayer\.config\s*=\s*({.+?});',
)
config = self._search_regex(
patterns, webpage, 'ytplayer.config', default=None)
if config:
return self._parse_json(
uppercase_escape(config), video_id, fatal=False)
    def _get_automatic_captions(self, video_id, webpage):
        """Return automatically generated captions as {lang_code: [format dicts]}.

        We need the webpage for getting the captions url, pass it as an
        argument to speed up the process.
        """
        self.to_screen('%s: Looking for automatic captions' % video_id)
        player_config = self._get_ytplayer_config(video_id, webpage)
        err_msg = 'Couldn\'t find automatic captions for %s' % video_id
        if not player_config:
            self._downloader.report_warning(err_msg)
            return {}
        try:
            args = player_config['args']
            caption_url = args.get('ttsurl')
            # Variant 1: the player args expose a 'ttsurl' listing endpoint.
            if caption_url:
                timestamp = args['timestamp']
                # We get the available subtitles
                list_params = compat_urllib_parse_urlencode({
                    'type': 'list',
                    'tlangs': 1,
                    'asrs': 1,
                })
                list_url = caption_url + '&' + list_params
                caption_list = self._download_xml(list_url, video_id)
                original_lang_node = caption_list.find('track')
                if original_lang_node is None:
                    self._downloader.report_warning('Video doesn\'t have automatic captions')
                    return {}
                original_lang = original_lang_node.attrib['lang_code']
                caption_kind = original_lang_node.attrib.get('kind', '')
                sub_lang_list = {}
                # One entry per translation target; each gets every supported format.
                for lang_node in caption_list.findall('target'):
                    sub_lang = lang_node.attrib['lang_code']
                    sub_formats = []
                    for ext in self._SUBTITLE_FORMATS:
                        params = compat_urllib_parse_urlencode({
                            'lang': original_lang,
                            'tlang': sub_lang,
                            'fmt': ext,
                            'ts': timestamp,
                            'kind': caption_kind,
                        })
                        sub_formats.append({
                            'url': caption_url + '&' + params,
                            'ext': ext,
                        })
                    sub_lang_list[sub_lang] = sub_formats
                return sub_lang_list
            # Some videos don't provide ttsurl but rather caption_tracks and
            # caption_translation_languages (e.g. 20LmZk1hakA)
            caption_tracks = args['caption_tracks']
            caption_translation_languages = args['caption_translation_languages']
            # Variant 2: rebuild caption URLs from the first caption track's
            # query string, swapping in each translation language/format.
            caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
            parsed_caption_url = compat_urllib_parse_urlparse(caption_url)
            caption_qs = compat_parse_qs(parsed_caption_url.query)
            sub_lang_list = {}
            for lang in caption_translation_languages.split(','):
                lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
                sub_lang = lang_qs.get('lc', [None])[0]
                if not sub_lang:
                    continue
                sub_formats = []
                for ext in self._SUBTITLE_FORMATS:
                    # caption_qs is mutated in place and re-serialized each pass.
                    caption_qs.update({
                        'tlang': [sub_lang],
                        'fmt': [ext],
                    })
                    sub_url = compat_urlparse.urlunparse(parsed_caption_url._replace(
                        query=compat_urllib_parse_urlencode(caption_qs, True)))
                    sub_formats.append({
                        'url': sub_url,
                        'ext': ext,
                    })
                sub_lang_list[sub_lang] = sub_formats
            return sub_lang_list
        # An extractor error can be raise by the download process if there are
        # no automatic captions but there are subtitles
        except (KeyError, ExtractorError):
            self._downloader.report_warning(err_msg)
            return {}
def _mark_watched(self, video_id, video_info):
playback_url = video_info.get('videostats_playback_base_url', [None])[0]
if not playback_url:
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
# cpn generation algorithm is reverse engineered from base.js.
# In fact it works even with dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
def _extract_from_m3u8(self, manifest_url, video_id):
url_map = {}
def _get_urls(_manifest):
lines = _manifest.split('\n')
urls = filter(lambda l: l and not l.startswith('#'),
lines)
return urls
manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
formats_urls = _get_urls(manifest)
for format_url in formats_urls:
itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
url_map[itag] = format_url
return url_map
def _extract_annotations(self, video_id):
url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and 't' in query:
start_time = parse_duration(query['t'][0])
if start_time is None and 'start' in query:
start_time = parse_duration(query['start'][0])
if end_time is None and 'end' in query:
end_time = parse_duration(query['end'][0])
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
dash_mpds = []
def add_dash_mpd(video_info):
dash_mpd = video_info.get('dashmpd')
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
# Get video info
embed_webpage = None
is_live = None
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
# this can be viewed without login into Youtube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse_urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
video_info = compat_parse_qs(video_info_webpage)
add_dash_mpd(video_info)
else:
age_gate = False
video_info = None
# Try looking directly into the video webpage
ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
if ytplayer_config:
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map'):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
if args.get('livestream') == '1' or args.get('live_playback') == 1:
is_live = True
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
# We also try looking in get_video_info since it may contain different dashmpd
# URL that points to a DASH manifest with possibly different itag set (some itags
# are missing from DASH manifest pointed by webpage's dashmpd, some - from DASH
# manifest pointed by get_video_info's dashmpd).
# The general idea is to take a union of itags of both DASH manifests (for example
# video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
self.report_video_info_webpage_download(video_id)
for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
video_info_url = (
'%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
% (proto, video_id, el_type))
video_info_webpage = self._download_webpage(
video_info_url,
video_id, note=False,
errnote='unable to download video info webpage')
get_video_info = compat_parse_qs(video_info_webpage)
if get_video_info.get('use_cipher_signature') != ['True']:
add_dash_mpd(get_video_info)
if not video_info:
video_info = get_video_info
if 'token' in get_video_info:
# Different get_video_info requests may report different results, e.g.
# some may report video unavailability, but some may serve it without
# any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
# the original webpage as well as el=info and el=embedded get_video_info
# requests report video unavailability due to geo restriction while
# el=detailpage succeeds and returns valid data). This is probably
# due to YouTube measures against IP ranges of hosting providers.
# Working around by preferring the first succeeded video_info containing
# the token if no such video_info yet was found.
if 'token' not in video_info:
video_info = get_video_info
break
if 'token' not in video_info:
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
if regions_allowed:
raise ExtractorError('YouTube said: This video is available in %s only' % (
', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
expected=True)
raise ExtractorError(
'YouTube said: %s' % video_info['reason'][0],
expected=True, video_id=video_id)
else:
raise ExtractorError(
'"token" parameter not in video info for unknown reason',
video_id=video_id)
# title
if 'title' in video_info:
video_title = video_info['title'][0]
else:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
# description
video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
(?:title|href)="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
class="[^"]*"[^>]*>
[^<]+\.{3}\s*
</a>
''', r'\1', video_description)
video_description = clean_html(video_description)
else:
fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
video_description = ''
if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
entries = []
feed_ids = []
multifeed_metadata_list = video_info['multifeed_metadata_list'][0]
for feed in multifeed_metadata_list.split(','):
# Unquote should take place before split on comma (,) since textual
# fields may contain comma as well (see
# https://github.com/rg3/youtube-dl/issues/8536)
feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': '%s (%s)' % (video_title, feed_data['title'][0]),
})
feed_ids.append(feed_data['id'][0])
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(entries, video_id, video_title, video_description)
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
if 'view_count' in video_info:
view_count = int(video_info['view_count'][0])
else:
view_count = None
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported')
# Start extracting information
self.report_information_extraction(video_id)
# uploader
if 'author' not in video_info:
raise ExtractorError('Unable to extract uploader name')
video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
# uploader_id
video_uploader_id = None
video_uploader_url = None
mobj = re.search(
r'<link itemprop="url" href="(?P<uploader_url>https?://www.youtube.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
video_webpage)
if mobj is not None:
video_uploader_id = mobj.group('uploader_id')
video_uploader_url = mobj.group('uploader_url')
else:
self._downloader.report_warning('unable to extract uploader nickname')
# thumbnail image
# We try first to get a high quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif 'thumbnail_url' not in video_info:
self._downloader.report_warning('unable to extract video thumbnail')
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
# upload date
upload_date = self._html_search_meta(
'datePublished', video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
video_webpage, 'upload date', default=None)
if upload_date:
upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
upload_date = unified_strdate(upload_date)
video_license = self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
video_webpage, 'license', default=None)
m_music = re.search(
r'<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*<ul[^>]*>\s*<li>(?P<title>.+?) by (?P<creator>.+?)(?:\(.+?\))?</li',
video_webpage)
if m_music:
video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
video_creator = clean_html(m_music.group('creator'))
else:
video_alt_title = video_creator = None
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
video_categories = None if category is None else [category]
else:
video_categories = None
video_tags = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
def _extract_count(count_name):
return str_to_int(self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage, count_name, default=None))
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
if 'length_seconds' not in video_info:
self._downloader.report_warning('unable to extract video duration')
video_duration = None
else:
video_duration = int(compat_urllib_parse_unquote_plus(video_info['length_seconds'][0]))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
video_annotations = self._extract_annotations(video_id)
def _map_to_format_list(urlmap):
formats = []
for itag, video_real_url in urlmap.items():
dct = {
'format_id': itag,
'url': video_real_url,
'player_url': player_url,
}
if itag in self._formats:
dct.update(self._formats[itag])
formats.append(dct)
return formats
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
formats_spec = {}
fmt_list = video_info.get('fmt_list', [''])[0]
if fmt_list:
for fmt in fmt_list.split(','):
spec = fmt.split('/')
if len(spec) > 1:
width_height = spec[1].split('x')
if len(width_height) == 2:
formats_spec[spec[0]] = {
'resolution': spec[1],
'width': int_or_none(width_height[0]),
'height': int_or_none(width_height[1]),
}
formats = []
for url_data_str in encoded_url_map.split(','):
url_data = compat_parse_qs(url_data_str)
if 'itag' not in url_data or 'url' not in url_data:
continue
format_id = url_data['itag'][0]
url = url_data['url'][0]
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
encrypted_sig = url_data['s'][0]
ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
'JS player URL (1)', default=None)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed webpage')
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if self._downloader.params.get('verbose'):
if player_url is None:
player_version = 'unknown'
player_desc = 'unknown'
else:
if player_url.endswith('swf'):
player_version = self._search_regex(
r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
'flash player', fatal=False)
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
[r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'],
player_url,
'html5 player', fatal=False)
player_desc = 'html5 player %s' % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
url += '&signature=' + signature
if 'ratebypass' not in url:
url += '&ratebypass=yes'
dct = {
'format_id': format_id,
'url': url,
'player_url': player_url,
}
if format_id in self._formats:
dct.update(self._formats[format_id])
if format_id in formats_spec:
dct.update(formats_spec[format_id])
# Some itags are not included in DASH manifest thus corresponding formats will
# lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
# Trying to extract metadata from url_encoded_fmt_stream_map entry.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
more_fields = {
'filesize': int_or_none(url_data.get('clen', [None])[0]),
'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
'width': width,
'height': height,
'fps': int_or_none(url_data.get('fps', [None])[0]),
'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
}
for key, value in more_fields.items():
if value:
dct[key] = value
type_ = url_data.get('type', [None])[0]
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
kind, _ = kind_ext
dct['ext'] = mimetype2ext(type_split[0])
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
if mobj.group('key') == 'codecs':
codecs = mobj.group('val')
break
if codecs:
codecs = codecs.split(',')
if len(codecs) == 2:
acodec, vcodec = codecs[1], codecs[0]
else:
acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
dct.update({
'acodec': acodec,
'vcodec': vcodec,
})
formats.append(dct)
elif video_info.get('hlsvp'):
manifest_url = video_info['hlsvp'][0]
url_map = self._extract_from_m3u8(manifest_url, video_id)
formats = _map_to_format_list(url_map)
# Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
for a_format in formats:
a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
else:
unavailable_message = self._html_search_regex(
r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
video_webpage, 'unavailable message', default=None)
if unavailable_message:
raise ExtractorError(unavailable_message, expected=True)
raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
for mpd_url in dash_mpds:
dash_formats = {}
try:
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
for df in self._extract_mpd_formats(
mpd_url, video_id, fatal=dash_mpd_fatal,
formats_dict=self._formats):
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
# Additional DASH manifests may end up in HTTP Error 403 therefore
# allow them to fail without bug report message if we already have
# some DASH manifest succeeded. This is temporary workaround to reduce
# burst of bug reports until we figure out the reason and whether it
# can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH, they
# contain less info and it can be wrong, because we use
# fixed values (for example the resolution). See
# https://github.com/rg3/youtube-dl/issues/5774 for an
# example.
formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
w = float(stretched_m.group('w'))
h = float(stretched_m.group('h'))
# yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
# We will only process correct ratios.
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
self._sort_formats(formats)
self.mark_watched(video_id, video_info)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'uploader_url': video_uploader_url,
'upload_date': upload_date,
'license': video_license,
'creator': video_creator,
'title': video_title,
'alt_title': video_alt_title,
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
'formats': formats,
'is_live': is_live,
'start_time': start_time,
'end_time': end_time,
}
class YoutubeSharedVideoIE(InfoExtractor):
    """Extractor for youtube.com/shared?ci=... wrapper links.

    The shared page only embeds the real video id in a meta tag; actual
    extraction is delegated to the main Youtube extractor.
    """
    _VALID_URL = r'(?:https?:)?//(?:www\.)?youtube\.com/shared\?ci=(?P<id>[0-9A-Za-z_-]{11})'
    IE_NAME = 'youtube:shared'

    _TEST = {
        'url': 'https://www.youtube.com/shared?ci=1nEzmT-M4fU',
        'info_dict': {
            'id': 'uPDB5I9wfp8',
            'ext': 'webm',
            'title': 'Pocoyo: 90 minutos de episódios completos Português para crianças - PARTE 3',
            'description': 'md5:d9e4d9346a2dfff4c7dc4c8cec0f546d',
            'upload_date': '20160219',
            'uploader': 'Pocoyo - Português (BR)',
            'uploader_id': 'PocoyoBrazil',
        },
        'add_ie': ['Youtube'],
        'params': {
            # There are already too many Youtube downloads
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        shared_id = self._match_id(url)
        shared_webpage = self._download_webpage(url, shared_id)
        # The watchable video id lives in a videoId meta tag on the wrapper page.
        actual_video_id = self._html_search_meta(
            'videoId', shared_webpage, 'YouTube video id', fatal=True)
        return self.url_result(actual_video_id, YoutubeIE.ie_key())
class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
    """Extractor for YouTube playlists, including auto-generated mixes.

    Plain playlists are paginated via the base class' _entries(); mixes
    (RD/UL/PU ids) are generated from a single video and need a custom
    extraction loop.
    """
    IE_DESC = 'YouTube.com playlists'
    _VALID_URL = r"""(?x)(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        youtube\.com/
                        (?:
                           (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
                           \? (?:.*?[&;])*? (?:p|a|list)=
                        |  p/
                        )
                        (
                            (?:PL|LL|EC|UU|FL|RD|UL)?[0-9A-Za-z-_]{10,}
                            # Top tracks, they can also include dots
                            |(?:MC)[\w\.]*
                        )
                        .*
                     |
                        ((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
                     )"""
    _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
    _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
    IE_NAME = 'youtube:playlist'
    _TESTS = [{
        'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
        'info_dict': {
            'title': 'ytdl test PL',
            'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
        },
        'playlist_count': 3,
    }, {
        'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
        'info_dict': {
            'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
            'title': 'YDL_Empty_List',
        },
        'playlist_count': 0,
    }, {
        'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
        'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
        'info_dict': {
            'title': '29C3: Not my department',
            'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
        },
        'playlist_count': 95,
    }, {
        'note': 'issue #673',
        'url': 'PLBB231211A4F62143',
        'info_dict': {
            'title': '[OLD]Team Fortress 2 (Class-based LP)',
            'id': 'PLBB231211A4F62143',
        },
        'playlist_mincount': 26,
    }, {
        'note': 'Large playlist',
        'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
        'info_dict': {
            'title': 'Uploads from Cauchemar',
            'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
        },
        'playlist_mincount': 799,
    }, {
        'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        'info_dict': {
            'title': 'YDL_safe_search',
            'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        },
        'playlist_count': 2,
    }, {
        'note': 'embedded',
        'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
        'playlist_count': 4,
        'info_dict': {
            'title': 'JODA15',
            'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
        }
    }, {
        'note': 'Embedded SWF player',
        'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
        'playlist_count': 4,
        'info_dict': {
            'title': 'JODA7',
            'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
        }
    }, {
        'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
        'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
        'info_dict': {
            'title': 'Uploads from Interstellar Movie',
            'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
        },
        # Fix: this key was misspelled 'playlist_mincout', which silently
        # disabled the minimum-count assertion for this test case.
        'playlist_mincount': 21,
    }]

    def _real_initialize(self):
        self._login()

    def _extract_mix(self, playlist_id):
        """Extract an auto-generated mix by repeatedly refetching watch pages.

        The mixes are generated from a single video; the id of the playlist
        is just 'RD' + video_id.
        """
        ids = []
        last_id = playlist_id[-11:]
        for n in itertools.count(1):
            url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
            webpage = self._download_webpage(
                url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
            new_ids = orderedSet(re.findall(
                r'''(?xs)data-video-username=".*?".*?
                           href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
                webpage))
            # Fetch new pages until all the videos are repeated, it seems that
            # there are always 51 unique videos.
            new_ids = [_id for _id in new_ids if _id not in ids]
            if not new_ids:
                break
            ids.extend(new_ids)
            last_id = ids[-1]

        url_results = self._ids_to_results(ids)

        # The mix title lives in one of several differently-classed spans.
        search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
        title_span = (
            search_title('playlist-title') or
            search_title('title long-title') or
            search_title('title'))
        title = clean_html(title_span)

        return self.playlist_result(url_results, playlist_id, title)

    def _extract_playlist(self, playlist_id):
        """Extract a regular (non-mix) playlist via its playlist page."""
        url = self._TEMPLATE_URL % playlist_id
        page = self._download_webpage(url, playlist_id)

        # Surface YouTube alert boxes: missing/private playlists are fatal,
        # anything else is reported as a warning.
        for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
            match = match.strip()
            # Check if the playlist exists or is private
            if re.match(r'[^<]*(The|This) playlist (does not exist|is private)[^<]*', match):
                raise ExtractorError(
                    'The playlist doesn\'t exist or is private, use --username or '
                    '--netrc to access it.',
                    expected=True)
            elif re.match(r'[^<]*Invalid parameters[^<]*', match):
                raise ExtractorError(
                    'Invalid parameters. Maybe URL is incorrect.',
                    expected=True)
            elif re.match(r'[^<]*Choose your language[^<]*', match):
                continue
            else:
                self.report_warning('Youtube gives an alert message: ' + match)

        playlist_title = self._html_search_regex(
            r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
            page, 'title')

        return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)

    def _check_download_just_video(self, url, playlist_id):
        """Return a single-video result when --no-playlist applies, else None."""
        # Check if it's a video-specific URL
        query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        if 'v' in query_dict:
            video_id = query_dict['v'][0]
            if self._downloader.params.get('noplaylist'):
                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
                return self.url_result(video_id, 'Youtube', video_id=video_id)
            else:
                self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))

    def _real_extract(self, url):
        # Extract playlist id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)
        playlist_id = mobj.group(1) or mobj.group(2)

        video = self._check_download_just_video(url, playlist_id)
        if video:
            return video

        if playlist_id.startswith(('RD', 'UL', 'PU')):
            # Mixes require a custom extraction process
            return self._extract_mix(playlist_id)

        return self._extract_playlist(playlist_id)
class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
_VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'playlist_mincount': 91,
'info_dict': {
'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
'title': 'Uploads from lex will',
}
}, {
'note': 'Age restricted channel',
# from https://www.youtube.com/user/DeusExOfficial
'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
'playlist_mincount': 64,
'info_dict': {
'id': 'UUs0ifCMCm1icqRbqhUINa0w',
'title': 'Uploads from Deus Ex',
},
}]
    @classmethod
    def suitable(cls, url):
        # Defer to the more specific playlists/live extractors first so that
        # channel URLs they also match are not claimed by this extractor.
        return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
                else super(YoutubeChannelIE, cls).suitable(url))
def _real_extract(self, url):
channel_id = self._match_id(url)
url = self._TEMPLATE_URL % channel_id
# Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
# Workaround by extracting as a playlist if managed to obtain channel playlist URL
# otherwise fallback on channel by page extraction
channel_page = self._download_webpage(
url + '?view=57', channel_id,
'Downloading channel page', fatal=False)
if channel_page is False:
channel_playlist_id = False
else:
channel_playlist_id = self._html_search_meta(
'channelId', channel_page, 'channel id', default=None)
if not channel_playlist_id:
channel_url = self._html_search_meta(
('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
channel_page, 'channel url', default=None)
if channel_url:
channel_playlist_id = self._search_regex(
r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
channel_url, 'channel id', default=None)
if channel_playlist_id and channel_playlist_id.startswith('UC'):
playlist_id = 'UU' + channel_playlist_id[2:]
return self.url_result(
compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
autogenerated = re.search(r'''(?x)
class="[^"]*?(?:
channel-header-autogenerated-label|
yt-channel-title-autogenerated
)[^"]*"''', channel_page) is not None
if autogenerated:
# The videos are contained in a single page
# the ajax pages can't be used, they are empty
entries = [
self.url_result(
video_id, 'Youtube', video_id=video_id,
video_title=video_title)
for video_id, video_title in self.extract_videos_from_page(channel_page)]
return self.playlist_result(entries, channel_id)
try:
next(self._entries(channel_page, channel_id))
except StopIteration:
alert_message = self._html_search_regex(
r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
channel_page, 'alert', default=None, group='alert')
if alert_message:
raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
class YoutubeUserIE(YoutubeChannelIE):
    IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
    _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:user/|c/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'https://www.youtube.com/user/%s/videos'
    IE_NAME = 'youtube:user'
    _TESTS = [{
        'url': 'https://www.youtube.com/user/TheLinuxFoundation',
        'playlist_mincount': 320,
        'info_dict': {
            'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
            'title': 'Uploads from The Linux Foundation',
        }
    }, {
        'url': 'ytuser:phihag',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/c/gametrailers',
        'only_matching': True,
    }, {
        # This channel is not available.
        'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
        'only_matching': True,
    }]
    @classmethod
    def suitable(cls, url):
        # Our _VALID_URL is deliberately very permissive, so first give every
        # other Youtube* extractor in this module a chance to claim the URL.
        for name, klass in globals().items():
            if klass is cls:
                continue
            if not (name.startswith('Youtube') and name.endswith('IE')):
                continue
            if klass.suitable(url):
                return False
        return super(YoutubeUserIE, cls).suitable(url)
class YoutubeLiveIE(YoutubeBaseInfoExtractor):
    """Resolve a channel/user /live page to its current live stream.

    If the page does not identify a single live video, fall back to
    extracting the plain channel/user page instead.
    """
    IE_DESC = 'YouTube.com live streams'
    _VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+))/live'
    IE_NAME = 'youtube:live'
    _TESTS = [{
        'url': 'http://www.youtube.com/user/TheYoungTurks/live',
        'info_dict': {
            'id': 'a48o2S1cPoo',
            'ext': 'mp4',
            'title': 'The Young Turks - Live Main Show',
            'uploader': 'The Young Turks',
            'uploader_id': 'TheYoungTurks',
            # Raw string: '\.' is an invalid escape sequence in a plain
            # literal (DeprecationWarning on Python 3); the value is unchanged.
            'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
            'upload_date': '20150715',
            'license': 'Standard YouTube License',
            'description': 'md5:438179573adcdff3c97ebb1ee632b891',
            'categories': ['News & Politics'],
            'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
            'like_count': int,
            'dislike_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        base_url = mobj.group('base_url')
        webpage = self._download_webpage(url, channel_id, fatal=False)
        if webpage:
            page_type = self._og_search_property(
                'type', webpage, 'page type', default=None)
            video_id = self._html_search_meta(
                'videoId', webpage, 'video id', default=None)
            # Only hand off to the video extractor when the page clearly
            # identifies itself as a single video with a valid 11-char id.
            if page_type == 'video' and video_id and re.match(r'^[0-9A-Za-z_-]{11}$', video_id):
                return self.url_result(video_id, YoutubeIE.ie_key())
        # No live stream found: fall back to the plain channel/user page.
        return self.url_result(base_url)
class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
    # Configuration-only subclass: the actual scraping of a user's/channel's
    # "playlists" tab lives in the base class.
    IE_DESC = 'YouTube.com user/channel playlists'
    _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
    IE_NAME = 'youtube:playlists'
    _TESTS = [{
        'url': 'http://www.youtube.com/user/ThirstForScience/playlists',
        'playlist_mincount': 4,
        'info_dict': {
            'id': 'ThirstForScience',
            'title': 'Thirst for Science',
        },
    }, {
        # with "Load more" button
        'url': 'http://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
        'playlist_mincount': 70,
        'info_dict': {
            'id': 'igorkle1',
            'title': 'Игорь Клейнер',
        },
    }, {
        'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
        'playlist_mincount': 17,
        'info_dict': {
            'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
            'title': 'Chem Player',
        },
    }]
class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
    """Handle 'ytsearchN:<query>' pseudo-URLs by scraping result pages."""
    IE_DESC = 'YouTube.com searches'
    # there doesn't appear to be a real limit, for example if you search for
    # 'python' you get more than 8.000.000 results
    _MAX_RESULTS = float('inf')
    IE_NAME = 'youtube:search'
    _SEARCH_KEY = 'ytsearch'
    _EXTRA_QUERY_ARGS = {}
    _TESTS = []
    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        videos = []
        limit = n
        for pagenum in itertools.count(1):
            url_query = {
                'search_query': query.encode('utf-8'),
                'page': pagenum,
                # 'spf': 'navigate' makes YouTube answer with a JSON page shell.
                'spf': 'navigate',
            }
            url_query.update(self._EXTRA_QUERY_ARGS)
            result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
            data = self._download_json(
                result_url, video_id='query "%s"' % query,
                note='Downloading page %s' % pagenum,
                errnote='Unable to download API page')
            html_content = data[1]['body']['content']
            # YouTube renders a dedicated message block when nothing matched.
            if 'class="search-message' in html_content:
                raise ExtractorError(
                    '[youtube] No video results', expected=True)
            new_videos = self._ids_to_results(orderedSet(re.findall(
                r'href="/watch\?v=(.{11})', html_content)))
            videos += new_videos
            # Stop when a page yields nothing new or enough results were
            # collected; any surplus beyond n is trimmed below.
            if not new_videos or len(videos) > limit:
                break
        if len(videos) > n:
            videos = videos[:n]
        return self.playlist_result(videos, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
    # Identical to ytsearch except results are ordered by upload date.
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube.com searches, newest videos first'
    _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
class YoutubeSearchURLIE(YoutubePlaylistBaseInfoExtractor):
    """Extract results from a pasted YouTube search-results URL."""
    IE_DESC = 'YouTube.com search URLs'
    IE_NAME = 'youtube:search_url'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
    _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
    _TESTS = [{
        'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
        'playlist_mincount': 5,
        'info_dict': {
            'title': 'youtube-dl test video',
        }
    }, {
        'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # The query is percent-encoded in the URL; decode it for the title.
        query = compat_urllib_parse_unquote_plus(mobj.group('query'))
        webpage = self._download_webpage(url, query)
        # Page scraping itself is shared with the playlist base class.
        return self.playlist_result(self._process_page(webpage), playlist_title=query)
class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
    IE_DESC = 'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
    IE_NAME = 'youtube:show'
    _TESTS = [{
        'url': 'https://www.youtube.com/show/airdisasters',
        'playlist_mincount': 5,
        'info_dict': {
            'id': 'airdisasters',
            'title': 'Air Disasters',
        }
    }]
    def _real_extract(self, url):
        """A show page is just a container for its per-season playlists."""
        show_id = self._match_id(url)
        playlists_url = 'https://www.youtube.com/show/%s/playlists' % show_id
        return super(YoutubeShowIE, self)._real_extract(playlists_url)
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
    """
    Base class for feed extractors
    Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
    """
    _LOGIN_REQUIRED = True
    @property
    def IE_NAME(self):
        return 'youtube:%s' % self._FEED_NAME
    def _real_initialize(self):
        # Feeds are per-account, so authentication is mandatory.
        self._login()
    def _real_extract(self, url):
        page = self._download_webpage(
            'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)
        # The extraction process is the same as for playlists, but the regex
        # for the video ids doesn't contain an index
        ids = []
        more_widget_html = content_html = page
        for page_num in itertools.count(1):
            matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
            # 'recommended' feed has infinite 'load more' and each new portion spins
            # the same videos in (sometimes) slightly different order, so we'll check
            # for unicity and break when portion has no new videos
            # NOTE: this must be a concrete list, not filter(...): on Python 3
            # a filter object is always truthy, so the emptiness check below
            # would never fire and the loop could never terminate.
            new_ids = [video_id for video_id in orderedSet(matches)
                       if video_id not in ids]
            if not new_ids:
                break
            ids.extend(new_ids)
            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break
            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']
        return self.playlist_result(
            self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
class YoutubeWatchLaterIE(YoutubePlaylistIE):
    IE_NAME = 'youtube:watchlater'
    IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
    _TESTS = [{
        'url': 'https://www.youtube.com/playlist?list=WL',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        """Extract either the single requested video or the whole WL list."""
        just_video = self._check_download_just_video(url, 'WL')
        return just_video or self._extract_playlist('WL')
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    IE_NAME = 'youtube:favorites'
    IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
    _LOGIN_REQUIRED = True
    def _real_extract(self, url):
        """Resolve the favourites page to its backing playlist."""
        favourites_page = self._download_webpage(
            'https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
        playlist_id = self._search_regex(
            r'list=(.+?)["&]', favourites_page, 'favourites playlist id')
        return self.url_result(playlist_id, 'YoutubePlaylist')
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    # Thin configuration subclass; extraction lives in the feeds base class.
    IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    # Thin configuration subclass; extraction lives in the feeds base class.
    IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
    _FEED_NAME = 'subscriptions'
    _PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    # Thin configuration subclass; extraction lives in the feeds base class.
    IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
    # Raw string: the pattern contains '\.' which is an invalid escape
    # sequence in a plain literal (DeprecationWarning on Python 3); every
    # sibling extractor already uses a raw string for _VALID_URL.
    _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
    _FEED_NAME = 'history'
    _PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
    # Catches watch URLs whose v= parameter was lost (typically because the
    # URL was not quoted in the shell and '&' split it), and reports a
    # helpful error instead of a confusing extraction failure.
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?
        (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
        (?:watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+|
            x-yt-cl=[0-9]+|
            hl=[^&]*|
            t=[0-9]+
        )?
        |
            attribution_link\?a=[^&]+
        )
        $
    '''
    _TESTS = [{
        'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'http://www.youtube.com/watch?',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?feature=foo',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?hl=en-GB',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?t=2372',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        # Extraction is impossible by design: explain the likely shell-quoting
        # mistake to the user instead.
        raise ExtractorError(
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like youtube-dl '
            '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply youtube-dl BaW_jenozKc .',
            expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
    # Matches watch URLs whose video id is shorter than the canonical 11
    # characters, i.e. the URL was cut off somewhere along the way.
    IE_NAME = 'youtube:truncated_id'
    IE_DESC = False  # Do not list
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
    _TESTS = [{
        'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Nothing sensible can be downloaded; tell the user why.
        raise ExtractorError(
            'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
            expected=True)
| 46.06971 | 226 | 0.532289 |
10d9151df48209c6518cf47cef431472ba6db007 | 433 | py | Python | committees/migrations/0045_committeespage_description.py | meagles/site | c54a61a0e9e98788086d0d9300baebbcac7fb625 | [
"MIT"
] | null | null | null | committees/migrations/0045_committeespage_description.py | meagles/site | c54a61a0e9e98788086d0d9300baebbcac7fb625 | [
"MIT"
] | null | null | null | committees/migrations/0045_committeespage_description.py | meagles/site | c54a61a0e9e98788086d0d9300baebbcac7fb625 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.11 on 2022-01-11 05:35
from django.db import migrations
import wagtail.core.fields
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an optional rich-text
    # 'description' field to the CommitteesPage model. Edit with care;
    # Django migrations are normally left as generated.
    dependencies = [
        ('committees', '0044_auto_20220109_0118'),
    ]
    operations = [
        migrations.AddField(
            model_name='committeespage',
            name='description',
            # null=True keeps existing rows valid without a data migration.
            field=wagtail.core.fields.RichTextField(null=True),
        ),
    ]
| 21.65 | 63 | 0.632794 |
041fcbb376bbd32f69b6ec283dabdaea9f55af8b | 1,010 | py | Python | bokeh/app.py | intuinno/capviz | 46a18b6e3ed0a279e7c966fbfd3f3955db501cf7 | [
"MIT"
] | null | null | null | bokeh/app.py | intuinno/capviz | 46a18b6e3ed0a279e7c966fbfd3f3955db501cf7 | [
"MIT"
] | null | null | null | bokeh/app.py | intuinno/capviz | 46a18b6e3ed0a279e7c966fbfd3f3955db501cf7 | [
"MIT"
] | null | null | null | from numpy.random import random
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.plotting import ColumnDataSource, Figure
from bokeh.models.widgets import Select, TextInput
def get_data(N):
    """Return N random points: columns 'x', 'y' in the unit square and a
    per-point radius 'r' in [0, 0.034)."""
    return {
        'x': random(size=N),
        'y': random(size=N),
        'r': random(size=N) * 0.034,
    }
# Build the data source and scatter figure.
source = ColumnDataSource(data=get_data(200))
p = Figure(tools='', toolbar_location=None)
r = p.circle(x='x', y='y', radius='r', source=source, color='navy', alpha=0.6,
             line_color='white')

COLORS = ['black', 'firebrick', 'navy', 'olive', 'goldenrod']
select = Select(title='Color', value='navy', options=COLORS)
# Renamed from 'input' so the builtin input() is no longer shadowed.
points_input = TextInput(title='Number of points', value='200')


def update_color(attrname, old, new):
    """Recolor the glyphs when a new color is picked."""
    r.glyph.fill_color = select.value


select.on_change('value', update_color)


def update_points(attrname, old, new):
    """Regenerate the data set with the requested number of points."""
    source.data = get_data(int(points_input.value))


points_input.on_change('value', update_points)

layout = column(row(select, points_input, width=400), row(p))
curdoc().add_root(layout)
| 28.055556 | 77 | 0.718812 |
673348c900f2efa1b72a27143d4df7dbd75d7d89 | 7,969 | py | Python | third_party/protobuf26/reflection.py | stdft112/depot_tools | 52c7211807930272424213ff6127c209de790eca | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/protobuf26/reflection.py | stdft112/depot_tools | 52c7211807930272424213ff6127c209de790eca | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | third_party/protobuf26/reflection.py | stdft112/depot_tools | 52c7211807930272424213ff6127c209de790eca | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This code is meant to work on Python 2.4 and above only.
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from protobuf26.internal import api_implementation
from protobuf26 import descriptor as descriptor_mod
from protobuf26 import message
# Shorthand alias used throughout this module.
_FieldDescriptor = descriptor_mod.FieldDescriptor
# Select the message implementation backend once at import time; the rest of
# the module only goes through the _NewMessage/_InitMessage indirection.
if api_implementation.Type() == 'cpp':
  # C++-accelerated implementation; API version 2 lives in the pyext package.
  if api_implementation.Version() == 2:
    from protobuf26.pyext import cpp_message
    _NewMessage = cpp_message.NewMessage
    _InitMessage = cpp_message.InitMessage
  else:
    from protobuf26.internal import cpp_message
    _NewMessage = cpp_message.NewMessage
    _InitMessage = cpp_message.InitMessage
else:
  # Pure-Python fallback implementation.
  from protobuf26.internal import python_message
  _NewMessage = python_message.NewMessage
  _InitMessage = python_message.InitMessage
class GeneratedProtocolMessageType(type):
  """Metaclass for protocol message classes created at runtime from Descriptors.
  We add implementations for all methods described in the Message class. We
  also create properties to allow getting/setting all fields in the protocol
  message. Finally, we create slots to prevent users from accidentally
  "setting" nonexistent fields in the protocol message, which then wouldn't get
  serialized / deserialized properly.
  The protocol compiler currently uses this metaclass to create protocol
  message classes at runtime. Clients can also manually create their own
  classes at runtime, as in this example:
  mydescriptor = Descriptor(.....)
  class MyProtoClass(Message):
    __metaclass__ = GeneratedProtocolMessageType
    DESCRIPTOR = mydescriptor
  myproto_instance = MyProtoClass()
  myproto.foo_field = 23
  ...
  The above example will not work for nested types. If you wish to include them,
  use reflection.MakeClass() instead of manually instantiating the class in
  order to create the appropriate class structure.
  """
  # Must be consistent with the protocol-compiler code in
  # proto2/compiler/internal/generator.*.
  _DESCRIPTOR_KEY = 'DESCRIPTOR'
  def __new__(cls, name, bases, dictionary):
    """Custom allocation for runtime-generated class types.
    We override __new__ because this is apparently the only place
    where we can meaningfully set __slots__ on the class we're creating(?).
    (The interplay between metaclasses and slots is not very well-documented).
    Args:
      name: Name of the class (ignored, but required by the
        metaclass protocol).
      bases: Base classes of the class we're constructing.
        (Should be message.Message).  We ignore this field, but
        it's required by the metaclass protocol
      dictionary: The class dictionary of the class we're
        constructing.  dictionary[_DESCRIPTOR_KEY] must contain
        a Descriptor object describing this protocol message
        type.
    Returns:
      Newly-allocated class.
    """
    descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
    # The selected backend may substitute its own base classes.
    bases = _NewMessage(bases, descriptor, dictionary)
    superclass = super(GeneratedProtocolMessageType, cls)
    new_class = superclass.__new__(cls, name, bases, dictionary)
    # Record the concrete generated class on the descriptor for later lookup.
    setattr(descriptor, '_concrete_class', new_class)
    return new_class
  def __init__(cls, name, bases, dictionary):
    """Here we perform the majority of our work on the class.
    We add enum getters, an __init__ method, implementations
    of all Message methods, and properties for all fields
    in the protocol type.
    Args:
      name: Name of the class (ignored, but required by the
        metaclass protocol).
      bases: Base classes of the class we're constructing.
        (Should be message.Message).  We ignore this field, but
        it's required by the metaclass protocol
      dictionary: The class dictionary of the class we're
        constructing.  dictionary[_DESCRIPTOR_KEY] must contain
        a Descriptor object describing this protocol message
        type.
    """
    descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
    # Inject field properties, enum getters etc. into the freshly built class.
    _InitMessage(descriptor, cls)
    superclass = super(GeneratedProtocolMessageType, cls)
    superclass.__init__(name, bases, dictionary)
def ParseMessage(descriptor, byte_str):
  """Generate a new Message instance from this Descriptor and a byte string.
  Args:
    descriptor: Protobuf Descriptor object
    byte_str: Serialized protocol buffer byte string
  Returns:
    Newly created protobuf Message object.
  """
  message_class = MakeClass(descriptor)
  instance = message_class()
  instance.ParseFromString(byte_str)
  return instance
def MakeClass(descriptor):
  """Construct a class object for a protobuf described by descriptor.
  Nested message types are handled recursively: each nested descriptor
  becomes a class attribute of the generated parent class, mirroring the
  structure the protocol compiler would have produced.
  Sample usage:
    file_descriptor = descriptor_pb2.FileDescriptorProto()
    file_descriptor.ParseFromString(proto2_string)
    msg_descriptor = descriptor.MakeDescriptor(file_descriptor.message_type[0])
    msg_class = reflection.MakeClass(msg_descriptor)
    msg = msg_class()
  Args:
    descriptor: A descriptor.Descriptor object describing the protobuf.
  Returns:
    The Message class object described by the descriptor.
  """
  attributes = {
      name: MakeClass(nested_type)
      for name, nested_type in descriptor.nested_types_by_name.items()
  }
  attributes[GeneratedProtocolMessageType._DESCRIPTOR_KEY] = descriptor
  return GeneratedProtocolMessageType(
      str(descriptor.name), (message.Message,), attributes)
| 38.684466 | 80 | 0.759568 |
cec68cffe62818f86bcb7602204a99a0b0faaee7 | 2,120 | py | Python | src/projects/migrations/0002_projectpermission_userpermissiongroupprojectpermission_userprojectpermission.py | nixplay/bullet-train-api | 608422d174443a4d9178d875ccaeb756a771e908 | [
"BSD-3-Clause"
] | 1,259 | 2021-06-10T11:24:09.000Z | 2022-03-31T10:30:44.000Z | src/projects/migrations/0002_projectpermission_userpermissiongroupprojectpermission_userprojectpermission.py | nixplay/bullet-train-api | 608422d174443a4d9178d875ccaeb756a771e908 | [
"BSD-3-Clause"
] | 392 | 2021-06-10T11:12:29.000Z | 2022-03-31T10:13:53.000Z | src/projects/migrations/0002_projectpermission_userpermissiongroupprojectpermission_userprojectpermission.py | nixplay/bullet-train-api | 608422d174443a4d9178d875ccaeb756a771e908 | [
"BSD-3-Clause"
] | 58 | 2021-06-11T03:18:07.000Z | 2022-03-31T14:39:10.000Z | # Generated by Django 2.2.10 on 2020-02-16 20:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration introducing project-level permission
    # models (per-user and per-group). Django migrations are normally left
    # exactly as generated.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('users', '0024_auto_20200216_1924'),
        ('projects', '0001_initial'),
        ('permissions', '0001_initial')
    ]
    operations = [
        # Catalogue of available project permissions, keyed by string.
        migrations.CreateModel(
            name='ProjectPermission',
            fields=[
                ('key', models.CharField(max_length=100, primary_key=True, serialize=False)),
                ('description', models.TextField()),
            ],
        ),
        # Permissions granted to an individual user on a project.
        migrations.CreateModel(
            name='UserProjectPermission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('admin', models.BooleanField(default=False)),
                ('permissions', models.ManyToManyField(to='permissions.PermissionModel')),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Project')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Permissions granted to a user group on a project.
        migrations.CreateModel(
            name='UserPermissionGroupProjectPermission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('admin', models.BooleanField(default=False)),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.UserPermissionGroup')),
                ('permissions', models.ManyToManyField(to='permissions.PermissionModel')),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Project')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 40.769231 | 122 | 0.595755 |
ae512c49edadeb7e9fe1c733b66eb7670f6347b6 | 535 | py | Python | libs/yowsup/yowsup/yowsup/layers/protocol_privacy/layer.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | 22 | 2017-07-14T20:01:17.000Z | 2022-03-08T14:22:39.000Z | libs/yowsup/yowsup/yowsup/layers/protocol_privacy/layer.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | 6 | 2017-07-14T21:03:50.000Z | 2021-06-10T19:08:32.000Z | libs/yowsup/yowsup/yowsup/layers/protocol_privacy/layer.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | 13 | 2017-07-14T20:13:14.000Z | 2020-11-12T08:06:05.000Z | from yowsup.layers import YowLayer, YowLayerEvent, YowProtocolLayer
from .protocolentities import *
class YowPrivacyProtocolLayer(YowProtocolLayer):
    def __init__(self):
        # Route inbound/outbound <iq> nodes to our handlers.
        super(YowPrivacyProtocolLayer, self).__init__({
            "iq": (self.recvIq, self.sendIq)
        })

    def __str__(self):
        return "Privacy Layer"

    def sendIq(self, entity):
        # Only privacy iq entities are passed down the stack.
        if entity.getXmlns() != "jabber:iq:privacy":
            return
        self.entityToLower(entity)

    def recvIq(self, node):
        # Incoming privacy iq nodes are currently ignored.
        pass
| 28.157895 | 67 | 0.654206 |
d936bb78a27b77b8fadc963c9db698090024d1ca | 1,240 | py | Python | project_euler/problem_021/sol1.py | sourcery-ai-bot/Python | f1444aca73068e0c55f60ec63b12c53b402e54dd | [
"MIT"
] | null | null | null | project_euler/problem_021/sol1.py | sourcery-ai-bot/Python | f1444aca73068e0c55f60ec63b12c53b402e54dd | [
"MIT"
] | null | null | null | project_euler/problem_021/sol1.py | sourcery-ai-bot/Python | f1444aca73068e0c55f60ec63b12c53b402e54dd | [
"MIT"
] | null | null | null | """
Amicable Numbers
Problem 21
Let d(n) be defined as the sum of proper divisors of n (numbers less than n
which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and
each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55
and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and
142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
"""
from math import isqrt, sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (divisors strictly < n).

    Uses math.isqrt instead of float sqrt so the perfect-square check is
    exact for arbitrarily large integers.
    """
    total = 0
    root = isqrt(n)
    for i in range(1, root + 1):
        if n % i == 0:
            # Count each divisor pair (i, n // i) in one pass.
            total += i + n // i
    if root * root == n:
        # The square root was counted twice by the pair above.
        total -= root
    # Exclude n itself: only proper divisors are wanted.
    return total - n
def solution(n: int = 10000) -> int:
    """Returns the sum of all the amicable numbers under n.
    >>> solution(10000)
    31626
    >>> solution(5000)
    8442
    >>> solution(1000)
    504
    >>> solution(100)
    0
    >>> solution(50)
    0
    """
    total = 0
    for a in range(1, n):
        # Compute d(a) once instead of twice per candidate.
        b = sum_of_divisors(a)
        # Amicable: d(a) = b, d(b) = a and a != b (perfect numbers excluded).
        if b != a and sum_of_divisors(b) == a:
            total += a
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 23.846154 | 79 | 0.589516 |
34ad2db0477662b87e374cd5be15b26b0053b6dc | 101 | py | Python | hw_twitterapi/twitterapiapp/apps.py | cerob/bounswe2018group6 | 6f18bfa629a7422862151050a4e187163baddb68 | [
"MIT"
] | 12 | 2018-02-06T06:52:07.000Z | 2021-04-11T15:19:15.000Z | hw_twitterapi/twitterapiapp/apps.py | ozgursolak/bounswe2018group6 | 6f18bfa629a7422862151050a4e187163baddb68 | [
"MIT"
] | 151 | 2018-02-11T20:53:48.000Z | 2019-01-08T08:35:46.000Z | hw_twitterapi/twitterapiapp/apps.py | ozgursolak/bounswe2018group6 | 6f18bfa629a7422862151050a4e187163baddb68 | [
"MIT"
] | 7 | 2018-02-06T20:27:25.000Z | 2019-02-23T10:10:01.000Z | from django.apps import AppConfig
class TwitterapiappConfig(AppConfig):
    # Django application configuration for the 'twitterapiapp' app.
    name = 'twitterapiapp'
| 16.833333 | 37 | 0.782178 |
5b14b287d93b77b6e449d20c57f89371d2a5578e | 4,318 | py | Python | rocketchat_bot_app_bridge/BotBackendController.py | JargeZ/RocketChat-Simple-AppBot | 859e5e2138ce19ed7cbb7f5888583924a6830103 | [
"MIT"
] | 3 | 2021-07-28T20:03:34.000Z | 2022-01-26T14:08:04.000Z | rocketchat_bot_app_bridge/BotBackendController.py | JargeZ/RocketChat-Simple-AppBot | 859e5e2138ce19ed7cbb7f5888583924a6830103 | [
"MIT"
] | null | null | null | rocketchat_bot_app_bridge/BotBackendController.py | JargeZ/RocketChat-Simple-AppBot | 859e5e2138ce19ed7cbb7f5888583924a6830103 | [
"MIT"
] | null | null | null | import json
import os
import webhook_listener
from loguru import logger
import requests
from rocketchat_bot_app_bridge.definitions.application import IBackendRequest, IToBackendButtonClick, EventType, \
IFromBackendRequestSendMessage
from rocketchat_bot_app_bridge.definitions.message import IMessage
from rocketchat_bot_app_bridge.definitions.user import IUser
class BotBackendController(object):
    """Singleton bridge between the RocketChat app (via incoming webhooks)
    and registered Python bot instances.

    Incoming POSTs are parsed and dispatched to the registered bots;
    send_event_to_app() relays events back to the app's HTTP endpoint.
    """

    def __new__(cls):
        # Classic singleton: every construction yields the same instance.
        if not hasattr(cls, 'instance'):
            cls.instance = super(BotBackendController, cls).__new__(cls)
        return cls.instance

    def __init__(self):
        # __init__ still runs on every BotBackendController() call even
        # though __new__ hands back the shared instance; without this guard
        # a second construction would wipe the bot registry and spawn a
        # duplicate webhook listener.
        if getattr(self, '_initialized', False):
            return
        self._initialized = True
        self.bots = {}
        self.app_endpoint = os.getenv('APP_ENDPOINT', None)
        self.webhooks = webhook_listener.Listener(
            # Environment values are strings; the listener needs an int port.
            port=int(os.getenv('BOT_PORT', 3228)),
            handlers={
                "POST": self.handle_POST,
            },
        )

    def handle_POST(self, request, *args, **kwargs):
        """Webhook entry point: parse the app's JSON payload and dispatch it
        by event type."""
        body = request.body.read(int(request.headers["Content-Length"]))
        logger.debug(
            "Received request:\n"
            + "Method: {}\n".format(request.method)
            + "Headers: {}\n".format(request.headers)
            + "Args (url path): {}\n".format(args)
            + "Keyword Args (url parameters): {}\n".format(kwargs)
            + "Body: {}".format(
                body
                if int(request.headers.get("Content-Length", 0)) > 0
                else ""
            )
        )
        from_app_data: IBackendRequest = json.loads(body)
        event = from_app_data['event']
        bot = from_app_data['bot']
        payload = from_app_data['payload']
        if event == 'new_message':
            self.dispatch_new_message(payload, bot)
        elif event == 'button_click':
            self.dispatch_button_click(payload, bot)

    def run(self):
        """Start the webhook HTTP server."""
        logger.info(f'Running webhook server')
        self.webhooks.start()
        logger.info(f'Listen on http://{self.webhooks.host}:{self.webhooks.port}/ copy this(or service ip for '
                    f'kubernetes) to RocketChat app settings')

    def register_bot(self, instance, bot_username=None):
        """Register *instance* to receive events addressed to *bot_username*."""
        logger.info(f'Registered bot {bot_username}')
        self.bots[bot_username] = instance

    def dispatch_button_click(self, payload: IToBackendButtonClick, bot: IUser):
        """Route a button click to an exact action handler, falling back to a
        prefix ("group") handler when no exact match exists."""
        bot = self._select_context_bot(bot)
        handler = bot.button_handler_by_action.get(payload['action'], None)
        if not handler:
            # Fall back to group handlers keyed by action-name prefix.
            for group_name, callback in bot.button_group_handler_by_startsw.items():
                if payload['action'].startswith(group_name):
                    handler = callback
                    break
        if not handler:
            logger.error(f"Bot {bot.username} does not have button/group handler for action {payload['action']}")
            return
        handler(payload['action'], payload['user'], payload['message'])

    def dispatch_new_message(self, payload: IMessage, bot: IUser):
        """Route an incoming message to the first matching 'hear' regex, or
        otherwise to the bot's generic new_message handler."""
        bot = self._select_context_bot(bot)
        for regex, callback in bot.hears_by_regexp.items():
            matches = regex.search(payload["text"])
            if matches:
                # Named regex groups are forwarded as keyword arguments.
                # (An unreachable 'break' after this return was removed.)
                callback(payload, **matches.groupdict())
                return
        handler = bot.callback_by_event.get('new_message', None)
        if not handler:
            logger.error(f"Bot {bot.username} does not have new_message/hear handler")
            return
        handler(payload)

    def _select_context_bot(self, bot: IUser):
        """Map the app-supplied bot descriptor onto a registered instance.

        Returns None (after logging an error) when no matching bot exists.
        """
        if bot['type'] == 'app':
            try:
                return self.bots['self_app']
            except KeyError:
                logger.error(f"Failed select default bot")
        else:
            try:
                return self.bots[bot['username']]
            except KeyError:
                logger.error(f"Failed dispatch new message for {bot['username']} please create instance for them")
                return

    def send_event_to_app(self, *args, instance=None, event_type: EventType, payload: IFromBackendRequestSendMessage):
        """POST an event originating from *instance* back to the app endpoint."""
        data = {
            'event': event_type,
            'bot': IUser(username=instance.username),
            'payload': payload
        }
        result = requests.post(
            self.app_endpoint,
            json=data)
        logger.debug(result)
| 35.68595 | 118 | 0.602131 |
d54d7d31d526b1be713145942d13c9b3de734d0d | 868 | py | Python | Python/0021_merge_two_sorted_lists.py | codingyen/CodeAlone | b16653957258ac09f74bb1a380f29dd93e055a44 | [
"MIT"
] | 2 | 2018-08-20T04:38:06.000Z | 2019-02-03T07:48:28.000Z | Python/0021_merge_two_sorted_lists.py | codingyen/CodeAlone | b16653957258ac09f74bb1a380f29dd93e055a44 | [
"MIT"
] | null | null | null | Python/0021_merge_two_sorted_lists.py | codingyen/CodeAlone | b16653957258ac09f74bb1a380f29dd93e055a44 | [
"MIT"
] | null | null | null | # What additional node do we need?
# while condition?
# curr = curr.next
class ListNode:
    """Singly-linked list node holding a value and a pointer to the next node."""

    def __init__(self, val=0, next=None):
        self.next = next
        self.val = val
class Solution:
    """LeetCode 21: merge two sorted linked lists into one sorted list."""

    def mergeTwoLists(self, l1, l2):
        """Splice the nodes of l1 and l2 together in nondecreasing order."""
        sentinel = ListNode(0)  # dummy head avoids special-casing the first node
        tail = sentinel
        while l1 and l2:
            if l2.val <= l1.val:        # ties take l2, as in the original
                tail.next, l2 = l2, l2.next
            else:
                tail.next, l1 = l1, l1.next
            tail = tail.next
        tail.next = l1 if l1 else l2    # append whichever list remains
        return sentinel.next
if __name__ == '__main__':
    # Smoke test: merge [1, 2, 4] and [1, 3, 4], then print each merged value.
    s = Solution()
    l1 = ListNode(1)
    l1.next = ListNode(2)
    l1.next.next = ListNode(4)
    l2 = ListNode(1)
    l2.next = ListNode(3)
    l2.next.next = ListNode(4)
    outlist = s.mergeTwoLists(l1, l2)
    while outlist:
        print(outlist.val)
        outlist = outlist.next
8daaa0f70d7006a8abae26e22ea9e2bd901b0693 | 1,463 | py | Python | src/arg_taker.py | maph420/yt_downloader | fac8681295875f0638330f9af333a109638b150f | [
"MIT"
] | null | null | null | src/arg_taker.py | maph420/yt_downloader | fac8681295875f0638330f9af333a109638b150f | [
"MIT"
] | null | null | null | src/arg_taker.py | maph420/yt_downloader | fac8681295875f0638330f9af333a109638b150f | [
"MIT"
] | null | null | null | import sys
import argparse
# Recognized command-line switches (short and long forms), checked by main().
argsList=["-s","-d","--search","--download","-h","--help"]
class unknown_parameter(Exception):
    """Raised by main() when argv[1] is not one of the switches in argsList."""
    pass
def helpDisplay():
    """Print the CLI usage/help text, then terminate the program via exit()."""
    help_lines = (
        "\n--------------------------------------",
        "\tYT DOWNLOADER",
        "\tmade by maph",
        "--------------------------------------\n",
        "usage: python yt.py [args] <optional args>",
        "General options\n",
        "-h , --help\n\tshow this help message and exit\n\n",
        "-d [video_url_goes_here], --download [video_url_goes_here]\n\tdownload a video given its url\n\n",
        "-s [video_title] <num_of_results_to_be_searched>, --search [video_title] <num_of_results_to_be_searched>\n\tMake a youtube search within the CLI given a video's title",
        "\t(You can ommit the <num_of_results_to_be_searched> arg though, then 10 results will be displayed as default)",
        "\tIMPORTANT: [video_title] CANNOT contain spaces due to arg compatibility, however youtube results won't change at all if you use '_' instead of a space\n",
    )
    for line in help_lines:
        print(line)
    exit()
def main():
    """Validate sys.argv and return the recognized switch string (argv[1]).

    Exits with a usage hint when too few arguments are supplied; -h/--help
    may appear alone, but every other switch requires an operand.
    Raises unknown_parameter for a switch not listed in argsList.
    """
    # NOTE: 'or' binds looser than 'and', so this reads:
    # (fewer than 2 args) OR (switch is not help AND fewer than 3 args).
    if len(sys.argv)<2 or not (sys.argv[1]=='-h' or sys.argv[1]=='--help') and len(sys.argv) < 3:
        print("Error: Too few arguments, try again\nYou can look for help with: 'python yt.py -h'")
        exit()
    else:
        arg = sys.argv[1]
        for a in argsList:
            if a == arg:
                return arg
        raise unknown_parameter
if __name__ == "__main__":
    # Entry point: capture the validated switch from the command line.
    x = main()
bf9030f4eee7f7b2a4fee948b8a363510add52c8 | 82,855 | py | Python | module4/Supporting Materials/subsample_covid.py | tcxxxx/SARS-CoV-2_workshop | 93d93016a3ce955e3cde7e360417de695e47e0d6 | [
"MIT"
] | 18 | 2021-03-12T16:19:33.000Z | 2021-09-04T18:19:57.000Z | module4/Supporting Materials/subsample_covid.py | babasaraki/SARS-CoV-2_workshop | 5ff032a2d485d5bdda137c28634c8b51273f3040 | [
"MIT"
] | null | null | null | module4/Supporting Materials/subsample_covid.py | babasaraki/SARS-CoV-2_workshop | 5ff032a2d485d5bdda137c28634c8b51273f3040 | [
"MIT"
] | 14 | 2021-03-16T22:24:20.000Z | 2021-05-04T13:56:50.000Z | #!/usr/bin/env python3
#
# Subsample sequences
#
#
# Author: Josh Cherry
import os, sys, getopt, itertools
import datetime, time, hashlib, random, pickle
# Read fasta (from course.py)
def fasta_read(fname_or_fid):
    """
    Read sequences from a file in FASTA format.

    Returns a pair (sequences, identifiers) as two parallel lists,
    in the order the records appear in the file.
    *fname_or_fid* is a string (file name) or an open file-like object.
    Raises RuntimeError on a duplicate identifier.
    """
    # Treat anything that is not exactly a str as an already-open file.
    fid = open(fname_or_fid) if type(fname_or_fid) == str else fname_or_fid
    chunks = {}   # identifier -> list of sequence lines (insertion-ordered)
    order = []    # identifiers in file order
    for line in fid:
        if line.startswith('>'):
            ident = line[1:].split()[0]
            if ident in chunks:
                raise RuntimeError('Duplicate name "%s" in fasta file %s'
                                   % (ident, fname_or_fid))
            chunks[ident] = []
            order.append(ident)
        else:
            chunks[ident].append(line.strip())
    return [''.join(chunks[ident]) for ident in order], order
# Crude fasta writing
def writefasta(seqs, ids, fname, comments=None):
line_len = 60
if len(seqs) != len(ids):
raise RuntimeError('lenth of seqs not equal to length of ids')
with open(fname, 'w') as fid:
for i in range(len(seqs)):
if comments and i < len(comments):
fid.write('>%s %s\n' % (ids[i], comments[i]))
else:
fid.write('>%s\n' % ids[i])
for pos in range(0, len(seqs[i]), line_len):
fid.write(seqs[i][pos:pos+line_len]); fid.write('\n')
# Group things (into lists) according to the result of supplied function
# (which becomes a dictionary key)
def GroupBy(l, func):
    """Partition *l* into lists keyed by func(element); returns the dict."""
    grouped = {}
    for item in l:
        grouped.setdefault(func(item), []).append(item)
    return grouped
# iterator (generator) that "flattens" a list of lists (or other iterables),
# iterating over all the elements of all the elements.
def flatten(l):
    """Generator yielding every element of every sub-iterable of *l*."""
    for sub in l:
        yield from sub
# return randomly shuffled version of iterable as a list
def shuffled(it):
    """Return the elements of *it* as a new list in random order."""
    result = list(it)
    random.shuffle(result)
    return result
return l
# Given a list or tuple, make a dictionary of counts of the keys
def Counts(l):
rv = {}
for el in l:
if not el in rv:
rv[el] = 0
rv[el] += 1
return rv
_times = []  # stack of os.times() snapshots pushed by tick(), popped by tock()

def tick():
    """Push a CPU/wall-clock snapshot; pair each call with a later tock()."""
    _times.append(os.times())
tt_messages = []  # log of every timing line printed by tock()

def tock(msg=''):
    """Pop the matching tick() snapshot and print user/system/elapsed deltas.

    Output is indented two spaces per still-open tick() so nested timings
    read like a call tree; the line is also appended to tt_messages.
    """
    now = os.times()
    then = _times.pop()
    # Fields 0, 1 and 4 of os.times() are user, system and elapsed time.
    deltas = ' '.join('%.3f' % (now[i] - then[i]) for i in (0, 1, 4))
    line = '%s%s %s' % ('  ' * len(_times), deltas, msg)
    print(line)
    tt_messages.append(line)
def GetInputFname(prompt, type_text=''):
    '''
    Get path to file from user, guaranteeing that it exists
    and is readable, and tolerating blank line input.
    A lone "." launches a graphical file dialog.
    '''
    while True:
        fname = input(prompt)
        if not fname.strip():
            # BUG FIX: blank/whitespace input now silently re-prompts.
            # Previously it fell through and complained that '' "does not
            # exist", inconsistent with GetOutputFname's handling.
            continue
        if fname.strip() == '.':
            # NOTE(review): Gui is not imported in this file -- presumably
            # supplied elsewhere in the full program; confirm before relying
            # on the graphical-dialog path.
            try:
                fname = Gui.FileDialog('r', prompt, type_text)
            except Exception as e:
                print('Problem running graphical file dialog: %s'
                      % e.args[0])
                print('Returning to text interface')
                print()
                continue
            if fname == None:
                print('File dialog cancelled; enter file name at prompt')
                print()
                continue
            else:
                # Dialog result is returned without the access checks below.
                return fname
        elif not os.access(fname, os.F_OK):
            print()
            print('%s does not exist; try again' % fname)
        elif not os.access(fname, os.R_OK):
            print()
            print('%s exists but is not readable; try again' % fname)
        else:
            # good fname
            return fname
def GetPrefsWithinCat():
    '''
    Get preference criteria in order of priority.

    Prints the legend of single-letter criteria, then loops until the user
    enters a valid priority string.  Returns the upper-cased string of
    letters with separators removed (e.g. 'ULD').  Validation enforces:
    no duplicates; 'N' only alone; only letters from ULDMGA; 'U' first if
    present; and 'M' only useful when it precedes 'D' non-adjacently.
    '''
    print('Specify the criteria for choosing within a category, ')
    print('in order of importance. The first criterion takes priority, ')
    print('so the others only matter when candidates are "tied" for the')
    print('first. Similarly, the second takes priority over all but ')
    print('the first, and so on. Randomization applies in all cases.')
    print()
    print('Specify one or more letters (either case), separated by whitespace, commas, or nothing:')
    print()
    print('N No preferences. Purely random choice. Must be only letter specified.')
    print('U Seek uniformity of date distribution.')
    print('  If used, this must come first in priorities,')
    print('  and the date (in effect, year) must be part of')
    print('  the category definition.')
    print('L Maximize sequence length '
          '(includes internal gaps, but not leading and trailing gaps)')
    print('D Completeness of date. YMD > YM > Y')
    print('M Presence of month; no preference for YMD over YM.')
    print('  It can be meaningful and useful to provide both D and M,')
    print('  provided that M comes first and something comes between, e.g., MLD.')
    print('G Minimize number of internal gaps with lengths not divisible by 3 and less than 5')
    print('A Minimize number of ambiguity characters (N, etc.).')
    print()
    while True:
        line = input('Specify preferences for isolate selection in order of priority: ')
        if not line.strip():
            # just white space: silently re-prompt
            pass
        else:
            # Get the letters as a string with nothing else
            s = ''.join(line.replace(',', ' ').split()).upper()
            if len(set(s)) < len(s):
                print('Duplicate letters not allowed')
                continue
            if 'N' in s:
                if len(s) > 1:
                    print('N must be the only letter if specified')
                    continue
            else:
                if not set(s).issubset('ULDMGA'):
                    print('Response contained non-allowed character(s): %s'
                          % set(s).difference('ULDMGA'))
                    continue
                if 'U' in s[1:]:
                    print('U must come first if specified.')
                    continue
                if 'D' in s and 'M' in s:
                    idx_d = s.find('D')
                    idx_m = s.find('M')
                    if idx_d < idx_m:
                        print('M is redundant with D unless M comes first (has higher priority)')
                        continue
                    if idx_d == idx_m + 1:
                        print('M is redundant with D unless something comes between them')
                        continue
            return s
def GetYN(prompt, default=None):
    '''
    Ask a yes/no question and return True or False.

    An empty reply returns *default* when one is given; otherwise any
    prefix of "yes"/"no" up to three characters (either case) is accepted.
    Invalid replies re-prompt with a short reminder.
    '''
    while True:
        raw = input(prompt)
        if not raw and default is not None:
            # bare RETURN with a default available
            return default
        word = raw.strip().lower()
        if word and len(word) <= 3:
            if 'yes'.startswith(word):
                return True
            if 'no'.startswith(word):
                return False
        print()
        if default is not None:
            print('please type y(es) or n(o) (or RETURN for %s)'
                  % ('yes' if default else 'no'))
        else:
            print('please type y(es) or n(o)')
def GetOutputFname(prompt, type_text=''):
    '''
    Get path to file from user, tolerating blank line input.
    '.' causes file dialog to launch, if possible (requires wxPython)

    Unlike GetInputFname, no existence/writability checks are made on the
    returned path.
    NOTE(review): Gui is not imported in this file -- presumably supplied
    elsewhere in the full program; confirm before relying on the dialog.
    '''
    while True:
        fname = input(prompt)
        if not fname.strip():
            # just white space: re-prompt
            pass
        else:
            if fname.strip() == '.':
                try:
                    fname = Gui.FileDialog('w', prompt, type_text)
                except Exception as e:
                    print('Problem running graphical file dialog: %s' % e.args[0])
                    print('Returning to text interface')
                    print()
                    continue
                if fname == None:
                    print('File dialog cancelled; enter file name at prompt')
                    print()
                    continue
            # good fname
            return fname
def GetFieldChoices(num_fields):
    '''
    Get one or more field indices.

    Returns a list of distinct integers in [0, num_fields), or the string
    'S' if the user asks to load a spreadsheet.  Invalid entries re-prompt.
    '''
    while True:
        s = input('Enter one or more field numbers '
                  'separated by commas or whitespace, '
                  'or S to load spreadsheet: ')
        if not s.strip():
            # just white space: re-prompt
            pass
        else:
            if s.strip().upper() == 'S':
                return 'S'
            field_strs = s.replace(',', ' ').split()
            field_indices = []
            for fs in field_strs:
                try:
                    n = int(fs)
                except ValueError:
                    print('"%s" is not a valid integer' % fs)
                    break
                if n < 0 or n >= num_fields:
                    print('%d out of range' % n)
                    break
                field_indices.append(n)
            else: # unless break from above
                if len(set(field_indices)) < len(field_indices):
                    print('Error: duplicate field indices')
                else:
                    return field_indices
def GetPosInt(prompt):
    '''
    Get a positive integer from the user.

    Blank lines are ignored; non-integers and values <= 0 print an error
    and re-prompt.  Prints a blank line and returns the value on success.
    '''
    while True:
        s = input(prompt)
        if not s.strip():
            # just white space: re-prompt
            continue
        try:
            n = int(s)
        except ValueError:
            # BUG FIX: the original format string '"s" not a valid integer'
            # had no %s placeholder, so this line raised TypeError instead
            # of printing.  Wording now matches GetFieldChoices.
            print('"%s" is not a valid integer' % s)
            continue
        if n <= 0:
            print('Number must be greater than zero')
            continue
        print()
        return n
def GetSingleCharacter(prompt, default):
    '''
    Get a single character from the user; an empty reply yields *default*.
    Replies longer than one character print an error and re-prompt.
    '''
    while True:
        reply = input(prompt)
        if not reply:
            # bare RETURN
            return default
        if len(reply) == 1:
            return reply
        print()
        print('%s invalid; must be single character' % reply)
def StrFTime(fmt, t=None):
    '''
    Like time.strftime, but abbreviates long (non-abbreviated) time-zone
    names produced under Windows (e.g. "Pacific Standard Time" -> "PST").

    BUG FIX: *t* previously defaulted to ``time.localtime()`` evaluated once
    at import time, so later calls that omitted *t* silently formatted a
    stale timestamp.  It now defaults to the current local time at call time.
    '''
    if t is None:
        t = time.localtime()
    s = time.strftime(fmt, t)
    if '%Z' in fmt:
        tz = time.strftime('%Z', t)
        if len(tz.split()) == 3:
            # Three-word zone name: abbreviate to its initials.
            abbr = ''.join([x[0] for x in tz.split()])
            s = s.replace(tz, abbr)
    return s
def TimeRange(st_early, st_late):
    '''
    Acts on time.struct_time instances.
    Makes a string representing the range of time.

    The start of the range is printed with only as much detail as differs
    from the end: full date when years differ, down to seconds-only when
    everything but the seconds matches.
    '''
    if st_early.tm_year != st_late.tm_year:
        return '%s-%s' % (StrFTime('%X %Z %d %b %Y', st_early), StrFTime('%X %Z %d %b %Y', st_late))
    elif st_early.tm_mon != st_late.tm_mon or st_early.tm_mday != st_late.tm_mday:
        # Not the same day, but same year
        return '%s-%s' % (StrFTime('%X %Z %d %b', st_early), StrFTime('%X %Z %d %b %Y', st_late))
    elif st_early.tm_isdst != st_late.tm_isdst:
        # Unlikely: same day, but DST has changed
        return '%s-%s' % (StrFTime('%X %Z', st_early), StrFTime('%X %Z %d %b %Y', st_late))
    elif st_early.tm_hour != st_late.tm_hour:
        # Same day, and DST has not changed.
        return '%s-%s' % (StrFTime('%X', st_early), StrFTime('%X %Z %d %b %Y', st_late))
    elif st_early.tm_min != st_late.tm_min:
        return '%s-%s' % (StrFTime('%X', st_early), StrFTime(':%M:%S %Z %d %b %Y', st_late))
    else:
        return '%s-%s' % (StrFTime('%X', st_early), StrFTime(':%S %Z %d %b %Y', st_late))
def Quit(val):
    """Terminate with exit status *val*.

    On Windows, when not launched from a command prompt (no PROMPT
    environment variable), pause first so the console stays readable.
    """
    if sys.platform.startswith('win32') and 'PROMPT' not in os.environ:
        input('Press return to exit')
    exit(val)
def ShowExampleTagVals(names, delimiter):
    '''
    Show example tag field values to aid user.

    Prints, for each field index, the corresponding value from the first
    and last entries of *names* (long values truncated to 30 characters).
    Exits the program via Quit(1) if the two entries disagree on field count.
    '''
    def processTag(tag):
        '''
        If tag length above threshold, replace with
        truncation plus '...'
        '''
        if len(tag) > 30:
            return tag[:27] + '...'
        return tag
    print('Example tag values:')
    print()
    tags1 = names[0].split(delimiter)
    tags2 = names[-1].split(delimiter)
    if len(tags1) != len(tags2):
        print('Number of fields in first and last entries not equal')
        Quit(1)
    for i in range(len(tags1)):
        print('%d %-30s %-30s'
              % (i, processTag(tags1[i]), processTag(tags2[i])))
    print()
def FindDateField(names, delim):
    """Return the unique field index that looks like an ISO date in every name.

    A field qualifies when it starts with four digits, contains only digits
    and '-', and any fifth character is '-'.  Raises RuntimeError unless
    exactly one such field exists across all of *names*.
    """
    def _looks_like_date(tok):
        if len(tok) < 4 or not tok[:4].isdecimal():
            return False
        if not set(tok).issubset('0123456789-'):
            return False
        return len(tok) < 5 or tok[4] == '-'
    rows = [name.split(delim) for name in names]
    candidates = [idx for idx in range(len(rows[0]))
                  if all(_looks_like_date(row[idx]) for row in rows)]
    if len(candidates) != 1:
        raise RuntimeError('Could not determine date field')
    return candidates[0]
def FieldTuple(name, delim, fields, date_index):
    """Return the selected *fields* of *name* as a tuple; when the date field
    is among them, it is truncated to its four-digit year."""
    parts = name.split(delim)
    return tuple(parts[i][:4] if i == date_index else parts[i]
                 for i in fields)
def DateCompleteness(name, delim, idx):
    '''
    Completeness of the date in field *idx* of *name*:
    0 if only year (possibly padded with dashes)
    1 if year + month
    2 if year + month + day
    Raises ValueError for anything else.
    '''
    tok = name.split(delim)[idx]
    if set(tok[4:]).issubset('-'):
        return 0
    if tok[4] == '-' and tok[5:7].isdecimal():
        # at least a month is present
        if set(tok[7:]).issubset('-'):
            return 1
        if tok[7] == '-' and tok[8:10].isdecimal() and len(tok) == 10:
            return 2
    raise ValueError('Invalid date string: %s' % tok)
def NumGapsIndivisibleThreeLT5(s):
    '''
    For covid, count internal gap runs ('-') whose length is less than 5
    and not a multiple of 3.  Leading and trailing dashes are ignored.
    '''
    core = s.strip('-')  # leading/trailing dashes are alignment padding
    count = 0
    for ch, run in itertools.groupby(core):
        if ch == '-':
            width = len(list(run))
            if width < 5 and width % 3 != 0:
                count += 1
    return count
# Memo tables for the distribution searches below, keyed on call arguments.
# NOTE(review): scores_cache does not appear to be read or written in this
# file's visible code -- possibly vestigial.
all_distrs_cache = {}
scores_cache = {}
def AllDistrs(n, ncat, cat_maxes):
    """All ways to distribute *n* items over *ncat* categories, where category
    i can hold at most cat_maxes[i] items.  Divide-and-conquer with memoization.

    NOTE(review): the base cases return a set while the recursive case
    returns (and caches) a list; callers appear to only iterate the result.
    """
    if n == 0:
        return set([ncat * (0,)])
    if ncat == 1:
        return set([(n,)])
    if type(cat_maxes) != tuple:
        cat_maxes = tuple(cat_maxes)  # must be hashable for the cache key
    if (n, ncat, cat_maxes) in all_distrs_cache:
        return all_distrs_cache[(n, ncat, cat_maxes)]
    res = []
    half = ncat // 2
    # i = number placed in the first half; range keeps both halves feasible.
    for i in range(max(0, n - sum(cat_maxes[half:])),
                   min(n, sum(cat_maxes[:half])) + 1):
        first = AllDistrs(i, len(cat_maxes[:half]), cat_maxes[:half])
        second = AllDistrs(n - i, len(cat_maxes[half:]), cat_maxes[half:])
        res.extend(f + s for f in first for s in second)
    all_distrs_cache[(n, ncat, cat_maxes)] = res
    return res
all_best_distrs_cache = {}  # memo table for AllBestDistrs, keyed on arguments

def AllBestDistrs(n, ncat, cat_maxes):
    '''
    Returns only "best" distributions with regard to
    numbers on same date: those whose descending-sorted count profile is
    lexicographically minimal (i.e., peak counts are as small as possible).
    Divide-and-conquer with memoization, like AllDistrs.
    '''
    if n == 0:
        return [ncat * (0,)]
    if ncat == 1:
        return [(n,)]
    if type(cat_maxes) != tuple:
        cat_maxes = tuple(cat_maxes)
    if (n, ncat, cat_maxes) in all_best_distrs_cache:
        return all_best_distrs_cache[(n, ncat, cat_maxes)]
    res = []
    best = None # will contain sorted best distr
    half = ncat // 2
    for i in range(max(0, n - sum(cat_maxes[half:])),
                   min(n, sum(cat_maxes[:half])) + 1):
        first = AllBestDistrs(i, len(cat_maxes[:half]), cat_maxes[:half])
        second = AllBestDistrs(n - i, len(cat_maxes[half:]), cat_maxes[half:])
        if first and second:
            # All members of each half share the same sorted profile, so the
            # first member is representative of the combination.
            cand_best = sorted(first[0] + second[0], reverse=True)
            if cand_best == best:
                res.extend(f + s for f in first for s in second)
            elif best is None or cand_best < best:
                res = [f + s for f in first for s in second]
                best = cand_best
    all_best_distrs_cache[(n, ncat, cat_maxes)] = res
    return res
# Load some pre-computed results for speed, if they can be found.
# NOTE(review): pickle.load on a file shipped alongside the program is only
# safe if subsamp_data.pic is trusted; never point this at external data.
mec_basename = 'subsamp_data.pic'
prog_dir = os.path.dirname(os.path.realpath(__file__))
mec_fname = os.path.join(prog_dir, mec_basename)
if os.path.exists(mec_fname):
    min_energy_cache = pickle.load(open(mec_fname, 'rb'))
else:
    # Missing cache only slows the 'U' (uniformity) computations down.
    msg = ''
    msg += '\n'
    msg += '** WARNING: file %s not found in %s\n' % (mec_basename, prog_dir)
    msg += '** If the \'U\' criterion is used, computations may be\n'
    msg += '** slower than they could be.\n'
    msg += '** Results will be correct nonetheless.\n'
    msg += '\n'
    sys.stderr.write(msg)
    min_energy_cache = {}
def MinEnergy(n, ncat, cat_maxes):
    """All distributions of *n* over *ncat* capped categories that minimize
    Energy().  Enumerates half-distributions via AllDistrs and prunes pairs
    whose partial energies already exceed the best found so far.
    """
    if n == 0:
        return set([ncat * (0,)])
    if ncat == 1:
        return set([(n,)])
    if type(cat_maxes) != tuple:
        cat_maxes = tuple(cat_maxes)
    if (n, cat_maxes) in min_energy_cache:
        return min_energy_cache[(n, cat_maxes)]
    half = ncat // 2
    # Everything piled in one category is an upper bound on the minimum.
    en_min = Energy((n,) + (ncat-1)*(0,))
    res_min = []
    for i in range(max(0, n - sum(cat_maxes[half:])),
                   min(n, sum(cat_maxes[:half])) + 1):
        first = AllDistrs(i, len(cat_maxes[:half]), cat_maxes[:half])
        second = AllDistrs(n - i, len(cat_maxes[half:]), cat_maxes[half:])
        first = list(first)
        second = list(second)
        energy_first = [Energy(f, ncat, n) for f in first]
        energy_second = [Energy(s, ncat, n) for s in second]
        #for i1, d1 in enumerate(first):
        # Visit candidates in order of increasing partial energy so pruning
        # kicks in as early as possible.
        for i1 in sorted(range(len(energy_first)),
                         key=lambda x: energy_first[x]):
            if energy_first[i1] > en_min:
                continue
            d1 = first[i1]
            for i2 in sorted(range(len(energy_second)),
                             key=lambda x: energy_second[x]):
                if energy_first[i1] + energy_second[i2] > en_min:
                    continue
                d2 = second[i2]
                d = d1 + d2
                en = Energy(d)
                if en <= en_min:
                    if en < en_min:
                        en_min = en
                        res_min = [d]
                    else:
                        res_min.append(d)
    min_energy_cache[n, cat_maxes] = res_min
    return res_min
def MinEnergy2(n, ncat, cat_maxes):
    """Like MinEnergy, but enumerates only AllBestDistrs half-distributions,
    shrinking the search space.  Shares min_energy_cache with MinEnergy.
    """
    if n == 0:
        return set([ncat * (0,)])
    if ncat == 1:
        return set([(n,)])
    if type(cat_maxes) != tuple:
        cat_maxes = tuple(cat_maxes)
    if (n, cat_maxes) in min_energy_cache:
        return min_energy_cache[(n, cat_maxes)]
    half = ncat // 2
    en_min = Energy((n,) + (ncat-1)*(0,))
    res_min = []
    for i in range(max(0, n - sum(cat_maxes[half:])),
                   min(n, sum(cat_maxes[:half])) + 1):
        first = AllBestDistrs(i, len(cat_maxes[:half]), cat_maxes[:half])
        second = AllBestDistrs(n - i, len(cat_maxes[half:]), cat_maxes[half:])
        first = list(first)
        second = list(second)
        energy_first = [Energy(f, ncat, n) for f in first]
        energy_second = [Energy(s, ncat, n) for s in second]
        #for i1, d1 in enumerate(first):
        # Increasing-partial-energy order maximizes early pruning.
        for i1 in sorted(range(len(energy_first)),
                         key=lambda x: energy_first[x]):
            if energy_first[i1] > en_min:
                continue
            d1 = first[i1]
            #for i2, d2 in enumerate(second):
            for i2 in sorted(range(len(energy_second)),
                             key=lambda x: energy_second[x]):
                if energy_first[i1] + energy_second[i2] > en_min:
                    continue
                d2 = second[i2]
                d = d1 + d2
                en = Energy(d)
                if en <= en_min:
                    if en < en_min:
                        en_min = en
                        res_min = [d]
                    else:
                        res_min.append(d)
    min_energy_cache[n, cat_maxes] = res_min
    return res_min
def EnergyKernel(t, len_tot):
    """Interaction of prefix distribution *t* with the remaining positions.

    Returns a list of length len_tot - len(t): entry j is the energy that one
    item at remaining position j would contribute through interaction with
    everything in *t*, in units of LCM(len_tot - 1) / distance.
    """
    lcm = LCM(len_tot - 1)
    kern = (len_tot - len(t)) * [0]
    for i, n in enumerate(t):
        if not n:
            continue
        for j in range(len(kern)):
            d = len(t) - i + j  # distance from position i to remaining slot j
            kern[j] += lcm // d * t[i]
    return kern
def combinations(items, k):
    """Yield every k-element combination of *items*, preserving order.

    Recursive: each result keeps the first chosen element and extends it
    with combinations of the remaining suffix.
    """
    if len(items) == k:
        yield items
    elif k == 0:
        yield tuple()
    else:
        for start in range(len(items) - k + 1):
            for tail in combinations(items[start + 1:], k - 1):
                yield (items[start],) + tail
def nchoosek(n, k):
    """Binomial coefficient C(n, k), computed with integer products.

    Uses the larger of k and n-k so the loops are as short as possible,
    and cross-checks the integer division against float division,
    raising RuntimeError on disagreement.
    """
    k = max(k, n - k)
    numerator = 1
    for factor in range(k + 1, n + 1):
        numerator *= factor
    denominator = 1
    for factor in range(1, n - k + 1):
        denominator *= factor
    result = numerator // denominator
    if numerator / denominator != result:
        raise RuntimeError('%d != %f; num=%d, den=%d'
                           % (result, numerator / denominator,
                              numerator, denominator))
    return result
def inside_out(it):
    """Yield elements of *it* starting at the middle and working outward:
    second half forward, interleaved with the first half backward."""
    seq = list(it)
    half = len(seq) // 2
    front = seq[:half][::-1]  # first half, reversed for outward order
    back = seq[half:]
    for pos, val in enumerate(back):
        yield val
        if pos < len(front):
            yield front[pos]
def MinEnergy3(n, cat_maxes):
    """Minimum-Energy distributions of *n* over capped categories, via
    branch-and-bound over placements of the 'extra' items.

    Items up to level max(cat_maxes) - 1 are forced ('base'); only the k
    remaining items choose among the maximal-capacity positions ('poss').
    Small cases enumerate all combinations; large cases bound the optimum
    with MinEnergyHeur and prune half-combinations by partial energies.
    Relies on the module-global *verbose* flag for progress output.
    """
    ncat = len(cat_maxes)
    if n == 0:
        return set([ncat * (0,)])
    if ncat == 1:
        return set([(n,)])
    if type(cat_maxes) != tuple:
        cat_maxes = tuple(cat_maxes)
    if sum(cat_maxes) == n:
        return [cat_maxes]
    if (n, cat_maxes) in min_energy_cache:
        return min_energy_cache[(n, cat_maxes)]
    mx = max(cat_maxes)
    poss = tuple(i for i, x in enumerate(cat_maxes) # places where one might go
                 if x == mx)
    base = tuple(min(x, mx - 1) for x in cat_maxes) # definitely placed
    k = n - sum(base) # remaining to be placed
    # Seed with an arbitrary feasible placement of the k extras.
    res_min = [tuple(base[i] + (i in poss[:k]) for i in range(len(base)))]
    en_min = Energy(res_min[0]) + 1
    if verbose:
        print('Choosing %d from %d positions' % (k, len(poss)))
    ncombs = nchoosek(len(poss), k)
    if ncombs < 20000:
        # Small search space: brute-force every placement of the extras.
        for t in combinations(poss, k):
            cand = list(base)
            for i in t:
                cand[i] += 1
            cand = tuple(cand)
            en = Energy(cand)
            if en <= en_min:
                if en < en_min:
                    en_min = en
                    res_min = []
                res_min.append(cand)
    else:
        # This should be faster than above, at least for hard cases
        # get an upper bound on best energy
        ens = [Energy(MinEnergyHeur(n, cat_maxes))
               for i in range(50)]
        en_ub = min(ens)
        # find candidates whose lower bound is <= upper bound
        half = len(poss) // 2
        poss1 = poss[:half]
        poss2 = poss[half:]
        ncands = 0
        en_min = en_ub + 1
        res_min = []
        for k1 in inside_out(range(max(k - len(poss2), 0),
                                   min(k, len(poss1)) + 1)):
            k2 = k - k1
            # Left-half candidates cover categories 0..poss1[-1]; right-half
            # candidates cover the rest.
            cands1 = [tuple(base[i] + (i in t)
                            for i in range(poss1[-1]+1))
                      for t in combinations(poss1, k1)]
            cands2 = [tuple(base[i] + (i in t)
                            for i in range(poss1[-1]+1, len(cat_maxes)))
                      for t in combinations(poss2, k2)]
            ens1 = [Energy(t, n_tot=n, len_tot=ncat) for t in cands1]
            ens2 = [Energy(t, n_tot=n, len_tot=ncat) for t in cands2]
            # 'far' placements push extras to the extreme ends, giving a
            # lower bound on the cross-half interaction energy.
            far2 = tuple(base[i] + (i in poss2[-k2:])
                         for i in range(poss1[-1]+1, len(cat_maxes)))
            en_far2 = Energy(far2, n_tot=n, len_tot=ncat)
            min_en_comb1 = [Energy(t + far2) - ens1[i] - en_far2
                            for i, t in enumerate(cands1)]
            far1 = tuple(base[i] + (i in poss1[:k1])
                         for i in range(poss1[-1]+1))
            en_far1 = Energy(far1, n_tot=n, len_tot=ncat)
            min_en_comb2 = [Energy(far1 + t) - ens2[i] - en_far1
                            for i, t in enumerate(cands2)]
            en1_min = min(ens1)
            en2_min = min(ens2)
            # Discard half-candidates whose lower bound exceeds the
            # heuristic upper bound.
            cands1 = [(i, c) for i, c in enumerate(cands1)
                      if min_en_comb1[i] + ens1[i] + en2_min <= en_ub]
            cands2 = [(i, c) for i, c in enumerate(cands2)
                      if min_en_comb2[i] + ens2[i] + en1_min <= en_ub]
            for i1, c1 in cands1:
                kern = EnergyKernel(c1, ncat)
                en_c1_base = sum(k * base[len(c1)+i]
                                 for i, k in enumerate(kern))
                for i2, c2 in cands2:
                    # conjunction is faster than using max()?
                    if (ens1[i1] + ens2[i2] + min_en_comb1[i1] <= en_min
                        and
                        ens1[i1] + ens2[i2] + min_en_comb2[i2] <= en_min):
                        ncands += 1
                        en = ens1[i1] + ens2[i2] + en_c1_base
                        for i, c in enumerate(c2):
                            if c == mx:
                                en += kern[i]
                            if en > en_min:
                                en = None
                                break
                        if en is None:
                            continue
                        if en <= en_min:
                            cand = c1 + c2
                            if en < en_min:
                                en_min = en
                                res_min = []
                            res_min.append(cand)
        if verbose:
            print('Reduced from %d to %d candidates'
                  % (ncombs, ncands))
    min_energy_cache[n, cat_maxes] = res_min
    return res_min
def Minimize(t, cat_maxes):
    """Greedy local search: repeatedly move one item from a fuller category
    to an emptier one (respecting cat_maxes) whenever that lowers Energy,
    until no single move improves.  Returns the resulting list.
    """
    t = list(t)
    en_min = Energy(tuple(t))
    while True:
        for i in range(len(t)):
            if t[i] == 0:
                continue
            for j in range(len(t)):
                # Only consider moves into a strictly less-full,
                # non-saturated category.
                if j == i or t[j] == cat_maxes[j] or t[j] >= t[i]:
                    continue
                cand = list(t)
                cand[i] -= 1
                cand[j] += 1
                en_cand = Energy(tuple(cand))
                if en_cand < en_min:
                    t = cand
                    en_min = en_cand
                    break
            else:
                continue
            # An improving move was taken: restart the scan from scratch.
            break
        else:
            # occupied = [i for i, n in enumerate(t) if n]
            # for i in occupied[:-1]:
            #     for j in occupied:
            #         if j <= i or j == len(t) - 1:
            #             continue
            # No improving move exists: local minimum reached.
            break
    return t
def MinEnergyHeur(n, cat_maxes):
    """Heuristic low-Energy distribution: fill categories as evenly as the
    caps allow (random tie-breaking), then refine with Minimize().
    Randomized -- repeated calls may return different local minima.
    """
    ncat = len(cat_maxes)
    if n == 0:
        return set([ncat * (0,)])
    if ncat == 1:
        return set([(n,)])
    if type(cat_maxes) != tuple:
        cat_maxes = tuple(cat_maxes)
    # Initialize distribution: always add to a currently-least-full,
    # non-saturated category, chosen at random among ties.
    dist_ini = len(cat_maxes) * [0]
    for it in range(n):
        mn = min(c for i, c in enumerate(dist_ini)
                 if c < cat_maxes[i])
        tmp = [i for i, c in enumerate(dist_ini)
               if c == mn and c < cat_maxes[i]]
        idx = random.choice(tmp)
        dist_ini[idx] += 1
    res = Minimize(dist_ini, cat_maxes)
    res = tuple(res)
    return res
def Primes(n):
    '''
    Returns all primes <= n, in increasing order (trial division against
    previously found primes).
    '''
    found = []
    for cand in range(2, n + 1):
        if not any(cand % p == 0 for p in found):
            found.append(cand)
    return found
lcms = {}  # memo table: n -> LCM(1..n)

def LCM(n):
    '''
    Least common multiple of {1, 2, 3...n}.

    Computed as the product over primes p <= n of the largest power of p
    not exceeding n.  Results are memoized in *lcms*.
    '''
    if n in lcms:
        return lcms[n]
    primes = Primes(n)
    powers = []
    for p in primes:
        # Largest exponent i with p**i <= n.
        for i in itertools.count():
            if p**i > n:
                powers.append(i-1)
                break
    res = 1
    for i, p in enumerate(primes):
        res *= p**powers[i]
    lcms[n] = res
    return res
energy_cache = {}  # memo table keyed on (t, len_tot, n_tot)

def Energy(t, len_tot=None, n_tot=None):
    '''
    "Energy" of a distribution *t*: pairwise interaction summed over all
    item pairs, weighted by LCM(len_tot-1)/distance (so all weights are
    integers), with a heavy n_tot*lcm penalty per same-position pair.
    Lower energy means a more spread-out distribution.

    n_tot is the total number that will be chosen, which may
    be greater than sum(t) if t contains just part of the
    distribution.  It affects the energy associated
    with elements of t that are greater than one.
    By default (i.e., if None), it is sum(t).
    Similarly, len_tot is the total length of the distribution,
    and affects the scale of the result (lcm).  By default it
    is len(t).
    '''
    if n_tot is None:
        n_tot = sum(t)
    if len_tot is None:
        len_tot = len(t)
    if (t, len_tot, n_tot) in energy_cache:
        return energy_cache[(t, len_tot, n_tot)]
    lcm = LCM(len_tot - 1)
    indices = set([i for i, x in enumerate(t) if x])
    n = sum(t)
    # d = 0 term handles same-position pairs with the n_tot*lcm penalty.
    rv = sum([(lcm // d if d else n_tot*lcm) * sum([t[i]*t[i+d]
                                                    for i in indices
                                                    if i+d in indices
                                                    and i+d < len(t)])
              for d in range(len(t))])
    energy_cache[(t, len_tot, n_tot)] = rv
    return rv
def cumsum(l):
    """Return the running (cumulative) sums of *l* as a list."""
    return list(itertools.accumulate(l))
def UniformitySum(t):
    """L1 distance between the cumulative distribution of *t* and a perfectly
    uniform one, with both expressed as integers (scaled by n * len(t))."""
    n = sum(t)
    l = len(t)
    scaled = [l * c for c in cumsum(t)]  # actual cumulative, scaled by l
    uniform = cumsum(l * [n])            # uniform cumulative, same scale
    return sum(abs(a - b) for a, b in zip(scaled, uniform))
def FindMostUni(n, cat_maxes, cats_left=0, n_left=0, cats_right=0, n_right=0):
    """Distributions of *n* over capped categories that minimize deviation
    from a uniform cumulative distribution (plus a same-category penalty).

    cats_left/n_left and cats_right/n_right describe the context of this
    sub-range during the divide-and-conquer recursion: how many categories
    and items lie to either side.  Returns (list of best distributions,
    best score).
    """
    cats_tot = len(cat_maxes) + cats_left + cats_right
    n_tot = n + n_left + n_right
    ex = [(cats_left + 1 + i) * n_tot  # expected under uniformity
          for i in range(len(cat_maxes))]
    if len(cat_maxes) == 1:
        if n > cat_maxes[0]:
            raise RuntimeError('n (%d) > cat_maxes[0] (%d)' % (n, cat_maxes[0]))
        # Score = deviation at this position + same-category pair penalty.
        return [(n,)], abs((n + n_left) * cats_tot - ex[0]) + cats_tot*n_tot*n*(n-1)
    if sum(cat_maxes) == 0 or n == 0:
        return ([len(cat_maxes) * (0,)],
                sum([abs(n_left * cats_tot - x) for x in ex]))
    half = len(cat_maxes) // 2
    score_best = (len(cat_maxes) * n_tot * cats_tot  # any real score will be
                  + cats_tot*n_tot*n*(n-1))          # less than this
    dists_best = []
    for n_first in range(0, n + 1):
        n_second = n - n_first
        if n_first > sum(cat_maxes[:half]) or n_second > sum(cat_maxes[half:]):
            # impossible to satisfy
            continue
        dists1, score1 = FindMostUni(n_first, cat_maxes[:half],
                                     cats_left, n_left,
                                     cats_right + len(cat_maxes) - half,
                                     n_right + n_second)
        dists2, score2 = FindMostUni(n_second, cat_maxes[half:],
                                     cats_left + half,
                                     n_left + n_first,
                                     cats_right, n_right)
        score = score1 + score2
        if score <= score_best:
            dists = [d1 + d2 for d1 in dists1 for d2 in dists2]
            if score < score_best:
                score_best = score
                dists_best = []
            dists_best.extend(dists)
    return dists_best, score_best
# Days per month, 1-indexed (index 0 unused), for common and leap years.
days_in_mo_common = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
days_in_mo_leap = [None, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def SubSample(ids, number, priorities, delim, date_index, aln):
global verbose
if verbose:
print('Subsample %d from %d isolates' % (number, len(ids)))
print(list(ids)[0])
order_ecs = [shuffled(ids)] # Equivalence classes under ordering, in order
if len(ids) <= number: # could come before shuffling above for speed
return list(ids) # new list is defensive
if priorities != 'N': # 'N' means "none", i.e., no preferences.
# Sort by lowest priority first, moving eventually to highest.
# Relies on the fact that sort is a stabile sort.
for ch in priorities:
if ch == 'L':
#l.sort(key=lambda name: len(aln[name].strip('-')), reverse=True)
tmp = [GroupBy(l, lambda name: len(aln[name].strip('-')))
for l in order_ecs]
order_ecs = [[d[val] for val in sorted(d, reverse=True)]
for d in tmp]
order_ecs = list(flatten(order_ecs))
elif ch == 'D':
tmp = [GroupBy(l, lambda name: DateCompleteness(name,
delim,
date_index))
for l in order_ecs]
order_ecs = [[d[val] for val in sorted(d, reverse=True)]
for d in tmp]
order_ecs = list(flatten(order_ecs))
elif ch == 'M':
# Calling bool() on completeness
# produces True iff month is present
tmp = [GroupBy(l,
lambda name: bool(DateCompleteness(name,
delim,
date_index)))
for l in order_ecs]
order_ecs = [[d[val] for val in sorted(d, reverse=True)]
for d in tmp]
order_ecs = list(flatten(order_ecs))
elif ch == 'G':
tmp = [GroupBy(l,
lambda name: NumGapsIndivisibleThreeLT5(aln[name]))
for l in order_ecs]
order_ecs = [[d[val] for val in sorted(d)]
for d in tmp]
order_ecs = list(flatten(order_ecs))
elif ch == 'A':
tmp = [GroupBy(l, lambda name: sum([aln[name].count(nuc)
for nuc in 'AGCT-']))
for l in order_ecs]
order_ecs = [[d[val] for val in sorted(d, reverse=True)]
for d in tmp]
order_ecs = list(flatten(order_ecs))
elif ch == 'N':
raise ValueError('N cannot occur with additional letters '
'in priority string')
elif ch == 'U':
continue
else:
raise ValueError('Unrecognized character '
'in priorities string: %s' % ch)
l = list(flatten(order_ecs))
if priorities[0] != 'U':
return l[:number]
# Remainder of function deals with date uniformity ('U')
dc = [DateCompleteness(id, delim, date_index) for id in l]
with_month = [i for i, x in enumerate(dc) if x > 0]
#sad = ScoresAllDistrs(min(number, len(with_month)), 12)
month_counts = 12 * [0]
month_counts_w_date = 12 * [0]
for idx in with_month:
id = l[idx]
d = id.split(delim)[date_index]
month = int(d.split('-')[1])
month_counts[month - 1] += 1
if dc[idx] == 2:
month_counts_w_date[month - 1] += 1
if sum(month_counts) <= number:
# must use all with month, so cannot do any unifority optimization
return l[:number]
for i in itertools.count(1):
# at most i from each month
mc_red = [min(c, i) for c in month_counts]
if sum(mc_red) >= number or mc_red == month_counts:
break
if verbose:
print('Computing distribution across months')
all_distrs = sorted(AllBestDistrs(min(number, sum(month_counts)),
len(mc_red), mc_red))
by_score = GroupBy(all_distrs, lambda t: Energy(t))
cands = by_score[min(by_score)]
# sanity checks
if len(cands) == 0:
raise RuntimeError('Zero candidates from FindMostUni')
if any([sum(cand) != min(number, sum(month_counts)) for cand in cands]):
raise RuntimeError('Wrong sum of cand: expected %d, got %d'
% (min(number, sum(month_counts)), sum(cand)))
if any([Energy(cand) != min(by_score) for cand in cands]):
raise RuntimeError('Returned score (%d) '
'does not equal calculated score (%s) for %s'
% (score, [Energy(cand) for cand in cands],
cands))
if len(cands) > 1:
# pick those requiring the smallest number of incomplete dates
inc_counts = []
for cand in cands:
tmp = [cand[mo] - month_counts_w_date[mo] for mo in range(12)]
tmp = sum([x for x in tmp if x > 0])
inc_counts.append(tmp)
mn = min(inc_counts)
cands = [cand for i, cand in enumerate(cands) if inc_counts[i] == mn]
yr = int(l[0].split(delim)[date_index].split('-')[0]) # rely all same year
cands_scores = {}
for cand in cands:
chosen = []
for mo in range(1, 13):
if cand[mo - 1] == 0:
continue
ids_mo = [id for i, id in enumerate(l)
if dc[i] == 2
and int(id.split(delim)[date_index].split('-')[1]) == mo]
dates = {id : int(id.split(delim)[date_index].split('-')[2])
for id in ids_mo}
# critical that order within each list is order in l
ids_by_date = GroupBy(ids_mo, lambda id: dates[id])
date_counts = {date : len(l) for date, l in ids_by_date.items()}
if yr % 4:
days_in_mo = days_in_mo_common[mo]
else:
days_in_mo = days_in_mo_leap[mo]
date_counts = [date_counts.get(i + 1, 0)
for i in range(days_in_mo)]
if sum(date_counts) == 0:
mo_cands = [days_in_mo * (0,)]
else:
for i in itertools.count(1):
# at most i from each day
dc_red = [min(c, i) for c in date_counts]
if sum(dc_red) >= cand[mo - 1] or dc_red == date_counts:
break
if verbose:
print('Computing distribution within month %d; ' % mo,
end='')
print('Choosing %d from %d isolates with full dates'
% (cand[mo - 1], sum(date_counts)))
tick()
# ress = [MinEnergyHeur(min(cand[mo - 1],
# sum(date_counts)),
# dc_red)
# for i in range(10)]
# print('%d unique' % len(set(ress)))
# en = [Energy(t) for t in ress]
# mn = min(en)
# mo_cands = [t for i, t in enumerate(ress)
# if en[i] == mn]
mo_cands = list(sorted(MinEnergy3(min(cand[mo - 1],
sum(date_counts)),
dc_red)))
# sanity checks
if (len(mo_cands) == 0):
raise RuntimeError('0 month candidates')
if any([sum(mo_cand) != min(cand[mo - 1],
sum(date_counts))
for mo_cand in mo_cands]):
raise RuntimeError('Wrong number chosen: expected %d; '
'mo_cands = %s'
% (min(cand[mo - 1], sum(date_counts)),
mo_cands))
mo_cand = mo_cands[0]
to_add = []
for i, num in enumerate(mo_cand):
if num:
to_add.extend(ids_by_date[i + 1][:num])
if verbose:
print('Cat. maxes: %s' % (dc_red,))
print('Result: %s' % (mo_cand,))
print('Minimum energy %d' % Energy(mo_cand))
tock()
if len(to_add) < cand[mo - 1]:
nmissing = cand[mo - 1] - len(to_add)
ids_mo_dateless = [id for idx, id in enumerate(l)
if dc[idx] == 1
and int(id.split(delim)[date_index]
.split('-')[1]) == mo]
to_add.extend(ids_mo_dateless[:nmissing])
chosen.extend(to_add)
# we go with the first candidate
nmissing = number - len(chosen)
if nmissing:
# fill remaining requirement with monthless IDs
nadded = 0
for idx, c in enumerate(dc):
if not c:
chosen.append(l[idx])
nadded += 1
if nadded == nmissing:
break
if len(chosen) < min(number, len(ids)):
raise RuntimeError('Fewer chosen than expected; ids[0] = %s; '
'cand = %s; mo_cand = %s; '
'number expected = %d; number chosen = %d'
% (ids[0], cand, mo_cand,
min(number, len(ids)), len(chosen)))
if verbose:
print()
return chosen
def IndexToColName(idx):
    '''
    Given a zero-based index, return the corresponding column name
    in a spreadsheet.
    ex.:
    0 -> 'A'
    1 -> 'B'
    25 -> 'Z'
    26 -> 'AA'
    27 -> 'AB'
    52 -> 'BA'
    '''
    # Spreadsheet column names are "bijective base 26": there is no zero
    # digit, so 'Z' (26) is followed by 'AA' (27).  Subtracting 1 before
    # each divmod keeps remainders in 0-25.  (The previous plain base-26
    # conversion produced chr(64) == '@' whenever the one-based value was
    # a multiple of 26, e.g. idx 25 -> 'A@' instead of 'Z'.)
    n = idx + 1 # zero- to one-based
    res = ''
    while n:
        n, rem = divmod(n - 1, 26)
        res = chr(65 + rem) + res # 'A' is 65, 'B' is 66, etc.
    return res
empty_field_label = '(empty field)'
def WriteEditableCSV(groups, nsamp, fname):
    '''
    Write a file with two counts for each category:
    the total number of isolates in set, and the
    default number to be sampled (min(nsamp, total)).
    Meant to allow editing to express different preferences
    for number to be sampled from each category.
    groups keys are tuples specifying categories;
    lengths of values equal total counts
    nsamp Requested number of samples per category
    fname Name of file to write
    '''
    import csv
    with open(fname, 'w', newline='') as fid:
        csv_writer = csv.writer(fid)
        nfields = len(list(groups)[0])
        # Layout: roughly half of the category fields run "vertically"
        # (as column headers, with two columns per vertical combination:
        # total count, then editable sample count), the rest run
        # "horizontally" (one data row per horizontal combination).
        if True: #nfields > 1:
            if nfields > 1:
                nv = (nfields + 1) // 2 # number of fields to run vertically
                nh = nfields - nv # number of fields to run horizontally
            else:
                # if just one, make it horizontal
                nv = 0
                nh = 1
            fvs = sorted(set(t[:nv] for t in groups))  # vertical field tuples
            fhs = sorted(set(t[nv:] for t in groups))  # horizontal field tuples
            # nh label cols + 2 cols per vertical tuple + spacer + total col
            row_len = nh + 2 * len(fvs) + 2
            rows_written = 0
            # header: nv rows
            rows = [row_len * [''] for i in range(nv)]
            for i, t in enumerate(fvs):
                for j in range(nv):
                    # Only write a header value where it changes from the
                    # previous column group (spreadsheet-style grouping).
                    if i == 0 or (t[:j + 1] != fvs[i - 1][:j + 1]):
                        val = fvs[i][j]
                        if val == '':
                            val = empty_field_label
                        rows[j][nh + 2 * i] = val
            csv_writer.writerows(rows)
            rows_written += len(rows)
            # data rows
            for i, fh in enumerate(fhs):
                row = row_len * ['']
                # field values in initial cols
                for j in range(nh):
                    # As above: only write where the value changes.
                    if i == 0 or (fh[:j + 1] != fhs[i - 1][:j + 1]):
                        row[j] = fh[j]
                # counts
                for j, fv in enumerate(fvs):
                    cat = fv + fh # category (tuple)
                    if cat in groups:
                        tot_count = len(groups[cat])
                        samp_count = min([nsamp, tot_count]) # sample <= tot
                        row[nh + 2 * j] = str(tot_count)
                        row[nh + 2 * j + 1] = str(samp_count)
                # Total samples formula
                if i == 0:
                    # First data row carries the labels for the extra
                    # trailing columns.
                    row[-1] = 'Total Samples'
                    row.append('Force Inclusion of:')
                if i == 1:
                    # Second data row carries a spreadsheet SUM formula over
                    # every sample-count column (the odd offsets nh+1,
                    # nh+3, ... in zero-based terms; IndexToColName converts
                    # them to spreadsheet letters).
                    sum_arg = ','.join(['%s%d:%s%d'
                                        % (IndexToColName(col_idx), nv + 1,
                                           IndexToColName(col_idx), nv + len(fhs))
                                        for col_idx in
                                        range(nh + 1, nh + 1 + 2 * max([len(fvs), 1]), 2)])
                    sum_formula = 'sum(%s)' % sum_arg
                    row[-1] = '=' + sum_formula
                csv_writer.writerow(row)
                rows_written += 1
def ReadEditableCSV(fname):
    '''
    Read sample count info for categories from CSV file
    produced by WriteEditableCSV and possibly edited.
    Returns a pair (counts, forced_includes):
    counts           requested sample counts by category
                     (tuple of field values -> int, zero counts omitted)
    forced_includes  list of accessions/IDs whose inclusion the user
                     forces (possibly empty)
    '''
    # Read rows
    import csv
    with open(fname, newline='') as fid:
        rdr = csv.reader(fid)
        rows = [r for r in rdr]
    # First data row is first to have nonempty col 1
    nv = [i for i in range(len(rows)) if rows[i][0] != ''][0] # vert fields
    hdr = rows[:nv]
    data = rows[nv:]
    if len(hdr) == 0:
        # Given rules for writing, this should only happen when
        # there is a single field, with horizontal orientation,
        # meaning (confusingly?) that each row has
        # category value in first col, total count in second,
        # and sampled count in third.
        res = {(row[0],) : int(row[2])
               for row in data
               if row[2].strip() and int(row[2]) != 0}
        # Bug fix: callers unpack (counts, forced_includes), but this
        # path previously returned only the dict.  This layout has no
        # "Force Inclusion" column, so the forced list is empty.
        return res, []
    # Last row of header tells us number of vert cats and number of hdr fields
    nvcats = len([s for s in hdr[-1] if s != '']) # num vert cats
    nh = [i for i in range(len(hdr[-1])) if hdr[-1][i] != ''][0]
    nhcats = len(data)
    # Fill in header blanks by repeating the value to their left
    # (WriteEditableCSV only writes a header value where it changes).
    for row in hdr:
        for i in range(nh, nh + 2 * nvcats):
            if row[i] == '':
                row[i] = row[i - 1]
    # Any originally empty fields have been given a nonempty label;
    # replace them with empty string.
    for row in hdr:
        if empty_field_label in row:
            for i in range(len(row)):
                if row[i] == empty_field_label:
                    row[i] = ''
    # Vertical cats, in left to right order
    tp = [x for x in zip(*hdr)]
    vcats = [tp[i] for i in range(nh + 1, nh + 2 * nvcats + 1, 2)]
    # Get the sample counts
    counts = {}
    hcat_prev = None
    for rnum, row in enumerate(data):
        hcat = row[:nh]
        # Blank horizontal labels repeat the value from the previous row.
        for i in range(len(hcat)):
            if hcat[i] == '':
                hcat[i] = hcat_prev[i]
        # Any originally empty fields have been given a nonempty label;
        # replace them with empty string.
        if empty_field_label in hcat:
            for i in range(len(hcat)):
                if hcat[i] == empty_field_label:
                    hcat[i] = ''
        # Get counts for categories with nonzero counts
        hcat = tuple(hcat)
        hcat_prev = hcat
        for i in range(nvcats):
            cnum = nh + 2 * i + 1
            if row[cnum].strip():
                count = int(row[cnum])
                if count:
                    counts[vcats[i] + hcat] = count
    # Forced includes live in the second data row, two columns past the
    # last sample-count column (where WriteEditableCSV put the label).
    forced_includes = []  # bug fix: previously unbound if column absent
    cnum = nh + 2 * nvcats + 2
    if len(data) > 1:  # bug fix: guard against a single data row
        row = data[1]
        if len(row) >= cnum + 1:
            # Allow accessions or identifiers to be separated by ',' or whitespace
            forced_includes = row[cnum].replace(',', ' ').split()
    return counts, forced_includes
def LaunchFile(fname):
    '''
    Open a file with appropriate (hopefully) application.
    Return 0 on success, return (*not* raise) exception on failure.
    Uses OS-dependent mechanisms.
    For other than Windows and MacOS (e.g., linux), best we can do
    is through web browser.
    '''
    try:
        platform_name = sys.platform
        if platform_name.startswith('win32'):
            import subprocess
            # The extra empty argument keeps 'start' from treating a
            # quoted file name (one with spaces) as a window title.
            subprocess.call(['start', '', fname],
                            shell=True)
        elif platform_name.startswith('darwin'):
            import subprocess
            subprocess.call(['open', fname])
        else:
            # No generic launcher elsewhere; fall back to the browser.
            webbrowser.open(fname)
    except Exception as launch_err:
        # Never let a failed launch crash the program; the (truthy)
        # exception object signals failure to the caller.
        return launch_err
    return 0 # 0 on success
################### Find largest min distance among removed #################
# Necessary do-nothing functions
def tic():
    # Do-nothing placeholder for a start-timer hook; MismatchCount calls
    # tic()/toc() around its expensive steps, so a real timer can be
    # swapped in without touching that code.
    pass
def toc(s=None):
    # Do-nothing placeholder for a stop-timer hook; 's' is an optional
    # label for the timed step (ignored here).
    pass
def profile(f):
    # Identity decorator standing in for a line-profiler's @profile,
    # so decorated functions run unchanged when no profiler is active.
    return f
# The class that does the computations
class MismatchCount(object):
    '''
    The purpose of this code is to determine the largest minimum distance
    of the sequences in one set to any of the sequences in another set.
    With the first set referred to as the queries and the second as the
    subjects, this is max for q in Q of (min for s in S of d(q, s)).
    This minimum, along with the corresponding query indices, is returned
    by MaxMinDist(). For simplest use of the class, the only other method
    that might be called by user is convenience function EndGapsToN().
    Computing all pairwise dists for large sets would be slow, but is
    unnecessary. Comparing some queries to all subjects gives a lower
    bound on max. Comparing all queries to some subjects gives an upper
    bound for each min. Queries for which the latter is smaller than the
    former can then be eliminated from consideration. Additional comparisons
    and eliminations follow.
    Additional speed-ups:
    1. Calculate min differences on partitions of the sequence positions,
    after uniquifying (MMCountPieces).
    I.e., use seq[0::10], seq[1::10] ... seq[9::10], and uniquify before
    calculating distances. These smaller sequences are more commonly
    identical between isolates, and number to compare can go down by
    a large factor.
    2. All query against all subject comparison of seq[0::10]
    (with uniquification) is fast and immediately provides good candidates
    for large minimum distance.
    '''
    # IUPAC nucleotide codes paired 1:1 with 4-bit masks (one bit each
    # for A, G, C, T); two codes "match" iff their masks share a bit.
    nucs = 'AGCTRYSWMKBHDVN'
    bits = [0b0001, 0b0010, 0b0100, 0b1000, # AGCT
            0b0011, 0b1100, 0b0110, 0b1001, 0b0101, 0b1010, # RYSWMK
            0b1110, 0b1101, 0b1011, 0b0111, # BHDV
            0b1111] # N
    # Precomputed lookup: (char1, char2) -> bool.  Gap rule: a gap
    # matches only a gap.
    nucs_match = {}
    for j, nuc2 in enumerate(nucs):
        for i, nuc1 in enumerate(nucs):
            nucs_match[(nuc1, nuc2)] = bool(bits[i] & bits[j])
    nucs_match['-', '-'] = True
    for nuc in nucs:
        nucs_match[nuc, '-'] = False
        nucs_match['-', nuc] = False
    verbose = False
    @staticmethod
    def EndGapsToN(seq):
        '''Replace leading and trailing runs of '-' with 'N' characters,
        preserving the overall length.'''
        orig_len = len(seq)
        s1 = seq.lstrip('-')
        lgaps = orig_len - len(s1)
        seq = s1.rstrip('-')
        rgaps = len(s1) - len(seq)
        return lgaps * 'N' + seq + rgaps * 'N'
    @staticmethod
    def MakeColDescs(seqs):
        '''
        For each column of alignment, make a dict mapping characters to
        rows in which they occur.
        '''
        cols = zip(*seqs)
        groups = [GroupBy(range(len(seqs)), lambda i: col[i])
                  for col in cols]
        return groups
    @classmethod
    def MMCount(cls, queries, subjects, scol_descs=None):
        '''
        Return all_counts, where all_counts[q][s] is the number of
        alignment positions at which query q and subject s do not match
        (per nucs_match).  If scol_descs (MakeColDescs output for the
        subjects) is supplied, 'subjects' itself is not used and may be
        e.g. None.
        '''
        nucs_match = cls.nucs_match
        if scol_descs is None:
            nsubjects = len(subjects)
            scol_descs = cls.MakeColDescs(subjects)
        else:
            # do not use arg 'subjects', so that it can be, e.g., None
            nsubjects = sum([len(l) for l in scol_descs[0].values()])
        qcol_descs = cls.MakeColDescs(queries)
        all_counts = [nsubjects * [0] for i in range(len(queries))]
        all_common_counts = [0 for i in range(len(queries))]
        for pos, qcol_desc in enumerate(qcol_descs):
            # For each query character at this position, the subject
            # characters it does NOT match.
            non_matching = {qnuc : [snuc for snuc in scol_descs[pos]
                                    if not nucs_match[qnuc, snuc]]
                            for qnuc in qcol_descs[pos]}
            for qnuc in qcol_descs[pos]:
                num_non_matching = sum(len(scol_descs[pos][snuc])
                                       for snuc in non_matching[qnuc])
                if num_non_matching <= nsubjects - num_non_matching:
                    # Straightforward: increment appropriate mismatch counts
                    snums = list(flatten(scol_descs[pos][snuc]
                                         for snuc in non_matching[qnuc]))
                    for qnum in qcol_descs[pos][qnuc]:
                        #all_counts_qnum = all_counts[qnum] # just for speed
                        for snum in snums:
                            all_counts[qnum][snum] += 1
                else:
                    # Time-saving trick:
                    # increment overall count, decrement matchers
                    snums = list(flatten(scol_descs[pos][snuc]
                                         for snuc in scol_descs[pos]
                                         if snuc not in non_matching[qnuc]))
                    for qnum in qcol_descs[pos][qnuc]:
                        all_common_counts[qnum] += 1
                        for snum in snums:
                            all_counts[qnum][snum] -= 1
        # Finish the time-saving trick
        for qnum in range(len(queries)):
            for snum in range(nsubjects):
                all_counts[qnum][snum] += all_common_counts[qnum]
        return all_counts
    @classmethod
    def MMCountPieces(cls, queries, subjects, pieces, col_descs=None):
        '''
        Computes min distance over subjects for each query,
        analyzing subsets of positions separately for speed.
        This is fast for typical sequence sets because number of
        unique values of, e.g., seq[i::10], for any 0 <= i < 10, is
        much smaller than the number of sequences for a large set.
        The function returns only the minimum for each query.
        Computing the distances for every query/subject pair would
        be too time-consuming.
        queries, subjects are iterables of sequences
        pieces is the number of subsets of positions to use
        col_descs optional; results of MakeColDescs for each
        position subset of subjects. Can speed things
        up if caller already has it.
        '''
        tic()
        # Group query/subject indices by their strided substrings so
        # each unique substring is compared only once.
        qpieces = [GroupBy(range(len(queries)),
                           lambda i: queries[i][start::pieces])
                   for start in range(pieces)]
        spieces = [GroupBy(range(len(subjects)),
                           lambda i: subjects[i][start::pieces])
                   for start in range(pieces)]
        if col_descs is None:
            res = [cls.MMCount(qpieces[i].keys(), spieces[i].keys())
                   for i in range(pieces)]
        else:
            res = [cls.MMCount(qpieces[i].keys(), None, col_descs[i])
                   for i in range(pieces)]
        toc('counts of pieces')
        # Map each unique substring back to its row in the count matrices.
        qmap = [{seq : i for i, seq in enumerate(qp)} for qp in qpieces]
        smap = [{seq : i for i, seq in enumerate(sp)} for sp in spieces]
        all_sindices = [[smap[p][s[p::pieces]] for p in range(pieces)]
                        for s in subjects]
        mins = []
        for i, q in enumerate(queries):
            qindices = [qmap[p][q[p::pieces]] for p in range(pieces)]
            mn = len(q) # dist can't be any larger than this
            # Find the min for this query over all subjects
            for j, s in enumerate(subjects):
                sindices = all_sindices[j]
                sm = 0 # sum of dists for pieces
                for p in range(pieces):
                    sm += res[p][qindices[p]][sindices[p]]
                    if sm >= mn:
                        # Total cannot be smaller than min so far
                        break
                else:
                    # for-else: only reached when no piece pushed the
                    # partial sum past the current minimum.
                    mn = sm
            mins.append(mn)
        return mins
    class TooManyItError(RuntimeError):
        # NOTE(review): not raised anywhere in the code shown here.
        pass
    @profile
    @classmethod
    def MaxMinDist(cls, queries, subjects, end_gaps_to_n):
        '''
        Find the max over all q of the min over all s of the distance.
        Returns (max_min, indices of queries achieving it).
        '''
        skip = max([len(queries) // 65, 1])
        skip2 = max([skip // 2, 1])
        cands = list(range(len(queries))) # All are initially candidates
        queries = [s.upper() for s in queries]
        subjects = [s.upper() for s in subjects]
        if end_gaps_to_n:
            queries = [cls.EndGapsToN(s) for s in queries]
            subjects = [cls.EndGapsToN(s) for s in subjects]
        q_part = 0
        s_part = 0
        cur_max = 0
        cur_max_queries = []
        # Upper bound for min dist for each query
        ubs = [len(queries[0]) for q in queries]
        tic()
        pieces = 10
        spieces = [GroupBy(range(len(subjects)),
                           lambda i: subjects[i][start::pieces])
                   for start in range(pieces)]
        scol_descs10 = [cls.MakeColDescs(x) for x in spieces]
        searched_against_all = set()
        toc('Col descs for subjects in 10 pieces')
        for itnum in itertools.count():
            if cls.verbose:
                print('%d candidates' % len(cands))
            if len(cands) <= 20 and itnum > 0:
                # With a small number of candidates remaining,
                # cut to the chase.
                tic()
                mins = cls.MMCountPieces([queries[i] for i in cands], subjects,
                                         10, scol_descs10)
                toc('MMCount 3')
                max_min = max(mins)
                return max_min, [qnum for i, qnum in enumerate(cands)
                                 if mins[i] == max_min]
            # A fraction of query candidates against all subjects
            # to get lower bound on max of min
            if itnum == 0:
                # Comparison of all queries to all subjects
                # using just a fraction of positions.
                # This is a fast way to find good candidates (among queries)
                # for high minimum distance (to subjects).
                q10 = [q[::10] for q in queries]
                s10 = [s[::10] for s in subjects]
                qset = list(set(q10))
                sset = list(set(s10))
                res = cls.MMCount(qset, sset)
                mins = [min(r) for r in res]
                min_for_seq = {qset[i] : mins[i] for i in range(len(qset))}
                by_min = sorted(range(len(q10)),
                                key=lambda i: min_for_seq[q10[i]],
                                reverse=True)
                query_subset = by_min[:20] # Likely to have high min. distance
            elif itnum % 2 == 0:
                # Rotate through candidate slices, plus the ten
                # not-yet-fully-searched candidates with the highest
                # upper bounds.
                query_subset = [i for i in cands if i % skip2 == q_part]
                q_part += 1
                cands_not_searched = [i for i in cands
                                      if i not in searched_against_all]
                cs = sorted(cands_not_searched,
                            key=lambda c: ubs[c],
                            reverse=True)
                query_subset += cs[:10]
                # Remove any that have already been searched against all
                query_subset = [qnum for qnum in query_subset
                                if qnum not in searched_against_all]
            if query_subset and itnum % 2 == 0:
                tic()
                mins = cls.MMCountPieces([queries[i] for i in query_subset],
                                         subjects,
                                         10, scol_descs10)
                toc('MMCount 1')
                searched_against_all.update(query_subset)
                # These mins are exact, so they tighten the upper bounds.
                for i, idx in enumerate(query_subset):
                    ubs[idx] = mins[i]
                max_min = max(mins)
                if max_min >= cur_max:
                    if cls.verbose:
                        print('max %d' % max_min)
                    if max_min > cur_max:
                        cur_max = max_min
                        cur_max_queries = [qnum for i, qnum
                                           in enumerate(query_subset)
                                           if mins[i] == max_min]
                        cands = [i for i in cands if ubs[i] >= cur_max]
                    else:
                        # equal to cur_max; append indices
                        cur_max_queries += [qnum for i, qnum
                                            in enumerate(query_subset)
                                            if mins[i] == max_min]
                        if len(set(cur_max_queries)) < len(cur_max_queries):
                            raise RuntimeError('Duplicate values in '
                                               'cur_max_queries: %s'
                                               % cur_max_queries)
            # All query candidates against a fraction of subjects
            # to get upper bound on min for each cand
            subject_subset = list(range(s_part, len(subjects), skip))
            subject_subset_seqs = [subjects[i] for i in subject_subset]
            s_part += 1
            tic()
            mins = cls.MMCountPieces([queries[i] for i in cands],
                                     subject_subset_seqs, 10)
            toc('MMCount 2')
            to_rm = set() # candidates whose ub now falls below max
            for i, qnum in enumerate(cands):
                if mins[i] < ubs[qnum]:
                    ubs[qnum] = mins[i]
                    if ubs[qnum] < cur_max:
                        to_rm.add(qnum)
            if to_rm:
                cands = [i for i in cands if i not in to_rm]
            # Are we done?
            # One way:
            if len(cands) == len(cur_max_queries):
                if set(cands) != set(cur_max_queries):
                    raise RuntimeError('set(cands) != set(cur_max_queries)',
                                       cands, cur_max_queries)
                return cur_max, cur_max_queries
            # Related sanity check:
            if len(cands) < len(cur_max_queries):
                raise RuntimeError('len(cands) < len(cur_max_queries)',
                                   cands, cur_max_queries)
            # Another way to be done:
            if s_part == skip or searched_against_all.issuperset(cands):
                # Upper bounds for all remaining candidates are actual mins
                mx = max(ubs[i] for i in cands)
                arg_max = [i for i in cands if ubs[i] == mx]
                return mx, arg_max
##############################################################################
#################### GUI ####################
class GuiError(RuntimeError):
    '''Raised when GUI support (wxPython) is absent or broken.'''
    pass
class Gui(object):
    '''
    Thin wrapper around wxPython for the file-selection dialogs.
    The wx App/Frame are created lazily on first use so the program
    still runs console-only when wxPython is not installed.
    '''
    initialized = False
    dir = ''  # last directory chosen; reused as the next dialog's start dir
    @staticmethod
    def InitGui():
        '''Create the wx application objects once; raise GuiError if wx is missing.'''
        if Gui.initialized:
            return
        try:
            import wx
        except ImportError as e:
            raise GuiError('wxPython absent or incorrectly installed; '
                           'try "pip install wxPython"')
        Gui.app = wx.App()
        Gui.frame = wx.Frame()
        Gui.initialized = True
    @staticmethod
    def FileDialog(mode, prompt, type_text):
        '''
        Show a file-open ('r') or file-save ('w') dialog and return the
        selected path, or None if the user cancelled.
        '''
        Gui.InitGui()
        import wx
        mode_flags = {
            'r': wx.FD_OPEN | wx.FD_FILE_MUST_EXIST,
            'w': wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
        }
        if mode not in mode_flags:
            raise ValueError("'%s' is not a valid mode; "
                             "should be 'r' for input or 'w' for output"
                             % mode)
        dialog = wx.FileDialog(Gui.frame, prompt, Gui.dir, "", type_text,
                               mode_flags[mode])
        if dialog.ShowModal() == wx.ID_CANCEL:
            return None
        chosen_path = dialog.GetPath()
        if chosen_path.strip():
            # Remember the directory so the next dialog opens there.
            Gui.dir = dialog.GetDirectory()
        dialog.Destroy()
        return chosen_path
#################### End GUI ################
# ---- Command-line handling ----
start_time = time.localtime()
try:
    optlist, args = getopt.getopt(sys.argv[1:], 's:vc:h',
                                  ['seed=',
                                   'verbose',
                                   'comment=',
                                   'help'])
    optdict = dict(optlist)
    # Up to two positional args: input fasta and output fasta names.
    if len(args) not in [0, 1, 2]:
        raise getopt.GetoptError('Too many command-line parameters; '
                                 'maximum is 2')
    verbose = '-v' in optdict or '--verbose' in optdict
    # -s/--seed: optional integer seed for reproducible sampling.
    if '-s' in optdict or '--seed' in optdict:
        if '-s' in optdict and '--seed' in optdict:
            raise getopt.GetoptError('Illegal to give both -s and --seed')
        if '-s' in optdict:
            seed = int(optdict['-s'])
        else:
            seed = int(optdict['--seed'])
    else:
        seed = None
    # -c/--comment: optional comment recorded in the output fasta header.
    if '-c' in optdict or '--comment' in optdict:
        if '-c' in optdict and '--comment' in optdict:
            raise getopt.GetoptError('Illegal to give both -c and --comment')
        if '-c' in optdict:
            comment = optdict['-c']
        else:
            comment = (optdict['--comment'])
    else:
        comment = None
    if '-h' in optdict or '--help' in optdict:
        raise getopt.GetoptError('')  # empty message: just show usage below
except getopt.GetoptError as e:
    sys.stderr.write(e.msg)
    sys.stderr.write('''
Usage: %s [OPTIONS] [input_fname [output_fname]]
OPTIONS
-v,--verbose Print a lot of information
-s,--seed With an integer, sets random number seed
-c,--comment With a string, sets comment in top of output fasta
-h,--help This help
''' % sys.argv[0])
    Quit(1)
print()
print('** Modified program for COVID: changed meanings of A and G criteria **')
print()
if seed is not None:
    random.seed(seed)
# ---- Interactive main loop: one pass per subsample the user requests ----
summaries = []
for itnum in itertools.count():
    # Choose input file.  On the first pass, honor a positional argument.
    if itnum == 0 and len(args) > 0:
        # Bug fix: the input file is the FIRST positional argument;
        # args[1] (previously used here) crashed with a single argument
        # and read the wrong file when two were given.
        in_fname = args[0]
    elif (itnum > 0 and
          GetYN('Use same input file (%s)? y(es)/N(o) (RETURN for No): '
                % in_fname, False)):
        # User wants to use same input file as in last iteration;
        # leave value as it is
        pass
    else:
        in_fname = GetInputFname('Enter FASTA-format file for input: ',
                                 'fasta files (*.fa; *.fas; *.fasta)'
                                 '|*.fa;*.fas;*.fasta|all files (*.*)|*.*')
    if itnum == 0 and len(args) > 1:
        out_fname = args[1]
    else:
        out_fname = GetOutputFname('Enter name for FASTA-format output file: ',
                                   'fasta files (*.fa)|*.fa'
                                   '|all files (*.*)|*.*')
    st_input = os.stat(in_fname)  # recorded for the session summary
    seqs, names = fasta_read(in_fname)
    aln = {name : seqs[i] for i, name in enumerate(names)}
    # Sequence names are delimited fields; ask which delimiter.
    default_delim = '|'
    delim = GetSingleCharacter("Enter delimiter, or return for '%s': "
                               % default_delim,
                               default_delim)
    ShowExampleTagVals(names, delim)
    print('Note: only the year will be used if the date field is included')
    print()
    date_index = FindDateField(aln.keys(), delim)
    using_ss_input = False
    fields = GetFieldChoices(len(names[0].split(delim)))
    if fields == 'S':
        # Request to load spreadsheet
        using_ss_input = True
        csv_input_fname = GetInputFname('Enter CSV-format file for input: ',
                                        'CSV files (*.csv)|*.csv'
                                        '|all files (*.*)|*.*')
        target_counts, forced_includes = ReadEditableCSV(csv_input_fname)
        # Figure out which fields these correspond to
        values = [set(x) for x in zip(*[n.split(delim) for n in names])]
        values[date_index] = set(s[:4] for s in values[date_index])
        ss_values = [set(x) for x in zip(*target_counts.keys())]
        fields = []
        for ss in ss_values:
            fnum, = [i for i, s in enumerate(values) if s.issuperset(ss)]
            fields.append(fnum)
    # Group by chosen fields
    groups = GroupBy(aln.keys(),
                     lambda s: FieldTuple(s, delim, fields, date_index))
    print()
    print('%d categories' % len(groups))
    print('Mean category size: %.2f'
          % (sum(len(l) for l in groups.values()) / float(len(groups))))
    print('Minimum: %d' % min(len(l) for l in groups.values()))
    print('Maximum: %d' % max(len(l) for l in groups.values()))
    print()
    # Optionally save a read-only category-size spreadsheet (only
    # feasible to lay out for one or two category fields).
    if len(fields) == 1 or len(fields) == 2:
        do_ss = GetYN('Save a spreadsheet showing '
                      'category sizes? y(es)/N(o) (RETURN for No): ', False)
        if do_ss:
            csv_out_fname = GetOutputFname('Enter name for output spreadsheet (CSV) file: ',
                                           'CSV files (*.csv)|*.csv')
            import csv
            with open(csv_out_fname, 'w', newline='') as fid:
                csv_writer = csv.writer(fid)
                if len(fields) == 2:
                    # Two fields: field 1 down the rows, field 2 across
                    # the columns, with row/column totals.
                    f1s = sorted(set(t[0] for t in groups))
                    f2s = sorted(set(t[1] for t in groups))
                    csv_writer.writerow([''] + f2s + ['', 'Total']) # Col names
                    for f1 in f1s:
                        row = ([f1]
                               + [len(groups[f1, f2]) if (f1, f2) in groups
                                  else '' # or 0 here
                                  for f2 in f2s]
                               + ['']
                               # Row sum
                               + [sum(len(l) for t, l in groups.items()
                                      if t[0] == f1)])
                        csv_writer.writerow(row)
                    csv_writer.writerow([])
                    # Column sums
                    csv_writer.writerow(['Total'] +
                                        [sum([len(l) for t, l in groups.items()
                                              if t[1] == f2]) for f2 in f2s] +
                                        [''] +
                                        [sum(len(l) for l in groups.values())]
                                        )
                else: # len(fields) must equal 1
                    f1s = sorted(set(t[0] for t in groups))
                    for f1 in f1s:
                        row = [f1, len(groups[f1,])]
                        csv_writer.writerow(row)
                    csv_writer.writerow([])
                    csv_writer.writerow(['Total',
                                         sum(len(l) for l in groups.values())])
            st_csv = os.stat(csv_out_fname)  # NOTE(review): unused
            print()
            print('Wrote CSV file %s' % csv_out_fname)
            print()
            if GetYN('Launch CSV file %s now? y(es)/N(o) (RETURN for No): '
                     % csv_out_fname, False):
                # Consistency fix: use the LaunchFile helper (identical
                # platform dispatch, previously duplicated inline here);
                # the messages printed are unchanged.
                status = LaunchFile(csv_out_fname)
                print()
                if not status:
                    print('Launched CSV file')
                else:
                    # Print Exception object; do not allow program to fail
                    print('Problem launching CSV file: %s' % status)
                print()
            else:
                print()
        else:
            # No ss
            print()
    if not using_ss_input:
        # Show user number of samples that will result from various
        # choices of samples per category.
        cat_sizes = [len(l) for l in groups.values()]
        max_cat_size = max(cat_sizes)
        print('Total samples for some choices of samples per category:')
        print()
        pairs = []
        for num_samps in itertools.chain(range(1, 10),
                                         range(10, 100, 10),
                                         range(100, 1001, 100)):
            if num_samps > max_cat_size:
                num_samps = max_cat_size
            total = sum(min([num_samps, sz]) for sz in cat_sizes)
            pairs.append('%4d %-10d' % (num_samps, total))
            if num_samps == max_cat_size:
                pairs[-1] = pairs[-1].rstrip() + ' (all)'
                break
        # Make two columns
        if len(pairs) % 2:
            pairs.append('')
        nlines = len(pairs) // 2
        for i in range(nlines):
            print(pairs[i] + 10 * ' ' + pairs[i + nlines].rstrip())
        print()
        num_samps = GetPosInt('Number to sample from each category: ')
        total = sum(min([num_samps, sz]) for sz in cat_sizes)
        print('%d sequences will be sampled' % total)
        print()
        # Optionally save an editable spreadsheet of per-category sample
        # counts, to be reloaded via the 'S' field choice next pass.
        do_ss2 = GetYN('Save a spreadsheet showing '
                       'category sizes and number sampled '
                       'for editing and reloading? '
                       'y(es)/N(o) (RETURN for No): ', False)
        if do_ss2:
            csv_out_fname2 = GetOutputFname('Enter name for output spreadsheet '
                                            '(CSV) file: ',
                                            'CSV files (*.csv)|*.csv')
            WriteEditableCSV(groups, num_samps, csv_out_fname2)
            print()
            print('Wrote CSV file %s' % csv_out_fname2)
            print()
            if GetYN('Launch CSV file %s now? y(es)/N(o) (RETURN for No): '
                     % csv_out_fname2, False):
                status = LaunchFile(csv_out_fname2)
                print()
                if not status:
                    print('Launched CSV file')
                else:
                    # Print Exception object
                    print('Problem launching CSV file: %s' % status)
                print()
        else:
            # No ss2
            print()
    # Within-category preference string ('U' requires a date category).
    while True:
        priorities = GetPrefsWithinCat()
        if priorities[0] == 'U' and date_index not in fields:
            print('\n** U cannot be used unless date field is part of category **\n')
        else:
            break
    if not using_ss_input:
        target_counts = {key : min([num_samps, len(value)])
                         for key, value in groups.items()}
        forced_includes = []
    # Deal with forced includes: group by category, remove from category,
    # and adjust target count downward.
    forced_includes_for_cat = {}
    if forced_includes:
        print()
        print('Forcing inclusion of %d sequences' % len(forced_includes))
        print()
        acc_to_id = {id.split(delim)[0] : id for id in aln}
        id_to_cat = {}
        for cat, ids in groups.items():
            for id in ids:
                id_to_cat[id] = cat
        for fi in forced_includes:
            if len(fi.split(delim)) == 1:
                # A bare accession; translate to a full ID.
                try:
                    id = acc_to_id[fi]
                except KeyError as e:
                    raise ValueError('Accession %s (forced include) not found'
                                     % fi)
            else:
                id = fi
            try:
                cat = id_to_cat[id]
            except KeyError as e:
                raise ValueError('ID %s (forced include) not found'
                                 % id)
            if cat not in forced_includes_for_cat:
                forced_includes_for_cat[cat] = []
            forced_includes_for_cat[cat].append(id)
    for cat, ids in forced_includes_for_cat.items():
        if len(ids) > target_counts[cat]:
            print('WARNING: Only %d sequences requested for %s,'
                  % (target_counts[cat], cat))
            print(' but inclusion of %d forced:' % len(ids))
            print(' %s' % ' '.join([id.split('|')[0] for id in ids]))
            print(' All %d will be included, increasing total by %d.'
                  % (len(ids), len(ids) - target_counts[cat]))
            print()
            target_counts[cat] = 0
        else:
            target_counts[cat] -= len(ids)
        for id in ids:
            groups[cat].remove(id)
    # Sample each category, then add back the forced includes.
    sampled = {key : (SubSample(value, target_counts[key], priorities,
                                delim, date_index, aln)
                      + forced_includes_for_cat.get(key, []))
               for key, value in groups.items()
               if key in target_counts}
    sampled_ids = list(flatten(sampled.values()))
    # A comment to add to first line of fasta
    if comment is None or comment.startswith('+'):
        if using_ss_input:
            num_samps_str = 'from %s' % csv_input_fname
        else:
            num_samps_str = str(num_samps)
        try:
            login = os.getlogin() + ' ' # os.getlogin() fails on some linux
        except Exception:
            login = ''
        tmp = ('%s %s fields: %s prefs: %s samps_per_cat: %s (%s %s)'
               % (os.path.basename(sys.argv[0]), in_fname,
                  fields, priorities,
                  num_samps_str, login, StrFTime('%d %b %Y %X %Z')))
        if comment is not None:
            comment = tmp + '; ' + comment[1:]
        else:
            comment = tmp
    writefasta([aln[id] for id in sampled_ids], sampled_ids,
               out_fname, [comment])
    # Description of actions taken
    in_mtime = datetime.datetime.fromtimestamp(st_input.st_mtime)
    digest = hashlib.md5(open(in_fname, 'rb').read()).hexdigest()
    line0 = ('Input file: %s\n(modified %s; size %d; MD5 %s)'
             % (in_fname, in_mtime.strftime('%d %b %Y %X %Z'),
                st_input.st_size, digest)
             )
    if seed is not None:
        line_seed = 'Random seed set to %d' % seed
    if not using_ss_input:
        line1 = ('Chose %d (or all) from each category '
                 'using fields %s and preferences %s'
                 % (num_samps, fields, priorities))
    else:
        line1 = ('Chose according to %s from each category '
                 'using fields %s and preferences %s'
                 % (csv_input_fname, fields, priorities))
    line2 = '%d sequences written to %s' % (len(sampled_ids), out_fname)
    if seed is None:
        summaries.append([line0, line1, line2])
    else:
        summaries.append([line0, line_seed, line1, line2])
    print()
    print(line1)
    if seed is not None:
        print(line_seed)
    print(line2)
    # Report largest min distance of non-sampled isolate to sampled isolates
    ssampled_ids = set(sampled_ids)
    non_sampled_ids = [id for id in names if id not in ssampled_ids]
    try:
        max_dist, indices = MismatchCount.MaxMinDist(
            [aln[id]
             for id in non_sampled_ids],
            [aln[id]
             for id in sampled_ids],
            True)
    except Exception as e:
        print()
        print('Problem calculating largest min distance:')
        sys.excepthook(type(e), e, e.__traceback__)
        print()
        print('This will not affect anything else.')
        print()
    else:
        print()
        print('Largest distance from non-sampled sequence '
              'to nearest sampled sequence:')
        print()
        print(' %d mismatches' % max_dist)
        print()
        print('%d sequence%s with this minimum distance:'
              % (len(indices), 's' if len(indices) > 1 else ''))
        # Print ID(s) for seqs with max min dist
        for idx in indices:
            print(non_sampled_ids[idx])
        print()
    print()
    if not GetYN('Create another subsample? y(es)/N(o) (RETURN for No): ',
                 False):
        break # Done with iterations at user's request
# ---- Session summary: one entry per subsample performed ----
end_time = time.localtime()
if len(summaries) > 0:
    try:
        login = os.getlogin() # os.getlogin() fails on some linux
    except Exception:
        login = 'unknown'
    print()
    print()
    print('Summary of session by %s %s:'
          % (login, TimeRange(start_time, end_time)))
    print()
    for l in summaries:
        for line in l:
            print(line)
        print()
Quit(0)
| 36.451826 | 100 | 0.487659 |
949bc1da5f62cb15ee831e9c9fde3c3d1b30f083 | 1,143 | py | Python | test_algo.py | offthewallace/Extreme-clicking-Annotation | 1fab5b7db668bebe1cc16e05030adb9f43327d7d | [
"MIT"
] | 1 | 2020-04-27T23:52:56.000Z | 2020-04-27T23:52:56.000Z | test_algo.py | offthewallace/Extreme-clicking-Annotation | 1fab5b7db668bebe1cc16e05030adb9f43327d7d | [
"MIT"
] | null | null | null | test_algo.py | offthewallace/Extreme-clicking-Annotation | 1fab5b7db668bebe1cc16e05030adb9f43327d7d | [
"MIT"
] | null | null | null | import math
import numpy as np
import sys, os
import matplotlib.pyplot as plt
from lxml import etree as ET
import cv2
import random
import pdb
#ROW=640
#COL=640
img = cv2.imread('messi5.jpg')
#print(path)
point1x=309
point1y=76
point2x=56
point2y=149
point3x=367
point3y=635
point4x=617
point4y=571
(ROW,COL,_)= img.shape
print(ROW,COL)
detector = cv2.ximgproc.createStructuredEdgeDetection('model.yml.gz')
rgb_im = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
im = detector.detectEdges(np.float32(rgb_im) / 255.0)
orimap = detector.computeOrientation(im)
edges = detector.edgesNms(im, orimap)
for x in range(len(edges.tolist())):
for y in range(len(edges.tolist()[0])):
im[x][y]=1-im[x][y]
edges[x][y]=1-edges[x][y]
#if Matrix[x][y] ==1:
# Matrix[x][y]=1000
mask = np.zeros(img.shape[:2],np.uint8)
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
#rect = (max(0,xmin-25),max(0,ymin-25),xmax-25,ymax-25)
rect=(xmin,ymin,xmax,ymax)
cv.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv.GC_INIT_WITH_RECT)
print(im.shape)
cv2.imshow("edges", im)
cv2.imshow("edgeboxes", edges)
cv2.waitKey(0)
cv2.destroyAllWindows() | 23.8125 | 69 | 0.727909 |
cd90d1c57bb5a081e84666b3b2f953a0c67319c6 | 16,454 | py | Python | experimental/language_structure/psl/psl_model_multiwoz_test.py | dvdzhang/uncertainty-baselines | 8ce0d7494e5cae0719c1b750da4b61564e536636 | [
"Apache-2.0"
] | null | null | null | experimental/language_structure/psl/psl_model_multiwoz_test.py | dvdzhang/uncertainty-baselines | 8ce0d7494e5cae0719c1b750da4b61564e536636 | [
"Apache-2.0"
] | null | null | null | experimental/language_structure/psl/psl_model_multiwoz_test.py | dvdzhang/uncertainty-baselines | 8ce0d7494e5cae0719c1b750da4b61564e536636 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for MultiWoz rules."""
import tensorflow as tf
import constrained_evaluation as eval_model # local file import from experimental.language_structure.psl
import data # local file import from experimental.language_structure.psl
import psl_model_multiwoz as model # local file import from experimental.language_structure.psl
import psl_model_multiwoz_test_util as test_util # local file import from experimental.language_structure.psl
class PslRulesTest(tf.test.TestCase):
def setUp(self):
super(PslRulesTest, self).setUp()
self.config = test_util.TEST_MULTIWOZ_CONFIG
self.data = test_util.DATA
tf.random.set_seed(self.config['default_seed'])
train_dialogs = data.add_features(
self.data['train_data'],
vocab_mapping=self.data['vocab_mapping'],
accept_words=self.config['accept_words'],
cancel_words=self.config['cancel_words'],
end_words=self.config['end_words'],
greet_words=self.config['greet_words'],
info_question_words=self.config['info_question_words'],
insist_words=self.config['insist_words'],
slot_question_words=self.config['slot_question_words'],
includes_word=self.config['includes_word'],
excludes_word=self.config['excludes_word'],
accept_index=self.config['accept_index'],
cancel_index=self.config['cancel_index'],
end_index=self.config['end_index'],
greet_index=self.config['greet_index'],
info_question_index=self.config['info_question_index'],
insist_index=self.config['insist_index'],
slot_question_index=self.config['slot_question_index'],
utterance_mask=self.config['utterance_mask'],
pad_utterance_mask=self.config['pad_utterance_mask'],
last_utterance_mask=self.config['last_utterance_mask'],
mask_index=self.config['mask_index'])
train_data = data.pad_dialogs(train_dialogs, self.config['max_dialog_size'],
self.config['max_utterance_size'])
raw_train_labels = data.one_hot_string_encoding(self.data['train_labels'],
self.config['class_map'])
train_labels = data.pad_one_hot_labels(raw_train_labels,
self.config['max_dialog_size'],
self.config['class_map'])
self.train_ds = data.list_to_dataset(train_data[0], train_labels[0],
self.config['shuffle_train'],
self.config['batch_size'])
test_dialogs = data.add_features(
self.data['test_data'],
vocab_mapping=self.data['vocab_mapping'],
accept_words=self.config['accept_words'],
cancel_words=self.config['cancel_words'],
end_words=self.config['end_words'],
greet_words=self.config['greet_words'],
info_question_words=self.config['info_question_words'],
insist_words=self.config['insist_words'],
slot_question_words=self.config['slot_question_words'],
includes_word=self.config['includes_word'],
excludes_word=self.config['excludes_word'],
accept_index=self.config['accept_index'],
cancel_index=self.config['cancel_index'],
end_index=self.config['end_index'],
greet_index=self.config['greet_index'],
info_question_index=self.config['info_question_index'],
insist_index=self.config['insist_index'],
slot_question_index=self.config['slot_question_index'],
utterance_mask=self.config['utterance_mask'],
pad_utterance_mask=self.config['pad_utterance_mask'],
last_utterance_mask=self.config['last_utterance_mask'],
mask_index=self.config['mask_index'])
test_data = data.pad_dialogs(test_dialogs, self.config['max_dialog_size'],
self.config['max_utterance_size'])
raw_test_labels = data.one_hot_string_encoding(self.data['test_labels'],
self.config['class_map'])
self.test_labels = data.pad_one_hot_labels(raw_test_labels,
self.config['max_dialog_size'],
self.config['class_map'])
self.test_ds = data.list_to_dataset(test_data[0], self.test_labels[0],
self.config['shuffle_test'],
self.config['batch_size'])
def check_greet(self, predictions, mask, class_map):
for dialog_pred, dialog_mask in zip(predictions, mask):
first = True
for utterance_pred, utterance_mask in zip(dialog_pred, dialog_mask):
if first or utterance_mask == 0:
first = False
continue
if utterance_pred == class_map['greet']:
return False
return True
def test_psl_rule_1_run_model(self):
rule_weights = (1.0,)
rule_names = ('rule_1',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
constrained_model = test_util.build_constrained_model(
[self.config['max_dialog_size'], self.config['max_utterance_size']])
constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
logits = eval_model.evaluate_constrained_model(constrained_model,
self.test_ds,
psl_constraints)
predictions = tf.math.argmax(logits[0], axis=-1)
result = self.check_greet(predictions, self.test_labels[1],
self.config['class_map'])
self.assertTrue(result)
def test_psl_rule_1(self):
rule_weights = (1.0,)
rule_names = ('rule_1',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_1(logits=tf.constant(logits))
self.assertEqual(loss, 1.4)
def test_psl_rule_2_run_model(self):
rule_weights = (10.0,)
rule_names = ('rule_2',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
constrained_model = test_util.build_constrained_model(
[self.config['max_dialog_size'], self.config['max_utterance_size']])
constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
logits = eval_model.evaluate_constrained_model(constrained_model,
self.test_ds,
psl_constraints)
predictions = tf.math.argmax(logits[0], axis=-1)
self.assertEqual(predictions[2][0], self.config['class_map']['greet'])
self.assertEqual(predictions[3][0], self.config['class_map']['greet'])
def test_psl_rule_2(self):
rule_weights = (1.0,)
rule_names = ('rule_2',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_2(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertEqual(loss, 0.6)
def test_psl_rule_3_run_model(self):
rule_weights = (1.0,)
rule_names = ('rule_3',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
constrained_model = test_util.build_constrained_model(
[self.config['max_dialog_size'], self.config['max_utterance_size']])
constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
logits = eval_model.evaluate_constrained_model(constrained_model,
self.test_ds,
psl_constraints)
predictions = tf.math.argmax(logits[0], axis=-1)
self.assertEqual(predictions[0][0],
self.config['class_map']['init_request'])
self.assertEqual(predictions[1][0],
self.config['class_map']['init_request'])
def test_psl_rule_3(self):
rule_weights = (1.0,)
rule_names = ('rule_3',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_3(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertEqual(loss, 0.8)
def test_psl_rule_4_run_model(self):
rule_weights = (1.0,)
rule_names = ('rule_4',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
constrained_model = test_util.build_constrained_model(
[self.config['max_dialog_size'], self.config['max_utterance_size']])
constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
logits = eval_model.evaluate_constrained_model(constrained_model,
self.test_ds,
psl_constraints)
predictions = tf.math.argmax(logits[0], axis=-1)
self.assertEqual(predictions[1][1],
self.config['class_map']['second_request'])
self.assertEqual(predictions[2][1],
self.config['class_map']['second_request'])
def test_psl_rule_4(self):
rule_weights = (1.0,)
rule_names = ('rule_4',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_4(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 1.8, err=1e-6)
def test_psl_rule_5_run_model(self):
rule_weights = (1.0,)
rule_names = ('rule_5',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
constrained_model = test_util.build_constrained_model(
[self.config['max_dialog_size'], self.config['max_utterance_size']])
constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
logits = eval_model.evaluate_constrained_model(constrained_model,
self.test_ds,
psl_constraints)
predictions = tf.math.argmax(logits[0], axis=-1)
self.assertNotEqual(predictions[1][1],
self.config['class_map']['init_request'])
self.assertNotEqual(predictions[2][1],
self.config['class_map']['init_request'])
def test_psl_rule_5(self):
rule_weights = (1.0,)
rule_names = ('rule_5',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_5(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 1.4, err=1e-6)
def test_psl_rule_6_run_model(self):
rule_weights = (1.0,)
rule_names = ('rule_6',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
constrained_model = test_util.build_constrained_model(
[self.config['max_dialog_size'], self.config['max_utterance_size']])
constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
logits = eval_model.evaluate_constrained_model(constrained_model,
self.test_ds,
psl_constraints)
predictions = tf.math.argmax(logits[0], axis=-1)
self.assertNotEqual(predictions[1][0], self.config['class_map']['greet'])
self.assertNotEqual(predictions[2][0], self.config['class_map']['greet'])
def test_psl_rule_6(self):
rule_weights = (1.0,)
rule_names = ('rule_6',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_6(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 1.4, err=1e-6)
def test_psl_rule_7_run_model(self):
rule_weights = (1.0,)
rule_names = ('rule_7',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
constrained_model = test_util.build_constrained_model(
[self.config['max_dialog_size'], self.config['max_utterance_size']])
constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
logits = eval_model.evaluate_constrained_model(constrained_model,
self.test_ds,
psl_constraints)
predictions = tf.math.argmax(logits[0], axis=-1)
self.assertEqual(predictions[1][2], self.config['class_map']['end'])
self.assertEqual(predictions[2][3], self.config['class_map']['end'])
def test_psl_rule_7(self):
rule_weights = (1.0,)
rule_names = ('rule_7',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_7(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 1.1, err=1e-6)
def test_psl_rule_8(self):
rule_weights = (1.0,)
rule_names = ('rule_8',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_8(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 0.9, err=1e-6)
def test_psl_rule_9(self):
rule_weights = (1.0,)
rule_names = ('rule_9',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_9(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 0.8, err=1e-6)
def test_psl_rule_10(self):
rule_weights = (1.0,)
rule_names = ('rule_10',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_10(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 0.3, err=1e-6)
def test_psl_rule_11(self):
rule_weights = (1.0,)
rule_names = ('rule_11',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_11(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 0.7, err=1e-6)
def test_psl_rule_12(self):
rule_weights = (1.0,)
rule_names = ('rule_12',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_12(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 0.1, err=1e-6)
def test_compute_loss_per_rule(self):
rule_weights = (1.0, 2.0)
rule_names = ('rule_11', 'rule_12')
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss_per_rule = psl_constraints.compute_loss_per_rule(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertArrayNear(loss_per_rule, [0.7, 0.2], err=1e-6)
def test_compute_loss(self):
rule_weights = (1.0, 2.0)
rule_names = ('rule_11', 'rule_12')
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.compute_loss(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 0.9, err=1e-6)
if __name__ == '__main__':
tf.test.main()
| 41.550505 | 110 | 0.656679 |
28e95434d5656e29feacd80157f4d9ec492b2fb8 | 2,325 | py | Python | data.py | renqianluo/DAG2N | 28381776ed26cd7148de8424c00f9b56ece72281 | [
"Apache-2.0"
] | 2 | 2018-06-29T07:51:27.000Z | 2019-02-07T15:19:54.000Z | data.py | renqianluo/DAG2N | 28381776ed26cd7148de8424c00f9b56ece72281 | [
"Apache-2.0"
] | null | null | null | data.py | renqianluo/DAG2N | 28381776ed26cd7148de8424c00f9b56ece72281 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import random
parser = argparse.ArgumentParser()
# Basic model parameters.
parser.add_argument('--data_dir', type=str, dest='data_dir', default='/tmp/cifar10_data',
help='The path to the CIFAR-10 data directory.')
_HEIGHT = 32
_WIDTH = 32
_DEPTH = 3
_NUM_CLASSES = 10
_NUM_DATA_FILES = 5
_NUM_IMAGES = {
'train': 45000,
'valid': 5000,
'test': 10000,
}
def record_dataset(data_dir):
filenames = get_filenames(data_dir)
label_bytes = 1
image_bytes = _HEIGHT * _WIDTH * _DEPTH
record_bytes = label_bytes + image_bytes
contents = []
for filename in filenames:
with open(filename, 'rb') as f:
content = f.read(record_bytes)
while content:
assert len(content) == record_bytes
contents.append(content)
content = f.read(record_bytes)
assert len(contents) == _NUM_IMAGES['train'] + _NUM_IMAGES['valid']
valid_index = random.sample(range(len(contents)), _NUM_IMAGES['valid'])
valid_data = []
train_data = []
for i in range(len(contents)):
if i in valid_index:
valid_data.append(contents[i])
else:
train_data.append(contents[i])
assert len(train_data) == _NUM_IMAGES['train']
assert len(valid_data) == _NUM_IMAGES['valid']
num_train_data = len(train_data)
num_train_data_per_file = num_train_data // _NUM_DATA_FILES
for i in range(1, _NUM_DATA_FILES+1):
with open(os.path.join(data_dir, 'train_batch_%d.bin' % i), 'wb') as f:
for j in range((i-1)*num_train_data_per_file, i*num_train_data_per_file):
f.write(train_data[j])
with open(os.path.join(data_dir, 'valid_batch.bin'), 'wb') as f:
for c in valid_data:
f.write(c)
def get_filenames(data_dir):
"""Returns a list of filenames."""
data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
assert os.path.exists(data_dir), (
'Run cifar10_download_and_extract.py first to download and extract the '
'CIFAR-10 data.')
return [
os.path.join(data_dir, 'data_batch_%d.bin' % i)
for i in range(1, _NUM_DATA_FILES + 1)
]
def main(args):
record_dataset(args.data_dir)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
| 26.724138 | 89 | 0.688172 |
994cb90d0d7583dd30e4a45f6c1fba6dccb468ad | 5,387 | py | Python | BParkingNano/python/tracksBPark_cff.py | nancymarinelli/BParkingNANO | d220fc7afb4f570dee0ca58d0ae84dc3e5befa95 | [
"Apache-2.0"
] | null | null | null | BParkingNano/python/tracksBPark_cff.py | nancymarinelli/BParkingNANO | d220fc7afb4f570dee0ca58d0ae84dc3e5befa95 | [
"Apache-2.0"
] | null | null | null | BParkingNano/python/tracksBPark_cff.py | nancymarinelli/BParkingNANO | d220fc7afb4f570dee0ca58d0ae84dc3e5befa95 | [
"Apache-2.0"
] | 1 | 2020-06-19T08:55:56.000Z | 2020-06-19T08:55:56.000Z | import FWCore.ParameterSet.Config as cms
from PhysicsTools.NanoAOD.common_cff import *
tracksBPark = cms.EDProducer('TrackMerger',
beamSpot = cms.InputTag("offlineBeamSpot"),
trgMuon = cms.InputTag("muonTrgSelector:trgMuons"),
tracks = cms.InputTag("packedPFCandidates"),
lostTracks = cms.InputTag("lostTracks"),
trkPtCut = cms.double(0.7),
muons = cms.InputTag("slimmedMuons"),
pfElectrons= cms.InputTag("slimmedElectrons"),
vertices = cms.InputTag("offlineSlimmedPrimaryVertices"),
trkEtaCut = cms.double(2.5),
dzTrg_cleaning = cms.double(1.),
drTrg_Cleaning = cms.double(0.03),
dcaSig = cms.double(-100000),
trkNormChiMin = cms.int32(-1),
trkNormChiMax = cms.int32(-1)
)
trackBParkTable = cms.EDProducer(
"SimpleCompositeCandidateFlatTableProducer",
src = cms.InputTag("tracksBPark:SelectedTracks"),
cut = cms.string(""),
name = cms.string("ProbeTracks"),
doc = cms.string("track collection probe side for BPark after basic selection"),
singleton = cms.bool(False),
extension = cms.bool(False),
variables = cms.PSet(
CandVars,
vx = Var("vx()", float, doc="x coordinate of vertex position, in cm", precision=10),
vy = Var("vy()", float, doc="y coordinate of vertex position, in cm", precision=10),
vz = Var("vz()", float, doc="z coordinate of vertex position, in cm", precision=10),
isPacked = Var("userInt('isPacked')",int,doc="track from packedCandidate collection", precision=10),
isLostTrk = Var("userInt('isLostTrk')",int,doc="track from lostTrack collection", precision=10),
dz = Var("userFloat('dz')",float,doc="dz (with sign) wrt first PV, in cm", precision=10),
dxy = Var("userFloat('dxy')",float,doc="dxy (with sign) wrt first PV, in cm", precision=10),
dzS = Var("userFloat('dzS')", float, doc="dz/err (with sign) wrt first PV, in cm", precision=10),
dxyS = Var("userFloat('dxyS')", float, doc="dxy/err (with sign) wrt first PV, in cm", precision=10),
DCASig=Var("userFloat('DCASig')", float,doc="significance of xy-distance of closest approach wrt beamspot", precision=10),
isMatchedToMuon = Var("userInt('isMatchedToMuon')",bool,doc="track was used to build a muon", precision=10),
isMatchedToLooseMuon = Var("userInt('isMatchedToLooseMuon')",bool,doc="track was used to build a muon passing LooseID", precision=10),
isMatchedToSoftMuon = Var("userInt('isMatchedToSoftMuon')",bool,doc="track was used to build a muon passing softID", precision=10),
isMatchedToMediumMuon = Var("userInt('isMatchedToMediumMuon')",bool,doc="track was used to build a muon passing mediumID", precision=10),
isMatchedToEle = Var("userInt('isMatchedToEle')",bool,doc="track was used to build a PF ele", precision=10),
nValidHits = Var("userInt('nValidHits')", int,doc="Number of valid hits on track", precision=10),
#dEdXStrip=Var("userFloat('dEdXStrip')", float,doc="dE/dX from strips of associated isolated track"),
#dEdXPixel=Var("userFloat('dEdXPixel')", float,doc="dE/dX from pixels of associated isolated track"),
),
)
tracksBParkMCMatchForTable = cms.EDProducer("MCMatcher", # cut on deltaR, deltaPt/Pt; pick best by deltaR
src = trackBParkTable.src, # final reco collection
matched = cms.InputTag("finalGenParticlesBPark"), # final mc-truth particle collection
mcPdgId = cms.vint32(321,211), # one or more PDG ID (321 = charged kaon, 211 = charged pion); absolute values (see below)
checkCharge = cms.bool(False), # True = require RECO and MC objects to have the same charge
mcStatus = cms.vint32(1), # PYTHIA status code (1 = stable, 2 = shower, 3 = hard scattering)
maxDeltaR = cms.double(0.03), # Minimum deltaR for the match
maxDPtRel = cms.double(0.5), # Minimum deltaPt/Pt for the match
resolveAmbiguities = cms.bool(True), # Forbid two RECO objects to match to the same GEN object
resolveByMatchQuality = cms.bool(True), # False = just match input in order; True = pick lowest deltaR pair first
)
tracksBParkMCMatchEmbedded = cms.EDProducer(
'CompositeCandidateMatchEmbedder',
src = trackBParkTable.src,
matching = cms.InputTag("tracksBParkMCMatchForTable")
)
tracksBParkMCTable = cms.EDProducer("CandMCMatchTableProducerBPark",
src = tracksBParkMCMatchForTable.src,
mcMap = cms.InputTag("tracksBParkMCMatchForTable"),
objName = trackBParkTable.name,
objType = trackBParkTable.name,
branchName = cms.string("genPart"),
docString = cms.string("MC matching to status==1 kaons or pions"),
)
tracksBParkSequence = cms.Sequence(tracksBPark)
tracksBParkTables = cms.Sequence(trackBParkTable)
tracksBParkMC = cms.Sequence(tracksBParkSequence + tracksBParkMCMatchForTable + tracksBParkMCMatchEmbedded + tracksBParkMCTable)
| 61.91954 | 149 | 0.635604 |
a061b9ca5f5e16b86b177f0efb181ee6283460f5 | 15,453 | py | Python | vm_server/send/proto/request_pb2.py | googleinterns/automated-windows-vms | 860edbba8956edc3b76893b2c1283eeacca8a0f6 | [
"Apache-2.0"
] | 3 | 2020-06-02T09:24:13.000Z | 2020-11-17T10:14:35.000Z | vm_server/send/proto/request_pb2.py | googleinterns/automated-windows-vms | 860edbba8956edc3b76893b2c1283eeacca8a0f6 | [
"Apache-2.0"
] | 6 | 2020-06-09T13:48:05.000Z | 2022-03-12T00:43:38.000Z | vm_server/send/proto/request_pb2.py | googleinterns/automated-windows-vms | 860edbba8956edc3b76893b2c1283eeacca8a0f6 | [
"Apache-2.0"
] | 1 | 2020-10-12T21:37:26.000Z | 2020-10-12T21:37:26.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/request.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='proto/request.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x13proto/request.proto\"\xc0\x01\n\x0bTaskRequest\x12\x11\n\tcode_path\x18\x01 \x01(\t\x12\x11\n\tdata_path\x18\x02 \x01(\t\x12\x13\n\x0boutput_path\x18\x03 \x01(\t\x12\x13\n\x0btarget_path\x18\x04 \x01(\t\x12\x0f\n\x07timeout\x18\x05 \x01(\x01\x12!\n\x0c\x63onfig_pairs\x18\x06 \x03(\x0b\x32\x0b.ConfigPair\x12\x19\n\x11number_of_retries\x18\x07 \x01(\x05\x12\x12\n\nrequest_id\x18\x08 \x01(\x05\"(\n\nConfigPair\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\x94\x01\n\x0cTaskResponse\x12%\n\x06status\x18\x01 \x01(\x0e\x32\x15.TaskResponse.options\x12\x17\n\x0fnumber_of_files\x18\x02 \x01(\x05\x12\x12\n\ntime_taken\x18\x03 \x01(\x01\"0\n\x07options\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07\x46\x41ILURE\x10\x01\x12\x0b\n\x07SUCCESS\x10\x02\"\'\n\x11TaskStatusRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\x05\"\xd1\x01\n\x12TaskStatusResponse\x12\x17\n\x0f\x63urrent_task_id\x18\x01 \x01(\x05\x12*\n\x06status\x18\x02 \x01(\x0e\x32\x1a.TaskStatusResponse.Status\x12$\n\rtask_response\x18\x03 \x01(\x0b\x32\r.TaskResponse\"P\n\x06Status\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0c\n\x08\x41\x43\x43\x45PTED\x10\x01\x12\x0c\n\x08REJECTED\x10\x02\x12\r\n\tCOMPLETED\x10\x03\x12\x0e\n\nINVALID_ID\x10\x04\x62\x06proto3'
)
_TASKRESPONSE_OPTIONS = _descriptor.EnumDescriptor(
name='options',
full_name='TaskResponse.options',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FAILURE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=361,
serialized_end=409,
)
_sym_db.RegisterEnumDescriptor(_TASKRESPONSE_OPTIONS)
_TASKSTATUSRESPONSE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='TaskStatusResponse.Status',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACCEPTED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REJECTED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='COMPLETED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVALID_ID', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=582,
serialized_end=662,
)
_sym_db.RegisterEnumDescriptor(_TASKSTATUSRESPONSE_STATUS)
_TASKREQUEST = _descriptor.Descriptor(
name='TaskRequest',
full_name='TaskRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='code_path', full_name='TaskRequest.code_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data_path', full_name='TaskRequest.data_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='output_path', full_name='TaskRequest.output_path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='target_path', full_name='TaskRequest.target_path', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timeout', full_name='TaskRequest.timeout', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='config_pairs', full_name='TaskRequest.config_pairs', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='number_of_retries', full_name='TaskRequest.number_of_retries', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='request_id', full_name='TaskRequest.request_id', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=24,
serialized_end=216,
)
_CONFIGPAIR = _descriptor.Descriptor(
name='ConfigPair',
full_name='ConfigPair',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ConfigPair.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ConfigPair.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=218,
serialized_end=258,
)
_TASKRESPONSE = _descriptor.Descriptor(
name='TaskResponse',
full_name='TaskResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='TaskResponse.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='number_of_files', full_name='TaskResponse.number_of_files', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time_taken', full_name='TaskResponse.time_taken', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_TASKRESPONSE_OPTIONS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=261,
serialized_end=409,
)
_TASKSTATUSREQUEST = _descriptor.Descriptor(
name='TaskStatusRequest',
full_name='TaskStatusRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='TaskStatusRequest.request_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=411,
serialized_end=450,
)
_TASKSTATUSRESPONSE = _descriptor.Descriptor(
name='TaskStatusResponse',
full_name='TaskStatusResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='current_task_id', full_name='TaskStatusResponse.current_task_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='TaskStatusResponse.status', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='task_response', full_name='TaskStatusResponse.task_response', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_TASKSTATUSRESPONSE_STATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=453,
serialized_end=662,
)
_TASKREQUEST.fields_by_name['config_pairs'].message_type = _CONFIGPAIR
_TASKRESPONSE.fields_by_name['status'].enum_type = _TASKRESPONSE_OPTIONS
_TASKRESPONSE_OPTIONS.containing_type = _TASKRESPONSE
_TASKSTATUSRESPONSE.fields_by_name['status'].enum_type = _TASKSTATUSRESPONSE_STATUS
_TASKSTATUSRESPONSE.fields_by_name['task_response'].message_type = _TASKRESPONSE
_TASKSTATUSRESPONSE_STATUS.containing_type = _TASKSTATUSRESPONSE
DESCRIPTOR.message_types_by_name['TaskRequest'] = _TASKREQUEST
DESCRIPTOR.message_types_by_name['ConfigPair'] = _CONFIGPAIR
DESCRIPTOR.message_types_by_name['TaskResponse'] = _TASKRESPONSE
DESCRIPTOR.message_types_by_name['TaskStatusRequest'] = _TASKSTATUSREQUEST
DESCRIPTOR.message_types_by_name['TaskStatusResponse'] = _TASKSTATUSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TaskRequest = _reflection.GeneratedProtocolMessageType('TaskRequest', (_message.Message,), {
'DESCRIPTOR' : _TASKREQUEST,
'__module__' : 'proto.request_pb2'
# @@protoc_insertion_point(class_scope:TaskRequest)
})
_sym_db.RegisterMessage(TaskRequest)
ConfigPair = _reflection.GeneratedProtocolMessageType('ConfigPair', (_message.Message,), {
'DESCRIPTOR' : _CONFIGPAIR,
'__module__' : 'proto.request_pb2'
# @@protoc_insertion_point(class_scope:ConfigPair)
})
_sym_db.RegisterMessage(ConfigPair)
TaskResponse = _reflection.GeneratedProtocolMessageType('TaskResponse', (_message.Message,), {
'DESCRIPTOR' : _TASKRESPONSE,
'__module__' : 'proto.request_pb2'
# @@protoc_insertion_point(class_scope:TaskResponse)
})
_sym_db.RegisterMessage(TaskResponse)
TaskStatusRequest = _reflection.GeneratedProtocolMessageType('TaskStatusRequest', (_message.Message,), {
'DESCRIPTOR' : _TASKSTATUSREQUEST,
'__module__' : 'proto.request_pb2'
# @@protoc_insertion_point(class_scope:TaskStatusRequest)
})
_sym_db.RegisterMessage(TaskStatusRequest)
TaskStatusResponse = _reflection.GeneratedProtocolMessageType('TaskStatusResponse', (_message.Message,), {
'DESCRIPTOR' : _TASKSTATUSRESPONSE,
'__module__' : 'proto.request_pb2'
# @@protoc_insertion_point(class_scope:TaskStatusResponse)
})
_sym_db.RegisterMessage(TaskStatusResponse)
# @@protoc_insertion_point(module_scope)
| 39.320611 | 1,255 | 0.753317 |
80f927f24c133e72117f306e0c43702fb88fafdc | 2,162 | py | Python | setup.py | ndem0/ATHENA | 87825ad95de539ac5e816a19922e9d615fabd5b8 | [
"MIT"
] | 33 | 2019-12-05T15:20:26.000Z | 2022-03-27T17:53:57.000Z | setup.py | ndem0/ATHENA | 87825ad95de539ac5e816a19922e9d615fabd5b8 | [
"MIT"
] | 12 | 2020-03-23T08:54:32.000Z | 2021-11-07T14:33:04.000Z | setup.py | ndem0/ATHENA | 87825ad95de539ac5e816a19922e9d615fabd5b8 | [
"MIT"
] | 16 | 2019-12-05T14:10:57.000Z | 2021-07-30T14:12:10.000Z | from setuptools import setup
meta = {}
with open("athena/meta.py") as fp:
exec(fp.read(), meta)
# Package meta-data.
IMPORTNAME = meta['__title__']
PIPNAME = meta['__packagename__']
DESCRIPTION = (
"Advanced Techniques for High dimensional parameter spaces to Enhance "
"Numerical Analysis"
)
URL = 'https://github.com/mathLab/ATHENA'
MAIL = meta['__mail__']
AUTHOR = meta['__author__']
VERSION = meta['__version__']
KEYWORDS = (
"parameter-space-reduction active-subspaces kernel-active-subspaces "
"model-reduction sensitivity-analysis nonlinear-level-set-learning"
)
REQUIRED = [
'numpy', 'scipy', 'matplotlib', 'torch', 'GPy', 'GPyOpt', 'scikit-learn'
]
EXTRAS = {
'docs': ['Sphinx>=1.4', 'sphinx_rtd_theme'],
'formatting': ['yapf'],
'tutorials': ['pyro', 'pyhmc'],
'test': ['pytest', 'pytest-cov'],
}
LDESCRIPTION = (
'ATHENA is a Python package for reduction of high dimensional '
'parameter spaces in the context of numerical analysis. It allows '
'the use of several dimensionality reduction techniques such as '
'Active Subspaces (AS), Kernel-based Active Subspaces (KAS), and '
'Nonlinear Level-set Learning (NLL).\n'
'\n'
'It is particularly suited for the study of parametric PDEs, for '
'sensitivity analysis, and for the approximation of engineering '
'quantities of interest. It can handle both scalar and vectorial '
'high dimensional functions, making it a useful tool also to reduce '
'the burden of computational intensive optimization tasks.'
)
setup(
name=PIPNAME,
version=VERSION,
description=DESCRIPTION,
long_description=LDESCRIPTION,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Mathematics'
],
keywords=KEYWORDS,
url=URL,
author=AUTHOR,
author_email=MAIL,
license='MIT',
packages=[IMPORTNAME],
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
zip_safe=False
)
| 30.450704 | 76 | 0.689639 |
2b3654468d8b7c4f3914d8d26f2584bda6965ed3 | 363 | py | Python | src/shell/shell.py | vkhaydarov/PlantEye | bfc4f59023cfa97012ce0e5ca1b94f97b88b6f69 | [
"MIT"
] | null | null | null | src/shell/shell.py | vkhaydarov/PlantEye | bfc4f59023cfa97012ce0e5ca1b94f97b88b6f69 | [
"MIT"
] | null | null | null | src/shell/shell.py | vkhaydarov/PlantEye | bfc4f59023cfa97012ce0e5ca1b94f97b88b6f69 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from src.configuration.config_provider import ConfigProvider
class Shell(ABC):
@abstractmethod
def import_configuration(self, config_provider: ConfigProvider):
pass
@abstractmethod
def apply_configuration(self):
pass
@abstractmethod
def attach_callback(self, callback):
pass
| 21.352941 | 68 | 0.727273 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.