hexsha
stringlengths
40
40
size
int64
2
1.02M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
245
max_stars_repo_name
stringlengths
6
130
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
245
max_issues_repo_name
stringlengths
6
130
max_issues_repo_head_hexsha
stringlengths
40
40
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
245
max_forks_repo_name
stringlengths
6
130
max_forks_repo_head_hexsha
stringlengths
40
40
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
2
1.02M
avg_line_length
float64
1
417k
max_line_length
int64
1
987k
alphanum_fraction
float64
0
1
content_no_comment
stringlengths
0
1.01M
is_comment_constant_removed
bool
1 class
is_sharp_comment_removed
bool
1 class
f70fbd91114f85dd58aec1ae1d0e5b97858e7d13
1,429
py
Python
helpers.py
rkeulemans/exercise_public
5f8020198b8b234169eea4d5e08c98344438de5d
[ "MIT" ]
null
null
null
helpers.py
rkeulemans/exercise_public
5f8020198b8b234169eea4d5e08c98344438de5d
[ "MIT" ]
null
null
null
helpers.py
rkeulemans/exercise_public
5f8020198b8b234169eea4d5e08c98344438de5d
[ "MIT" ]
null
null
null
from sympy import Rational, Symbol, latex, UnevaluatedExpr import sympy as sp import numpy as np u = lambda x : UnevaluatedExpr(x) # Helper functions def explain_add(a, b): assert(np.shape(a) == np.shape(b)) rows, columns = np.shape(a) return sp.Matrix([[Symbol(f"({latex(u(a[i,j]))} + {latex(u(b[i,j]))})") for j in range(columns)] for i in range(rows)]) def symbolic_matrix(character, rows, columns): # row or column vector if rows == 1: return sp.Matrix([[Symbol(f"{{{character}}}_{{{j+1}}}") for j in range(columns)] for i in range(rows)]) if columns == 1: return sp.Matrix([[Symbol(f"{{{character}}}_{{{i+1}}}") for j in range(columns)] for i in range(rows)]) return sp.Matrix([[Symbol(f"{{{character}}}_{{{i+1}, {j+1}}}") for j in range(columns)] for i in range(rows)]) def explain_multiply(a, b): # #rows in b == #columns in a assert(np.shape(a)[1] == np.shape(b)[0]) rows = np.shape(a)[0] columns = np.shape(b)[1] result = np.empty(shape=(rows, columns), dtype=object) for i in range(rows): row = a[i,:] for j in range(columns): column = b[:,j] zipped = zip(row, column) mapped = list(map(lambda t: f"{latex(u(t[0]))} \cdot {latex(u(t[1]))}", zipped)) s = Symbol("") result[i, j] = Symbol(" + ".join(mapped), evaluate=False) return sp.Matrix(result)
39.694444
123
0.577327
from sympy import Rational, Symbol, latex, UnevaluatedExpr import sympy as sp import numpy as np u = lambda x : UnevaluatedExpr(x) def explain_add(a, b): assert(np.shape(a) == np.shape(b)) rows, columns = np.shape(a) return sp.Matrix([[Symbol(f"({latex(u(a[i,j]))} + {latex(u(b[i,j]))})") for j in range(columns)] for i in range(rows)]) def symbolic_matrix(character, rows, columns): if rows == 1: return sp.Matrix([[Symbol(f"{{{character}}}_{{{j+1}}}") for j in range(columns)] for i in range(rows)]) if columns == 1: return sp.Matrix([[Symbol(f"{{{character}}}_{{{i+1}}}") for j in range(columns)] for i in range(rows)]) return sp.Matrix([[Symbol(f"{{{character}}}_{{{i+1}, {j+1}}}") for j in range(columns)] for i in range(rows)]) def explain_multiply(a, b): )[0]) rows = np.shape(a)[0] columns = np.shape(b)[1] result = np.empty(shape=(rows, columns), dtype=object) for i in range(rows): row = a[i,:] for j in range(columns): column = b[:,j] zipped = zip(row, column) mapped = list(map(lambda t: f"{latex(u(t[0]))} \cdot {latex(u(t[1]))}", zipped)) s = Symbol("") result[i, j] = Symbol(" + ".join(mapped), evaluate=False) return sp.Matrix(result)
true
true
f70fbf815f40c2d553b7ff2f423ff855c4ec22d7
5,650
py
Python
space/station.py
galactics/space-command
496b054883c6464bcd8d73d72c8145ae80606336
[ "MIT" ]
14
2019-03-22T08:12:39.000Z
2022-01-25T17:45:26.000Z
space/station.py
galactics/space-command
496b054883c6464bcd8d73d72c8145ae80606336
[ "MIT" ]
3
2019-11-07T09:34:14.000Z
2019-12-29T14:38:17.000Z
space/station.py
galactics/space-command
496b054883c6464bcd8d73d72c8145ae80606336
[ "MIT" ]
1
2019-12-12T00:48:39.000Z
2019-12-12T00:48:39.000Z
import logging from numpy import degrees, pi, radians from beyond.frames import get_frame, create_station from beyond.errors import UnknownFrameError from .wspace import ws from .utils import dms2deg, deg2dms log = logging.getLogger(__name__) class StationDb: def __new__(cls): if not hasattr(cls, "_instance"): # Singleton cls._instance = super().__new__(cls) return cls._instance @classmethod def list(cls): self = cls() if not hasattr(self, "_stations"): self._stations = {} for abbr, charact in ws.config["stations"].items(): charact["parent_frame"] = get_frame(charact["parent_frame"]) full_name = charact.pop("name") mask = charact.get("mask") if mask: # reverse direction of the mask to put it in counterclockwise # to comply with the mathematical definition charact["mask"] = ( (2 * pi - radians(mask["azims"][::-1])), radians(mask["elevs"][::-1]), ) # Deletion of all unknown characteristics from the charact dict # and conversion to object attributes (they may be used by addons) extra_charact = {} for key in list(charact.keys()): if key not in ("parent_frame", "latlonalt", "mask"): extra_charact[key] = charact.pop(key) self._stations[abbr] = create_station(abbr, **charact) self._stations[abbr].abbr = abbr self._stations[abbr].full_name = full_name for key, value in extra_charact.items(): setattr(self._stations[abbr], key, value) return self._stations @classmethod def get(cls, name): self = cls() try: return get_frame(name) except UnknownFrameError: if name not in self.list().keys(): raise return self.list()[name] @classmethod def save(cls, station): self = cls() ws.config["stations"].update(station) ws.config.save() if hasattr(self, "_stations"): del self._stations def wshook(cmd, *args, **kwargs): if cmd in ("init", "full-init"): name = "TLS" ws.config.setdefault("stations", {}) try: StationDb.get(name) except UnknownFrameError: StationDb.save( { name: { "latlonalt": [43.604482, 1.443962, 172.0], "name": "Toulouse", "parent_frame": "WGS84", } } ) log.info("Station {} 
created".format(name)) else: log.warning("Station {} already exists".format(name)) def space_station(*argv): """Stations management Usage: space-station list [--map] [<abbr>] space-station create <abbr> <name> <lat> <lon> <alt> Options list List available stations create Interactively create a station <abbr> Abbreviation <name> Name of the station <lat> Latitude in degrees <lon> Longitude in degrees <alt> Altitude in meters -m, --map Display the station on a map Latitude and longitude both accept degrees as float or as degrees, minutes and seconds of arc (e.g. 43°25"12') """ from pathlib import Path import matplotlib.pyplot as plt from .utils import docopt from .map.background import set_background args = docopt(space_station.__doc__) station = StationDb() if args["create"]: abbr = args["<abbr>"] name = args["<name>"] latitude = args["<lat>"] longitude = args["<lon>"] altitude = args["<alt>"] if "°" in latitude: latitude = dms2deg(latitude) else: latitude = float(latitude) if "°" in longitude: longitude = dms2deg(longitude) else: longitude = float(longitude) altitude = float(altitude) log.info("Creation of station '{}' ({})".format(name, abbr)) log.debug( "{} {}, altitude : {} m".format( deg2dms(latitude, "lat"), deg2dms(longitude, "lon"), altitude ) ) StationDb.save( { abbr: { "name": name, "latlonalt": (latitude, longitude, altitude), "parent_frame": "WGS84", } } ) else: stations = [] for station in sorted(station.list().values(), key=lambda x: x.abbr): if args["<abbr>"] and station.abbr != args["<abbr>"]: continue print(station.name) print("-" * len(station.name)) lat, lon, alt = station.latlonalt lat, lon = degrees([lat, lon]) print("name: {}".format(station.full_name)) print( "altitude: {} m\nposition: {}, {}".format( alt, deg2dms(lat, "lat"), deg2dms(lon, "lon") ) ) print() stations.append((station.name, lat, lon)) if args["--map"]: plt.figure(figsize=(15.2, 8.2)) set_background() plt.subplots_adjust(left=0.02, right=0.98, top=0.98, bottom=0.02) plt.show()
28.25
82
0.507965
import logging from numpy import degrees, pi, radians from beyond.frames import get_frame, create_station from beyond.errors import UnknownFrameError from .wspace import ws from .utils import dms2deg, deg2dms log = logging.getLogger(__name__) class StationDb: def __new__(cls): if not hasattr(cls, "_instance"): cls._instance = super().__new__(cls) return cls._instance @classmethod def list(cls): self = cls() if not hasattr(self, "_stations"): self._stations = {} for abbr, charact in ws.config["stations"].items(): charact["parent_frame"] = get_frame(charact["parent_frame"]) full_name = charact.pop("name") mask = charact.get("mask") if mask: charact["mask"] = ( (2 * pi - radians(mask["azims"][::-1])), radians(mask["elevs"][::-1]), ) extra_charact = {} for key in list(charact.keys()): if key not in ("parent_frame", "latlonalt", "mask"): extra_charact[key] = charact.pop(key) self._stations[abbr] = create_station(abbr, **charact) self._stations[abbr].abbr = abbr self._stations[abbr].full_name = full_name for key, value in extra_charact.items(): setattr(self._stations[abbr], key, value) return self._stations @classmethod def get(cls, name): self = cls() try: return get_frame(name) except UnknownFrameError: if name not in self.list().keys(): raise return self.list()[name] @classmethod def save(cls, station): self = cls() ws.config["stations"].update(station) ws.config.save() if hasattr(self, "_stations"): del self._stations def wshook(cmd, *args, **kwargs): if cmd in ("init", "full-init"): name = "TLS" ws.config.setdefault("stations", {}) try: StationDb.get(name) except UnknownFrameError: StationDb.save( { name: { "latlonalt": [43.604482, 1.443962, 172.0], "name": "Toulouse", "parent_frame": "WGS84", } } ) log.info("Station {} created".format(name)) else: log.warning("Station {} already exists".format(name)) def space_station(*argv): from pathlib import Path import matplotlib.pyplot as plt from .utils import docopt from .map.background import set_background args = 
docopt(space_station.__doc__) station = StationDb() if args["create"]: abbr = args["<abbr>"] name = args["<name>"] latitude = args["<lat>"] longitude = args["<lon>"] altitude = args["<alt>"] if "°" in latitude: latitude = dms2deg(latitude) else: latitude = float(latitude) if "°" in longitude: longitude = dms2deg(longitude) else: longitude = float(longitude) altitude = float(altitude) log.info("Creation of station '{}' ({})".format(name, abbr)) log.debug( "{} {}, altitude : {} m".format( deg2dms(latitude, "lat"), deg2dms(longitude, "lon"), altitude ) ) StationDb.save( { abbr: { "name": name, "latlonalt": (latitude, longitude, altitude), "parent_frame": "WGS84", } } ) else: stations = [] for station in sorted(station.list().values(), key=lambda x: x.abbr): if args["<abbr>"] and station.abbr != args["<abbr>"]: continue print(station.name) print("-" * len(station.name)) lat, lon, alt = station.latlonalt lat, lon = degrees([lat, lon]) print("name: {}".format(station.full_name)) print( "altitude: {} m\nposition: {}, {}".format( alt, deg2dms(lat, "lat"), deg2dms(lon, "lon") ) ) print() stations.append((station.name, lat, lon)) if args["--map"]: plt.figure(figsize=(15.2, 8.2)) set_background() plt.subplots_adjust(left=0.02, right=0.98, top=0.98, bottom=0.02) plt.show()
true
true
f70fbff6ebbce086af0b72f88cdfefd2aaa4e033
2,561
py
Python
fieldbook/client.py
CSIS-iLab/fieldbook-python
7dc5c26eab9675b4b3421ef1c943668d0616372e
[ "0BSD" ]
null
null
null
fieldbook/client.py
CSIS-iLab/fieldbook-python
7dc5c26eab9675b4b3421ef1c943668d0616372e
[ "0BSD" ]
null
null
null
fieldbook/client.py
CSIS-iLab/fieldbook-python
7dc5c26eab9675b4b3421ef1c943668d0616372e
[ "0BSD" ]
1
2021-04-15T17:14:19.000Z
2021-04-15T17:14:19.000Z
# -*- coding: utf-8 -*- import requests from urllib.parse import urljoin from os import getenv import types class Fieldbook(object): """ Client for Fieldbook API: https://github.com/fieldbook/api-docs Initialize with a fieldbook_id and optionally the api key (name) and secret. """ BASE_URL = "https://api.fieldbook.com" API_VERSION = "v1" def __init__(self, book_id, key=None, secret=None): super(Fieldbook, self).__init__() self._key = key if key else getenv('FIELDBOOK_API_KEY', None) self._secret = secret if secret else getenv('FIELDBOOK_API_SECRET', None) self.book_id = book_id self.session = requests.Session() if self._key and self._secret: self.set_auth(self._key, self._secret) def set_auth(self, key, secret): self._key = key self._secret = secret self.session.auth = (self._key, self._secret) def _make_sheet_endpoints(self, endpoint_names): def make_endpoint(name): def sheet_endpoint(self, **kwargs): return self._get(name, **kwargs) return sheet_endpoint for name in endpoint_names: endpoint = make_endpoint(name) endpoint.__doc__ = "Query '{}' sheet.".format(name) setattr(self, name, types.MethodType(endpoint, self)) def _make_url(self, sheet_name=None): return urljoin(Fieldbook.BASE_URL, "/".join((Fieldbook.API_VERSION, self.book_id, sheet_name or ''))) def _get(self, sheet_name=None, **kwargs): if not self.session.auth and self._key and self._secret: self.set_auth(self._key, self._secret) url = self._make_url(sheet_name=sheet_name) if 'row_id' in kwargs: row_id = str(kwargs.pop('row_id')) url = '{}/{}'.format(url, row_id) resp = self.session.get(url, params=kwargs) if not resp.ok: raise resp.raise_for_status() return resp.json() def sheets(self, make_endpoints=False): """Returns a list of sheets associated with a book""" sheets = self._get() if make_endpoints: self._make_sheet_endpoints(sheets) return sheets def list(self, sheet_name, **kwargs): """Query a named sheet""" return self._get(sheet_name=sheet_name, **kwargs) def get(self, sheet_name, row_id, 
**kwargs): """Retrieve a row from a sheet by its id""" kwargs['row_id'] = row_id return self._get(sheet_name=sheet_name, **kwargs)
36.070423
109
0.627099
import requests from urllib.parse import urljoin from os import getenv import types class Fieldbook(object): BASE_URL = "https://api.fieldbook.com" API_VERSION = "v1" def __init__(self, book_id, key=None, secret=None): super(Fieldbook, self).__init__() self._key = key if key else getenv('FIELDBOOK_API_KEY', None) self._secret = secret if secret else getenv('FIELDBOOK_API_SECRET', None) self.book_id = book_id self.session = requests.Session() if self._key and self._secret: self.set_auth(self._key, self._secret) def set_auth(self, key, secret): self._key = key self._secret = secret self.session.auth = (self._key, self._secret) def _make_sheet_endpoints(self, endpoint_names): def make_endpoint(name): def sheet_endpoint(self, **kwargs): return self._get(name, **kwargs) return sheet_endpoint for name in endpoint_names: endpoint = make_endpoint(name) endpoint.__doc__ = "Query '{}' sheet.".format(name) setattr(self, name, types.MethodType(endpoint, self)) def _make_url(self, sheet_name=None): return urljoin(Fieldbook.BASE_URL, "/".join((Fieldbook.API_VERSION, self.book_id, sheet_name or ''))) def _get(self, sheet_name=None, **kwargs): if not self.session.auth and self._key and self._secret: self.set_auth(self._key, self._secret) url = self._make_url(sheet_name=sheet_name) if 'row_id' in kwargs: row_id = str(kwargs.pop('row_id')) url = '{}/{}'.format(url, row_id) resp = self.session.get(url, params=kwargs) if not resp.ok: raise resp.raise_for_status() return resp.json() def sheets(self, make_endpoints=False): sheets = self._get() if make_endpoints: self._make_sheet_endpoints(sheets) return sheets def list(self, sheet_name, **kwargs): return self._get(sheet_name=sheet_name, **kwargs) def get(self, sheet_name, row_id, **kwargs): kwargs['row_id'] = row_id return self._get(sheet_name=sheet_name, **kwargs)
true
true
f70fc008f539426def15dab6f35c0b85e1508550
17,667
py
Python
preprocess/funsd/preprocess_2nd.py
clovaai/bros
eb3aa51ad7348444bafb06be64c4604182275edd
[ "Apache-2.0" ]
36
2021-12-15T04:02:14.000Z
2022-03-24T01:21:00.000Z
preprocess/funsd/preprocess_2nd.py
clovaai/bros
eb3aa51ad7348444bafb06be64c4604182275edd
[ "Apache-2.0" ]
6
2022-01-20T11:18:08.000Z
2022-03-30T00:19:17.000Z
preprocess/funsd/preprocess_2nd.py
clovaai/bros
eb3aa51ad7348444bafb06be64c4604182275edd
[ "Apache-2.0" ]
2
2022-01-29T01:55:58.000Z
2022-02-09T07:10:42.000Z
""" BROS Copyright 2022-present NAVER Corp. Apache License v2.0 Do 2nd preprocess on top of the result of the 'preprocess.sh' file. Reference: https://github.com/microsoft/unilm/blob/master/layoutlm/deprecated/examples/seq_labeling/run_seq_labeling.py """ import json import os from collections import Counter from tqdm import tqdm from transformers import BertTokenizer MAX_SEQ_LENGTH = 512 MODEL_TYPE = "bert" VOCA = "bert-base-uncased" INPUT_PATH = "./data" OUTPUT_PATH = "../../datasets/funsd" os.makedirs(OUTPUT_PATH, exist_ok=True) os.makedirs(os.path.join(OUTPUT_PATH, "preprocessed"), exist_ok=True) def main(): for dataset_split in ["train", "val"]: print(f"dataset_split: {dataset_split}") do_2nd_preprocess(dataset_split) os.system(f"cp -r {os.path.join(INPUT_PATH, 'training_data')} {OUTPUT_PATH}") os.system(f"cp -r {os.path.join(INPUT_PATH, 'testing_data')} {OUTPUT_PATH}") os.system(f"cp {os.path.join(INPUT_PATH, 'labels.txt')} {OUTPUT_PATH}") def do_2nd_preprocess(dataset_split): label_fpath = os.path.join(INPUT_PATH, "labels.txt") labels = get_labels(label_fpath) tokenizer = BertTokenizer.from_pretrained(VOCA, do_lower_case=True) cls_token_id = tokenizer.convert_tokens_to_ids("[CLS]") sep_token_id = tokenizer.convert_tokens_to_ids("[SEP]") pad_token_id = tokenizer.convert_tokens_to_ids("[PAD]") ignore_index = -100 if dataset_split == "train": mode = "train" elif dataset_split == "val": mode = "test" else: raise ValueError(f"Invalid dataset_split={dataset_split}") examples = read_examples_from_file(INPUT_PATH, mode) features = convert_examples_to_features( examples, labels, MAX_SEQ_LENGTH, tokenizer, cls_token_at_end=bool(MODEL_TYPE in ["xlnet"]), # xlnet has a cls token at the end cls_token=tokenizer.cls_token, cls_token_segment_id=2 if MODEL_TYPE in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=bool(MODEL_TYPE in ["roberta"]), # roberta uses an extra separator b/w pairs of sentences, cf. 
github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805 pad_on_left=bool(MODEL_TYPE in ["xlnet"]), # pad on the left for xlnet pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=4 if MODEL_TYPE in ["xlnet"] else 0, pad_token_label_id=ignore_index, ) # Save image ocr files image_cnter = Counter() preprocessed_fnames = [] for example, feature in tqdm(zip(examples, features), total=len(examples)): # Example: guid, words, labels, boxes, actual_bboxes, file_name, page_size # Feature: input_ids, input_mask, segment_ids, label_ids, # boxes, actual_bboxes, file_name, page_size this_file_name = "{}_{}.json".format( example.file_name[: example.file_name.rfind(".")], image_cnter[example.file_name], ) image_cnter[example.file_name] += 1 data_obj = {} # meta data_obj["meta"] = {} # data_obj["meta"]["image_size"] # = example.page_size[::-1] + [3] # [height, width, rgb?] height, width = example.page_size[::-1] data_obj["meta"]["imageSize"] = {"width": width, "height": height} data_obj["meta"]["voca"] = VOCA if mode == "train": data_obj["meta"]["image_path"] = os.path.join( "training_data", "images", example.file_name ) elif mode == "test": data_obj["meta"]["image_path"] = os.path.join( "testing_data", "images", example.file_name ) else: raise ValueError(f"Unknown mode={mode}") # words # text, tokens, boundingBox data_obj["words"] = [] this_input_ids = [] for word, bb in zip(example.words, example.actual_bboxes): word_tokens = [] for splitted_word in word.split(): word_tokens.append( tokenizer.convert_tokens_to_ids(tokenizer.tokenize(splitted_word)) ) tokens = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(word)) word_obj = { "text": word, "tokens": tokens, "boundingBox": [ [bb[0], bb[1]], [bb[2], bb[1]], [bb[2], bb[3]], [bb[0], bb[3]], ], } data_obj["words"].append(word_obj) this_input_ids.extend(tokens) if VOCA == "bert-base-uncased": feature_input_ids = feature.input_ids assert feature_input_ids[0] == cls_token_id 
feature_input_ids = feature_input_ids[ 1 : feature_input_ids.index(sep_token_id) ] assert feature_input_ids == this_input_ids else: raise NotImplementedError # masks, labels data_obj["parse"] = {} if VOCA == "bert-base-uncased": data_obj["parse"]["seq_len"] = sum(feature.input_mask) data_obj["parse"]["input_ids"] = feature.input_ids data_obj["parse"]["input_mask"] = feature.input_mask data_obj["parse"]["label_ids"] = feature.label_ids else: raise NotImplementedError # Save file name to list preprocessed_fnames.append(os.path.join("preprocessed", this_file_name)) # Save to file data_obj_file = os.path.join(OUTPUT_PATH, "preprocessed", this_file_name) with open(data_obj_file, "w", encoding="utf-8") as fp: json.dump(data_obj, fp, ensure_ascii=False) # Save file name list file preprocessed_filelist_file = os.path.join( OUTPUT_PATH, f"preprocessed_files_{dataset_split}.txt" ) with open(preprocessed_filelist_file, "w", encoding="utf-8") as fp: fp.write("\n".join(preprocessed_fnames)) def get_labels(path): with open(path, "r") as f: labels = f.read().splitlines() if "O" not in labels: labels = ["O"] + labels return labels class InputExample(object): """A single training/test example for token classification.""" def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name, page_size): """Constructs a InputExample. Args: guid: Unique id for the example. words: list. The words of the sequence. labels: (Optional) list. The labels for each word of the sequence. This should be specified for train and dev examples, but not for test examples. 
""" self.guid = guid self.words = words self.labels = labels self.boxes = boxes self.actual_bboxes = actual_bboxes self.file_name = file_name self.page_size = page_size def read_examples_from_file(data_dir, mode): file_path = os.path.join(data_dir, "{}.txt".format(mode)) box_file_path = os.path.join(data_dir, "{}_box.txt".format(mode)) image_file_path = os.path.join(data_dir, "{}_image.txt".format(mode)) guid_index = 1 examples = [] with open(file_path, encoding="utf-8") as f, open( box_file_path, encoding="utf-8" ) as fb, open(image_file_path, encoding="utf-8") as fi: words = [] boxes = [] actual_bboxes = [] file_name = None page_size = None labels = [] for line, bline, iline in zip(f, fb, fi): if line.startswith("-DOCSTART-") or line == "" or line == "\n": if words: examples.append( InputExample( guid="{}-{}".format(mode, guid_index), words=words, labels=labels, boxes=boxes, actual_bboxes=actual_bboxes, file_name=file_name, page_size=page_size, ) ) guid_index += 1 words = [] boxes = [] actual_bboxes = [] file_name = None page_size = None labels = [] else: splits = line.split("\t") bsplits = bline.split("\t") isplits = iline.split("\t") assert len(splits) == 2 assert len(bsplits) == 2 assert len(isplits) == 4 assert splits[0] == bsplits[0] words.append(splits[0]) if len(splits) > 1: labels.append(splits[-1].replace("\n", "")) box = bsplits[-1].replace("\n", "") box = [int(b) for b in box.split()] boxes.append(box) actual_bbox = [int(b) for b in isplits[1].split()] actual_bboxes.append(actual_bbox) page_size = [int(i) for i in isplits[2].split()] file_name = isplits[3].strip() else: # Examples could have no label for mode = "test" labels.append("O") if words: examples.append( InputExample( guid="%s-%d".format(mode, guid_index), words=words, labels=labels, boxes=boxes, actual_bboxes=actual_bboxes, file_name=file_name, page_size=page_size, ) ) return examples class InputFeatures(object): """A single set of features of data.""" def __init__( self, input_ids, 
input_mask, segment_ids, label_ids, boxes, actual_bboxes, file_name, page_size, ): assert ( 0 <= all(boxes) <= 1000 ), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format( boxes ) self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_ids = label_ids self.boxes = boxes self.actual_bboxes = actual_bboxes self.file_name = file_name self.page_size = page_size def convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_segment_id=0, pad_token_label_id=-1, sequence_a_segment_id=0, mask_padding_with_zero=True, ): """Loads a data file into a list of `InputBatch`s `cls_token_at_end` define the location of the CLS token: - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP] - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS] `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet) """ label_map = {label: i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in enumerate(examples): file_name = example.file_name page_size = example.page_size width, height = page_size # if ex_index % 10000 == 0: # print("Writing example {} of {}".format(ex_index, len(examples))) tokens = [] token_boxes = [] actual_bboxes = [] label_ids = [] for word, label, box, actual_bbox in zip( example.words, example.labels, example.boxes, example.actual_bboxes ): word_tokens = tokenizer.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) actual_bboxes.extend([actual_bbox] * len(word_tokens)) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend( [label_map[label]] + [pad_token_label_id] * 
(len(word_tokens) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. special_tokens_count = 3 if sep_token_extra else 2 if len(tokens) > max_seq_length - special_tokens_count: tokens = tokens[: (max_seq_length - special_tokens_count)] token_boxes = token_boxes[: (max_seq_length - special_tokens_count)] actual_bboxes = actual_bboxes[: (max_seq_length - special_tokens_count)] label_ids = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens += [sep_token] token_boxes += [sep_token_box] actual_bboxes += [[0, 0, width, height]] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] token_boxes += [sep_token_box] actual_bboxes += [[0, 0, width, height]] label_ids += [pad_token_label_id] segment_ids = [sequence_a_segment_id] * len(tokens) if cls_token_at_end: tokens += [cls_token] token_boxes += [cls_token_box] actual_bboxes += [[0, 0, width, height]] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: tokens = [cls_token] + tokens token_boxes = [cls_token_box] + token_boxes actual_bboxes = [[0, 0, width, height]] + actual_bboxes label_ids = [pad_token_label_id] + label_ids segment_ids = [cls_token_segment_id] + segment_ids input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. 
padding_length = max_seq_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids input_mask = ( [0 if mask_padding_with_zero else 1] * padding_length ) + input_mask segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids label_ids = ([pad_token_label_id] * padding_length) + label_ids token_boxes = ([pad_token_box] * padding_length) + token_boxes else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length token_boxes += [pad_token_box] * padding_length assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(label_ids) == max_seq_length assert len(token_boxes) == max_seq_length # if ex_index < 5: # print("*** Example ***") # print("guid: {}".format(example.guid)) # print("tokens: {}".format(" ".join([str(x) for x in tokens]))) # print("input_ids: {}".format(" ".join([str(x) for x in input_ids]))) # print("input_mask: {}".format(" ".join([str(x) for x in input_mask]))) # print("segment_ids: {}".format(" ".join([str(x) for x in segment_ids]))) # print("label_ids: {}".format(" ".join([str(x) for x in label_ids]))) # print("boxes: {}".format(" ".join([str(x) for x in token_boxes]))) # print("actual_bboxes: {}".format(" ".join([str(x) for x in actual_bboxes]))) features.append( InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids, boxes=token_boxes, actual_bboxes=actual_bboxes, file_name=file_name, page_size=page_size, ) ) return features if __name__ == "__main__": main()
37.993548
144
0.574291
import json import os from collections import Counter from tqdm import tqdm from transformers import BertTokenizer MAX_SEQ_LENGTH = 512 MODEL_TYPE = "bert" VOCA = "bert-base-uncased" INPUT_PATH = "./data" OUTPUT_PATH = "../../datasets/funsd" os.makedirs(OUTPUT_PATH, exist_ok=True) os.makedirs(os.path.join(OUTPUT_PATH, "preprocessed"), exist_ok=True) def main(): for dataset_split in ["train", "val"]: print(f"dataset_split: {dataset_split}") do_2nd_preprocess(dataset_split) os.system(f"cp -r {os.path.join(INPUT_PATH, 'training_data')} {OUTPUT_PATH}") os.system(f"cp -r {os.path.join(INPUT_PATH, 'testing_data')} {OUTPUT_PATH}") os.system(f"cp {os.path.join(INPUT_PATH, 'labels.txt')} {OUTPUT_PATH}") def do_2nd_preprocess(dataset_split): label_fpath = os.path.join(INPUT_PATH, "labels.txt") labels = get_labels(label_fpath) tokenizer = BertTokenizer.from_pretrained(VOCA, do_lower_case=True) cls_token_id = tokenizer.convert_tokens_to_ids("[CLS]") sep_token_id = tokenizer.convert_tokens_to_ids("[SEP]") pad_token_id = tokenizer.convert_tokens_to_ids("[PAD]") ignore_index = -100 if dataset_split == "train": mode = "train" elif dataset_split == "val": mode = "test" else: raise ValueError(f"Invalid dataset_split={dataset_split}") examples = read_examples_from_file(INPUT_PATH, mode) features = convert_examples_to_features( examples, labels, MAX_SEQ_LENGTH, tokenizer, cls_token_at_end=bool(MODEL_TYPE in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if MODEL_TYPE in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=bool(MODEL_TYPE in ["roberta"]), pad_on_left=bool(MODEL_TYPE in ["xlnet"]), pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=4 if MODEL_TYPE in ["xlnet"] else 0, pad_token_label_id=ignore_index, ) image_cnter = Counter() preprocessed_fnames = [] for example, feature in tqdm(zip(examples, features), total=len(examples)): this_file_name = "{}_{}.json".format( example.file_name[: 
example.file_name.rfind(".")], image_cnter[example.file_name], ) image_cnter[example.file_name] += 1 data_obj = {} data_obj["meta"] = {} = example.page_size[::-1] data_obj["meta"]["imageSize"] = {"width": width, "height": height} data_obj["meta"]["voca"] = VOCA if mode == "train": data_obj["meta"]["image_path"] = os.path.join( "training_data", "images", example.file_name ) elif mode == "test": data_obj["meta"]["image_path"] = os.path.join( "testing_data", "images", example.file_name ) else: raise ValueError(f"Unknown mode={mode}") data_obj["words"] = [] this_input_ids = [] for word, bb in zip(example.words, example.actual_bboxes): word_tokens = [] for splitted_word in word.split(): word_tokens.append( tokenizer.convert_tokens_to_ids(tokenizer.tokenize(splitted_word)) ) tokens = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(word)) word_obj = { "text": word, "tokens": tokens, "boundingBox": [ [bb[0], bb[1]], [bb[2], bb[1]], [bb[2], bb[3]], [bb[0], bb[3]], ], } data_obj["words"].append(word_obj) this_input_ids.extend(tokens) if VOCA == "bert-base-uncased": feature_input_ids = feature.input_ids assert feature_input_ids[0] == cls_token_id feature_input_ids = feature_input_ids[ 1 : feature_input_ids.index(sep_token_id) ] assert feature_input_ids == this_input_ids else: raise NotImplementedError data_obj["parse"] = {} if VOCA == "bert-base-uncased": data_obj["parse"]["seq_len"] = sum(feature.input_mask) data_obj["parse"]["input_ids"] = feature.input_ids data_obj["parse"]["input_mask"] = feature.input_mask data_obj["parse"]["label_ids"] = feature.label_ids else: raise NotImplementedError preprocessed_fnames.append(os.path.join("preprocessed", this_file_name)) data_obj_file = os.path.join(OUTPUT_PATH, "preprocessed", this_file_name) with open(data_obj_file, "w", encoding="utf-8") as fp: json.dump(data_obj, fp, ensure_ascii=False) preprocessed_filelist_file = os.path.join( OUTPUT_PATH, f"preprocessed_files_{dataset_split}.txt" ) with open(preprocessed_filelist_file, 
"w", encoding="utf-8") as fp: fp.write("\n".join(preprocessed_fnames)) def get_labels(path): with open(path, "r") as f: labels = f.read().splitlines() if "O" not in labels: labels = ["O"] + labels return labels class InputExample(object): def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name, page_size): self.guid = guid self.words = words self.labels = labels self.boxes = boxes self.actual_bboxes = actual_bboxes self.file_name = file_name self.page_size = page_size def read_examples_from_file(data_dir, mode): file_path = os.path.join(data_dir, "{}.txt".format(mode)) box_file_path = os.path.join(data_dir, "{}_box.txt".format(mode)) image_file_path = os.path.join(data_dir, "{}_image.txt".format(mode)) guid_index = 1 examples = [] with open(file_path, encoding="utf-8") as f, open( box_file_path, encoding="utf-8" ) as fb, open(image_file_path, encoding="utf-8") as fi: words = [] boxes = [] actual_bboxes = [] file_name = None page_size = None labels = [] for line, bline, iline in zip(f, fb, fi): if line.startswith("-DOCSTART-") or line == "" or line == "\n": if words: examples.append( InputExample( guid="{}-{}".format(mode, guid_index), words=words, labels=labels, boxes=boxes, actual_bboxes=actual_bboxes, file_name=file_name, page_size=page_size, ) ) guid_index += 1 words = [] boxes = [] actual_bboxes = [] file_name = None page_size = None labels = [] else: splits = line.split("\t") bsplits = bline.split("\t") isplits = iline.split("\t") assert len(splits) == 2 assert len(bsplits) == 2 assert len(isplits) == 4 assert splits[0] == bsplits[0] words.append(splits[0]) if len(splits) > 1: labels.append(splits[-1].replace("\n", "")) box = bsplits[-1].replace("\n", "") box = [int(b) for b in box.split()] boxes.append(box) actual_bbox = [int(b) for b in isplits[1].split()] actual_bboxes.append(actual_bbox) page_size = [int(i) for i in isplits[2].split()] file_name = isplits[3].strip() else: labels.append("O") if words: examples.append( InputExample( 
guid="%s-%d".format(mode, guid_index), words=words, labels=labels, boxes=boxes, actual_bboxes=actual_bboxes, file_name=file_name, page_size=page_size, ) ) return examples class InputFeatures(object): def __init__( self, input_ids, input_mask, segment_ids, label_ids, boxes, actual_bboxes, file_name, page_size, ): assert ( 0 <= all(boxes) <= 1000 ), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format( boxes ) self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_ids = label_ids self.boxes = boxes self.actual_bboxes = actual_bboxes self.file_name = file_name self.page_size = page_size def convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_segment_id=0, pad_token_label_id=-1, sequence_a_segment_id=0, mask_padding_with_zero=True, ): label_map = {label: i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in enumerate(examples): file_name = example.file_name page_size = example.page_size width, height = page_size tokens = [] token_boxes = [] actual_bboxes = [] label_ids = [] for word, label, box, actual_bbox in zip( example.words, example.labels, example.boxes, example.actual_bboxes ): word_tokens = tokenizer.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) actual_bboxes.extend([actual_bbox] * len(word_tokens)) label_ids.extend( [label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1) ) special_tokens_count = 3 if sep_token_extra else 2 if len(tokens) > max_seq_length - special_tokens_count: tokens = tokens[: (max_seq_length - special_tokens_count)] token_boxes = token_boxes[: (max_seq_length - special_tokens_count)] actual_bboxes = actual_bboxes[: 
(max_seq_length - special_tokens_count)] label_ids = label_ids[: (max_seq_length - special_tokens_count)] [sep_token] token_boxes += [sep_token_box] actual_bboxes += [[0, 0, width, height]] label_ids += [pad_token_label_id] if sep_token_extra: tokens += [sep_token] token_boxes += [sep_token_box] actual_bboxes += [[0, 0, width, height]] label_ids += [pad_token_label_id] segment_ids = [sequence_a_segment_id] * len(tokens) if cls_token_at_end: tokens += [cls_token] token_boxes += [cls_token_box] actual_bboxes += [[0, 0, width, height]] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: tokens = [cls_token] + tokens token_boxes = [cls_token_box] + token_boxes actual_bboxes = [[0, 0, width, height]] + actual_bboxes label_ids = [pad_token_label_id] + label_ids segment_ids = [cls_token_segment_id] + segment_ids input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) padding_length = max_seq_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids input_mask = ( [0 if mask_padding_with_zero else 1] * padding_length ) + input_mask segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids label_ids = ([pad_token_label_id] * padding_length) + label_ids token_boxes = ([pad_token_box] * padding_length) + token_boxes else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length token_boxes += [pad_token_box] * padding_length assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(label_ids) == max_seq_length assert len(token_boxes) == max_seq_length features.append( InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids, boxes=token_boxes, 
actual_bboxes=actual_bboxes, file_name=file_name, page_size=page_size, ) ) return features if __name__ == "__main__": main()
true
true
f70fc144d0d4dade68af5d8aa612fa32ea819049
683
py
Python
tests/ast/nodes/test_evaluate_subscript.py
andrelfpinto/vyper
d9b73846aa14a6019faa4126ec7608acd05e480d
[ "Apache-2.0" ]
1
2020-07-04T01:47:26.000Z
2020-07-04T01:47:26.000Z
tests/ast/nodes/test_evaluate_subscript.py
erdnaag/vyper
22bef3a4b4161db18c7831041e20b917984cff83
[ "Apache-2.0" ]
null
null
null
tests/ast/nodes/test_evaluate_subscript.py
erdnaag/vyper
22bef3a4b4161db18c7831041e20b917984cff83
[ "Apache-2.0" ]
null
null
null
import pytest from hypothesis import given, settings from hypothesis import strategies as st from vyper import ast as vy_ast @pytest.mark.fuzzing @settings(max_examples=50, deadline=1000) @given( idx=st.integers(min_value=0, max_value=9), array=st.lists(st.integers(), min_size=10, max_size=10), ) def test_subscript(get_contract, array, idx): source = """ @public def foo(array: int128[10], idx: uint256) -> int128: return array[idx] """ contract = get_contract(source) vyper_ast = vy_ast.parse_to_ast(f"{array}[{idx}]") old_node = vyper_ast.body[0].value new_node = old_node.evaluate() assert contract.foo(array, idx) == new_node.value
25.296296
60
0.710102
import pytest from hypothesis import given, settings from hypothesis import strategies as st from vyper import ast as vy_ast @pytest.mark.fuzzing @settings(max_examples=50, deadline=1000) @given( idx=st.integers(min_value=0, max_value=9), array=st.lists(st.integers(), min_size=10, max_size=10), ) def test_subscript(get_contract, array, idx): source = """ @public def foo(array: int128[10], idx: uint256) -> int128: return array[idx] """ contract = get_contract(source) vyper_ast = vy_ast.parse_to_ast(f"{array}[{idx}]") old_node = vyper_ast.body[0].value new_node = old_node.evaluate() assert contract.foo(array, idx) == new_node.value
true
true
f70fc1a9600cd747693c17cbde32cc3ac240198d
1,331
py
Python
emsm/plugins/__init__.py
KronK0321/emsm
2d8882003ff6d688cd4074dcce17f3171f99a69f
[ "MIT" ]
82
2015-02-17T19:26:51.000Z
2022-03-30T20:13:43.000Z
emsm/plugins/__init__.py
KronK0321/emsm
2d8882003ff6d688cd4074dcce17f3171f99a69f
[ "MIT" ]
55
2015-01-01T19:49:25.000Z
2021-06-11T19:45:26.000Z
emsm/plugins/__init__.py
SilkAndSlug/emsm
2c1cdb305205942e797fdf47fd030c87080d19f9
[ "MIT" ]
32
2015-01-15T11:47:04.000Z
2021-12-19T21:49:20.000Z
#!/usr/bin/env python3 # The MIT License (MIT) # # Copyright (c) 2014-2018 <see AUTHORS.txt> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ This package contains the plugins, delivered with the EMSM. The instance folder contains also a ``plugins`` directory, where the user can place plugins he'd like to run. """
42.935484
79
0.768595
true
true
f70fc1dd54c95480d8d9cf4c427a8b9c598266f9
6,345
py
Python
grow/translations/locales.py
matthiasrohmer/grow
88fae5026040ad0f7dd9260ee290cebbe49b39d7
[ "MIT" ]
1
2019-01-25T17:00:42.000Z
2019-01-25T17:00:42.000Z
grow/translations/locales.py
kmcnellis/grow
4787f5a01681ef0800e9b4388a56cdbc48209368
[ "MIT" ]
null
null
null
grow/translations/locales.py
kmcnellis/grow
4787f5a01681ef0800e9b4388a56cdbc48209368
[ "MIT" ]
null
null
null
from babel import localedata from grow.pods import errors from grow.pods import messages import pickle import os import babel import re class Locales(object): def __init__(self, pod): self.pod = pod def list_groups(self): if 'locales' not in self.pod.yaml: return [] return self.pod.yaml['locales'].keys() def get_regions(self, group_name='default'): if 'regions' not in self.pod.yaml: return [] try: return self.pod.yaml['locales'][group_name].get('regions', []) except errors.PodConfigurationError: return [] def get_languages(self, group_name='default'): if 'locales' not in self.pod.yaml: return [] try: return self.pod.yaml['locales'][group_name].get('languages', []) except errors.PodConfigurationError: return [] def to_message(self): message = messages.LocalesMessage() message.groups = [] for group_name in self.list_groups(): group_message = messages.LocaleGroupMessage() group_message.group_name = group_name group_message.regions = self.get_regions(group_name) group_message.languages = self.get_languages(group_name) message.groups.append(group_message) return message class Locale(babel.Locale): RTL_REGEX = re.compile('^(he|ar|fa|ur)(\W|$)') _alias = None def __init__(self, language, *args, **kwargs): # Normalize from "de_de" to "de_DE" for case-sensitive filesystems. parts = language.rsplit('_', 1) if len(parts) > 1: language = '{}_{}'.format(parts[0], parts[1].upper()) super(Locale, self).__init__(language, *args, **kwargs) @classmethod def parse(cls, *args, **kwargs): locale = super(Locale, cls).parse(*args, **kwargs) # Weak attempt to permit fuzzy locales (locales for which we still have # language and country information, but not a full localedata file for), # but disallow completely invalid locales. See note at end of file. 
if locale and locale.get_display_name() is None: raise ValueError('{} is not a valid locale identifier'.format(args[0])) return locale def __hash__(self): return hash(str(self)) def __eq__(self, other): if isinstance(other, basestring): return str(self).lower() == other.lower() return super(Locale, self).__eq__(other) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return '<Locale: "{}">'.format(str(self)) @classmethod def parse_codes(cls, codes): return [cls.parse(code) for code in codes] @property def is_rtl(self): return Locale.RTL_REGEX.match(self.language) @property def direction(self): return 'rtl' if self.is_rtl else 'ltr' @classmethod def from_alias(cls, pod, alias): podspec = pod.get_podspec() config = podspec.get_config() if 'localization' in config and 'aliases' in config['localization']: aliases = config['localization']['aliases'] for custom_locale, babel_locale in aliases.iteritems(): if custom_locale == alias: return cls.parse(babel_locale) return cls.parse(alias) def set_alias(self, pod): podspec = pod.get_podspec() self._alias = podspec.get_locale_alias(str(self).lower()) @property def alias(self): return self._alias @alias.setter def alias(self, alias): self._alias = alias # NOTE: Babel does not support "fuzzy" locales. A locale is considered "fuzzy" # when a corresponding "localedata" file that matches a given locale's full # identifier (e.g. "en_US") does not exist. Here's one example: "en_BD". CLDR # does not have a localedata file matching "en_BD" (English in Bangladesh), but # it does have individual files for "en" and also "bn_BD". As it turns # out, localedata files that correspond to a locale's full identifier (e.g. # "bn_BD.dat") are actually pretty light on the content (largely containing # things like start-of-week information) and most of the "meat" of the data is # contained in the main localedata file, e.g. "en.dat". 
# # Users may need to generate pages corresponding to locales that we don't # have full localedata for, and until Babel supports fuzzy locales, we'll # monkeypatch two Babel functions to provide partial support for fuzzy locales. # # With this monkeypatch, locales will be valid even if Babel doesn't have a # localedata file matching a locale's full identifier, but locales will still # fail with a ValueError if the user specifies a territory that does not exist. # With this patch, a user can, however, specify an invalid language. Obviously, # this patch should be removed when/if Babel adds support for fuzzy locales. # Optionally, we may want to provide users with more control over whether a # locale is valid or invalid, but we can revisit that later. # See: https://github.com/grow/grow/issues/93 def fuzzy_load(name, merge_inherited=True): localedata._cache_lock.acquire() try: data = localedata._cache.get(name) if not data: # Load inherited data if name == 'root' or not merge_inherited: data = {} else: parts = name.split('_') if len(parts) == 1: parent = 'root' else: parent = '_'.join(parts[:-1]) data = fuzzy_load(parent).copy() filename = os.path.join(localedata._dirname, '%s.dat' % name) try: fileobj = open(filename, 'rb') try: if name != 'root' and merge_inherited: localedata.merge(data, pickle.load(fileobj)) else: data = pickle.load(fileobj) localedata._cache[name] = data finally: fileobj.close() except IOError: pass return data finally: localedata._cache_lock.release() localedata.exists = lambda name: True localedata.load = fuzzy_load
35.446927
83
0.622695
from babel import localedata from grow.pods import errors from grow.pods import messages import pickle import os import babel import re class Locales(object): def __init__(self, pod): self.pod = pod def list_groups(self): if 'locales' not in self.pod.yaml: return [] return self.pod.yaml['locales'].keys() def get_regions(self, group_name='default'): if 'regions' not in self.pod.yaml: return [] try: return self.pod.yaml['locales'][group_name].get('regions', []) except errors.PodConfigurationError: return [] def get_languages(self, group_name='default'): if 'locales' not in self.pod.yaml: return [] try: return self.pod.yaml['locales'][group_name].get('languages', []) except errors.PodConfigurationError: return [] def to_message(self): message = messages.LocalesMessage() message.groups = [] for group_name in self.list_groups(): group_message = messages.LocaleGroupMessage() group_message.group_name = group_name group_message.regions = self.get_regions(group_name) group_message.languages = self.get_languages(group_name) message.groups.append(group_message) return message class Locale(babel.Locale): RTL_REGEX = re.compile('^(he|ar|fa|ur)(\W|$)') _alias = None def __init__(self, language, *args, **kwargs): parts = language.rsplit('_', 1) if len(parts) > 1: language = '{}_{}'.format(parts[0], parts[1].upper()) super(Locale, self).__init__(language, *args, **kwargs) @classmethod def parse(cls, *args, **kwargs): locale = super(Locale, cls).parse(*args, **kwargs) if locale and locale.get_display_name() is None: raise ValueError('{} is not a valid locale identifier'.format(args[0])) return locale def __hash__(self): return hash(str(self)) def __eq__(self, other): if isinstance(other, basestring): return str(self).lower() == other.lower() return super(Locale, self).__eq__(other) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return '<Locale: "{}">'.format(str(self)) @classmethod def parse_codes(cls, codes): return [cls.parse(code) for code in codes] 
@property def is_rtl(self): return Locale.RTL_REGEX.match(self.language) @property def direction(self): return 'rtl' if self.is_rtl else 'ltr' @classmethod def from_alias(cls, pod, alias): podspec = pod.get_podspec() config = podspec.get_config() if 'localization' in config and 'aliases' in config['localization']: aliases = config['localization']['aliases'] for custom_locale, babel_locale in aliases.iteritems(): if custom_locale == alias: return cls.parse(babel_locale) return cls.parse(alias) def set_alias(self, pod): podspec = pod.get_podspec() self._alias = podspec.get_locale_alias(str(self).lower()) @property def alias(self): return self._alias @alias.setter def alias(self, alias): self._alias = alias # identifier (e.g. "en_US") does not exist. Here's one example: "en_BD". CLDR # "bn_BD.dat") are actually pretty light on the content (largely containing # things like start-of-week information) and most of the "meat" of the data is # contained in the main localedata file, e.g. "en.dat". # # Users may need to generate pages corresponding to locales that we don't # monkeypatch two Babel functions to provide partial support for fuzzy locales. # # With this monkeypatch, locales will be valid even if Babel doesn't have a # fail with a ValueError if the user specifies a territory that does not exist. # With this patch, a user can, however, specify an invalid language. Obviously, # this patch should be removed when/if Babel adds support for fuzzy locales. # Optionally, we may want to provide users with more control over whether a # locale is valid or invalid, but we can revisit that later. 
# See: https://github.com/grow/grow/issues/93 def fuzzy_load(name, merge_inherited=True): localedata._cache_lock.acquire() try: data = localedata._cache.get(name) if not data: # Load inherited data if name == 'root' or not merge_inherited: data = {} else: parts = name.split('_') if len(parts) == 1: parent = 'root' else: parent = '_'.join(parts[:-1]) data = fuzzy_load(parent).copy() filename = os.path.join(localedata._dirname, '%s.dat' % name) try: fileobj = open(filename, 'rb') try: if name != 'root' and merge_inherited: localedata.merge(data, pickle.load(fileobj)) else: data = pickle.load(fileobj) localedata._cache[name] = data finally: fileobj.close() except IOError: pass return data finally: localedata._cache_lock.release() localedata.exists = lambda name: True localedata.load = fuzzy_load
true
true
f70fc2ab84b8b81bc5e39c5a740166c588e6362f
7,625
py
Python
examples/pwr_run/checkpointing/final/final4_new2/job51.py
boringlee24/keras_old
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
[ "MIT" ]
null
null
null
examples/pwr_run/checkpointing/final/final4_new2/job51.py
boringlee24/keras_old
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
[ "MIT" ]
null
null
null
examples/pwr_run/checkpointing/final/final4_new2/job51.py
boringlee24/keras_old
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
[ "MIT" ]
null
null
null
""" #Trains a ResNet on the CIFAR10 dataset. """ from __future__ import print_function import keras from keras.layers import Dense, Conv2D, BatchNormalization, Activation from keras.layers import AveragePooling2D, Input, Flatten from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint, LearningRateScheduler from keras.callbacks import ReduceLROnPlateau, TensorBoard from keras.preprocessing.image import ImageDataGenerator from keras.regularizers import l2 from keras import backend as K from keras.models import Model from keras.datasets import cifar10 from keras.applications.vgg16 import VGG16 from keras.applications.vgg19 import VGG19 from keras import models, layers, optimizers from datetime import datetime import tensorflow as tf import numpy as np import os import pdb import sys import argparse import time import signal import glob import json import send_signal parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training') parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name') parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint') parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use') parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)') parser.set_defaults(resume=False) args = parser.parse_args() os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num # Training parameters batch_size = 256 args_lr = 0.001 args_model = 'vgg16' epoch_begin_time = 0 job_name = sys.argv[0].split('.')[0] save_files = '/scratch/li.baol/checkpoint_final4_new2/' + job_name + '*' total_epochs = 6 starting_epoch = 0 # first step is to update the PID pid = os.getpid() message = job_name + ' pid ' + str(pid) # 'job50 pid 3333' send_signal.send(args.node, 10002, message) if args.resume: save_file = glob.glob(save_files)[0] # epochs = 
int(save_file.split('/')[4].split('_')[1].split('.')[0]) starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1]) data_augmentation = True num_classes = 10 # Subtracting pixel mean improves accuracy subtract_pixel_mean = True n = 3 # Model name, depth and version model_type = args.tc #'P100_resnet50_he_256_1' # Load the CIFAR10 data. (x_train, y_train), (x_test, y_test) = cifar10.load_data() # Normalize data. x_train = x_train.astype('float32') / 255 x_test = x_test.astype('float32') / 255 # If subtract pixel mean is enabled if subtract_pixel_mean: x_train_mean = np.mean(x_train, axis=0) x_train -= x_train_mean x_test -= x_train_mean print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') print('y_train shape:', y_train.shape) # Convert class vectors to binary class matrices. y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) if args.resume: print('resume from checkpoint') message = job_name + ' b_end' send_signal.send(args.node, 10002, message) model = keras.models.load_model(save_file) message = job_name + ' c_end' send_signal.send(args.node, 10002, message) else: print('train from start') model = models.Sequential() if '16' in args_model: base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None) elif '19' in args_model: base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None) #base_model.summary() #pdb.set_trace() model.add(base_model) model.add(layers.Flatten()) model.add(layers.BatchNormalization()) model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform')) #model.add(layers.Dropout(0.2)) model.add(layers.BatchNormalization()) model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform')) #model.add(layers.Dropout(0.2)) model.add(layers.BatchNormalization()) model.add(layers.Dense(10, 
activation='softmax'))#, kernel_initializer='he_uniform')) model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=args_lr), metrics=['accuracy']) #model.summary() print(model_type) #pdb.set_trace() current_epoch = 0 ################### connects interrupt signal to the process ##################### def terminateProcess(signalNumber, frame): # first record the wasted epoch time global epoch_begin_time if epoch_begin_time == 0: epoch_waste_time = 0 else: epoch_waste_time = int(time.time() - epoch_begin_time) message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100' if epoch_waste_time > 0: send_signal.send(args.node, 10002, message) print('checkpointing the model triggered by kill -15 signal') # delete whatever checkpoint that already exists for f in glob.glob(save_files): os.remove(f) model.save('/scratch/li.baol/checkpoint_final4_new2/' + job_name + '_' + str(current_epoch) + '.h5') print ('(SIGTERM) terminating the process') message = job_name + ' checkpoint' send_signal.send(args.node, 10002, message) sys.exit() signal.signal(signal.SIGTERM, terminateProcess) ################################################################################# logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch') first_epoch_start = 0 class PrintEpoch(keras.callbacks.Callback): def on_epoch_begin(self, epoch, logs=None): global current_epoch, first_epoch_start #remaining_epochs = epochs - epoch current_epoch = epoch print('current epoch ' + str(current_epoch)) global epoch_begin_time epoch_begin_time = time.time() if epoch == starting_epoch and args.resume: first_epoch_start = time.time() message = job_name + ' d_end' send_signal.send(args.node, 10002, message) elif epoch == starting_epoch: first_epoch_start = time.time() if epoch == starting_epoch: # send signal to indicate checkpoint is qualified message = job_name + ' ckpt_qual' send_signal.send(args.node, 
10002, message) def on_epoch_end(self, epoch, logs=None): if epoch == starting_epoch: first_epoch_time = int(time.time() - first_epoch_start) message = job_name + ' 1st_epoch ' + str(first_epoch_time) send_signal.send(args.node, 10002, message) progress = round((epoch+1) / round(total_epochs/2), 2) message = job_name + ' completion ' + str(progress) send_signal.send(args.node, 10002, message) my_callback = PrintEpoch() callbacks = [tensorboard_callback, my_callback] #[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback] # Run training model.fit(x_train, y_train, batch_size=batch_size, epochs=round(total_epochs/2), validation_data=(x_test, y_test), shuffle=True, callbacks=callbacks, initial_epoch=starting_epoch, verbose=1 ) # Score trained model. scores = model.evaluate(x_test, y_test, verbose=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) # send signal to indicate job has finished message = job_name + ' finish' send_signal.send(args.node, 10002, message)
32.725322
118
0.693377
from __future__ import print_function import keras from keras.layers import Dense, Conv2D, BatchNormalization, Activation from keras.layers import AveragePooling2D, Input, Flatten from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint, LearningRateScheduler from keras.callbacks import ReduceLROnPlateau, TensorBoard from keras.preprocessing.image import ImageDataGenerator from keras.regularizers import l2 from keras import backend as K from keras.models import Model from keras.datasets import cifar10 from keras.applications.vgg16 import VGG16 from keras.applications.vgg19 import VGG19 from keras import models, layers, optimizers from datetime import datetime import tensorflow as tf import numpy as np import os import pdb import sys import argparse import time import signal import glob import json import send_signal parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training') parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name') parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint') parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use') parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)') parser.set_defaults(resume=False) args = parser.parse_args() os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num batch_size = 256 args_lr = 0.001 args_model = 'vgg16' epoch_begin_time = 0 job_name = sys.argv[0].split('.')[0] save_files = '/scratch/li.baol/checkpoint_final4_new2/' + job_name + '*' total_epochs = 6 starting_epoch = 0 pid = os.getpid() message = job_name + ' pid ' + str(pid) send_signal.send(args.node, 10002, message) if args.resume: save_file = glob.glob(save_files)[0] starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1]) data_augmentation = True num_classes = 10 subtract_pixel_mean = 
True n = 3 model_type = args.tc (x_train, y_train), (x_test, y_test) = cifar10.load_data() x_train = x_train.astype('float32') / 255 x_test = x_test.astype('float32') / 255 if subtract_pixel_mean: x_train_mean = np.mean(x_train, axis=0) x_train -= x_train_mean x_test -= x_train_mean print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') print('y_train shape:', y_train.shape) y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) if args.resume: print('resume from checkpoint') message = job_name + ' b_end' send_signal.send(args.node, 10002, message) model = keras.models.load_model(save_file) message = job_name + ' c_end' send_signal.send(args.node, 10002, message) else: print('train from start') model = models.Sequential() if '16' in args_model: base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None) elif '19' in args_model: base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None) model.add(base_model) model.add(layers.Flatten()) model.add(layers.BatchNormalization()) model.add(layers.Dense(128, activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.Dense(64, activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.Dense(10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=args_lr), metrics=['accuracy']) print(model_type) current_epoch = 0
true
true
f70fc307d1ca7708b58118c2e04d529c472e7d00
6,329
py
Python
patches/multiworld.py
kbranch/LADXR
21e795daecf90fa48f19e051a2977fffb71ade5c
[ "MIT" ]
null
null
null
patches/multiworld.py
kbranch/LADXR
21e795daecf90fa48f19e051a2977fffb71ade5c
[ "MIT" ]
null
null
null
patches/multiworld.py
kbranch/LADXR
21e795daecf90fa48f19e051a2977fffb71ade5c
[ "MIT" ]
null
null
null
from assembler import ASM from roomEditor import RoomEditor import entityData def addMultiworldShop(rom): # Make a copy of the shop into GrandpaUlrira house shop_room = RoomEditor(rom, 0x2A1) re = RoomEditor(rom, 0x2A9) re.objects = [obj for obj in shop_room.objects if obj.x is not None and obj.type_id != 0xCE] + re.getWarps() re.entities = [(1, 6, 0x77), (2, 6, 0x77)] re.animation_id = shop_room.animation_id re.floor_object = shop_room.floor_object re.store(rom) # Fix the tileset rom.banks[0x20][0x2EB3 + 0x2A9 - 0x100] = rom.banks[0x20][0x2EB3 + 0x2A1 - 0x100] # Load the shopkeeper sprites instead of Grandpa sprites entityData.SPRITE_DATA[0x77] = entityData.SPRITE_DATA[0x4D] labels = {} rom.patch(0x06, 0x2860, "00" * 0x215, ASM(""" shopItemsHandler: ; Render the shop items ld h, $00 loop: ; First load links position to render the item at ldh a, [$98] ; LinkX ldh [$EE], a ; X ldh a, [$99] ; LinkY sub $0E ldh [$EC], a ; Y ; Check if this is the item we have picked up ld a, [$C509] ; picked up item in shop dec a cp h jr z, .renderCarry ld a, h swap a add a, $20 ldh [$EE], a ; X ld a, $30 ldh [$EC], a ; Y .renderCarry: ld a, h push hl ldh [$F1], a ; variant cp $03 jr nc, .singleSprite ld de, ItemsDualSpriteData call $3BC0 ; render sprite pair jr .renderDone .singleSprite: ld de, ItemsSingleSpriteData call $3C77 ; render sprite .renderDone: pop hl .skipItem: inc h ld a, $07 cp h jr nz, loop ; check if we want to pickup or drop an item ldh a, [$CC] and $30 ; A or B button call nz, checkForPickup ; check if we have an item ld a, [$C509] ; carry item and a ret z ; Set that link has picked something up ld a, $01 ld [$C15C], a call $0CAF ; reset spin attack... ; Check if we are trying to exit the shop and so drop our item. 
ldh a, [$99] cp $78 ret c xor a ld [$C509], a ret checkForPickup: ldh a, [$9E] ; direction cp $02 ret nz ldh a, [$99] ; LinkY cp $48 ret nc ld a, $13 ldh [$F2], a ; play SFX ld a, [$C509] ; picked up shop item and a jr nz, .drop ldh a, [$98] ; LinkX sub $08 swap a and $07 ld [$C509], a ; picked up shop item ret .drop: xor a ld [$C509], a ret ItemsDualSpriteData: db $60, $08, $60, $28 ; zol db $68, $09 ; chicken (left) ItemsSingleSpriteData: ; (first 3 entries are still dual sprites) db $6A, $09 ; chicken (right) db $14, $02, $14, $22 ; piece of power ;Real single sprite data starts here db $00, $0F ; bomb db $38, $0A ; rupees db $20, $0C ; medicine db $28, $0C ; heart ;------------------------------------trying to buy something starts here talkHandler: ld a, [$C509] ; carry item add a, a ret z ; check if we have something to buy sub $02 ld hl, itemNames ld e, a ld d, b ; b=0 add hl, de ld e, [hl] inc hl ld d, [hl] ld hl, wCustomMessage call appendString dec hl call padString ld de, postMessage call appendString dec hl ld a, $fe ld [hl], a ld de, $FFEF add hl, de ldh a, [$EE] swap a and $0F add a, $30 ld [hl], a ld a, $C9 call $2385 ; open dialog call $3B12 ; increase entity state ret appendString: ld a, [de] inc de and a ret z ldi [hl], a jr appendString padString: ld a, l and $0F ret z ld a, $20 ldi [hl], a jr padString itemNames: dw itemZol dw itemChicken dw itemPieceOfPower dw itemBombs dw itemRupees dw itemMedicine dw itemHealth postMessage: db "For player X? 
Yes No ", $00 itemZol: db m"Slime storm|100 {RUPEES}", $00 itemChicken: db m"Coccu party|50 {RUPEES}", $00 itemPieceOfPower: db m"Piece of Power|50 {RUPEES}", $00 itemBombs: db m"20 Bombs|50 {RUPEES}", $00 itemRupees: db m"100 {RUPEES}|200 {RUPEES}", $00 itemMedicine: db m"Medicine|100 {RUPEES}", $00 itemHealth: db m"Health refill|10 {RUPEES}", $00 TalkResultHandler: ld hl, ItemPriceTableBCD ld a, [$C509] dec a add a, a ld c, a ; b=0 add hl, bc ldi a, [hl] ld d, [hl] ld e, a ld a, [$DB5D] cp d ret c jr nz, .highEnough ld a, [$DB5E] cp e ret c .highEnough: ; Got enough money, take it. ld hl, ItemPriceTableDEC ld a, [$C509] dec a ld c, a ; b=0 add hl, bc ld a, [hl] ld [$DB92], a ; No longer picked up item xor a ld [$C509], a ret ItemPriceTableBCD: dw $0100, $0050, $0050, $0050, $0200, $0100, $0010 ItemPriceTableDEC: db $64, $32, $32, $32, $C8, $64, $0A """, 0x6860, labels), fill_nop=True) # Patch GrandpaUlrira to work as a multiworld shop rom.patch(0x06, 0x1C0E, 0x1C89, ASM(""" ld a, $01 ld [$C50A], a ; this stops link from using items ;Draw shopkeeper ld de, OwnerSpriteData call $3BC0 ; render sprite pair ldh a, [$E7] ; frame counter swap a and $01 call $3B0C ; set sprite variant ldh a, [$F0] and a jr nz, checkTalkingResult call $641A ; prevent link from moving into the sprite call $645D ; check if talking to NPC call c, ${TALKHANDLER:04x} ; talk handling ldh a, [$EE] ; X cp $18 ret nz ; Jump to other code which is placed on the old owl code. As we do not have enough space here. jp ${SHOPITEMSHANDLER:04x} checkTalkingResult: ld a, [$C19F] and a ret nz ; still taking call $3B12 ; increase entity state ld [hl], $00 ld a, [$C177] ; dialog selection and a ret nz jp ${TALKRESULTHANDLER:04x} OwnerSpriteData: ;db $60, $03, $62, $03, $62, $23, $60, $23 ; down db $64, $03, $66, $03, $66, $23, $64, $23 ; up ;db $68, $03, $6A, $03, $6C, $03, $6E, $03 ; left ;db $6A, $23, $68, $23, $6E, $23, $6C, $23 ; right """.format(**labels), 0x5C0E), fill_nop=True)
21.674658
112
0.557592
from assembler import ASM from roomEditor import RoomEditor import entityData def addMultiworldShop(rom): shop_room = RoomEditor(rom, 0x2A1) re = RoomEditor(rom, 0x2A9) re.objects = [obj for obj in shop_room.objects if obj.x is not None and obj.type_id != 0xCE] + re.getWarps() re.entities = [(1, 6, 0x77), (2, 6, 0x77)] re.animation_id = shop_room.animation_id re.floor_object = shop_room.floor_object re.store(rom) rom.banks[0x20][0x2EB3 + 0x2A9 - 0x100] = rom.banks[0x20][0x2EB3 + 0x2A1 - 0x100] entityData.SPRITE_DATA[0x77] = entityData.SPRITE_DATA[0x4D] labels = {} rom.patch(0x06, 0x2860, "00" * 0x215, ASM(""" shopItemsHandler: ; Render the shop items ld h, $00 loop: ; First load links position to render the item at ldh a, [$98] ; LinkX ldh [$EE], a ; X ldh a, [$99] ; LinkY sub $0E ldh [$EC], a ; Y ; Check if this is the item we have picked up ld a, [$C509] ; picked up item in shop dec a cp h jr z, .renderCarry ld a, h swap a add a, $20 ldh [$EE], a ; X ld a, $30 ldh [$EC], a ; Y .renderCarry: ld a, h push hl ldh [$F1], a ; variant cp $03 jr nc, .singleSprite ld de, ItemsDualSpriteData call $3BC0 ; render sprite pair jr .renderDone .singleSprite: ld de, ItemsSingleSpriteData call $3C77 ; render sprite .renderDone: pop hl .skipItem: inc h ld a, $07 cp h jr nz, loop ; check if we want to pickup or drop an item ldh a, [$CC] and $30 ; A or B button call nz, checkForPickup ; check if we have an item ld a, [$C509] ; carry item and a ret z ; Set that link has picked something up ld a, $01 ld [$C15C], a call $0CAF ; reset spin attack... ; Check if we are trying to exit the shop and so drop our item. 
ldh a, [$99] cp $78 ret c xor a ld [$C509], a ret checkForPickup: ldh a, [$9E] ; direction cp $02 ret nz ldh a, [$99] ; LinkY cp $48 ret nc ld a, $13 ldh [$F2], a ; play SFX ld a, [$C509] ; picked up shop item and a jr nz, .drop ldh a, [$98] ; LinkX sub $08 swap a and $07 ld [$C509], a ; picked up shop item ret .drop: xor a ld [$C509], a ret ItemsDualSpriteData: db $60, $08, $60, $28 ; zol db $68, $09 ; chicken (left) ItemsSingleSpriteData: ; (first 3 entries are still dual sprites) db $6A, $09 ; chicken (right) db $14, $02, $14, $22 ; piece of power ;Real single sprite data starts here db $00, $0F ; bomb db $38, $0A ; rupees db $20, $0C ; medicine db $28, $0C ; heart ;------------------------------------trying to buy something starts here talkHandler: ld a, [$C509] ; carry item add a, a ret z ; check if we have something to buy sub $02 ld hl, itemNames ld e, a ld d, b ; b=0 add hl, de ld e, [hl] inc hl ld d, [hl] ld hl, wCustomMessage call appendString dec hl call padString ld de, postMessage call appendString dec hl ld a, $fe ld [hl], a ld de, $FFEF add hl, de ldh a, [$EE] swap a and $0F add a, $30 ld [hl], a ld a, $C9 call $2385 ; open dialog call $3B12 ; increase entity state ret appendString: ld a, [de] inc de and a ret z ldi [hl], a jr appendString padString: ld a, l and $0F ret z ld a, $20 ldi [hl], a jr padString itemNames: dw itemZol dw itemChicken dw itemPieceOfPower dw itemBombs dw itemRupees dw itemMedicine dw itemHealth postMessage: db "For player X? 
Yes No ", $00 itemZol: db m"Slime storm|100 {RUPEES}", $00 itemChicken: db m"Coccu party|50 {RUPEES}", $00 itemPieceOfPower: db m"Piece of Power|50 {RUPEES}", $00 itemBombs: db m"20 Bombs|50 {RUPEES}", $00 itemRupees: db m"100 {RUPEES}|200 {RUPEES}", $00 itemMedicine: db m"Medicine|100 {RUPEES}", $00 itemHealth: db m"Health refill|10 {RUPEES}", $00 TalkResultHandler: ld hl, ItemPriceTableBCD ld a, [$C509] dec a add a, a ld c, a ; b=0 add hl, bc ldi a, [hl] ld d, [hl] ld e, a ld a, [$DB5D] cp d ret c jr nz, .highEnough ld a, [$DB5E] cp e ret c .highEnough: ; Got enough money, take it. ld hl, ItemPriceTableDEC ld a, [$C509] dec a ld c, a ; b=0 add hl, bc ld a, [hl] ld [$DB92], a ; No longer picked up item xor a ld [$C509], a ret ItemPriceTableBCD: dw $0100, $0050, $0050, $0050, $0200, $0100, $0010 ItemPriceTableDEC: db $64, $32, $32, $32, $C8, $64, $0A """, 0x6860, labels), fill_nop=True) rom.patch(0x06, 0x1C0E, 0x1C89, ASM(""" ld a, $01 ld [$C50A], a ; this stops link from using items ;Draw shopkeeper ld de, OwnerSpriteData call $3BC0 ; render sprite pair ldh a, [$E7] ; frame counter swap a and $01 call $3B0C ; set sprite variant ldh a, [$F0] and a jr nz, checkTalkingResult call $641A ; prevent link from moving into the sprite call $645D ; check if talking to NPC call c, ${TALKHANDLER:04x} ; talk handling ldh a, [$EE] ; X cp $18 ret nz ; Jump to other code which is placed on the old owl code. As we do not have enough space here. jp ${SHOPITEMSHANDLER:04x} checkTalkingResult: ld a, [$C19F] and a ret nz ; still taking call $3B12 ; increase entity state ld [hl], $00 ld a, [$C177] ; dialog selection and a ret nz jp ${TALKRESULTHANDLER:04x} OwnerSpriteData: ;db $60, $03, $62, $03, $62, $23, $60, $23 ; down db $64, $03, $66, $03, $66, $23, $64, $23 ; up ;db $68, $03, $6A, $03, $6C, $03, $6E, $03 ; left ;db $6A, $23, $68, $23, $6E, $23, $6C, $23 ; right """.format(**labels), 0x5C0E), fill_nop=True)
true
true
f70fc3e33f50cffe0c707d86a267aec3f37ec6a4
4,449
py
Python
turbinia/state_manager_test.py
youjitwo/turbinia
aae3c41cf72f08347c119f0734d1ce74f57df831
[ "Apache-2.0" ]
null
null
null
turbinia/state_manager_test.py
youjitwo/turbinia
aae3c41cf72f08347c119f0734d1ce74f57df831
[ "Apache-2.0" ]
null
null
null
turbinia/state_manager_test.py
youjitwo/turbinia
aae3c41cf72f08347c119f0734d1ce74f57df831
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests the state manager module.""" from __future__ import unicode_literals import copy import os import tempfile import unittest from unittest import mock from turbinia import config from turbinia.workers import TurbiniaTask from turbinia.workers import TurbiniaTaskResult from turbinia import state_manager class TestPSQStateManager(unittest.TestCase): """Test PSQStateManager class.""" def _get_state_manager(self): """Gets a Datastore State Manager object for test.""" config.STATE_MANAGER = 'Datastore' return state_manager.get_state_manager() @mock.patch('turbinia.state_manager.datastore.Client') def setUp(self, _): self.remove_files = [] self.remove_dirs = [] self.state_manager = None config.LoadConfig() self.state_manager_save = config.STATE_MANAGER self.test_data = { 'name': 'TestTask', 'request_id': 'TestRequestId', 'group_id': 'TestGroupId', 'status': 'TestStatus', 'saved_paths': ['testpath1', 'testpath2'] } # Set up TurbiniaTask self.base_output_dir = tempfile.mkdtemp() self.task = TurbiniaTask( base_output_dir=self.base_output_dir, name=self.test_data['name'], request_id=self.test_data['request_id'], group_id=self.test_data['group_id']) self.task.output_manager = mock.MagicMock() self.task.output_manager.get_local_output_dirs.return_value = ( '/fake/tmp/dir', self.base_output_dir) # Set up TurbiniaTaskResult self.result = 
TurbiniaTaskResult(base_output_dir=self.base_output_dir) self.result.setup(self.task) self.result.status = self.test_data['status'] self.result.saved_paths = self.test_data['saved_paths'] self.task.result = self.result def tearDown(self): config.STATE_MANAGER = self.state_manager_save [os.remove(f) for f in self.remove_files if os.path.exists(f)] [os.rmdir(d) for d in self.remove_dirs if os.path.exists(d)] os.rmdir(self.base_output_dir) @mock.patch('turbinia.state_manager.datastore.Client') def testStateManagerGetTaskDict(self, _): """Test State Manager get_task_dict().""" self.state_manager = self._get_state_manager() task_dict = self.state_manager.get_task_dict(self.task) # Make the returned task_dict contains all of our test data self.assertEqual(task_dict['name'], self.test_data['name']) self.assertEqual(task_dict['request_id'], self.test_data['request_id']) self.assertEqual(task_dict['status'], self.test_data['status']) self.assertEqual(len(task_dict['saved_paths']), 2) self.assertEqual(task_dict['group_id'], self.test_data['group_id']) self.assertTrue('instance' in task_dict) self.assertIn(self.test_data['saved_paths'][0], task_dict['saved_paths']) @mock.patch('turbinia.state_manager.datastore.Client') def testStateManagerValidateDataValidDict(self, _): """Test State Manager _validate_data() base case.""" self.state_manager = self._get_state_manager() # pylint: disable=protected-access test_data = self.state_manager._validate_data(self.test_data) self.assertDictEqual(test_data, self.test_data) @mock.patch('turbinia.state_manager.datastore.Client') def testStateManagerValidateDataInvalidDict(self, _): """Test State Manager _validate_data() base case.""" self.state_manager = self._get_state_manager() invalid_dict = copy.deepcopy(self.test_data) invalid_dict['status'] = 'A' * state_manager.MAX_DATASTORE_STRLEN + 'BORKEN' # pylint: disable=protected-access test_data = self.state_manager._validate_data(invalid_dict) self.assertListEqual(list(test_data.keys()), 
list(self.test_data.keys())) self.assertNotEqual(test_data['status'], self.test_data['status']) self.assertLessEqual( len(test_data['status']), state_manager.MAX_DATASTORE_STRLEN)
38.353448
80
0.733423
from __future__ import unicode_literals import copy import os import tempfile import unittest from unittest import mock from turbinia import config from turbinia.workers import TurbiniaTask from turbinia.workers import TurbiniaTaskResult from turbinia import state_manager class TestPSQStateManager(unittest.TestCase): def _get_state_manager(self): config.STATE_MANAGER = 'Datastore' return state_manager.get_state_manager() @mock.patch('turbinia.state_manager.datastore.Client') def setUp(self, _): self.remove_files = [] self.remove_dirs = [] self.state_manager = None config.LoadConfig() self.state_manager_save = config.STATE_MANAGER self.test_data = { 'name': 'TestTask', 'request_id': 'TestRequestId', 'group_id': 'TestGroupId', 'status': 'TestStatus', 'saved_paths': ['testpath1', 'testpath2'] } self.base_output_dir = tempfile.mkdtemp() self.task = TurbiniaTask( base_output_dir=self.base_output_dir, name=self.test_data['name'], request_id=self.test_data['request_id'], group_id=self.test_data['group_id']) self.task.output_manager = mock.MagicMock() self.task.output_manager.get_local_output_dirs.return_value = ( '/fake/tmp/dir', self.base_output_dir) self.result = TurbiniaTaskResult(base_output_dir=self.base_output_dir) self.result.setup(self.task) self.result.status = self.test_data['status'] self.result.saved_paths = self.test_data['saved_paths'] self.task.result = self.result def tearDown(self): config.STATE_MANAGER = self.state_manager_save [os.remove(f) for f in self.remove_files if os.path.exists(f)] [os.rmdir(d) for d in self.remove_dirs if os.path.exists(d)] os.rmdir(self.base_output_dir) @mock.patch('turbinia.state_manager.datastore.Client') def testStateManagerGetTaskDict(self, _): self.state_manager = self._get_state_manager() task_dict = self.state_manager.get_task_dict(self.task) self.assertEqual(task_dict['name'], self.test_data['name']) self.assertEqual(task_dict['request_id'], self.test_data['request_id']) self.assertEqual(task_dict['status'], 
self.test_data['status']) self.assertEqual(len(task_dict['saved_paths']), 2) self.assertEqual(task_dict['group_id'], self.test_data['group_id']) self.assertTrue('instance' in task_dict) self.assertIn(self.test_data['saved_paths'][0], task_dict['saved_paths']) @mock.patch('turbinia.state_manager.datastore.Client') def testStateManagerValidateDataValidDict(self, _): self.state_manager = self._get_state_manager() test_data = self.state_manager._validate_data(self.test_data) self.assertDictEqual(test_data, self.test_data) @mock.patch('turbinia.state_manager.datastore.Client') def testStateManagerValidateDataInvalidDict(self, _): self.state_manager = self._get_state_manager() invalid_dict = copy.deepcopy(self.test_data) invalid_dict['status'] = 'A' * state_manager.MAX_DATASTORE_STRLEN + 'BORKEN' test_data = self.state_manager._validate_data(invalid_dict) self.assertListEqual(list(test_data.keys()), list(self.test_data.keys())) self.assertNotEqual(test_data['status'], self.test_data['status']) self.assertLessEqual( len(test_data['status']), state_manager.MAX_DATASTORE_STRLEN)
true
true
f70fc40e9143f9c3f8ffb69f403f565a54f0c2b4
1,049
bzl
Python
hack/repo-infra/defs/build.bzl
linxiulei/cri-tools
17484cda811c93b69e61448835db9559c7f3ab9c
[ "Apache-2.0" ]
777
2018-09-09T18:10:30.000Z
2022-03-31T15:29:00.000Z
hack/repo-infra/defs/build.bzl
linxiulei/cri-tools
17484cda811c93b69e61448835db9559c7f3ab9c
[ "Apache-2.0" ]
553
2018-09-07T02:36:56.000Z
2022-03-30T02:13:57.000Z
hack/repo-infra/defs/build.bzl
linxiulei/cri-tools
17484cda811c93b69e61448835db9559c7f3ab9c
[ "Apache-2.0" ]
217
2018-09-19T13:47:36.000Z
2022-03-25T13:58:12.000Z
def _gcs_upload_impl(ctx): targets = [] for target in ctx.files.data: targets.append(target.short_path) ctx.file_action( output = ctx.outputs.targets, content = "\n".join(targets), ) ctx.file_action( content = "%s --manifest %s --root $PWD -- $@" % ( ctx.attr.uploader.files_to_run.executable.short_path, ctx.outputs.targets.short_path, ), output = ctx.outputs.executable, executable = True, ) return struct( runfiles = ctx.runfiles( files = ctx.files.data + ctx.files.uploader + [ctx.version_file, ctx.outputs.targets] ) ) gcs_upload = rule( attrs = { "data": attr.label_list( mandatory = True, allow_files = True, ), "uploader": attr.label( default = Label("//defs:gcs_uploader"), allow_files = True, ), }, executable = True, outputs = { "targets": "%{name}-targets.txt", }, implementation = _gcs_upload_impl, )
23.840909
63
0.554814
def _gcs_upload_impl(ctx): targets = [] for target in ctx.files.data: targets.append(target.short_path) ctx.file_action( output = ctx.outputs.targets, content = "\n".join(targets), ) ctx.file_action( content = "%s --manifest %s --root $PWD -- $@" % ( ctx.attr.uploader.files_to_run.executable.short_path, ctx.outputs.targets.short_path, ), output = ctx.outputs.executable, executable = True, ) return struct( runfiles = ctx.runfiles( files = ctx.files.data + ctx.files.uploader + [ctx.version_file, ctx.outputs.targets] ) ) gcs_upload = rule( attrs = { "data": attr.label_list( mandatory = True, allow_files = True, ), "uploader": attr.label( default = Label("//defs:gcs_uploader"), allow_files = True, ), }, executable = True, outputs = { "targets": "%{name}-targets.txt", }, implementation = _gcs_upload_impl, )
true
true
f70fc42e1bdd3e9182fc5cb7fe03ea0f0092441a
34,760
py
Python
.history/neuroformer/model_perceiver_20220116213408.py
woanderer/neuroformer
df3462d55977b6c9adcb6753e7c474b8b76e8021
[ "MIT" ]
null
null
null
.history/neuroformer/model_perceiver_20220116213408.py
woanderer/neuroformer
df3462d55977b6c9adcb6753e7c474b8b76e8021
[ "MIT" ]
null
null
null
.history/neuroformer/model_perceiver_20220116213408.py
woanderer/neuroformer
df3462d55977b6c9adcb6753e7c474b8b76e8021
[ "MIT" ]
null
null
null
# from code.transformer_vid.utils import convert_weights # import rotary_embedding_torch from torch.nn.modules.activation import GELU, ReLU # from data.OneCombo3.trainer import TrainerConfig import math import numpy as np import itertools import logging import torch import torch.nn as nn from torch.nn import functional as F from torch.autograd import Variable from torchvision.models.video import r3d_18 # from ResNet3D import r3d_18 from scipy.optimize import linear_sum_assignment # from rotary_embedding_torch import apply_rotary_emb, RotaryEmbedding from einops.layers.torch import Rearrange logger = logging.getLogger(__name__) def convert_weights(model: nn.Module): """Convert applicable model parameters to fp16""" def _convert_weights_to_fp16(l): if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): # nn.Conv3d, l.weight.data = l.weight.data.half() if l.bias is not None: l.bias.data = l.bias.data.half() if isinstance(l, nn.MultiheadAttention): for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: tensor = getattr(l, attr) if tensor is not None: tensor.data = tensor.data.half() for name in ["text_projection", "proj"]: if hasattr(l, name): attr = getattr(l, name) if attr is not None: attr.data = attr.data.half() model.apply(_convert_weights_to_fp16) class GPTConfig: """ base GPT config, params common to all GPT versions """ embd_pdrop = 0.2 resid_pdrop = 0.2 attn_pdrop = 0.2 pos_pdrop = 0.2 temp_pdrop = 0.2 pos_emb = True temp_emb = True start_prune = 30 epoch = 0 def __init__(self, vocab_size, block_size, **kwargs): self.vocab_size = vocab_size self.block_size = block_size for k, v in kwargs.items(): setattr(self, k, v) class neuralGPTConfig: """ base GPT config, params common to all GPT versions """ n = 0.4 im_drop = 0.2 id_drop = n embd_pdrop = n resid_pdrop = n attn_pdrop = n pos_pdrop = n temp_pdrop = n pos_emb = True temp_emb = True def __init__(self, vocab_size, block_size, **kwargs): self.vocab_size = 
vocab_size self.block_size = block_size for k, v in kwargs.items(): setattr(self, k, v) class GPT1Config(GPTConfig): """ GPT-1 like network roughly 125M params """ n_layer = 12 n_head = 12 n_embd = 768 class VideoFeaturesExtractor(nn.Module): """ R3D: (3 x T x H x W) H, W = 112 """ def __init__(self): super().__init__() self.backbone = torch.nn.Sequential(*(list(r3d_18(pretrained=True).children())[:-2])) convert_weights(self.backbone) # # freeze backbone # for k, v in self.backbone.named_parameters(): # v.requires_grad = False def forward(self, x): # B = Batch, T, C, Fm, H, W features = self.backbone(x) # (B, C, T, H, W) B, C, T, H, W = features.shape features = features.permute(0, 2, 3, 4, 1) features = features.view(B, -1, C) return features class VideoEncoder(nn.Module): def __init__(self): super().__init__() self.to_patch_embedding = nn.Sequential( Rearrange('b c t (h p1) (w p2) -> b (t h w) (p1 p2 c)', p1=16, p2=16) ) def forward(self, x): return self.to_patch_embedding(x) class CausalSelfAttention(nn.Module): """ A vanilla multi-head masked self-attention layer with a projection at the end. 
""" def __init__(self, config): super().__init__() assert config.n_embd % config.n_head == 0 self.config = config # key, query, value projections for all heads self.key = nn.Linear(config.n_embd, config.n_embd) self.query = nn.Linear(config.n_embd, config.n_embd) self.value = nn.Linear(config.n_embd, config.n_embd) # regularization self.attn_drop = nn.Dropout(config.attn_pdrop) self.resid_drop = nn.Dropout(config.resid_pdrop) # output projection self.proj = nn.Linear(config.n_embd, config.n_embd) self.register_buffer("mask", self.build_mask(config.block_size)) self.n_head = config.n_head self.att = None self.T = config.block_size # self.rotary_embedding = RotarySpatioTemporalEmbedding(config) def build_mask(self, block_size): mask = torch.tril(torch.ones((block_size, block_size)), ).view(1, 1, block_size, block_size) return mask def generate_sparse_mask(self, att, p, config): """ Generate a sparse mask according to p. """ assert p >= 0 and p <= 1, "p should be in [0, 1]" T = config.block_size mask = torch.rand((1, T)) < p mask = mask.repeat(T, 1) mask[0, 0] = False # don't mask 1st step # check if any step is fully masked and umask it idx_all_true = (True == torch.all(mask, dim=0)).nonzero() for step in idx_all_true: sampler = torch.distributions.Uniform(low=0, high=step.item()+1) idx_false = sampler.sample((1,1)).long() mask[step, idx_false] = False # mask = mask.repeat(T, 1) mask = mask.view(1, 1, T, T).cuda() if att.is_cuda else mask.view(1, 1, T, T) att = att.masked_fill(mask, float('-inf')) return att def forward(self, x, pad=None, dtx=None): # B = Batch, T = Sequence, C = n_embed B, T, C = x.size() # calculate query, key, values for all head in batch and move head forward to the batch dim k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, 
hs) # # apply rotary embeddings # if dtx is not None: # q, k = self.rotary_embedding(q, k, dtx) # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T) att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf')) if self.training: att = self.generate_sparse_mask(att, 0.25, self.config) if pad is not None: for idx, i in enumerate(pad): att[idx, :, :, self.T - i:] = float('-inf') # only able to see first padding token att = F.softmax(att, dim=-1) att = self.attn_drop(att) self.att = att y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side # output projection y = self.resid_drop(self.proj(y)) return y class PositionalEmbedding(nn.Module): """ Implement the PE function. """ def __init__(self, n_embd, p_drop, max_len=1500): super().__init__() self.dropout = nn.Dropout(p=p_drop) # Compute the positional encodings once in log space. 
pe = torch.zeros(max_len, n_embd) position = torch.arange(0, max_len).unsqueeze(1) div_term = torch.exp(torch.arange(0, n_embd, 2) * -(math.log(10000.0) / n_embd)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) def forward(self, x): x = Variable(self.pe[:, :x.size(1)], requires_grad=False) return self.dropout(x) # class RotarySpatioTemporalEmbedding(nn.Module): # """ Rotary temporal embeddings - block_size = id_blk_sz """ # def __init__(self, config): # super().__init__() # self.frame_block_size = config.frame_block_size # self.id_block_size = config.id_block_size # self.emb = RotaryEmbedding(dim=32) # def forward(self, q, k, t): # b = t.shape[0] # tf = self.frame_block_size # queries = [] # keys = [] # for B in range(b): # im_temp_emb = torch.tensor([-0.5] * (tf//2) + [0.5] * (tf//2)) # im_pos_emb = torch.arange(self.frame_block_size) # im_emb = torch.stack([im_temp_emb, im_pos_emb], dim=0) # id_temp_emb = self.temp_emb(t[B], cache_key=self.block_size) # freqs = self.emb(torch.cat(im_emb, id_temp_emb)) # queries.append(apply_rotary_emb(freqs, q[B][None, ...])) # keys.append(apply_rotary_emb(freqs, k[B][None, ...])) # q, k = torch.cat(queries), torch.cat(keys) # return q, k class TemporalEmbedding(nn.Module): """ encoding temporal information using fourrier signals """ def __init__(self, n_embd, p_drop, max_len=1500): super().__init__() self.dropout = nn.Dropout(p=p_drop) # Compute the positional encodings once in log space. 
pe = torch.zeros(max_len, n_embd) position = torch.arange(0, max_len).unsqueeze(1) div_term = torch.exp(torch.arange(0, n_embd, 2) * -(math.log(10000.0) / n_embd)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) def forward(self, x): x = Variable(self.pe[:, :x.size(1)], requires_grad=False) return self.dropout(x) class LearntTemporalEmbedding(nn.Module): """ Project B x T x 1 time sequence to B x T x C """ def __init__(self, block_sz, n_embd, p_drop=0.2): super().__init__() self.temp_emb = nn.Sequential( nn.Linear(1, n_embd // 2), nn.GELU(), nn.Linear(n_embd // 2, n_embd), nn.Dropout(p_drop) ) def forward(self, x): return self.temp_emb(x.unsqueeze(-1)) class Decoder(nn.Module): def __init__(self, config): super().__init__() # decoder_layer = nn.TransformerDecoderLayer(config.n_embd, config.n_head, # activation='gelu', dropout=0.2, batch_first=True) # self.decoder = nn.TransformerDecoder(decoder_layer, config.n_layer) self.decoder = nn.Transformer(d_model=config.n_embd, nhead=config.n_head, num_encoder_layers=3, num_decoder_layers=config.n_layer, activation="gelu", dropout=0.4, batch_first=True) self.register_buffer("tgt_mask", self.generate_square_subsequent_mask(config.id_block_size)) # self.register_buffer("tgt_pad_mask", self.generate_padding_mask(config.ids_block_size)) self.T = config.id_block_size def generate_square_subsequent_mask(self, sz: int, pad=None): r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0). 
""" mask = (torch.triu(torch.ones(sz, sz), diagonal=0) == 1).transpose(0, 1) mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) return mask def generate_padding_mask(self, sz: int, pad=None): r"""Build a (B x T) mask that resides on the GPU and can be manipulated by build_padding_mask according to padded sequence """ mask = torch.zeros(1, sz, dtype=torch.bool) return mask def generate_sparse_mask(self, sz: int, pad=None): r""" Build a square mask that employs teacher forcing according to P """ rand_mat = torch.rand(1, sz) k = round(0.75 * sz) k_th_quant = torch.topk(rand_mat, k, largest = False)[0][:,-1:] bool_tensor = rand_mat <= k_th_quant mask = torch.where(bool_tensor, torch.tensor(1), torch.tensor(0)).repeat(sz, 1) mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) return mask.cuda(self.tgt_mask.get_device()) if self.tgt_mask.is_cuda else mask def build_padding_mask(self, tgt, pad): # mask = self.tgt_pad_mask.repeat(tgt.shape[0], 1) mask = torch.zeros(tgt.shape[0], self.T, dtype=torch.bool) for B, P in enumerate(pad): mask[B, self.T - P:] = True return mask # .to(torch.cuda.current_device()) def forward(self, tgt, memory, pad): # padding_mask = self.build_padding_mask(tgt, pad) # tgt_mask = self.generate_sparse_mask(self.T) if self.training else self.tgt_mask return self.decoder(src=memory, tgt=tgt, tgt_mask=self.tgt_mask, tgt_key_padding_mask=None) class ProjectNorm(nn.Module): def __init__(self, feat_size, target_size): super().__init__() self.ln = nn.LayerNorm(feat_size) self.mlp = nn.Sequential( nn.Linear(feat_size, math.floor(2 * feat_size), bias=False), nn.GELU(), nn.Linear(math.floor(2 * feat_size), target_size, bias=False), ) def forward(self, x): return self.mlp(self.ln(x)) class TimeProjection(nn.Module): def __init__(self, seq_size, id_seq_size, feat_size, target_size): super().__init__() self.mlp_seq = nn.Sequential( nn.Linear(seq_size, id_seq_size), nn.ReLU(), 
nn.Dropout(p=0.3), nn.Linear(id_seq_size, id_seq_size) ) self.mlp_t = nn.Sequential( nn.Linear(feat_size, feat_size // 2), nn.ReLU(), nn.Dropout(p=0.3), nn.Linear(feat_size // 2, target_size) ) def forward(self, x): x = x.permute(0, 2, 1) # B, T, C -> B, C, T x = self.mlp_seq(x) # B, C, T / 2 x = x.permute(0, 2, 1) # B, T / 2, C return self.mlp_t(x) # B, T / 2, 1 class PSTHProjection(nn.Module): """Takes Last Output of Block -> (B, C) Builds PSTH table """ def __init__(self, config): super().__init__() self.mlp = nn.Sequential( nn.Linear(config.n_embd, 4 * config.n_embd, bias=False), nn.Dropout(p=0.2), nn.GELU(), nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False) ) def forward(self, x): return self.mlp(x) # class PSTHProjection(nn.Module): # def __init__(self, config): # super().__init__() # self.mlp_seq = nn.Sequential( # nn.Linear(config.id_block_size, config.id_block_size // 2, bias=False), # nn.GELU(), # nn.Dropout(p=0.2), # nn.Linear(config.id_block_size // 2, 1, bias=False) # ) # self.mlp_t = nn.Sequential( # nn.Linear(config.n_embd, config.n_embd * 4, bias=False), # nn.GELU(), # nn.Dropout(p=0.2), # nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False) # ) # def forward(self, x): # x = x.transpose(-1, -2) # B, T, C -> B, C, T # x = self.mlp_seq(x) # B, C, 1 # x = x.transpose(-2, -1) # B, 1, Vocab_id # return self.mlp_t(x) class TimeRNN(nn.Module): def __init__(self, feat_size, target_size): super().__init__() class Block(nn.Module): """ an unassuming Transformer block """ def __init__(self, config): super().__init__() self.ln1 = nn.LayerNorm(config.n_embd) self.ln2 = nn.LayerNorm(config.n_embd) self.attn = CausalSelfAttention(config) self.mlp = nn.Sequential( nn.Linear(config.n_embd, 4 * config.n_embd), nn.GELU(), nn.Linear(4 * config.n_embd, config.n_embd), nn.Dropout(config.resid_pdrop), ) def forward(self, x, pad=None, dtx=None): x = x + self.attn(self.ln1(x), pad) x = x + self.mlp(self.ln2(x)) return x class 
BlockSequential(nn.Sequential): def forward(self, x, pad=None, dtx=None): for module in self._modules.values(): x = module(x, pad, dtx) return x class DiceLossPSTH(nn.Module): def __init__(self, size_average=True, smooth=1): super().__init__() def cross_entropy(self, input, target): return torch.mean(-torch.sum(target * torch.log(input), 1)) def forward(self, logits, targets, smooth=1, class_weights=None): total_logits = F.layer_norm(torch.sum(logits, dim=-2), [logits.size()[-1]]) # probs = F.log_softmax(logits, dim=-1) probs = F.softmax(total_logits, dim=-1) # logits = F.gelu(logits) # probs = logits / (logits.max(dim=-1).values.unsqueeze(-1)) # flatten label and prediction tensors outputs = probs.contiguous().view(-1) targets = targets.contiguous().view(-1) labels = torch.zeros_like(outputs) labels[targets] = 1 / len(targets) # intersection = (outputs * labels).sum() # dice = (2. * intersection + smooth) / (outputs.sum() + labels.sum() + smooth) return self.cross_entropy(outputs[None, ...], labels[None, ...]) class SetLoss(nn.Module): def __init__(self): super().__init__() def cross_entropy(self, input, target): return torch.mean(-torch.sum(target * torch.log(input), 1)) def forward(self, logits, targets): targets = targets.contiguous().view(-1) loss = 0 for n_step, n_logits in enumerate(logits): n_logits = F.softmax(n_logits, dim=-1) n_target = targets[n_step:] n_target_dist = torch.zeros_like(n_logits) if len(n_target) != 0: n_target_dist[n_target] = 1 / len(n_target) loss += self.cross_entropy(n_logits[None,...], n_target_dist[None, ...]) return loss / len(logits) class TruncatedLoss(nn.Module): def __init__(self, q=0.8, k=0.2, trainset_size=50000): super(TruncatedLoss, self).__init__() self.q = q self.k = k self.weight = torch.nn.Parameter(data=torch.ones(trainset_size, 1), requires_grad=False) def forward(self, logits, targets, indexes): p = F.softmax(logits, dim=-1) Yg = torch.gather(p, 2, targets.unsqueeze(2)) loss = 
((1-(Yg**self.q))/self.q)*self.weight[indexes] - ((1-(self.k**self.q))/self.q)*self.weight[indexes] loss = torch.mean(loss) return loss def update_weight(self, logits, targets, indexes): p = F.softmax(logits, dim=-1) Yg = torch.gather(p, 2, targets.unsqueeze(2)) Lq = ((1-(Yg**self.q))/self.q) Lqk = np.repeat(((1-(self.k**self.q))/self.q), targets.size(0)) Lqk = torch.from_numpy(Lqk).type(torch.cuda.FloatTensor) Lqk = torch.unsqueeze(Lqk, 1) condition = torch.gt(Lqk, Lq) self.weight[indexes] = condition.type(torch.cuda.FloatTensor) # class PSTHLOSS(nn.Module): # def __init__(self): # super().__init__() # def forward(self, logits, targets): # total_logits = torch.sum(logits, dim=-2) # sum over sequence dimension # probs = F.softmax(total_logits, dim=-1) # outptu class HungarianMatcher(nn.Module): def __init__(self): super().__init__() @torch.no_grad() def forward(self, logits, targets): T, C = logits.size() probs = F.softmax(logits, dim=-1) cost_id = (1 - probs[:, targets]).cpu().view(T, -1).unsqueeze(0) indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_id.split(len(targets), -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] class KLDivLoss(nn.Module): def __init__(self): super().__init__() self.log_softmax = nn.LogSoftmax(dim=-1) self.KLdiv = nn.KLDivLoss() def forward(self, logits, targets): log_probs = self.log_softmax(logits) return self.KLdiv(log_probs.long(), targets) class PoissonCrossEntropyLoss(nn.Module): def __init__(self): super().__init__() self.log_softmax = nn.LogSoftmax(dim=-1) # self.softmax = nn.Softmax(dim=-1) self.nll_poisson = nn.PoissonNLLLoss() # self.nll_poisson = nn.NLLLoss() def forward(self, logits, targets): log_probs = self.log_softmax(logits) return self.nll_poisson(log_probs, targets) class GPT(nn.Module): """ the full GPT language model, with a context size of block_size """ def __init__(self, config): super().__init__() self.device = 'cpu' if 
torch.cuda.is_available(): self.device = torch.cuda.current_device() self.config = config # input embedding stem self.n_embd = config.n_embd self.tok_emb = nn.Embedding(config.id_vocab_size, config.n_embd) self.pos_emb = PositionalEmbedding(config.n_embd, p_drop=0.2) # self.pos_emb_id = nn.Parameter(torch.zeros(1, config.id_block_size, config.n_embd)) self.pos_emb_frames = nn.Parameter(torch.zeros(1, config.frame_block_size, config.n_embd)) # self.temp_emb = TemporalEmbedding(config.n_embd, p_drop=0.2) # self.temp_emb = RotaryTemporalEmbedding(config.id_block_size) self.temp_emb = LearntTemporalEmbedding(config.id_block_size, config.n_embd) self.frame_temp_emb = LearntTemporalEmbedding(config.frame_block_size, config.n_embd) self.id_drop = nn.Dropout(config.id_drop) self.im_drop = nn.Dropout(config.im_drop) self.drop = nn.Dropout(config.embd_pdrop) # -- Visual Backbone -- # # self.visual_backbone = VideoFeaturesExtractor() self.video_encoder = VideoEncoder() frame_temp_emb = torch.tensor(list(itertools.chain(*[[n * 0.05] * (config.frame_block_size//20) for n in range(20)]))).unsqueeze(0) self.register_buffer("frame_temp_emb_seq", frame_temp_emb) # -- Contrastive Loss -- ## # self.proj_id = ProjectNorm(config.n_embd, config.n_embd) # self.proj_vid = VidProjectNorm(config.n_embd, config.n_embd) # im_shape ## -- IM_Decoder -- ## # self.blocks_id = BlockSequential(*[Block(config) for _ in range(2)]) # self.blocks_im = BlockSequential(*[Block(config) for _ in range(2)]) # self.ln_f_id = nn.LayerNorm(config.n_embd) # self.ln_f_im = nn.LayerNorm(config.n_embd) ## -- Decoder -- ## # self.ln_f = nn.LayerNorm(config.n_embd) ## GPT # self.blocks = BlockSequential(*[Block(config) for _ in range(config.n_layer)]) # self.ln_f = nn.LayerNorm(config.n_embd) ## enc_dec self.state_decoder = Decoder(config) self.ln_f_state_dec = nn.LayerNorm(config.n_embd) self.stimulus_decoder = Decoder(config) self.ln_f_stimulus_dec = nn.LayerNorm(config.n_embd) self.head = nn.Linear(config.n_embd, 
config.vocab_size, bias=False) ## -- Time -- ## # self.proj_time = TimeProjection(config.block_size, config.id_block_size, config.n_embd, config.n_dt) # self.proj_time = ProjectNorm(config.n_embd, config.n_dt) # self.proj_time = ProjectNorm(config.n_embd, 1) ## -- PSTH -- ## # self.proj_psth = PSTHProjection(config) # Loss # self.dice_loss = DiceLossPSTH() # self.poisson_loss = PoissonCrossEntropyLoss() # self.hungarian_matcher = HungarianMatcher() # self.kldiv_loss = KLDivLoss() # self.truncated_loss = TruncatedLoss(trainset_size=config.data_size) # self.set_loss = SetLoss() # self.a = torch.tensor(0.5, requires_grad=True) self.block_size = config.block_size self.apply(self._init_weights) if config.class_weights is not None: self.register_buffer("class_weights", config.class_weights) logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters())) def get_block_size(self): return self.block_size def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=0.02) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def configure_optimizers(self, train_config): """ Separates parameters into those who will experience weight decay and those that will not """ if train_config.decay_weights: decay = set() no_decay = set() whitelist_weight_modules = (torch.nn.Linear, ) blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding) for mn, m in self.named_modules(): for pn, p in m.named_parameters(): fpn = '%s.%s' % (mn, pn) if mn else pn # full param name if pn.endswith('bias'): # all biases will not be decayed no_decay.add(fpn) elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules): # weights of whitelist modules will be weight decayed decay.add(fpn) elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules): # weights of blacklist 
modules will NOT be weight decayed no_decay.add(fpn) else: no_decay.add(fpn) # special case the position embedding parameter in the root GPT module as not decayed black_list_mods = ['pos_emb', 'temp_emb'] for mods in black_list_mods: for name, param in self.named_parameters(): if mods in name: no_decay.add(name) # also pos_emb # validate that we considered every parameter param_dict = {pn: p for pn, p in self.named_parameters()} no_decay -= decay & no_decay inter_params = decay & no_decay union_params = decay | no_decay assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), ) assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \ % (str(param_dict.keys() - union_params), ) # create the pytorch optimizer object optim_groups = [ {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay}, {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0}, ] optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas) else: parameters = self.parameters() optimizer = torch.optim.Adam(parameters, lr=train_config.learning_rate) return optimizer def process_features(self, x): # batch, block_size, feature p_idx = x['id_prev'] idx = x['id'] dtx = x['dt'] dtx_prev = x['dt_prev'] frames = self.video_encoder(x['frames']) pad = x['pad'] b, t = idx.size() # b_p, t_p = p_idx.size() bf, tf = frames.size()[0:2] # forward the GPT model ''' positional and temporal embeddings implemented in multiple ways, learnt, fourrier decomposition and in the case of time, just passed as is. 
''' # # Embeddings prev_id_position_embeddings = 0 # self.pos_emb(p_idx) prev_id_temporal_embeddings = self.temp_emb(dtx_prev.float()) id_position_embeddings = 0 # self.pos_emb(idx) im_position_embeddings = self.pos_emb_frames temporal_embeddings = self.temp_emb(dtx.float()) # Extract ID features prev_token_embeddings = self.id_drop(self.tok_emb(p_idx) + prev_id_temporal_embeddings + prev_id_position_embeddings) token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector token_embeddings = token_embeddings + temporal_embeddings + id_position_embeddings token_embeddings = self.id_drop(token_embeddings) # Extract image features and add time embeddings im_temporal_embeddings = self.frame_temp_emb(self.frame_temp_emb_seq) im_embeddings = frames # self.tok_emb(frames) im_embeddings = im_embeddings + im_position_embeddings + im_temporal_embeddings im_embeddings = self.im_drop(im_embeddings) # separate pos emb? # Tidy up features = dict() features['id_prev'] = prev_token_embeddings features['id'] = token_embeddings features['frames'] = im_embeddings return features, pad def perceiver(self, features, pad): x = self.state_decoder(tgt=features['id'], memory=features['id_prev'], pad=pad) x = self.ln_f_state_dec(x) x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad) x = self.ln_f_stimulus_dec(x) logits = self.head(x) return logits, x def enc_dec(self, features, pad): x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad) x = self.ln_f_stimulus_dec(x) logits = self.head(x) return logits, x def GPTdecoder(self, features, pad, dtx=None): # image + neural features x = torch.cat((features['frames'], features['id']), dim=1) # Decoder x = self.blocks(x, pad, dtx) # (B, T, C) x = self.ln_f(x) logits = self.head(x) # print(logits.shape) # (B, T, Vocab) # logits_psth = x[:, -1] # (B, C) return logits, x def forward(self, x, targets=None): idx = x['id'] dtx = x['dt'] frames = x['frames'] pad = x['pad'] b, t = 
idx.size() # b, t = x['id'].shape[0], x['id'].shape[1] + x['id_prev'].shape[1] bf, tf = frames.size()[0:2] tf = self.config.frame_block_size # assert t + tf == self.config.block_size, f"{tf} {t}" # assert t <= self.block_size, "Cannot forward, model block size is exhausted" features, pad = self.process_features(x) logits, x = self.perceiver(features, pad) # logits, x = self.enc_dec(features, pad) # logits, x = self.GPTdecoder(features, pad) # time = self.proj_time(x) # (B, T_id, 1) # print(x[:, 0].shape) # psth = self.proj_psth(x) # (B, Vocab_id) # if targets, calculate loss # calculate loss on logits up to padding token for each batch loss = None loss_frames = 0 loss_id = [] loss_time = [] loss_dice = [] loss_psth = [] loss_hungarian = [] if targets is not None: # loss_psth = self.dice_loss(psth, targets['modes'][:, tf:]) for B, P in enumerate(pad): tf = 0 # im_logits = logits[B, :tf] # im_targets = targets['frames'][B, :tf] # loss_frames += F.cross_entropy(im_logits.view(-1, im_logits.size(-1)), im_targets.view(-1)) id_logits = logits[B, tf:tf + t - P] id_targets = targets['id'][B, :t - P] loss_id_ = F.cross_entropy(id_logits.view(-1, id_logits.size(-1)), id_targets.view(-1)) # if self.config.epoch >= 15: # self.truncated_loss.update_weight(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...]) # loss_id_ = self.truncated_loss(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...]) # time_preds = time[B, :t - P] # time_targets = targets['dt'][B, :t - P] # loss_time_ = F.cross_entropy(time_preds.view(-1, time_preds.size(-1)), time_targets.view(-1)) # loss_time_ = F.mse_loss(time_preds.squeeze(-1), time_targets) # loss_id_ = self.poisson_loss(id_logits.view(-1, id_logits.size(-1)), F.one_hot(id_targets, self.config.vocab_size)) # if len(id_targets) > 0: # indices = self.hungarian_matcher(id_logits, id_targets) # probs_matching, targets_matching = id_logits[indices[0][0]], id_targets[indices[0][1]] # loss_hungarian_ = 
F.cross_entropy(probs_matching, targets_matching, weight=self.class_weights).to(self.device) # loss_hungarian.append(loss_hungarian_) # # psth = self.proj_psth(x[B, -1]) # from the EOS position # loss_psth.append(torch.nan_to_num(self.set_loss(id_logits, id_targets))) # loss_psth_ = self.dice_loss(id_logits, id_targets) # loss_psth.append(torch.nan_to_num(loss_psth_)) # loss_time.append(torch.nan_to_num(loss_time_)) loss_id.append(torch.nan_to_num(loss_id_)) loss = dict() # loss['frames'] = loss_frames / (b / 3) loss['id'] = sum(loss_id) / (b) # sum(loss_id) / (b * 2) # / len(loss_id) # loss['time'] = sum(loss_time) / (b * 2) # loss['dice'] = sum(loss_dice) / len(loss_dice) # loss['dt'] = loss_time / (b * 50) # loss['hungarian'] = sum(loss_hungarian) / (b * 2) # loss['psth'] = sum(loss_psth) / (b * 2) for key in list(loss): if isinstance(loss[key], float): del loss[key] preds = dict() preds['logits'] = logits # [:, tf:] # only id logits # preds['dt'] = time return preds, features, loss
39.232506
139
0.581761
from torch.nn.modules.activation import GELU, ReLU import math import numpy as np import itertools import logging import torch import torch.nn as nn from torch.nn import functional as F from torch.autograd import Variable from torchvision.models.video import r3d_18 from scipy.optimize import linear_sum_assignment from einops.layers.torch import Rearrange logger = logging.getLogger(__name__) def convert_weights(model: nn.Module): def _convert_weights_to_fp16(l): if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): l.weight.data = l.weight.data.half() if l.bias is not None: l.bias.data = l.bias.data.half() if isinstance(l, nn.MultiheadAttention): for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: tensor = getattr(l, attr) if tensor is not None: tensor.data = tensor.data.half() for name in ["text_projection", "proj"]: if hasattr(l, name): attr = getattr(l, name) if attr is not None: attr.data = attr.data.half() model.apply(_convert_weights_to_fp16) class GPTConfig: embd_pdrop = 0.2 resid_pdrop = 0.2 attn_pdrop = 0.2 pos_pdrop = 0.2 temp_pdrop = 0.2 pos_emb = True temp_emb = True start_prune = 30 epoch = 0 def __init__(self, vocab_size, block_size, **kwargs): self.vocab_size = vocab_size self.block_size = block_size for k, v in kwargs.items(): setattr(self, k, v) class neuralGPTConfig: n = 0.4 im_drop = 0.2 id_drop = n embd_pdrop = n resid_pdrop = n attn_pdrop = n pos_pdrop = n temp_pdrop = n pos_emb = True temp_emb = True def __init__(self, vocab_size, block_size, **kwargs): self.vocab_size = vocab_size self.block_size = block_size for k, v in kwargs.items(): setattr(self, k, v) class GPT1Config(GPTConfig): n_layer = 12 n_head = 12 n_embd = 768 class VideoFeaturesExtractor(nn.Module): def __init__(self): super().__init__() self.backbone = torch.nn.Sequential(*(list(r3d_18(pretrained=True).children())[:-2])) convert_weights(self.backbone) def forward(self, x): features = self.backbone(x) B, C, T, H, W = 
features.shape features = features.permute(0, 2, 3, 4, 1) features = features.view(B, -1, C) return features class VideoEncoder(nn.Module): def __init__(self): super().__init__() self.to_patch_embedding = nn.Sequential( Rearrange('b c t (h p1) (w p2) -> b (t h w) (p1 p2 c)', p1=16, p2=16) ) def forward(self, x): return self.to_patch_embedding(x) class CausalSelfAttention(nn.Module): def __init__(self, config): super().__init__() assert config.n_embd % config.n_head == 0 self.config = config self.key = nn.Linear(config.n_embd, config.n_embd) self.query = nn.Linear(config.n_embd, config.n_embd) self.value = nn.Linear(config.n_embd, config.n_embd) self.attn_drop = nn.Dropout(config.attn_pdrop) self.resid_drop = nn.Dropout(config.resid_pdrop) self.proj = nn.Linear(config.n_embd, config.n_embd) self.register_buffer("mask", self.build_mask(config.block_size)) self.n_head = config.n_head self.att = None self.T = config.block_size def build_mask(self, block_size): mask = torch.tril(torch.ones((block_size, block_size)), ).view(1, 1, block_size, block_size) return mask def generate_sparse_mask(self, att, p, config): assert p >= 0 and p <= 1, "p should be in [0, 1]" T = config.block_size mask = torch.rand((1, T)) < p mask = mask.repeat(T, 1) mask[0, 0] = False # check if any step is fully masked and umask it idx_all_true = (True == torch.all(mask, dim=0)).nonzero() for step in idx_all_true: sampler = torch.distributions.Uniform(low=0, high=step.item()+1) idx_false = sampler.sample((1,1)).long() mask[step, idx_false] = False # mask = mask.repeat(T, 1) mask = mask.view(1, 1, T, T).cuda() if att.is_cuda else mask.view(1, 1, T, T) att = att.masked_fill(mask, float('-inf')) return att def forward(self, x, pad=None, dtx=None): # B = Batch, T = Sequence, C = n_embed B, T, C = x.size() # calculate query, key, values for all head in batch and move head forward to the batch dim k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) q = 
self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) # # apply rotary embeddings # if dtx is not None: # q, k = self.rotary_embedding(q, k, dtx) # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T) att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf')) if self.training: att = self.generate_sparse_mask(att, 0.25, self.config) if pad is not None: for idx, i in enumerate(pad): att[idx, :, :, self.T - i:] = float('-inf') # only able to see first padding token att = F.softmax(att, dim=-1) att = self.attn_drop(att) self.att = att y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side # output projection y = self.resid_drop(self.proj(y)) return y class PositionalEmbedding(nn.Module): def __init__(self, n_embd, p_drop, max_len=1500): super().__init__() self.dropout = nn.Dropout(p=p_drop) # Compute the positional encodings once in log space. 
pe = torch.zeros(max_len, n_embd) position = torch.arange(0, max_len).unsqueeze(1) div_term = torch.exp(torch.arange(0, n_embd, 2) * -(math.log(10000.0) / n_embd)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) def forward(self, x): x = Variable(self.pe[:, :x.size(1)], requires_grad=False) return self.dropout(x) # class RotarySpatioTemporalEmbedding(nn.Module): # """ Rotary temporal embeddings - block_size = id_blk_sz """ # def __init__(self, config): # super().__init__() # self.frame_block_size = config.frame_block_size # self.id_block_size = config.id_block_size # self.emb = RotaryEmbedding(dim=32) # def forward(self, q, k, t): # b = t.shape[0] # tf = self.frame_block_size # queries = [] # keys = [] # for B in range(b): # im_temp_emb = torch.tensor([-0.5] * (tf//2) + [0.5] * (tf//2)) # im_pos_emb = torch.arange(self.frame_block_size) # im_emb = torch.stack([im_temp_emb, im_pos_emb], dim=0) # id_temp_emb = self.temp_emb(t[B], cache_key=self.block_size) # freqs = self.emb(torch.cat(im_emb, id_temp_emb)) # queries.append(apply_rotary_emb(freqs, q[B][None, ...])) # keys.append(apply_rotary_emb(freqs, k[B][None, ...])) # q, k = torch.cat(queries), torch.cat(keys) # return q, k class TemporalEmbedding(nn.Module): def __init__(self, n_embd, p_drop, max_len=1500): super().__init__() self.dropout = nn.Dropout(p=p_drop) # Compute the positional encodings once in log space. 
pe = torch.zeros(max_len, n_embd) position = torch.arange(0, max_len).unsqueeze(1) div_term = torch.exp(torch.arange(0, n_embd, 2) * -(math.log(10000.0) / n_embd)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) def forward(self, x): x = Variable(self.pe[:, :x.size(1)], requires_grad=False) return self.dropout(x) class LearntTemporalEmbedding(nn.Module): def __init__(self, block_sz, n_embd, p_drop=0.2): super().__init__() self.temp_emb = nn.Sequential( nn.Linear(1, n_embd // 2), nn.GELU(), nn.Linear(n_embd // 2, n_embd), nn.Dropout(p_drop) ) def forward(self, x): return self.temp_emb(x.unsqueeze(-1)) class Decoder(nn.Module): def __init__(self, config): super().__init__() # decoder_layer = nn.TransformerDecoderLayer(config.n_embd, config.n_head, # activation='gelu', dropout=0.2, batch_first=True) # self.decoder = nn.TransformerDecoder(decoder_layer, config.n_layer) self.decoder = nn.Transformer(d_model=config.n_embd, nhead=config.n_head, num_encoder_layers=3, num_decoder_layers=config.n_layer, activation="gelu", dropout=0.4, batch_first=True) self.register_buffer("tgt_mask", self.generate_square_subsequent_mask(config.id_block_size)) # self.register_buffer("tgt_pad_mask", self.generate_padding_mask(config.ids_block_size)) self.T = config.id_block_size def generate_square_subsequent_mask(self, sz: int, pad=None): mask = (torch.triu(torch.ones(sz, sz), diagonal=0) == 1).transpose(0, 1) mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) return mask def generate_padding_mask(self, sz: int, pad=None): mask = torch.zeros(1, sz, dtype=torch.bool) return mask def generate_sparse_mask(self, sz: int, pad=None): rand_mat = torch.rand(1, sz) k = round(0.75 * sz) k_th_quant = torch.topk(rand_mat, k, largest = False)[0][:,-1:] bool_tensor = rand_mat <= k_th_quant mask = torch.where(bool_tensor, torch.tensor(1), torch.tensor(0)).repeat(sz, 1) 
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) return mask.cuda(self.tgt_mask.get_device()) if self.tgt_mask.is_cuda else mask def build_padding_mask(self, tgt, pad): # mask = self.tgt_pad_mask.repeat(tgt.shape[0], 1) mask = torch.zeros(tgt.shape[0], self.T, dtype=torch.bool) for B, P in enumerate(pad): mask[B, self.T - P:] = True return mask # .to(torch.cuda.current_device()) def forward(self, tgt, memory, pad): # padding_mask = self.build_padding_mask(tgt, pad) # tgt_mask = self.generate_sparse_mask(self.T) if self.training else self.tgt_mask return self.decoder(src=memory, tgt=tgt, tgt_mask=self.tgt_mask, tgt_key_padding_mask=None) class ProjectNorm(nn.Module): def __init__(self, feat_size, target_size): super().__init__() self.ln = nn.LayerNorm(feat_size) self.mlp = nn.Sequential( nn.Linear(feat_size, math.floor(2 * feat_size), bias=False), nn.GELU(), nn.Linear(math.floor(2 * feat_size), target_size, bias=False), ) def forward(self, x): return self.mlp(self.ln(x)) class TimeProjection(nn.Module): def __init__(self, seq_size, id_seq_size, feat_size, target_size): super().__init__() self.mlp_seq = nn.Sequential( nn.Linear(seq_size, id_seq_size), nn.ReLU(), nn.Dropout(p=0.3), nn.Linear(id_seq_size, id_seq_size) ) self.mlp_t = nn.Sequential( nn.Linear(feat_size, feat_size // 2), nn.ReLU(), nn.Dropout(p=0.3), nn.Linear(feat_size // 2, target_size) ) def forward(self, x): x = x.permute(0, 2, 1) # B, T, C -> B, C, T x = self.mlp_seq(x) # B, C, T / 2 x = x.permute(0, 2, 1) # B, T / 2, C return self.mlp_t(x) # B, T / 2, 1 class PSTHProjection(nn.Module): def __init__(self, config): super().__init__() self.mlp = nn.Sequential( nn.Linear(config.n_embd, 4 * config.n_embd, bias=False), nn.Dropout(p=0.2), nn.GELU(), nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False) ) def forward(self, x): return self.mlp(x) # class PSTHProjection(nn.Module): # def __init__(self, config): # super().__init__() # self.mlp_seq = 
nn.Sequential( # nn.Linear(config.id_block_size, config.id_block_size // 2, bias=False), # nn.GELU(), # nn.Dropout(p=0.2), # nn.Linear(config.id_block_size // 2, 1, bias=False) # ) # self.mlp_t = nn.Sequential( # nn.Linear(config.n_embd, config.n_embd * 4, bias=False), # nn.GELU(), # nn.Dropout(p=0.2), # nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False) # ) # def forward(self, x): # x = x.transpose(-1, -2) # B, T, C -> B, C, T # x = self.mlp_seq(x) # B, C, 1 # x = x.transpose(-2, -1) # B, 1, Vocab_id # return self.mlp_t(x) class TimeRNN(nn.Module): def __init__(self, feat_size, target_size): super().__init__() class Block(nn.Module): def __init__(self, config): super().__init__() self.ln1 = nn.LayerNorm(config.n_embd) self.ln2 = nn.LayerNorm(config.n_embd) self.attn = CausalSelfAttention(config) self.mlp = nn.Sequential( nn.Linear(config.n_embd, 4 * config.n_embd), nn.GELU(), nn.Linear(4 * config.n_embd, config.n_embd), nn.Dropout(config.resid_pdrop), ) def forward(self, x, pad=None, dtx=None): x = x + self.attn(self.ln1(x), pad) x = x + self.mlp(self.ln2(x)) return x class BlockSequential(nn.Sequential): def forward(self, x, pad=None, dtx=None): for module in self._modules.values(): x = module(x, pad, dtx) return x class DiceLossPSTH(nn.Module): def __init__(self, size_average=True, smooth=1): super().__init__() def cross_entropy(self, input, target): return torch.mean(-torch.sum(target * torch.log(input), 1)) def forward(self, logits, targets, smooth=1, class_weights=None): total_logits = F.layer_norm(torch.sum(logits, dim=-2), [logits.size()[-1]]) # probs = F.log_softmax(logits, dim=-1) probs = F.softmax(total_logits, dim=-1) # logits = F.gelu(logits) # probs = logits / (logits.max(dim=-1).values.unsqueeze(-1)) # flatten label and prediction tensors outputs = probs.contiguous().view(-1) targets = targets.contiguous().view(-1) labels = torch.zeros_like(outputs) labels[targets] = 1 / len(targets) # intersection = (outputs * labels).sum() # dice = (2. 
* intersection + smooth) / (outputs.sum() + labels.sum() + smooth) return self.cross_entropy(outputs[None, ...], labels[None, ...]) class SetLoss(nn.Module): def __init__(self): super().__init__() def cross_entropy(self, input, target): return torch.mean(-torch.sum(target * torch.log(input), 1)) def forward(self, logits, targets): targets = targets.contiguous().view(-1) loss = 0 for n_step, n_logits in enumerate(logits): n_logits = F.softmax(n_logits, dim=-1) n_target = targets[n_step:] n_target_dist = torch.zeros_like(n_logits) if len(n_target) != 0: n_target_dist[n_target] = 1 / len(n_target) loss += self.cross_entropy(n_logits[None,...], n_target_dist[None, ...]) return loss / len(logits) class TruncatedLoss(nn.Module): def __init__(self, q=0.8, k=0.2, trainset_size=50000): super(TruncatedLoss, self).__init__() self.q = q self.k = k self.weight = torch.nn.Parameter(data=torch.ones(trainset_size, 1), requires_grad=False) def forward(self, logits, targets, indexes): p = F.softmax(logits, dim=-1) Yg = torch.gather(p, 2, targets.unsqueeze(2)) loss = ((1-(Yg**self.q))/self.q)*self.weight[indexes] - ((1-(self.k**self.q))/self.q)*self.weight[indexes] loss = torch.mean(loss) return loss def update_weight(self, logits, targets, indexes): p = F.softmax(logits, dim=-1) Yg = torch.gather(p, 2, targets.unsqueeze(2)) Lq = ((1-(Yg**self.q))/self.q) Lqk = np.repeat(((1-(self.k**self.q))/self.q), targets.size(0)) Lqk = torch.from_numpy(Lqk).type(torch.cuda.FloatTensor) Lqk = torch.unsqueeze(Lqk, 1) condition = torch.gt(Lqk, Lq) self.weight[indexes] = condition.type(torch.cuda.FloatTensor) # class PSTHLOSS(nn.Module): # def __init__(self): # super().__init__() # def forward(self, logits, targets): # total_logits = torch.sum(logits, dim=-2) # sum over sequence dimension # probs = F.softmax(total_logits, dim=-1) # outptu class HungarianMatcher(nn.Module): def __init__(self): super().__init__() @torch.no_grad() def forward(self, logits, targets): T, C = logits.size() probs = 
F.softmax(logits, dim=-1) cost_id = (1 - probs[:, targets]).cpu().view(T, -1).unsqueeze(0) indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_id.split(len(targets), -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] class KLDivLoss(nn.Module): def __init__(self): super().__init__() self.log_softmax = nn.LogSoftmax(dim=-1) self.KLdiv = nn.KLDivLoss() def forward(self, logits, targets): log_probs = self.log_softmax(logits) return self.KLdiv(log_probs.long(), targets) class PoissonCrossEntropyLoss(nn.Module): def __init__(self): super().__init__() self.log_softmax = nn.LogSoftmax(dim=-1) # self.softmax = nn.Softmax(dim=-1) self.nll_poisson = nn.PoissonNLLLoss() # self.nll_poisson = nn.NLLLoss() def forward(self, logits, targets): log_probs = self.log_softmax(logits) return self.nll_poisson(log_probs, targets) class GPT(nn.Module): def __init__(self, config): super().__init__() self.device = 'cpu' if torch.cuda.is_available(): self.device = torch.cuda.current_device() self.config = config # input embedding stem self.n_embd = config.n_embd self.tok_emb = nn.Embedding(config.id_vocab_size, config.n_embd) self.pos_emb = PositionalEmbedding(config.n_embd, p_drop=0.2) # self.pos_emb_id = nn.Parameter(torch.zeros(1, config.id_block_size, config.n_embd)) self.pos_emb_frames = nn.Parameter(torch.zeros(1, config.frame_block_size, config.n_embd)) # self.temp_emb = TemporalEmbedding(config.n_embd, p_drop=0.2) # self.temp_emb = RotaryTemporalEmbedding(config.id_block_size) self.temp_emb = LearntTemporalEmbedding(config.id_block_size, config.n_embd) self.frame_temp_emb = LearntTemporalEmbedding(config.frame_block_size, config.n_embd) self.id_drop = nn.Dropout(config.id_drop) self.im_drop = nn.Dropout(config.im_drop) self.drop = nn.Dropout(config.embd_pdrop) # -- Visual Backbone -- # # self.visual_backbone = VideoFeaturesExtractor() self.video_encoder = VideoEncoder() frame_temp_emb = 
torch.tensor(list(itertools.chain(*[[n * 0.05] * (config.frame_block_size//20) for n in range(20)]))).unsqueeze(0) self.register_buffer("frame_temp_emb_seq", frame_temp_emb) # -- Contrastive Loss -- ## # self.proj_id = ProjectNorm(config.n_embd, config.n_embd) # self.proj_vid = VidProjectNorm(config.n_embd, config.n_embd) # im_shape ## -- IM_Decoder -- ## # self.blocks_id = BlockSequential(*[Block(config) for _ in range(2)]) # self.blocks_im = BlockSequential(*[Block(config) for _ in range(2)]) # self.ln_f_id = nn.LayerNorm(config.n_embd) # self.ln_f_im = nn.LayerNorm(config.n_embd) ## -- Decoder -- ## # self.ln_f = nn.LayerNorm(config.n_embd) ## GPT # self.blocks = BlockSequential(*[Block(config) for _ in range(config.n_layer)]) # self.ln_f = nn.LayerNorm(config.n_embd) ## enc_dec self.state_decoder = Decoder(config) self.ln_f_state_dec = nn.LayerNorm(config.n_embd) self.stimulus_decoder = Decoder(config) self.ln_f_stimulus_dec = nn.LayerNorm(config.n_embd) self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False) ## -- Time -- ## # self.proj_time = TimeProjection(config.block_size, config.id_block_size, config.n_embd, config.n_dt) # self.proj_time = ProjectNorm(config.n_embd, config.n_dt) # self.proj_time = ProjectNorm(config.n_embd, 1) ## -- PSTH -- ## # self.proj_psth = PSTHProjection(config) # Loss # self.dice_loss = DiceLossPSTH() # self.poisson_loss = PoissonCrossEntropyLoss() # self.hungarian_matcher = HungarianMatcher() # self.kldiv_loss = KLDivLoss() # self.truncated_loss = TruncatedLoss(trainset_size=config.data_size) # self.set_loss = SetLoss() # self.a = torch.tensor(0.5, requires_grad=True) self.block_size = config.block_size self.apply(self._init_weights) if config.class_weights is not None: self.register_buffer("class_weights", config.class_weights) logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters())) def get_block_size(self): return self.block_size def _init_weights(self, module): if isinstance(module, 
(nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=0.02) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def configure_optimizers(self, train_config): if train_config.decay_weights: decay = set() no_decay = set() whitelist_weight_modules = (torch.nn.Linear, ) blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding) for mn, m in self.named_modules(): for pn, p in m.named_parameters(): fpn = '%s.%s' % (mn, pn) if mn else pn # full param name if pn.endswith('bias'): # all biases will not be decayed no_decay.add(fpn) elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules): # weights of whitelist modules will be weight decayed decay.add(fpn) elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules): # weights of blacklist modules will NOT be weight decayed no_decay.add(fpn) else: no_decay.add(fpn) # special case the position embedding parameter in the root GPT module as not decayed black_list_mods = ['pos_emb', 'temp_emb'] for mods in black_list_mods: for name, param in self.named_parameters(): if mods in name: no_decay.add(name) # also pos_emb # validate that we considered every parameter param_dict = {pn: p for pn, p in self.named_parameters()} no_decay -= decay & no_decay inter_params = decay & no_decay union_params = decay | no_decay assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), ) assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" 
\ % (str(param_dict.keys() - union_params), ) # create the pytorch optimizer object optim_groups = [ {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay}, {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0}, ] optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas) else: parameters = self.parameters() optimizer = torch.optim.Adam(parameters, lr=train_config.learning_rate) return optimizer def process_features(self, x): # batch, block_size, feature p_idx = x['id_prev'] idx = x['id'] dtx = x['dt'] dtx_prev = x['dt_prev'] frames = self.video_encoder(x['frames']) pad = x['pad'] b, t = idx.size() # b_p, t_p = p_idx.size() bf, tf = frames.size()[0:2] # forward the GPT model # # Embeddings prev_id_position_embeddings = 0 # self.pos_emb(p_idx) prev_id_temporal_embeddings = self.temp_emb(dtx_prev.float()) id_position_embeddings = 0 # self.pos_emb(idx) im_position_embeddings = self.pos_emb_frames temporal_embeddings = self.temp_emb(dtx.float()) # Extract ID features prev_token_embeddings = self.id_drop(self.tok_emb(p_idx) + prev_id_temporal_embeddings + prev_id_position_embeddings) token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector token_embeddings = token_embeddings + temporal_embeddings + id_position_embeddings token_embeddings = self.id_drop(token_embeddings) # Extract image features and add time embeddings im_temporal_embeddings = self.frame_temp_emb(self.frame_temp_emb_seq) im_embeddings = frames # self.tok_emb(frames) im_embeddings = im_embeddings + im_position_embeddings + im_temporal_embeddings im_embeddings = self.im_drop(im_embeddings) # separate pos emb? 
# Tidy up features = dict() features['id_prev'] = prev_token_embeddings features['id'] = token_embeddings features['frames'] = im_embeddings return features, pad def perceiver(self, features, pad): x = self.state_decoder(tgt=features['id'], memory=features['id_prev'], pad=pad) x = self.ln_f_state_dec(x) x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad) x = self.ln_f_stimulus_dec(x) logits = self.head(x) return logits, x def enc_dec(self, features, pad): x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad) x = self.ln_f_stimulus_dec(x) logits = self.head(x) return logits, x def GPTdecoder(self, features, pad, dtx=None): # image + neural features x = torch.cat((features['frames'], features['id']), dim=1) # Decoder x = self.blocks(x, pad, dtx) # (B, T, C) x = self.ln_f(x) logits = self.head(x) # print(logits.shape) # (B, T, Vocab) # logits_psth = x[:, -1] # (B, C) return logits, x def forward(self, x, targets=None): idx = x['id'] dtx = x['dt'] frames = x['frames'] pad = x['pad'] b, t = idx.size() # b, t = x['id'].shape[0], x['id'].shape[1] + x['id_prev'].shape[1] bf, tf = frames.size()[0:2] tf = self.config.frame_block_size # assert t + tf == self.config.block_size, f"{tf} {t}" # assert t <= self.block_size, "Cannot forward, model block size is exhausted" features, pad = self.process_features(x) logits, x = self.perceiver(features, pad) # logits, x = self.enc_dec(features, pad) # logits, x = self.GPTdecoder(features, pad) # time = self.proj_time(x) # (B, T_id, 1) # print(x[:, 0].shape) # psth = self.proj_psth(x) # (B, Vocab_id) # if targets, calculate loss # calculate loss on logits up to padding token for each batch loss = None loss_frames = 0 loss_id = [] loss_time = [] loss_dice = [] loss_psth = [] loss_hungarian = [] if targets is not None: # loss_psth = self.dice_loss(psth, targets['modes'][:, tf:]) for B, P in enumerate(pad): tf = 0 # im_logits = logits[B, :tf] # im_targets = targets['frames'][B, :tf] 
# loss_frames += F.cross_entropy(im_logits.view(-1, im_logits.size(-1)), im_targets.view(-1)) id_logits = logits[B, tf:tf + t - P] id_targets = targets['id'][B, :t - P] loss_id_ = F.cross_entropy(id_logits.view(-1, id_logits.size(-1)), id_targets.view(-1)) # if self.config.epoch >= 15: # self.truncated_loss.update_weight(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...]) # loss_id_ = self.truncated_loss(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...]) # time_preds = time[B, :t - P] # time_targets = targets['dt'][B, :t - P] # loss_time_ = F.cross_entropy(time_preds.view(-1, time_preds.size(-1)), time_targets.view(-1)) # loss_time_ = F.mse_loss(time_preds.squeeze(-1), time_targets) # loss_id_ = self.poisson_loss(id_logits.view(-1, id_logits.size(-1)), F.one_hot(id_targets, self.config.vocab_size)) # if len(id_targets) > 0: # indices = self.hungarian_matcher(id_logits, id_targets) # probs_matching, targets_matching = id_logits[indices[0][0]], id_targets[indices[0][1]] # loss_hungarian_ = F.cross_entropy(probs_matching, targets_matching, weight=self.class_weights).to(self.device) # loss_hungarian.append(loss_hungarian_) # # psth = self.proj_psth(x[B, -1]) # from the EOS position # loss_psth.append(torch.nan_to_num(self.set_loss(id_logits, id_targets))) # loss_psth_ = self.dice_loss(id_logits, id_targets) # loss_psth.append(torch.nan_to_num(loss_psth_)) # loss_time.append(torch.nan_to_num(loss_time_)) loss_id.append(torch.nan_to_num(loss_id_)) loss = dict() # loss['frames'] = loss_frames / (b / 3) loss['id'] = sum(loss_id) / (b) # sum(loss_id) / (b * 2) # / len(loss_id) # loss['time'] = sum(loss_time) / (b * 2) # loss['dice'] = sum(loss_dice) / len(loss_dice) # loss['dt'] = loss_time / (b * 50) # loss['hungarian'] = sum(loss_hungarian) / (b * 2) # loss['psth'] = sum(loss_psth) / (b * 2) for key in list(loss): if isinstance(loss[key], float): del loss[key] preds = dict() preds['logits'] = logits # [:, tf:] # only id logits # 
preds['dt'] = time return preds, features, loss
true
true
f70fc4b75705f42bbe31c2e4b05c351f37b97eeb
2,243
py
Python
driller/prioritization_techniques/unique.py
ruaronicola/driller
9f581dee0fa0365c0738d947428d0d9462290c5e
[ "BSD-2-Clause" ]
null
null
null
driller/prioritization_techniques/unique.py
ruaronicola/driller
9f581dee0fa0365c0738d947428d0d9462290c5e
[ "BSD-2-Clause" ]
null
null
null
driller/prioritization_techniques/unique.py
ruaronicola/driller
9f581dee0fa0365c0738d947428d0d9462290c5e
[ "BSD-2-Clause" ]
null
null
null
from . import PrioritizationTechnique from collections import Counter class UniqueSearch(PrioritizationTechnique): def __init__(self, binary, target_os, target_arch, similarity_func=None): super(UniqueSearch, self).__init__(binary=binary, target_os=target_os, target_arch=target_arch) self.uniqueness = dict() self.similarity = dict() self.similarity_func = similarity_func or self.l2_similarity def update(self, seeds): super(UniqueSearch, self).update(seeds=seeds) if all([s in self.uniqueness for s in seeds]): return # clean up self.uniqueness = {k:(0,0) for k in seeds} self.similarity = {(a,b):v for (a,b),v in self.similarity.items() if a in seeds and b in seeds} def update_average(seed, new): prev, size = self.uniqueness[seed] new_average = float(prev * size + new) / (size + 1) self.uniqueness[seed] = new_average, size + 1 for a in seeds: for b in seeds: similarity = self.similarity.get((a, b), None) or self.similarity_func(a, b) self.similarity[(a, b)] = self.similarity[(b, a)] = similarity update_average(a, similarity) update_average(b, similarity) self.uniqueness = {k:v for k,(v,_) in self.uniqueness.items()} def pop_best(self, not_drilled): best = max({k:v for k,v in self.uniqueness.items() if k in not_drilled}, key=self.uniqueness.get) self.uniqueness.pop(best) return best def l2_similarity(self, seed_a, seed_b): """ The (L2) distance between the counts of the state addresses in the history of the path. :param seed_a: The first seed to compare :param seed_b: The second seed to compare """ if seed_a == seed_b: return 1.0 try: count_a = Counter(self.trace(seed_a)) count_b = Counter(self.trace(seed_b)) normal_distance = sum((count_a.get(addr, 0) - count_b.get(addr, 0)) ** 2 for addr in set(list(count_a.keys()) + list(count_b.keys()))) ** 0.5 return 1.0 / (1 + normal_distance) except: return 0.0
39.350877
105
0.607222
from . import PrioritizationTechnique from collections import Counter class UniqueSearch(PrioritizationTechnique): def __init__(self, binary, target_os, target_arch, similarity_func=None): super(UniqueSearch, self).__init__(binary=binary, target_os=target_os, target_arch=target_arch) self.uniqueness = dict() self.similarity = dict() self.similarity_func = similarity_func or self.l2_similarity def update(self, seeds): super(UniqueSearch, self).update(seeds=seeds) if all([s in self.uniqueness for s in seeds]): return self.uniqueness = {k:(0,0) for k in seeds} self.similarity = {(a,b):v for (a,b),v in self.similarity.items() if a in seeds and b in seeds} def update_average(seed, new): prev, size = self.uniqueness[seed] new_average = float(prev * size + new) / (size + 1) self.uniqueness[seed] = new_average, size + 1 for a in seeds: for b in seeds: similarity = self.similarity.get((a, b), None) or self.similarity_func(a, b) self.similarity[(a, b)] = self.similarity[(b, a)] = similarity update_average(a, similarity) update_average(b, similarity) self.uniqueness = {k:v for k,(v,_) in self.uniqueness.items()} def pop_best(self, not_drilled): best = max({k:v for k,v in self.uniqueness.items() if k in not_drilled}, key=self.uniqueness.get) self.uniqueness.pop(best) return best def l2_similarity(self, seed_a, seed_b): if seed_a == seed_b: return 1.0 try: count_a = Counter(self.trace(seed_a)) count_b = Counter(self.trace(seed_b)) normal_distance = sum((count_a.get(addr, 0) - count_b.get(addr, 0)) ** 2 for addr in set(list(count_a.keys()) + list(count_b.keys()))) ** 0.5 return 1.0 / (1 + normal_distance) except: return 0.0
true
true
f70fc4d24cffa26af63c6d398dd5b0f208d70f01
7,543
py
Python
pymultifracs/simul/mrw.py
agramfort/pymultifracs
3a8896f3f26180b05ccecb4a905b05a3ebc0308b
[ "MIT" ]
9
2019-03-29T05:28:42.000Z
2019-12-29T12:41:15.000Z
pymultifracs/simul/mrw.py
agramfort/pymultifracs
3a8896f3f26180b05ccecb4a905b05a3ebc0308b
[ "MIT" ]
4
2021-01-20T14:58:03.000Z
2021-03-01T11:52:09.000Z
pymultifracs/simul/mrw.py
agramfort/pymultifracs
3a8896f3f26180b05ccecb4a905b05a3ebc0308b
[ "MIT" ]
6
2021-02-08T15:23:39.000Z
2022-03-28T13:30:46.000Z
# Synthesis of multifractal random walk and derived processes. # # Roberto Fabio Leonarduzzi # January, 2019 import numpy as np from .fbm import fgn from .pzutils import gaussian_cme, gaussian_chol from numpy.fft import fft, ifft # import math # import matplotlib.pyplot as plt def mrw(shape, H, lam, L, sigma=1, method='cme', z0=(None, None)): ''' Create a realization of fractional Brownian motion using circulant matrix embedding. Parameters ---------- shape : int | tuple(int) If scalar, it is the number of samples. If tuple it is (N, R), the number of samples and realizations, respectively. H : float Hurst exponent lam : float Lambda, intermittency parameter L : float Integral scale sigma : float Variance of process Returns ------- mrw : ndarray Synthesized mrw realizations. If `shape` is scalar, fbm is ofshape (N,). Otherwise, it is of shape (N, R). References ---------- .. [1] Bacry, Delour, Muzy, "Multifractal Random Walk", Physical Review E, 2001 ''' try: N, R = shape do_squeeze = False except TypeError: # shape is scalar N, R = shape, 1 do_squeeze = True # Is 0.5 or 0 the lower bound ? Search biblio if not 0 <= H <= 1: raise ValueError('H must satisfy 0 <= H <= 1') if L > N: raise ValueError('Integral scale L is larger than data length N') # 1) Gaussian process w w = gaussian_w(N, R, L, lam, 1, method, z0[1]) # Adjust mean to ensure convergence of variance r = 1/2 # see Bacry, Delour & Muzy, Phys Rev E, 2001, page 4 w = w - np.mean(w, axis=0) - r * lam**2 * np.log(L) # 2) fGn e e = fgn((N, R), H, sigma, method=method, z0=z0[0]) # 3) mrw mrw = np.cumsum(e * np.exp(w), axis=0) return mrw.squeeze() if do_squeeze else mrw def mrw_cumul(shape, c1, c2, L, **kwargs): ''' Wrapper for mrw generation from cumulants. Parameters ---------- shape : int | tuple(int) If scalar, it is the number of samples. If tuple it is (N, R), the number of samples and realizations, respectively. 
c1 : float First order cumulant c2 : float Second order cumulant L : float Integral scale kwargs : dict Optional parameters passed to :obj:`mrw` Returns ------- mrw : ndarray Synthesized mrw realizations. If `shape` is scalar, fbm is ofshape (N,). Otherwise, it is of shape (N, R). References ---------- .. [1] Bacry, Delour, Muzy, "Multifractal Random Walk", Physical Review E, 2001 ''' H = c1 + c2 lam = np.sqrt(-c2) return mrw(shape, H, lam, L, **kwargs) def skewed_mrw(shape, H, lam, L, K0=1, alpha=1, sigma=1, dt=1, beta=1, do_mirror=False): ''' Create skewed mrw as in Pochart & Bouchaud Assumes :math:`\\Delta_t=1`, so no parameter beta is needed. ''' try: N, R = shape do_squeeze = False except TypeError: # shape is scalar N, R = shape, 1 do_squeeze = True # Is 0.5 or 0 the lower bound ? Search biblio if not 0 <= H <= 1: raise ValueError('H must satisfy 0 <= H <= 1') if L / dt > N: raise ValueError('Integral scale L/dt is larger than data length N') # 1) Gaussian process w w = gaussian_w(N, R, L, lam, dt) # Adjust mean to ensure convergence of variance r = 1 # see Bacry, Delour & Muzy, Phys Rev E, 2001, page 4 w = w - np.mean(w, axis=0) - r * lam**2 * np.log(L / dt) # 2) fGn e e = fgn((2*N + 1, R), H, sigma, dt) # 3) Correlate components past = skewness_convolution(e, K0, alpha, beta, dt) wtilde = w - past # 4) skewed mrw smrw = np.cumsum(e[N:] * np.exp(wtilde), axis=0) if do_squeeze: smrw = smrw.squeeze() if do_mirror: past_mirror = skewness_convolution(-e, K0, alpha, beta, dt) wtilde_mirror = w - past_mirror smrw_mirror = np.cumsum(-e[N:] * np.exp(wtilde_mirror), axis=0) if do_squeeze: smrw_mirror = smrw_mirror.squeeze() return smrw, smrw_mirror else: return smrw def gaussian_w(N, R, L, lam, dt=1, method='cme', z0=None): ''' Auxiliar function to create gaussian process w ''' kmax = int(L / dt) k = np.arange(kmax) rho = np.ones((N)) rho[:kmax] = L / (k + 1) / dt cov = (lam ** 2) * np.log(rho) if method == 'cme': w = gaussian_cme(cov, N, R, z0) elif method == 
'chol': w = gaussian_chol(cov, N, R, z0) return w def skewness_convolution(e, K0, alpha, beta=1, dt=1): ''' Noise e should be of length 2*N, with "N false past variables" at the beginning to avoid spurious correlations due to cutoffs in convolution. ''' N, _ = e.shape N = N // 2 tau = np.arange(1, N+1) Kbar = np.zeros((2*N)) Kbar[1:N+1] = K0 / (tau**alpha) / (dt**beta) skew_conv = np.real(ifft(fft(Kbar[:, None], axis=0) * fft(e, axis=0), axis=0)) return skew_conv[N:] def skewness_convolution_dumb(e, K0, alpha, beta=1, dt=1): ''' Direct and inefficient calculation for testing purposes. Receives "true" input noise of size N. ''' N, R = e.shape def K(i, j): return K0 / (j-i)**alpha / dt**beta scorr = np.zeros((N, R)) for k in range(N): for i in range(k): scorr[k, :] += K(i, k) * e[i, :] return scorr def mrw2D(shape, H, lam, L, sigma=1): ''' Create a realization of fractional Brownian motion using circulant matrix embedding. Parameters ---------- shape : int | tuple(int) If scalar, it is the number of samples. If tuple it is (N, R), the number of samples and realizations, respectively. H : float Hurst exponent lambda : float Intermittency parameter L : float Integral scale sigma : float Variance of process Returns ------- mrw : ndarray Synthesized mrw realizations. If 'shape' is scalar, fbm is of shape (N,). Otherwise, it is of shape (N, N, R). References ---------- .. 
[1] Bacry, Delour, Muzy, "Multifractal Random Walk", Physical Review E, 2001 ''' try: N, R = shape # do_squeeze = False except TypeError: # shape is scalar N, R = shape, 1 # do_squeeze = True N = int(2 * np.ceil(N / 2)) # dim = 2 n = np.arange(-N // 2, N // 2) d = np.sqrt(n[:, None]**2 + n[None, :]**2) corr = lam**2 * np.log(np.maximum(L / (1 + d), 1)) L = np.fft.fft2(corr) z1 = np.random.randn(N, N, R) + 1j * np.random.randn(N, N, R) w = np.exp(np.real(np.fft.ifft2(z1 * np.sqrt(L[..., None]), axes=(0, 1)))) # Increment process: X = np.random.randn(N, N, R) * w # Fractional integration to produce motion: BX = fract_int_2d(X, H + 1) return BX, X def fract_int_2d(x, alpha): ''' Assumes size of x divisible by two ''' N = x.shape[0] # Create Fourier filter k = np.arange(-N/2, N/2) d = np.sqrt(k[:, None]**2 + k[None, :]**2) mini = np.min(d[d != 0]) d[d == 0] = mini filt = 1 / (d ** alpha) yhat = np.fft.fftshift(np.fft.fft2(x, axes=(0, 1)), axes=(0, 1)) yhat *= filt[..., None] y = np.real(np.fft.ifft2(np.fft.ifftshift(yhat, axes=(0, 1)), axes=(0, 1))) return y
25.832192
79
0.561978
import numpy as np from .fbm import fgn from .pzutils import gaussian_cme, gaussian_chol from numpy.fft import fft, ifft def mrw(shape, H, lam, L, sigma=1, method='cme', z0=(None, None)): try: N, R = shape do_squeeze = False except TypeError: N, R = shape, 1 do_squeeze = True if not 0 <= H <= 1: raise ValueError('H must satisfy 0 <= H <= 1') if L > N: raise ValueError('Integral scale L is larger than data length N') w = gaussian_w(N, R, L, lam, 1, method, z0[1]) r = 1/2 w = w - np.mean(w, axis=0) - r * lam**2 * np.log(L) e = fgn((N, R), H, sigma, method=method, z0=z0[0]) mrw = np.cumsum(e * np.exp(w), axis=0) return mrw.squeeze() if do_squeeze else mrw def mrw_cumul(shape, c1, c2, L, **kwargs): H = c1 + c2 lam = np.sqrt(-c2) return mrw(shape, H, lam, L, **kwargs) def skewed_mrw(shape, H, lam, L, K0=1, alpha=1, sigma=1, dt=1, beta=1, do_mirror=False): try: N, R = shape do_squeeze = False except TypeError: N, R = shape, 1 do_squeeze = True if not 0 <= H <= 1: raise ValueError('H must satisfy 0 <= H <= 1') if L / dt > N: raise ValueError('Integral scale L/dt is larger than data length N') w = gaussian_w(N, R, L, lam, dt) r = 1 w = w - np.mean(w, axis=0) - r * lam**2 * np.log(L / dt) e = fgn((2*N + 1, R), H, sigma, dt) past = skewness_convolution(e, K0, alpha, beta, dt) wtilde = w - past smrw = np.cumsum(e[N:] * np.exp(wtilde), axis=0) if do_squeeze: smrw = smrw.squeeze() if do_mirror: past_mirror = skewness_convolution(-e, K0, alpha, beta, dt) wtilde_mirror = w - past_mirror smrw_mirror = np.cumsum(-e[N:] * np.exp(wtilde_mirror), axis=0) if do_squeeze: smrw_mirror = smrw_mirror.squeeze() return smrw, smrw_mirror else: return smrw def gaussian_w(N, R, L, lam, dt=1, method='cme', z0=None): kmax = int(L / dt) k = np.arange(kmax) rho = np.ones((N)) rho[:kmax] = L / (k + 1) / dt cov = (lam ** 2) * np.log(rho) if method == 'cme': w = gaussian_cme(cov, N, R, z0) elif method == 'chol': w = gaussian_chol(cov, N, R, z0) return w def skewness_convolution(e, K0, alpha, beta=1, 
dt=1): N, _ = e.shape N = N // 2 tau = np.arange(1, N+1) Kbar = np.zeros((2*N)) Kbar[1:N+1] = K0 / (tau**alpha) / (dt**beta) skew_conv = np.real(ifft(fft(Kbar[:, None], axis=0) * fft(e, axis=0), axis=0)) return skew_conv[N:] def skewness_convolution_dumb(e, K0, alpha, beta=1, dt=1): N, R = e.shape def K(i, j): return K0 / (j-i)**alpha / dt**beta scorr = np.zeros((N, R)) for k in range(N): for i in range(k): scorr[k, :] += K(i, k) * e[i, :] return scorr def mrw2D(shape, H, lam, L, sigma=1): try: N, R = shape except TypeError: N, R = shape, 1 N = int(2 * np.ceil(N / 2)) n = np.arange(-N // 2, N // 2) d = np.sqrt(n[:, None]**2 + n[None, :]**2) corr = lam**2 * np.log(np.maximum(L / (1 + d), 1)) L = np.fft.fft2(corr) z1 = np.random.randn(N, N, R) + 1j * np.random.randn(N, N, R) w = np.exp(np.real(np.fft.ifft2(z1 * np.sqrt(L[..., None]), axes=(0, 1)))) X = np.random.randn(N, N, R) * w BX = fract_int_2d(X, H + 1) return BX, X def fract_int_2d(x, alpha): N = x.shape[0] k = np.arange(-N/2, N/2) d = np.sqrt(k[:, None]**2 + k[None, :]**2) mini = np.min(d[d != 0]) d[d == 0] = mini filt = 1 / (d ** alpha) yhat = np.fft.fftshift(np.fft.fft2(x, axes=(0, 1)), axes=(0, 1)) yhat *= filt[..., None] y = np.real(np.fft.ifft2(np.fft.ifftshift(yhat, axes=(0, 1)), axes=(0, 1))) return y
true
true
f70fc5015c78a597d0e8b3f39a7c892676afd6d0
1,296
py
Python
tests/pyregex/test_file_ext.py
BrianLusina/PyCharm
144dd4f6b2d254507237f46c8ee175c407fe053d
[ "Apache-2.0", "MIT" ]
null
null
null
tests/pyregex/test_file_ext.py
BrianLusina/PyCharm
144dd4f6b2d254507237f46c8ee175c407fe053d
[ "Apache-2.0", "MIT" ]
null
null
null
tests/pyregex/test_file_ext.py
BrianLusina/PyCharm
144dd4f6b2d254507237f46c8ee175c407fe053d
[ "Apache-2.0", "MIT" ]
null
null
null
import unittest from pyregex.file_extensions import is_audio, is_img class FileExtTests(unittest.TestCase): def test_1(self): self.assertEqual(is_audio("Nothing Else Matters.mp3"), False) def test_2(self): self.assertEqual(is_audio("NothingElseMatters.mp3"), True) def test_3(self): self.assertEqual(is_audio("DaftPunk.FLAC"), False) def test_4(self): self.assertEqual(is_audio("DaftPunk.flac"), True) def test_5(self): self.assertEqual(is_audio("AmonTobin.aac"), True) def test_6(self): self.assertEqual(is_audio(" Amon Tobin.alac"), False) def test_7(self): self.assertEqual(is_audio("tobin.alac"), True) def test_8(self): self.assertEqual(is_img("Home.jpg"), True) def test_9(self): self.assertEqual(is_img("flat.jpeg"), True) def test_10(self): self.assertEqual(is_img("icon.bmp"), True) def test_11(self): self.assertEqual(is_img("icon2.jpg"), False) def test_12(self): self.assertEqual(is_img("bounce.gif"), True) def test_13(self): self.assertEqual(is_img("animate bounce.GIF"), False) def test_14(self): self.assertEqual(is_img("transparency.png"), True) if __name__ == "__main__": unittest.main()
24.923077
69
0.655093
import unittest from pyregex.file_extensions import is_audio, is_img class FileExtTests(unittest.TestCase): def test_1(self): self.assertEqual(is_audio("Nothing Else Matters.mp3"), False) def test_2(self): self.assertEqual(is_audio("NothingElseMatters.mp3"), True) def test_3(self): self.assertEqual(is_audio("DaftPunk.FLAC"), False) def test_4(self): self.assertEqual(is_audio("DaftPunk.flac"), True) def test_5(self): self.assertEqual(is_audio("AmonTobin.aac"), True) def test_6(self): self.assertEqual(is_audio(" Amon Tobin.alac"), False) def test_7(self): self.assertEqual(is_audio("tobin.alac"), True) def test_8(self): self.assertEqual(is_img("Home.jpg"), True) def test_9(self): self.assertEqual(is_img("flat.jpeg"), True) def test_10(self): self.assertEqual(is_img("icon.bmp"), True) def test_11(self): self.assertEqual(is_img("icon2.jpg"), False) def test_12(self): self.assertEqual(is_img("bounce.gif"), True) def test_13(self): self.assertEqual(is_img("animate bounce.GIF"), False) def test_14(self): self.assertEqual(is_img("transparency.png"), True) if __name__ == "__main__": unittest.main()
true
true
f70fc52500dca37eb444c1562ccc361d0506ca18
2,151
py
Python
aide_validation/lfom_validation.py
AguaClara/aide_validation
997b9da8077d1f560d4aa9ccb236b21580da4808
[ "MIT" ]
null
null
null
aide_validation/lfom_validation.py
AguaClara/aide_validation
997b9da8077d1f560d4aa9ccb236b21580da4808
[ "MIT" ]
3
2021-01-28T02:30:34.000Z
2021-03-30T16:57:21.000Z
aide_validation/lfom_validation.py
AguaClara/aide_validation
997b9da8077d1f560d4aa9ccb236b21580da4808
[ "MIT" ]
null
null
null
"""Helper functions for validating LFOM. Created on September 18, 2020 @author: jcs528@cornell.edu """ from aguaclara.core.units import u import aguaclara.core.physchem as pc import aguaclara.core.constants as con def flow_lfom_vert(height, d_ori, h_ori, n_oris): """Returns the flow through the LFOM as a function of height Args: height: height of water in the LFOM (u.m) d_ori: diameter of each orifice (u.m) h_ori: height of each row of the LFOM (list) n_oris: number of orifices at each row of the LFOM (list of lists) Returns: flow: flow rate through the LFOM (u.L / u.s) """ flow = pc.flow_orifice_vert(d_ori, height - h_ori, con.VC_ORIFICE_RATIO) * n_oris return (sum(flow)).to(u.L / u.s) def check_flow_lfom_vert( diameter, ori_heights, ori_numbers, cutoff, q_input, report_writer ): """Evaluates the flow Args: diameter: diameter of each orifice (u.m) ori_heights: height of each row of the LFOM (list) ori_numbers: number of orifices at each row of the LFOM (list of lists) cutoff: allowable tolerance between design and expected flow as a percent q_input: design flow rate (u.L / u.s) report_writer: ReportWriter object to record validation results Returns: flow: flow rate through the LFOM (u.L / u.s) """ try: q_calc = flow_lfom_vert( ori_heights[-1] + 0.5 * diameter, diameter, ori_heights, ori_numbers ) assert cutoff > (q_calc - q_input) / q_input assert -cutoff < (q_calc - q_input) / q_input report_writer.write_message( "The expected flow rate, {!s}, was very close " "to the one calculated by this validation " "code, {!s}.\n".format(q_input, q_calc) ) except AssertionError: report_writer.write_message( "INVALID: The expected flow rate, {!s}, is " "different from the one calculated by this " "validation code, {!s}.\n".format(q_input, q_calc) ) report_writer.set_result("Invalid: Check Validation Report")
30.728571
85
0.641562
from aguaclara.core.units import u import aguaclara.core.physchem as pc import aguaclara.core.constants as con def flow_lfom_vert(height, d_ori, h_ori, n_oris): flow = pc.flow_orifice_vert(d_ori, height - h_ori, con.VC_ORIFICE_RATIO) * n_oris return (sum(flow)).to(u.L / u.s) def check_flow_lfom_vert( diameter, ori_heights, ori_numbers, cutoff, q_input, report_writer ): try: q_calc = flow_lfom_vert( ori_heights[-1] + 0.5 * diameter, diameter, ori_heights, ori_numbers ) assert cutoff > (q_calc - q_input) / q_input assert -cutoff < (q_calc - q_input) / q_input report_writer.write_message( "The expected flow rate, {!s}, was very close " "to the one calculated by this validation " "code, {!s}.\n".format(q_input, q_calc) ) except AssertionError: report_writer.write_message( "INVALID: The expected flow rate, {!s}, is " "different from the one calculated by this " "validation code, {!s}.\n".format(q_input, q_calc) ) report_writer.set_result("Invalid: Check Validation Report")
true
true
f70fc536e34579924ddaa83ede4401b79ce9f249
2,796
py
Python
script popolamento DB/env/lib/python3.7/site-packages/pip/_internal/__init__.py
2dadsgn/Smart-vase-webapp-flask-
0714d960ec21c77be069dd07b1bc8407f33e0b72
[ "Apache-2.0" ]
null
null
null
script popolamento DB/env/lib/python3.7/site-packages/pip/_internal/__init__.py
2dadsgn/Smart-vase-webapp-flask-
0714d960ec21c77be069dd07b1bc8407f33e0b72
[ "Apache-2.0" ]
null
null
null
script popolamento DB/env/lib/python3.7/site-packages/pip/_internal/__init__.py
2dadsgn/Smart-vase-webapp-flask-
0714d960ec21c77be069dd07b1bc8407f33e0b72
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python from __future__ import absolute_import import locale import logging import os import sys import warnings # 2016-06-17 barry@debian.org: urllib3 1.14 added optional support for socks, # but if invoked (i.e. imported), it will issue a warning to stderr if socks # isn't available. requests unconditionally imports urllib3's socks contrib # module, triggering this warning. The warning breaks DEP-8 tests (because of # the stderr output) and is just plain annoying in normal usage. I don't want # to add socks as yet another dependency for pip, nor do I want to allow-stder # in the DEP-8 tests, so just suppress the warning. pdb tells me this has to # be done before the import of pip.vcs. from pip._vendor.urllib3.exceptions import DependencyWarning warnings.filterwarnings("ignore", category=DependencyWarning) # noqa # We want to inject the use of SecureTransport as early as possible so that any # references or sessions or what have you are ensured to have it, however we # only want to do this in the case that we're running on macOS and the linked # OpenSSL is too old to handle TLSv1.2 try: import ssl except ImportError: pass else: # Checks for OpenSSL 1.0.1 on MacOS if sys.platform == "darwin" and ssl.OPENSSL_VERSION_NUMBER < 0x1000100f: try: from pip._vendor.urllib3.contrib import securetransport except (ImportError, OSError): pass else: securetransport.inject_into_urllib3() from pip._internal.cli.autocompletion import autocomplete from pip._internal.cli.main_parser import parse_command from pip._internal.commands import commands_dict from pip._internal.exceptions import PipError from pip._internal.utils import deprecation from pip._vendor.urllib3.exceptions import InsecureRequestWarning logger = logging.getLogger(__name__) # Hide the InsecureRequestWarning from urllib3 warnings.filterwarnings("ignore", category=InsecureRequestWarning) def main(args=None): if args is None: args = sys.argv[1:] # Configure our deprecation warnings to be sent through 
loggers deprecation.install_warning_logger() autocomplete() try: cmd_name, cmd_args = parse_command(args) except PipError as exc: sys.stderr.write("ERROR: %s" % exc) sys.stderr.write(os.linesep) sys.exit(1) # Needed for locale.getpreferredencoding(False) to work # in pip._internal.utils.encoding.auto_decode try: locale.setlocale(locale.LC_ALL, '') except locale.Error as e: # setlocale can apparently crash if locale are uninitialized logger.debug("Ignoring error %s when setting locale", e) command = commands_dict[cmd_name](isolated=("--isolated" in cmd_args)) return command.main(cmd_args)
35.846154
79
0.741059
from __future__ import absolute_import import locale import logging import os import sys import warnings # to add socks as yet another dependency for pip, nor do I want to allow-stder # in the DEP-8 tests, so just suppress the warning. pdb tells me this has to # be done before the import of pip.vcs. from pip._vendor.urllib3.exceptions import DependencyWarning warnings.filterwarnings("ignore", category=DependencyWarning) # noqa # We want to inject the use of SecureTransport as early as possible so that any # references or sessions or what have you are ensured to have it, however we # only want to do this in the case that we're running on macOS and the linked try: import ssl except ImportError: pass else: if sys.platform == "darwin" and ssl.OPENSSL_VERSION_NUMBER < 0x1000100f: try: from pip._vendor.urllib3.contrib import securetransport except (ImportError, OSError): pass else: securetransport.inject_into_urllib3() from pip._internal.cli.autocompletion import autocomplete from pip._internal.cli.main_parser import parse_command from pip._internal.commands import commands_dict from pip._internal.exceptions import PipError from pip._internal.utils import deprecation from pip._vendor.urllib3.exceptions import InsecureRequestWarning logger = logging.getLogger(__name__) warnings.filterwarnings("ignore", category=InsecureRequestWarning) def main(args=None): if args is None: args = sys.argv[1:] deprecation.install_warning_logger() autocomplete() try: cmd_name, cmd_args = parse_command(args) except PipError as exc: sys.stderr.write("ERROR: %s" % exc) sys.stderr.write(os.linesep) sys.exit(1) try: locale.setlocale(locale.LC_ALL, '') except locale.Error as e: logger.debug("Ignoring error %s when setting locale", e) command = commands_dict[cmd_name](isolated=("--isolated" in cmd_args)) return command.main(cmd_args)
true
true
f70fc6b477776e6e8c300af6054a006f420fbd61
13,684
py
Python
venv/lib/python3.7/site-packages/rqdatac/services/stock_status.py
CatTiger/vnpy
7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b
[ "MIT" ]
null
null
null
venv/lib/python3.7/site-packages/rqdatac/services/stock_status.py
CatTiger/vnpy
7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b
[ "MIT" ]
1
2020-04-21T02:42:32.000Z
2020-04-21T02:42:32.000Z
venv/lib/python3.7/site-packages/rqdatac/services/stock_status.py
CatTiger/vnpy
7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import datetime import warnings import pandas as pd import numpy as np from rqdatac.utils import to_datetime, to_date from rqdatac.validators import ( ensure_date_range, ensure_date_or_today_int, ensure_list_of_string, check_items_in_container, ensure_order, ensure_order_book_id, ensure_order_book_ids, ensure_dates_base_on_listed_date, ensure_string, ensure_date_int ) from rqdatac.services.basic import instruments from rqdatac.services.calendar import ( get_trading_dates, get_previous_trading_date, get_trading_dates_in_type, ) from rqdatac.client import get_client from rqdatac.decorators import export_as_api, compatible_with_parm @export_as_api def is_st_stock(order_book_ids, start_date=None, end_date=None, market="cn"): """判断股票在给定的时间段是否是ST股, 返回值为一个DataFrame :param order_book_ids: 股票 id :param start_date: (Default value = None) :param end_date: (Default value = None) :param market: (Default value = "cn") """ order_book_ids = ensure_order_book_ids(order_book_ids, type="CS", market=market) if len(order_book_ids) == 1: instrument = instruments(order_book_ids[0], market=market) start_date, end_date = ensure_dates_base_on_listed_date(instrument, start_date, end_date, market) if start_date is None: return start_date, end_date = ensure_date_range(start_date, end_date) trading_dates = pd.to_datetime(get_trading_dates(start_date, end_date, market=market)) data = get_client().execute( "get_st_days", order_book_ids, start_date=start_date, end_date=end_date ) df = pd.DataFrame(data=False, columns=order_book_ids, index=trading_dates) for idx, dates in data.items(): for date in dates: date = to_datetime(date) df.at[date, idx] = True return df @export_as_api def _is_st_stock(order_book_id, date=None, market="cn"): """判断股票在给定日期是否是ST股 :param order_book_id: 股票id :param date: (Default value = None) :param market: (Default value = "cn") :returns: True or False """ order_book_id = ensure_order_book_id(order_book_id, type="CS", market=market) date = 
ensure_date_or_today_int(date) df = is_st_stock(order_book_id, start_date=date, end_date=date, market=market) if df is None or df.empty: return False else: return df[order_book_id][0] @export_as_api @compatible_with_parm(name="country", value="cn", replace="market") def is_suspended(order_book_ids, start_date=None, end_date=None, market="cn"): """获取股票停牌信息 :param order_book_ids: 股票名称 :param start_date: 开始日期, 如'2013-01-04' (Default value = None) :param end_date: 结束日期,如'2014-01-04' (Default value = None) :param market: 地区代码, 如 'cn' (Default value = "cn") :returns: DataFrame """ order_book_ids = ensure_order_book_ids(order_book_ids, type="CS", market=market) if len(order_book_ids) == 1: instrument = instruments(order_book_ids[0], market=market) start_date, end_date = ensure_dates_base_on_listed_date(instrument, start_date, end_date, market) if start_date is None: return start_date, end_date = ensure_date_range(start_date, end_date) trading_dates = pd.to_datetime(get_trading_dates(start_date, end_date, market=market)) df = pd.DataFrame(data=False, columns=order_book_ids, index=trading_dates) data = get_client().execute("get_suspended_days", order_book_ids, start_date, end_date, market=market) for idx, dates in data.items(): for date in dates: date = to_datetime(int(date)) df.at[date, idx] = True return df stock_fields = {"shares_holding": "shares_holding", "holding_ratio": "holding_ratio"} special_symbols = ["all_connect", "shanghai_connect", "shenzhen_connect"] symbols_map = {"shanghai_connect": "SH", "shenzhen_connect": "SZ"} @export_as_api def get_stock_connect(order_book_ids, start_date=None, end_date=None, fields=None, expect_df=False): """获取"陆股通"的持股、持股比例 :param order_book_ids: 股票列表 :param start_date: 开始日期: 如'2017-03-17' (Default value = None) :param end_date: 结束日期: 如'2018-03-16' (Default value = None) :param fields: 默认为所有字段,可输入shares_holding或者holding_ratio (Default value = None) :param expect_df: 返回 MultiIndex DataFrame (Default value = False) :returns: 
返回pandas.DataFrame or pandas.Panel """ if order_book_ids not in ("shanghai_connect", "shenzhen_connect", "all_connect"): order_book_ids = ensure_order_book_ids(order_book_ids, type="CS") start_date, end_date = ensure_date_range(start_date, end_date) if fields is not None: fields = ensure_list_of_string(fields) for f in fields: if f not in ("shares_holding", "holding_ratio"): raise ValueError("invalid field: {}".format(f)) else: fields = ["shares_holding", "holding_ratio"] data = get_client().execute("get_stock_connect", order_book_ids, start_date, end_date, fields) if not data: return None df = pd.DataFrame(data, columns=["trading_date", "order_book_id"] + fields) if expect_df: df.sort_values(["order_book_id", "trading_date"], inplace=True) df.set_index(["order_book_id", "trading_date"], inplace=True) return df df = df.set_index(["trading_date", "order_book_id"]) df = df.to_panel() df.major_axis.name = None df.minor_axis.name = None if len(order_book_ids) == 1: df = df.minor_xs(order_book_ids[0]) if len(fields) == 1: df = df[fields[0]] if len(order_book_ids) != 1 and len(fields) != 1: warnings.warn("Panel is removed after pandas version 0.25.0." " the default value of 'expect_df' will change to True in the future.") return df MARGIN_FIELDS = ( "margin_balance", "buy_on_margin_value", "short_sell_quantity", "margin_repayment", "short_balance_quantity", "short_repayment_quantity", "short_balance", "total_balance", ) MARGIN_SUMMARY_MAP = {"SH": "XSHG", "XSHG": "XSHG", "SZ": "XSHE", "XSHE": "XSHE"} @export_as_api def get_securities_margin( order_book_ids, start_date=None, end_date=None, fields=None, expect_df=False, market="cn" ): """获取股票融资融券数据 :param order_book_ids: 股票代码或代码列表 :param start_date: 开始时间,支持 str, date, datetime, pandasTimestamp 默认为 end_date 之前一个月 (Default value = None) :param end_date: 结束时间 默认为当前日期前一天 (Default value = None) :param fields: str 或 list 类型. 
默认为 None, 返回所有字段。可选字段包括: today, week, month, three_month, six_month, year, current_year, total (Default value = None) :param expect_df: 返回 MultiIndex DataFrame (Default value = False) :param market: 地区代码, 如: 'cn' (Default value = "cn") :returns: 如果传入多个股票代码,且 fields 为多个或者 None,返回 pandas.Panel 如果传入一只股票或者 fields 为单个字段,则返回 pandas.DataFrame 如果传入的股票代码和字段数都是1,则返回 pandas.Series """ order_book_ids = ensure_list_of_string(order_book_ids, "order_book_ids") all_list = [] for order_book_id in order_book_ids: if order_book_id.upper() in MARGIN_SUMMARY_MAP: all_list.append(MARGIN_SUMMARY_MAP[order_book_id.upper()]) else: inst = instruments(order_book_id, market) if inst.type in ["CS", "ETF", "LOF"]: all_list.append(inst.order_book_id) else: warnings.warn("{} is not stock, ETF, or LOF.".format(order_book_id)) order_book_ids = all_list if not order_book_ids: raise ValueError("no valid securities in {}".format(order_book_ids)) if fields is None: fields = list(MARGIN_FIELDS) else: fields = ensure_list_of_string(fields, "fields") check_items_in_container(fields, MARGIN_FIELDS, "fields") fields = ensure_order(fields, MARGIN_FIELDS) start_date, end_date = ensure_date_range(start_date, end_date) if end_date > ensure_date_or_today_int(None): end_date = ensure_date_or_today_int(get_previous_trading_date(datetime.date.today())) trading_dates = pd.to_datetime(get_trading_dates(start_date, end_date, market=market)) data = get_client().execute( "get_securities_margin", order_book_ids, start_date, end_date, market=market ) if not data: return if expect_df: df = pd.DataFrame(data) df.sort_values(["order_book_id", "date"], inplace=True) df.set_index(["order_book_id", "date"], inplace=True) df = df.reindex(columns=fields) return df pl = pd.Panel(items=fields, major_axis=trading_dates, minor_axis=order_book_ids) for r in data: for field in fields: value = r.get(field) pl.at[field, r["date"], r["order_book_id"]] = value if len(order_book_ids) == 1: pl = pl.minor_xs(order_book_ids[0]) if len(fields) 
== 1: pl = pl[fields[0]] if len(order_book_ids) != 1 and len(fields) != 1: warnings.warn("Panel is removed after pandas version 0.25.0." " the default value of 'expect_df' will change to True in the future.") return pl MARGIN_TYPE = ("stock", "cash") EXCHANGE_TYPE = {"SZ": "XSHE", "sz": "XSHE", "xshe": "XSHE", "SH": "XSHG", "sh": "XSHG", "xshg": "XSHG"} EXCHANGE_CONTENT = ["XSHE", "XSHG"] @export_as_api def get_margin_stocks(date=None, exchange=None, margin_type='stock', market="cn"): """获取融资融券信息 :param date: 查询日期,默认返回今天上一交易日,支持 str, timestamp, datetime 类型 :param exchange: 交易所信息,默认不填写则返回全部。 str类型,默认为 None,返回所有字段。可选字段包括: 'XSHE', 'sz' 代表深交所;'XSHG', 'sh' 代表上交所,不区分大小写 (Default value = None) :param margin_type: 'stock' 代表融券卖出,'cash',代表融资买入,默认为'stock' """ if date: date = ensure_date_int(date) else: date = get_previous_trading_date(datetime.date.today()) date = date.year * 10000 + date.month * 100 + date.day if exchange is None: exchange = EXCHANGE_CONTENT else: exchange = ensure_string(exchange, "exchange") if exchange in EXCHANGE_TYPE: exchange = EXCHANGE_TYPE[exchange] check_items_in_container(exchange, EXCHANGE_CONTENT, "exchange") exchange = [exchange] margin_type = ensure_string(margin_type, "margin_type") check_items_in_container(margin_type, MARGIN_TYPE, "margin_type") data = get_client().execute( "get_margin_stocks", date, exchange, margin_type, market=market ) if not data: return [] else: return sorted(data) share_fields = { "total": "total_shares", "circulation_a": "a_cir_shares", "non_circulation_a": "a_non_cir_shares", "total_a": "a_total_shares", } anti_fields = {v: k for k, v in share_fields.items()} @export_as_api @compatible_with_parm(name="country", value="cn", replace="market") def get_shares(order_book_ids, start_date=None, end_date=None, fields=None, expect_df=False, market="cn"): """获取流通股本信息 :param order_book_ids: 股票名称 :param start_date: 开始日期, 如'2013-01-04' (Default value = None) :param end_date: 结束日期,如'2014-01-04' (Default value = None) :param 
fields: 如'total', 'circulation_a' (Default value = None) :param expect_df: 返回 MultiIndex DataFrame (Default value = False) :param market: 地区代码,如'cn' (Default value = "cn") :returns: 返回一个DataFrame """ order_book_ids = ensure_order_book_ids(order_book_ids, market=market) start_date, end_date = ensure_date_range(start_date, end_date) if fields: fields = ensure_list_of_string(fields, "fields") if 'management_circulation' in fields: fields.remove('management_circulation') if fields: warnings.warn("management_circulation is removed") else: raise ValueError("management_circulation is removed") check_items_in_container(fields, set(share_fields), "fields") fields = [share_fields[i] for i in fields] else: fields = list(share_fields.values()) all_shares = get_client().execute("get_shares", order_book_ids, fields, market=market) if not all_shares: return dates = get_trading_dates_in_type(start_date, end_date, expect_type="datetime", market=market) df = pd.DataFrame(all_shares) unique = set(df.order_book_id) for order_book_id in order_book_ids: if order_book_id not in unique: df = df.append( {"order_book_id": order_book_id, "date": df.date.iloc[-1]}, ignore_index=True ) df.set_index(["date", "order_book_id"], inplace=True) df.sort_index(inplace=True) df = df.unstack(level=1) index = df.index.union(dates) df = df.reindex(index) df = df.fillna(method="ffill") df = df.loc[list(dates)] df = df.dropna(how="all") df = df[fields] if expect_df: df = df.stack(1) df.index.set_names(["date", "order_book_id"], inplace=True) df = df.reorder_levels(["order_book_id", "date"]).sort_index() df = df.rename(columns=anti_fields) return df pl = df.stack(1).to_panel() pl.items = [anti_fields[i] for i in pl.items] if len(order_book_ids) == 1: pl = pl.minor_xs(order_book_ids[0]) if len(fields) == 1: pl = pl[anti_fields[fields[0]]] if len(order_book_ids) != 1 and len(fields) != 1: warnings.warn("Panel is removed after pandas version 0.25.0." 
" the default value of 'expect_df' will change to True in the future.") return pl
36.297082
106
0.668372
import datetime import warnings import pandas as pd import numpy as np from rqdatac.utils import to_datetime, to_date from rqdatac.validators import ( ensure_date_range, ensure_date_or_today_int, ensure_list_of_string, check_items_in_container, ensure_order, ensure_order_book_id, ensure_order_book_ids, ensure_dates_base_on_listed_date, ensure_string, ensure_date_int ) from rqdatac.services.basic import instruments from rqdatac.services.calendar import ( get_trading_dates, get_previous_trading_date, get_trading_dates_in_type, ) from rqdatac.client import get_client from rqdatac.decorators import export_as_api, compatible_with_parm @export_as_api def is_st_stock(order_book_ids, start_date=None, end_date=None, market="cn"): order_book_ids = ensure_order_book_ids(order_book_ids, type="CS", market=market) if len(order_book_ids) == 1: instrument = instruments(order_book_ids[0], market=market) start_date, end_date = ensure_dates_base_on_listed_date(instrument, start_date, end_date, market) if start_date is None: return start_date, end_date = ensure_date_range(start_date, end_date) trading_dates = pd.to_datetime(get_trading_dates(start_date, end_date, market=market)) data = get_client().execute( "get_st_days", order_book_ids, start_date=start_date, end_date=end_date ) df = pd.DataFrame(data=False, columns=order_book_ids, index=trading_dates) for idx, dates in data.items(): for date in dates: date = to_datetime(date) df.at[date, idx] = True return df @export_as_api def _is_st_stock(order_book_id, date=None, market="cn"): order_book_id = ensure_order_book_id(order_book_id, type="CS", market=market) date = ensure_date_or_today_int(date) df = is_st_stock(order_book_id, start_date=date, end_date=date, market=market) if df is None or df.empty: return False else: return df[order_book_id][0] @export_as_api @compatible_with_parm(name="country", value="cn", replace="market") def is_suspended(order_book_ids, start_date=None, end_date=None, market="cn"): order_book_ids = 
ensure_order_book_ids(order_book_ids, type="CS", market=market) if len(order_book_ids) == 1: instrument = instruments(order_book_ids[0], market=market) start_date, end_date = ensure_dates_base_on_listed_date(instrument, start_date, end_date, market) if start_date is None: return start_date, end_date = ensure_date_range(start_date, end_date) trading_dates = pd.to_datetime(get_trading_dates(start_date, end_date, market=market)) df = pd.DataFrame(data=False, columns=order_book_ids, index=trading_dates) data = get_client().execute("get_suspended_days", order_book_ids, start_date, end_date, market=market) for idx, dates in data.items(): for date in dates: date = to_datetime(int(date)) df.at[date, idx] = True return df stock_fields = {"shares_holding": "shares_holding", "holding_ratio": "holding_ratio"} special_symbols = ["all_connect", "shanghai_connect", "shenzhen_connect"] symbols_map = {"shanghai_connect": "SH", "shenzhen_connect": "SZ"} @export_as_api def get_stock_connect(order_book_ids, start_date=None, end_date=None, fields=None, expect_df=False): if order_book_ids not in ("shanghai_connect", "shenzhen_connect", "all_connect"): order_book_ids = ensure_order_book_ids(order_book_ids, type="CS") start_date, end_date = ensure_date_range(start_date, end_date) if fields is not None: fields = ensure_list_of_string(fields) for f in fields: if f not in ("shares_holding", "holding_ratio"): raise ValueError("invalid field: {}".format(f)) else: fields = ["shares_holding", "holding_ratio"] data = get_client().execute("get_stock_connect", order_book_ids, start_date, end_date, fields) if not data: return None df = pd.DataFrame(data, columns=["trading_date", "order_book_id"] + fields) if expect_df: df.sort_values(["order_book_id", "trading_date"], inplace=True) df.set_index(["order_book_id", "trading_date"], inplace=True) return df df = df.set_index(["trading_date", "order_book_id"]) df = df.to_panel() df.major_axis.name = None df.minor_axis.name = None if len(order_book_ids) == 
1: df = df.minor_xs(order_book_ids[0]) if len(fields) == 1: df = df[fields[0]] if len(order_book_ids) != 1 and len(fields) != 1: warnings.warn("Panel is removed after pandas version 0.25.0." " the default value of 'expect_df' will change to True in the future.") return df MARGIN_FIELDS = ( "margin_balance", "buy_on_margin_value", "short_sell_quantity", "margin_repayment", "short_balance_quantity", "short_repayment_quantity", "short_balance", "total_balance", ) MARGIN_SUMMARY_MAP = {"SH": "XSHG", "XSHG": "XSHG", "SZ": "XSHE", "XSHE": "XSHE"} @export_as_api def get_securities_margin( order_book_ids, start_date=None, end_date=None, fields=None, expect_df=False, market="cn" ): order_book_ids = ensure_list_of_string(order_book_ids, "order_book_ids") all_list = [] for order_book_id in order_book_ids: if order_book_id.upper() in MARGIN_SUMMARY_MAP: all_list.append(MARGIN_SUMMARY_MAP[order_book_id.upper()]) else: inst = instruments(order_book_id, market) if inst.type in ["CS", "ETF", "LOF"]: all_list.append(inst.order_book_id) else: warnings.warn("{} is not stock, ETF, or LOF.".format(order_book_id)) order_book_ids = all_list if not order_book_ids: raise ValueError("no valid securities in {}".format(order_book_ids)) if fields is None: fields = list(MARGIN_FIELDS) else: fields = ensure_list_of_string(fields, "fields") check_items_in_container(fields, MARGIN_FIELDS, "fields") fields = ensure_order(fields, MARGIN_FIELDS) start_date, end_date = ensure_date_range(start_date, end_date) if end_date > ensure_date_or_today_int(None): end_date = ensure_date_or_today_int(get_previous_trading_date(datetime.date.today())) trading_dates = pd.to_datetime(get_trading_dates(start_date, end_date, market=market)) data = get_client().execute( "get_securities_margin", order_book_ids, start_date, end_date, market=market ) if not data: return if expect_df: df = pd.DataFrame(data) df.sort_values(["order_book_id", "date"], inplace=True) df.set_index(["order_book_id", "date"], inplace=True) df = 
df.reindex(columns=fields) return df pl = pd.Panel(items=fields, major_axis=trading_dates, minor_axis=order_book_ids) for r in data: for field in fields: value = r.get(field) pl.at[field, r["date"], r["order_book_id"]] = value if len(order_book_ids) == 1: pl = pl.minor_xs(order_book_ids[0]) if len(fields) == 1: pl = pl[fields[0]] if len(order_book_ids) != 1 and len(fields) != 1: warnings.warn("Panel is removed after pandas version 0.25.0." " the default value of 'expect_df' will change to True in the future.") return pl MARGIN_TYPE = ("stock", "cash") EXCHANGE_TYPE = {"SZ": "XSHE", "sz": "XSHE", "xshe": "XSHE", "SH": "XSHG", "sh": "XSHG", "xshg": "XSHG"} EXCHANGE_CONTENT = ["XSHE", "XSHG"] @export_as_api def get_margin_stocks(date=None, exchange=None, margin_type='stock', market="cn"): if date: date = ensure_date_int(date) else: date = get_previous_trading_date(datetime.date.today()) date = date.year * 10000 + date.month * 100 + date.day if exchange is None: exchange = EXCHANGE_CONTENT else: exchange = ensure_string(exchange, "exchange") if exchange in EXCHANGE_TYPE: exchange = EXCHANGE_TYPE[exchange] check_items_in_container(exchange, EXCHANGE_CONTENT, "exchange") exchange = [exchange] margin_type = ensure_string(margin_type, "margin_type") check_items_in_container(margin_type, MARGIN_TYPE, "margin_type") data = get_client().execute( "get_margin_stocks", date, exchange, margin_type, market=market ) if not data: return [] else: return sorted(data) share_fields = { "total": "total_shares", "circulation_a": "a_cir_shares", "non_circulation_a": "a_non_cir_shares", "total_a": "a_total_shares", } anti_fields = {v: k for k, v in share_fields.items()} @export_as_api @compatible_with_parm(name="country", value="cn", replace="market") def get_shares(order_book_ids, start_date=None, end_date=None, fields=None, expect_df=False, market="cn"): order_book_ids = ensure_order_book_ids(order_book_ids, market=market) start_date, end_date = ensure_date_range(start_date, end_date) if 
fields: fields = ensure_list_of_string(fields, "fields") if 'management_circulation' in fields: fields.remove('management_circulation') if fields: warnings.warn("management_circulation is removed") else: raise ValueError("management_circulation is removed") check_items_in_container(fields, set(share_fields), "fields") fields = [share_fields[i] for i in fields] else: fields = list(share_fields.values()) all_shares = get_client().execute("get_shares", order_book_ids, fields, market=market) if not all_shares: return dates = get_trading_dates_in_type(start_date, end_date, expect_type="datetime", market=market) df = pd.DataFrame(all_shares) unique = set(df.order_book_id) for order_book_id in order_book_ids: if order_book_id not in unique: df = df.append( {"order_book_id": order_book_id, "date": df.date.iloc[-1]}, ignore_index=True ) df.set_index(["date", "order_book_id"], inplace=True) df.sort_index(inplace=True) df = df.unstack(level=1) index = df.index.union(dates) df = df.reindex(index) df = df.fillna(method="ffill") df = df.loc[list(dates)] df = df.dropna(how="all") df = df[fields] if expect_df: df = df.stack(1) df.index.set_names(["date", "order_book_id"], inplace=True) df = df.reorder_levels(["order_book_id", "date"]).sort_index() df = df.rename(columns=anti_fields) return df pl = df.stack(1).to_panel() pl.items = [anti_fields[i] for i in pl.items] if len(order_book_ids) == 1: pl = pl.minor_xs(order_book_ids[0]) if len(fields) == 1: pl = pl[anti_fields[fields[0]]] if len(order_book_ids) != 1 and len(fields) != 1: warnings.warn("Panel is removed after pandas version 0.25.0." " the default value of 'expect_df' will change to True in the future.") return pl
true
true
f70fc6f681ce167e1c1f5581804480dafb4f6607
3,118
py
Python
backend/apps/endpoints/models.py
alcibiadesBustillo/ml_service
99417531e2dbd43f212b5b0a397307f31c8131ca
[ "MIT" ]
null
null
null
backend/apps/endpoints/models.py
alcibiadesBustillo/ml_service
99417531e2dbd43f212b5b0a397307f31c8131ca
[ "MIT" ]
null
null
null
backend/apps/endpoints/models.py
alcibiadesBustillo/ml_service
99417531e2dbd43f212b5b0a397307f31c8131ca
[ "MIT" ]
null
null
null
from django.db import models # Create your models here. class Endpoint(models.Model): """ The Endpoint object represents ML API endpoints Attributes: name: The name of the endpoints, it will be used in API URL, owner: The string with owner name, created_at: The date when endpoint was created """ name = models.CharField(max_length=128) owner = models.CharField(max_length=128) created_at = models.DateTimeField(auto_now_add=True, blank=True) class MLAlgorithm(models.Model): ''' The MLAlgorithm represent the ML algorithm object. Attributes: name: The name of the algorithm. description: The short description of how the algorithm works. code: The code of the algorithm. version: The version of the algorithm similar to software versioning. owner: The name of the owner. created_at: The date when MLAlgorithm was added. parent_endpoint: The reference to the Endpoint. ''' name = models.CharField(max_length=128) description = models.CharField(max_length=1000) code = models.CharField(max_length=50000) version = models.CharField(max_length=128) owner = models.CharField(max_length=128) created_at = models.DateTimeField(auto_now_add=True, blank=True) parent_endpoint = models.ForeignKey(Endpoint, on_delete=models.CASCADE) class MLAlgorithmStatus(models.Model): ''' The MLAlgorithmStatus represent status of the MLAlgorithm which can change during the time. Attributes: status: The status of algorithm in the endpoint. Can be: testing, staging, production, ab_testing. active: The boolean flag which point to currently active status. created_by: The name of creator. created_at: The date of status creation. parent_mlalgorithm: The reference to corresponding MLAlgorithm. 
''' status = models.CharField(max_length=128) active = models.BooleanField() created_by = models.CharField(max_length=128) created_at = models.DateTimeField(auto_now_add=True, blank=True) parent_mlalgorithm = models.ForeignKey(MLAlgorithm, on_delete=models.CASCADE, related_name = "status") class MLRequest(models.Model): ''' The MLRequest will keep information about all requests to ML algorithms. Attributes: input_data: The input data to ML algorithm in JSON format. full_response: The response of the ML algorithm. response: The response of the ML algorithm in JSON format. feedback: The feedback about the response in JSON format. created_at: The date when request was created. parent_mlalgorithm: The reference to MLAlgorithm used to compute response. ''' input_data = models.CharField(max_length=10000) full_response = models.CharField(max_length=10000) response = models.CharField(max_length=10000) feedback = models.CharField(max_length=10000, blank=True, null=True) created_at = models.DateTimeField(auto_now_add=True, blank=True) parent_mlalgorithm = models.ForeignKey(MLAlgorithm, on_delete=models.CASCADE)
41.573333
106
0.724182
from django.db import models class Endpoint(models.Model): name = models.CharField(max_length=128) owner = models.CharField(max_length=128) created_at = models.DateTimeField(auto_now_add=True, blank=True) class MLAlgorithm(models.Model): name = models.CharField(max_length=128) description = models.CharField(max_length=1000) code = models.CharField(max_length=50000) version = models.CharField(max_length=128) owner = models.CharField(max_length=128) created_at = models.DateTimeField(auto_now_add=True, blank=True) parent_endpoint = models.ForeignKey(Endpoint, on_delete=models.CASCADE) class MLAlgorithmStatus(models.Model): status = models.CharField(max_length=128) active = models.BooleanField() created_by = models.CharField(max_length=128) created_at = models.DateTimeField(auto_now_add=True, blank=True) parent_mlalgorithm = models.ForeignKey(MLAlgorithm, on_delete=models.CASCADE, related_name = "status") class MLRequest(models.Model): input_data = models.CharField(max_length=10000) full_response = models.CharField(max_length=10000) response = models.CharField(max_length=10000) feedback = models.CharField(max_length=10000, blank=True, null=True) created_at = models.DateTimeField(auto_now_add=True, blank=True) parent_mlalgorithm = models.ForeignKey(MLAlgorithm, on_delete=models.CASCADE)
true
true
f70fc7967c36f339b881f758de114c8b5641438b
162
py
Python
tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_PolyTrend_Seasonal_Hour_MLP.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
null
null
null
tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_PolyTrend_Seasonal_Hour_MLP.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
1
2019-11-30T23:39:38.000Z
2019-12-01T04:34:35.000Z
tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_PolyTrend_Seasonal_Hour_MLP.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
null
null
null
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod testmod.build_model( ['Difference'] , ['PolyTrend'] , ['Seasonal_Hour'] , ['MLP'] );
40.5
84
0.753086
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod testmod.build_model( ['Difference'] , ['PolyTrend'] , ['Seasonal_Hour'] , ['MLP'] );
true
true
f70fc914b27823d8d36316353c43270277541831
4,591
py
Python
u24_lymphocyte/third_party/treeano/sandbox/nodes/randomized_relu.py
ALSM-PhD/quip_classification
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
[ "BSD-3-Clause" ]
45
2015-04-26T04:45:51.000Z
2022-01-24T15:03:55.000Z
u24_lymphocyte/third_party/treeano/sandbox/nodes/randomized_relu.py
ALSM-PhD/quip_classification
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
[ "BSD-3-Clause" ]
8
2018-07-20T20:54:51.000Z
2020-06-12T05:36:04.000Z
u24_lymphocyte/third_party/treeano/sandbox/nodes/randomized_relu.py
ALSM-PhD/quip_classification
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
[ "BSD-3-Clause" ]
22
2018-05-21T23:57:20.000Z
2022-02-21T00:48:32.000Z
import numpy as np import theano import theano.tensor as T import treeano import treeano.nodes as tn from theano.sandbox.rng_mrg import MRG_RandomStreams @treeano.register_node("randomized_relu") class RandomizedReLUNode(treeano.NodeImpl): """ from "Empirical Evaluation of Rectified Activations in Convolutional Network" http://arxiv.org/abs/1505.00853 """ hyperparameter_names = ("alpha_lower", "alpha_upper", "deterministic") def compute_output(self, network, in_vw): # gather hyperparameters deterministic = network.find_hyperparameter(["deterministic"]) l = network.find_hyperparameter(["alpha_lower"], 3) u = network.find_hyperparameter(["alpha_upper"], 8) if deterministic: negative_coefficient = 2.0 / (l + u) else: # TODO save this state so that we can seed the rng srng = MRG_RandomStreams() alphas = srng.uniform(size=in_vw.symbolic_shape(), low=l, high=u) negative_coefficient = 1.0 / alphas # return output network.create_vw( "default", variable=treeano.utils.rectify( in_vw.variable, negative_coefficient=negative_coefficient), shape=in_vw.shape, tags={"output"}, ) @treeano.register_node("uniform_randomized_relu") class UniformRandomizedReLUNode(treeano.NodeImpl): """ like RandomizedReLUNode, but instead of sampling from 1 / uniform(l, u), sample from uniform(l, u) """ hyperparameter_names = ("alpha_lower", "alpha_upper", "deterministic") def compute_output(self, network, in_vw): # gather hyperparameters deterministic = network.find_hyperparameter(["deterministic"]) l = network.find_hyperparameter(["alpha_lower"], 1 / 8.) u = network.find_hyperparameter(["alpha_upper"], 1 / 3.) if deterministic: negative_coefficient = (l + u) / 2. 
else: # TODO save this state so that we can seed the rng srng = MRG_RandomStreams() negative_coefficient = srng.uniform(size=in_vw.symbolic_shape(), low=l, high=u) # return output network.create_vw( "default", variable=treeano.utils.rectify( in_vw.variable, negative_coefficient=negative_coefficient), shape=in_vw.shape, tags={"output"}, ) @treeano.register_node("random_walk_relu") class RandomWalkReLUNode(treeano.NodeImpl): """ leaky ReLU node, where leak alpha changes randomly over time """ hyperparameter_names = ("step_size", "initial_alpha", "inits") def compute_output(self, network, in_vw): # gather hyperparameters initial_alpha = network.find_hyperparameter( ["initial_alpha"], 0) alpha = network.create_vw( "alpha", is_shared=True, shape=(in_vw.shape[1],), tags={"state"}, default_inits=[treeano.inits.ConstantInit(initial_alpha)], ).variable pattern = ["x"] * in_vw.ndim pattern[1] = 0 alpha_b = alpha.dimshuffle(*pattern) # return output network.create_vw( "default", variable=treeano.utils.rectify(in_vw.variable, negative_coefficient=alpha_b), shape=in_vw.shape, tags={"output"}, ) def new_update_deltas(self, network): alpha_vw = network.get_vw("alpha") step_size = network.find_hyperparameter(["step_size"]) # NOTE: each MRG_RandomStreams has the same seed, so # all nodes with the same shape end up with the same alphas srng = MRG_RandomStreams() steps = srng.uniform(size=alpha_vw.shape, low=-step_size, high=step_size) # TODO clip value of alpha (to prevent it becoming linear) return treeano.UpdateDeltas({alpha_vw.variable: steps})
33.268116
76
0.544544
import numpy as np import theano import theano.tensor as T import treeano import treeano.nodes as tn from theano.sandbox.rng_mrg import MRG_RandomStreams @treeano.register_node("randomized_relu") class RandomizedReLUNode(treeano.NodeImpl): hyperparameter_names = ("alpha_lower", "alpha_upper", "deterministic") def compute_output(self, network, in_vw): deterministic = network.find_hyperparameter(["deterministic"]) l = network.find_hyperparameter(["alpha_lower"], 3) u = network.find_hyperparameter(["alpha_upper"], 8) if deterministic: negative_coefficient = 2.0 / (l + u) else: srng = MRG_RandomStreams() alphas = srng.uniform(size=in_vw.symbolic_shape(), low=l, high=u) negative_coefficient = 1.0 / alphas network.create_vw( "default", variable=treeano.utils.rectify( in_vw.variable, negative_coefficient=negative_coefficient), shape=in_vw.shape, tags={"output"}, ) @treeano.register_node("uniform_randomized_relu") class UniformRandomizedReLUNode(treeano.NodeImpl): hyperparameter_names = ("alpha_lower", "alpha_upper", "deterministic") def compute_output(self, network, in_vw): deterministic = network.find_hyperparameter(["deterministic"]) l = network.find_hyperparameter(["alpha_lower"], 1 / 8.) u = network.find_hyperparameter(["alpha_upper"], 1 / 3.) if deterministic: negative_coefficient = (l + u) / 2. 
else: srng = MRG_RandomStreams() negative_coefficient = srng.uniform(size=in_vw.symbolic_shape(), low=l, high=u) network.create_vw( "default", variable=treeano.utils.rectify( in_vw.variable, negative_coefficient=negative_coefficient), shape=in_vw.shape, tags={"output"}, ) @treeano.register_node("random_walk_relu") class RandomWalkReLUNode(treeano.NodeImpl): hyperparameter_names = ("step_size", "initial_alpha", "inits") def compute_output(self, network, in_vw): initial_alpha = network.find_hyperparameter( ["initial_alpha"], 0) alpha = network.create_vw( "alpha", is_shared=True, shape=(in_vw.shape[1],), tags={"state"}, default_inits=[treeano.inits.ConstantInit(initial_alpha)], ).variable pattern = ["x"] * in_vw.ndim pattern[1] = 0 alpha_b = alpha.dimshuffle(*pattern) network.create_vw( "default", variable=treeano.utils.rectify(in_vw.variable, negative_coefficient=alpha_b), shape=in_vw.shape, tags={"output"}, ) def new_update_deltas(self, network): alpha_vw = network.get_vw("alpha") step_size = network.find_hyperparameter(["step_size"]) srng = MRG_RandomStreams() steps = srng.uniform(size=alpha_vw.shape, low=-step_size, high=step_size) return treeano.UpdateDeltas({alpha_vw.variable: steps})
true
true
f70fca0643f281fccf3a6c2b02a8457d15910cb5
930
py
Python
abrv/db.py
shwnchpl/abrv
762a96a408104c5cd0623387bf0b6163f7367ca6
[ "MIT" ]
null
null
null
abrv/db.py
shwnchpl/abrv
762a96a408104c5cd0623387bf0b6163f7367ca6
[ "MIT" ]
1
2021-03-20T05:30:27.000Z
2021-03-20T05:30:27.000Z
abrv/db.py
shwnchpl/abrv.io
762a96a408104c5cd0623387bf0b6163f7367ca6
[ "MIT" ]
null
null
null
import click import psycopg2 as pg2 from flask import current_app, g from flask.cli import with_appcontext from psycopg2.extras import DictCursor def get_db(): if 'db' not in g: g.db = pg2.connect( **current_app.config['DATABASE'], ) g.db.cursor_factory = DictCursor return g.db def close_db(e=None): db = g.pop('db', None) if db is not None: db.close() def init_db(): db = get_db() db.autocommit = True cur = db.cursor() with current_app.open_resource('schema.sql') as f: cur.execute(f.read().decode('utf8')) cur.close() db.autocommit = False @click.command('init-db') @with_appcontext def init_db_command(): """Clear the existing data and create new tables.""" init_db() click.echo('Initialized the database.') def init_app(app): app.teardown_appcontext(close_db) app.cli.add_command(init_db_command)
18.979592
56
0.648387
import click import psycopg2 as pg2 from flask import current_app, g from flask.cli import with_appcontext from psycopg2.extras import DictCursor def get_db(): if 'db' not in g: g.db = pg2.connect( **current_app.config['DATABASE'], ) g.db.cursor_factory = DictCursor return g.db def close_db(e=None): db = g.pop('db', None) if db is not None: db.close() def init_db(): db = get_db() db.autocommit = True cur = db.cursor() with current_app.open_resource('schema.sql') as f: cur.execute(f.read().decode('utf8')) cur.close() db.autocommit = False @click.command('init-db') @with_appcontext def init_db_command(): init_db() click.echo('Initialized the database.') def init_app(app): app.teardown_appcontext(close_db) app.cli.add_command(init_db_command)
true
true
f70fca09f62c53b1a4b27f6e0b1daad65396083a
17,810
py
Python
twilio/rest/sync/v1/service/sync_list/__init__.py
ethan-schaffer/MassMessenger
3042ed98864d012a7276a6a365f81690431d5157
[ "MIT" ]
1
2019-02-08T01:13:38.000Z
2019-02-08T01:13:38.000Z
twilio/rest/sync/v1/service/sync_list/__init__.py
kkrlee/twilio-python
260de9df17c5a1440d9c037a971e2182da7f4ced
[ "MIT" ]
2
2019-09-20T19:08:16.000Z
2021-04-02T13:28:29.000Z
twilio/rest/sync/v1/service/sync_list/__init__.py
kkrlee/twilio-python
260de9df17c5a1440d9c037a971e2182da7f4ced
[ "MIT" ]
null
null
null
# coding=utf-8 """ This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from twilio.base import deserialize from twilio.base import values from twilio.base.instance_context import InstanceContext from twilio.base.instance_resource import InstanceResource from twilio.base.list_resource import ListResource from twilio.base.page import Page from twilio.rest.sync.v1.service.sync_list.sync_list_item import SyncListItemList from twilio.rest.sync.v1.service.sync_list.sync_list_permission import SyncListPermissionList class SyncListList(ListResource): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. """ def __init__(self, version, service_sid): """ Initialize the SyncListList :param Version version: Version that contains the resource :param service_sid: The unique SID identifier of the Service Instance that hosts this List object. :returns: twilio.rest.sync.v1.service.sync_list.SyncListList :rtype: twilio.rest.sync.v1.service.sync_list.SyncListList """ super(SyncListList, self).__init__(version) # Path Solution self._solution = {'service_sid': service_sid, } self._uri = '/Services/{service_sid}/Lists'.format(**self._solution) def create(self, unique_name=values.unset, ttl=values.unset): """ Create a new SyncListInstance :param unicode unique_name: Human-readable name for this list :param unicode ttl: Time-to-live of this List in seconds, defaults to no expiration. :returns: Newly created SyncListInstance :rtype: twilio.rest.sync.v1.service.sync_list.SyncListInstance """ data = values.of({'UniqueName': unique_name, 'Ttl': ttl, }) payload = self._version.create( 'POST', self._uri, data=data, ) return SyncListInstance(self._version, payload, service_sid=self._solution['service_sid'], ) def stream(self, limit=None, page_size=None): """ Streams SyncListInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. 
The results are returned as a generator, so this operation is memory efficient. :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.sync.v1.service.sync_list.SyncListInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page(page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_limit']) def list(self, limit=None, page_size=None): """ Lists SyncListInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.sync.v1.service.sync_list.SyncListInstance] """ return list(self.stream(limit=limit, page_size=page_size, )) def page(self, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of SyncListInstance records from the API. 
Request is executed immediately :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of SyncListInstance :rtype: twilio.rest.sync.v1.service.sync_list.SyncListPage """ params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return SyncListPage(self._version, response, self._solution) def get_page(self, target_url): """ Retrieve a specific page of SyncListInstance records from the API. Request is executed immediately :param str target_url: API-generated URL for the requested results page :returns: Page of SyncListInstance :rtype: twilio.rest.sync.v1.service.sync_list.SyncListPage """ response = self._version.domain.twilio.request( 'GET', target_url, ) return SyncListPage(self._version, response, self._solution) def get(self, sid): """ Constructs a SyncListContext :param sid: The sid :returns: twilio.rest.sync.v1.service.sync_list.SyncListContext :rtype: twilio.rest.sync.v1.service.sync_list.SyncListContext """ return SyncListContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) def __call__(self, sid): """ Constructs a SyncListContext :param sid: The sid :returns: twilio.rest.sync.v1.service.sync_list.SyncListContext :rtype: twilio.rest.sync.v1.service.sync_list.SyncListContext """ return SyncListContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Sync.V1.SyncListList>' class SyncListPage(Page): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. 
""" def __init__(self, version, response, solution): """ Initialize the SyncListPage :param Version version: Version that contains the resource :param Response response: Response from the API :param service_sid: The unique SID identifier of the Service Instance that hosts this List object. :returns: twilio.rest.sync.v1.service.sync_list.SyncListPage :rtype: twilio.rest.sync.v1.service.sync_list.SyncListPage """ super(SyncListPage, self).__init__(version, response) # Path Solution self._solution = solution def get_instance(self, payload): """ Build an instance of SyncListInstance :param dict payload: Payload response from the API :returns: twilio.rest.sync.v1.service.sync_list.SyncListInstance :rtype: twilio.rest.sync.v1.service.sync_list.SyncListInstance """ return SyncListInstance(self._version, payload, service_sid=self._solution['service_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Sync.V1.SyncListPage>' class SyncListContext(InstanceContext): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. 
""" def __init__(self, version, service_sid, sid): """ Initialize the SyncListContext :param Version version: Version that contains the resource :param service_sid: The service_sid :param sid: The sid :returns: twilio.rest.sync.v1.service.sync_list.SyncListContext :rtype: twilio.rest.sync.v1.service.sync_list.SyncListContext """ super(SyncListContext, self).__init__(version) # Path Solution self._solution = {'service_sid': service_sid, 'sid': sid, } self._uri = '/Services/{service_sid}/Lists/{sid}'.format(**self._solution) # Dependents self._sync_list_items = None self._sync_list_permissions = None def fetch(self): """ Fetch a SyncListInstance :returns: Fetched SyncListInstance :rtype: twilio.rest.sync.v1.service.sync_list.SyncListInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return SyncListInstance( self._version, payload, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) def delete(self): """ Deletes the SyncListInstance :returns: True if delete succeeds, False otherwise :rtype: bool """ return self._version.delete('delete', self._uri) def update(self, ttl=values.unset): """ Update the SyncListInstance :param unicode ttl: Time-to-live of this List in seconds, defaults to no expiration. 
:returns: Updated SyncListInstance :rtype: twilio.rest.sync.v1.service.sync_list.SyncListInstance """ data = values.of({'Ttl': ttl, }) payload = self._version.update( 'POST', self._uri, data=data, ) return SyncListInstance( self._version, payload, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) @property def sync_list_items(self): """ Access the sync_list_items :returns: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemList :rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemList """ if self._sync_list_items is None: self._sync_list_items = SyncListItemList( self._version, service_sid=self._solution['service_sid'], list_sid=self._solution['sid'], ) return self._sync_list_items @property def sync_list_permissions(self): """ Access the sync_list_permissions :returns: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionList :rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionList """ if self._sync_list_permissions is None: self._sync_list_permissions = SyncListPermissionList( self._version, service_sid=self._solution['service_sid'], list_sid=self._solution['sid'], ) return self._sync_list_permissions def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Sync.V1.SyncListContext {}>'.format(context) class SyncListInstance(InstanceResource): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. 
""" def __init__(self, version, payload, service_sid, sid=None): """ Initialize the SyncListInstance :returns: twilio.rest.sync.v1.service.sync_list.SyncListInstance :rtype: twilio.rest.sync.v1.service.sync_list.SyncListInstance """ super(SyncListInstance, self).__init__(version) # Marshaled Properties self._properties = { 'sid': payload['sid'], 'unique_name': payload['unique_name'], 'account_sid': payload['account_sid'], 'service_sid': payload['service_sid'], 'url': payload['url'], 'links': payload['links'], 'revision': payload['revision'], 'date_expires': deserialize.iso8601_datetime(payload['date_expires']), 'date_created': deserialize.iso8601_datetime(payload['date_created']), 'date_updated': deserialize.iso8601_datetime(payload['date_updated']), 'created_by': payload['created_by'], } # Context self._context = None self._solution = {'service_sid': service_sid, 'sid': sid or self._properties['sid'], } @property def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: SyncListContext for this SyncListInstance :rtype: twilio.rest.sync.v1.service.sync_list.SyncListContext """ if self._context is None: self._context = SyncListContext( self._version, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) return self._context @property def sid(self): """ :returns: The unique 34-character SID identifier of the List. :rtype: unicode """ return self._properties['sid'] @property def unique_name(self): """ :returns: The unique and addressable name of this List. :rtype: unicode """ return self._properties['unique_name'] @property def account_sid(self): """ :returns: The unique SID identifier of the Twilio Account. :rtype: unicode """ return self._properties['account_sid'] @property def service_sid(self): """ :returns: The unique SID identifier of the Service Instance that hosts this List object. 
:rtype: unicode """ return self._properties['service_sid'] @property def url(self): """ :returns: The absolute URL for this List. :rtype: unicode """ return self._properties['url'] @property def links(self): """ :returns: A dictionary of URL links to nested resources of this List. :rtype: unicode """ return self._properties['links'] @property def revision(self): """ :returns: Contains the current revision of this List, represented by a string identifier. :rtype: unicode """ return self._properties['revision'] @property def date_expires(self): """ :returns: Contains the date this List expires and gets deleted automatically. :rtype: datetime """ return self._properties['date_expires'] @property def date_created(self): """ :returns: The date this List was created, given in UTC ISO 8601 format. :rtype: datetime """ return self._properties['date_created'] @property def date_updated(self): """ :returns: Specifies the date this List was last updated, given in UTC ISO 8601 format. :rtype: datetime """ return self._properties['date_updated'] @property def created_by(self): """ :returns: The identity of the List creator. :rtype: unicode """ return self._properties['created_by'] def fetch(self): """ Fetch a SyncListInstance :returns: Fetched SyncListInstance :rtype: twilio.rest.sync.v1.service.sync_list.SyncListInstance """ return self._proxy.fetch() def delete(self): """ Deletes the SyncListInstance :returns: True if delete succeeds, False otherwise :rtype: bool """ return self._proxy.delete() def update(self, ttl=values.unset): """ Update the SyncListInstance :param unicode ttl: Time-to-live of this List in seconds, defaults to no expiration. 
:returns: Updated SyncListInstance :rtype: twilio.rest.sync.v1.service.sync_list.SyncListInstance """ return self._proxy.update(ttl=ttl, ) @property def sync_list_items(self): """ Access the sync_list_items :returns: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemList :rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemList """ return self._proxy.sync_list_items @property def sync_list_permissions(self): """ Access the sync_list_permissions :returns: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionList :rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionList """ return self._proxy.sync_list_permissions def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Sync.V1.SyncListInstance {}>'.format(context)
33.54049
106
0.626727
from twilio.base import deserialize from twilio.base import values from twilio.base.instance_context import InstanceContext from twilio.base.instance_resource import InstanceResource from twilio.base.list_resource import ListResource from twilio.base.page import Page from twilio.rest.sync.v1.service.sync_list.sync_list_item import SyncListItemList from twilio.rest.sync.v1.service.sync_list.sync_list_permission import SyncListPermissionList class SyncListList(ListResource): def __init__(self, version, service_sid): super(SyncListList, self).__init__(version) self._solution = {'service_sid': service_sid, } self._uri = '/Services/{service_sid}/Lists'.format(**self._solution) def create(self, unique_name=values.unset, ttl=values.unset): data = values.of({'UniqueName': unique_name, 'Ttl': ttl, }) payload = self._version.create( 'POST', self._uri, data=data, ) return SyncListInstance(self._version, payload, service_sid=self._solution['service_sid'], ) def stream(self, limit=None, page_size=None): limits = self._version.read_limits(limit, page_size) page = self.page(page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_limit']) def list(self, limit=None, page_size=None): return list(self.stream(limit=limit, page_size=page_size, )) def page(self, page_token=values.unset, page_number=values.unset, page_size=values.unset): params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return SyncListPage(self._version, response, self._solution) def get_page(self, target_url): response = self._version.domain.twilio.request( 'GET', target_url, ) return SyncListPage(self._version, response, self._solution) def get(self, sid): return SyncListContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) def __call__(self, sid): return SyncListContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) def 
__repr__(self): return '<Twilio.Sync.V1.SyncListList>' class SyncListPage(Page): def __init__(self, version, response, solution): super(SyncListPage, self).__init__(version, response) self._solution = solution def get_instance(self, payload): return SyncListInstance(self._version, payload, service_sid=self._solution['service_sid'], ) def __repr__(self): return '<Twilio.Sync.V1.SyncListPage>' class SyncListContext(InstanceContext): def __init__(self, version, service_sid, sid): super(SyncListContext, self).__init__(version) self._solution = {'service_sid': service_sid, 'sid': sid, } self._uri = '/Services/{service_sid}/Lists/{sid}'.format(**self._solution) self._sync_list_items = None self._sync_list_permissions = None def fetch(self): params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return SyncListInstance( self._version, payload, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) def delete(self): return self._version.delete('delete', self._uri) def update(self, ttl=values.unset): data = values.of({'Ttl': ttl, }) payload = self._version.update( 'POST', self._uri, data=data, ) return SyncListInstance( self._version, payload, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) @property def sync_list_items(self): if self._sync_list_items is None: self._sync_list_items = SyncListItemList( self._version, service_sid=self._solution['service_sid'], list_sid=self._solution['sid'], ) return self._sync_list_items @property def sync_list_permissions(self): if self._sync_list_permissions is None: self._sync_list_permissions = SyncListPermissionList( self._version, service_sid=self._solution['service_sid'], list_sid=self._solution['sid'], ) return self._sync_list_permissions def __repr__(self): context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Sync.V1.SyncListContext {}>'.format(context) class SyncListInstance(InstanceResource): def __init__(self, 
version, payload, service_sid, sid=None): super(SyncListInstance, self).__init__(version) self._properties = { 'sid': payload['sid'], 'unique_name': payload['unique_name'], 'account_sid': payload['account_sid'], 'service_sid': payload['service_sid'], 'url': payload['url'], 'links': payload['links'], 'revision': payload['revision'], 'date_expires': deserialize.iso8601_datetime(payload['date_expires']), 'date_created': deserialize.iso8601_datetime(payload['date_created']), 'date_updated': deserialize.iso8601_datetime(payload['date_updated']), 'created_by': payload['created_by'], } self._context = None self._solution = {'service_sid': service_sid, 'sid': sid or self._properties['sid'], } @property def _proxy(self): if self._context is None: self._context = SyncListContext( self._version, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) return self._context @property def sid(self): return self._properties['sid'] @property def unique_name(self): return self._properties['unique_name'] @property def account_sid(self): return self._properties['account_sid'] @property def service_sid(self): return self._properties['service_sid'] @property def url(self): return self._properties['url'] @property def links(self): return self._properties['links'] @property def revision(self): return self._properties['revision'] @property def date_expires(self): return self._properties['date_expires'] @property def date_created(self): return self._properties['date_created'] @property def date_updated(self): return self._properties['date_updated'] @property def created_by(self): return self._properties['created_by'] def fetch(self): return self._proxy.fetch() def delete(self): return self._proxy.delete() def update(self, ttl=values.unset): return self._proxy.update(ttl=ttl, ) @property def sync_list_items(self): return self._proxy.sync_list_items @property def sync_list_permissions(self): return self._proxy.sync_list_permissions def __repr__(self): context = ' 
'.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Sync.V1.SyncListInstance {}>'.format(context)
true
true
f70fca2a46b2b6b5b759453f5fa1bf80d9210bfe
4,988
py
Python
var/spack/repos/builtin/packages/phist/package.py
mrzv/spack
a0fb2838ea60f020179f480a2db1438da9d2e2ab
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
null
null
null
var/spack/repos/builtin/packages/phist/package.py
mrzv/spack
a0fb2838ea60f020179f480a2db1438da9d2e2ab
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
null
null
null
var/spack/repos/builtin/packages/phist/package.py
mrzv/spack
a0fb2838ea60f020179f480a2db1438da9d2e2ab
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
null
null
null
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Phist(CMakePackage): """The Pipelined, Hybrid-parallel Iterative Solver Toolkit provides implementations of and interfaces to block iterative solvers for sparse linear and eigenvalue problems. In contrast to other libraries we support multiple backends (e.g. Trilinos, PETSc and our own optimized kernels), and interfaces in multiple languages such as C, C++, Fortran 2003 and Python. PHIST has a clear focus on portability and hardware performance: in particular support row-major storage of block vectors and using GPUs (via the ghost library or Trilinos/Tpetra). """ homepage = "https://bitbucket.org/essex/phist/" url = "https://bitbucket.org/essex/phist/get/phist-1.4.3.tar.gz" git = "https://bitbucket.org/essex/phist/phist.git" version('develop', branch='devel') version('master', branch='master') version('1.7.2', sha256='29b504d78b5efd57b87d2ca6e20bc8a32b1ba55b40f5a5b7189cc0d28e43bcc0') version('1.6.1', sha256='4ed4869f24f920a494aeae0f7d1d94fe9efce55ebe0d298a5948c9603e07994d') version('1.6.0', '751f855230d6227b972b5ab7bce2c65f') version('1.4.3', 'af3300378d4282366d148e38c3a3199a') variant(name='kernel_lib', default='builtin', description='select the kernel library (backend) for phist', values=['builtin', 'epetra', 'tpetra', 'petsc', 'eigen', 'ghost']) variant(name='outlev', default='2', values=['0', '1', '2', '3', '4', '5'], description='verbosity. 
0: errors 1: +warnings 2: +info ' '3: +verbose 4: +extreme 5; +debug') variant('shared', default=True, description='Enables the build of shared libraries') variant('mpi', default=True, description='enable/disable MPI (note that the kernel library may ' 'not support this choice)') variant('parmetis', default=False, description='enable/disable ParMETIS partitioning (only actually ' 'used with kernel_lib=builtin)') variant('trilinos', default=False, description='enable/disable Trilinos third-party libraries. ' 'For all kernel_libs, we can use Belos and Anasazi ' 'iterative solvers. For the Trilinos backends ' '(kernel_lib=epetra|tpetra) we can use preconditioner ' 'packages such as Ifpack, Ifpack2 and ML.') # ###################### Dependencies ########################## depends_on('cmake@3.8:', type='build') depends_on('blas') depends_on('lapack') depends_on('python@3:', when='@1.7:', type='build') depends_on('mpi', when='+mpi') depends_on('trilinos+anasazi+belos+teuchos', when='+trilinos') depends_on('trilinos@12:+tpetra', when='kernel_lib=tpetra') # Epetra backend also works with older Trilinos versions depends_on('trilinos+epetra', when='kernel_lib=epetra') depends_on('petsc', when='kernel_lib=petsc') depends_on('eigen', when='kernel_lib=eigen') depends_on('ghost', when='kernel_lib=ghost') depends_on('trilinos', when='+trilinos') depends_on('parmetis ^metis+int64', when='+parmetis') def cmake_args(self): spec = self.spec kernel_lib = spec.variants['kernel_lib'].value outlev = spec.variants['outlev'].value lapacke_libs = \ (spec['lapack:c'].libs + spec['blas:c'].libs).joined(';') lapacke_include_dir = spec['lapack:c'].headers.directories[0] args = ['-DPHIST_KERNEL_LIB=%s' % kernel_lib, '-DPHIST_OUTLEV=%s' % outlev, '-DTPL_LAPACKE_LIBRARIES=%s' % lapacke_libs, '-DTPL_LAPACKE_INCLUDE_DIRS=%s' % lapacke_include_dir, '-DPHIST_ENABLE_MPI:BOOL=%s' % ('ON' if '+mpi' in spec else 'OFF'), '-DBUILD_SHARED_LIBS:BOOL=%s' % ('ON' if '+shared' in spec else 'OFF'), 
'-DPHIST_USE_TRILINOS_TPLS:BOOL=%s' % ('ON' if '+trilinos' in spec else 'OFF'), '-DPHIST_USE_SOLVER_TPLS:BOOL=%s' % ('ON' if '+trilinos' in spec else 'OFF'), '-DPHIST_USE_PRECON_TPLS:BOOL=%s' % ('ON' if '+trilinos' in spec else 'OFF'), ] return args @run_after('build') @on_package_attributes(run_tests=True) def check(self): with working_dir(self.build_directory): make("check") @run_after('install') @on_package_attributes(run_tests=True) def test_install(self): with working_dir(self.build_directory): make("test_install")
42.632479
95
0.611668
from spack import * class Phist(CMakePackage): homepage = "https://bitbucket.org/essex/phist/" url = "https://bitbucket.org/essex/phist/get/phist-1.4.3.tar.gz" git = "https://bitbucket.org/essex/phist/phist.git" version('develop', branch='devel') version('master', branch='master') version('1.7.2', sha256='29b504d78b5efd57b87d2ca6e20bc8a32b1ba55b40f5a5b7189cc0d28e43bcc0') version('1.6.1', sha256='4ed4869f24f920a494aeae0f7d1d94fe9efce55ebe0d298a5948c9603e07994d') version('1.6.0', '751f855230d6227b972b5ab7bce2c65f') version('1.4.3', 'af3300378d4282366d148e38c3a3199a') variant(name='kernel_lib', default='builtin', description='select the kernel library (backend) for phist', values=['builtin', 'epetra', 'tpetra', 'petsc', 'eigen', 'ghost']) variant(name='outlev', default='2', values=['0', '1', '2', '3', '4', '5'], description='verbosity. 0: errors 1: +warnings 2: +info ' '3: +verbose 4: +extreme 5; +debug') variant('shared', default=True, description='Enables the build of shared libraries') variant('mpi', default=True, description='enable/disable MPI (note that the kernel library may ' 'not support this choice)') variant('parmetis', default=False, description='enable/disable ParMETIS partitioning (only actually ' 'used with kernel_lib=builtin)') variant('trilinos', default=False, description='enable/disable Trilinos third-party libraries. ' 'For all kernel_libs, we can use Belos and Anasazi ' 'iterative solvers. 
For the Trilinos backends ' '(kernel_lib=epetra|tpetra) we can use preconditioner ' 'packages such as Ifpack, Ifpack2 and ML.') pec else 'OFF'), '-DPHIST_USE_SOLVER_TPLS:BOOL=%s' % ('ON' if '+trilinos' in spec else 'OFF'), '-DPHIST_USE_PRECON_TPLS:BOOL=%s' % ('ON' if '+trilinos' in spec else 'OFF'), ] return args @run_after('build') @on_package_attributes(run_tests=True) def check(self): with working_dir(self.build_directory): make("check") @run_after('install') @on_package_attributes(run_tests=True) def test_install(self): with working_dir(self.build_directory): make("test_install")
true
true
f70fcd603a54e94391c4758ae07ce19fce6dcb4f
6,385
py
Python
src/bindings/python/tests/test_ngraph/test_ops_binary.py
pazamelin/openvino
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
[ "Apache-2.0" ]
1
2019-09-22T01:05:07.000Z
2019-09-22T01:05:07.000Z
src/bindings/python/tests/test_ngraph/test_ops_binary.py
pazamelin/openvino
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
[ "Apache-2.0" ]
58
2020-11-06T12:13:45.000Z
2022-03-28T13:20:11.000Z
src/bindings/python/tests/test_ngraph/test_ops_binary.py
pazamelin/openvino
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
[ "Apache-2.0" ]
2
2021-07-14T07:40:50.000Z
2021-07-27T01:40:03.000Z
# Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import operator import numpy as np import pytest import openvino.runtime.opset8 as ov from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node @pytest.mark.parametrize( "ng_api_helper,numpy_function", [ (ov.add, np.add), (ov.divide, np.divide), (ov.multiply, np.multiply), (ov.subtract, np.subtract), (ov.minimum, np.minimum), (ov.maximum, np.maximum), (ov.mod, np.mod), (ov.equal, np.equal), (ov.not_equal, np.not_equal), (ov.greater, np.greater), (ov.greater_equal, np.greater_equal), (ov.less, np.less), (ov.less_equal, np.less_equal), ], ) def test_binary_op(ng_api_helper, numpy_function): runtime = get_runtime() shape = [2, 2] parameter_a = ov.parameter(shape, name="A", dtype=np.float32) parameter_b = ov.parameter(shape, name="B", dtype=np.float32) model = ng_api_helper(parameter_a, parameter_b) computation = runtime.computation(model, parameter_a, parameter_b) value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) result = computation(value_a, value_b) expected = numpy_function(value_a, value_b) assert np.allclose(result, expected) @pytest.mark.parametrize( "ng_api_helper,numpy_function", [ (ov.add, np.add), (ov.divide, np.divide), (ov.multiply, np.multiply), (ov.subtract, np.subtract), (ov.minimum, np.minimum), (ov.maximum, np.maximum), (ov.mod, np.mod), (ov.equal, np.equal), (ov.not_equal, np.not_equal), (ov.greater, np.greater), (ov.greater_equal, np.greater_equal), (ov.less, np.less), (ov.less_equal, np.less_equal), ], ) def test_binary_op_with_scalar(ng_api_helper, numpy_function): runtime = get_runtime() value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) shape = [2, 2] parameter_a = ov.parameter(shape, name="A", dtype=np.float32) model = ng_api_helper(parameter_a, value_b) computation = runtime.computation(model, parameter_a) result = 
computation(value_a) expected = numpy_function(value_a, value_b) assert np.allclose(result, expected) @pytest.mark.parametrize( "ng_api_helper,numpy_function", [(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)], ) def test_binary_logical_op(ng_api_helper, numpy_function): runtime = get_runtime() shape = [2, 2] parameter_a = ov.parameter(shape, name="A", dtype=np.bool) parameter_b = ov.parameter(shape, name="B", dtype=np.bool) model = ng_api_helper(parameter_a, parameter_b) computation = runtime.computation(model, parameter_a, parameter_b) value_a = np.array([[True, False], [False, True]], dtype=np.bool) value_b = np.array([[False, True], [False, True]], dtype=np.bool) result = computation(value_a, value_b) expected = numpy_function(value_a, value_b) assert np.allclose(result, expected) @pytest.mark.parametrize( "ng_api_helper,numpy_function", [(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)], ) def test_binary_logical_op_with_scalar(ng_api_helper, numpy_function): runtime = get_runtime() value_a = np.array([[True, False], [False, True]], dtype=np.bool) value_b = np.array([[False, True], [False, True]], dtype=np.bool) shape = [2, 2] parameter_a = ov.parameter(shape, name="A", dtype=np.bool) model = ng_api_helper(parameter_a, value_b) computation = runtime.computation(model, parameter_a) result = computation(value_a) expected = numpy_function(value_a, value_b) assert np.allclose(result, expected) @pytest.mark.parametrize( "operator,numpy_function", [ (operator.add, np.add), (operator.sub, np.subtract), (operator.mul, np.multiply), (operator.truediv, np.divide), (operator.eq, np.equal), (operator.ne, np.not_equal), (operator.gt, np.greater), (operator.ge, np.greater_equal), (operator.lt, np.less), (operator.le, np.less_equal), ], ) def test_binary_operators(operator, numpy_function): runtime = get_runtime() value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) value_b 
= np.array([[4, 5], [1, 7]], dtype=np.float32) shape = [2, 2] parameter_a = ov.parameter(shape, name="A", dtype=np.float32) model = operator(parameter_a, value_b) computation = runtime.computation(model, parameter_a) result = computation(value_a) expected = numpy_function(value_a, value_b) assert np.allclose(result, expected) @pytest.mark.parametrize( "operator,numpy_function", [ (operator.add, np.add), (operator.sub, np.subtract), (operator.mul, np.multiply), (operator.truediv, np.divide), (operator.eq, np.equal), (operator.ne, np.not_equal), (operator.gt, np.greater), (operator.ge, np.greater_equal), (operator.lt, np.less), (operator.le, np.less_equal), ], ) def test_binary_operators_with_scalar(operator, numpy_function): runtime = get_runtime() value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) shape = [2, 2] parameter_a = ov.parameter(shape, name="A", dtype=np.float32) model = operator(parameter_a, value_b) computation = runtime.computation(model, parameter_a) result = computation(value_a) expected = numpy_function(value_a, value_b) assert np.allclose(result, expected) def test_multiply(): A = np.arange(48, dtype=np.int32).reshape((8, 1, 6, 1)) B = np.arange(35, dtype=np.int32).reshape((7, 1, 5)) expected = np.multiply(A, B) result = run_op_node([A, B], ov.multiply) assert np.allclose(result, expected) def test_power_v1(): A = np.arange(48, dtype=np.float32).reshape((8, 1, 6, 1)) B = np.arange(20, dtype=np.float32).reshape((4, 1, 5)) expected = np.power(A, B) result = run_op_node([A, B], ov.power) assert np.allclose(result, expected)
30.404762
105
0.649021
import operator import numpy as np import pytest import openvino.runtime.opset8 as ov from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node @pytest.mark.parametrize( "ng_api_helper,numpy_function", [ (ov.add, np.add), (ov.divide, np.divide), (ov.multiply, np.multiply), (ov.subtract, np.subtract), (ov.minimum, np.minimum), (ov.maximum, np.maximum), (ov.mod, np.mod), (ov.equal, np.equal), (ov.not_equal, np.not_equal), (ov.greater, np.greater), (ov.greater_equal, np.greater_equal), (ov.less, np.less), (ov.less_equal, np.less_equal), ], ) def test_binary_op(ng_api_helper, numpy_function): runtime = get_runtime() shape = [2, 2] parameter_a = ov.parameter(shape, name="A", dtype=np.float32) parameter_b = ov.parameter(shape, name="B", dtype=np.float32) model = ng_api_helper(parameter_a, parameter_b) computation = runtime.computation(model, parameter_a, parameter_b) value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) result = computation(value_a, value_b) expected = numpy_function(value_a, value_b) assert np.allclose(result, expected) @pytest.mark.parametrize( "ng_api_helper,numpy_function", [ (ov.add, np.add), (ov.divide, np.divide), (ov.multiply, np.multiply), (ov.subtract, np.subtract), (ov.minimum, np.minimum), (ov.maximum, np.maximum), (ov.mod, np.mod), (ov.equal, np.equal), (ov.not_equal, np.not_equal), (ov.greater, np.greater), (ov.greater_equal, np.greater_equal), (ov.less, np.less), (ov.less_equal, np.less_equal), ], ) def test_binary_op_with_scalar(ng_api_helper, numpy_function): runtime = get_runtime() value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) shape = [2, 2] parameter_a = ov.parameter(shape, name="A", dtype=np.float32) model = ng_api_helper(parameter_a, value_b) computation = runtime.computation(model, parameter_a) result = computation(value_a) expected = numpy_function(value_a, value_b) assert 
np.allclose(result, expected) @pytest.mark.parametrize( "ng_api_helper,numpy_function", [(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)], ) def test_binary_logical_op(ng_api_helper, numpy_function): runtime = get_runtime() shape = [2, 2] parameter_a = ov.parameter(shape, name="A", dtype=np.bool) parameter_b = ov.parameter(shape, name="B", dtype=np.bool) model = ng_api_helper(parameter_a, parameter_b) computation = runtime.computation(model, parameter_a, parameter_b) value_a = np.array([[True, False], [False, True]], dtype=np.bool) value_b = np.array([[False, True], [False, True]], dtype=np.bool) result = computation(value_a, value_b) expected = numpy_function(value_a, value_b) assert np.allclose(result, expected) @pytest.mark.parametrize( "ng_api_helper,numpy_function", [(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)], ) def test_binary_logical_op_with_scalar(ng_api_helper, numpy_function): runtime = get_runtime() value_a = np.array([[True, False], [False, True]], dtype=np.bool) value_b = np.array([[False, True], [False, True]], dtype=np.bool) shape = [2, 2] parameter_a = ov.parameter(shape, name="A", dtype=np.bool) model = ng_api_helper(parameter_a, value_b) computation = runtime.computation(model, parameter_a) result = computation(value_a) expected = numpy_function(value_a, value_b) assert np.allclose(result, expected) @pytest.mark.parametrize( "operator,numpy_function", [ (operator.add, np.add), (operator.sub, np.subtract), (operator.mul, np.multiply), (operator.truediv, np.divide), (operator.eq, np.equal), (operator.ne, np.not_equal), (operator.gt, np.greater), (operator.ge, np.greater_equal), (operator.lt, np.less), (operator.le, np.less_equal), ], ) def test_binary_operators(operator, numpy_function): runtime = get_runtime() value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) value_b = np.array([[4, 5], [1, 7]], dtype=np.float32) shape = [2, 2] 
parameter_a = ov.parameter(shape, name="A", dtype=np.float32) model = operator(parameter_a, value_b) computation = runtime.computation(model, parameter_a) result = computation(value_a) expected = numpy_function(value_a, value_b) assert np.allclose(result, expected) @pytest.mark.parametrize( "operator,numpy_function", [ (operator.add, np.add), (operator.sub, np.subtract), (operator.mul, np.multiply), (operator.truediv, np.divide), (operator.eq, np.equal), (operator.ne, np.not_equal), (operator.gt, np.greater), (operator.ge, np.greater_equal), (operator.lt, np.less), (operator.le, np.less_equal), ], ) def test_binary_operators_with_scalar(operator, numpy_function): runtime = get_runtime() value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) shape = [2, 2] parameter_a = ov.parameter(shape, name="A", dtype=np.float32) model = operator(parameter_a, value_b) computation = runtime.computation(model, parameter_a) result = computation(value_a) expected = numpy_function(value_a, value_b) assert np.allclose(result, expected) def test_multiply(): A = np.arange(48, dtype=np.int32).reshape((8, 1, 6, 1)) B = np.arange(35, dtype=np.int32).reshape((7, 1, 5)) expected = np.multiply(A, B) result = run_op_node([A, B], ov.multiply) assert np.allclose(result, expected) def test_power_v1(): A = np.arange(48, dtype=np.float32).reshape((8, 1, 6, 1)) B = np.arange(20, dtype=np.float32).reshape((4, 1, 5)) expected = np.power(A, B) result = run_op_node([A, B], ov.power) assert np.allclose(result, expected)
true
true
f70fcd9fc74db1c3ee037f4d11c4ff2fdad8ae4a
1,015
py
Python
autoindex.py
langsci/91
bca083ed2f5f30dd4c2d6587366e8e0f649285c3
[ "CC-BY-4.0" ]
null
null
null
autoindex.py
langsci/91
bca083ed2f5f30dd4c2d6587366e8e0f649285c3
[ "CC-BY-4.0" ]
null
null
null
autoindex.py
langsci/91
bca083ed2f5f30dd4c2d6587366e8e0f649285c3
[ "CC-BY-4.0" ]
null
null
null
#!/usr/bin/python3 import glob import re lgs=open("locallanguages.txt").read().split('\n') terms=open("localsubjectterms.txt").read().split('\n')[::-1]#reverse to avoid double indexing print("found %i language names for autoindexing" % len(lgs)) print("found %i subject terms for autoindexing" % len(terms)) files = glob.glob('chapters/*tex') for f in files: print("indexing %s" % f) c = open(f).read() for lg in lgs: lg = lg.strip() if lg == '': continue c = re.sub('(?<!ili{)%s(?![\w}])'%lg, '\ili{%s}'%lg, c) for term in terms: term = term.strip() if term == '': continue c = re.sub('(?<!isi{)%s(?![\w}])'%term, '\isi{%s}'%term, c) nlg = len(re.findall('\\ili{',c)) nt = len(re.findall('\\isi{',c)) outfile = open(f.replace('chapters','indexed'), 'w') outfile.write(c) outfile.close() print(" %s now contains %i indexed languages and %i indexed subject terms"%(f.split('/')[-1],nlg,nt)) print("indexed files are in the folder 'indexed'")
28.194444
103
0.595074
import glob import re lgs=open("locallanguages.txt").read().split('\n') terms=open("localsubjectterms.txt").read().split('\n')[::-1] print("found %i language names for autoindexing" % len(lgs)) print("found %i subject terms for autoindexing" % len(terms)) files = glob.glob('chapters/*tex') for f in files: print("indexing %s" % f) c = open(f).read() for lg in lgs: lg = lg.strip() if lg == '': continue c = re.sub('(?<!ili{)%s(?![\w}])'%lg, '\ili{%s}'%lg, c) for term in terms: term = term.strip() if term == '': continue c = re.sub('(?<!isi{)%s(?![\w}])'%term, '\isi{%s}'%term, c) nlg = len(re.findall('\\ili{',c)) nt = len(re.findall('\\isi{',c)) outfile = open(f.replace('chapters','indexed'), 'w') outfile.write(c) outfile.close() print(" %s now contains %i indexed languages and %i indexed subject terms"%(f.split('/')[-1],nlg,nt)) print("indexed files are in the folder 'indexed'")
true
true
f70fcdc4a7591387a0e661b787c802ae0ddafa4c
3,137
py
Python
mir/qualia/comment.py
darkfeline/qualia
28ccb419dd82b75878c2f52227f291b249b489d7
[ "Apache-2.0" ]
23
2017-01-18T13:53:05.000Z
2020-05-30T10:41:56.000Z
mir/qualia/comment.py
project-mir/mir.qualia
28ccb419dd82b75878c2f52227f291b249b489d7
[ "Apache-2.0" ]
4
2016-10-16T00:19:15.000Z
2017-10-25T13:28:05.000Z
mir/qualia/comment.py
project-mir/mir.qualia
28ccb419dd82b75878c2f52227f291b249b489d7
[ "Apache-2.0" ]
5
2016-10-16T00:07:38.000Z
2022-03-30T13:11:30.000Z
# Copyright (C) 2016 Allen Li # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Comment and uncomment lines. Classes: CommentPrefix """ import re from mir.qualia.indent import common_indent class CommentPrefix: r"""Comments and uncomments lines, given a prefix. >>> prefix = CommentPrefix('#') >>> prefix.uncomment(['#export EDITOR=vi\n']) ['export EDITOR=vi\n'] >>> prefix.comment(['export EDITOR=vi\n']) ['#export EDITOR=vi\n'] >>> prefix.is_commented(['export EDITOR=vi\n']) False Do not modify the comment_prefix attribute on an instance. """ def __init__(self, comment_prefix): self._comment_prefix = comment_prefix self._prefix_pattern = re.compile( fr'^(?P<indent>\s*){re.escape(comment_prefix)}') def __repr__(self): cls = type(self).__qualname__ return f'{cls}({self._comment_prefix!r})' def is_commented(self, lines): """Return True if all lines are commented.""" pattern = self._prefix_pattern return all(pattern.search(line) for line in lines) def uncomment(self, lines): r"""Uncomment a sequence of lines. This will keep uncommenting so long as the lines are all commented. This is so that uncommenting is an idempotent operation. 
>>> prefix = CommentPrefix('#') >>> prefix.uncomment(['##foo\n', '##bar\n']) ['foo\n', 'bar\n'] >>> prefix.uncomment(prefix.uncomment(['##foo\n', '##bar\n'])) ['foo\n', 'bar\n'] In almost all cases, this is desired behavior, but if you need to preserve levels of commenting, include a line to protect them: >>> prefix = CommentPrefix('#') >>> prefix.uncomment(['##foo\n', '##bar\n', '#\n']) ['#foo\n', '#bar\n', '\n'] """ if not lines: return [] while self.is_commented(lines): lines = self._force_uncomment(lines) return lines def _force_uncomment(self, lines): """Unconditionally uncomment a sequence of lines once.""" return [self._prefix_pattern.sub(r'\g<indent>', line) for line in lines] def comment(self, lines): """Comment a sequence of lines.""" if not self.is_commented(lines): return self._force_comment(lines) return lines def _force_comment(self, lines): """Unconditionally comment a sequence of lines.""" indent = common_indent(lines) indent_len = len(indent) prefix = self._comment_prefix return [f'{indent}{prefix}{line[indent_len:]}' for line in lines]
32.340206
75
0.63277
import re from mir.qualia.indent import common_indent class CommentPrefix: def __init__(self, comment_prefix): self._comment_prefix = comment_prefix self._prefix_pattern = re.compile( fr'^(?P<indent>\s*){re.escape(comment_prefix)}') def __repr__(self): cls = type(self).__qualname__ return f'{cls}({self._comment_prefix!r})' def is_commented(self, lines): pattern = self._prefix_pattern return all(pattern.search(line) for line in lines) def uncomment(self, lines): if not lines: return [] while self.is_commented(lines): lines = self._force_uncomment(lines) return lines def _force_uncomment(self, lines): return [self._prefix_pattern.sub(r'\g<indent>', line) for line in lines] def comment(self, lines): if not self.is_commented(lines): return self._force_comment(lines) return lines def _force_comment(self, lines): indent = common_indent(lines) indent_len = len(indent) prefix = self._comment_prefix return [f'{indent}{prefix}{line[indent_len:]}' for line in lines]
true
true
f70fcec201aadc2ae8017ed9ede79957d301a8d7
1,785
py
Python
eloquent/query/processors/mysql_processor.py
KarthickNamakkalKrishnan/eloquent
0638b688d5fd0c1a46b7471dd465eeb4c2f84666
[ "MIT" ]
47
2015-03-19T02:11:36.000Z
2022-03-29T07:16:42.000Z
eloquent/query/processors/mysql_processor.py
KarthickNamakkalKrishnan/eloquent
0638b688d5fd0c1a46b7471dd465eeb4c2f84666
[ "MIT" ]
20
2015-03-16T02:56:51.000Z
2015-05-24T17:51:29.000Z
eloquent/query/processors/mysql_processor.py
sdispater/eloquent
0638b688d5fd0c1a46b7471dd465eeb4c2f84666
[ "MIT" ]
4
2018-08-29T13:42:50.000Z
2021-03-14T11:28:31.000Z
# -*- coding: utf-8 -*- from .processor import QueryProcessor class MySqlQueryProcessor(QueryProcessor): def process_insert_get_id(self, query, sql, values, sequence=None): """ Process an "insert get ID" query. :param query: A QueryBuilder instance :type query: QueryBuilder :param sql: The sql query to execute :type sql: str :param values: The value bindings :type values: list :param sequence: The ids sequence :type sequence: str :return: The inserted row id :rtype: int """ if not query.get_connection().transaction_level(): with query.get_connection().transaction(): query.get_connection().insert(sql, values) cursor = query.get_connection().get_cursor() if hasattr(cursor, 'lastrowid'): id = cursor.lastrowid else: id = query.get_connection().statement('SELECT LAST_INSERT_ID()') else: query.get_connection().insert(sql, values) cursor = query.get_connection().get_cursor() if hasattr(cursor, 'lastrowid'): id = cursor.lastrowid else: id = query.get_connection().statement('SELECT LAST_INSERT_ID()') if isinstance(id, int): return id if str(id).isdigit(): return int(id) return id def process_column_listing(self, results): """ Process the results of a column listing query :param results: The query results :type results: dict :return: The processed results :return: dict """ return map(lambda x: x['column_name'], results)
27.890625
84
0.568067
from .processor import QueryProcessor class MySqlQueryProcessor(QueryProcessor): def process_insert_get_id(self, query, sql, values, sequence=None): if not query.get_connection().transaction_level(): with query.get_connection().transaction(): query.get_connection().insert(sql, values) cursor = query.get_connection().get_cursor() if hasattr(cursor, 'lastrowid'): id = cursor.lastrowid else: id = query.get_connection().statement('SELECT LAST_INSERT_ID()') else: query.get_connection().insert(sql, values) cursor = query.get_connection().get_cursor() if hasattr(cursor, 'lastrowid'): id = cursor.lastrowid else: id = query.get_connection().statement('SELECT LAST_INSERT_ID()') if isinstance(id, int): return id if str(id).isdigit(): return int(id) return id def process_column_listing(self, results): return map(lambda x: x['column_name'], results)
true
true
f70fcef80f20ce380e02798f1200de49413bc6f2
202
py
Python
src/simmate/toolkit/creators/vector/__init__.py
sionab/simmate
6dedea7310829aae425bf3393e7923e454a0129f
[ "BSD-3-Clause" ]
null
null
null
src/simmate/toolkit/creators/vector/__init__.py
sionab/simmate
6dedea7310829aae425bf3393e7923e454a0129f
[ "BSD-3-Clause" ]
null
null
null
src/simmate/toolkit/creators/vector/__init__.py
sionab/simmate
6dedea7310829aae425bf3393e7923e454a0129f
[ "BSD-3-Clause" ]
null
null
null
from simmate.toolkit.creators.vector.uniform_distribution import ( UniformlyDistributedVectors, ) from simmate.toolkit.creators.vector.normal_distribution import ( NormallyDistributedVectors, )
28.857143
66
0.831683
from simmate.toolkit.creators.vector.uniform_distribution import ( UniformlyDistributedVectors, ) from simmate.toolkit.creators.vector.normal_distribution import ( NormallyDistributedVectors, )
true
true
f70fcf41f7c218d9425b0cdbb130c4b7ef454437
3,403
py
Python
pepys_admin/maintenance/dialogs/progress_dialog.py
debrief/pepys-import
12d29c0e0f69e1119400334983947893e7679b6b
[ "Apache-2.0" ]
4
2021-05-14T08:22:47.000Z
2022-02-04T19:48:25.000Z
pepys_admin/maintenance/dialogs/progress_dialog.py
debrief/pepys-import
12d29c0e0f69e1119400334983947893e7679b6b
[ "Apache-2.0" ]
1,083
2019-11-06T17:01:07.000Z
2022-03-25T10:26:51.000Z
pepys_admin/maintenance/dialogs/progress_dialog.py
debrief/pepys-import
12d29c0e0f69e1119400334983947893e7679b6b
[ "Apache-2.0" ]
4
2019-11-06T12:00:45.000Z
2021-06-09T04:18:28.000Z
import asyncio from asyncio import Future from asyncio.tasks import ensure_future from functools import partial from prompt_toolkit.application.current import get_app from prompt_toolkit.layout.containers import HSplit from prompt_toolkit.layout.dimension import D from prompt_toolkit.widgets import Button, Label, ProgressBar from prompt_toolkit.widgets.dialogs import Dialog class ProgressDialog: """Dialog showing a progress bar, with an optional Cancel button.""" def __init__(self, title, run_callback, show_cancel=True): """Creates a dialog object which will show a dialog with a progress bar and an optional cancel button. Arguments: - `title`: Title for the dialog box - `run_callback`: Function to be called to do the actual work. This must be a normal, non-async function. It must take two keyword arguments: set_percentage and is_cancelled. When the function is called, two separate functions will be passed in as those two arguments. The set_percentage argument can be called with a number between 0 and 100 to set the progress bar to that value, and the is_cancelled function will return True if the cancel button has been pressed. The function given will be called with those two arguments only, if other arguments need passing then use functools.partial to pass them. The function must be thread-safe, as it is called in a separate thread. 
- `show_cancel`: Whether to show a cancel button or not (boolean, default True) """ self.future = Future() def set_cancelled(): self.cancelled = True self.future.set_result(None) cancel_button = Button(text="Cancel", handler=(lambda: set_cancelled())) self.progressbar = ProgressBar() self.progressbar.percentage = 0 self.run_callback = run_callback self.cancelled = False self.dialog = Dialog( title=title, body=HSplit([Label(text="In progress..."), self.progressbar]), buttons=[cancel_button] if show_cancel else [], width=D(preferred=80), modal=True, ) async def coroutine(): # This runs the run_callback function in a separate thread # but as part of the asyncio loop, so the GUI can still update # while a potentially-blocking function runs in the background try: loop = asyncio.get_running_loop() result = await loop.run_in_executor( None, partial( self.run_callback, set_percentage=self.set_percentage, is_cancelled=self.is_cancelled, ), ) self.future.set_result(result) except Exception as e: try: self.future.set_result(e) except asyncio.InvalidStateError: pass ensure_future(coroutine()) def set_percentage(self, value: int) -> None: self.progressbar.percentage = int(value) # Refresh the GUI app = get_app() app.invalidate() def is_cancelled(self): return self.cancelled def __pt_container__(self): return self.dialog
38.235955
88
0.628563
import asyncio from asyncio import Future from asyncio.tasks import ensure_future from functools import partial from prompt_toolkit.application.current import get_app from prompt_toolkit.layout.containers import HSplit from prompt_toolkit.layout.dimension import D from prompt_toolkit.widgets import Button, Label, ProgressBar from prompt_toolkit.widgets.dialogs import Dialog class ProgressDialog: def __init__(self, title, run_callback, show_cancel=True): self.future = Future() def set_cancelled(): self.cancelled = True self.future.set_result(None) cancel_button = Button(text="Cancel", handler=(lambda: set_cancelled())) self.progressbar = ProgressBar() self.progressbar.percentage = 0 self.run_callback = run_callback self.cancelled = False self.dialog = Dialog( title=title, body=HSplit([Label(text="In progress..."), self.progressbar]), buttons=[cancel_button] if show_cancel else [], width=D(preferred=80), modal=True, ) async def coroutine(): try: loop = asyncio.get_running_loop() result = await loop.run_in_executor( None, partial( self.run_callback, set_percentage=self.set_percentage, is_cancelled=self.is_cancelled, ), ) self.future.set_result(result) except Exception as e: try: self.future.set_result(e) except asyncio.InvalidStateError: pass ensure_future(coroutine()) def set_percentage(self, value: int) -> None: self.progressbar.percentage = int(value) app = get_app() app.invalidate() def is_cancelled(self): return self.cancelled def __pt_container__(self): return self.dialog
true
true
f70fcf542f3210fc83f2ec7cfff75251815ab90f
5,071
py
Python
python/configuration.py
robertosilviu/differential_evolution_bridge
dc9e7692d4e88aaca6d6db38f52926286879efb6
[ "MIT" ]
1
2020-06-10T05:59:34.000Z
2020-06-10T05:59:34.000Z
python/configuration.py
robertosilviu/differential_evolution_bridge
dc9e7692d4e88aaca6d6db38f52926286879efb6
[ "MIT" ]
null
null
null
python/configuration.py
robertosilviu/differential_evolution_bridge
dc9e7692d4e88aaca6d6db38f52926286879efb6
[ "MIT" ]
2
2020-02-26T02:10:58.000Z
2021-07-02T08:24:42.000Z
import json class Configuration(): # class to organize Netlogo simulation parameters def __init__(self): # costants self.constants={ 'strategy?' : 3, 'drone.radius': 0.2, 'drone.speedMax': 8.5, 'drone.cruisingSpeed': 2, 'drone.acceleration': 2, 'drone.deceleration': -2, 'drone.velocityAngularMax': 2.6, 'drone.accelerationAng': 7, 'drone.decelerationAng': -7, 'drone.endurance': 24, 'sensing.radius': 2.5, 'sensing.angle': 360, 'rectangleBase': 5, #sensingBase 'rectangleHeight': 4, #sensingHeight 'drone.reachable.radius': 4, 'drone.reachable.angle': 360, 'drone.collision.vision': 6, 'drone.sight.angleMax': 60, 'drone.collision.gapAngle': 20 } #configuration parameters self.parameters={ 'strategy?' : 3, 'drone.radius': 0.2, 'drone.speedMax': 8.5, 'drone.cruisingSpeed': 2, 'drone.acceleration': 2, 'drone.deceleration': -2, 'drone.velocityAngularMax': 2.6, 'drone.accelerationAng': 7, 'drone.decelerationAng': -7, 'drone.endurance': 24, 'sensing.radius': 2.5, 'sensing.angle': 360, 'rectangleBase': 5, #sensingBase 'rectangleHeight': 4, #sensingHeight 'drone.reachable.radius': 4, 'drone.reachable.angle': 360, 'drone.collision.vision': 6, 'drone.sight.angleMax': 60, 'drone.collision.gapAngle': 20, 'mark.radiusTop': 8, 'mark.radiusDown': 18, 'track.evapRate': 0.16, 'olfactoryHabituation': 22, 'drone.flocking.angle': 42, 'drone.flocking.wiggleVar': 14, 'drone.flocking.radiusSeparate': 15, 'drone.flocking.maxSeparateTurn': 33, 'drone.flocking.radiusAlign': 19, 'drone.flocking.maxAlignTurn': 33, 'drone.flocking.radiusCohere': 21, 'drone.flocking.maxCohereTurn': 24 } #boundaries of parameters self.paramBoundaries={ 'mark.radiusTop': (1,13), 'mark.radiusDown': (13,19), 'track.evapRate': (0.01,0.2), 'olfactoryHabituation': (1,100), 'drone.flocking.angle': (15,45), 'drone.flocking.wiggleVar': (5,15), 'drone.flocking.radiusSeparate': (6,16), 'drone.flocking.maxSeparateTurn': (30,45), 'drone.flocking.radiusAlign': (16,22), 'drone.flocking.maxAlignTurn': (30,45), 
'drone.flocking.radiusCohere': (18,26), 'drone.flocking.maxCohereTurn': (15,30) } # print parameters and boundaries def showParameters(self): for key,value in self.parameters.items(): if key in self.paramBoundaries: bounds=self.paramBoundaries[key] print( key,' =',value,' | bounds= ',bounds) else: print( key,' =',value,' | bounds= const value') # create list for differential_evolution algorythm def createBoundsList(self): bounds=[] for key,value in self.paramBoundaries.items(): bounds.append(value) return bounds # name passed as 'name' def addParameter(self,name,value,min_bounder,max_bounder): self.parameters[name]=value self.paramBoundaries[name]=(min_bounder,max_bounder) #remove parameter def removeParameter(self,name): del self.parameters[name] del self.paramBoundaries[name] print('removed ' + ' ' + name + ' : ' + str(self.parameters[name]) + ', bounds = ' + str(self.paramBoundaries[name]) ) # set parameters value from a specified array # the order of values in the array must # be the same of Configuration.parameters def refreshConfiguration(self,x): count=0 for key,value in self.paramBoundaries.items(): self.parameters[key]=x[count] count+=1 for key,value in self.constants.items(): self.parameters[key]=self.constants[key] print('saved new configuration!') # save parameters to JSON file def save_toFile(self): filename='optimized_parameters.json' with open(filename,'w') as f_obj: json.dump(self.parameters,f_obj) print('saved optimized parameters to file!') # load parameters from JSON file def loadParameters_fromFile(self): filename='optimized_parameters.json' try: with open(filename) as f_obj: self.parameters=json.load(f_obj) except FileNotFoundError: print('file not found!') else: print('loaded parameters from file!')
35.964539
63
0.549399
import json class Configuration(): def __init__(self): self.constants={ 'strategy?' : 3, 'drone.radius': 0.2, 'drone.speedMax': 8.5, 'drone.cruisingSpeed': 2, 'drone.acceleration': 2, 'drone.deceleration': -2, 'drone.velocityAngularMax': 2.6, 'drone.accelerationAng': 7, 'drone.decelerationAng': -7, 'drone.endurance': 24, 'sensing.radius': 2.5, 'sensing.angle': 360, 'rectangleBase': 5, 'rectangleHeight': 4, 'drone.reachable.radius': 4, 'drone.reachable.angle': 360, 'drone.collision.vision': 6, 'drone.sight.angleMax': 60, 'drone.collision.gapAngle': 20 } self.parameters={ 'strategy?' : 3, 'drone.radius': 0.2, 'drone.speedMax': 8.5, 'drone.cruisingSpeed': 2, 'drone.acceleration': 2, 'drone.deceleration': -2, 'drone.velocityAngularMax': 2.6, 'drone.accelerationAng': 7, 'drone.decelerationAng': -7, 'drone.endurance': 24, 'sensing.radius': 2.5, 'sensing.angle': 360, 'rectangleBase': 5, 'rectangleHeight': 4, 'drone.reachable.radius': 4, 'drone.reachable.angle': 360, 'drone.collision.vision': 6, 'drone.sight.angleMax': 60, 'drone.collision.gapAngle': 20, 'mark.radiusTop': 8, 'mark.radiusDown': 18, 'track.evapRate': 0.16, 'olfactoryHabituation': 22, 'drone.flocking.angle': 42, 'drone.flocking.wiggleVar': 14, 'drone.flocking.radiusSeparate': 15, 'drone.flocking.maxSeparateTurn': 33, 'drone.flocking.radiusAlign': 19, 'drone.flocking.maxAlignTurn': 33, 'drone.flocking.radiusCohere': 21, 'drone.flocking.maxCohereTurn': 24 } self.paramBoundaries={ 'mark.radiusTop': (1,13), 'mark.radiusDown': (13,19), 'track.evapRate': (0.01,0.2), 'olfactoryHabituation': (1,100), 'drone.flocking.angle': (15,45), 'drone.flocking.wiggleVar': (5,15), 'drone.flocking.radiusSeparate': (6,16), 'drone.flocking.maxSeparateTurn': (30,45), 'drone.flocking.radiusAlign': (16,22), 'drone.flocking.maxAlignTurn': (30,45), 'drone.flocking.radiusCohere': (18,26), 'drone.flocking.maxCohereTurn': (15,30) } def showParameters(self): for key,value in self.parameters.items(): if key in self.paramBoundaries: 
bounds=self.paramBoundaries[key] print( key,' =',value,' | bounds= ',bounds) else: print( key,' =',value,' | bounds= const value') def createBoundsList(self): bounds=[] for key,value in self.paramBoundaries.items(): bounds.append(value) return bounds def addParameter(self,name,value,min_bounder,max_bounder): self.parameters[name]=value self.paramBoundaries[name]=(min_bounder,max_bounder) def removeParameter(self,name): del self.parameters[name] del self.paramBoundaries[name] print('removed ' + ' ' + name + ' : ' + str(self.parameters[name]) + ', bounds = ' + str(self.paramBoundaries[name]) ) def refreshConfiguration(self,x): count=0 for key,value in self.paramBoundaries.items(): self.parameters[key]=x[count] count+=1 for key,value in self.constants.items(): self.parameters[key]=self.constants[key] print('saved new configuration!') def save_toFile(self): filename='optimized_parameters.json' with open(filename,'w') as f_obj: json.dump(self.parameters,f_obj) print('saved optimized parameters to file!') def loadParameters_fromFile(self): filename='optimized_parameters.json' try: with open(filename) as f_obj: self.parameters=json.load(f_obj) except FileNotFoundError: print('file not found!') else: print('loaded parameters from file!')
true
true
f70fd0b4258ce75e8178e9428e3b7a835b135324
1,925
py
Python
docs-source/source/conf.py
bengranett/synmock
def8f73792d1b756feb2203c1e03e71ce9326d9b
[ "MIT" ]
1
2020-03-18T15:16:50.000Z
2020-03-18T15:16:50.000Z
docs-source/source/conf.py
bengranett/synmock
def8f73792d1b756feb2203c1e03e71ce9326d9b
[ "MIT" ]
null
null
null
docs-source/source/conf.py
bengranett/synmock
def8f73792d1b756feb2203c1e03e71ce9326d9b
[ "MIT" ]
null
null
null
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- project = 'Synmock' copyright = '2020, Ben Granett' author = 'Ben Granett' # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.githubpages', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static']
37.019231
79
0.661299
project = 'Synmock' copyright = '2020, Ben Granett' author = 'Ben Granett' extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.githubpages', 'sphinx.ext.viewcode'] templates_path = ['_templates'] exclude_patterns = [] html_theme = 'alabaster' html_static_path = ['_static']
true
true
f70fd182757e69c7390c6872baa160b5dc484126
378
py
Python
dictances/intersection_squared_variation.py
DavidBerdik/dictances
7b804b62032bbdecc8e73946cf74b171681fe4f5
[ "MIT" ]
30
2018-08-30T16:00:14.000Z
2022-03-14T14:36:17.000Z
dictances/intersection_squared_variation.py
DavidBerdik/dictances
7b804b62032bbdecc8e73946cf74b171681fe4f5
[ "MIT" ]
6
2019-06-18T15:37:23.000Z
2021-04-15T12:40:42.000Z
dictances/intersection_squared_variation.py
DavidBerdik/dictances
7b804b62032bbdecc8e73946cf74b171681fe4f5
[ "MIT" ]
6
2019-02-10T23:22:25.000Z
2020-10-01T16:25:40.000Z
"""Return the squared distance beetween the intersection of a and b.""" from .intersection_nth_variation import intersection_nth_variation from typing import Dict def intersection_squared_variation(a: Dict, b: Dict, overlap: bool=False)->float: """Return the squared distance beetween the intersection of a and b.""" return intersection_nth_variation(a, b, 2, overlap)
47.25
81
0.783069
from .intersection_nth_variation import intersection_nth_variation from typing import Dict def intersection_squared_variation(a: Dict, b: Dict, overlap: bool=False)->float: return intersection_nth_variation(a, b, 2, overlap)
true
true
f70fd192b0b86d4d1b0220c841fb600a1a2d7002
503
py
Python
generated-libraries/python/netapp/exports/security_flavor.py
radekg/netapp-ontap-lib-get
6445ebb071ec147ea82a486fbe9f094c56c5c40d
[ "MIT" ]
2
2017-03-28T15:31:26.000Z
2018-08-16T22:15:18.000Z
generated-libraries/python/netapp/exports/security_flavor.py
radekg/netapp-ontap-lib-get
6445ebb071ec147ea82a486fbe9f094c56c5c40d
[ "MIT" ]
null
null
null
generated-libraries/python/netapp/exports/security_flavor.py
radekg/netapp-ontap-lib-get
6445ebb071ec147ea82a486fbe9f094c56c5c40d
[ "MIT" ]
null
null
null
class SecurityFlavor(basestring): """ any|none|never|krb5|ntlm|sys Possible values: <ul> <li> "any" - Any, <li> "none" - Anonymous Access Allowed If Security Type Not Already Listed, <li> "never" - Never, <li> "krb5" - Kerberos 5 Authentication, <li> "ntlm" - CIFS NTLM, <li> "sys" - NFS AUTH_SYS, <li> "spinauth" - SpinAuth </ul> """ @staticmethod def get_api_name(): return "security-flavor"
23.952381
63
0.540755
class SecurityFlavor(basestring): @staticmethod def get_api_name(): return "security-flavor"
true
true
f70fd19c066bebb0ed59dbe39549e05e6dbe3832
19,393
py
Python
powderBot.py
JordanGoodridge/Weather-Discord-Bot
5fd0056a38e753fc6de5eb3d9af428cf6e244fb8
[ "MIT" ]
null
null
null
powderBot.py
JordanGoodridge/Weather-Discord-Bot
5fd0056a38e753fc6de5eb3d9af428cf6e244fb8
[ "MIT" ]
null
null
null
powderBot.py
JordanGoodridge/Weather-Discord-Bot
5fd0056a38e753fc6de5eb3d9af428cf6e244fb8
[ "MIT" ]
null
null
null
#Weather #Functions TODO # precip accumilation works well hourly # sign up for storm alert per IKON or EPIC resort # timer to check the 3 day for storms # highest winter in state from datetime import datetime, timedelta from dateutil import tz import discord import googlemaps import aiohttp import asyncio from PIL import Image, ImageDraw, ImageFont client = discord.Client() #Keys gmaps_key = '' api_key = '' gmaps = googlemaps.Client(key=gmaps_key) #Coordinates latitude = 0 longitude = 0 #URLs api_url = 'https://api.darksky.net/forecast/' excludeExceptHourly = "currently,minutely,daily" excludeExceptDaily = "currently,hourly,minutely" @client.event async def on_ready(): print('We have logged in as {0.user}'.format(client)) #help() #func: Takes message author mentionable string and returns a list of commands with an @ author #param: author: mentionable string for the author of the message def help(author): return author + "\n __**Command List:**__ \n **!help:** Displays list of commands \n **!current location:** Displays hourly weather for specified location \n **!forecast location:** Displays 5 day forecast for specified location" ### ### Helper Functions ### #get_url() #func: Recieves the message content and exclusion parameter and splits the string, takes second string and any after as location. 
Inputs into geocoder to gather coordinates and formatted address #params: message: string contents of message sent, "$cw location", exclude: string that inputs which data to exclude in API JSON request #returns URL and Location def get_url(message, exclude): temp = message.split() if len(temp) > 2: count = 1 location = "" while count < len(temp): location = location + " " + temp[count] count = count + 1 #if out of range else: try: location = temp[1] except IndexError: return "Index Error", None geocode_result = gmaps.geocode(location) #if bad input if not geocode_result: return "Input Error", None latitude = geocode_result[0]["geometry"]["location"]['lat'] longitude = geocode_result[0]["geometry"]["location"]['lng'] location = geocode_result[0]["formatted_address"] # print(geocode_result[0]["geometry"]["location"]) url = api_url + str(api_key) + "/" + str(latitude) + "," + str(longitude) + "?units=us&exclude=" + exclude return url, location #time_zone_util() #func: Recieves time in UTC and timezone and converts time to specified time zone, returns new time's hour in 12 hour format and either AM or PM def time_zone_util(time, time_zone): to_zone = tz.gettz(time_zone) new_time = int(time.astimezone(to_zone).strftime('%#I')) am_pm = time.astimezone(to_zone).strftime('%p') return new_time, am_pm ### ### Primary Functions ### #currentWeather() #func: recieves weather API JSON and the formatted address and fills list of data every 3 hours for a total of 12 hours. 
Creates image to display values #params: JSON_data is weather API JSON, location is the formatted address for location def currentWeather(json_data, location): count = 0 temp, precipChance, precipType, precipIntensity, icon = [None] * 5, [None] * 5, [None] * 5, [None] * 5, [None] * 5 time = json_data["hourly"]["data"][0]["time"] time_zone = json_data["timezone"] #Loop goes through the JSON file and outputs the temperature and precip every 4 hours for 8 hours while count < 5: hours = 3*count summary = json_data["hourly"]["summary"] temp[count]= round(json_data["hourly"]["data"][hours]["temperature"]) icon[count] = json_data["hourly"]["data"][hours]["icon"] if(icon[count] == "clear-day"): icon[count] = "clear_day" if (icon[count] == "clear-night"): icon[count] = "clear_night" if (icon[count] == "partly-cloudy-day"): icon[count] = "partly_cloudy_day" if (icon[count] == "partly-cloudy-night"): icon[count] = "partly_cloudy_night" precipChance[count] = "{:.0%}".format(json_data["hourly"]["data"][hours]["precipProbability"]) if precipChance[count] != "0%" and precipChance[count] != "1%" and precipChance[count] != "2%" and precipChance[count] != "3%" and precipChance[count] != "4%": precipType[count] = json_data["hourly"]["data"][hours]["precipType"] precipIntensity[count] = json_data["hourly"]["data"][hours]["precipIntensity"] if precipType[count] != "snow" and precipIntensity[count] <= .01: icon[count] = "drizzle" if precipType[count] != "snow" and .3 <= precipIntensity[count]: icon[count] = "storm" count = count + 1 img = Image.new('RGB', (1050, 375), color='white') #Declare fonts title_font = ImageFont.truetype('Lib/Fonts/FiraSans-ExtraBold.ttf', 50) location_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 34) summary_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 21) time_font = ImageFont.truetype('Lib/Fonts/FiraSans-ExtraBold.ttf', 31) degree_font = ImageFont.truetype('Lib/Fonts/FiraSans-SemiBold.ttf', 34) precip_font = 
ImageFont.truetype('Lib/Fonts/FiraSans-Bold.ttf', 24) precip_value_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 24) #Icons clear_day = Image.open('Lib/Icons/Sun.jpg') clear_night = Image.open('Lib/Icons/Moon.jpg') rain = Image.open('Lib/Icons/Cloud-Rain.jpg') partly_cloudy_day = Image.open('Lib/Icons/Cloud-Sun.jpg') partly_cloudy_night = Image.open('Lib/Icons/Cloud-Moon.jpg') cloudy = Image.open('Lib/Icons/Cloud.jpg') snow = Image.open('Lib/Icons/Cloud-Snow-Alt.jpg') sleet = Image.open('Lib/Icons/Cloud-Snow-Alt.jpg') wind = Image.open('Lib/Icons/Cloud-Wind.jpg') fog = Image.open('Lib/Icons/Cloud-Fog-Alt.jpg') drizzle = Image.open('Lib/Icons/Cloud-Drizzle.jpg') storm = Image.open('Lib/Icons/Cloud-Lightning.jpg') #Title + Subtitle d = ImageDraw.Draw(img) d.text((35, 11), "Hourly Forecast", font=title_font, fill='black') d.text((400, 26), location, font=location_font, fill='black') d.text((35, 68), summary, font=summary_font, fill='black') # Rectangle d.rectangle([(24, 96), (218, 352)], fill=(214, 214, 214), outline=None) d.rectangle([(226, 96), (420, 352)], fill=(214, 214, 214), outline=None) d.rectangle([(427, 96), (621, 352)], fill=(214, 214, 214), outline=None) d.rectangle([(629, 96), (823, 352)], fill=(214, 214, 214), outline=None) d.rectangle([(830, 96), (1024, 352)], fill=(214, 214, 214), outline=None) # Time from_zone = tz.gettz('UTC') utc = datetime.utcnow() time_utc = utc.replace(tzinfo = from_zone) time_hour1, am_pm1 = time_zone_util(time_utc, time_zone) time_hour2,am_pm2 = time_zone_util(time_utc + timedelta(hours=3), time_zone) time_hour3,am_pm3 = time_zone_util(time_utc + timedelta(hours=6),time_zone) time_hour4,am_pm4 = time_zone_util(time_utc + timedelta(hours=9),time_zone) time_hour5,am_pm5 = time_zone_util(time_utc + timedelta(hours=12),time_zone) # Time Width time_width, trash = d.textsize(str(time_hour1)+ am_pm1, font=time_font) time_width2, trash = d.textsize(str(time_hour2)+ am_pm2, font=time_font) time_width3, trash = 
d.textsize(str(time_hour3)+ am_pm3, font=time_font) time_width4, trash = d.textsize(str(time_hour4)+ am_pm4, font=time_font) time_width5, trash = d.textsize(str(time_hour5)+ am_pm5, font=time_font) # Time input d.text((((194 - time_width) / 2) + 24, 105), str(time_hour1) + am_pm1, font=time_font, fill="black") d.text((((194 - time_width2) / 2) + 226, 105), str(time_hour2) + am_pm2, font=time_font, fill="black") d.text((((194 - time_width3) / 2) + 427, 105), str(time_hour3) + am_pm3, font=time_font, fill="black") d.text((((194 - time_width4) / 2) + 629, 105), str(time_hour4) + am_pm4, font=time_font, fill="black") d.text((((194 - time_width5) / 2) + 830, 105), str(time_hour5) + am_pm5, font=time_font, fill="black") # Icon img.paste(eval(icon[0]), (59, 147)) img.paste(eval(icon[1]), (261, 147)) img.paste(eval(icon[2]), (462, 147)) img.paste(eval(icon[3]), (664, 147)) img.paste(eval(icon[4]), (865, 147)) # Degree Text Width temp_holder = str(str(temp[0]) + u"\u00b0" + "F") temp_width, throwaway = d.textsize(temp_holder, font=degree_font) # Degree d.text((((194 - temp_width) / 2) + 24, 263), str(temp[0]) + u"\u00b0" + "F",font=degree_font, fill="black") d.text((((194 - temp_width) / 2) + 226, 263), str(temp[1]) + u"\u00b0" + "F",font=degree_font, fill="black") d.text((((194 - temp_width) / 2) + 427, 263), str(temp[2]) + u"\u00b0" + "F",font=degree_font, fill="black") d.text((((194 - temp_width) / 2) + 629, 263), str(temp[3]) + u"\u00b0" + "F",font=degree_font, fill="black") d.text((((194 - temp_width) / 2) + 830, 263), str(temp[4]) + u"\u00b0" + "F",font=degree_font, fill="black") # Precip d.text((61, 300), "Precip", font=precip_font, fill=(43, 43, 43)) d.text((263, 300), "Precip", font=precip_font, fill=(43, 43, 43)) d.text((465, 300), "Precip", font=precip_font, fill=(43, 43, 43)) d.text((666, 300), "Precip", font=precip_font, fill=(43, 43, 43)) d.text((867, 300), "Precip", font=precip_font, fill=(43, 43, 43)) # Precip Value d.text((139, 300), str(precipChance[0]), 
font=precip_value_font, fill="black") d.text((341, 300), str(precipChance[1]), font=precip_value_font, fill="black") d.text((541, 300), str(precipChance[2]), font=precip_value_font, fill="black") d.text((744, 300), str(precipChance[3]), font=precip_value_font, fill="black") d.text((945, 300), str(precipChance[4]), font=precip_value_font, fill="black") img.save("hourly_rendered_image.png") return #forecast() #func: Recieves weather API JSON and the formatted address and fills list of data for every day for a total of 5 days. Creates image to display values #param: json_data: weather data from API, location: formatted address of location def forecast(json_data, location): count = 0 #Loop goes through the JSON file and outputs the temperature and precip every 4 hours for 8 hours icon, temp_high, temp_low, precipChance, precipType, precipIntensity = [None] * 5, [None] * 5, [None] * 5, [0] * 5, [None] * 5, [None] * 5 while count < 5: hours = count summary = json_data["daily"]["summary"] temp_high[count] = round(json_data["daily"]["data"][hours]["temperatureHigh"]) temp_low[count] = round(json_data["daily"]["data"][hours]["temperatureLow"]) icon[count] = json_data["daily"]["data"][hours]["icon"] if(icon[count] == "clear-day"): icon[count] = "clear_day" if (icon[count] == "clear-night"): icon[count] = "clear_night" if (icon[count] == "partly-cloudy-day"): icon[count] = "partly_cloudy_day" if (icon[count] == "partly-cloudy-night"): icon[count] = "partly_cloudy_night" precipChance[count] = "{:.0%}".format(json_data["daily"]["data"][hours]["precipProbability"]) #Below 4% rain type is not displayed if precipChance[count] != "0%" and precipChance[count] != "1%" and precipChance[count] != "2%" and precipChance[count] != "3%" and precipChance[count] != "4%": precipType[count] = json_data["daily"]["data"][hours]["precipType"] precipIntensity[count] = json_data["daily"]["data"][hours]["precipIntensity"] if precipType[count] != "snow" and precipIntensity[count] <= .01: icon[count] 
= "drizzle" if precipType[count] != "snow" and .3 <= precipIntensity[count]: icon[count] = "storm" count+=1 img = Image.new('RGB', (1050, 375), color='white') #Declare fonts title_font = ImageFont.truetype('Lib/Fonts/FiraSans-ExtraBold.ttf', 50) location_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 34) summary_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 21) day_font = ImageFont.truetype('Lib/Fonts/FiraSans-ExtraBold.ttf', 31) degree_font = ImageFont.truetype('Lib/Fonts/FiraSans-SemiBold.ttf', 34) precip_font = ImageFont.truetype('Lib/Fonts/FiraSans-Bold.ttf', 24) precip_value_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 24) #Day Values day_of_week = datetime.today().weekday() week = ["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"] forecast_days = [None] * 5 #For Loop to get next 5 days day_count = 0 for day_count in range(0,5): forecast_days[day_count] = week[day_of_week] day_of_week = day_of_week + 1 day_count = day_count + 1 if day_of_week == 7: day_of_week = 0 #Icons clear_day = Image.open('Lib/Icons/Sun.jpg') clear_night = Image.open('Lib/Icons/Moon.jpg') rain = Image.open('Lib/Icons/Cloud-Rain.jpg') partly_cloudy_day = Image.open('Lib/Icons/Cloud-Sun.jpg') partly_cloudy_night = Image.open('Lib/Icons/Cloud-Moon.jpg') cloudy = Image.open('Lib/Icons/Cloud.jpg') snow = Image.open('Lib/Icons/Cloud-Snow-Alt.jpg') sleet = Image.open('Lib/Icons/Cloud-Snow-Alt.jpg') wind = Image.open('Lib/Icons/Cloud-Wind.jpg') fog = Image.open('Lib/Icons/Cloud-Fog-Alt.jpg') drizzle = Image.open('Lib/Icons/Cloud-Drizzle.jpg') storm = Image.open('Lib/Icons/Cloud-Lightning.jpg') #Title + Subtitle d = ImageDraw.Draw(img) d.text((35, 11), "5 Day Forecast", font=title_font, fill='black') d.text((375, 26), location, font=location_font, fill='black') d.text((35, 68), summary, font=summary_font, fill= 'black') #Rectangle d.rectangle([(24,96), (218,352)], fill = (214,214,214), outline=None) d.rectangle([(226,96), (420,352)], 
fill = (214,214,214), outline=None) d.rectangle([(427,96), (621,352)], fill = (214,214,214), outline=None) d.rectangle([(629,96), (823,352)], fill = (214,214,214), outline=None) d.rectangle([(830,96), (1024,352)], fill = (214,214,214), outline=None) #Day of The Week Text Width text_width, trash =d.textsize(forecast_days[0], font=day_font) text_width2, trash =d.textsize(forecast_days[1], font=day_font) text_width3, trash =d.textsize(forecast_days[2], font=day_font) text_width4, trash =d.textsize(forecast_days[3], font=day_font) text_width5, trash =d.textsize(forecast_days[4], font=day_font) #Day of The Week d.text((((194 - text_width) / 2) + 24, 105), forecast_days[0], font=day_font, fill= "black") d.text((((194 - text_width2) / 2) + 226, 105), forecast_days[1], font=day_font, fill= "black") d.text((((194 - text_width3) / 2) + 427, 105), forecast_days[2], font=day_font, fill= "black") d.text((((194 - text_width4) / 2) + 629, 105), forecast_days[3], font=day_font, fill= "black") d.text((((194 - text_width5) / 2) + 830, 105), forecast_days[4], font=day_font, fill= "black") #Icon img.paste(eval(icon[0]), (59, 147)) img.paste(eval(icon[1]), (261, 147)) img.paste(eval(icon[2]), (462, 147)) img.paste(eval(icon[3]), (664, 147)) img.paste(eval(icon[4]), (865, 147)) #Degree Text Width temp_holder = str(temp_high[0]) + " - " + str(temp_low[0]) + u"\u00b0" + "F" temp_width, throwaway = d.textsize(temp_holder, font=degree_font) #Degree d.text((((194 - temp_width) / 2) + 24, 263), str(temp_high[0]) + " - " + str(temp_low[0]) + u"\u00b0" + "F", font=degree_font, fill= "black") d.text((((194 - temp_width) / 2) + 226, 263),str(temp_high[1]) + " - " + str(temp_low[1]) + u"\u00b0" + "F", font=degree_font, fill= "black") d.text((((194 - temp_width) / 2) + 427, 263), str(temp_high[2]) + " - " + str(temp_low[2]) + u"\u00b0" + "F", font=degree_font, fill= "black") d.text((((194 - temp_width) / 2) + 629, 263), str(temp_high[3]) + " - " + str(temp_low[3]) + u"\u00b0" + "F", 
font=degree_font, fill= "black") d.text((((194 - temp_width) / 2) + 830, 263), str(temp_high[4]) + " - " + str(temp_low[4]) + u"\u00b0" + "F", font=degree_font, fill= "black") #Precip d.text((61, 300), "Precip", font=precip_font, fill= (43, 43, 43)) d.text((263, 300), "Precip", font=precip_font, fill= (43, 43, 43)) d.text((465, 300), "Precip", font=precip_font, fill= (43, 43, 43)) d.text((666, 300), "Precip", font=precip_font, fill= (43, 43, 43)) d.text((867, 300), "Precip", font=precip_font, fill= (43, 43, 43)) #Precip Value d.text((139, 300), str(precipChance[0]), font=precip_value_font, fill= "black") d.text((341, 300), str(precipChance[1]), font=precip_value_font, fill= "black") d.text((541, 300), str(precipChance[2]), font=precip_value_font, fill= "black") d.text((744, 300), str(precipChance[3]), font=precip_value_font, fill= "black") d.text((945, 300), str(precipChance[4]), font=precip_value_font, fill= "black") img.save("forecast_rendered_image.png") return #Event Function that activates different functions on command message @client.event async def on_message(message): if message.author == client.user: return if message.content.startswith('!help'): output = help(message.author.mention) await message.channel.send(output) if message.content.startswith('!current'): url, location = get_url(message.content, excludeExceptHourly) print(url) if url == "Index Error" or url == "Input Error": if url == "Index Error": await message.channel.send(message.author.mention + "\n**Error:** Incorrect format, ```!current location``` ") if url == "Input Error": await message.channel.send(message.author.mention + "\n**Error:** Invalid input, input name or address of location ```!current location``` ") else: async with aiohttp.ClientSession() as session: async with session.get(url) as r: if r.status == 200: json_data = await r.json() print(await r.json()) output = currentWeather(json_data, location) await message.channel.send(file=discord.File('hourly_rendered_image.png')) if 
message.content.startswith('!forecast'): url, location = get_url(message.content, excludeExceptDaily) print(url) if url == "Index Error" or url == "Input Error": if url == "Index Error": await message.channel.send(message.author.mention + "**\nError:** Incorrect format, ```!forecast location``` ") if url == "Input Error": await message.channel.send(message.author.mention + "**\nError:** Invalid input, input name or address of location ```!forecast location``` ") else: async with aiohttp.ClientSession() as session: async with session.get(url) as r: if r.status == 200: json_data = await r.json() #print(await r.json()) output = forecast(json_data, location) await message.channel.send(file=discord.File('forecast_rendered_image.png')) client.run('.XRMUFw.-kdM')
48.726131
233
0.640437
from datetime import datetime, timedelta from dateutil import tz import discord import googlemaps import aiohttp import asyncio from PIL import Image, ImageDraw, ImageFont client = discord.Client() gmaps_key = '' api_key = '' gmaps = googlemaps.Client(key=gmaps_key) latitude = 0 longitude = 0 api_url = 'https://api.darksky.net/forecast/' excludeExceptHourly = "currently,minutely,daily" excludeExceptDaily = "currently,hourly,minutely" @client.event async def on_ready(): print('We have logged in as {0.user}'.format(client)) def help(author): return author + "\n __**Command List:**__ \n **!help:** Displays list of commands \n **!current location:** Displays hourly weather for specified location \n **!forecast location:** Displays 5 day forecast for specified location" temp = message.split() if len(temp) > 2: count = 1 location = "" while count < len(temp): location = location + " " + temp[count] count = count + 1 else: try: location = temp[1] except IndexError: return "Index Error", None geocode_result = gmaps.geocode(location) if not geocode_result: return "Input Error", None latitude = geocode_result[0]["geometry"]["location"]['lat'] longitude = geocode_result[0]["geometry"]["location"]['lng'] location = geocode_result[0]["formatted_address"] url = api_url + str(api_key) + "/" + str(latitude) + "," + str(longitude) + "?units=us&exclude=" + exclude return url, location def time_zone_util(time, time_zone): to_zone = tz.gettz(time_zone) new_time = int(time.astimezone(to_zone).strftime('% am_pm = time.astimezone(to_zone).strftime('%p') return new_time, am_pm ### ### Primary Functions ### #currentWeather() #func: recieves weather API JSON and the formatted address and fills list of data every 3 hours for a total of 12 hours. 
Creates image to display values #params: JSON_data is weather API JSON, location is the formatted address for location def currentWeather(json_data, location): count = 0 temp, precipChance, precipType, precipIntensity, icon = [None] * 5, [None] * 5, [None] * 5, [None] * 5, [None] * 5 time = json_data["hourly"]["data"][0]["time"] time_zone = json_data["timezone"] #Loop goes through the JSON file and outputs the temperature and precip every 4 hours for 8 hours while count < 5: hours = 3*count summary = json_data["hourly"]["summary"] temp[count]= round(json_data["hourly"]["data"][hours]["temperature"]) icon[count] = json_data["hourly"]["data"][hours]["icon"] if(icon[count] == "clear-day"): icon[count] = "clear_day" if (icon[count] == "clear-night"): icon[count] = "clear_night" if (icon[count] == "partly-cloudy-day"): icon[count] = "partly_cloudy_day" if (icon[count] == "partly-cloudy-night"): icon[count] = "partly_cloudy_night" precipChance[count] = "{:.0%}".format(json_data["hourly"]["data"][hours]["precipProbability"]) if precipChance[count] != "0%" and precipChance[count] != "1%" and precipChance[count] != "2%" and precipChance[count] != "3%" and precipChance[count] != "4%": precipType[count] = json_data["hourly"]["data"][hours]["precipType"] precipIntensity[count] = json_data["hourly"]["data"][hours]["precipIntensity"] if precipType[count] != "snow" and precipIntensity[count] <= .01: icon[count] = "drizzle" if precipType[count] != "snow" and .3 <= precipIntensity[count]: icon[count] = "storm" count = count + 1 img = Image.new('RGB', (1050, 375), color='white') #Declare fonts title_font = ImageFont.truetype('Lib/Fonts/FiraSans-ExtraBold.ttf', 50) location_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 34) summary_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 21) time_font = ImageFont.truetype('Lib/Fonts/FiraSans-ExtraBold.ttf', 31) degree_font = ImageFont.truetype('Lib/Fonts/FiraSans-SemiBold.ttf', 34) precip_font = 
ImageFont.truetype('Lib/Fonts/FiraSans-Bold.ttf', 24) precip_value_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 24) #Icons clear_day = Image.open('Lib/Icons/Sun.jpg') clear_night = Image.open('Lib/Icons/Moon.jpg') rain = Image.open('Lib/Icons/Cloud-Rain.jpg') partly_cloudy_day = Image.open('Lib/Icons/Cloud-Sun.jpg') partly_cloudy_night = Image.open('Lib/Icons/Cloud-Moon.jpg') cloudy = Image.open('Lib/Icons/Cloud.jpg') snow = Image.open('Lib/Icons/Cloud-Snow-Alt.jpg') sleet = Image.open('Lib/Icons/Cloud-Snow-Alt.jpg') wind = Image.open('Lib/Icons/Cloud-Wind.jpg') fog = Image.open('Lib/Icons/Cloud-Fog-Alt.jpg') drizzle = Image.open('Lib/Icons/Cloud-Drizzle.jpg') storm = Image.open('Lib/Icons/Cloud-Lightning.jpg') #Title + Subtitle d = ImageDraw.Draw(img) d.text((35, 11), "Hourly Forecast", font=title_font, fill='black') d.text((400, 26), location, font=location_font, fill='black') d.text((35, 68), summary, font=summary_font, fill='black') # Rectangle d.rectangle([(24, 96), (218, 352)], fill=(214, 214, 214), outline=None) d.rectangle([(226, 96), (420, 352)], fill=(214, 214, 214), outline=None) d.rectangle([(427, 96), (621, 352)], fill=(214, 214, 214), outline=None) d.rectangle([(629, 96), (823, 352)], fill=(214, 214, 214), outline=None) d.rectangle([(830, 96), (1024, 352)], fill=(214, 214, 214), outline=None) # Time from_zone = tz.gettz('UTC') utc = datetime.utcnow() time_utc = utc.replace(tzinfo = from_zone) time_hour1, am_pm1 = time_zone_util(time_utc, time_zone) time_hour2,am_pm2 = time_zone_util(time_utc + timedelta(hours=3), time_zone) time_hour3,am_pm3 = time_zone_util(time_utc + timedelta(hours=6),time_zone) time_hour4,am_pm4 = time_zone_util(time_utc + timedelta(hours=9),time_zone) time_hour5,am_pm5 = time_zone_util(time_utc + timedelta(hours=12),time_zone) # Time Width time_width, trash = d.textsize(str(time_hour1)+ am_pm1, font=time_font) time_width2, trash = d.textsize(str(time_hour2)+ am_pm2, font=time_font) time_width3, trash = 
d.textsize(str(time_hour3)+ am_pm3, font=time_font) time_width4, trash = d.textsize(str(time_hour4)+ am_pm4, font=time_font) time_width5, trash = d.textsize(str(time_hour5)+ am_pm5, font=time_font) # Time input d.text((((194 - time_width) / 2) + 24, 105), str(time_hour1) + am_pm1, font=time_font, fill="black") d.text((((194 - time_width2) / 2) + 226, 105), str(time_hour2) + am_pm2, font=time_font, fill="black") d.text((((194 - time_width3) / 2) + 427, 105), str(time_hour3) + am_pm3, font=time_font, fill="black") d.text((((194 - time_width4) / 2) + 629, 105), str(time_hour4) + am_pm4, font=time_font, fill="black") d.text((((194 - time_width5) / 2) + 830, 105), str(time_hour5) + am_pm5, font=time_font, fill="black") # Icon img.paste(eval(icon[0]), (59, 147)) img.paste(eval(icon[1]), (261, 147)) img.paste(eval(icon[2]), (462, 147)) img.paste(eval(icon[3]), (664, 147)) img.paste(eval(icon[4]), (865, 147)) # Degree Text Width temp_holder = str(str(temp[0]) + u"\u00b0" + "F") temp_width, throwaway = d.textsize(temp_holder, font=degree_font) # Degree d.text((((194 - temp_width) / 2) + 24, 263), str(temp[0]) + u"\u00b0" + "F",font=degree_font, fill="black") d.text((((194 - temp_width) / 2) + 226, 263), str(temp[1]) + u"\u00b0" + "F",font=degree_font, fill="black") d.text((((194 - temp_width) / 2) + 427, 263), str(temp[2]) + u"\u00b0" + "F",font=degree_font, fill="black") d.text((((194 - temp_width) / 2) + 629, 263), str(temp[3]) + u"\u00b0" + "F",font=degree_font, fill="black") d.text((((194 - temp_width) / 2) + 830, 263), str(temp[4]) + u"\u00b0" + "F",font=degree_font, fill="black") # Precip d.text((61, 300), "Precip", font=precip_font, fill=(43, 43, 43)) d.text((263, 300), "Precip", font=precip_font, fill=(43, 43, 43)) d.text((465, 300), "Precip", font=precip_font, fill=(43, 43, 43)) d.text((666, 300), "Precip", font=precip_font, fill=(43, 43, 43)) d.text((867, 300), "Precip", font=precip_font, fill=(43, 43, 43)) # Precip Value d.text((139, 300), str(precipChance[0]), 
font=precip_value_font, fill="black") d.text((341, 300), str(precipChance[1]), font=precip_value_font, fill="black") d.text((541, 300), str(precipChance[2]), font=precip_value_font, fill="black") d.text((744, 300), str(precipChance[3]), font=precip_value_font, fill="black") d.text((945, 300), str(precipChance[4]), font=precip_value_font, fill="black") img.save("hourly_rendered_image.png") return #forecast() #func: Recieves weather API JSON and the formatted address and fills list of data for every day for a total of 5 days. Creates image to display values #param: json_data: weather data from API, location: formatted address of location def forecast(json_data, location): count = 0 #Loop goes through the JSON file and outputs the temperature and precip every 4 hours for 8 hours icon, temp_high, temp_low, precipChance, precipType, precipIntensity = [None] * 5, [None] * 5, [None] * 5, [0] * 5, [None] * 5, [None] * 5 while count < 5: hours = count summary = json_data["daily"]["summary"] temp_high[count] = round(json_data["daily"]["data"][hours]["temperatureHigh"]) temp_low[count] = round(json_data["daily"]["data"][hours]["temperatureLow"]) icon[count] = json_data["daily"]["data"][hours]["icon"] if(icon[count] == "clear-day"): icon[count] = "clear_day" if (icon[count] == "clear-night"): icon[count] = "clear_night" if (icon[count] == "partly-cloudy-day"): icon[count] = "partly_cloudy_day" if (icon[count] == "partly-cloudy-night"): icon[count] = "partly_cloudy_night" precipChance[count] = "{:.0%}".format(json_data["daily"]["data"][hours]["precipProbability"]) #Below 4% rain type is not displayed if precipChance[count] != "0%" and precipChance[count] != "1%" and precipChance[count] != "2%" and precipChance[count] != "3%" and precipChance[count] != "4%": precipType[count] = json_data["daily"]["data"][hours]["precipType"] precipIntensity[count] = json_data["daily"]["data"][hours]["precipIntensity"] if precipType[count] != "snow" and precipIntensity[count] <= .01: icon[count] 
= "drizzle" if precipType[count] != "snow" and .3 <= precipIntensity[count]: icon[count] = "storm" count+=1 img = Image.new('RGB', (1050, 375), color='white') #Declare fonts title_font = ImageFont.truetype('Lib/Fonts/FiraSans-ExtraBold.ttf', 50) location_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 34) summary_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 21) day_font = ImageFont.truetype('Lib/Fonts/FiraSans-ExtraBold.ttf', 31) degree_font = ImageFont.truetype('Lib/Fonts/FiraSans-SemiBold.ttf', 34) precip_font = ImageFont.truetype('Lib/Fonts/FiraSans-Bold.ttf', 24) precip_value_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 24) #Day Values day_of_week = datetime.today().weekday() week = ["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"] forecast_days = [None] * 5 #For Loop to get next 5 days day_count = 0 for day_count in range(0,5): forecast_days[day_count] = week[day_of_week] day_of_week = day_of_week + 1 day_count = day_count + 1 if day_of_week == 7: day_of_week = 0 #Icons clear_day = Image.open('Lib/Icons/Sun.jpg') clear_night = Image.open('Lib/Icons/Moon.jpg') rain = Image.open('Lib/Icons/Cloud-Rain.jpg') partly_cloudy_day = Image.open('Lib/Icons/Cloud-Sun.jpg') partly_cloudy_night = Image.open('Lib/Icons/Cloud-Moon.jpg') cloudy = Image.open('Lib/Icons/Cloud.jpg') snow = Image.open('Lib/Icons/Cloud-Snow-Alt.jpg') sleet = Image.open('Lib/Icons/Cloud-Snow-Alt.jpg') wind = Image.open('Lib/Icons/Cloud-Wind.jpg') fog = Image.open('Lib/Icons/Cloud-Fog-Alt.jpg') drizzle = Image.open('Lib/Icons/Cloud-Drizzle.jpg') storm = Image.open('Lib/Icons/Cloud-Lightning.jpg') #Title + Subtitle d = ImageDraw.Draw(img) d.text((35, 11), "5 Day Forecast", font=title_font, fill='black') d.text((375, 26), location, font=location_font, fill='black') d.text((35, 68), summary, font=summary_font, fill= 'black') #Rectangle d.rectangle([(24,96), (218,352)], fill = (214,214,214), outline=None) d.rectangle([(226,96), (420,352)], 
fill = (214,214,214), outline=None) d.rectangle([(427,96), (621,352)], fill = (214,214,214), outline=None) d.rectangle([(629,96), (823,352)], fill = (214,214,214), outline=None) d.rectangle([(830,96), (1024,352)], fill = (214,214,214), outline=None) #Day of The Week Text Width text_width, trash =d.textsize(forecast_days[0], font=day_font) text_width2, trash =d.textsize(forecast_days[1], font=day_font) text_width3, trash =d.textsize(forecast_days[2], font=day_font) text_width4, trash =d.textsize(forecast_days[3], font=day_font) text_width5, trash =d.textsize(forecast_days[4], font=day_font) #Day of The Week d.text((((194 - text_width) / 2) + 24, 105), forecast_days[0], font=day_font, fill= "black") d.text((((194 - text_width2) / 2) + 226, 105), forecast_days[1], font=day_font, fill= "black") d.text((((194 - text_width3) / 2) + 427, 105), forecast_days[2], font=day_font, fill= "black") d.text((((194 - text_width4) / 2) + 629, 105), forecast_days[3], font=day_font, fill= "black") d.text((((194 - text_width5) / 2) + 830, 105), forecast_days[4], font=day_font, fill= "black") #Icon img.paste(eval(icon[0]), (59, 147)) img.paste(eval(icon[1]), (261, 147)) img.paste(eval(icon[2]), (462, 147)) img.paste(eval(icon[3]), (664, 147)) img.paste(eval(icon[4]), (865, 147)) #Degree Text Width temp_holder = str(temp_high[0]) + " - " + str(temp_low[0]) + u"\u00b0" + "F" temp_width, throwaway = d.textsize(temp_holder, font=degree_font) #Degree d.text((((194 - temp_width) / 2) + 24, 263), str(temp_high[0]) + " - " + str(temp_low[0]) + u"\u00b0" + "F", font=degree_font, fill= "black") d.text((((194 - temp_width) / 2) + 226, 263),str(temp_high[1]) + " - " + str(temp_low[1]) + u"\u00b0" + "F", font=degree_font, fill= "black") d.text((((194 - temp_width) / 2) + 427, 263), str(temp_high[2]) + " - " + str(temp_low[2]) + u"\u00b0" + "F", font=degree_font, fill= "black") d.text((((194 - temp_width) / 2) + 629, 263), str(temp_high[3]) + " - " + str(temp_low[3]) + u"\u00b0" + "F", 
font=degree_font, fill= "black") d.text((((194 - temp_width) / 2) + 830, 263), str(temp_high[4]) + " - " + str(temp_low[4]) + u"\u00b0" + "F", font=degree_font, fill= "black") #Precip d.text((61, 300), "Precip", font=precip_font, fill= (43, 43, 43)) d.text((263, 300), "Precip", font=precip_font, fill= (43, 43, 43)) d.text((465, 300), "Precip", font=precip_font, fill= (43, 43, 43)) d.text((666, 300), "Precip", font=precip_font, fill= (43, 43, 43)) d.text((867, 300), "Precip", font=precip_font, fill= (43, 43, 43)) #Precip Value d.text((139, 300), str(precipChance[0]), font=precip_value_font, fill= "black") d.text((341, 300), str(precipChance[1]), font=precip_value_font, fill= "black") d.text((541, 300), str(precipChance[2]), font=precip_value_font, fill= "black") d.text((744, 300), str(precipChance[3]), font=precip_value_font, fill= "black") d.text((945, 300), str(precipChance[4]), font=precip_value_font, fill= "black") img.save("forecast_rendered_image.png") return #Event Function that activates different functions on command message @client.event async def on_message(message): if message.author == client.user: return if message.content.startswith('!help'): output = help(message.author.mention) await message.channel.send(output) if message.content.startswith('!current'): url, location = get_url(message.content, excludeExceptHourly) print(url) if url == "Index Error" or url == "Input Error": if url == "Index Error": await message.channel.send(message.author.mention + "\n**Error:** Incorrect format, ```!current location``` ") if url == "Input Error": await message.channel.send(message.author.mention + "\n**Error:** Invalid input, input name or address of location ```!current location``` ") else: async with aiohttp.ClientSession() as session: async with session.get(url) as r: if r.status == 200: json_data = await r.json() print(await r.json()) output = currentWeather(json_data, location) await message.channel.send(file=discord.File('hourly_rendered_image.png')) if 
message.content.startswith('!forecast'): url, location = get_url(message.content, excludeExceptDaily) print(url) if url == "Index Error" or url == "Input Error": if url == "Index Error": await message.channel.send(message.author.mention + "**\nError:** Incorrect format, ```!forecast location``` ") if url == "Input Error": await message.channel.send(message.author.mention + "**\nError:** Invalid input, input name or address of location ```!forecast location``` ") else: async with aiohttp.ClientSession() as session: async with session.get(url) as r: if r.status == 200: json_data = await r.json() #print(await r.json()) output = forecast(json_data, location) await message.channel.send(file=discord.File('forecast_rendered_image.png')) client.run('.XRMUFw.-kdM')
true
true
f70fd1d2052388cfe0df232571e7e732dc7273e5
3,702
py
Python
PythonClient/Framework/CmdRotate.py
SweetShot/AirSim
d43269f9387fdac03298d14416ecf6af43b6fd12
[ "MIT" ]
null
null
null
PythonClient/Framework/CmdRotate.py
SweetShot/AirSim
d43269f9387fdac03298d14416ecf6af43b6fd12
[ "MIT" ]
null
null
null
PythonClient/Framework/CmdRotate.py
SweetShot/AirSim
d43269f9387fdac03298d14416ecf6af43b6fd12
[ "MIT" ]
2
2018-03-07T18:23:42.000Z
2020-02-12T19:58:32.000Z
from CmdBase import * from PersistentModules import * # Cmd # turn left deg # turn right deg # turn to deg # turn rate deg class CmdRotate(CmdBase): def __init__(self, controller, line, engage_object = None): super().__init__(controller, line, engage_object) def start(self): self.mystate_module = self.get_persistent_module('mystate') self.constants_module = self.get_persistent_module('constants') self.intent_provider_module = self.get_persistent_module('intent_provider') self.full_rate = 60 self.low_rate = 10 # Note here we get yaw in radians but later we set it in deg pitch, roll, yaw = AirSimClientBase.toEulerianAngle(self.mystate_module.get_orientation()) #print("original yaw {0}".format(yaw)) if yaw < 0: yaw = 2 * 3.14 + yaw #print("updated yaw {0}".format(yaw)) if (self.line[1] in ['left', 'right', 'to']): delta = float(self.line[2])*3.14/180 if self.line[1] == 'left': self.full_rate *= -1 self.low_rate *= -1 yaw -= delta elif self.line[1] == 'right': yaw += delta elif self.line[1] == 'to': side = 1 # right side if delta > yaw + 3.14 or (yaw - delta < 3.14 and yaw - delta > 0): # left side # consider current yaw is 0 side = -1 self.full_rate *= side self.low_rate *= side yaw = delta #print("updated 2 yaw {0}".format(yaw)) if yaw > 3.14: yaw = -2 * 3.14 + yaw #print("final yaw {0}".format(yaw)) self.final_yaw = yaw self.intent_provider_module.submit_intent(CmdRotate.__name__, PModHIntents.ROTATE, [pitch, roll, yaw]) else: # rate self.rate = float(self.line[2]) self.intent_provider_module.submit_intent(CmdRotate.__name__, PModHIntents.ROTATE, [self.rate]) def update(self): if self.line[1] in ['left', 'right', 'to']: yaw = AirSimClientBase.toEulerianAngle(self.mystate_module.get_orientation())[2] if yaw < 0: yaw = 2 * 3.14 + yaw # Check if movement is complete or < 0.1 angle distance, anyway thats offset # dist to angle dist = min(abs(self.final_yaw - yaw), 2 * 3.14 - abs(self.final_yaw - yaw)) #print('{0} {1} {2}'.format(self.final_yaw, yaw, dist)) if abs(dist) 
< 0.1: self.get_client().hover() self.intent_provider_module.mark_as_complete(CmdRotate.__name__) if self.engage_object != None: self.engage_object.mark_done() return True # Note that this call is cancellable if other movement related call is called if abs(dist) < 0.5: self.get_client().rotateByYawRate(self.low_rate, 0.5) # note that this fun uses in degrees (inconsistency) else: # on full rate self.get_client().rotateByYawRate(self.full_rate, 0.5) # note that this fun uses in degrees (inconsistency) return False else: # Rate self.get_client().rotateByYawRate(self.rate, 0.5) # Update other can_process def can_process(line): try: if line[0] in ['turn'] and line[1] in ['left', 'right', 'to', 'rate'] and type(float(line[2])) is float: return True return False except: # some error only if command not proper return False
44.071429
123
0.555105
from CmdBase import * from PersistentModules import * class CmdRotate(CmdBase): def __init__(self, controller, line, engage_object = None): super().__init__(controller, line, engage_object) def start(self): self.mystate_module = self.get_persistent_module('mystate') self.constants_module = self.get_persistent_module('constants') self.intent_provider_module = self.get_persistent_module('intent_provider') self.full_rate = 60 self.low_rate = 10 pitch, roll, yaw = AirSimClientBase.toEulerianAngle(self.mystate_module.get_orientation()) if yaw < 0: yaw = 2 * 3.14 + yaw if (self.line[1] in ['left', 'right', 'to']): delta = float(self.line[2])*3.14/180 if self.line[1] == 'left': self.full_rate *= -1 self.low_rate *= -1 yaw -= delta elif self.line[1] == 'right': yaw += delta elif self.line[1] == 'to': side = 1 if delta > yaw + 3.14 or (yaw - delta < 3.14 and yaw - delta > 0): -1 self.full_rate *= side self.low_rate *= side yaw = delta if yaw > 3.14: yaw = -2 * 3.14 + yaw self.final_yaw = yaw self.intent_provider_module.submit_intent(CmdRotate.__name__, PModHIntents.ROTATE, [pitch, roll, yaw]) else: self.rate = float(self.line[2]) self.intent_provider_module.submit_intent(CmdRotate.__name__, PModHIntents.ROTATE, [self.rate]) def update(self): if self.line[1] in ['left', 'right', 'to']: yaw = AirSimClientBase.toEulerianAngle(self.mystate_module.get_orientation())[2] if yaw < 0: yaw = 2 * 3.14 + yaw dist = min(abs(self.final_yaw - yaw), 2 * 3.14 - abs(self.final_yaw - yaw)) if abs(dist) < 0.1: self.get_client().hover() self.intent_provider_module.mark_as_complete(CmdRotate.__name__) if self.engage_object != None: self.engage_object.mark_done() return True if abs(dist) < 0.5: self.get_client().rotateByYawRate(self.low_rate, 0.5) else: self.get_client().rotateByYawRate(self.full_rate, 0.5) return False else: self.get_client().rotateByYawRate(self.rate, 0.5) def can_process(line): try: if line[0] in ['turn'] and line[1] in ['left', 'right', 'to', 'rate'] and type(float(line[2])) 
is float: return True return False except: return False
true
true
f70fd24934cbde2839dbd458ccd8f3b8b099a413
4,017
py
Python
tests/model/test_priors.py
ihmeuw/cascade-at
a5b1b5da1698163fd3bbafc6288968dd9c398096
[ "MIT" ]
1
2019-10-14T23:18:04.000Z
2019-10-14T23:18:04.000Z
tests/model/test_priors.py
ihmeuw/cascade
a5b1b5da1698163fd3bbafc6288968dd9c398096
[ "MIT" ]
35
2018-07-17T18:37:33.000Z
2020-03-06T13:31:35.000Z
tests/model/test_priors.py
ihmeuw/cascade
a5b1b5da1698163fd3bbafc6288968dd9c398096
[ "MIT" ]
4
2018-07-13T00:01:35.000Z
2019-09-02T23:56:11.000Z
import pytest import numpy as np from numpy import isclose from numpy.random import RandomState from cascade_at.model.priors import ( Constant, Gaussian, Uniform, Laplace, StudentsT, LogGaussian, LogLaplace, LogStudentsT, PriorError, ) def test_happy_construction(): Uniform(-1, 1, 0, name="test") Uniform(-1, 1, 0, 0.5, name="test") Gaussian(0, 1, -10, 10, name="test2") Gaussian(0, 1, -10, 10, 0.5, name="test2") Laplace(0, 1, -10, 10, name="test3") Laplace(0, 1, -10, 10, 0.5, name="test3") StudentsT(0, 1, 2.5, -10, 10, name="test4") LogGaussian(0, 1, 0.5, -10, 10, name="test5") LogLaplace(0, 1, 0.5, -10, 10, name="test6") LogStudentsT(0, 1, 2.5, 0.5, -10, 10, name="test7") def test_prior_equality(): a = Gaussian(0, 1) b = Gaussian(0, 1) assert a == b a = Gaussian(0, 1, -1, 1) b = Gaussian(0, 1, -1, 1) assert a == b a = Uniform(0, 10) b = Uniform(0, 10) assert a == b a = Uniform(0, 10, name="test_prior") b = Uniform(0, 10, name="test_prior") assert a == b def test_prior_nonequality(): a = Gaussian(0, 1) b = Gaussian(1, 1) assert a != b a = Uniform(0, 1) b = Uniform(-1, 0) assert a != b a = Gaussian(0, 1, name="test_prior") b = Gaussian(0, 1, name="other_test_prior") assert a != b a = Gaussian(0, 1) b = Uniform(0, 1) assert a != b def test_prior_sort(): priors = [ Uniform(lower=1e-10, upper=1, mean=5e-5, name="iota"), Gaussian(0, 1, name="other_test_prior"), Uniform(0, 1), ] # NOTE: This is a weak test of actual sorting behavior however all I # actually care about is that the sort is stable, I don't really care # what the order is assert sorted(priors) == sorted(reversed(priors)) def test_prior_hashing(): s = {Gaussian(0, 1), Uniform(0, 1), Gaussian(0, 1), Uniform(0, 2), Uniform(0, 1)} assert len(s) == 3 assert Gaussian(0, 1) in s assert Uniform(0, 10) not in s def test_prior_hashing__near_miss(): assert hash(Gaussian(0, 1.0000000000000001)) == hash(Gaussian(0, 1)) assert hash(Gaussian(0, 1.000000000000001)) != hash(Gaussian(0, 1)) def test_bounds_check(): with 
pytest.raises(PriorError) as excinfo: Uniform(0, -1, 1) assert "Bounds are inconsistent" in str(excinfo.value) def test_validate_standard_deviation(): with pytest.raises(PriorError) as excinfo: Gaussian(0, -1) assert "must be positive" in str(excinfo.value) @pytest.mark.parametrize("bad_nu", [-1, -3, 0, 2, 1.99]) def test_validate_nu(bad_nu): with pytest.raises(PriorError) as excinfo: StudentsT(0, 1, bad_nu) assert "must be greater" in str(excinfo.value) @pytest.fixture def rng(): return RandomState(34257234) def test_const_fit(): """A constant distribution is unchanged.""" dist = Constant(0.023) assert isclose(dist.rvs(), 0.023) assert isclose(dist.mle([6, 24, 327]).mean, 0.023) def test_uniform_fit(rng): dist = Uniform(-0.4, 0.6, 0.5) draws = dist.rvs(size=10000, random_state=rng) new_dist = dist.mle(draws) assert isclose(new_dist.mean, 0.1, atol=0.01) @pytest.mark.parametrize("cls,params", [ (Gaussian, (0.1, 1, -10, 10)), (Gaussian, (0.1, 1, 0, 0.2)), (Laplace, (0, 1, -10, 10)), (StudentsT, (0, 1, 2.7, -10, 10)), ]) def test_mle(cls, params, rng): dist = cls(*params) draw_dist = dist if hasattr(dist, "mean"): draw_dist = draw_dist.assign(mean=0.1) if hasattr(dist, "standard_deviation"): draw_dist = draw_dist.assign(standard_deviation=0.04) draws = draw_dist.rvs(size=10000, random_state=rng) assert np.all((dist.lower <= draws) & (draws <= dist.upper)) new_dist = dist.mle(draws) if hasattr(dist, "mean"): assert isclose(new_dist.mean, 0.1, rtol=0.2) if hasattr(dist, "standard_deviation"): assert isclose(new_dist.standard_deviation, 0.04, rtol=0.2)
25.916129
85
0.618123
import pytest import numpy as np from numpy import isclose from numpy.random import RandomState from cascade_at.model.priors import ( Constant, Gaussian, Uniform, Laplace, StudentsT, LogGaussian, LogLaplace, LogStudentsT, PriorError, ) def test_happy_construction(): Uniform(-1, 1, 0, name="test") Uniform(-1, 1, 0, 0.5, name="test") Gaussian(0, 1, -10, 10, name="test2") Gaussian(0, 1, -10, 10, 0.5, name="test2") Laplace(0, 1, -10, 10, name="test3") Laplace(0, 1, -10, 10, 0.5, name="test3") StudentsT(0, 1, 2.5, -10, 10, name="test4") LogGaussian(0, 1, 0.5, -10, 10, name="test5") LogLaplace(0, 1, 0.5, -10, 10, name="test6") LogStudentsT(0, 1, 2.5, 0.5, -10, 10, name="test7") def test_prior_equality(): a = Gaussian(0, 1) b = Gaussian(0, 1) assert a == b a = Gaussian(0, 1, -1, 1) b = Gaussian(0, 1, -1, 1) assert a == b a = Uniform(0, 10) b = Uniform(0, 10) assert a == b a = Uniform(0, 10, name="test_prior") b = Uniform(0, 10, name="test_prior") assert a == b def test_prior_nonequality(): a = Gaussian(0, 1) b = Gaussian(1, 1) assert a != b a = Uniform(0, 1) b = Uniform(-1, 0) assert a != b a = Gaussian(0, 1, name="test_prior") b = Gaussian(0, 1, name="other_test_prior") assert a != b a = Gaussian(0, 1) b = Uniform(0, 1) assert a != b def test_prior_sort(): priors = [ Uniform(lower=1e-10, upper=1, mean=5e-5, name="iota"), Gaussian(0, 1, name="other_test_prior"), Uniform(0, 1), ] # what the order is assert sorted(priors) == sorted(reversed(priors)) def test_prior_hashing(): s = {Gaussian(0, 1), Uniform(0, 1), Gaussian(0, 1), Uniform(0, 2), Uniform(0, 1)} assert len(s) == 3 assert Gaussian(0, 1) in s assert Uniform(0, 10) not in s def test_prior_hashing__near_miss(): assert hash(Gaussian(0, 1.0000000000000001)) == hash(Gaussian(0, 1)) assert hash(Gaussian(0, 1.000000000000001)) != hash(Gaussian(0, 1)) def test_bounds_check(): with pytest.raises(PriorError) as excinfo: Uniform(0, -1, 1) assert "Bounds are inconsistent" in str(excinfo.value) def 
test_validate_standard_deviation(): with pytest.raises(PriorError) as excinfo: Gaussian(0, -1) assert "must be positive" in str(excinfo.value) @pytest.mark.parametrize("bad_nu", [-1, -3, 0, 2, 1.99]) def test_validate_nu(bad_nu): with pytest.raises(PriorError) as excinfo: StudentsT(0, 1, bad_nu) assert "must be greater" in str(excinfo.value) @pytest.fixture def rng(): return RandomState(34257234) def test_const_fit(): dist = Constant(0.023) assert isclose(dist.rvs(), 0.023) assert isclose(dist.mle([6, 24, 327]).mean, 0.023) def test_uniform_fit(rng): dist = Uniform(-0.4, 0.6, 0.5) draws = dist.rvs(size=10000, random_state=rng) new_dist = dist.mle(draws) assert isclose(new_dist.mean, 0.1, atol=0.01) @pytest.mark.parametrize("cls,params", [ (Gaussian, (0.1, 1, -10, 10)), (Gaussian, (0.1, 1, 0, 0.2)), (Laplace, (0, 1, -10, 10)), (StudentsT, (0, 1, 2.7, -10, 10)), ]) def test_mle(cls, params, rng): dist = cls(*params) draw_dist = dist if hasattr(dist, "mean"): draw_dist = draw_dist.assign(mean=0.1) if hasattr(dist, "standard_deviation"): draw_dist = draw_dist.assign(standard_deviation=0.04) draws = draw_dist.rvs(size=10000, random_state=rng) assert np.all((dist.lower <= draws) & (draws <= dist.upper)) new_dist = dist.mle(draws) if hasattr(dist, "mean"): assert isclose(new_dist.mean, 0.1, rtol=0.2) if hasattr(dist, "standard_deviation"): assert isclose(new_dist.standard_deviation, 0.04, rtol=0.2)
true
true
f70fd2fa94d673fff6f6c9d257ae2d677caba319
13,298
py
Python
models/rcnn.py
jiangbestone/DetectRccn
fb30491201f8c64d5ca75298d52aa1a20c4bc6e3
[ "MIT" ]
null
null
null
models/rcnn.py
jiangbestone/DetectRccn
fb30491201f8c64d5ca75298d52aa1a20c4bc6e3
[ "MIT" ]
null
null
null
models/rcnn.py
jiangbestone/DetectRccn
fb30491201f8c64d5ca75298d52aa1a20c4bc6e3
[ "MIT" ]
null
null
null
from torch.autograd import Variable from models.proposal_target_layer_cascade import * import torchvision.models as models from models.proposal import * #bocknet class ResNet(nn.Module): def __init__(self, block, layers, num_classes=1000,dropout_prob=0.2): self.inplanes = 64 super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.dropout = nn.Dropout(p=dropout_prob) self.avgpool = nn.AvgPool2d(7) self.fc = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.dropout(x) x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) return x class _fasterRCNN(nn.Module): """ faster RCNN """ def __init__(self, classes, class_agnostic): super(_fasterRCNN, self).__init__() self.classes = classes self.n_classes = len(classes) self.class_agnostic = class_agnostic # loss self.RCNN_loss_cls = 0 self.RCNN_loss_bbox = 0 def forward(self, im_data, im_info, gt_boxes, num_boxes): batch_size = im_data.size(0) im_info = im_info.data gt_boxes = gt_boxes.data num_boxes = num_boxes.data # feed image cfgs to base model to obtain base feature map base_feat = self.RCNN_base(im_data) # feed base feature map to RPN to obtain rois rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes) # if it is training phase, then use ground truth bboxes for refining if self.training: roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes) rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws = roi_data rois_label = Variable(rois_label.view(-1).long()) else: rois_label = None rpn_loss_cls = 0 rpn_loss_bbox = 0 rois = Variable(rois) # do roi pooling based on predicted rois pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1,5)) # feed 
pooled features to top model pooled_feat = self._head_to_tail(pooled_feat) # compute bbox offset bbox_pred = self.RCNN_bbox_pred(pooled_feat) if self.training and not self.class_agnostic: # select the corresponding columns according to roi labels bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int(bbox_pred.size(1) / 4), 4) bbox_pred_select = torch.gather(bbox_pred_view, 1, rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4)) bbox_pred = bbox_pred_select.squeeze(1) # compute object classification probability cls_score = self.RCNN_cls_score(pooled_feat) cls_prob = F.softmax(cls_score, 1) RCNN_loss_cls = 0 RCNN_loss_bbox = 0 cls_prob = cls_prob.view(batch_size, rois.size(1), -1) bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1) return rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, rois_label def _init_weights(self): def normal_init(m, mean, stddev, truncated=False): """ weight initalizer: truncated normal and random normal. 
""" # x is a parameter if truncated: m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation else: m.weight.data.normal_(mean, stddev) m.bias.data.zero_() def create_architecture(self): self._init_modules() self._init_weights() # class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Model(nn.Module): def __init__(self, model_cfg='datanet.yaml', ch=3, nc=None): super(Model, self).__init__() if type(model_cfg) is dict: self.md = model_cfg else: import yaml with open(model_cfg) as f: self.md = yaml.load(f, Loader=yaml.FullLoader) if nc and nc != self.md['nc']: print('Overriding %s nc=%g with nc=%g' % (model_cfg, self.md['nc'], nc)) self.md['nc'] = nc self.model, self.save = BasicBlock(self.md, ch=[ch]) m = self.model[-1] if isinstance(m, Detect): s = 128 m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) m.anchors /= m.stride.view(-1, 1, 1) check_anchor_order(m) self.stride = m.stride self._initialize_biases() torch_utils.initialize_weights(self) self._initialize_biases() torch_utils.model_info(self) print('') def forward(self, x, augment=False, profile=False): if augment: img_size = x.shape[-2:] s = [0.83, 0.67] y = [] for i, xi in enumerate((x, torch_utils.scale_img(x.flip(3), s[0]), torch_utils.scale_img(x, s[1]), )): y.append(self.forward_once(xi)[0]) y[1][..., :4] /= s[0] # scale y[1][..., 0] = img_size[1] - y[1][..., 0] # flip lr y[2][..., :4] /= s[1] # scale return torch.cat(y, 1), None else: return 
self.forward_once(x, profile) def forward_once(self, x, profile=False): y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers if profile: try: import thop o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # FLOPS except: o = 0 t = torch_utils.time_synchronized() for _ in range(10): _ = m(x) dt.append((torch_utils.time_synchronized() - t) * 100) print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) x = m(x) # run y.append(x if m.i in self.save else None) # save output if profile: print('%.1fms total' % sum(dt)) return x def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency m = self.model[-1] # Detect() module for f, s in zip(m.f, m.stride): #  from mi = self.model[f % m.i] b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) b[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) b[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) def _print_biases(self): m = self.model[-1] # Detect() module for f in sorted([x % m.i for x in m.f]): #  from b = self.model[f].bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) print(('%g Conv2d.bias:' + '%10.3g' * 6) % (f, *b[:5].mean(1).tolist(), b[5:].mean())) def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers print('Fusing layers... 
', end='') for m in self.model.modules(): if type(m) is Conv: m.conv = torch_utils.fuse_conv_and_bn(m.conv, m.bn) # update conv m.bn = None # remove batchnorm m.forward = m.fuseforward # update forward torch_utils.model_info(self) return self def BasicBlock(runwget, ch): anchors, nc, gd, gw = runwget['anchors'], runwget['nc'], runwget['depth_multiple'], runwget['width_multiple'] na = (len(anchors[0]) // 2) # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out for i, (f, n, m, args) in enumerate(runwget['backbone'] + runwget['head']): # from, number, module, args m = eval(m) if isinstance(m, str) else m # eval strings for j, a in enumerate(args): try: args[j] = eval(a) if isinstance(a, str) else a # eval strings except: pass n = max(round(n * gd), 1) if n > 1 else n # depth gain if m in [nn.Conv2d, Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]: c1, c2 = ch[f], args[0] c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 args = [c1, c2, *args[1:]] if m in [BottleneckCSP, C3]: args.insert(2, n) n = 1 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: c2 = sum([ch[-1 if x == -1 else x + 1] for x in f]) elif m is Detect: f = f or list(reversed([(-1 if j == i else j - 1) for j, x in enumerate(ch) if x == no])) else: c2 = ch[f] m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module t = str(m)[8:-2].replace('__main__.', '') # module type np = sum([x.numel() for x in m_.parameters()]) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) ch.append(c2) return nn.Sequential(*layers), sorted(save) class vgg16(_fasterRCNN): def __init__(self, classes, pretrained=False, class_agnostic=False): self.model_path = 'cfgs/pretrained_model/vgg16_caffe.pth' 
self.dout_base_model = 512 self.pretrained = pretrained self.class_agnostic = class_agnostic _fasterRCNN.__init__(self, classes, class_agnostic) def _init_modules(self): vgg = models.vgg16() if self.pretrained: print("Loading pretrained weights from %s" % (self.model_path)) state_dict = torch.load(self.model_path) vgg.load_state_dict({k: v for k, v in state_dict.items() if k in vgg.state_dict()}) vgg.classifier = nn.Sequential(*list(vgg.classifier._modules.values())[:-1]) self.RCNN_base = nn.Sequential(*list(vgg.features._modules.values())[:-1]) for layer in range(10): for p in self.RCNN_base[layer].parameters(): p.requires_grad = False self.RCNN_top = vgg.classifier self.RCNN_cls_score = nn.Linear(4096, self.n_classes) if self.class_agnostic: self.RCNN_bbox_pred = nn.Linear(4096, 4) else: self.RCNN_bbox_pred = nn.Linear(4096, 4 * self.n_classes) def _head_to_tail(self, pool5): pool5_flat = pool5.view(pool5.size(0), -1) fc7 = self.RCNN_top(pool5_flat) return fc7
37.2493
138
0.563769
from torch.autograd import Variable from models.proposal_target_layer_cascade import * import torchvision.models as models from models.proposal import * class ResNet(nn.Module): def __init__(self, block, layers, num_classes=1000,dropout_prob=0.2): self.inplanes = 64 super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.dropout = nn.Dropout(p=dropout_prob) self.avgpool = nn.AvgPool2d(7) self.fc = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.dropout(x) x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) return x class _fasterRCNN(nn.Module): def __init__(self, classes, class_agnostic): super(_fasterRCNN, self).__init__() self.classes = classes self.n_classes = len(classes) self.class_agnostic = class_agnostic self.RCNN_loss_cls = 0 self.RCNN_loss_bbox = 0 def forward(self, im_data, im_info, gt_boxes, num_boxes): batch_size = im_data.size(0) im_info = im_info.data gt_boxes = gt_boxes.data num_boxes = num_boxes.data base_feat = self.RCNN_base(im_data) rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes) if self.training: roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes) rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws = roi_data rois_label = Variable(rois_label.view(-1).long()) else: rois_label = None rpn_loss_cls = 0 rpn_loss_bbox = 0 rois = Variable(rois) pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1,5)) pooled_feat = self._head_to_tail(pooled_feat) bbox_pred = self.RCNN_bbox_pred(pooled_feat) if self.training and not self.class_agnostic: bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int(bbox_pred.size(1) / 4), 4) bbox_pred_select = 
torch.gather(bbox_pred_view, 1, rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4)) bbox_pred = bbox_pred_select.squeeze(1) cls_score = self.RCNN_cls_score(pooled_feat) cls_prob = F.softmax(cls_score, 1) RCNN_loss_cls = 0 RCNN_loss_bbox = 0 cls_prob = cls_prob.view(batch_size, rois.size(1), -1) bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1) return rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, rois_label def _init_weights(self): def normal_init(m, mean, stddev, truncated=False): if truncated: m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) else: m.weight.data.normal_(mean, stddev) m.bias.data.zero_() def create_architecture(self): self._init_modules() self._init_weights() class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Model(nn.Module): def __init__(self, model_cfg='datanet.yaml', ch=3, nc=None): super(Model, self).__init__() if type(model_cfg) is dict: self.md = model_cfg else: import yaml with open(model_cfg) as f: self.md = yaml.load(f, Loader=yaml.FullLoader) if nc and nc != self.md['nc']: print('Overriding %s nc=%g with nc=%g' % (model_cfg, self.md['nc'], nc)) self.md['nc'] = nc self.model, self.save = BasicBlock(self.md, ch=[ch]) m = self.model[-1] if isinstance(m, Detect): s = 128 m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) m.anchors /= m.stride.view(-1, 1, 1) check_anchor_order(m) self.stride = m.stride self._initialize_biases() 
torch_utils.initialize_weights(self) self._initialize_biases() torch_utils.model_info(self) print('') def forward(self, x, augment=False, profile=False): if augment: img_size = x.shape[-2:] s = [0.83, 0.67] y = [] for i, xi in enumerate((x, torch_utils.scale_img(x.flip(3), s[0]), torch_utils.scale_img(x, s[1]), )): y.append(self.forward_once(xi)[0]) y[1][..., :4] /= s[0] y[1][..., 0] = img_size[1] - y[1][..., 0] y[2][..., :4] /= s[1] return torch.cat(y, 1), None else: return self.forward_once(x, profile) def forward_once(self, x, profile=False): y, dt = [], [] for m in self.model: if m.f != -1: x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] if profile: try: import thop o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 except: o = 0 t = torch_utils.time_synchronized() for _ in range(10): _ = m(x) dt.append((torch_utils.time_synchronized() - t) * 100) print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) x = m(x) y.append(x if m.i in self.save else None) if profile: print('%.1fms total' % sum(dt)) return x def _initialize_biases(self, cf=None): m = self.model[-1] for f, s in zip(m.f, m.stride): mi = self.model[f % m.i] b = mi.bias.view(m.na, -1) b[:, 4] += math.log(8 / (640 / s) ** 2) b[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) def _print_biases(self): m = self.model[-1] for f in sorted([x % m.i for x in m.f]): b = self.model[f].bias.detach().view(m.na, -1).T print(('%g Conv2d.bias:' + '%10.3g' * 6) % (f, *b[:5].mean(1).tolist(), b[5:].mean())) def fuse(self): print('Fusing layers... 
', end='') for m in self.model.modules(): if type(m) is Conv: m.conv = torch_utils.fuse_conv_and_bn(m.conv, m.bn) m.bn = None m.forward = m.fuseforward torch_utils.model_info(self) return self def BasicBlock(runwget, ch): anchors, nc, gd, gw = runwget['anchors'], runwget['nc'], runwget['depth_multiple'], runwget['width_multiple'] na = (len(anchors[0]) // 2) no = na * (nc + 5) layers, save, c2 = [], [], ch[-1] for i, (f, n, m, args) in enumerate(runwget['backbone'] + runwget['head']): m = eval(m) if isinstance(m, str) else m for j, a in enumerate(args): try: args[j] = eval(a) if isinstance(a, str) else a except: pass n = max(round(n * gd), 1) if n > 1 else n if m in [nn.Conv2d, Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]: c1, c2 = ch[f], args[0] c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 args = [c1, c2, *args[1:]] if m in [BottleneckCSP, C3]: args.insert(2, n) n = 1 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: c2 = sum([ch[-1 if x == -1 else x + 1] for x in f]) elif m is Detect: f = f or list(reversed([(-1 if j == i else j - 1) for j, x in enumerate(ch) if x == no])) else: c2 = ch[f] m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) t = str(m)[8:-2].replace('__main__.', '') np = sum([x.numel() for x in m_.parameters()]) m_.i, m_.f, m_.type, m_.np = i, f, t, np save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) layers.append(m_) ch.append(c2) return nn.Sequential(*layers), sorted(save) class vgg16(_fasterRCNN): def __init__(self, classes, pretrained=False, class_agnostic=False): self.model_path = 'cfgs/pretrained_model/vgg16_caffe.pth' self.dout_base_model = 512 self.pretrained = pretrained self.class_agnostic = class_agnostic _fasterRCNN.__init__(self, classes, class_agnostic) def _init_modules(self): vgg = models.vgg16() if self.pretrained: print("Loading pretrained weights from %s" % (self.model_path)) state_dict = torch.load(self.model_path) 
vgg.load_state_dict({k: v for k, v in state_dict.items() if k in vgg.state_dict()}) vgg.classifier = nn.Sequential(*list(vgg.classifier._modules.values())[:-1]) self.RCNN_base = nn.Sequential(*list(vgg.features._modules.values())[:-1]) for layer in range(10): for p in self.RCNN_base[layer].parameters(): p.requires_grad = False self.RCNN_top = vgg.classifier self.RCNN_cls_score = nn.Linear(4096, self.n_classes) if self.class_agnostic: self.RCNN_bbox_pred = nn.Linear(4096, 4) else: self.RCNN_bbox_pred = nn.Linear(4096, 4 * self.n_classes) def _head_to_tail(self, pool5): pool5_flat = pool5.view(pool5.size(0), -1) fc7 = self.RCNN_top(pool5_flat) return fc7
true
true
f70fd34fcef7070d01158e4acd65734d35826d88
678
py
Python
world/load.py
Eoin-Irwin/wmap-geodjango-tutorial-master
f441567d5c9e6059e7192ee5f2d00a51e3d832d8
[ "MIT" ]
null
null
null
world/load.py
Eoin-Irwin/wmap-geodjango-tutorial-master
f441567d5c9e6059e7192ee5f2d00a51e3d832d8
[ "MIT" ]
null
null
null
world/load.py
Eoin-Irwin/wmap-geodjango-tutorial-master
f441567d5c9e6059e7192ee5f2d00a51e3d832d8
[ "MIT" ]
null
null
null
import os from django.contrib.gis.utils import LayerMapping from .models import WorldBorder world_mapping = { 'fips': 'FIPS', 'iso2': 'ISO2', 'iso3': 'ISO3', 'un': 'UN', 'name': 'NAME', 'area': 'AREA', 'pop2005': 'POP2005', 'region': 'REGION', 'subregion': 'SUBREGION', 'lon': 'LON', 'lat': 'LAT', 'mpoly': 'MULTIPOLYGON', } world_shp = os.path.abspath( os.path.join(os.path.dirname(__file__), 'data', 'TM_WORLD_BORDERS-0.3.shp'), ) def run(verbose=True): lm = LayerMapping( WorldBorder, world_shp, world_mapping, transform=False, encoding='iso-8859-1', ) lm.save(strict=True, verbose=verbose)
21.870968
80
0.60472
import os from django.contrib.gis.utils import LayerMapping from .models import WorldBorder world_mapping = { 'fips': 'FIPS', 'iso2': 'ISO2', 'iso3': 'ISO3', 'un': 'UN', 'name': 'NAME', 'area': 'AREA', 'pop2005': 'POP2005', 'region': 'REGION', 'subregion': 'SUBREGION', 'lon': 'LON', 'lat': 'LAT', 'mpoly': 'MULTIPOLYGON', } world_shp = os.path.abspath( os.path.join(os.path.dirname(__file__), 'data', 'TM_WORLD_BORDERS-0.3.shp'), ) def run(verbose=True): lm = LayerMapping( WorldBorder, world_shp, world_mapping, transform=False, encoding='iso-8859-1', ) lm.save(strict=True, verbose=verbose)
true
true
f70fd35d3f0c8c6b1837e551460d92a1bc63db21
6,306
py
Python
service/views.py
zingbretsen/diplomacy
e4c8d2c89540c0e2ea1929879fd303a170d0a723
[ "MIT" ]
null
null
null
service/views.py
zingbretsen/diplomacy
e4c8d2c89540c0e2ea1929879fd303a170d0a723
[ "MIT" ]
null
null
null
service/views.py
zingbretsen/diplomacy
e4c8d2c89540c0e2ea1929879fd303a170d0a723
[ "MIT" ]
null
null
null
from django.shortcuts import get_object_or_404 from django_filters.rest_framework import DjangoFilterBackend from rest_framework import filters, generics, views, exceptions from rest_framework.response import Response from core import models from core.models.base import GameStatus from service import serializers from service.permissions import IsAuthenticated # NOTE this could possibly be replaced by using options def get_game_filter_choices(): return { 'game_statuses': models.base.GameStatus.CHOICES, 'nation_choice_modes': models.base.NationChoiceMode.CHOICES, 'deadlines': models.base.DeadlineFrequency.CHOICES, 'variants': [(v.id, str(v)) for v in models.Variant.objects.all()], } class GameFilterChoicesView(views.APIView): def get(self, request, format=None): return Response(get_game_filter_choices()) class BaseMixin: game_key = 'game' def get_game(self): return get_object_or_404( models.Game.objects, id=self.kwargs[self.game_key], status=GameStatus.ACTIVE, participants=self.request.user.id, ) def get_user_nation_state(self): game = self.get_game() return get_object_or_404( models.NationState.objects, turn=game.get_current_turn(), user=self.request.user.id, ) class ListGames(generics.ListAPIView): permission_classes = [IsAuthenticated] queryset = models.Game.objects.all() serializer_class = serializers.GameSerializer filter_backends = [ DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter, ] search_fields = [ 'name', 'created_by__username' ] filterset_fields = [ 'variant', 'status', 'num_players', 'nation_choice_mode', 'order_deadline', 'retreat_deadline', 'build_deadline', ] ordering_fields = [ 'created_at', 'initialized_at' ] class CreateGameView(generics.CreateAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.CreateGameSerializer def create(self, request, *args, **kwargs): defaults = {'variant': 1, 'num_players': 7} request.data.update(defaults) return super().create(request, *args, **kwargs) class 
GameStateView(BaseMixin, generics.RetrieveAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.GameStateSerializer queryset = models.Game.objects.all() game_key = 'pk' class ToggleJoinGame(generics.UpdateAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.GameSerializer queryset = models.Game.objects.all() def check_object_permissions(self, request, obj): if request.user not in obj.participants.all(): if obj.participants.count() >= obj.num_players: raise exceptions.PermissionDenied( detail='Game is already full.' ) if obj.status != GameStatus.PENDING: raise exceptions.PermissionDenied( detail='Game is not pending.' ) class CreateOrderView(BaseMixin, generics.CreateAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.OrderSerializer def get_serializer_context(self): context = super().get_serializer_context() context['nation_state'] = self.get_user_nation_state() return context def perform_create(self, serializer): """ Delete existing order before creating new order. 
""" models.Order.objects.filter( source=serializer.validated_data['source'], turn=serializer.validated_data['turn'], nation=serializer.validated_data['nation'], ).delete() super().perform_create(serializer) class ListOrdersView(BaseMixin, generics.ListAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.OrderSerializer def get_queryset(self): game = get_object_or_404( models.Game.objects, id=self.kwargs['game'], ) user_nation_state = models.NationState.objects.filter( turn=game.get_current_turn(), user=self.request.user.id, ).first() if not user_nation_state: return models.Order.objects.none() return models.Order.objects.filter( turn=user_nation_state.turn, nation=user_nation_state.nation, ) class RetrievePrivateNationStateView(BaseMixin, generics.RetrieveAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.PrivateNationStateSerializer def get_object(self): game = get_object_or_404( models.Game.objects, id=self.kwargs['game'], ) return models.NationState.objects.filter( turn=game.get_current_turn(), user=self.request.user.id, ).first() def retrieve(self, request, *args, **kwargs): instance = self.get_object() if not instance: return Response({}) serializer = self.get_serializer(instance) return Response(serializer.data) class DestroyOrderView(BaseMixin, generics.DestroyAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.OrderSerializer queryset = models.Order.objects.all() def check_object_permissions(self, request, obj): user_nation_state = self.get_user_nation_state() if obj.nation != user_nation_state.nation: raise exceptions.PermissionDenied( detail='Order does not belong to this user.' 
) class ToggleFinalizeOrdersView(generics.UpdateAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.PublicNationStateSerializer queryset = models.NationState.objects.filter( turn__game__status=GameStatus.ACTIVE ) def check_object_permissions(self, request, obj): if request.user != obj.user: raise exceptions.PermissionDenied( detail='Cannot finalize orders for other nation.' )
30.317308
75
0.664605
from django.shortcuts import get_object_or_404 from django_filters.rest_framework import DjangoFilterBackend from rest_framework import filters, generics, views, exceptions from rest_framework.response import Response from core import models from core.models.base import GameStatus from service import serializers from service.permissions import IsAuthenticated def get_game_filter_choices(): return { 'game_statuses': models.base.GameStatus.CHOICES, 'nation_choice_modes': models.base.NationChoiceMode.CHOICES, 'deadlines': models.base.DeadlineFrequency.CHOICES, 'variants': [(v.id, str(v)) for v in models.Variant.objects.all()], } class GameFilterChoicesView(views.APIView): def get(self, request, format=None): return Response(get_game_filter_choices()) class BaseMixin: game_key = 'game' def get_game(self): return get_object_or_404( models.Game.objects, id=self.kwargs[self.game_key], status=GameStatus.ACTIVE, participants=self.request.user.id, ) def get_user_nation_state(self): game = self.get_game() return get_object_or_404( models.NationState.objects, turn=game.get_current_turn(), user=self.request.user.id, ) class ListGames(generics.ListAPIView): permission_classes = [IsAuthenticated] queryset = models.Game.objects.all() serializer_class = serializers.GameSerializer filter_backends = [ DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter, ] search_fields = [ 'name', 'created_by__username' ] filterset_fields = [ 'variant', 'status', 'num_players', 'nation_choice_mode', 'order_deadline', 'retreat_deadline', 'build_deadline', ] ordering_fields = [ 'created_at', 'initialized_at' ] class CreateGameView(generics.CreateAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.CreateGameSerializer def create(self, request, *args, **kwargs): defaults = {'variant': 1, 'num_players': 7} request.data.update(defaults) return super().create(request, *args, **kwargs) class GameStateView(BaseMixin, generics.RetrieveAPIView): permission_classes = 
[IsAuthenticated] serializer_class = serializers.GameStateSerializer queryset = models.Game.objects.all() game_key = 'pk' class ToggleJoinGame(generics.UpdateAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.GameSerializer queryset = models.Game.objects.all() def check_object_permissions(self, request, obj): if request.user not in obj.participants.all(): if obj.participants.count() >= obj.num_players: raise exceptions.PermissionDenied( detail='Game is already full.' ) if obj.status != GameStatus.PENDING: raise exceptions.PermissionDenied( detail='Game is not pending.' ) class CreateOrderView(BaseMixin, generics.CreateAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.OrderSerializer def get_serializer_context(self): context = super().get_serializer_context() context['nation_state'] = self.get_user_nation_state() return context def perform_create(self, serializer): models.Order.objects.filter( source=serializer.validated_data['source'], turn=serializer.validated_data['turn'], nation=serializer.validated_data['nation'], ).delete() super().perform_create(serializer) class ListOrdersView(BaseMixin, generics.ListAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.OrderSerializer def get_queryset(self): game = get_object_or_404( models.Game.objects, id=self.kwargs['game'], ) user_nation_state = models.NationState.objects.filter( turn=game.get_current_turn(), user=self.request.user.id, ).first() if not user_nation_state: return models.Order.objects.none() return models.Order.objects.filter( turn=user_nation_state.turn, nation=user_nation_state.nation, ) class RetrievePrivateNationStateView(BaseMixin, generics.RetrieveAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.PrivateNationStateSerializer def get_object(self): game = get_object_or_404( models.Game.objects, id=self.kwargs['game'], ) return models.NationState.objects.filter( 
turn=game.get_current_turn(), user=self.request.user.id, ).first() def retrieve(self, request, *args, **kwargs): instance = self.get_object() if not instance: return Response({}) serializer = self.get_serializer(instance) return Response(serializer.data) class DestroyOrderView(BaseMixin, generics.DestroyAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.OrderSerializer queryset = models.Order.objects.all() def check_object_permissions(self, request, obj): user_nation_state = self.get_user_nation_state() if obj.nation != user_nation_state.nation: raise exceptions.PermissionDenied( detail='Order does not belong to this user.' ) class ToggleFinalizeOrdersView(generics.UpdateAPIView): permission_classes = [IsAuthenticated] serializer_class = serializers.PublicNationStateSerializer queryset = models.NationState.objects.filter( turn__game__status=GameStatus.ACTIVE ) def check_object_permissions(self, request, obj): if request.user != obj.user: raise exceptions.PermissionDenied( detail='Cannot finalize orders for other nation.' )
true
true
f70fd38f695b4de489bb91ad4acb68243448e994
6,573
py
Python
tests/ut/python/dataset/test_rgb_hsv.py
httpsgithu/mindspore
c29d6bb764e233b427319cb89ba79e420f1e2c64
[ "Apache-2.0" ]
1
2022-03-30T03:43:29.000Z
2022-03-30T03:43:29.000Z
tests/ut/python/dataset/test_rgb_hsv.py
949144093/mindspore
c29d6bb764e233b427319cb89ba79e420f1e2c64
[ "Apache-2.0" ]
null
null
null
tests/ut/python/dataset/test_rgb_hsv.py
949144093/mindspore
c29d6bb764e233b427319cb89ba79e420f1e2c64
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ Testing RgbToHsv and HsvToRgb op in DE """ import colorsys import numpy as np from numpy.testing import assert_allclose import mindspore.dataset as ds import mindspore.dataset.transforms.transforms import mindspore.dataset.vision.transforms as vision import mindspore.dataset.vision.py_transforms_util as util DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" def generate_numpy_random_rgb(shape): # Only generate floating points that are fractions like n / 256, since they # are RGB pixels. Some low-precision floating point types in this test can't # handle arbitrary precision floating points well. return np.random.randint(0, 256, shape) / 255. 
def test_rgb_hsv_hwc(): rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32) rgb_np = rgb_flat.reshape((8, 8, 3)) hsv_base = np.array([ colorsys.rgb_to_hsv( r.astype(np.float64), g.astype(np.float64), b.astype(np.float64)) for r, g, b in rgb_flat ]) hsv_base = hsv_base.reshape((8, 8, 3)) hsv_de = util.rgb_to_hsvs(rgb_np, True) assert hsv_base.shape == hsv_de.shape assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0) hsv_flat = hsv_base.reshape(64, 3) rgb_base = np.array([ colorsys.hsv_to_rgb( h.astype(np.float64), s.astype(np.float64), v.astype(np.float64)) for h, s, v in hsv_flat ]) rgb_base = rgb_base.reshape((8, 8, 3)) rgb_de = util.hsv_to_rgbs(hsv_base, True) assert rgb_base.shape == rgb_de.shape assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0) def test_rgb_hsv_batch_hwc(): rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32) rgb_np = rgb_flat.reshape((4, 2, 8, 3)) hsv_base = np.array([ colorsys.rgb_to_hsv( r.astype(np.float64), g.astype(np.float64), b.astype(np.float64)) for r, g, b in rgb_flat ]) hsv_base = hsv_base.reshape((4, 2, 8, 3)) hsv_de = util.rgb_to_hsvs(rgb_np, True) assert hsv_base.shape == hsv_de.shape assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0) hsv_flat = hsv_base.reshape((64, 3)) rgb_base = np.array([ colorsys.hsv_to_rgb( h.astype(np.float64), s.astype(np.float64), v.astype(np.float64)) for h, s, v in hsv_flat ]) rgb_base = rgb_base.reshape((4, 2, 8, 3)) rgb_de = util.hsv_to_rgbs(hsv_base, True) assert rgb_de.shape == rgb_base.shape assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0) def test_rgb_hsv_chw(): rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32) rgb_np = rgb_flat.reshape((3, 8, 8)) hsv_base = np.array([ np.vectorize(colorsys.rgb_to_hsv)( rgb_np[0, :, :].astype(np.float64), rgb_np[1, :, :].astype(np.float64), rgb_np[2, :, :].astype(np.float64)) ]) hsv_base = hsv_base.reshape((3, 8, 8)) hsv_de = 
util.rgb_to_hsvs(rgb_np, False) assert hsv_base.shape == hsv_de.shape assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0) rgb_base = np.array([ np.vectorize(colorsys.hsv_to_rgb)( hsv_base[0, :, :].astype(np.float64), hsv_base[1, :, :].astype(np.float64), hsv_base[2, :, :].astype(np.float64)) ]) rgb_base = rgb_base.reshape((3, 8, 8)) rgb_de = util.hsv_to_rgbs(hsv_base, False) assert rgb_de.shape == rgb_base.shape assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0) def test_rgb_hsv_batch_chw(): rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32) rgb_imgs = rgb_flat.reshape((4, 3, 2, 8)) hsv_base_imgs = np.array([ np.vectorize(colorsys.rgb_to_hsv)( img[0, :, :].astype(np.float64), img[1, :, :].astype(np.float64), img[2, :, :].astype(np.float64)) for img in rgb_imgs ]) hsv_de = util.rgb_to_hsvs(rgb_imgs, False) assert hsv_base_imgs.shape == hsv_de.shape assert_allclose(hsv_base_imgs.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0) rgb_base = np.array([ np.vectorize(colorsys.hsv_to_rgb)( img[0, :, :].astype(np.float64), img[1, :, :].astype(np.float64), img[2, :, :].astype(np.float64)) for img in hsv_base_imgs ]) rgb_de = util.hsv_to_rgbs(hsv_base_imgs, False) assert rgb_base.shape == rgb_de.shape assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0) def test_rgb_hsv_pipeline(): # First dataset transforms1 = [ vision.Decode(True), vision.Resize([64, 64]), vision.ToTensor() ] transforms1 = mindspore.dataset.transforms.transforms.Compose(transforms1) ds1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) ds1 = ds1.map(operations=transforms1, input_columns=["image"]) # Second dataset transforms2 = [ vision.Decode(True), vision.Resize([64, 64]), vision.ToTensor(), vision.RgbToHsv(), vision.HsvToRgb() ] transform2 = mindspore.dataset.transforms.transforms.Compose(transforms2) ds2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) ds2 = 
ds2.map(operations=transform2, input_columns=["image"]) num_iter = 0 for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1), ds2.create_dict_iterator(num_epochs=1)): num_iter += 1 ori_img = data1["image"].asnumpy() cvt_img = data2["image"].asnumpy() assert_allclose(ori_img.flatten(), cvt_img.flatten(), rtol=1e-5, atol=0) assert ori_img.shape == cvt_img.shape if __name__ == "__main__": test_rgb_hsv_hwc() test_rgb_hsv_batch_hwc() test_rgb_hsv_chw() test_rgb_hsv_batch_chw() test_rgb_hsv_pipeline()
38.215116
119
0.66621
import colorsys import numpy as np from numpy.testing import assert_allclose import mindspore.dataset as ds import mindspore.dataset.transforms.transforms import mindspore.dataset.vision.transforms as vision import mindspore.dataset.vision.py_transforms_util as util DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" def generate_numpy_random_rgb(shape): # handle arbitrary precision floating points well. return np.random.randint(0, 256, shape) / 255. def test_rgb_hsv_hwc(): rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32) rgb_np = rgb_flat.reshape((8, 8, 3)) hsv_base = np.array([ colorsys.rgb_to_hsv( r.astype(np.float64), g.astype(np.float64), b.astype(np.float64)) for r, g, b in rgb_flat ]) hsv_base = hsv_base.reshape((8, 8, 3)) hsv_de = util.rgb_to_hsvs(rgb_np, True) assert hsv_base.shape == hsv_de.shape assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0) hsv_flat = hsv_base.reshape(64, 3) rgb_base = np.array([ colorsys.hsv_to_rgb( h.astype(np.float64), s.astype(np.float64), v.astype(np.float64)) for h, s, v in hsv_flat ]) rgb_base = rgb_base.reshape((8, 8, 3)) rgb_de = util.hsv_to_rgbs(hsv_base, True) assert rgb_base.shape == rgb_de.shape assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0) def test_rgb_hsv_batch_hwc(): rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32) rgb_np = rgb_flat.reshape((4, 2, 8, 3)) hsv_base = np.array([ colorsys.rgb_to_hsv( r.astype(np.float64), g.astype(np.float64), b.astype(np.float64)) for r, g, b in rgb_flat ]) hsv_base = hsv_base.reshape((4, 2, 8, 3)) hsv_de = util.rgb_to_hsvs(rgb_np, True) assert hsv_base.shape == hsv_de.shape assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0) hsv_flat = hsv_base.reshape((64, 3)) rgb_base = np.array([ colorsys.hsv_to_rgb( h.astype(np.float64), s.astype(np.float64), v.astype(np.float64)) for h, s, v in 
hsv_flat ]) rgb_base = rgb_base.reshape((4, 2, 8, 3)) rgb_de = util.hsv_to_rgbs(hsv_base, True) assert rgb_de.shape == rgb_base.shape assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0) def test_rgb_hsv_chw(): rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32) rgb_np = rgb_flat.reshape((3, 8, 8)) hsv_base = np.array([ np.vectorize(colorsys.rgb_to_hsv)( rgb_np[0, :, :].astype(np.float64), rgb_np[1, :, :].astype(np.float64), rgb_np[2, :, :].astype(np.float64)) ]) hsv_base = hsv_base.reshape((3, 8, 8)) hsv_de = util.rgb_to_hsvs(rgb_np, False) assert hsv_base.shape == hsv_de.shape assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0) rgb_base = np.array([ np.vectorize(colorsys.hsv_to_rgb)( hsv_base[0, :, :].astype(np.float64), hsv_base[1, :, :].astype(np.float64), hsv_base[2, :, :].astype(np.float64)) ]) rgb_base = rgb_base.reshape((3, 8, 8)) rgb_de = util.hsv_to_rgbs(hsv_base, False) assert rgb_de.shape == rgb_base.shape assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0) def test_rgb_hsv_batch_chw(): rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32) rgb_imgs = rgb_flat.reshape((4, 3, 2, 8)) hsv_base_imgs = np.array([ np.vectorize(colorsys.rgb_to_hsv)( img[0, :, :].astype(np.float64), img[1, :, :].astype(np.float64), img[2, :, :].astype(np.float64)) for img in rgb_imgs ]) hsv_de = util.rgb_to_hsvs(rgb_imgs, False) assert hsv_base_imgs.shape == hsv_de.shape assert_allclose(hsv_base_imgs.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0) rgb_base = np.array([ np.vectorize(colorsys.hsv_to_rgb)( img[0, :, :].astype(np.float64), img[1, :, :].astype(np.float64), img[2, :, :].astype(np.float64)) for img in hsv_base_imgs ]) rgb_de = util.hsv_to_rgbs(hsv_base_imgs, False) assert rgb_base.shape == rgb_de.shape assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0) def test_rgb_hsv_pipeline(): # First dataset transforms1 = [ vision.Decode(True), vision.Resize([64, 64]), 
vision.ToTensor() ] transforms1 = mindspore.dataset.transforms.transforms.Compose(transforms1) ds1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) ds1 = ds1.map(operations=transforms1, input_columns=["image"]) # Second dataset transforms2 = [ vision.Decode(True), vision.Resize([64, 64]), vision.ToTensor(), vision.RgbToHsv(), vision.HsvToRgb() ] transform2 = mindspore.dataset.transforms.transforms.Compose(transforms2) ds2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) ds2 = ds2.map(operations=transform2, input_columns=["image"]) num_iter = 0 for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1), ds2.create_dict_iterator(num_epochs=1)): num_iter += 1 ori_img = data1["image"].asnumpy() cvt_img = data2["image"].asnumpy() assert_allclose(ori_img.flatten(), cvt_img.flatten(), rtol=1e-5, atol=0) assert ori_img.shape == cvt_img.shape if __name__ == "__main__": test_rgb_hsv_hwc() test_rgb_hsv_batch_hwc() test_rgb_hsv_chw() test_rgb_hsv_batch_chw() test_rgb_hsv_pipeline()
true
true
f70fd4d19e6dc7edc35f39b372064e31f3be0c68
2,521
py
Python
algorithms/agents/intrinsic.py
rtloftin/strategically_efficient_rl
85a702b9361211d345a58cc60696e4e851d48ec4
[ "MIT" ]
7
2021-08-02T14:28:33.000Z
2021-12-27T03:30:47.000Z
algorithms/agents/intrinsic.py
rtloftin/strategically_efficient_rl
85a702b9361211d345a58cc60696e4e851d48ec4
[ "MIT" ]
3
2021-08-02T17:35:52.000Z
2022-02-10T04:42:17.000Z
algorithms/agents/intrinsic.py
rtloftin/strategically_efficient_rl
85a702b9361211d345a58cc60696e4e851d48ec4
[ "MIT" ]
2
2021-08-02T17:30:05.000Z
2021-09-13T12:16:29.000Z
import numpy as np import scipy.signal from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.evaluation.postprocessing import Postprocessing from algorithms.curiosity import INTRINSIC_REWARD INTRINSIC_VALUE_TARGETS = "intrinsic_value_targets" INTRINSIC_VF_PREDS = "intrinsic_vf_preds" def discount(x, gamma): return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1] def compute_advantages_intrinsic(rollout, last_r, last_intrinsic_r, gamma=0.9, intrinsic_gamma=0.9, lambda_=1.0, intrinsic_lambda_=1.0): """ Given a rollout, compute its value targets and the advantage. Assumes we are using separate value function heads for the extrinsic and intrinsic rewards Args: rollout (SampleBatch): SampleBatch of a single trajectory last_r (float): Value estimation for last observation gamma (float): Discount factor intrinsic_gamma (float): Discount factor lambda_ (float): Parameter for GAE intrinsic_lambda_ (float): Parameter for intrinsic GAE Returns: SampleBatch (SampleBatch): Object with experience from rollout and processed rewards. 
""" traj = {} trajsize = len(rollout[SampleBatch.ACTIONS]) for key in rollout: traj[key] = np.stack(rollout[key]) # Extrinsic value predictions and targets vpred_t = np.concatenate([rollout[SampleBatch.VF_PREDS], np.array([last_r])]) delta_t = (traj[SampleBatch.REWARDS] + gamma * vpred_t[1:] - vpred_t[:-1]) advantages = discount(delta_t, gamma * lambda_) traj[Postprocessing.VALUE_TARGETS] = ( advantages + traj[SampleBatch.VF_PREDS]).copy().astype(np.float32) # Intrinsic value predictions intrinsic_vpred_t = np.concatenate([rollout[INTRINSIC_VF_PREDS], np.array([last_intrinsic_r])]) intrinsic_delta_t = (traj[INTRINSIC_REWARD] + intrinsic_gamma * intrinsic_vpred_t[1:] - intrinsic_vpred_t[:-1]) intrinsic_advantages = discount(intrinsic_delta_t, intrinsic_gamma * intrinsic_lambda_) traj[INTRINSIC_VALUE_TARGETS] = ( intrinsic_advantages + traj[INTRINSIC_VF_PREDS]).copy().astype(np.float32) traj[Postprocessing.ADVANTAGES] = (advantages + intrinsic_advantages).copy().astype(np.float32) assert all(val.shape[0] == trajsize for val in traj.values()), \ "Rollout stacked incorrectly!" return SampleBatch(traj)
38.784615
115
0.685839
import numpy as np import scipy.signal from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.evaluation.postprocessing import Postprocessing from algorithms.curiosity import INTRINSIC_REWARD INTRINSIC_VALUE_TARGETS = "intrinsic_value_targets" INTRINSIC_VF_PREDS = "intrinsic_vf_preds" def discount(x, gamma): return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1] def compute_advantages_intrinsic(rollout, last_r, last_intrinsic_r, gamma=0.9, intrinsic_gamma=0.9, lambda_=1.0, intrinsic_lambda_=1.0): traj = {} trajsize = len(rollout[SampleBatch.ACTIONS]) for key in rollout: traj[key] = np.stack(rollout[key]) vpred_t = np.concatenate([rollout[SampleBatch.VF_PREDS], np.array([last_r])]) delta_t = (traj[SampleBatch.REWARDS] + gamma * vpred_t[1:] - vpred_t[:-1]) advantages = discount(delta_t, gamma * lambda_) traj[Postprocessing.VALUE_TARGETS] = ( advantages + traj[SampleBatch.VF_PREDS]).copy().astype(np.float32) intrinsic_vpred_t = np.concatenate([rollout[INTRINSIC_VF_PREDS], np.array([last_intrinsic_r])]) intrinsic_delta_t = (traj[INTRINSIC_REWARD] + intrinsic_gamma * intrinsic_vpred_t[1:] - intrinsic_vpred_t[:-1]) intrinsic_advantages = discount(intrinsic_delta_t, intrinsic_gamma * intrinsic_lambda_) traj[INTRINSIC_VALUE_TARGETS] = ( intrinsic_advantages + traj[INTRINSIC_VF_PREDS]).copy().astype(np.float32) traj[Postprocessing.ADVANTAGES] = (advantages + intrinsic_advantages).copy().astype(np.float32) assert all(val.shape[0] == trajsize for val in traj.values()), \ "Rollout stacked incorrectly!" return SampleBatch(traj)
true
true
f70fd5a77b2b7e1d279a9781bf732832a587791e
12,829
py
Python
maskrcnn_benchmark/modeling/roi_heads/relation_head/model_transformer.py
KaihuaTang/scene-graph-benchmark.pytorch
45cd54f7465b81d3154e94fcab2b554a09637f6f
[ "MIT" ]
16
2021-08-18T02:51:02.000Z
2022-03-19T12:43:27.000Z
maskrcnn_benchmark/modeling/roi_heads/relation_head/model_transformer.py
KaihuaTang/scene-graph-benchmark.pytorch
45cd54f7465b81d3154e94fcab2b554a09637f6f
[ "MIT" ]
4
2021-09-23T11:04:57.000Z
2022-02-21T01:57:30.000Z
maskrcnn_benchmark/modeling/roi_heads/relation_head/model_transformer.py
KaihuaTang/scene-graph-benchmark.pytorch
45cd54f7465b81d3154e94fcab2b554a09637f6f
[ "MIT" ]
2
2022-03-22T01:28:10.000Z
2022-03-28T13:26:25.000Z
""" Based on the implementation of https://github.com/jadore801120/attention-is-all-you-need-pytorch """ import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from maskrcnn_benchmark.modeling.utils import cat from .utils_motifs import obj_edge_vectors, to_onehot, nms_overlaps, encode_box_info class ScaledDotProductAttention(nn.Module): ''' Scaled Dot-Product Attention ''' def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2) def forward(self, q, k, v, mask=None): """ Args: q (bsz, len_q, dim_q) k (bsz, len_k, dim_k) v (bsz, len_v, dim_v) Note: len_k==len_v, and dim_q==dim_k Returns: output (bsz, len_q, dim_v) attn (bsz, len_q, len_k) """ attn = torch.bmm(q, k.transpose(1, 2)) attn = attn / self.temperature if mask is not None: attn = attn.masked_fill(mask, -np.inf) attn = self.softmax(attn) attn = self.dropout(attn) output = torch.bmm(attn, v) return output, attn class MultiHeadAttention(nn.Module): ''' Multi-Head Attention module ''' def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs = nn.Linear(d_model, n_head * d_k) self.w_ks = nn.Linear(d_model, n_head * d_k) self.w_vs = nn.Linear(d_model, n_head * d_v) nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k))) nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k))) nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v))) self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5)) self.layer_norm = nn.LayerNorm(d_model) self.fc = nn.Linear(n_head * d_v, d_model) nn.init.xavier_normal_(self.fc.weight) self.dropout = nn.Dropout(dropout) def forward(self, q, k, v, mask=None): """ Args: q (bsz, len_q, dim_q) k (bsz, len_k, dim_k) v (bsz, len_v, dim_v) Note: len_k==len_v, and dim_q==dim_k Returns: output 
(bsz, len_q, d_model) attn (bsz, len_q, len_k) """ d_k, d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, _ = q.size() sz_b, len_k, _ = k.size() sz_b, len_v, _ = v.size() # len_k==len_v residual = q q = self.w_qs(q).view(sz_b, len_q, n_head, d_k) k = self.w_ks(k).view(sz_b, len_k, n_head, d_k) v = self.w_vs(v).view(sz_b, len_v, n_head, d_v) q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x .. output, attn = self.attention(q, k, v, mask=mask) output = output.view(n_head, sz_b, len_q, d_v) output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv) output = self.dropout(self.fc(output)) output = self.layer_norm(output + residual) return output, attn class PositionwiseFeedForward(nn.Module): ''' A two-feed-forward-layer module ''' def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise self.layer_norm = nn.LayerNorm(d_in) self.dropout = nn.Dropout(dropout) def forward(self, x): """ Merge adjacent information. 
Equal to linear layer if kernel size is 1 Args: x (bsz, len, dim) Returns: output (bsz, len, dim) """ residual = x output = x.transpose(1, 2) output = self.w_2(F.relu(self.w_1(output))) output = output.transpose(1, 2) output = self.dropout(output) output = self.layer_norm(output + residual) return output class EncoderLayer(nn.Module): ''' Compose with two layers ''' def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1): super(EncoderLayer, self).__init__() self.slf_attn = MultiHeadAttention( n_head, d_model, d_k, d_v, dropout=dropout) self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout) def forward(self, enc_input, non_pad_mask=None, slf_attn_mask=None): enc_output, enc_slf_attn = self.slf_attn( enc_input, enc_input, enc_input, mask=slf_attn_mask) enc_output *= non_pad_mask.float() enc_output = self.pos_ffn(enc_output) enc_output *= non_pad_mask.float() return enc_output, enc_slf_attn class TransformerEncoder(nn.Module): """ A encoder model with self attention mechanism. 
""" def __init__(self, n_layers, n_head, d_k, d_v, d_model, d_inner, dropout=0.1): super().__init__() self.layer_stack = nn.ModuleList([ EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout) for _ in range(n_layers)]) def forward(self, input_feats, num_objs): """ Args: input_feats [Tensor] (#total_box, d_model) : bounding box features of a batch num_objs [list of int] (bsz, ) : number of bounding box of each image Returns: enc_output [Tensor] (#total_box, d_model) """ original_input_feats = input_feats input_feats = input_feats.split(num_objs, dim=0) input_feats = nn.utils.rnn.pad_sequence(input_feats, batch_first=True) # -- Prepare masks bsz = len(num_objs) device = input_feats.device pad_len = max(num_objs) num_objs_ = torch.LongTensor(num_objs).to(device).unsqueeze(1).expand(-1, pad_len) slf_attn_mask = torch.arange(pad_len, device=device).view(1, -1).expand(bsz, -1).ge(num_objs_).unsqueeze(1).expand(-1, pad_len, -1) # (bsz, pad_len, pad_len) non_pad_mask = torch.arange(pad_len, device=device).to(device).view(1, -1).expand(bsz, -1).lt(num_objs_).unsqueeze(-1) # (bsz, pad_len, 1) # -- Forward enc_output = input_feats for enc_layer in self.layer_stack: enc_output, enc_slf_attn = enc_layer( enc_output, non_pad_mask=non_pad_mask, slf_attn_mask=slf_attn_mask) enc_output = enc_output[non_pad_mask.squeeze(-1)] return enc_output class TransformerContext(nn.Module): def __init__(self, config, obj_classes, rel_classes, in_channels): super().__init__() self.cfg = config # setting parameters if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_BOX: self.mode = 'predcls' if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL else 'sgcls' else: self.mode = 'sgdet' self.obj_classes = obj_classes self.rel_classes = rel_classes self.num_obj_cls = len(obj_classes) self.num_rel_cls = len(rel_classes) self.in_channels = in_channels self.obj_dim = in_channels self.embed_dim = self.cfg.MODEL.ROI_RELATION_HEAD.EMBED_DIM self.hidden_dim = 
self.cfg.MODEL.ROI_RELATION_HEAD.CONTEXT_HIDDEN_DIM self.nms_thresh = self.cfg.TEST.RELATION.LATER_NMS_PREDICTION_THRES self.dropout_rate = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.DROPOUT_RATE self.obj_layer = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.OBJ_LAYER self.edge_layer = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.REL_LAYER self.num_head = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.NUM_HEAD self.inner_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.INNER_DIM self.k_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.KEY_DIM self.v_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.VAL_DIM # the following word embedding layer should be initalize by glove.6B before using embed_vecs = obj_edge_vectors(self.obj_classes, wv_dir=self.cfg.GLOVE_DIR, wv_dim=self.embed_dim) self.obj_embed1 = nn.Embedding(self.num_obj_cls, self.embed_dim) self.obj_embed2 = nn.Embedding(self.num_obj_cls, self.embed_dim) with torch.no_grad(): self.obj_embed1.weight.copy_(embed_vecs, non_blocking=True) self.obj_embed2.weight.copy_(embed_vecs, non_blocking=True) # position embedding self.bbox_embed = nn.Sequential(*[ nn.Linear(9, 32), nn.ReLU(inplace=True), nn.Dropout(0.1), nn.Linear(32, 128), nn.ReLU(inplace=True), nn.Dropout(0.1), ]) self.lin_obj = nn.Linear(self.in_channels + self.embed_dim + 128, self.hidden_dim) self.lin_edge = nn.Linear(self.embed_dim + self.hidden_dim + self.in_channels, self.hidden_dim) self.out_obj = nn.Linear(self.hidden_dim, self.num_obj_cls) self.context_obj = TransformerEncoder(self.obj_layer, self.num_head, self.k_dim, self.v_dim, self.hidden_dim, self.inner_dim, self.dropout_rate) self.context_edge = TransformerEncoder(self.edge_layer, self.num_head, self.k_dim, self.v_dim, self.hidden_dim, self.inner_dim, self.dropout_rate) def forward(self, roi_features, proposals, logger=None): # labels will be used in DecoderRNN during training use_gt_label = self.training or self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL obj_labels = 
cat([proposal.get_field("labels") for proposal in proposals], dim=0) if use_gt_label else None # label/logits embedding will be used as input if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL: obj_embed = self.obj_embed1(obj_labels) else: obj_logits = cat([proposal.get_field("predict_logits") for proposal in proposals], dim=0).detach() obj_embed = F.softmax(obj_logits, dim=1) @ self.obj_embed1.weight # bbox embedding will be used as input assert proposals[0].mode == 'xyxy' pos_embed = self.bbox_embed(encode_box_info(proposals)) # encode objects with transformer obj_pre_rep = cat((roi_features, obj_embed, pos_embed), -1) num_objs = [len(p) for p in proposals] obj_pre_rep = self.lin_obj(obj_pre_rep) obj_feats = self.context_obj(obj_pre_rep, num_objs) # predict obj_dists and obj_preds if self.mode == 'predcls': obj_preds = obj_labels obj_dists = to_onehot(obj_preds, self.num_obj_cls) edge_pre_rep = cat((roi_features, obj_feats, self.obj_embed2(obj_labels)), dim=-1) else: obj_dists = self.out_obj(obj_feats) use_decoder_nms = self.mode == 'sgdet' and not self.training if use_decoder_nms: boxes_per_cls = [proposal.get_field('boxes_per_cls') for proposal in proposals] obj_preds = self.nms_per_cls(obj_dists, boxes_per_cls, num_objs) else: obj_preds = obj_dists[:, 1:].max(1)[1] + 1 edge_pre_rep = cat((roi_features, obj_feats, self.obj_embed2(obj_preds)), dim=-1) # edge context edge_pre_rep = self.lin_edge(edge_pre_rep) edge_ctx = self.context_edge(edge_pre_rep, num_objs) return obj_dists, obj_preds, edge_ctx def nms_per_cls(self, obj_dists, boxes_per_cls, num_objs): obj_dists = obj_dists.split(num_objs, dim=0) obj_preds = [] for i in range(len(num_objs)): is_overlap = nms_overlaps(boxes_per_cls[i]).cpu().numpy() >= self.nms_thresh # (#box, #box, #class) out_dists_sampled = F.softmax(obj_dists[i], -1).cpu().numpy() out_dists_sampled[:, 0] = -1 out_label = obj_dists[i].new(num_objs[i]).fill_(0) for i in range(num_objs[i]): box_ind, cls_ind = 
np.unravel_index(out_dists_sampled.argmax(), out_dists_sampled.shape) out_label[int(box_ind)] = int(cls_ind) out_dists_sampled[is_overlap[box_ind,:,cls_ind], cls_ind] = 0.0 out_dists_sampled[box_ind] = -1.0 # This way we won't re-sample obj_preds.append(out_label.long()) obj_preds = torch.cat(obj_preds, dim=0) return obj_preds
41.924837
165
0.626159
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from maskrcnn_benchmark.modeling.utils import cat from .utils_motifs import obj_edge_vectors, to_onehot, nms_overlaps, encode_box_info class ScaledDotProductAttention(nn.Module): def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2) def forward(self, q, k, v, mask=None): attn = torch.bmm(q, k.transpose(1, 2)) attn = attn / self.temperature if mask is not None: attn = attn.masked_fill(mask, -np.inf) attn = self.softmax(attn) attn = self.dropout(attn) output = torch.bmm(attn, v) return output, attn class MultiHeadAttention(nn.Module): def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs = nn.Linear(d_model, n_head * d_k) self.w_ks = nn.Linear(d_model, n_head * d_k) self.w_vs = nn.Linear(d_model, n_head * d_v) nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k))) nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k))) nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v))) self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5)) self.layer_norm = nn.LayerNorm(d_model) self.fc = nn.Linear(n_head * d_v, d_model) nn.init.xavier_normal_(self.fc.weight) self.dropout = nn.Dropout(dropout) def forward(self, q, k, v, mask=None): d_k, d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, _ = q.size() sz_b, len_k, _ = k.size() sz_b, len_v, _ = v.size() residual = q q = self.w_qs(q).view(sz_b, len_q, n_head, d_k) k = self.w_ks(k).view(sz_b, len_k, n_head, d_k) v = self.w_vs(v).view(sz_b, len_v, n_head, d_v) q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) mask = 
mask.repeat(n_head, 1, 1) output, attn = self.attention(q, k, v, mask=mask) output = output.view(n_head, sz_b, len_q, d_v) output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) output = self.dropout(self.fc(output)) output = self.layer_norm(output + residual) return output, attn class PositionwiseFeedForward(nn.Module): def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Conv1d(d_in, d_hid, 1) self.w_2 = nn.Conv1d(d_hid, d_in, 1) self.layer_norm = nn.LayerNorm(d_in) self.dropout = nn.Dropout(dropout) def forward(self, x): residual = x output = x.transpose(1, 2) output = self.w_2(F.relu(self.w_1(output))) output = output.transpose(1, 2) output = self.dropout(output) output = self.layer_norm(output + residual) return output class EncoderLayer(nn.Module): def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1): super(EncoderLayer, self).__init__() self.slf_attn = MultiHeadAttention( n_head, d_model, d_k, d_v, dropout=dropout) self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout) def forward(self, enc_input, non_pad_mask=None, slf_attn_mask=None): enc_output, enc_slf_attn = self.slf_attn( enc_input, enc_input, enc_input, mask=slf_attn_mask) enc_output *= non_pad_mask.float() enc_output = self.pos_ffn(enc_output) enc_output *= non_pad_mask.float() return enc_output, enc_slf_attn class TransformerEncoder(nn.Module): def __init__(self, n_layers, n_head, d_k, d_v, d_model, d_inner, dropout=0.1): super().__init__() self.layer_stack = nn.ModuleList([ EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout) for _ in range(n_layers)]) def forward(self, input_feats, num_objs): original_input_feats = input_feats input_feats = input_feats.split(num_objs, dim=0) input_feats = nn.utils.rnn.pad_sequence(input_feats, batch_first=True) bsz = len(num_objs) device = input_feats.device pad_len = max(num_objs) num_objs_ = torch.LongTensor(num_objs).to(device).unsqueeze(1).expand(-1, pad_len) slf_attn_mask 
= torch.arange(pad_len, device=device).view(1, -1).expand(bsz, -1).ge(num_objs_).unsqueeze(1).expand(-1, pad_len, -1) non_pad_mask = torch.arange(pad_len, device=device).to(device).view(1, -1).expand(bsz, -1).lt(num_objs_).unsqueeze(-1) enc_output = input_feats for enc_layer in self.layer_stack: enc_output, enc_slf_attn = enc_layer( enc_output, non_pad_mask=non_pad_mask, slf_attn_mask=slf_attn_mask) enc_output = enc_output[non_pad_mask.squeeze(-1)] return enc_output class TransformerContext(nn.Module): def __init__(self, config, obj_classes, rel_classes, in_channels): super().__init__() self.cfg = config if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_BOX: self.mode = 'predcls' if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL else 'sgcls' else: self.mode = 'sgdet' self.obj_classes = obj_classes self.rel_classes = rel_classes self.num_obj_cls = len(obj_classes) self.num_rel_cls = len(rel_classes) self.in_channels = in_channels self.obj_dim = in_channels self.embed_dim = self.cfg.MODEL.ROI_RELATION_HEAD.EMBED_DIM self.hidden_dim = self.cfg.MODEL.ROI_RELATION_HEAD.CONTEXT_HIDDEN_DIM self.nms_thresh = self.cfg.TEST.RELATION.LATER_NMS_PREDICTION_THRES self.dropout_rate = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.DROPOUT_RATE self.obj_layer = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.OBJ_LAYER self.edge_layer = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.REL_LAYER self.num_head = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.NUM_HEAD self.inner_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.INNER_DIM self.k_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.KEY_DIM self.v_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.VAL_DIM embed_vecs = obj_edge_vectors(self.obj_classes, wv_dir=self.cfg.GLOVE_DIR, wv_dim=self.embed_dim) self.obj_embed1 = nn.Embedding(self.num_obj_cls, self.embed_dim) self.obj_embed2 = nn.Embedding(self.num_obj_cls, self.embed_dim) with torch.no_grad(): self.obj_embed1.weight.copy_(embed_vecs, non_blocking=True) 
self.obj_embed2.weight.copy_(embed_vecs, non_blocking=True) self.bbox_embed = nn.Sequential(*[ nn.Linear(9, 32), nn.ReLU(inplace=True), nn.Dropout(0.1), nn.Linear(32, 128), nn.ReLU(inplace=True), nn.Dropout(0.1), ]) self.lin_obj = nn.Linear(self.in_channels + self.embed_dim + 128, self.hidden_dim) self.lin_edge = nn.Linear(self.embed_dim + self.hidden_dim + self.in_channels, self.hidden_dim) self.out_obj = nn.Linear(self.hidden_dim, self.num_obj_cls) self.context_obj = TransformerEncoder(self.obj_layer, self.num_head, self.k_dim, self.v_dim, self.hidden_dim, self.inner_dim, self.dropout_rate) self.context_edge = TransformerEncoder(self.edge_layer, self.num_head, self.k_dim, self.v_dim, self.hidden_dim, self.inner_dim, self.dropout_rate) def forward(self, roi_features, proposals, logger=None): use_gt_label = self.training or self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL obj_labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0) if use_gt_label else None if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL: obj_embed = self.obj_embed1(obj_labels) else: obj_logits = cat([proposal.get_field("predict_logits") for proposal in proposals], dim=0).detach() obj_embed = F.softmax(obj_logits, dim=1) @ self.obj_embed1.weight assert proposals[0].mode == 'xyxy' pos_embed = self.bbox_embed(encode_box_info(proposals)) obj_pre_rep = cat((roi_features, obj_embed, pos_embed), -1) num_objs = [len(p) for p in proposals] obj_pre_rep = self.lin_obj(obj_pre_rep) obj_feats = self.context_obj(obj_pre_rep, num_objs) if self.mode == 'predcls': obj_preds = obj_labels obj_dists = to_onehot(obj_preds, self.num_obj_cls) edge_pre_rep = cat((roi_features, obj_feats, self.obj_embed2(obj_labels)), dim=-1) else: obj_dists = self.out_obj(obj_feats) use_decoder_nms = self.mode == 'sgdet' and not self.training if use_decoder_nms: boxes_per_cls = [proposal.get_field('boxes_per_cls') for proposal in proposals] obj_preds = self.nms_per_cls(obj_dists, boxes_per_cls, num_objs) 
else: obj_preds = obj_dists[:, 1:].max(1)[1] + 1 edge_pre_rep = cat((roi_features, obj_feats, self.obj_embed2(obj_preds)), dim=-1) edge_pre_rep = self.lin_edge(edge_pre_rep) edge_ctx = self.context_edge(edge_pre_rep, num_objs) return obj_dists, obj_preds, edge_ctx def nms_per_cls(self, obj_dists, boxes_per_cls, num_objs): obj_dists = obj_dists.split(num_objs, dim=0) obj_preds = [] for i in range(len(num_objs)): is_overlap = nms_overlaps(boxes_per_cls[i]).cpu().numpy() >= self.nms_thresh tmax(obj_dists[i], -1).cpu().numpy() out_dists_sampled[:, 0] = -1 out_label = obj_dists[i].new(num_objs[i]).fill_(0) for i in range(num_objs[i]): box_ind, cls_ind = np.unravel_index(out_dists_sampled.argmax(), out_dists_sampled.shape) out_label[int(box_ind)] = int(cls_ind) out_dists_sampled[is_overlap[box_ind,:,cls_ind], cls_ind] = 0.0 out_dists_sampled[box_ind] = -1.0 obj_preds.append(out_label.long()) obj_preds = torch.cat(obj_preds, dim=0) return obj_preds
true
true
f70fd62b300ff414f3f9d7f1839393e9f800dd25
2,666
py
Python
scripts/train.py
lenna-project/birds-plugin
c548790dcb0593b80ea6da4605e7aa32e3f141ae
[ "MIT" ]
null
null
null
scripts/train.py
lenna-project/birds-plugin
c548790dcb0593b80ea6da4605e7aa32e3f141ae
[ "MIT" ]
null
null
null
scripts/train.py
lenna-project/birds-plugin
c548790dcb0593b80ea6da4605e7aa32e3f141ae
[ "MIT" ]
null
null
null
import logging import numpy as np import os import PIL import PIL.Image import tensorflow as tf from tensorflow.keras.layers import Layer, Conv2D, MaxPool2D, Dense, Flatten, Dropout, GlobalAveragePooling2D from tensorflow.keras.applications import MobileNetV2 from tensorflow.keras import layers from tensorflow.keras import Model img_height = 224 img_width = 224 batch_size = 64 data_dir = './100-bird-species/' data_dir_train = os.path.join(data_dir, 'train') data_dir_valid = os.path.join(data_dir, 'valid') data_dir_test = os.path.join(data_dir, 'test') train_ds = tf.keras.utils.image_dataset_from_directory( data_dir_train, label_mode='categorical', seed=123, image_size=(img_height, img_width), batch_size=batch_size) valid_ds = tf.keras.utils.image_dataset_from_directory( data_dir_valid, label_mode='categorical', seed=123, image_size=(img_height, img_width), batch_size=batch_size) test_ds = tf.keras.utils.image_dataset_from_directory( data_dir_test, label_mode='categorical', seed=123, image_size=(img_height, img_width), batch_size=batch_size) def normalize(img, label): return img / 255.0, label data_augmentation = tf.keras.Sequential([ tf.keras.layers.RandomFlip("horizontal"), tf.keras.layers.RandomRotation(0.2), tf.keras.layers.RandomZoom(0.2) ]) train_dataset = (train_ds .map(normalize) .map(lambda x, y: (data_augmentation(x), y)) .prefetch(tf.data.AUTOTUNE)) valid_dataset = valid_ds.map(normalize) test_dataset = test_ds.map(normalize) def get_birds_mobilenet(): pre_trained_model = MobileNetV2( include_top=False, input_shape=(img_height, img_width, 3), classifier_activation='softmax' ) for layer in pre_trained_model.layers: layer.trainable = False last_layer = pre_trained_model.output last_layer.trainable = True x = GlobalAveragePooling2D()(last_layer) x = Dense(1024, activation='relu')(x) x = layers.Dense(325, activation='softmax')(x) model = Model(pre_trained_model.input, x) return model model = get_birds_mobilenet() model.summary() 
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) checkpoint_path = "./checkpoints/birds_mobilenet/" model.load_weights(checkpoint_path) model_history = model.fit( train_dataset, validation_data=valid_dataset, epochs=200, callbacks=[ #tf.keras.callbacks.EarlyStopping(patience=5), tf.keras.callbacks.ModelCheckpoint( filepath=checkpoint_path, verbose=0, save_freq="epoch") ])
26.137255
109
0.716429
import logging import numpy as np import os import PIL import PIL.Image import tensorflow as tf from tensorflow.keras.layers import Layer, Conv2D, MaxPool2D, Dense, Flatten, Dropout, GlobalAveragePooling2D from tensorflow.keras.applications import MobileNetV2 from tensorflow.keras import layers from tensorflow.keras import Model img_height = 224 img_width = 224 batch_size = 64 data_dir = './100-bird-species/' data_dir_train = os.path.join(data_dir, 'train') data_dir_valid = os.path.join(data_dir, 'valid') data_dir_test = os.path.join(data_dir, 'test') train_ds = tf.keras.utils.image_dataset_from_directory( data_dir_train, label_mode='categorical', seed=123, image_size=(img_height, img_width), batch_size=batch_size) valid_ds = tf.keras.utils.image_dataset_from_directory( data_dir_valid, label_mode='categorical', seed=123, image_size=(img_height, img_width), batch_size=batch_size) test_ds = tf.keras.utils.image_dataset_from_directory( data_dir_test, label_mode='categorical', seed=123, image_size=(img_height, img_width), batch_size=batch_size) def normalize(img, label): return img / 255.0, label data_augmentation = tf.keras.Sequential([ tf.keras.layers.RandomFlip("horizontal"), tf.keras.layers.RandomRotation(0.2), tf.keras.layers.RandomZoom(0.2) ]) train_dataset = (train_ds .map(normalize) .map(lambda x, y: (data_augmentation(x), y)) .prefetch(tf.data.AUTOTUNE)) valid_dataset = valid_ds.map(normalize) test_dataset = test_ds.map(normalize) def get_birds_mobilenet(): pre_trained_model = MobileNetV2( include_top=False, input_shape=(img_height, img_width, 3), classifier_activation='softmax' ) for layer in pre_trained_model.layers: layer.trainable = False last_layer = pre_trained_model.output last_layer.trainable = True x = GlobalAveragePooling2D()(last_layer) x = Dense(1024, activation='relu')(x) x = layers.Dense(325, activation='softmax')(x) model = Model(pre_trained_model.input, x) return model model = get_birds_mobilenet() model.summary() 
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) checkpoint_path = "./checkpoints/birds_mobilenet/" model.load_weights(checkpoint_path) model_history = model.fit( train_dataset, validation_data=valid_dataset, epochs=200, callbacks=[ tf.keras.callbacks.ModelCheckpoint( filepath=checkpoint_path, verbose=0, save_freq="epoch") ])
true
true
f70fd6ce4c9506385e733ceaf337664ae0b4ab6f
1,198
py
Python
learn_pipe/model/lstm.py
tpimentelms/meaning2form
624b3947b3ac2a7a521cf35c762fb56508236f74
[ "MIT" ]
1
2021-07-11T09:27:13.000Z
2021-07-11T09:27:13.000Z
learn_pipe/model/lstm.py
tpimentelms/meaning2form
624b3947b3ac2a7a521cf35c762fb56508236f74
[ "MIT" ]
2
2021-09-08T01:04:05.000Z
2022-03-11T23:50:19.000Z
learn_pipe/model/lstm.py
tpimentelms/meaning2form
624b3947b3ac2a7a521cf35c762fb56508236f74
[ "MIT" ]
1
2019-10-22T14:22:53.000Z
2019-10-22T14:22:53.000Z
import torch.nn as nn from .base import BaseLM class IpaLM(BaseLM): name = 'lstm' def __init__(self, vocab_size, hidden_size, nlayers=1, dropout=0.1, embedding_size=None, **kwargs): super().__init__( vocab_size, hidden_size, nlayers=nlayers, dropout=dropout, embedding_size=embedding_size, **kwargs) self.embedding = nn.Embedding(vocab_size, self.embedding_size) self.lstm = nn.LSTM( self.embedding_size, hidden_size, nlayers, dropout=(dropout if nlayers > 1 else 0), batch_first=True) self.dropout = nn.Dropout(dropout) self.out = nn.Linear(hidden_size, vocab_size) def forward(self, x, idx): h_old = self.context(idx) x_emb = self.dropout(self.get_embedding(x)) c_t, h_t = self.lstm(x_emb, h_old) c_t = self.dropout(c_t).contiguous() logits = self.out(c_t) return logits, h_t def get_embedding(self, x): return self.embedding(x) def initHidden(self, bsz=1): weight = next(self.parameters()).data return weight.new(self.nlayers, bsz, self.hidden_size).zero_(), \ weight.new(self.nlayers, bsz, self.hidden_size).zero_()
33.277778
113
0.649416
import torch.nn as nn from .base import BaseLM class IpaLM(BaseLM): name = 'lstm' def __init__(self, vocab_size, hidden_size, nlayers=1, dropout=0.1, embedding_size=None, **kwargs): super().__init__( vocab_size, hidden_size, nlayers=nlayers, dropout=dropout, embedding_size=embedding_size, **kwargs) self.embedding = nn.Embedding(vocab_size, self.embedding_size) self.lstm = nn.LSTM( self.embedding_size, hidden_size, nlayers, dropout=(dropout if nlayers > 1 else 0), batch_first=True) self.dropout = nn.Dropout(dropout) self.out = nn.Linear(hidden_size, vocab_size) def forward(self, x, idx): h_old = self.context(idx) x_emb = self.dropout(self.get_embedding(x)) c_t, h_t = self.lstm(x_emb, h_old) c_t = self.dropout(c_t).contiguous() logits = self.out(c_t) return logits, h_t def get_embedding(self, x): return self.embedding(x) def initHidden(self, bsz=1): weight = next(self.parameters()).data return weight.new(self.nlayers, bsz, self.hidden_size).zero_(), \ weight.new(self.nlayers, bsz, self.hidden_size).zero_()
true
true
f70fd8904b39f8d1a6fff3821f6d106d8fd7a3e2
6,582
py
Python
tests/lax_numpy_einsum_test.py
j-towns/jax
49f3f991d4faae22fcd9d8248f3d36575b5004f6
[ "ECL-2.0", "Apache-2.0" ]
3
2019-02-11T16:44:26.000Z
2019-12-21T06:17:36.000Z
tests/lax_numpy_einsum_test.py
j-towns/jax
49f3f991d4faae22fcd9d8248f3d36575b5004f6
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
tests/lax_numpy_einsum_test.py
j-towns/jax
49f3f991d4faae22fcd9d8248f3d36575b5004f6
[ "ECL-2.0", "Apache-2.0" ]
1
2020-10-13T13:25:49.000Z
2020-10-13T13:25:49.000Z
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import defaultdict import itertools import numpy as onp from absl.testing import absltest from absl.testing import parameterized import jax.numpy as np import jax.test_util as jtu from jax.config import config config.parse_flags_with_absl() def rng(): return onp.random.RandomState(0) class EinsumTest(jtu.JaxTestCase): def _check(self, s, *ops): a = onp.einsum(s, *ops) b = np.einsum(s, *ops) self.assertAllClose(a, b, atol=1e-4, rtol=1e-4, check_dtypes=True) def test_three_operands_1(self): r = rng() x = r.randn(3) y = r.randn(4) z = r.randn(5) s = 'i,j,k->ijk' self._check(s, x, y, z) def test_three_operands_2(self): r = rng() x = r.randn(3) y = r.randn(4) z = r.randn(5) s = 'i,j,k->ijk' self._check(s, x, y, z) def test_two_operands_1(self): r = rng() x = r.randn(3, 4) y = r.randn(4) s = 'ij,j->i' self._check(s, x, y) def test_two_operands_2(self): r = rng() x = r.randn(3, 4, 5) y = r.randn(4) s = 'ijk,j->i' self._check(s, x, y) def test_two_operands_3(self): r = rng() x = r.randn(3, 4, 3) y = r.randn(3) s = 'iji,i->j' self._check(s, x, y) def test_two_operands_4(self): r = rng() x = r.randn(3, 4) y = r.randn(3, 4) s = 'ij,ij->' self._check(s, x, y) def test_two_operands_5(self): r = rng() x = r.randn(10, 2, 3) y = r.randn(3, 4) s = 'nij,jk->nik' self._check(s, x, y) def 
test_two_operands_6(self): # based on https://github.com/google/jax/issues/37#issuecomment-448572187 r = rng() x = r.randn(2, 1) y = r.randn(2, 3, 4) s = 'sa,shb->shab' self._check(s, x, y) def test_one_operand_1(self): r = rng() x = r.randn(3, 4, 5) s = 'ijk->j' self._check(s, x) def test_one_operand_2(self): r = rng() x = r.randn(3, 4, 5) s = 'ijk->kij' self._check(s, x) def test_one_operand_3(self): r = rng() x = r.randn(3, 4, 5) s = 'ijk->ki' self._check(s, x) def test_one_operand_4(self): r = rng() x = r.randn(3, 4, 5) s = 'ijk->ki' self._check(s, x) def test_one_operand_5(self): r = rng() x = r.randn(2, 3, 4, 5) s = '...ijk->...ki' self._check(s, x) def test_one_operand_6(self): r = rng() x = r.randn(3, 4, 5) s = '...ijk->ki' self._check(s, x) def test_one_operand_7(self): r = rng() x = r.randn(3, 3) s = 'ii->' self._check(s, x) def test_one_operand_8(self): r = rng() x = r.randn(3, 3) s = 'ij->' self._check(s, x) def test_one_operand_9(self): r = rng() x = r.randn(3, 3, 3) s = 'iii->' self._check(s, x) def test_one_operand_10(self): r = rng() x = r.randn(3, 3) s = 'ii->i' self._check(s, x) def test_one_operand_11(self): r = rng() x = r.randn(3, 3, 4) s = 'iij->i' self._check(s, x) def test_one_operand_12(self): r = rng() x = r.randn(3, 3, 3) s = 'iii->i' self._check(s, x) def test_one_operand_13(self): r = rng() x = r.randn(3, 3, 5, 4, 4) s = 'iijkk->i' self._check(s, x) def test_one_operand_14(self): r = rng() x = r.randn(3, 3, 5, 4, 4) s = 'iijkk->ik' self._check(s, x) def test_one_operand_15(self): r = rng() x = r.randn(3, 3, 5, 4, 4) s = 'iijkl->il' self._check(s, x) def test_one_operand_16(self): r = rng() x = r.randn(3, 3) s = 'ij->ij' self._check(s, x) def test_tf_unsupported_1(self): # from https://www.tensorflow.org/api_docs/python/tf/einsum r = rng() x = r.randn(2, 3, 5, 1) y = r.randn(3, 4, 5, 1) s = 'ij...,jk...->ik...' 
self._check(s, x, y) def test_tf_unsupported_2(self): # from https://www.tensorflow.org/api_docs/python/tf/einsum r = rng() x = r.randn(2, 3, 3) y = r.randn(4) s = 'ijj,k->ik' self._check(s, x, y) def test_tf_unsupported_3(self): # from https://www.tensorflow.org/api_docs/python/tf/einsum r = rng() x = r.randn(2, 3) y = r.randn(2, 3) z = r.randn(3, 4) s = 'ij,ij,jk->ik' self._check(s, x, y, z) # these tests are based on https://github.com/dask/dask/pull/3412/files @parameterized.named_parameters( {"testcase_name": "_{}".format(einstr), "einstr": einstr} for einstr in [ 'abc,bad->abcd', 'abcdef,bcdfg->abcdeg', 'ea,fb,abcd,gc,hd->efgh', 'ab,b', 'aa', 'a,a->', 'a,a->a', 'a,a', 'a,b', 'a,b,c', 'a', 'ba,b', 'ba,b->', 'defab,fedbc->defac', 'ab...,bc...->ac...', 'a...a', 'abc...->cba...', '...ab->...a', 'a...a->a...', # Following 2 from # https://stackoverflow.com/a/19203475/1611416 '...abc,...abcd->...d', 'ab...,b->ab...', # https://github.com/dask/dask/pull/3412#discussion_r182413444 'aa->a', 'ab,ab,c->c', 'aab,bc->ac', 'aab,bcc->ac', 'fdf,cdd,ccd,afe->ae', 'fff,fae,bef,def->abd', ]) def test_from_dask(self, einstr): r = rng() if '->' in einstr: input_str, result_names = einstr.split('->') else: input_str = einstr input_names = input_str.split(',') dims = itertools.cycle([2, 3, 4]) shapes = defaultdict(lambda: next(dims)) input_shapes = [tuple(shapes[c] for c in names.replace('...', '01')) for names in input_names] operands = [r.randn(*shape) for shape in input_shapes] self._check(einstr, *operands) def test_ordered_front_batch_dim_case(self): x = onp.ones((1,8,20,4)) y = onp.ones((1,8,20,4)) s = 'ijkl,ijml->ijkm' self._check(s, x, y) if __name__ == '__main__': absltest.main()
23.257951
77
0.55272
from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import defaultdict import itertools import numpy as onp from absl.testing import absltest from absl.testing import parameterized import jax.numpy as np import jax.test_util as jtu from jax.config import config config.parse_flags_with_absl() def rng(): return onp.random.RandomState(0) class EinsumTest(jtu.JaxTestCase): def _check(self, s, *ops): a = onp.einsum(s, *ops) b = np.einsum(s, *ops) self.assertAllClose(a, b, atol=1e-4, rtol=1e-4, check_dtypes=True) def test_three_operands_1(self): r = rng() x = r.randn(3) y = r.randn(4) z = r.randn(5) s = 'i,j,k->ijk' self._check(s, x, y, z) def test_three_operands_2(self): r = rng() x = r.randn(3) y = r.randn(4) z = r.randn(5) s = 'i,j,k->ijk' self._check(s, x, y, z) def test_two_operands_1(self): r = rng() x = r.randn(3, 4) y = r.randn(4) s = 'ij,j->i' self._check(s, x, y) def test_two_operands_2(self): r = rng() x = r.randn(3, 4, 5) y = r.randn(4) s = 'ijk,j->i' self._check(s, x, y) def test_two_operands_3(self): r = rng() x = r.randn(3, 4, 3) y = r.randn(3) s = 'iji,i->j' self._check(s, x, y) def test_two_operands_4(self): r = rng() x = r.randn(3, 4) y = r.randn(3, 4) s = 'ij,ij->' self._check(s, x, y) def test_two_operands_5(self): r = rng() x = r.randn(10, 2, 3) y = r.randn(3, 4) s = 'nij,jk->nik' self._check(s, x, y) def test_two_operands_6(self): r.randn(2, 1) y = r.randn(2, 3, 4) s = 'sa,shb->shab' self._check(s, x, y) def test_one_operand_1(self): r = rng() x = r.randn(3, 4, 5) s = 'ijk->j' self._check(s, x) def test_one_operand_2(self): r = rng() x = r.randn(3, 4, 5) s = 'ijk->kij' self._check(s, x) def test_one_operand_3(self): r = rng() x = r.randn(3, 4, 5) s = 'ijk->ki' self._check(s, x) def test_one_operand_4(self): r = rng() x = r.randn(3, 4, 5) s = 'ijk->ki' self._check(s, x) def test_one_operand_5(self): r = rng() x = r.randn(2, 3, 4, 5) s = '...ijk->...ki' self._check(s, x) def 
test_one_operand_6(self): r = rng() x = r.randn(3, 4, 5) s = '...ijk->ki' self._check(s, x) def test_one_operand_7(self): r = rng() x = r.randn(3, 3) s = 'ii->' self._check(s, x) def test_one_operand_8(self): r = rng() x = r.randn(3, 3) s = 'ij->' self._check(s, x) def test_one_operand_9(self): r = rng() x = r.randn(3, 3, 3) s = 'iii->' self._check(s, x) def test_one_operand_10(self): r = rng() x = r.randn(3, 3) s = 'ii->i' self._check(s, x) def test_one_operand_11(self): r = rng() x = r.randn(3, 3, 4) s = 'iij->i' self._check(s, x) def test_one_operand_12(self): r = rng() x = r.randn(3, 3, 3) s = 'iii->i' self._check(s, x) def test_one_operand_13(self): r = rng() x = r.randn(3, 3, 5, 4, 4) s = 'iijkk->i' self._check(s, x) def test_one_operand_14(self): r = rng() x = r.randn(3, 3, 5, 4, 4) s = 'iijkk->ik' self._check(s, x) def test_one_operand_15(self): r = rng() x = r.randn(3, 3, 5, 4, 4) s = 'iijkl->il' self._check(s, x) def test_one_operand_16(self): r = rng() x = r.randn(3, 3) s = 'ij->ij' self._check(s, x) def test_tf_unsupported_1(self): r = rng() x = r.randn(2, 3, 5, 1) y = r.randn(3, 4, 5, 1) s = 'ij...,jk...->ik...' 
self._check(s, x, y) def test_tf_unsupported_2(self): r = rng() x = r.randn(2, 3, 3) y = r.randn(4) s = 'ijj,k->ik' self._check(s, x, y) def test_tf_unsupported_3(self): r = rng() x = r.randn(2, 3) y = r.randn(2, 3) z = r.randn(3, 4) s = 'ij,ij,jk->ik' self._check(s, x, y, z) @parameterized.named_parameters( {"testcase_name": "_{}".format(einstr), "einstr": einstr} for einstr in [ 'abc,bad->abcd', 'abcdef,bcdfg->abcdeg', 'ea,fb,abcd,gc,hd->efgh', 'ab,b', 'aa', 'a,a->', 'a,a->a', 'a,a', 'a,b', 'a,b,c', 'a', 'ba,b', 'ba,b->', 'defab,fedbc->defac', 'ab...,bc...->ac...', 'a...a', 'abc...->cba...', '...ab->...a', 'a...a->a...', ab...,b->ab...', 'ab,ab,c->c', 'aab,bc->ac', 'aab,bcc->ac', 'fdf,cdd,ccd,afe->ae', 'fff,fae,bef,def->abd', ]) def test_from_dask(self, einstr): r = rng() if '->' in einstr: input_str, result_names = einstr.split('->') else: input_str = einstr input_names = input_str.split(',') dims = itertools.cycle([2, 3, 4]) shapes = defaultdict(lambda: next(dims)) input_shapes = [tuple(shapes[c] for c in names.replace('...', '01')) for names in input_names] operands = [r.randn(*shape) for shape in input_shapes] self._check(einstr, *operands) def test_ordered_front_batch_dim_case(self): x = onp.ones((1,8,20,4)) y = onp.ones((1,8,20,4)) s = 'ijkl,ijml->ijkm' self._check(s, x, y) if __name__ == '__main__': absltest.main()
true
true
f70fd8bf2d8002f88cdec8c2042ed6b8aebfceb0
20,088
py
Python
pysnmp-with-texts/SW-STRCTURE-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
8
2019-05-09T17:04:00.000Z
2021-06-09T06:50:51.000Z
pysnmp-with-texts/SW-STRCTURE-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
4
2019-05-31T16:42:59.000Z
2020-01-31T21:57:17.000Z
pysnmp-with-texts/SW-STRCTURE-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module SW-STRCTURE-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SW-STRCTURE-MIB # Produced by pysmi-0.3.4 at Wed May 1 15:12:42 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion") NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance") MibIdentifier, Bits, Counter32, NotificationType, TimeTicks, ObjectIdentity, Integer32, NotificationType, Counter64, Gauge32, Unsigned32, IpAddress, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Bits", "Counter32", "NotificationType", "TimeTicks", "ObjectIdentity", "Integer32", "NotificationType", "Counter64", "Gauge32", "Unsigned32", "IpAddress", "enterprises", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "iso") TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString") marconi = MibIdentifier((1, 3, 6, 1, 4, 1, 326)) systems = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2)) external = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20)) dlink = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1)) dlinkcommon = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 1)) golf = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2)) golfproducts = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1)) es2000 = 
MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3)) golfcommon = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2)) marconi_mgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2)).setLabel("marconi-mgmt") es2000Mgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28)) swStructure = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1)) swStructInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1)) swStructDevType = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 1), ObjectIdentifier()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevType.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevType.setDescription('Specifies the device type.') swStructDevDescr = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevDescr.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevDescr.setDescription('Describes the type of the device.') swStructDevPortEncodingFactor = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevPortEncodingFactor.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevPortEncodingFactor.setDescription('The factor to encode the global port ID from unit ID and the local port ID. This global port ID is required to access the bridge MIB and spanning tree MIB defined by the standard body. This global port ID will provide a unigue port ID for each port across the entire device. 
Example: supoposed that the encoding factor is 16, then port 2 located on module 2 will be encoded as port 18') swStructDevLedInfo = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevLedInfo.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevLedInfo.setDescription('Provides the LED informations of the cpu slot. bit7 - cpu status(always 1) bit6 - console status(0: console not in used, 1: console in used) bit5 - power status(always 1) bit 4 ~ bit 0 - not used.') swStructDevMaxModuleNum = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevMaxModuleNum.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevMaxModuleNum.setDescription('Maximum number of modules allowed on the unit.') swStructDevMaxPortNum = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevMaxPortNum.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevMaxPortNum.setDescription('Maximum number of ports allowed on the unit.') swStructDevNumOfPortInUse = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevNumOfPortInUse.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevNumOfPortInUse.setDescription('Number of ports which has link being connected to the port.') swStructDevOperStatus = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 9, 10))).clone(namedValues=NamedValues(("other", 1), ("notAvail", 2), ("removed", 3), ("disabled", 4), ("normal", 5), ("nonFatalErr", 9), ("fatalErr", 10)))).setMaxAccess("readonly") if mibBuilder.loadTexts: 
swStructDevOperStatus.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevOperStatus.setDescription('Describes the operation status for the unit.') swStructDevLastChange = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 9), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevLastChange.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevLastChange.setDescription('Provides the time that the unit is up last time.') swStructModuleTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2), ) if mibBuilder.loadTexts: swStructModuleTable.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleTable.setDescription('A table that contains information about a module.') swStructModuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1), ).setIndexNames((0, "SW-STRCTURE-MIB", "swStructModuleUnitIndex"), (0, "SW-STRCTURE-MIB", "swStructModuleIndex")) if mibBuilder.loadTexts: swStructModuleEntry.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleEntry.setDescription('A list of information for a module.') swStructModuleUnitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleUnitIndex.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleUnitIndex.setDescription('ID of the unit in the device.') swStructModuleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 2), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleIndex.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleIndex.setDescription('ID of the Module in the device.') swStructModuleType = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 3), ObjectIdentifier()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleType.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleType.setDescription('Type of the 
module.') swStructModuleDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleDescr.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleDescr.setDescription('Type of the module in displayed string format.') swStructModuleVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleVersion.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleVersion.setDescription('Provides PCB version of the module.') swStructModuleMaxPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleMaxPortNum.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleMaxPortNum.setDescription('Maximum number of ports allowed on the module.') swStructModuleEncodingOffset = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleEncodingOffset.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleEncodingOffset.setDescription('Each module has a offset for encoding the port ID relative to a unit. This encoding will provide a unigue port ID for ports located on the device. 
Example: Supposed that the offset for module 2 is 16, then port 2 located on module 2 will be encoded as port 18') swStructModuleLEDInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleLEDInfo.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleLEDInfo.setDescription('Gets LED informations on specifiled module.') swStructModuleOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 9, 10))).clone(namedValues=NamedValues(("other", 1), ("notAvail", 2), ("removed", 3), ("disabled", 4), ("normal", 5), ("nonFatalErr", 9), ("fatalErr", 10)))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleOperStatus.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleOperStatus.setDescription('Provides operation status of the module.') swStructModuleLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 10), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleLastChange.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleLastChange.setDescription('Provides the time that the module is up last time.') swStructPowerTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3), ) if mibBuilder.loadTexts: swStructPowerTable.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerTable.setDescription('A table that contains information about every power.') swStructPowerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1), ).setIndexNames((0, "SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: swStructPowerEntry.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerEntry.setDescription('A list of information for each power.') swStructPowerIndex = MibTableColumn((1, 3, 6, 
1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerIndex.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerIndex.setDescription('ID of the power supply in the unit.') swStructPowerInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerInfo.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerInfo.setDescription('Displays informations of power. Includes vendor, version and so on.') swStructPowerTemperature = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerTemperature.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerTemperature.setDescription('Displays temperature value of power by Fahrenheit.') swStructPowerVolt = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 9))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerVolt.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerVolt.setDescription('Displays volt value of power by V unit.') swStructPowerAmp = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerAmp.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerAmp.setDescription('Displays amp value of power by A unit.') swStructPowerFan1Status = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fanOk", 1), ("fanFail", 2), ("other", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerFan1Status.setStatus('mandatory') if mibBuilder.loadTexts: 
swStructPowerFan1Status.setDescription('Describes the operation status of the power fan1.') swStructPowerFan2Status = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fanOk", 1), ("fanFail", 2), ("other", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerFan2Status.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerFan2Status.setDescription('Describes the operation status of the power fan2.') swStructPowerStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("acFailPsFail", 1), ("acPresentPsFail", 2), ("psGood", 3), ("other", 4)))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerStatus.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerStatus.setDescription('Describes the operation status of the power supply.') swStructSystemFanTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 4), ) if mibBuilder.loadTexts: swStructSystemFanTable.setStatus('mandatory') if mibBuilder.loadTexts: swStructSystemFanTable.setDescription('A table that contains informations about system fans.') swStructSystemFanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 4, 1), ).setIndexNames((0, "SW-STRCTURE-MIB", "swStructSystemFanIndex")) if mibBuilder.loadTexts: swStructSystemFanEntry.setStatus('mandatory') if mibBuilder.loadTexts: swStructSystemFanEntry.setDescription('A list of informations for each system fan.') swStructSystemFanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 4, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructSystemFanIndex.setStatus('mandatory') if mibBuilder.loadTexts: swStructSystemFanIndex.setDescription('ID of designed system fans.') 
swStructSystemFanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fanOk", 1), ("fanFail", 2), ("other", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructSystemFanStatus.setStatus('mandatory') if mibBuilder.loadTexts: swStructSystemFanStatus.setDescription('Describes the operation status of the system fans.') powerTemperatureWarnning = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,5)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerTemperatureWarnning.setDescription('The trap is sent whenever the power state enter the temperature warnning state. ') powerVoltWarnning = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,6)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerVoltWarnning.setDescription('The trap is sent whenever the power state enter the volt warnning state. ') powerCurrentWarnning = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,7)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerCurrentWarnning.setDescription('The trap is sent whenever the power state enter the current warnning state. ') powerFan1Fail = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,8)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerFan1Fail.setDescription('The trap is sent whenever the power state enter the power fan1 fail state. ') powerFan2Fail = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,9)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerFan2Fail.setDescription('The trap is sent whenever the power state enter the power fan2 fail state. 
') systemFanFail = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,10)).setObjects(("SW-STRCTURE-MIB", "swStructSystemFanIndex")) if mibBuilder.loadTexts: systemFanFail.setDescription('The trap is sent whenever the power state enter the system fans fail state. ') powerRemoved = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,11)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerRemoved.setDescription('The trap is sent whenever the power is removed.') powerInserted = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,12)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerInserted.setDescription('The trap is sent whenever the power is inserted.') powerBad = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,13)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerBad.setDescription('The trap is sent whenever the power is bad.') mibBuilder.exportSymbols("SW-STRCTURE-MIB", swStructPowerInfo=swStructPowerInfo, swStructPowerAmp=swStructPowerAmp, dlinkcommon=dlinkcommon, swStructModuleType=swStructModuleType, swStructModuleMaxPortNum=swStructModuleMaxPortNum, powerCurrentWarnning=powerCurrentWarnning, swStructDevMaxPortNum=swStructDevMaxPortNum, swStructPowerTable=swStructPowerTable, swStructDevPortEncodingFactor=swStructDevPortEncodingFactor, dlink=dlink, swStructPowerFan1Status=swStructPowerFan1Status, swStructModuleOperStatus=swStructModuleOperStatus, powerInserted=powerInserted, external=external, swStructPowerVolt=swStructPowerVolt, powerBad=powerBad, swStructModuleIndex=swStructModuleIndex, swStructModuleVersion=swStructModuleVersion, es2000Mgmt=es2000Mgmt, swStructModuleEntry=swStructModuleEntry, golfcommon=golfcommon, swStructPowerEntry=swStructPowerEntry, swStructModuleUnitIndex=swStructModuleUnitIndex, swStructPowerStatus=swStructPowerStatus, swStructModuleDescr=swStructModuleDescr, 
swStructModuleLEDInfo=swStructModuleLEDInfo, powerFan1Fail=powerFan1Fail, swStructPowerTemperature=swStructPowerTemperature, swStructModuleTable=swStructModuleTable, swStructDevLastChange=swStructDevLastChange, swStructDevType=swStructDevType, swStructPowerFan2Status=swStructPowerFan2Status, swStructDevMaxModuleNum=swStructDevMaxModuleNum, es2000=es2000, swStructModuleLastChange=swStructModuleLastChange, marconi=marconi, swStructSystemFanStatus=swStructSystemFanStatus, swStructModuleEncodingOffset=swStructModuleEncodingOffset, powerRemoved=powerRemoved, swStructInfo=swStructInfo, systemFanFail=systemFanFail, swStructSystemFanEntry=swStructSystemFanEntry, swStructSystemFanIndex=swStructSystemFanIndex, swStructDevOperStatus=swStructDevOperStatus, golf=golf, swStructSystemFanTable=swStructSystemFanTable, marconi_mgmt=marconi_mgmt, swStructPowerIndex=swStructPowerIndex, powerVoltWarnning=powerVoltWarnning, powerFan2Fail=powerFan2Fail, systems=systems, swStructDevDescr=swStructDevDescr, swStructDevNumOfPortInUse=swStructDevNumOfPortInUse, golfproducts=golfproducts, powerTemperatureWarnning=powerTemperatureWarnning, swStructDevLedInfo=swStructDevLedInfo, swStructure=swStructure)
133.033113
2,170
0.759359
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion") NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance") MibIdentifier, Bits, Counter32, NotificationType, TimeTicks, ObjectIdentity, Integer32, NotificationType, Counter64, Gauge32, Unsigned32, IpAddress, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Bits", "Counter32", "NotificationType", "TimeTicks", "ObjectIdentity", "Integer32", "NotificationType", "Counter64", "Gauge32", "Unsigned32", "IpAddress", "enterprises", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "iso") TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString") marconi = MibIdentifier((1, 3, 6, 1, 4, 1, 326)) systems = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2)) external = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20)) dlink = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1)) dlinkcommon = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 1)) golf = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2)) golfproducts = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1)) es2000 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3)) golfcommon = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2)) marconi_mgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2)).setLabel("marconi-mgmt") es2000Mgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28)) swStructure = MibIdentifier((1, 3, 6, 
1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1)) swStructInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1)) swStructDevType = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 1), ObjectIdentifier()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevType.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevType.setDescription('Specifies the device type.') swStructDevDescr = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevDescr.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevDescr.setDescription('Describes the type of the device.') swStructDevPortEncodingFactor = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevPortEncodingFactor.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevPortEncodingFactor.setDescription('The factor to encode the global port ID from unit ID and the local port ID. This global port ID is required to access the bridge MIB and spanning tree MIB defined by the standard body. This global port ID will provide a unigue port ID for each port across the entire device. Example: supoposed that the encoding factor is 16, then port 2 located on module 2 will be encoded as port 18') swStructDevLedInfo = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevLedInfo.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevLedInfo.setDescription('Provides the LED informations of the cpu slot. 
bit7 - cpu status(always 1) bit6 - console status(0: console not in used, 1: console in used) bit5 - power status(always 1) bit 4 ~ bit 0 - not used.') swStructDevMaxModuleNum = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevMaxModuleNum.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevMaxModuleNum.setDescription('Maximum number of modules allowed on the unit.') swStructDevMaxPortNum = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevMaxPortNum.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevMaxPortNum.setDescription('Maximum number of ports allowed on the unit.') swStructDevNumOfPortInUse = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevNumOfPortInUse.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevNumOfPortInUse.setDescription('Number of ports which has link being connected to the port.') swStructDevOperStatus = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 9, 10))).clone(namedValues=NamedValues(("other", 1), ("notAvail", 2), ("removed", 3), ("disabled", 4), ("normal", 5), ("nonFatalErr", 9), ("fatalErr", 10)))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevOperStatus.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevOperStatus.setDescription('Describes the operation status for the unit.') swStructDevLastChange = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 9), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructDevLastChange.setStatus('mandatory') if mibBuilder.loadTexts: swStructDevLastChange.setDescription('Provides the time that the unit is up last time.') swStructModuleTable = 
MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2), ) if mibBuilder.loadTexts: swStructModuleTable.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleTable.setDescription('A table that contains information about a module.') swStructModuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1), ).setIndexNames((0, "SW-STRCTURE-MIB", "swStructModuleUnitIndex"), (0, "SW-STRCTURE-MIB", "swStructModuleIndex")) if mibBuilder.loadTexts: swStructModuleEntry.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleEntry.setDescription('A list of information for a module.') swStructModuleUnitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleUnitIndex.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleUnitIndex.setDescription('ID of the unit in the device.') swStructModuleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 2), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleIndex.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleIndex.setDescription('ID of the Module in the device.') swStructModuleType = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 3), ObjectIdentifier()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleType.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleType.setDescription('Type of the module.') swStructModuleDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleDescr.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleDescr.setDescription('Type of the module in displayed string format.') swStructModuleVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 5), 
Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleVersion.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleVersion.setDescription('Provides PCB version of the module.') swStructModuleMaxPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleMaxPortNum.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleMaxPortNum.setDescription('Maximum number of ports allowed on the module.') swStructModuleEncodingOffset = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleEncodingOffset.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleEncodingOffset.setDescription('Each module has a offset for encoding the port ID relative to a unit. This encoding will provide a unigue port ID for ports located on the device. Example: Supposed that the offset for module 2 is 16, then port 2 located on module 2 will be encoded as port 18') swStructModuleLEDInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleLEDInfo.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleLEDInfo.setDescription('Gets LED informations on specifiled module.') swStructModuleOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 9, 10))).clone(namedValues=NamedValues(("other", 1), ("notAvail", 2), ("removed", 3), ("disabled", 4), ("normal", 5), ("nonFatalErr", 9), ("fatalErr", 10)))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleOperStatus.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleOperStatus.setDescription('Provides operation status of the 
module.') swStructModuleLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 10), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructModuleLastChange.setStatus('mandatory') if mibBuilder.loadTexts: swStructModuleLastChange.setDescription('Provides the time that the module is up last time.') swStructPowerTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3), ) if mibBuilder.loadTexts: swStructPowerTable.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerTable.setDescription('A table that contains information about every power.') swStructPowerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1), ).setIndexNames((0, "SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: swStructPowerEntry.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerEntry.setDescription('A list of information for each power.') swStructPowerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerIndex.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerIndex.setDescription('ID of the power supply in the unit.') swStructPowerInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerInfo.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerInfo.setDescription('Displays informations of power. 
Includes vendor, version and so on.') swStructPowerTemperature = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerTemperature.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerTemperature.setDescription('Displays temperature value of power by Fahrenheit.') swStructPowerVolt = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 9))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerVolt.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerVolt.setDescription('Displays volt value of power by V unit.') swStructPowerAmp = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerAmp.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerAmp.setDescription('Displays amp value of power by A unit.') swStructPowerFan1Status = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fanOk", 1), ("fanFail", 2), ("other", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerFan1Status.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerFan1Status.setDescription('Describes the operation status of the power fan1.') swStructPowerFan2Status = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fanOk", 1), ("fanFail", 2), ("other", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerFan2Status.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerFan2Status.setDescription('Describes the operation status of the power fan2.') swStructPowerStatus = 
MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("acFailPsFail", 1), ("acPresentPsFail", 2), ("psGood", 3), ("other", 4)))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructPowerStatus.setStatus('mandatory') if mibBuilder.loadTexts: swStructPowerStatus.setDescription('Describes the operation status of the power supply.') swStructSystemFanTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 4), ) if mibBuilder.loadTexts: swStructSystemFanTable.setStatus('mandatory') if mibBuilder.loadTexts: swStructSystemFanTable.setDescription('A table that contains informations about system fans.') swStructSystemFanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 4, 1), ).setIndexNames((0, "SW-STRCTURE-MIB", "swStructSystemFanIndex")) if mibBuilder.loadTexts: swStructSystemFanEntry.setStatus('mandatory') if mibBuilder.loadTexts: swStructSystemFanEntry.setDescription('A list of informations for each system fan.') swStructSystemFanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 4, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructSystemFanIndex.setStatus('mandatory') if mibBuilder.loadTexts: swStructSystemFanIndex.setDescription('ID of designed system fans.') swStructSystemFanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fanOk", 1), ("fanFail", 2), ("other", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: swStructSystemFanStatus.setStatus('mandatory') if mibBuilder.loadTexts: swStructSystemFanStatus.setDescription('Describes the operation status of the system fans.') powerTemperatureWarnning = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,5)).setObjects(("SW-STRCTURE-MIB", 
"swStructPowerIndex")) if mibBuilder.loadTexts: powerTemperatureWarnning.setDescription('The trap is sent whenever the power state enter the temperature warnning state. ') powerVoltWarnning = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,6)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerVoltWarnning.setDescription('The trap is sent whenever the power state enter the volt warnning state. ') powerCurrentWarnning = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,7)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerCurrentWarnning.setDescription('The trap is sent whenever the power state enter the current warnning state. ') powerFan1Fail = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,8)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerFan1Fail.setDescription('The trap is sent whenever the power state enter the power fan1 fail state. ') powerFan2Fail = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,9)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerFan2Fail.setDescription('The trap is sent whenever the power state enter the power fan2 fail state. ') systemFanFail = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,10)).setObjects(("SW-STRCTURE-MIB", "swStructSystemFanIndex")) if mibBuilder.loadTexts: systemFanFail.setDescription('The trap is sent whenever the power state enter the system fans fail state. 
') powerRemoved = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,11)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerRemoved.setDescription('The trap is sent whenever the power is removed.') powerInserted = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,12)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerInserted.setDescription('The trap is sent whenever the power is inserted.') powerBad = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,13)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex")) if mibBuilder.loadTexts: powerBad.setDescription('The trap is sent whenever the power is bad.') mibBuilder.exportSymbols("SW-STRCTURE-MIB", swStructPowerInfo=swStructPowerInfo, swStructPowerAmp=swStructPowerAmp, dlinkcommon=dlinkcommon, swStructModuleType=swStructModuleType, swStructModuleMaxPortNum=swStructModuleMaxPortNum, powerCurrentWarnning=powerCurrentWarnning, swStructDevMaxPortNum=swStructDevMaxPortNum, swStructPowerTable=swStructPowerTable, swStructDevPortEncodingFactor=swStructDevPortEncodingFactor, dlink=dlink, swStructPowerFan1Status=swStructPowerFan1Status, swStructModuleOperStatus=swStructModuleOperStatus, powerInserted=powerInserted, external=external, swStructPowerVolt=swStructPowerVolt, powerBad=powerBad, swStructModuleIndex=swStructModuleIndex, swStructModuleVersion=swStructModuleVersion, es2000Mgmt=es2000Mgmt, swStructModuleEntry=swStructModuleEntry, golfcommon=golfcommon, swStructPowerEntry=swStructPowerEntry, swStructModuleUnitIndex=swStructModuleUnitIndex, swStructPowerStatus=swStructPowerStatus, swStructModuleDescr=swStructModuleDescr, swStructModuleLEDInfo=swStructModuleLEDInfo, powerFan1Fail=powerFan1Fail, swStructPowerTemperature=swStructPowerTemperature, swStructModuleTable=swStructModuleTable, swStructDevLastChange=swStructDevLastChange, swStructDevType=swStructDevType, swStructPowerFan2Status=swStructPowerFan2Status, 
swStructDevMaxModuleNum=swStructDevMaxModuleNum, es2000=es2000, swStructModuleLastChange=swStructModuleLastChange, marconi=marconi, swStructSystemFanStatus=swStructSystemFanStatus, swStructModuleEncodingOffset=swStructModuleEncodingOffset, powerRemoved=powerRemoved, swStructInfo=swStructInfo, systemFanFail=systemFanFail, swStructSystemFanEntry=swStructSystemFanEntry, swStructSystemFanIndex=swStructSystemFanIndex, swStructDevOperStatus=swStructDevOperStatus, golf=golf, swStructSystemFanTable=swStructSystemFanTable, marconi_mgmt=marconi_mgmt, swStructPowerIndex=swStructPowerIndex, powerVoltWarnning=powerVoltWarnning, powerFan2Fail=powerFan2Fail, systems=systems, swStructDevDescr=swStructDevDescr, swStructDevNumOfPortInUse=swStructDevNumOfPortInUse, golfproducts=golfproducts, powerTemperatureWarnning=powerTemperatureWarnning, swStructDevLedInfo=swStructDevLedInfo, swStructure=swStructure)
true
true
f70fda2462f589a62308e52704ddeabc6bfe0104
21,792
py
Python
my_utils/my_geometry.py
cigar666/my_manim_projects
64d02fb829ce5befd75d91327be465902fabb9c1
[ "MIT" ]
159
2020-02-17T06:45:25.000Z
2022-03-24T06:11:00.000Z
my_utils/my_geometry.py
cigar666/my_manim_projects
64d02fb829ce5befd75d91327be465902fabb9c1
[ "MIT" ]
1
2020-08-06T02:01:29.000Z
2020-08-06T03:05:25.000Z
my_utils/my_geometry.py
cigar666/my_manim_projects
64d02fb829ce5befd75d91327be465902fabb9c1
[ "MIT" ]
53
2020-02-21T03:13:49.000Z
2022-03-14T09:03:59.000Z
from manimlib.constants import *
from manimlib.mobject.types.vectorized_mobject import VMobject, VGroup
from manimlib.mobject.geometry import Arc, Line, Dot, Polygon, Sector, Circle
from manimlib.utils.color import color_gradient
from manimlib.mobject.number_line import DecimalNumber
from manimlib.mobject.svg.tex_mobject import TexMobject
from manimlib.mobject.svg.text_mobject import Text
from manimlib.utils.rate_functions import linear, smooth
from manimlib.utils.space_ops import *


class Arcs(VGroup):
    """A ring of consecutive colored arcs: each arc starts where the previous
    one ends, with angles taken from ``angle_list`` and colors interpolated
    over ``colors``."""

    CONFIG = {
        'colors': [RED, YELLOW, BLUE, PINK],
        'radius': 1,
        'start_angle': 0,
        'angle_list': [30 * DEGREES, 60 * DEGREES, 90 * DEGREES],
        'stroke_width': 40,
    }

    def __init__(self, **kwargs):
        # NOTE(review): calls VMobject.__init__ (not VGroup.__init__), as the
        # original did — kept to avoid any behavior change.
        VMobject.__init__(self, **kwargs)
        self.create_arcs()

    def create_arcs(self, **kwargs):
        """Build one Arc per entry of ``angle_list``, advancing the running
        start angle each time."""
        angle = self.start_angle
        colors = color_gradient(self.colors, len(self.angle_list))
        for i in range(len(self.angle_list)):
            self.add(Arc(radius=self.radius, start_angle=angle,
                         angle=self.angle_list[i], color=colors[i],
                         stroke_width=self.stroke_width, **kwargs))
            angle += self.angle_list[i]


class Angle(VGroup):
    """Angle marker at vertex O for the angle AOB: a translucent filled wedge
    plus a thin outline arc."""

    CONFIG = {
        'radius': 1,
        'color': RED,
        'opacity': 0.4,
        'stroke_width': 10,
    }

    def __init__(self, A, O, B, **kwargs):
        VMobject.__init__(self, **kwargs)
        OA, OB = A - O, B - O
        # Signed angle from OB to OA, via the argument of the complex ratio.
        theta = np.angle(complex(*OA[:2]) / complex(*OB[:2]))
        # Wide low-opacity arc acts as the "fill" of the wedge.
        self.add(Arc(start_angle=Line(O, B).get_angle(), angle=theta,
                     radius=self.radius / 2, stroke_width=100 * self.radius,
                     color=self.color).set_stroke(opacity=self.opacity).move_arc_center_to(O))
        # Thin arc is the visible outline at the full radius.
        self.add(Arc(start_angle=Line(O, B).get_angle(), angle=theta,
                     radius=self.radius, stroke_width=self.stroke_width,
                     color=self.color).move_arc_center_to(O))


class Tracked_Point(VGroup):
    """A dot whose (x, y) coordinates are displayed next to it and kept up to
    date by an updater. Override ``mapping_func`` to show transformed
    coordinates."""

    CONFIG = {
        'size': 0.1,
        'point_color': BLUE,
        'num_decimal_places': 2,
        'coordinates_scale': 0.8,
        'coordinates_color': GREEN,
        'coordinates_direction': DOWN * 0.25,
        'bracket_color': WHITE,
    }

    def __init__(self, init_loc=ORIGIN, **kwargs):
        VGroup.__init__(self, **kwargs)
        self.point = Dot(init_loc, color=self.point_color).set_height(self.size)
        self.value_x = DecimalNumber(0, color=self.coordinates_color,
                                     num_decimal_places=self.num_decimal_places).scale(self.coordinates_scale)
        self.value_y = DecimalNumber(0, color=self.coordinates_color,
                                     num_decimal_places=self.num_decimal_places).scale(self.coordinates_scale)
        text = TexMobject('(', ',', ')').scale(self.coordinates_scale)
        # Layout: "(" x "," y ")" — the updater repositions and refreshes it.
        self.coordinates_text = VGroup(text[0], self.value_x, text[1], self.value_y, text[2])
        self.coordinates_text.add_updater(self.update_coordinates_text)
        # Only the dot is added here; the label group is used externally.
        self.add(self.point)

    def update_coordinates_text(self, coords):
        """Updater: re-layout the label pieces and refresh the two values from
        the dot's current position (run through ``mapping_func``)."""
        for i in range(1, len(coords)):
            coords[i].next_to(coords[i - 1], RIGHT * 0.5)
        coords[2].align_to(coords[1], DOWN)
        pos = self.point.get_center()
        x, y = self.mapping_func(pos[0], pos[1])
        coords[1].set_value(x)
        coords[3].set_value(y)
        coords.next_to(self.point, self.coordinates_direction)

    def mapping_func(self, x, y):
        """Identity by default; override to display transformed coordinates."""
        return x, y


class Dashed_Circle(VGroup):
    """A circle drawn as ``arc_num`` short arcs, each covering ``arc_ratio``
    of its slot, giving a dashed appearance."""

    CONFIG = {
        'arc_ratio': 0.6,
        'arc_num': 36,
        'arc_config': {
            'color': WHITE,
            'stroke_width': 2.5,
        },
    }

    def __init__(self, radius=1, center=ORIGIN, **kwargs):
        VGroup.__init__(self, **kwargs)
        theta = TAU / self.arc_num
        for i in range(self.arc_num):
            arc_i = Arc(radius=radius, angle=theta * self.arc_ratio, **self.arc_config)
            arc_i.rotate(theta * i, about_point=ORIGIN)
            self.add(arc_i)
        self.move_to(center)


class Right_angle(VGroup):
    """Right-angle marker: a small filled square with two stroked edges,
    positioned by corner and rotation angle."""

    CONFIG = {
        'size': 0.25,
        'stroke_color': WHITE,
        'stroke_width': 3.2,
        'fill_color': BLUE,
        'fill_opacity': 0.5,
        'on_the_right': True,
    }

    def __init__(self, corner=ORIGIN, angle=0, **kwargs):
        VGroup.__init__(self, **kwargs)
        self.corner = ORIGIN
        self.angle = 0
        # Mirror the marker to the left when requested.
        r = UR if self.on_the_right else UL
        self.add(Polygon(ORIGIN, RIGHT * self.size * r, UR * self.size * r, UP * self.size * r,
                         stroke_width=0, fill_color=self.fill_color, fill_opacity=self.fill_opacity),
                 # The tiny stroke_width/100/2*0.8 offsets make the two edge
                 # lines meet cleanly at the outer corner.
                 Line(RIGHT * self.size * r, UR * self.size * r + UP * self.stroke_width / 100 / 2 * 0.8,
                      stroke_width=self.stroke_width, stroke_color=self.stroke_color),
                 Line(UR * self.size * r + RIGHT * self.stroke_width / 100 / 2 * r * 0.8, UP * self.size * r,
                      stroke_width=self.stroke_width, stroke_color=self.stroke_color),
                 )
        self.move_corner_to(corner)
        self.change_angle_to(angle)

    def move_corner_to(self, new_corner):
        """Translate so the marker's corner sits at ``new_corner``."""
        self.shift(new_corner - self.corner)
        self.corner = new_corner
        return self

    def change_angle_to(self, new_angle):
        """Rotate about the corner so the marker's angle becomes ``new_angle``."""
        self.rotate(new_angle - self.angle, about_point=self.corner)
        self.angle = new_angle
        return self


class Trail(VGroup):
    """Wraps a mobject (``self[0]``) and draws a fading trail behind it while
    it moves. ``start_trace``/``stop_trace`` toggle the updater;
    ``retrieve_trail`` shrinks the trail away."""

    CONFIG = {
        'max_width': 5,
        'nums': 500,
        'trail_color': BLUE_B,
        'rate_func': lambda t: t ** 1.25,
    }

    def __init__(self, mob, **kwargs):
        VGroup.__init__(self, **kwargs)
        self.add(mob)
        self.trail = VGroup()
        self.path_xyz = []
        self.add(self.trail)
        self.pos_old = self[0].get_center()
        # A non-string trail_color is treated as a gradient spec.
        if type(self.trail_color) != str:
            self.colors = color_gradient(self.trail_color, self.nums)

    def get_path_xyz(self, err=1e-6):
        """Record the tracked mobject's position if it moved more than ``err``,
        keeping at most ``nums`` points."""
        pos_new = self[0].get_center()
        pos_old = self.pos_old
        if sum(abs(pos_new - pos_old)) > err:
            self.path_xyz.append(pos_new)
            self.pos_old = pos_new
        while len(self.path_xyz) > self.nums:
            self.path_xyz.remove(self.path_xyz[0])

    def create_path(self):
        """Build the trail as a VGroup of segments whose width/opacity fade
        toward the tail according to ``rate_func``."""
        path = VGroup()
        self.get_path_xyz()
        if len(self.path_xyz) > 1:
            for i in range(len(self.path_xyz) - 1):
                if type(self.trail_color) == str:
                    path.add(Line(self.path_xyz[i], self.path_xyz[i + 1],
                                  stroke_color=self.trail_color,
                                  stroke_opacity=self.rate_func(i / len(self.path_xyz)),
                                  plot_depth=self.rate_func(2 - i / len(self.path_xyz)),
                                  stroke_width=self.max_width * self.rate_func(i / len(self.path_xyz))))
                else:
                    path.add(Line(self.path_xyz[i], self.path_xyz[i + 1],
                                  stroke_color=self.colors[i],
                                  stroke_opacity=self.rate_func(i / len(self.path_xyz)),
                                  plot_depth=self.rate_func(2 - i / len(self.path_xyz)),
                                  stroke_width=self.max_width * self.rate_func(i / len(self.path_xyz))))
        return path

    def update_path(self, trail):
        """Updater: rebuild the trail from the recorded path each frame."""
        trail.become(self.create_path())

    def start_trace(self):
        self.trail.add_updater(self.update_path)

    def stop_trace(self):
        # BUGFIX: the original referenced ``self.trial`` (typo), which raised
        # AttributeError whenever a trace was stopped.
        self.trail.remove_updater(self.update_path)

    def decrease_trail_num(self, trail, dt):
        """Updater: shrink the trail by ``rate`` segments per frame until it
        reaches max(min_num, 2). (The original's inner ``nums <= 2`` branch
        was unreachable under this guard and has been removed.)"""
        if self.nums > max(self.min_num, 2):
            self.nums = max(self.nums - self.rate, 2)
            trail.become(self.create_path())

    def retrieve_trail(self, rate=2, min_num=0):
        """Start shrinking the existing trail away at ``rate`` segments per
        frame, down to ``min_num`` segments."""
        self.nums = len(self.trail)
        self.min_num = min_num
        self.rate = rate
        self.trail.add_updater(self.decrease_trail_num)


class Sun(VGroup):
    """A glowing disc: a bright core dot surrounded by ``layer_num``
    concentric arcs whose radius spacing and opacity follow ``rate_func``
    and ``opacity_func``."""

    CONFIG = {
        'colors': [RED_B, ORANGE, WHITE],
        'opacity_func': lambda t: 1500 * (1 - abs(t - 0.009) ** 0.0001),
        'radius': 4,
        'layer_num': 80,
        'rate_func': lambda t: t ** 2,
    }

    def __init__(self, **kwargs):
        VGroup.__init__(self, **kwargs)
        self.color_list = color_gradient(self.colors, self.layer_num)
        self.add(Dot(color=average_color(self.colors[0], WHITE),
                     plot_depth=4).set_height(0.015 * self.radius))
        for i in range(self.layer_num):
            # Each ring's width covers exactly the radial gap to the next
            # ring (difference of consecutive rate_func samples).
            self.add(Arc(radius=self.radius * self.rate_func((0.5 + i) / self.layer_num), angle=TAU,
                         color=self.color_list[i],
                         stroke_width=101 * (self.rate_func((i + 1) / self.layer_num)
                                             - self.rate_func(i / self.layer_num)) * self.radius,
                         stroke_opacity=self.opacity_func(self.rate_func(i / self.layer_num)),
                         plot_depth=5))


class Three_Body(VGroup):
    """Three-body gravity simulation driving three "sun" mobjects (and an
    optional massless planet) with explicit-Euler integration."""

    CONFIG = {
        'mass': np.array([0.98, 1.025, 1]) * 1.2,
        'pos': np.array([[-3., -np.sqrt(3), 0], [0., 3 * np.sqrt(3) - 1, 0], [3, -np.sqrt(3), 0]]) * 0.75,
        'velocity': np.array([[1, -np.sqrt(3), 0], [-2, 0, 0], [1, np.sqrt(3), 0]]) * 0.8,
        'p_pos': np.array([2, -np.sqrt(3) + 1, 0]) * 1.,
        'p_velocity': np.array([-1, -1.7, 0]) * 2.4,
        'plot_depth': 5,
    }

    def __init__(self, *three_Mobject, **kwargs):
        VGroup.__init__(self, **kwargs)
        self.sun_01 = three_Mobject[0].move_to(self.pos[0])
        self.sun_02 = three_Mobject[1].move_to(self.pos[1])
        self.sun_03 = three_Mobject[2].move_to(self.pos[2])
        self.add(self.sun_01, self.sun_02, self.sun_03)
        # BUGFIX: the original assigned self.planet in two identical blocks;
        # the duplicate has been removed (behavior unchanged).
        if len(three_Mobject) > 3:
            self.planet = three_Mobject[3].move_to(self.p_pos)
            self.add(self.planet)

    def get_force(self, x1, x2, m1, m2, G=1):
        """Gravitational force of body 1 on body 2; the returned vector points
        from x2 toward x1. The ``+ 2e-3`` softens close encounters."""
        r = np.sqrt(sum((x1 - x2) ** 2))
        return G * m1 * m2 * (x1 - x2) / (r ** 3 + 2e-3)

    def update_xyz(self, G=1, delta_t=2.5e-3):
        """Advance positions and velocities one Euler step of size ``delta_t``.
        The planet has unit mass and does not pull on the suns."""
        m1, m2, m3 = self.mass[0], self.mass[1], self.mass[2]
        x1, x2, x3 = self.pos[0], self.pos[1], self.pos[2]
        v1, v2, v3 = self.velocity[0], self.velocity[1], self.velocity[2]
        f21, f31, f32 = (self.get_force(x2, x1, m2, m1, G=G),
                         self.get_force(x3, x1, m3, m1, G=G),
                         self.get_force(x3, x2, m3, m2, G=G))
        a1, a2, a3 = (f21 + f31) / m1, (-f21 + f32) / m2, (-f32 - f31) / m3
        xp, vp = self.p_pos, self.p_velocity
        f1, f2, f3 = (self.get_force(x1, xp, m1, 1, G=G),
                      self.get_force(x2, xp, m2, 1, G=G),
                      self.get_force(x3, xp, m3, 1, G=G))
        a = (f1 + f2 + f3) / 1.
        self.velocity[0] += a1 * delta_t
        self.velocity[1] += a2 * delta_t
        self.velocity[2] += a3 * delta_t
        self.p_velocity += a * delta_t
        self.pos[0] += v1 * delta_t
        self.pos[1] += v2 * delta_t
        self.pos[2] += v3 * delta_t
        self.p_pos += vp * delta_t

    def reset_velocity(self):
        """Shift all velocities into the center-of-momentum frame so the
        system's total momentum is zero."""
        v1, v2, v3 = self.velocity[0], self.velocity[1], self.velocity[2]
        m1, m2, m3 = self.mass[0], self.mass[1], self.mass[2]
        momentum = v1 * m1 + v2 * m2 + v3 * m3
        v = momentum / (m1 + m2 + m3)
        v1, v2, v3 = v1 - v, v2 - v, v3 - v
        print(v1, v2, v3)
        self.p_velocity -= v
        self.velocity = np.array([v1, v2, v3])

    def update_three_body(self, tb, dt):
        """Updater: step the physics and move the mobjects to the new positions."""
        self.update_xyz(G=40)
        tb[0].move_to(self.pos[0])
        tb[1].move_to(self.pos[1])
        tb[2].move_to(self.pos[2])
        if len(tb) > 3:
            tb[3].move_to(self.p_pos)

    def start_move(self):
        self.add_updater(self.update_three_body)


class MySectors(VGroup):
    """A radial (Nightingale-rose style) chart: one sector per value, outer
    radius scaled by ``outer_radius_func``, with optional labels, inner
    circles, and an inner glow."""

    CONFIG = {
        'stroke_width': 0,
        'fill_opacity': 1,
        'inner_radius': 1.6,
        'gap': 0.025,
        'start_direction': UP,
        'values': [1, 2, 3],
        'labels': None,
        'unit': None,
        'outer_radius_func': lambda t: t / 10 + 0.32,
        'label_font': '思源黑体 Bold',
        'center': ORIGIN,
    }

    def __init__(self, **kwargs):
        VGroup.__init__(self, **kwargs)
        self.colors = color_gradient([ORANGE, RED, PINK, BLUE, GREEN, YELLOW], len(self.values))
        self.sectors, self.labels_group = VGroup(), VGroup()
        self.sectors = self.create_sectors()
        if self.labels is not None:
            self.labels_group = self.create_label()
        self.add(self.sectors, self.labels_group)

    def create_sectors(self):
        """Build one Sector per value; ``gap`` leaves a small angular gap
        between neighbors."""
        angle = TAU / len(self.values)
        colors = self.colors
        start_a = np.angle(complex(*self.start_direction[0:2]))
        for i in range(len(self.values)):
            r_i = self.inner_radius + self.outer_radius_func(self.values[i])
            sector_i = Sector(arc_center=self.center, inner_radius=self.inner_radius,
                              outer_radius=r_i, stroke_width=self.stroke_width,
                              start_angle=start_a + i * angle, angle=angle * (1 - self.gap),
                              color=colors[i], fill_opacity=self.fill_opacity)
            self.sectors.add(sector_i)
        return self.sectors

    def create_label(self):
        """Place a text label (and optional value + unit) inside each sector.
        BUGFIX: the original used ``self.labels.index(tex)``, which returns the
        first match and so misplaces labels when two labels are equal; the
        index now comes from ``enumerate``."""
        start_a = np.angle(complex(*self.start_direction[0:2]))
        for i, (tex, value) in enumerate(zip(self.labels, self.values)):
            r = self.inner_radius + self.outer_radius_func(self.values[i])
            size = TAU * r / len(self.values) * 0.2
            tex_i = Text(tex, font=self.label_font, color=WHITE, plot_depth=1).set_height(size)
            value_i = Text(str(value), font=self.label_font, color=WHITE,
                           plot_depth=1).set_height(size).next_to(tex_i, DOWN * 0.64 * size)
            if self.unit is not None:
                unit_i = Text(self.unit, font=self.label_font, color=WHITE,
                              plot_depth=1).set_height(size).next_to(value_i, RIGHT * 0.2 * size)
                VGroup(value_i, unit_i).next_to(tex_i, DOWN * 0.64 * size)
                label_i = VGroup(tex_i, value_i, unit_i)
            else:
                label_i = VGroup(tex_i, value_i)
            # Position the label at mid-angle of sector i, pulled slightly
            # inward from the outer rim.
            self.labels_group.add(label_i.shift(self.center + complex_to_R3(
                (r - size * 1.2 - r * 0.05) * np.exp(1j * (start_a + (i + 0.5) * TAU / len(self.values))))))
        return self.labels_group

    def create_cicles(self, color=BLUE_A):
        """Add two decorative inner circles. (Name kept as-is — '(sic)' — for
        backward compatibility with existing callers.)"""
        circle_01 = Circle(radius=self.inner_radius, stroke_width=12,
                           stroke_color=color, plot_depth=2.5)
        circle_02 = Circle(radius=self.inner_radius - 0.15, stroke_width=4,
                           stroke_color=color, plot_depth=2.5)
        self.circles = VGroup(circle_01, circle_02).move_to(self.center)
        self.add(self.circles)
        return self.circles

    def create_circle_shadow(self, width=32, num=50, color=BLUE_A):
        """Add a soft glow just outside the inner radius, built from ``num``
        thin concentric circles with quadratically falling opacity."""
        self.shadow = VGroup(*[Circle(radius=self.inner_radius + (i + 0.5) * width / 100 / num,
                                      stroke_width=width / num, stroke_color=color,
                                      stroke_opacity=(i - num) ** 2 * 1 / num / num,
                                      plot_depth=2)
                               for i in range(num + 1)]).move_to(self.center)
        self.add(self.shadow)
        return self.shadow


class New_Polygon(VGroup):
    """Polygon drawn as a borderless fill plus explicit edge Lines and vertex
    Dots; the dots track ``stroke_width`` via updaters."""

    CONFIG = {
        'stroke_color': BLUE,
        'stroke_width': 4,
        'fill_color': BLUE_B,
        'fill_opacity': 0,
    }

    def __init__(self, *vertices, **kwargs):
        VGroup.__init__(self, **kwargs)
        self.lines, self.dots = VGroup(plot_depth=1), VGroup(plot_depth=1)
        self.poly = Polygon(*vertices, fill_color=self.fill_color,
                            fill_opacity=self.fill_opacity, plot_depth=0).set_stroke(width=0)
        self.add(self.poly, self.lines, self.dots)
        n = len(vertices)
        for i in range(n):
            # (i + 1) % n closes the polygon back to the first vertex.
            self.lines.add(Line(vertices[i], vertices[(i + 1) % n],
                                color=self.stroke_color, stroke_width=self.stroke_width))
            self.dots.add(Dot(vertices[i], color=self.stroke_color).set_height(self.stroke_width / 100))
        for dot in self.dots:
            dot.add_updater(lambda d: d.set_height(self.stroke_width / 100))


class MySector(VGroup):
    """Placeholder/stub — never calls VGroup.__init__ and builds nothing.
    NOTE(review): apparently unfinished; kept as-is."""

    CONFIG = {
        'label': 'label',
        'font': '思源黑体 Bold',
        'value': 1,
    }

    def __init__(self, ):
        pass


class Shadow_2d(VGroup):
    """Soft 2-D drop shadow for a mobject or a list of polygon points: a
    filled core shape plus ``layer_num`` progressively scaled, fading
    outline copies forming the blur."""

    CONFIG = {
        'shadow_color': DARK_GRAY,
        'shadow_opacity': 0.6,
        'blur_width': 0.25,
        'layer_num': 40,
        'scale_factor': 1,
        'shadow_out': True,
        'show_basic_shape': True,
        'plot_depth': -1,
        'rate_func': lambda t: t ** 0.5,
    }

    def __init__(self, mob_or_points, **kwargs):
        VGroup.__init__(self, **kwargs)
        if type(mob_or_points) == list:
            self.shape = Polygon(*mob_or_points, stroke_width=0, plot_depth=-1)
        else:
            self.shape = mob_or_points.set_stroke(width=0)
        self.shape.set_fill(color=self.shadow_color,
                            opacity=self.shadow_opacity * (1 if self.show_basic_shape else 0)).scale(self.scale_factor)
        self.blur_outline = VGroup()
        # Characteristic size used to convert the blur width into a per-layer
        # scale factor.
        s = (self.shape.get_height() + self.shape.get_width()) / 2
        if self.blur_width > 1e-4:
            for i in range(self.layer_num):
                layer_i = self.shape.copy().set_stroke(
                    color=self.shadow_color,
                    width=100 * self.blur_width / self.layer_num,
                    opacity=self.shadow_opacity * (1 - self.rate_func(i / self.layer_num))).\
                    set_fill(opacity=0).scale(
                        (s + (1 if self.shadow_out else -1) * self.blur_width / self.layer_num * (i + 0.5)) / s).\
                    set_plot_depth(-2)
                self.blur_outline.add(layer_i)
        self.add(self.shape, self.blur_outline)


class TransformMobject(VGroup):
    """A trail of ``num`` copies of a mobject, progressively shifted, scaled
    (log- or linear-interpolated per ``scale_type``), rotated, and recolored
    along a gradient."""

    CONFIG = {
        'rotate_angle': PI / 2,
        'shift_vect': ORIGIN,
        'scale_range': (1, 1e-3),
        'stroke_colors': [RED, PINK, BLUE],
        'num': 10,
        'rate_func': linear,
        'scale_type': 0,
    }

    def __init__(self, mob, **kwargs):
        VGroup.__init__(self, **kwargs)
        if type(self.stroke_colors) == list:
            stroke_colors = color_gradient(self.stroke_colors, self.num)
        else:
            stroke_colors = color_gradient([self.stroke_colors, self.stroke_colors], self.num)
        for i in range(self.num):
            t = i / (self.num - 1)
            shift_i = self.rate_func(t) * self.shift_vect
            if self.scale_type == 0:
                # Geometric (log-space) interpolation between the two scales.
                scale_i = np.exp(np.log(self.scale_range[0])
                                 + self.rate_func(t) * (np.log(self.scale_range[1]) - np.log(self.scale_range[0])))
            else:
                # Plain linear interpolation.
                scale_i = self.scale_range[0] + self.rate_func(t) * (self.scale_range[1] - self.scale_range[0])
            theta_i = self.rate_func(t) * self.rotate_angle
            mob_i = mob.copy().shift(shift_i)
            mob_i.scale(scale_i, about_point=mob_i.get_center_of_mass()).rotate(
                theta_i, about_point=mob_i.get_center_of_mass()).set_stroke(color=stroke_colors[i])
            self.add(mob_i)
41.116981
193
0.583058
from manimlib.constants import * from manimlib.mobject.types.vectorized_mobject import VMobject, VGroup from manimlib.mobject.geometry import Arc, Line, Dot, Polygon, Sector, Circle from manimlib.utils.color import color_gradient from manimlib.mobject.number_line import DecimalNumber from manimlib.mobject.svg.tex_mobject import TexMobject from manimlib.mobject.svg.text_mobject import Text from manimlib.utils.rate_functions import linear, smooth from manimlib.utils.space_ops import * class Arcs(VGroup): CONFIG = { 'colors': [RED, YELLOW, BLUE, PINK], 'radius': 1, 'start_angle':0, 'angle_list': [30 * DEGREES, 60 * DEGREES, 90 * DEGREES], 'stroke_width': 40, } def __init__(self, **kwargs): VMobject.__init__(self, **kwargs) self.create_arcs() def create_arcs(self, **kwargs): angle = self.start_angle colors = color_gradient(self.colors, len(self.angle_list)) for i in range(len(self.angle_list)): self.add(Arc(radius=self.radius, start_angle=angle, angle=self.angle_list[i], color=colors[i], stroke_width=self.stroke_width, **kwargs)) angle += self.angle_list[i] class Angle(VGroup): CONFIG = { 'radius': 1, 'color': RED, 'opacity': 0.4, 'stroke_width': 10, } def __init__(self, A, O, B, **kwargs): VMobject.__init__(self, **kwargs) OA, OB = A-O, B-O theta = np.angle(complex(*OA[:2])/complex(*OB[:2])) self.add(Arc(start_angle=Line(O, B).get_angle(), angle=theta, radius=self.radius/2, stroke_width=100 * self.radius, color=self.color).set_stroke(opacity=self.opacity).move_arc_center_to(O)) self.add(Arc(start_angle=Line(O, B).get_angle(), angle=theta, radius=self.radius, stroke_width=self.stroke_width, color=self.color).move_arc_center_to(O)) class Tracked_Point(VGroup): CONFIG = { 'size': 0.1, 'point_color': BLUE, 'num_decimal_places': 2, 'coordinates_scale': 0.8, 'coordinates_color': GREEN, 'coordinates_direction': DOWN * 0.25, 'bracket_color': WHITE, } def __init__(self, init_loc=ORIGIN, **kwargs): VGroup.__init__(self, **kwargs) self.point = Dot(init_loc, 
color=self.point_color).set_height(self.size) self.value_x = DecimalNumber(0, color=self.coordinates_color, num_decimal_places=self.num_decimal_places).scale(self.coordinates_scale) self.value_y = DecimalNumber(0, color=self.coordinates_color, num_decimal_places=self.num_decimal_places).scale(self.coordinates_scale) text = TexMobject('(', ',', ')').scale(self.coordinates_scale) self.coordinates_text = VGroup(text[0], self.value_x, text[1], self.value_y, text[2]) self.coordinates_text.add_updater(self.update_coordinates_text) self.add(self.point) def update_coordinates_text(self, coords): for i in range(1, len(coords)): coords[i].next_to(coords[i-1], RIGHT * 0.5) coords[2].align_to(coords[1], DOWN) pos = self.point.get_center() x, y = self.mapping_func(pos[0], pos[1]) coords[1].set_value(x) coords[3].set_value(y) coords.next_to(self.point, self.coordinates_direction) def mapping_func(self, x, y): return x, y class Dashed_Circle(VGroup): CONFIG = { 'arc_ratio': 0.6, 'arc_num': 36, 'arc_config':{ 'color': WHITE, 'stroke_width': 2.5, }, } def __init__(self, radius=1, center=ORIGIN, **kwargs): VGroup.__init__(self, **kwargs) theta = TAU/self.arc_num for i in range(self.arc_num): arc_i = Arc(radius=radius, angle=theta * self.arc_ratio, **self.arc_config) arc_i.rotate(theta * i, about_point=ORIGIN) self.add(arc_i) self.move_to(center) class Right_angle(VGroup): CONFIG = { 'size': 0.25, 'stroke_color': WHITE, 'stroke_width': 3.2, 'fill_color': BLUE, 'fill_opacity': 0.5, 'on_the_right': True, } def __init__(self, corner=ORIGIN, angle=0, **kwargs): VGroup.__init__(self, **kwargs) self.corner = ORIGIN self.angle = 0 r = UR if self.on_the_right else UL self.add(Polygon(ORIGIN, RIGHT * self.size * r, UR * self.size * r, UP * self.size * r, stroke_width=0, fill_color=self.fill_color, fill_opacity=self.fill_opacity), Line(RIGHT * self.size * r, UR * self.size * r + UP * self.stroke_width/100/2 * 0.8, stroke_width=self.stroke_width, stroke_color=self.stroke_color), Line(UR * 
self.size * r + RIGHT * self.stroke_width/100/2 * r * 0.8, UP * self.size * r, stroke_width=self.stroke_width, stroke_color=self.stroke_color), ) self.move_corner_to(corner) self.change_angle_to(angle) def move_corner_to(self, new_corner): self.shift(new_corner - self.corner) self.corner = new_corner return self def change_angle_to(self, new_angle): self.rotate(new_angle - self.angle, about_point=self.corner) self.angle = new_angle return self class Trail(VGroup): CONFIG = { 'max_width': 5, 'nums': 500, 'trail_color': BLUE_B, 'rate_func': lambda t: t ** 1.25, } def __init__(self, mob, **kwargs): VGroup.__init__(self, **kwargs) self.add(mob) self.trail = VGroup() self.path_xyz = [] self.add(self.trail) self.pos_old = self[0].get_center() if type(self.trail_color) != str: self.colors = color_gradient(self.trail_color, self.nums) self.path_xyz.remove(self.path_xyz[0]) def create_path(self): path = VGroup() self.get_path_xyz() if len(self.path_xyz) > 1: for i in range(len(self.path_xyz)-1): if type(self.trail_color) == str: path.add(Line(self.path_xyz[i], self.path_xyz[i+1], stroke_color=self.trail_color, stroke_opacity=self.rate_func(i/len(self.path_xyz)), plot_depth=self.rate_func(2-i/len(self.path_xyz)), stroke_width=self.max_width * self.rate_func(i/len(self.path_xyz)))) else: path.add(Line(self.path_xyz[i], self.path_xyz[i+1], stroke_color=self.colors[i], stroke_opacity=self.rate_func(i/len(self.path_xyz)), plot_depth=self.rate_func(2-i/len(self.path_xyz)), stroke_width=self.max_width * self.rate_func(i/len(self.path_xyz)))) return path def update_path(self, trail): trail.become(self.create_path()) def start_trace(self): self.trail.add_updater(self.update_path) def stop_trace(self): self.trial.remove_updater(self.update_path) def decrease_trail_num(self, trail, dt): if self.nums > max(self.min_num, 2): if self.nums <= 2: trail.become(VGroup()) else: self.nums -= self.rate if self.nums < 2: self.nums = 2 trail.become(self.create_path()) def retrieve_trail(self, 
rate=2, min_num=0): self.nums = len(self.trail) self.min_num = min_num self.rate = rate self.trail.add_updater(self.decrease_trail_num) class Sun(VGroup): CONFIG = { 'colors': [RED_B, ORANGE, WHITE], 'opacity_func': lambda t: 1500 * (1 - abs(t-0.009) ** 0.0001), 'radius': 4, 'layer_num': 80, 'rate_func': lambda t: t ** 2, } def __init__(self, **kwargs): VGroup.__init__(self, **kwargs) self.color_list = color_gradient(self.colors, self.layer_num) self.add(Dot(color=average_color(self.colors[0], WHITE), plot_depth=4).set_height(0.015 * self.radius)) for i in range(self.layer_num): self.add(Arc(radius= self.radius * self.rate_func((0.5 + i)/self.layer_num), angle=TAU, color=self.color_list[i], stroke_width=101 * (self.rate_func((i + 1)/self.layer_num) - self.rate_func(i/self.layer_num)) * self.radius, stroke_opacity=self.opacity_func(self.rate_func(i/self.layer_num)), plot_depth=5)) class Three_Body(VGroup): CONFIG = { 'mass': np.array([0.98, 1.025, 1]) * 1.2, 'pos': np.array([[-3., -np.sqrt(3), 0], [0., 3 * np.sqrt(3) - 1, 0], [3, -np.sqrt(3), 0]]) * 0.75, 'velocity': np.array([[1, -np.sqrt(3), 0], [-2, 0, 0], [1, np.sqrt(3), 0]]) * 0.8, 'p_pos': np.array([2, -np.sqrt(3)+1, 0]) * 1., 'p_velocity':np.array([-1, -1.7, 0]) * 2.4, 'plot_depth':5, } def __init__(self, *three_Mobject, **kwargs): VGroup.__init__(self, **kwargs) self.sun_01 = three_Mobject[0].move_to(self.pos[0]) self.sun_02 = three_Mobject[1].move_to(self.pos[1]) self.sun_03 = three_Mobject[2].move_to(self.pos[2]) if len(three_Mobject) > 3: self.planet = three_Mobject[3].move_to(self.p_pos) self.add(self.sun_01, self.sun_02, self.sun_03) if len(three_Mobject) > 3: self.planet = three_Mobject[3].move_to(self.p_pos) self.add(self.planet) def get_force(self, x1, x2, m1, m2, G=1): r = np.sqrt(sum((x1 - x2) ** 2)) return G * m1 * m2 * (x1 - x2) / (r ** 3 + 2e-3) def update_xyz(self, G=1, delta_t =2.5e-3): m1, m2, m3 = self.mass[0], self.mass[1], self.mass[2] x1, x2, x3 = self.pos[0], self.pos[1], self.pos[2] v1, 
v2, v3 = self.velocity[0], self.velocity[1], self.velocity[2] f21, f31, f32 = self.get_force(x2, x1, m2, m1, G=G), self.get_force(x3, x1, m3, m1, G=G), self.get_force(x3, x2, m3, m2, G=G) a1, a2, a3 = (f21 + f31) / m1, (-f21 + f32) / m2, (-f32 - f31) / m3 xp, vp = self.p_pos, self.p_velocity f1, f2, f3 = self.get_force(x1, xp, m1, 1, G=G), self.get_force(x2, xp, m2, 1, G=G), self.get_force(x3, xp, m3, 1, G=G) a = (f1 + f2 + f3) / 1. self.velocity[0] += a1 * delta_t self.velocity[1] += a2 * delta_t self.velocity[2] += a3 * delta_t self.p_velocity += a * delta_t self.pos[0] += v1 * delta_t self.pos[1] += v2 * delta_t self.pos[2] += v3 * delta_t self.p_pos += vp *delta_t def reset_velocity(self): v1, v2, v3 = self.velocity[0], self.velocity[1], self.velocity[2] m1, m2, m3 = self.mass[0], self.mass[1], self.mass[2] momentum = v1 * m1 + v2 * m2 + v3 * m3 v = momentum/(m1 + m2 + m3) v1, v2, v3 = v1 - v, v2 - v, v3 - v print(v1, v2, v3) self.p_velocity -= v self.velocity = np.array([v1, v2, v3]) def update_three_body(self, tb, dt): self.update_xyz(G=40) tb[0].move_to(self.pos[0]) tb[1].move_to(self.pos[1]) tb[2].move_to(self.pos[2]) if len(tb)>3: tb[3].move_to(self.p_pos) def start_move(self): self.add_updater(self.update_three_body) class MySectors(VGroup): CONFIG = { 'stroke_width': 0, 'fill_opacity': 1, 'inner_radius': 1.6, 'gap': 0.025, 'start_direction': UP, 'values': [1,2,3], 'labels': None, 'unit': None, 'outer_radius_func': lambda t: t/10 + 0.32, 'label_font': '思源黑体 Bold', 'center': ORIGIN, } def __init__(self, **kwargs): VGroup.__init__(self, **kwargs) self.colors = color_gradient([ORANGE, RED, PINK, BLUE, GREEN, YELLOW], len(self.values)) self.sectors, self.labels_group = VGroup(), VGroup() self.sectors = self.create_sectors() if not self.labels == None: self.labels_group = self.create_label() self.add(self.sectors, self.labels_group) def create_sectors(self): angle = TAU/len(self.values) colors = self.colors start_a = 
np.angle(complex(*self.start_direction[0:2])) for i in range(len(self.values)): r_i = self.inner_radius + self.outer_radius_func(self.values[i]) sector_i = Sector(arc_center=self.center, inner_radius=self.inner_radius, outer_radius=r_i, stroke_width=self.stroke_width, start_angle=start_a + i * angle, angle=angle * (1 - self.gap), color=colors[i], fill_opacity=self.fill_opacity) self.sectors.add(sector_i) return self.sectors def create_label(self): for tex, value in zip(self.labels, self.values): i = self.labels.index(tex) r = self.inner_radius + self.outer_radius_func(self.values[i]) size = TAU * r / len(self.values) * 0.2 tex_i = Text(tex, font=self.label_font, color=WHITE, plot_depth=1).set_height(size) value_i = Text(str(value), font=self.label_font, color=WHITE, plot_depth=1).set_height(size).next_to(tex_i, DOWN * 0.64 * size) if not self.unit == None: unit_i = Text(self.unit, font=self.label_font, color=WHITE, plot_depth=1).set_height(size).next_to(value_i, RIGHT * 0.2 * size) VGroup(value_i, unit_i).next_to(tex_i, DOWN * 0.64 * size) label_i = VGroup(tex_i, value_i, unit_i) else: label_i = VGroup(tex_i, value_i) angle = TAU/len(self.values) start_a = np.angle(complex(*self.start_direction[0:2])) self.labels_group.add(label_i.shift(self.center + complex_to_R3((r-size * 1.2-r*0.05) * np.exp(1j * (start_a + (i + 0.5) * TAU/len(self.values)))))) return self.labels_group def create_cicles(self, color=BLUE_A): circle_01 = Circle(radius=self.inner_radius, stroke_width=12, stroke_color=color, plot_depth=2.5) circle_02 = Circle(radius=self.inner_radius - 0.15, stroke_width=4, stroke_color=color, plot_depth=2.5) self.circles = VGroup(circle_01, circle_02).move_to(self.center) self.add(self.circles) return self.circles def create_circle_shadow(self, width=32, num=50, color=BLUE_A): self.shadow = VGroup(*[Circle(radius=self.inner_radius + (i+0.5) * width/100/num, stroke_width=width/num, stroke_color=color, stroke_opacity=(i-num) ** 2 * 1/num/num, plot_depth=2) for i in 
range(num+1)]).move_to(self.center) self.add(self.shadow) return self.shadow class New_Polygon(VGroup): CONFIG = { 'stroke_color': BLUE, 'stroke_width': 4, 'fill_color': BLUE_B, 'fill_opacity': 0, } def __init__(self, *vertices, **kwargs): VGroup.__init__(self, **kwargs) self.lines, self.dots = VGroup(plot_depth=1), VGroup(plot_depth=1) self.poly=Polygon(*vertices, fill_color=self.fill_color, fill_opacity=self.fill_opacity, plot_depth=0).set_stroke(width=0) self.add(self.poly, self.lines, self.dots) n = len(vertices) for i in range(n): self.lines.add(Line(vertices[i], vertices[(i+1) % n], color=self.stroke_color, stroke_width=self.stroke_width)) self.dots.add(Dot(vertices[i], color=self.stroke_color).set_height(self.stroke_width/100)) for dot in self.dots: dot.add_updater(lambda d: d.set_height(self.stroke_width/100)) class MySector(VGroup): CONFIG = { 'label': 'label', 'font': '思源黑体 Bold', 'value': 1, } def __init__(self, ): pass class Shadow_2d(VGroup): CONFIG = { 'shadow_color': DARK_GRAY, 'shadow_opacity': 0.6, 'blur_width': 0.25, 'layer_num': 40, 'scale_factor': 1, 'shadow_out': True, 'show_basic_shape': True, 'plot_depth':-1, 'rate_func': lambda t: t ** 0.5, } def __init__(self, mob_or_points, **kwargs): VGroup.__init__(self, **kwargs) if type(mob_or_points) == list: self.shape = Polygon(*mob_or_points, stroke_width=0, plot_depth=-1) else: self.shape = mob_or_points.set_stroke(width=0) self.shape.set_fill(color=self.shadow_color, opacity=self.shadow_opacity * (1 if self.show_basic_shape else 0)).scale(self.scale_factor) self.blur_outline = VGroup() s = (self.shape.get_height() + self.shape.get_width())/2 if self.blur_width > 1e-4: for i in range(self.layer_num): layer_i = self.shape.copy().set_stroke(color=self.shadow_color, width=100 * self.blur_width/self.layer_num, opacity=self.shadow_opacity * (1-self.rate_func(i/self.layer_num))).\ set_fill(opacity=0).scale((s + (1 if self.shadow_out else -1) * self.blur_width/self.layer_num * (i+0.5))/ 
s).set_plot_depth(-2) self.blur_outline.add(layer_i) self.add(self.shape, self.blur_outline) class TransformMobject(VGroup): CONFIG = { 'rotate_angle': PI/2, 'shift_vect': ORIGIN, 'scale_range': (1, 1e-3), 'stroke_colors': [RED, PINK, BLUE], 'num': 10, 'rate_func': linear, 'scale_type': 0, } def __init__(self, mob, **kwargs): VGroup.__init__(self, **kwargs) if type(self.stroke_colors) == list: stroke_colors = color_gradient(self.stroke_colors, self.num) else: stroke_colors = color_gradient([self.stroke_colors, self.stroke_colors], self.num) for i in range(self.num): t = i/(self.num-1) shift_i = self.rate_func(t) * self.shift_vect if self.scale_type == 0: scale_i = np.exp(np.log(self.scale_range[0]) + self.rate_func(t) * (np.log(self.scale_range[1])-np.log(self.scale_range[0]))) else: scale_i = self.scale_range[0] + self.rate_func(t) * (self.scale_range[1]-self.scale_range[0]) theta_i = self.rate_func(t) * self.rotate_angle mob_i = mob.copy().shift(shift_i) mob_i.scale(scale_i, about_point=mob_i.get_center_of_mass()).rotate(theta_i, about_point=mob_i.get_center_of_mass()).set_stroke(color=stroke_colors[i]) self.add(mob_i)
true
true
f70fdb71a02ce4ac6ec40e53c2beb666b9b2d2c5
1,884
py
Python
image_augmentation/callbacks/extra_eval.py
tanzhenyu/image_augmentation
d1f8cc35cf25438556e7934e8e6c78827819ea9d
[ "Apache-2.0" ]
6
2020-08-26T18:54:42.000Z
2020-11-22T02:45:37.000Z
image_augmentation/callbacks/extra_eval.py
tanzhenyu/image_augmentation
d1f8cc35cf25438556e7934e8e6c78827819ea9d
[ "Apache-2.0" ]
3
2020-07-13T13:44:09.000Z
2022-02-10T02:12:46.000Z
image_augmentation/callbacks/extra_eval.py
tanzhenyu/image_augmentation
d1f8cc35cf25438556e7934e8e6c78827819ea9d
[ "Apache-2.0" ]
1
2021-03-24T09:51:22.000Z
2021-03-24T09:51:22.000Z
import tensorflow as tf from tensorflow.keras.callbacks import Callback class ExtraValidation(Callback): """Log evaluation metrics of an extra validation set. This callback is useful for model training scenarios where multiple validation sets are used for evaluation (as Keras by default, provides functionality for evaluating on a single validation set only). The evaluation metrics are also logged to TensorBoard. Args: validation_data: A tf.data.Dataset pipeline used to evaluate the model, essentially an extra validation dataset. tensorboard_path: Path to the TensorBoard logging directory. validation_freq: Number of epochs to wait before performing subsequent evaluations. """ def __init__(self, validation_data, tensorboard_path, validation_freq=1): super(ExtraValidation, self).__init__() self.validation_data = validation_data self.tensorboard_path = tensorboard_path self.tensorboard_writer = tf.summary.create_file_writer(self.tensorboard_path) self.validation_freq = validation_freq def on_epoch_end(self, epoch, logs=None): # evaluate at an interval of `validation_freq` epochs if (epoch + 1) % self.validation_freq == 0: # gather metric names form model metric_names = ['{}_{}'.format('epoch', metric.name) for metric in self.model.metrics] # TODO: fix `model.evaluate` memory leak on TPU # gather the evaluation metrics scores = self.model.evaluate(self.validation_data, verbose=2) # gather evaluation metrics to TensorBoard with self.tensorboard_writer.as_default(): for metric_name, score in zip(metric_names, scores): tf.summary.scalar(metric_name, score, step=epoch)
42.818182
86
0.684713
import tensorflow as tf from tensorflow.keras.callbacks import Callback class ExtraValidation(Callback): def __init__(self, validation_data, tensorboard_path, validation_freq=1): super(ExtraValidation, self).__init__() self.validation_data = validation_data self.tensorboard_path = tensorboard_path self.tensorboard_writer = tf.summary.create_file_writer(self.tensorboard_path) self.validation_freq = validation_freq def on_epoch_end(self, epoch, logs=None): if (epoch + 1) % self.validation_freq == 0: metric_names = ['{}_{}'.format('epoch', metric.name) for metric in self.model.metrics] scores = self.model.evaluate(self.validation_data, verbose=2) with self.tensorboard_writer.as_default(): for metric_name, score in zip(metric_names, scores): tf.summary.scalar(metric_name, score, step=epoch)
true
true
f70fdbec7fdf36e212bd081759f1a760efe641e0
31
py
Python
anoflows/__init__.py
rom1mouret/anoflows
42381c06b8897e4510e73cda87ea97ea3f4a5579
[ "Apache-2.0" ]
null
null
null
anoflows/__init__.py
rom1mouret/anoflows
42381c06b8897e4510e73cda87ea97ea3f4a5579
[ "Apache-2.0" ]
null
null
null
anoflows/__init__.py
rom1mouret/anoflows
42381c06b8897e4510e73cda87ea97ea3f4a5579
[ "Apache-2.0" ]
null
null
null
from .anoflows import AnoFlows
15.5
30
0.83871
from .anoflows import AnoFlows
true
true
f70fdce8d4dc8ca30480e127bc4ef4d7f2e0fe06
585
py
Python
flask_unchained/bundles/security/admins/role_admin.py
briancappello/flask-unchained
bff296b5c808f5b1db10f7dddb81054600545749
[ "MIT" ]
69
2018-10-10T01:59:11.000Z
2022-03-29T17:29:30.000Z
flask_unchained/bundles/security/admins/role_admin.py
briancappello/flask-unchained
bff296b5c808f5b1db10f7dddb81054600545749
[ "MIT" ]
18
2018-11-17T12:42:02.000Z
2021-05-22T18:45:27.000Z
flask_unchained/bundles/security/admins/role_admin.py
briancappello/flask-unchained
bff296b5c808f5b1db10f7dddb81054600545749
[ "MIT" ]
7
2018-10-12T16:20:25.000Z
2021-10-06T12:18:21.000Z
from flask_unchained.bundles.admin import ModelAdmin from flask_unchained.bundles.admin.templates import details_link, edit_link from ..models import Role class RoleAdmin(ModelAdmin): model = Role name = 'Roles' category_name = 'Security' menu_icon_value = 'fa fa-check' column_searchable_list = ('name',) column_sortable_list = ('name',) column_formatters = dict(name=details_link('role')) column_formatters_detail = dict(name=edit_link('role')) form_columns = ('name',) column_details_list = ('id', 'name', 'created_at', 'updated_at')
25.434783
75
0.71453
from flask_unchained.bundles.admin import ModelAdmin from flask_unchained.bundles.admin.templates import details_link, edit_link from ..models import Role class RoleAdmin(ModelAdmin): model = Role name = 'Roles' category_name = 'Security' menu_icon_value = 'fa fa-check' column_searchable_list = ('name',) column_sortable_list = ('name',) column_formatters = dict(name=details_link('role')) column_formatters_detail = dict(name=edit_link('role')) form_columns = ('name',) column_details_list = ('id', 'name', 'created_at', 'updated_at')
true
true
f70fdd0c46e28d30b6201db991e8334f76ba47d6
762
py
Python
image_optimizer_demo/image_optimizer_demo/urls.py
agusmakmun/django-tinypng
30fe24d2f9bd8767133ee8c0f418fd6c58fdd1be
[ "MIT" ]
17
2018-08-15T21:38:21.000Z
2021-08-17T04:31:10.000Z
image_optimizer_demo/image_optimizer_demo/urls.py
agusmakmun/django-tinypng
30fe24d2f9bd8767133ee8c0f418fd6c58fdd1be
[ "MIT" ]
6
2019-02-17T03:07:46.000Z
2022-03-29T06:35:01.000Z
image_optimizer_demo/image_optimizer_demo/urls.py
agusmakmun/django-tinypng
30fe24d2f9bd8767133ee8c0f418fd6c58fdd1be
[ "MIT" ]
9
2019-02-16T16:54:38.000Z
2022-01-14T18:29:56.000Z
"""image_optimizer_demo URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path urlpatterns = [ path('admin/', admin.site.urls), ]
34.636364
77
0.713911
from django.contrib import admin from django.urls import path urlpatterns = [ path('admin/', admin.site.urls), ]
true
true
f70fdd11036b90d4d4b1ae1d474d715bf5351817
11,113
py
Python
3-extract-data/cablegatedata.py
paul-mathieu/cablegate-cia-analysis
89ea4570319c29df5f2b105384ff9e7eb408f2f9
[ "MIT" ]
null
null
null
3-extract-data/cablegatedata.py
paul-mathieu/cablegate-cia-analysis
89ea4570319c29df5f2b105384ff9e7eb408f2f9
[ "MIT" ]
null
null
null
3-extract-data/cablegatedata.py
paul-mathieu/cablegate-cia-analysis
89ea4570319c29df5f2b105384ff9e7eb408f2f9
[ "MIT" ]
null
null
null
countries = ["Afghanistan", "Albania", "Algeria", "Andorra", "Angola", "Antigua and Barbuda", "Argentina", "Armenia", "Australia", "Austria", "Azerbaijan", "Bahamas", "Bahrain", "Bangladesh", "Barbados", "Belarus", "Belgium", "Belize", "Benin", "Bhutan", "Bolivia", "Bosnia and Herzegovina", "Botswana", "Brazil", "Brunei", "Bulgaria", "Burkina Faso", "Burundi", "Cambodia", "Cameroon", "Canada", "Cape Verde", "Central African Republic", "Chad", "Chile", "China", "Colombia", "Comoros", "Congo", "Cook Islands", "Costa Rica", "Cote d'Ivoire", "Croatia", "Cuba", "Cyprus", "Czech Republic", "Democratic Republic of Congo", "Denmark", "Djibouti", "Dominica", "Dominican Republic", "Ecuador", "Egypt", "El Salvador", "Equatorial Guinea", "Eritrea", "Estonia", "Ethiopia", "Fiji", "Finland", "France", "Gabon", "Gambia", "Georgia", "Germany", "Ghana", "Greece", "Grenada", "Guatemala", "Guinea", "Guinea-Bissau", "Guyana", "Haiti", "Honduras", "Hungary", "Iceland", "India", "Indonesia", "Iran", "Iraq", "Ireland", "Israel", "Italy", "Jamaica", "Japan", "Jordan", "Kazakhstan", "Kenya", "Kiribati", "Kuwait", "Kyrgyzstan", "Laos", "Latvia", "Lebanon", "Lesotho", "Liberia", "Libya", "Lithuania", "Luxembourg", "Macedonia", "Madagascar", "Malawi", "Malaysia", "Maldives", "Mali", "Malta", "Marshall Islands", "Mauritania", "Mauritius", "Mexico", "Micronesia (country)", "Moldova", "Mongolia", "Montenegro", "Morocco", "Mozambique", "Myanmar", "Namibia", "Nauru", "Nepal", "Netherlands", "New Zealand", "Nicaragua", "Niger", "Nigeria", "Niue", "North Korea", "Norway", "Oman", "Pakistan", "Palau", "Panama", "Papua New Guinea", "Paraguay", "Peru", "Philippines", "Poland", "Portugal", "Qatar", "Romania", "Russia", "Rwanda", "Saint Kitts and Nevis", "Saint Lucia", "Saint Vincent and the Grenadines", "Samoa", "Sao Tome and Principe", "Saudi Arabia", "Senegal", "Serbia", "Seychelles", "Sierra Leone", "Singapore", "Slovakia", "Slovenia", "Solomon Islands", "Somalia", "South Africa", "South Korea", 
"Spain", "Sri Lanka", "Sudan (former)", "Suriname", "Swaziland", "Sweden", "Switzerland", "Syria", "Tajikistan", "Tanzania", "Thailand", "Timor", "Togo", "Tonga", "Trinidad and Tobago", "Tunisia", "Turkey", "Turkmenistan", "Tuvalu", "Uganda", "Ukraine", "United Arab Emirates", "United Kingdom", "United States", "Uruguay", "Uzbekistan", "Vanuatu", "Venezuela", "Vietnam", "Yemen", "Zambia", "Zimbabwe"] res = {'06HONGKONG4795': {'date': {'month': 'DEC', 'year': '2006'}, 'entity_involved': ['RHMFIUU/HQ', 'USDOC', 'OEA', 'LHINES/DFARROW USDOC', 'FCS', 'the Export Administration Act', 'the Office of Enforcement Analysis', 'the USDOC Bureau of Industry and Security', 'BIS', 'Export Control', 'Advanced Energy-Shenzhen ', 'Baltrans', 'ECCN', 'International Rectifier of Leominster', 'International Rectifier', 'Advanced Energy', 'ECO', 'Airfreight Operations', 'Operations Manager', 'Airfreight', 'Federal Express', "Advanced Energy's", 'BIS '], 'from': 'AMCONSUL HONG KONG', 'keywords': ['subject', 'ankel', 'providers', 'street', 'route'], 'most_common_words': [('Advanced', 14), ('Energy', 14), ('Baltrans', 10), ('Mr.', 10), ('Lam', 9), ('shipment', 8), ('Hong', 8), ('Kong', 8), ('items', 8), ('ECO', 6), ('USDOC', 5), ('export', 5), ('OEA', 4), ('provided', 4)], 'people_involved': ['RUCPDOC', 'RUEHC', 'SIPDIS ', 'WILLIAM ZARIT ', 'BMGT BEXP', 'ETRD ETTC', 'Philip Ankel', 'Tai Yip Street', 'Theodore Shum', 'Gordon Lam', 'Lam', 'Cunningham'], 'place_involved': ['KOWLOON', 'HONG KONG', 'CHINA', 'MASSACHUSETTS', 'UNITED STATES', 'SHENZHEN'], 'place_of_document': 'HONGKONG', 'subject': 'EXTRANCHECK: POST SHIPMENT VERIFICATION: ADVANCED ' 'ENERGY-SHENZHEN C/O ' 'BALTRANS LOGISTRIC ', 'tags': ['BMGT', 'BEXP', 'HK', 'ETRD', 'ETTC']}, '06HOCHIMINHCITY917': {'date': {'month': 'AUG', 'year': '2006'}, 'entity_involved': ['RUEHC/SECSTATE WASHDC PRIORITY', 'RUCNARF', 'RUEHHM/AMCONSUL HO', 'PHUM PGOV PREF KIRF', 'Consul General', 'State', 'the Montagnard Foundation', 'ConGen', 'GVN', 
'Southern Evangelical Church of Vietnam', 'Dak Nong', 'SBU', 'Vietnamese Embassy', 'PNTR', 'Congress', 'WINNICK'], 'from': 'AMCONSUL HO CHI MINH CITY', 'keywords': ['subject', 'migrants', 'congress', 'collective', 'leader'], 'most_common_words': [('police', 12), ('ethnic', 7), ('minority', 7), ('Adrong', 7), ('contact', 7), ('province', 6), ('HCMC', 5), ('United', 5), ('States', 5), ('Central', 5), ('Highlands', 5), ('SECV', 5), ('contacts', 4)], 'people_involved': ['RUEHCHI RUEHDT RUEHNH', 'HO CHI MINH CITY', '000917 ', 'SIPDIS ', 'E.O.', 'DECL', 'Seth Winnick', 'Y Ngo Adrong', 'Adrong', 'Siu Y Kim', 'Gia Lai', 'Chu Se', 'Kim', 'Dega', 'Phu Yen'], 'place_involved': ['CENTRAL HIGHLANDS', 'HCMC', 'UNITED STATES', 'DAK LAK', 'CAMBODIA', 'VIETNAM', 'WASHINGTON'], 'place_of_document': 'HOCHIMINHCITY', 'subject': 'POLICE BRUTALITY RISING; CENTRAL HIGHLANDS DEATH CONFIRMED ', 'tags': ['PHUM', 'PGOV', 'PREF', 'KIRF', 'VM']}, '06JERUSALEM906': {'date': {'month': 'MAR', 'year': '2006'}, 'entity_involved': ['RUEHC/SECSTATE WASHDC', '0698', 'RHEHNSC', 'NSC', 'RUEHBS/USEU BRUSSELS', 'FRONT OFFICE', 'NEA/IPA', 'WILLIAMS/GREENE/WAECHTER', 'ABRAMS', 'PHUM PREF EAID ECON', 'SBU', 'the World Food Program', 'WFP', 'ECON', 'the PA Ministry of National Economy', 'UNRWA', 'Market Monitoring'], 'from': 'AMCONSUL JERUSALEM', 'keywords': ['subject', 'vulnerability', 'collective', 'works', 'phum'], 'most_common_words': [('days', 11), ('food', 7), ('IMMEDIATE', 5), ('Gaza', 5), ('price', 5), ('flour', 4), ('WASHDC', 3), ('WFP', 3), ('March', 3), ('Karni', 3), ('stocks', 3), ('report', 3), ('percent', 3), ('JERUSALEM', 2)], 'people_involved': ['000906 ', 'SIPDIS ', 'NEA', 'DORAN', 'MUSTAFA ', 'Arnold Vercken', 'Karni'], 'place_involved': ['GAZA', 'WEST BANK/GAZA COUNTRY', 'U.S.'], 'place_of_document': 'JERUSALEM', 'subject': 'KARNI CLOSURE CAUSING FOOD SHORTAGE IN GAZA ', 'tags': ['PHUM', 'PREF', 'EAID', 'ECON', 'KWBG']}, '09BERLIN831': {'date': {'month': 'JUL', 'year': '2009'}, 
'entity_involved': ['RUEHC/SECSTATE WASHDC', 'RUEHAD', 'AMEMBASSY ABU DHABI', 'RUEHUJA', 'AMEMBASSY ABUJA PRIORITY', 'RUEHAK', 'AMEMBASSY ANKARA', 'RUEHTH', 'AMEMBASSY ATHENS', 'RUEHBS/', 'AMEMBASSY', 'RUEHEG', 'AMEMBASSY CAIRO', 'RUEHBY', 'AMEMBASSY CANBERRA', 'RUEHCP', 'AMEMBASSY COPENHAGEN', 'RUEHDJ', 'RUEHKL', 'AMEMBASSY KUALA LUMPUR', 'RUEHLI', 'AMEMBASSY LONDON', 'RUEHMD', 'RUEHMV', 'AMEMBASSY MONROVIA', 'RUEHMO', 'RUEHMS/AMEMBASSY MUSCAT', 'RUEHNR', 'AMEMBASSY NAIROBI', 'RUEHNE', 'AMEMBASSY NEW DELHI', 'RUEHNY', 'AMEMBASSY OSLO', 'RUEHOT', 'AMEMBASSY OTTAWA', 'RUEHZP', 'AMEMBASSY PANAMA', 'RUEHFR', 'AMEMBASSY PARIS', 'RUEHRH', 'AMEMBASSY RIYADH', 'RUEHRO', 'RUEHYN', 'RUEHGP/AMEMBASSY SINGAPORE', 'RUEHSM', 'AMEMBASSY STOCKHOLM', 'RUEHTC', 'RUEHKO/AMEMBASSY TOKYO', 'RUCNDT/USMISSION', 'EWWT', 'PHSA', 'PHUM PREL', 'GM', 'CGPCS', 'ON PARTICIPATION ISSUE', 'MFA UN Security Council Action', 'the Contact Group for Piracy', 'Turkish', 'German', 'the International Criminal Tribunal'], 'from': 'AMEMBASSY BERLIN', 'keywords': ['subject', 'expertise', 'stockhausen', '091715z', 'ruehul'], 'most_common_words': [('AMEMBASSY', 32), ('PRIORITY', 32), ('Germany', 5), ('national', 5), ('Stockhausen', 4), ('said', 4), ('cases', 4), ('region', 4), ('BERLIN', 3), ('CGPCS', 3), ('U.S.', 3), ('countries', 3), ('piracy', 3)], 'people_involved': ['RUEHBJ', '0210RUEHLO/', 'SIPDIS ', 'E.O.', 'DECL', 'STAN OTTO', 'Dirk Stockhausen', 'Koenig'], 'place_involved': ['BRUSSELS', 'MOSCOW', 'HAGUE', 'NEW YORK', 'BERLIN', 'GERMANY', 'U.S.', 'SOMALIA', 'NETHERLANDS', 'KENYA', 'CAMBODIA', 'ARUSHA', 'TANZANIA', 'RWANDA'], 'place_of_document': 'BERLIN', 'subject': 'CGPCS: GERMANY AGREES ON PARTICIPATION ISSUE, BUT IS STILL ' 'OFFSIDE REGARDING ' 'INTERNATIONAL ' 'TRIBUNAL ', 'tags': ['EWWT', 'MARR', 'PGOV', 'PHSA', 'PHUM', 'PREL', 'MOPS', 'GM']}}
101.954128
120
0.454513
countries = ["Afghanistan", "Albania", "Algeria", "Andorra", "Angola", "Antigua and Barbuda", "Argentina", "Armenia", "Australia", "Austria", "Azerbaijan", "Bahamas", "Bahrain", "Bangladesh", "Barbados", "Belarus", "Belgium", "Belize", "Benin", "Bhutan", "Bolivia", "Bosnia and Herzegovina", "Botswana", "Brazil", "Brunei", "Bulgaria", "Burkina Faso", "Burundi", "Cambodia", "Cameroon", "Canada", "Cape Verde", "Central African Republic", "Chad", "Chile", "China", "Colombia", "Comoros", "Congo", "Cook Islands", "Costa Rica", "Cote d'Ivoire", "Croatia", "Cuba", "Cyprus", "Czech Republic", "Democratic Republic of Congo", "Denmark", "Djibouti", "Dominica", "Dominican Republic", "Ecuador", "Egypt", "El Salvador", "Equatorial Guinea", "Eritrea", "Estonia", "Ethiopia", "Fiji", "Finland", "France", "Gabon", "Gambia", "Georgia", "Germany", "Ghana", "Greece", "Grenada", "Guatemala", "Guinea", "Guinea-Bissau", "Guyana", "Haiti", "Honduras", "Hungary", "Iceland", "India", "Indonesia", "Iran", "Iraq", "Ireland", "Israel", "Italy", "Jamaica", "Japan", "Jordan", "Kazakhstan", "Kenya", "Kiribati", "Kuwait", "Kyrgyzstan", "Laos", "Latvia", "Lebanon", "Lesotho", "Liberia", "Libya", "Lithuania", "Luxembourg", "Macedonia", "Madagascar", "Malawi", "Malaysia", "Maldives", "Mali", "Malta", "Marshall Islands", "Mauritania", "Mauritius", "Mexico", "Micronesia (country)", "Moldova", "Mongolia", "Montenegro", "Morocco", "Mozambique", "Myanmar", "Namibia", "Nauru", "Nepal", "Netherlands", "New Zealand", "Nicaragua", "Niger", "Nigeria", "Niue", "North Korea", "Norway", "Oman", "Pakistan", "Palau", "Panama", "Papua New Guinea", "Paraguay", "Peru", "Philippines", "Poland", "Portugal", "Qatar", "Romania", "Russia", "Rwanda", "Saint Kitts and Nevis", "Saint Lucia", "Saint Vincent and the Grenadines", "Samoa", "Sao Tome and Principe", "Saudi Arabia", "Senegal", "Serbia", "Seychelles", "Sierra Leone", "Singapore", "Slovakia", "Slovenia", "Solomon Islands", "Somalia", "South Africa", "South Korea", 
"Spain", "Sri Lanka", "Sudan (former)", "Suriname", "Swaziland", "Sweden", "Switzerland", "Syria", "Tajikistan", "Tanzania", "Thailand", "Timor", "Togo", "Tonga", "Trinidad and Tobago", "Tunisia", "Turkey", "Turkmenistan", "Tuvalu", "Uganda", "Ukraine", "United Arab Emirates", "United Kingdom", "United States", "Uruguay", "Uzbekistan", "Vanuatu", "Venezuela", "Vietnam", "Yemen", "Zambia", "Zimbabwe"] res = {'06HONGKONG4795': {'date': {'month': 'DEC', 'year': '2006'}, 'entity_involved': ['RHMFIUU/HQ', 'USDOC', 'OEA', 'LHINES/DFARROW USDOC', 'FCS', 'the Export Administration Act', 'the Office of Enforcement Analysis', 'the USDOC Bureau of Industry and Security', 'BIS', 'Export Control', 'Advanced Energy-Shenzhen ', 'Baltrans', 'ECCN', 'International Rectifier of Leominster', 'International Rectifier', 'Advanced Energy', 'ECO', 'Airfreight Operations', 'Operations Manager', 'Airfreight', 'Federal Express', "Advanced Energy's", 'BIS '], 'from': 'AMCONSUL HONG KONG', 'keywords': ['subject', 'ankel', 'providers', 'street', 'route'], 'most_common_words': [('Advanced', 14), ('Energy', 14), ('Baltrans', 10), ('Mr.', 10), ('Lam', 9), ('shipment', 8), ('Hong', 8), ('Kong', 8), ('items', 8), ('ECO', 6), ('USDOC', 5), ('export', 5), ('OEA', 4), ('provided', 4)], 'people_involved': ['RUCPDOC', 'RUEHC', 'SIPDIS ', 'WILLIAM ZARIT ', 'BMGT BEXP', 'ETRD ETTC', 'Philip Ankel', 'Tai Yip Street', 'Theodore Shum', 'Gordon Lam', 'Lam', 'Cunningham'], 'place_involved': ['KOWLOON', 'HONG KONG', 'CHINA', 'MASSACHUSETTS', 'UNITED STATES', 'SHENZHEN'], 'place_of_document': 'HONGKONG', 'subject': 'EXTRANCHECK: POST SHIPMENT VERIFICATION: ADVANCED ' 'ENERGY-SHENZHEN C/O ' 'BALTRANS LOGISTRIC ', 'tags': ['BMGT', 'BEXP', 'HK', 'ETRD', 'ETTC']}, '06HOCHIMINHCITY917': {'date': {'month': 'AUG', 'year': '2006'}, 'entity_involved': ['RUEHC/SECSTATE WASHDC PRIORITY', 'RUCNARF', 'RUEHHM/AMCONSUL HO', 'PHUM PGOV PREF KIRF', 'Consul General', 'State', 'the Montagnard Foundation', 'ConGen', 'GVN', 
'Southern Evangelical Church of Vietnam', 'Dak Nong', 'SBU', 'Vietnamese Embassy', 'PNTR', 'Congress', 'WINNICK'], 'from': 'AMCONSUL HO CHI MINH CITY', 'keywords': ['subject', 'migrants', 'congress', 'collective', 'leader'], 'most_common_words': [('police', 12), ('ethnic', 7), ('minority', 7), ('Adrong', 7), ('contact', 7), ('province', 6), ('HCMC', 5), ('United', 5), ('States', 5), ('Central', 5), ('Highlands', 5), ('SECV', 5), ('contacts', 4)], 'people_involved': ['RUEHCHI RUEHDT RUEHNH', 'HO CHI MINH CITY', '000917 ', 'SIPDIS ', 'E.O.', 'DECL', 'Seth Winnick', 'Y Ngo Adrong', 'Adrong', 'Siu Y Kim', 'Gia Lai', 'Chu Se', 'Kim', 'Dega', 'Phu Yen'], 'place_involved': ['CENTRAL HIGHLANDS', 'HCMC', 'UNITED STATES', 'DAK LAK', 'CAMBODIA', 'VIETNAM', 'WASHINGTON'], 'place_of_document': 'HOCHIMINHCITY', 'subject': 'POLICE BRUTALITY RISING; CENTRAL HIGHLANDS DEATH CONFIRMED ', 'tags': ['PHUM', 'PGOV', 'PREF', 'KIRF', 'VM']}, '06JERUSALEM906': {'date': {'month': 'MAR', 'year': '2006'}, 'entity_involved': ['RUEHC/SECSTATE WASHDC', '0698', 'RHEHNSC', 'NSC', 'RUEHBS/USEU BRUSSELS', 'FRONT OFFICE', 'NEA/IPA', 'WILLIAMS/GREENE/WAECHTER', 'ABRAMS', 'PHUM PREF EAID ECON', 'SBU', 'the World Food Program', 'WFP', 'ECON', 'the PA Ministry of National Economy', 'UNRWA', 'Market Monitoring'], 'from': 'AMCONSUL JERUSALEM', 'keywords': ['subject', 'vulnerability', 'collective', 'works', 'phum'], 'most_common_words': [('days', 11), ('food', 7), ('IMMEDIATE', 5), ('Gaza', 5), ('price', 5), ('flour', 4), ('WASHDC', 3), ('WFP', 3), ('March', 3), ('Karni', 3), ('stocks', 3), ('report', 3), ('percent', 3), ('JERUSALEM', 2)], 'people_involved': ['000906 ', 'SIPDIS ', 'NEA', 'DORAN', 'MUSTAFA ', 'Arnold Vercken', 'Karni'], 'place_involved': ['GAZA', 'WEST BANK/GAZA COUNTRY', 'U.S.'], 'place_of_document': 'JERUSALEM', 'subject': 'KARNI CLOSURE CAUSING FOOD SHORTAGE IN GAZA ', 'tags': ['PHUM', 'PREF', 'EAID', 'ECON', 'KWBG']}, '09BERLIN831': {'date': {'month': 'JUL', 'year': '2009'}, 
'entity_involved': ['RUEHC/SECSTATE WASHDC', 'RUEHAD', 'AMEMBASSY ABU DHABI', 'RUEHUJA', 'AMEMBASSY ABUJA PRIORITY', 'RUEHAK', 'AMEMBASSY ANKARA', 'RUEHTH', 'AMEMBASSY ATHENS', 'RUEHBS/', 'AMEMBASSY', 'RUEHEG', 'AMEMBASSY CAIRO', 'RUEHBY', 'AMEMBASSY CANBERRA', 'RUEHCP', 'AMEMBASSY COPENHAGEN', 'RUEHDJ', 'RUEHKL', 'AMEMBASSY KUALA LUMPUR', 'RUEHLI', 'AMEMBASSY LONDON', 'RUEHMD', 'RUEHMV', 'AMEMBASSY MONROVIA', 'RUEHMO', 'RUEHMS/AMEMBASSY MUSCAT', 'RUEHNR', 'AMEMBASSY NAIROBI', 'RUEHNE', 'AMEMBASSY NEW DELHI', 'RUEHNY', 'AMEMBASSY OSLO', 'RUEHOT', 'AMEMBASSY OTTAWA', 'RUEHZP', 'AMEMBASSY PANAMA', 'RUEHFR', 'AMEMBASSY PARIS', 'RUEHRH', 'AMEMBASSY RIYADH', 'RUEHRO', 'RUEHYN', 'RUEHGP/AMEMBASSY SINGAPORE', 'RUEHSM', 'AMEMBASSY STOCKHOLM', 'RUEHTC', 'RUEHKO/AMEMBASSY TOKYO', 'RUCNDT/USMISSION', 'EWWT', 'PHSA', 'PHUM PREL', 'GM', 'CGPCS', 'ON PARTICIPATION ISSUE', 'MFA UN Security Council Action', 'the Contact Group for Piracy', 'Turkish', 'German', 'the International Criminal Tribunal'], 'from': 'AMEMBASSY BERLIN', 'keywords': ['subject', 'expertise', 'stockhausen', '091715z', 'ruehul'], 'most_common_words': [('AMEMBASSY', 32), ('PRIORITY', 32), ('Germany', 5), ('national', 5), ('Stockhausen', 4), ('said', 4), ('cases', 4), ('region', 4), ('BERLIN', 3), ('CGPCS', 3), ('U.S.', 3), ('countries', 3), ('piracy', 3)], 'people_involved': ['RUEHBJ', '0210RUEHLO/', 'SIPDIS ', 'E.O.', 'DECL', 'STAN OTTO', 'Dirk Stockhausen', 'Koenig'], 'place_involved': ['BRUSSELS', 'MOSCOW', 'HAGUE', 'NEW YORK', 'BERLIN', 'GERMANY', 'U.S.', 'SOMALIA', 'NETHERLANDS', 'KENYA', 'CAMBODIA', 'ARUSHA', 'TANZANIA', 'RWANDA'], 'place_of_document': 'BERLIN', 'subject': 'CGPCS: GERMANY AGREES ON PARTICIPATION ISSUE, BUT IS STILL ' 'OFFSIDE REGARDING ' 'INTERNATIONAL ' 'TRIBUNAL ', 'tags': ['EWWT', 'MARR', 'PGOV', 'PHSA', 'PHUM', 'PREL', 'MOPS', 'GM']}}
true
true
f70fdf440a5e9d6267be85dd22467b253cd43fb7
42,685
py
Python
httpx/models.py
pereile/httpx
31730e709597baaa7b2364fee041dfa985169789
[ "BSD-3-Clause" ]
null
null
null
httpx/models.py
pereile/httpx
31730e709597baaa7b2364fee041dfa985169789
[ "BSD-3-Clause" ]
null
null
null
httpx/models.py
pereile/httpx
31730e709597baaa7b2364fee041dfa985169789
[ "BSD-3-Clause" ]
null
null
null
import cgi
import datetime
import email.message
import json as jsonlib
import typing
import urllib.request
from collections.abc import MutableMapping
from http.cookiejar import Cookie, CookieJar
from urllib.parse import parse_qsl, urlencode

import chardet
import rfc3986

from .config import USER_AGENT
from .decoders import (
    ACCEPT_ENCODING,
    SUPPORTED_DECODERS,
    Decoder,
    IdentityDecoder,
    MultiDecoder,
    TextDecoder,
)
from .exceptions import (
    CookieConflict,
    HTTPError,
    InvalidURL,
    NotRedirectResponse,
    ResponseClosed,
    ResponseNotRead,
    StreamConsumed,
)
from .multipart import multipart_encode
from .status_codes import StatusCode
from .utils import (
    guess_json_utf,
    is_known_encoding,
    normalize_header_key,
    normalize_header_value,
    obfuscate_sensitive_headers,
    parse_header_links,
    str_query_param,
)

if typing.TYPE_CHECKING:  # pragma: no cover
    # Imported only for type annotations, to avoid circular imports at runtime.
    from .middleware.base import BaseMiddleware  # noqa: F401
    from .dispatch.base import AsyncDispatcher  # noqa: F401


# Type aliases describing the values accepted by the public API.
# Forward references (e.g. "URL", "Headers") name classes defined below.
PrimitiveData = typing.Optional[typing.Union[str, int, float, bool]]

URLTypes = typing.Union["URL", str]

QueryParamTypes = typing.Union[
    "QueryParams",
    typing.Mapping[str, PrimitiveData],
    typing.List[typing.Tuple[str, PrimitiveData]],
    str,
]

HeaderTypes = typing.Union[
    "Headers",
    typing.Dict[typing.AnyStr, typing.AnyStr],
    typing.List[typing.Tuple[typing.AnyStr, typing.AnyStr]],
]

CookieTypes = typing.Union["Cookies", CookieJar, typing.Dict[str, str]]

AuthTypes = typing.Union[
    typing.Tuple[typing.Union[str, bytes], typing.Union[str, bytes]],
    typing.Callable[["AsyncRequest"], "AsyncRequest"],
    "BaseMiddleware",
]

ProxiesTypes = typing.Union[
    URLTypes,
    "AsyncDispatcher",
    typing.Dict[URLTypes, typing.Union[URLTypes, "AsyncDispatcher"]],
]

AsyncRequestData = typing.Union[dict, str, bytes, typing.AsyncIterator[bytes]]

RequestData = typing.Union[dict, str, bytes, typing.Iterator[bytes]]

# Mapping of form field name to file upload, in any of three shapes:
RequestFiles = typing.Dict[
    str,
    typing.Union[
        typing.IO[typing.AnyStr],  # file
        typing.Tuple[str, typing.IO[typing.AnyStr]],  # (filename, file)
        typing.Tuple[
            str, typing.IO[typing.AnyStr], str
        ],  # (filename, file, content_type)
    ],
]

AsyncResponseContent = typing.Union[bytes, typing.AsyncIterator[bytes]]

ResponseContent = typing.Union[bytes, typing.Iterator[bytes]]


class URL:
    """
    A parsed URL, backed by an `rfc3986` URI reference.

    Absolute URLs are normalized on construction; relative URLs are only
    permitted when `allow_relative=True`.
    """

    def __init__(
        self,
        url: URLTypes,
        allow_relative: bool = False,
        params: QueryParamTypes = None,
    ) -> None:
        if isinstance(url, str):
            self._uri_reference = rfc3986.api.iri_reference(url).encode()
        else:
            # Copying from an existing URL instance shares its parsed reference.
            self._uri_reference = url._uri_reference

        # Normalize scheme and domain name.
        if self.is_absolute_url:
            self._uri_reference = self._uri_reference.normalize()

        # Add any query parameters.
        if params:
            query_string = str(QueryParams(params))
            self._uri_reference = self._uri_reference.copy_with(query=query_string)

        # Enforce absolute URLs by default.
        if not allow_relative:
            if not self.scheme:
                raise InvalidURL("No scheme included in URL.")
            if not self.host:
                raise InvalidURL("No host included in URL.")

        # Allow setting full_path to custom attributes requests
        # like OPTIONS, CONNECT, and forwarding proxy requests.
        self._full_path: typing.Optional[str] = None

    @property
    def scheme(self) -> str:
        return self._uri_reference.scheme or ""

    @property
    def authority(self) -> str:
        return self._uri_reference.authority or ""

    @property
    def userinfo(self) -> str:
        return self._uri_reference.userinfo or ""

    @property
    def username(self) -> str:
        userinfo = self._uri_reference.userinfo or ""
        return userinfo.partition(":")[0]

    @property
    def password(self) -> str:
        userinfo = self._uri_reference.userinfo or ""
        return userinfo.partition(":")[2]

    @property
    def host(self) -> str:
        return self._uri_reference.host or ""

    @property
    def port(self) -> int:
        # NOTE(review): when no explicit port is present, only 'http'/'https'
        # have defaults here — any other scheme raises KeyError. Confirm
        # callers only reach this with http(s) URLs.
        port = self._uri_reference.port
        if port is None:
            return {"https": 443, "http": 80}[self.scheme]
        return int(port)

    @property
    def path(self) -> str:
        return self._uri_reference.path or "/"

    @property
    def query(self) -> str:
        return self._uri_reference.query or ""

    @property
    def full_path(self) -> str:
        # An explicitly assigned full_path overrides the derived path + query.
        if self._full_path is not None:
            return self._full_path
        path = self.path
        if self.query:
            path += "?" + self.query
        return path

    @full_path.setter
    def full_path(self, value: typing.Optional[str]) -> None:
        self._full_path = value

    @property
    def fragment(self) -> str:
        return self._uri_reference.fragment or ""

    @property
    def is_ssl(self) -> bool:
        return self.scheme == "https"

    @property
    def is_absolute_url(self) -> bool:
        """
        Return `True` for absolute URLs such as 'http://example.com/path',
        and `False` for relative URLs such as '/path'.
        """
        # We don't use `.is_absolute` from `rfc3986` because it treats
        # URLs with a fragment portion as not absolute.
        # What we actually care about is if the URL provides
        # a scheme and hostname to which connections should be made.
        return bool(self.scheme and self.host)

    @property
    def is_relative_url(self) -> bool:
        return not self.is_absolute_url

    @property
    def origin(self) -> "Origin":
        return Origin(self)

    def copy_with(self, **kwargs: typing.Any) -> "URL":
        # The userinfo/host/port components are passed to rfc3986 as a single
        # composed 'authority' string, so rebuild it when any of them change.
        if (
            "username" in kwargs
            or "password" in kwargs
            or "host" in kwargs
            or "port" in kwargs
        ):
            host = kwargs.pop("host", self.host)
            port = kwargs.pop("port", self.port)
            username = kwargs.pop("username", self.username)
            password = kwargs.pop("password", self.password)

            authority = host
            if port is not None:
                authority += f":{port}"
            if username is not None:
                userpass = username
                if password is not None:
                    userpass += f":{password}"
                authority = f"{userpass}@{authority}"

            kwargs["authority"] = authority

        return URL(self._uri_reference.copy_with(**kwargs).unsplit())

    def join(self, relative_url: URLTypes) -> "URL":
        """
        Return an absolute URL, using this URL as the base.
        """
        if self.is_relative_url:
            return URL(relative_url)

        # We drop any fragment portion, because RFC 3986 strictly
        # treats URLs with a fragment portion as not being absolute URLs.
        base_uri = self._uri_reference.copy_with(fragment=None)
        relative_url = URL(relative_url, allow_relative=True)
        return URL(relative_url._uri_reference.resolve_with(base_uri).unsplit())

    def __hash__(self) -> int:
        return hash(str(self))

    def __eq__(self, other: typing.Any) -> bool:
        return isinstance(other, (URL, str)) and str(self) == str(other)

    def __str__(self) -> str:
        return self._uri_reference.unsplit()

    def __repr__(self) -> str:
        # Never echo credentials into logs: mask the password portion.
        class_name = self.__class__.__name__
        url_str = str(self)
        if self._uri_reference.userinfo:
            url_str = (
                rfc3986.urlparse(url_str)
                .copy_with(userinfo=f"{self.username}:[secure]")
                .unsplit()
            )
        return f"{class_name}({url_str!r})"


class Origin:
    """
    The URL scheme and authority information, as a comparable, hashable object.
""" def __init__(self, url: URLTypes) -> None: if not isinstance(url, URL): url = URL(url) self.scheme = url.scheme self.is_ssl = url.is_ssl self.host = url.host self.port = url.port def __eq__(self, other: typing.Any) -> bool: return ( isinstance(other, self.__class__) and self.scheme == other.scheme and self.host == other.host and self.port == other.port ) def __hash__(self) -> int: return hash((self.scheme, self.host, self.port)) def __repr__(self) -> str: class_name = self.__class__.__name__ return ( f"{class_name}(scheme={self.scheme!r} host={self.host!r} port={self.port})" ) class QueryParams(typing.Mapping[str, str]): """ URL query parameters, as a multi-dict. """ def __init__(self, *args: QueryParamTypes, **kwargs: typing.Any) -> None: assert len(args) < 2, "Too many arguments." assert not (args and kwargs), "Cannot mix named and unnamed arguments." value = args[0] if args else kwargs if isinstance(value, str): items = parse_qsl(value) elif isinstance(value, QueryParams): items = value.multi_items() elif isinstance(value, list): items = value # type: ignore else: items = value.items() # type: ignore self._list = [(str(k), str_query_param(v)) for k, v in items] self._dict = {str(k): str_query_param(v) for k, v in items} def getlist(self, key: typing.Any) -> typing.List[str]: return [item_value for item_key, item_value in self._list if item_key == key] def keys(self) -> typing.KeysView: return self._dict.keys() def values(self) -> typing.ValuesView: return self._dict.values() def items(self) -> typing.ItemsView: return self._dict.items() def multi_items(self) -> typing.List[typing.Tuple[str, str]]: return list(self._list) def get(self, key: typing.Any, default: typing.Any = None) -> typing.Any: if key in self._dict: return self._dict[key] return default def update(self, params: QueryParamTypes = None) -> None: # type: ignore if not params: return params = QueryParams(params) for param in params: self[param] = params[param] def __getitem__(self, key: 
            typing.Any) -> str:
        return self._dict[key]

    def __setitem__(self, key: str, value: str) -> None:
        # Replace the first occurrence in the ordered list, drop any
        # duplicates, and keep _dict in sync.
        self._dict[key] = value

        found_indexes = []
        for idx, (item_key, _) in enumerate(self._list):
            if item_key == key:
                found_indexes.append(idx)

        for idx in reversed(found_indexes[1:]):
            del self._list[idx]

        if found_indexes:
            idx = found_indexes[0]
            self._list[idx] = (key, value)
        else:
            self._list.append((key, value))

    def __contains__(self, key: typing.Any) -> bool:
        return key in self._dict

    def __iter__(self) -> typing.Iterator[typing.Any]:
        return iter(self.keys())

    def __len__(self) -> int:
        return len(self._dict)

    def __eq__(self, other: typing.Any) -> bool:
        # Order-insensitive comparison over the full multi-dict contents.
        if not isinstance(other, self.__class__):
            return False
        return sorted(self._list) == sorted(other._list)

    def __str__(self) -> str:
        return urlencode(self._list)

    def __repr__(self) -> str:
        class_name = self.__class__.__name__
        query_string = str(self)
        return f"{class_name}({query_string!r})"


class Headers(typing.MutableMapping[str, str]):
    """
    HTTP headers, as a case-insensitive multi-dict.
    """

    def __init__(self, headers: HeaderTypes = None, encoding: str = None) -> None:
        if headers is None:
            self._list = []  # type: typing.List[typing.Tuple[bytes, bytes]]
        elif isinstance(headers, Headers):
            self._list = list(headers.raw)
        elif isinstance(headers, dict):
            self._list = [
                (normalize_header_key(k, encoding), normalize_header_value(v, encoding))
                for k, v in headers.items()
            ]
        else:
            self._list = [
                (normalize_header_key(k, encoding), normalize_header_value(v, encoding))
                for k, v in headers
            ]
        self._encoding = encoding

    @property
    def encoding(self) -> str:
        """
        Header encoding is mandated as ascii, but we allow fallbacks to utf-8
        or iso-8859-1.
        """
        if self._encoding is None:
            for encoding in ["ascii", "utf-8"]:
                for key, value in self.raw:
                    try:
                        key.decode(encoding)
                        value.decode(encoding)
                    except UnicodeDecodeError:
                        break
                else:
                    # The else block runs if 'break' did not occur, meaning
                    # all values fitted the encoding.
                    self._encoding = encoding
                    break
            else:
                # The ISO-8859-1 encoding covers all 256 code points in a byte,
                # so will never raise decode errors.
                self._encoding = "iso-8859-1"
        return self._encoding

    @encoding.setter
    def encoding(self, value: str) -> None:
        self._encoding = value

    @property
    def raw(self) -> typing.List[typing.Tuple[bytes, bytes]]:
        """
        Returns a list of the raw header items, as byte pairs.
        May be mutated in-place.
        """
        return self._list

    def keys(self) -> typing.List[str]:  # type: ignore
        return [key.decode(self.encoding) for key, value in self._list]

    def values(self) -> typing.List[str]:  # type: ignore
        return [value.decode(self.encoding) for key, value in self._list]

    def items(self) -> typing.List[typing.Tuple[str, str]]:  # type: ignore
        return [
            (key.decode(self.encoding), value.decode(self.encoding))
            for key, value in self._list
        ]

    def get(self, key: str, default: typing.Any = None) -> typing.Any:
        try:
            return self[key]
        except KeyError:
            return default

    def getlist(self, key: str, split_commas: bool = False) -> typing.List[str]:
        """
        Return multiple header values.
        """
        get_header_key = key.lower().encode(self.encoding)

        values = [
            item_value.decode(self.encoding)
            for item_key, item_value in self._list
            if item_key == get_header_key
        ]

        if not split_commas:
            return values

        split_values = []
        for value in values:
            split_values.extend([item.strip() for item in value.split(",")])
        return split_values

    def update(self, headers: HeaderTypes = None) -> None:  # type: ignore
        headers = Headers(headers)
        for header in headers:
            self[header] = headers[header]

    def copy(self) -> "Headers":
        return Headers(self.items(), encoding=self.encoding)

    def __getitem__(self, key: str) -> str:
        """
        Return a single header value.

        If there are multiple headers with the same key, then we concatenate
        them with commas. See: https://tools.ietf.org/html/rfc7230#section-3.2.2
        """
        normalized_key = key.lower().encode(self.encoding)

        items = []
        for header_key, header_value in self._list:
            if header_key == normalized_key:
                items.append(header_value.decode(self.encoding))

        if items:
            return ", ".join(items)

        raise KeyError(key)

    def __setitem__(self, key: str, value: str) -> None:
        """
        Set the header `key` to `value`, removing any duplicate entries.
        Retains insertion order.
        """
        set_key = key.lower().encode(self.encoding)
        set_value = value.encode(self.encoding)

        found_indexes = []
        for idx, (item_key, _) in enumerate(self._list):
            if item_key == set_key:
                found_indexes.append(idx)

        for idx in reversed(found_indexes[1:]):
            del self._list[idx]

        if found_indexes:
            idx = found_indexes[0]
            self._list[idx] = (set_key, set_value)
        else:
            self._list.append((set_key, set_value))

    def __delitem__(self, key: str) -> None:
        """
        Remove the header `key`.
        """
        del_key = key.lower().encode(self.encoding)

        pop_indexes = []
        for idx, (item_key, _) in enumerate(self._list):
            if item_key == del_key:
                pop_indexes.append(idx)

        if not pop_indexes:
            raise KeyError(key)

        for idx in reversed(pop_indexes):
            del self._list[idx]

    def __contains__(self, key: typing.Any) -> bool:
        get_header_key = key.lower().encode(self.encoding)
        for header_key, _ in self._list:
            if header_key == get_header_key:
                return True
        return False

    def __iter__(self) -> typing.Iterator[typing.Any]:
        return iter(self.keys())

    def __len__(self) -> int:
        return len(self._list)

    def __eq__(self, other: typing.Any) -> bool:
        if not isinstance(other, Headers):
            return False
        return sorted(self._list) == sorted(other._list)

    def __repr__(self) -> str:
        class_name = self.__class__.__name__

        encoding_str = ""
        if self.encoding != "ascii":
            encoding_str = f", encoding={self.encoding!r}"

        # Sensitive values (authorization, cookies) are masked before display.
        as_list = list(obfuscate_sensitive_headers(self.items()))
        as_dict = dict(as_list)

        no_duplicate_keys = len(as_dict) == len(as_list)
        if no_duplicate_keys:
            return f"{class_name}({as_dict!r}{encoding_str})"
        return f"{class_name}({as_list!r}{encoding_str})"


class BaseRequest:
    """
    Shared behaviour for sync and async requests: URL/header/cookie setup,
    body encoding, and automatic header population.
    """

    def __init__(
        self,
        method: str,
        url: typing.Union[str, URL],
        *,
        params: QueryParamTypes = None,
        headers: HeaderTypes = None,
        cookies: CookieTypes = None,
    ):
        self.method = method.upper()
        self.url = URL(url, params=params)
        self.headers = Headers(headers)
        if cookies:
            self._cookies = Cookies(cookies)
            self._cookies.set_cookie_header(self)

    def encode_data(
        self, data: dict = None, files: RequestFiles = None, json: typing.Any = None
    ) -> typing.Tuple[bytes, str]:
        # Returns (body bytes, content-type); content-type is "" for no body.
        if json is not None:
            content = jsonlib.dumps(json).encode("utf-8")
            content_type = "application/json"
        elif files is not None:
            content, content_type = multipart_encode(data or {}, files)
        elif data is not None:
            content = urlencode(data, doseq=True).encode("utf-8")
            content_type = "application/x-www-form-urlencoded"
        else:
            content = b""
            content_type = ""
        return content, content_type

    def prepare(self) -> None:
        # Populate standard headers the caller did not set explicitly.
        content: typing.Optional[bytes] = getattr(self, "content", None)
        is_streaming = getattr(self, "is_streaming", False)

        auto_headers: typing.List[typing.Tuple[bytes, bytes]] = []

        has_host = "host" in self.headers
        has_user_agent = "user-agent" in self.headers
        has_accept = "accept" in self.headers
        has_content_length = (
            "content-length" in self.headers or "transfer-encoding" in self.headers
        )
        has_accept_encoding = "accept-encoding" in self.headers
        has_connection = "connection" in self.headers

        if not has_host:
            url = self.url
            if url.userinfo:
                # Credentials never belong in the Host header.
                url = url.copy_with(username=None, password=None)
            auto_headers.append((b"host", url.authority.encode("ascii")))
        if not has_user_agent:
            auto_headers.append((b"user-agent", USER_AGENT.encode("ascii")))
        if not has_accept:
            auto_headers.append((b"accept", b"*/*"))
        if not has_content_length:
            if is_streaming:
                auto_headers.append((b"transfer-encoding", b"chunked"))
            elif content:
                content_length = str(len(content)).encode()
                auto_headers.append((b"content-length", content_length))
        if not has_accept_encoding:
            auto_headers.append((b"accept-encoding", ACCEPT_ENCODING.encode()))
        if not has_connection:
            auto_headers.append((b"connection", b"keep-alive"))

        # Insert at the front, preserving the auto_headers ordering.
        for item in reversed(auto_headers):
            self.headers.raw.insert(0, item)

    @property
    def cookies(self) -> "Cookies":
        if not hasattr(self, "_cookies"):
            self._cookies = Cookies()
        return self._cookies

    def __repr__(self) -> str:
        class_name = self.__class__.__name__
        url = str(self.url)
        return f"<{class_name}({self.method!r}, {url!r})>"


class AsyncRequest(BaseRequest):
    """
    An outgoing request whose body may be an async byte-iterator.
    """

    def __init__(
        self,
        method: str,
        url: typing.Union[str, URL],
        *,
        params: QueryParamTypes = None,
        headers: HeaderTypes = None,
        cookies: CookieTypes = None,
        data: AsyncRequestData = None,
        files: RequestFiles = None,
        json: typing.Any = None,
    ):
        super().__init__(
            method=method, url=url, params=params, headers=headers, cookies=cookies
        )

        if data is None or isinstance(data, dict):
            content, content_type = self.encode_data(data, files, json)
            self.is_streaming = False
            self.content = content
            if content_type:
                self.headers["Content-Type"] = content_type
        elif isinstance(data, (str, bytes)):
            data = data.encode("utf-8") if isinstance(data, str) else data
            self.is_streaming = False
            self.content = data
        else:
            # Anything else must be an async iterator, streamed on send.
            assert hasattr(data, "__aiter__")
            self.is_streaming = True
            self.content_aiter = data

        self.prepare()

    async def read(self) -> bytes:
        """
        Read and return the request content.
""" if not hasattr(self, "content"): self.content = b"".join([part async for part in self.stream()]) return self.content async def stream(self) -> typing.AsyncIterator[bytes]: if self.is_streaming: async for part in self.content_aiter: yield part elif self.content: yield self.content class Request(BaseRequest): def __init__( self, method: str, url: typing.Union[str, URL], *, params: QueryParamTypes = None, headers: HeaderTypes = None, cookies: CookieTypes = None, data: RequestData = None, files: RequestFiles = None, json: typing.Any = None, ): super().__init__( method=method, url=url, params=params, headers=headers, cookies=cookies ) if data is None or isinstance(data, dict): content, content_type = self.encode_data(data, files, json) self.is_streaming = False self.content = content if content_type: self.headers["Content-Type"] = content_type elif isinstance(data, (str, bytes)): data = data.encode("utf-8") if isinstance(data, str) else data self.is_streaming = False self.content = data else: assert hasattr(data, "__iter__") self.is_streaming = True self.content_iter = data self.prepare() def read(self) -> bytes: if not hasattr(self, "content"): self.content = b"".join([part for part in self.stream()]) return self.content def stream(self) -> typing.Iterator[bytes]: if self.is_streaming: for part in self.content_iter: yield part elif self.content: yield self.content class BaseResponse: def __init__( self, status_code: int, *, http_version: str = None, headers: HeaderTypes = None, request: BaseRequest = None, on_close: typing.Callable = None, elapsed: datetime.timedelta = None, ): self.status_code = status_code self.http_version = http_version self.headers = Headers(headers) self.request = request self.on_close = on_close self.elapsed = datetime.timedelta(0) if elapsed is None else elapsed self.call_next: typing.Optional[typing.Callable] = None @property def reason_phrase(self) -> str: return StatusCode.get_reason_phrase(self.status_code) @property def url(self) -> 
typing.Optional[URL]: """ Returns the URL for which the request was made. Requires that `request` was provided when instantiating the response. """ return None if self.request is None else self.request.url @property def content(self) -> bytes: if not hasattr(self, "_content"): if hasattr(self, "_raw_content"): raw_content = self._raw_content # type: ignore content = self.decoder.decode(raw_content) content += self.decoder.flush() self._content = content else: raise ResponseNotRead() return self._content @property def text(self) -> str: if not hasattr(self, "_text"): content = self.content if not content: self._text = "" else: encoding = self.encoding self._text = content.decode(encoding, errors="replace") return self._text @property def encoding(self) -> str: if not hasattr(self, "_encoding"): encoding = self.charset_encoding if encoding is None or not is_known_encoding(encoding): encoding = self.apparent_encoding if encoding is None or not is_known_encoding(encoding): encoding = "utf-8" self._encoding = encoding return self._encoding @encoding.setter def encoding(self, value: str) -> None: self._encoding = value @property def charset_encoding(self) -> typing.Optional[str]: """ Return the encoding, as specified by the Content-Type header. """ content_type = self.headers.get("Content-Type") if content_type is None: return None parsed = cgi.parse_header(content_type) media_type, params = parsed[0], parsed[-1] if "charset" in params: return params["charset"].strip("'\"") # RFC 2616 specifies that 'iso-8859-1' should be used as the default # for 'text/*' media types, if no charset is provided. # See: https://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1 if media_type.startswith("text/"): return "iso-8859-1" return None @property def apparent_encoding(self) -> typing.Optional[str]: """ Return the encoding, as it appears to autodetection. 
""" return chardet.detect(self.content)["encoding"] @property def decoder(self) -> Decoder: """ Returns a decoder instance which can be used to decode the raw byte content, depending on the Content-Encoding used in the response. """ if not hasattr(self, "_decoder"): decoders: typing.List[Decoder] = [] values = self.headers.getlist("content-encoding", split_commas=True) for value in values: value = value.strip().lower() try: decoder_cls = SUPPORTED_DECODERS[value] decoders.append(decoder_cls()) except KeyError: continue if len(decoders) == 1: self._decoder = decoders[0] elif len(decoders) > 1: self._decoder = MultiDecoder(decoders) else: self._decoder = IdentityDecoder() return self._decoder @property def is_redirect(self) -> bool: return StatusCode.is_redirect(self.status_code) and "location" in self.headers def raise_for_status(self) -> None: """ Raise the `HttpError` if one occurred. """ message = ( "{0.status_code} {error_type}: {0.reason_phrase} for url: {0.url}\n" "For more information check: https://httpstatuses.com/{0.status_code}" ) if StatusCode.is_client_error(self.status_code): message = message.format(self, error_type="Client Error") elif StatusCode.is_server_error(self.status_code): message = message.format(self, error_type="Server Error") else: message = "" if message: raise HTTPError(message, response=self) def json(self, **kwargs: typing.Any) -> typing.Union[dict, list]: if self.charset_encoding is None and self.content and len(self.content) > 3: encoding = guess_json_utf(self.content) if encoding is not None: try: return jsonlib.loads(self.content.decode(encoding), **kwargs) except UnicodeDecodeError: pass return jsonlib.loads(self.text, **kwargs) @property def cookies(self) -> "Cookies": if not hasattr(self, "_cookies"): assert self.request is not None self._cookies = Cookies() self._cookies.extract_cookies(self) return self._cookies @property def links(self) -> typing.Dict[typing.Optional[str], typing.Dict[str, str]]: """ Returns the parsed 
        header links of the response, if any
        """
        header = self.headers.get("link")
        ldict = {}
        if header:
            links = parse_header_links(header)
            for link in links:
                # Prefer the 'rel' attribute as the key, falling back to 'url'.
                key = link.get("rel") or link.get("url")
                ldict[key] = link
        return ldict

    def __repr__(self) -> str:
        return f"<Response [{self.status_code} {self.reason_phrase}]>"


class AsyncResponse(BaseResponse):
    """
    An HTTP response whose body is read asynchronously, either eagerly
    (bytes) or lazily from an async byte-stream.
    """

    def __init__(
        self,
        status_code: int,
        *,
        http_version: str = None,
        headers: HeaderTypes = None,
        content: AsyncResponseContent = None,
        on_close: typing.Callable = None,
        request: AsyncRequest = None,
        history: typing.List["BaseResponse"] = None,
        elapsed: datetime.timedelta = None,
    ):
        super().__init__(
            status_code=status_code,
            http_version=http_version,
            headers=headers,
            request=request,
            on_close=on_close,
            elapsed=elapsed,
        )

        self.history = [] if history is None else list(history)

        if content is None or isinstance(content, bytes):
            # Fully-buffered content: nothing left to stream or close.
            self.is_closed = True
            self.is_stream_consumed = True
            self._raw_content = content or b""
        else:
            self.is_closed = False
            self.is_stream_consumed = False
            self._raw_stream = content

    async def read(self) -> bytes:
        """
        Read and return the response content.
        """
        if not hasattr(self, "_content"):
            self._content = b"".join([part async for part in self.stream()])
        return self._content

    async def stream(self) -> typing.AsyncIterator[bytes]:
        """
        A byte-iterator over the decoded response content.
        This allows us to handle gzip, deflate, and brotli encoded responses.
        """
        if hasattr(self, "_content"):
            yield self._content
        else:
            async for chunk in self.raw():
                yield self.decoder.decode(chunk)
            yield self.decoder.flush()

    async def stream_text(self) -> typing.AsyncIterator[str]:
        """
        A str-iterator over the decoded response content
        that handles both gzip, deflate, etc but also detects the content's
        string encoding.
""" decoder = TextDecoder(encoding=self.charset_encoding) async for chunk in self.stream(): yield decoder.decode(chunk) yield decoder.flush() async def raw(self) -> typing.AsyncIterator[bytes]: """ A byte-iterator over the raw response content. """ if hasattr(self, "_raw_content"): yield self._raw_content else: if self.is_stream_consumed: raise StreamConsumed() if self.is_closed: raise ResponseClosed() self.is_stream_consumed = True async for part in self._raw_stream: yield part await self.close() async def next(self) -> "AsyncResponse": """ Get the next response from a redirect response. """ if not self.is_redirect: raise NotRedirectResponse() assert self.call_next is not None return await self.call_next() async def close(self) -> None: """ Close the response and release the connection. Automatically called if the response body is read to completion. """ if not self.is_closed: self.is_closed = True if self.on_close is not None: await self.on_close() class Response(BaseResponse): def __init__( self, status_code: int, *, http_version: str = None, headers: HeaderTypes = None, content: ResponseContent = None, on_close: typing.Callable = None, request: Request = None, history: typing.List["BaseResponse"] = None, elapsed: datetime.timedelta = None, ): super().__init__( status_code=status_code, http_version=http_version, headers=headers, request=request, on_close=on_close, elapsed=elapsed, ) self.history = [] if history is None else list(history) if content is None or isinstance(content, bytes): self.is_closed = True self.is_stream_consumed = True self._raw_content = content or b"" else: self.is_closed = False self.is_stream_consumed = False self._raw_stream = content def read(self) -> bytes: """ Read and return the response content. """ if not hasattr(self, "_content"): self._content = b"".join([part for part in self.stream()]) return self._content def stream(self) -> typing.Iterator[bytes]: """ A byte-iterator over the decoded response content. 
This allows us to handle gzip, deflate, and brotli encoded responses. """ if hasattr(self, "_content"): yield self._content else: for chunk in self.raw(): yield self.decoder.decode(chunk) yield self.decoder.flush() def stream_text(self) -> typing.Iterator[str]: """ A str-iterator over the decoded response content that handles both gzip, deflate, etc but also detects the content's string encoding. """ decoder = TextDecoder(encoding=self.charset_encoding) for chunk in self.stream(): yield decoder.decode(chunk) yield decoder.flush() def raw(self) -> typing.Iterator[bytes]: """ A byte-iterator over the raw response content. """ if hasattr(self, "_raw_content"): yield self._raw_content else: if self.is_stream_consumed: raise StreamConsumed() if self.is_closed: raise ResponseClosed() self.is_stream_consumed = True for part in self._raw_stream: yield part self.close() def close(self) -> None: """ Close the response and release the connection. Automatically called if the response body is read to completion. """ if not self.is_closed: self.is_closed = True if self.on_close is not None: self.on_close() class Cookies(MutableMapping): """ HTTP Cookies, as a mutable mapping. """ def __init__(self, cookies: CookieTypes = None) -> None: if cookies is None or isinstance(cookies, dict): self.jar = CookieJar() if isinstance(cookies, dict): for key, value in cookies.items(): self.set(key, value) elif isinstance(cookies, Cookies): self.jar = CookieJar() for cookie in cookies.jar: self.jar.set_cookie(cookie) else: self.jar = cookies def extract_cookies(self, response: BaseResponse) -> None: """ Loads any cookies based on the response `Set-Cookie` headers. 
""" assert response.request is not None urlib_response = self._CookieCompatResponse(response) urllib_request = self._CookieCompatRequest(response.request) self.jar.extract_cookies(urlib_response, urllib_request) # type: ignore def set_cookie_header(self, request: BaseRequest) -> None: """ Sets an appropriate 'Cookie:' HTTP header on the `Request`. """ urllib_request = self._CookieCompatRequest(request) self.jar.add_cookie_header(urllib_request) def set(self, name: str, value: str, domain: str = "", path: str = "/") -> None: """ Set a cookie value by name. May optionally include domain and path. """ kwargs = { "version": 0, "name": name, "value": value, "port": None, "port_specified": False, "domain": domain, "domain_specified": bool(domain), "domain_initial_dot": domain.startswith("."), "path": path, "path_specified": bool(path), "secure": False, "expires": None, "discard": True, "comment": None, "comment_url": None, "rest": {"HttpOnly": None}, "rfc2109": False, } cookie = Cookie(**kwargs) # type: ignore self.jar.set_cookie(cookie) def get( # type: ignore self, name: str, default: str = None, domain: str = None, path: str = None ) -> typing.Optional[str]: """ Get a cookie by name. May optionally include domain and path in order to specify exactly which cookie to retrieve. """ value = None for cookie in self.jar: if cookie.name == name: if domain is None or cookie.domain == domain: # type: ignore if path is None or cookie.path == path: if value is not None: message = f"Multiple cookies exist with name={name}" raise CookieConflict(message) value = cookie.value if value is None: return default return value def delete(self, name: str, domain: str = None, path: str = None) -> None: """ Delete a cookie by name. May optionally include domain and path in order to specify exactly which cookie to delete. 
""" if domain is not None and path is not None: return self.jar.clear(domain, path, name) remove = [] for cookie in self.jar: if cookie.name == name: if domain is None or cookie.domain == domain: # type: ignore if path is None or cookie.path == path: remove.append(cookie) for cookie in remove: self.jar.clear(cookie.domain, cookie.path, cookie.name) # type: ignore def clear(self, domain: str = None, path: str = None) -> None: """ Delete all cookies. Optionally include a domain and path in order to only delete a subset of all the cookies. """ args = [] if domain is not None: args.append(domain) if path is not None: assert domain is not None args.append(path) self.jar.clear(*args) def update(self, cookies: CookieTypes = None) -> None: # type: ignore cookies = Cookies(cookies) for cookie in cookies.jar: self.jar.set_cookie(cookie) def __setitem__(self, name: str, value: str) -> None: return self.set(name, value) def __getitem__(self, name: str) -> str: value = self.get(name) if value is None: raise KeyError(name) return value def __delitem__(self, name: str) -> None: return self.delete(name) def __len__(self) -> int: return len(self.jar) def __iter__(self) -> typing.Iterator[str]: return (cookie.name for cookie in self.jar) def __bool__(self) -> bool: for _ in self.jar: return True return False class _CookieCompatRequest(urllib.request.Request): """ Wraps a `Request` instance up in a compatibility interface suitable for use with `CookieJar` operations. """ def __init__(self, request: BaseRequest) -> None: super().__init__( url=str(request.url), headers=dict(request.headers), method=request.method, ) self.request = request def add_unredirected_header(self, key: str, value: str) -> None: super().add_unredirected_header(key, value) self.request.headers[key] = value class _CookieCompatResponse: """ Wraps a `Request` instance up in a compatibility interface suitable for use with `CookieJar` operations. 
""" def __init__(self, response: BaseResponse): self.response = response def info(self) -> email.message.Message: info = email.message.Message() for key, value in self.response.headers.items(): info[key] = value return info
32.2882
88
0.579806
import cgi import datetime import email.message import json as jsonlib import typing import urllib.request from collections.abc import MutableMapping from http.cookiejar import Cookie, CookieJar from urllib.parse import parse_qsl, urlencode import chardet import rfc3986 from .config import USER_AGENT from .decoders import ( ACCEPT_ENCODING, SUPPORTED_DECODERS, Decoder, IdentityDecoder, MultiDecoder, TextDecoder, ) from .exceptions import ( CookieConflict, HTTPError, InvalidURL, NotRedirectResponse, ResponseClosed, ResponseNotRead, StreamConsumed, ) from .multipart import multipart_encode from .status_codes import StatusCode from .utils import ( guess_json_utf, is_known_encoding, normalize_header_key, normalize_header_value, obfuscate_sensitive_headers, parse_header_links, str_query_param, ) if typing.TYPE_CHECKING: from .middleware.base import BaseMiddleware from .dispatch.base import AsyncDispatcher PrimitiveData = typing.Optional[typing.Union[str, int, float, bool]] URLTypes = typing.Union["URL", str] QueryParamTypes = typing.Union[ "QueryParams", typing.Mapping[str, PrimitiveData], typing.List[typing.Tuple[str, PrimitiveData]], str, ] HeaderTypes = typing.Union[ "Headers", typing.Dict[typing.AnyStr, typing.AnyStr], typing.List[typing.Tuple[typing.AnyStr, typing.AnyStr]], ] CookieTypes = typing.Union["Cookies", CookieJar, typing.Dict[str, str]] AuthTypes = typing.Union[ typing.Tuple[typing.Union[str, bytes], typing.Union[str, bytes]], typing.Callable[["AsyncRequest"], "AsyncRequest"], "BaseMiddleware", ] ProxiesTypes = typing.Union[ URLTypes, "AsyncDispatcher", typing.Dict[URLTypes, typing.Union[URLTypes, "AsyncDispatcher"]], ] AsyncRequestData = typing.Union[dict, str, bytes, typing.AsyncIterator[bytes]] RequestData = typing.Union[dict, str, bytes, typing.Iterator[bytes]] RequestFiles = typing.Dict[ str, typing.Union[ typing.IO[typing.AnyStr], typing.Tuple[str, typing.IO[typing.AnyStr]], typing.Tuple[ str, typing.IO[typing.AnyStr], str ], ], ] 
AsyncResponseContent = typing.Union[bytes, typing.AsyncIterator[bytes]] ResponseContent = typing.Union[bytes, typing.Iterator[bytes]] class URL: def __init__( self, url: URLTypes, allow_relative: bool = False, params: QueryParamTypes = None, ) -> None: if isinstance(url, str): self._uri_reference = rfc3986.api.iri_reference(url).encode() else: self._uri_reference = url._uri_reference if self.is_absolute_url: self._uri_reference = self._uri_reference.normalize() if params: query_string = str(QueryParams(params)) self._uri_reference = self._uri_reference.copy_with(query=query_string) if not allow_relative: if not self.scheme: raise InvalidURL("No scheme included in URL.") if not self.host: raise InvalidURL("No host included in URL.") self._full_path: typing.Optional[str] = None @property def scheme(self) -> str: return self._uri_reference.scheme or "" @property def authority(self) -> str: return self._uri_reference.authority or "" @property def userinfo(self) -> str: return self._uri_reference.userinfo or "" @property def username(self) -> str: userinfo = self._uri_reference.userinfo or "" return userinfo.partition(":")[0] @property def password(self) -> str: userinfo = self._uri_reference.userinfo or "" return userinfo.partition(":")[2] @property def host(self) -> str: return self._uri_reference.host or "" @property def port(self) -> int: port = self._uri_reference.port if port is None: return {"https": 443, "http": 80}[self.scheme] return int(port) @property def path(self) -> str: return self._uri_reference.path or "/" @property def query(self) -> str: return self._uri_reference.query or "" @property def full_path(self) -> str: if self._full_path is not None: return self._full_path path = self.path if self.query: path += "?" 
+ self.query return path @full_path.setter def full_path(self, value: typing.Optional[str]) -> None: self._full_path = value @property def fragment(self) -> str: return self._uri_reference.fragment or "" @property def is_ssl(self) -> bool: return self.scheme == "https" @property def is_absolute_url(self) -> bool: # URLs with a fragment portion as not absolute. # What we actually care about is if the URL provides # a scheme and hostname to which connections should be made. return bool(self.scheme and self.host) @property def is_relative_url(self) -> bool: return not self.is_absolute_url @property def origin(self) -> "Origin": return Origin(self) def copy_with(self, **kwargs: typing.Any) -> "URL": if ( "username" in kwargs or "password" in kwargs or "host" in kwargs or "port" in kwargs ): host = kwargs.pop("host", self.host) port = kwargs.pop("port", self.port) username = kwargs.pop("username", self.username) password = kwargs.pop("password", self.password) authority = host if port is not None: authority += f":{port}" if username is not None: userpass = username if password is not None: userpass += f":{password}" authority = f"{userpass}@{authority}" kwargs["authority"] = authority return URL(self._uri_reference.copy_with(**kwargs).unsplit()) def join(self, relative_url: URLTypes) -> "URL": if self.is_relative_url: return URL(relative_url) # We drop any fragment portion, because RFC 3986 strictly # treats URLs with a fragment portion as not being absolute URLs. 
base_uri = self._uri_reference.copy_with(fragment=None) relative_url = URL(relative_url, allow_relative=True) return URL(relative_url._uri_reference.resolve_with(base_uri).unsplit()) def __hash__(self) -> int: return hash(str(self)) def __eq__(self, other: typing.Any) -> bool: return isinstance(other, (URL, str)) and str(self) == str(other) def __str__(self) -> str: return self._uri_reference.unsplit() def __repr__(self) -> str: class_name = self.__class__.__name__ url_str = str(self) if self._uri_reference.userinfo: url_str = ( rfc3986.urlparse(url_str) .copy_with(userinfo=f"{self.username}:[secure]") .unsplit() ) return f"{class_name}({url_str!r})" class Origin: def __init__(self, url: URLTypes) -> None: if not isinstance(url, URL): url = URL(url) self.scheme = url.scheme self.is_ssl = url.is_ssl self.host = url.host self.port = url.port def __eq__(self, other: typing.Any) -> bool: return ( isinstance(other, self.__class__) and self.scheme == other.scheme and self.host == other.host and self.port == other.port ) def __hash__(self) -> int: return hash((self.scheme, self.host, self.port)) def __repr__(self) -> str: class_name = self.__class__.__name__ return ( f"{class_name}(scheme={self.scheme!r} host={self.host!r} port={self.port})" ) class QueryParams(typing.Mapping[str, str]): def __init__(self, *args: QueryParamTypes, **kwargs: typing.Any) -> None: assert len(args) < 2, "Too many arguments." assert not (args and kwargs), "Cannot mix named and unnamed arguments." 
value = args[0] if args else kwargs if isinstance(value, str): items = parse_qsl(value) elif isinstance(value, QueryParams): items = value.multi_items() elif isinstance(value, list): items = value # type: ignore else: items = value.items() # type: ignore self._list = [(str(k), str_query_param(v)) for k, v in items] self._dict = {str(k): str_query_param(v) for k, v in items} def getlist(self, key: typing.Any) -> typing.List[str]: return [item_value for item_key, item_value in self._list if item_key == key] def keys(self) -> typing.KeysView: return self._dict.keys() def values(self) -> typing.ValuesView: return self._dict.values() def items(self) -> typing.ItemsView: return self._dict.items() def multi_items(self) -> typing.List[typing.Tuple[str, str]]: return list(self._list) def get(self, key: typing.Any, default: typing.Any = None) -> typing.Any: if key in self._dict: return self._dict[key] return default def update(self, params: QueryParamTypes = None) -> None: # type: ignore if not params: return params = QueryParams(params) for param in params: self[param] = params[param] def __getitem__(self, key: typing.Any) -> str: return self._dict[key] def __setitem__(self, key: str, value: str) -> None: self._dict[key] = value found_indexes = [] for idx, (item_key, _) in enumerate(self._list): if item_key == key: found_indexes.append(idx) for idx in reversed(found_indexes[1:]): del self._list[idx] if found_indexes: idx = found_indexes[0] self._list[idx] = (key, value) else: self._list.append((key, value)) def __contains__(self, key: typing.Any) -> bool: return key in self._dict def __iter__(self) -> typing.Iterator[typing.Any]: return iter(self.keys()) def __len__(self) -> int: return len(self._dict) def __eq__(self, other: typing.Any) -> bool: if not isinstance(other, self.__class__): return False return sorted(self._list) == sorted(other._list) def __str__(self) -> str: return urlencode(self._list) def __repr__(self) -> str: class_name = self.__class__.__name__ 
query_string = str(self) return f"{class_name}({query_string!r})" class Headers(typing.MutableMapping[str, str]): def __init__(self, headers: HeaderTypes = None, encoding: str = None) -> None: if headers is None: self._list = [] # type: typing.List[typing.Tuple[bytes, bytes]] elif isinstance(headers, Headers): self._list = list(headers.raw) elif isinstance(headers, dict): self._list = [ (normalize_header_key(k, encoding), normalize_header_value(v, encoding)) for k, v in headers.items() ] else: self._list = [ (normalize_header_key(k, encoding), normalize_header_value(v, encoding)) for k, v in headers ] self._encoding = encoding @property def encoding(self) -> str: if self._encoding is None: for encoding in ["ascii", "utf-8"]: for key, value in self.raw: try: key.decode(encoding) value.decode(encoding) except UnicodeDecodeError: break else: # The else block runs if 'break' did not occur, meaning # all values fitted the encoding. self._encoding = encoding break else: # The ISO-8859-1 encoding covers all 256 code points in a byte, # so will never raise decode errors. 
self._encoding = "iso-8859-1" return self._encoding @encoding.setter def encoding(self, value: str) -> None: self._encoding = value @property def raw(self) -> typing.List[typing.Tuple[bytes, bytes]]: return self._list def keys(self) -> typing.List[str]: # type: ignore return [key.decode(self.encoding) for key, value in self._list] def values(self) -> typing.List[str]: # type: ignore return [value.decode(self.encoding) for key, value in self._list] def items(self) -> typing.List[typing.Tuple[str, str]]: # type: ignore return [ (key.decode(self.encoding), value.decode(self.encoding)) for key, value in self._list ] def get(self, key: str, default: typing.Any = None) -> typing.Any: try: return self[key] except KeyError: return default def getlist(self, key: str, split_commas: bool = False) -> typing.List[str]: get_header_key = key.lower().encode(self.encoding) values = [ item_value.decode(self.encoding) for item_key, item_value in self._list if item_key == get_header_key ] if not split_commas: return values split_values = [] for value in values: split_values.extend([item.strip() for item in value.split(",")]) return split_values def update(self, headers: HeaderTypes = None) -> None: # type: ignore headers = Headers(headers) for header in headers: self[header] = headers[header] def copy(self) -> "Headers": return Headers(self.items(), encoding=self.encoding) def __getitem__(self, key: str) -> str: normalized_key = key.lower().encode(self.encoding) items = [] for header_key, header_value in self._list: if header_key == normalized_key: items.append(header_value.decode(self.encoding)) if items: return ", ".join(items) raise KeyError(key) def __setitem__(self, key: str, value: str) -> None: set_key = key.lower().encode(self.encoding) set_value = value.encode(self.encoding) found_indexes = [] for idx, (item_key, _) in enumerate(self._list): if item_key == set_key: found_indexes.append(idx) for idx in reversed(found_indexes[1:]): del self._list[idx] if found_indexes: idx = 
found_indexes[0] self._list[idx] = (set_key, set_value) else: self._list.append((set_key, set_value)) def __delitem__(self, key: str) -> None: del_key = key.lower().encode(self.encoding) pop_indexes = [] for idx, (item_key, _) in enumerate(self._list): if item_key == del_key: pop_indexes.append(idx) if not pop_indexes: raise KeyError(key) for idx in reversed(pop_indexes): del self._list[idx] def __contains__(self, key: typing.Any) -> bool: get_header_key = key.lower().encode(self.encoding) for header_key, _ in self._list: if header_key == get_header_key: return True return False def __iter__(self) -> typing.Iterator[typing.Any]: return iter(self.keys()) def __len__(self) -> int: return len(self._list) def __eq__(self, other: typing.Any) -> bool: if not isinstance(other, Headers): return False return sorted(self._list) == sorted(other._list) def __repr__(self) -> str: class_name = self.__class__.__name__ encoding_str = "" if self.encoding != "ascii": encoding_str = f", encoding={self.encoding!r}" as_list = list(obfuscate_sensitive_headers(self.items())) as_dict = dict(as_list) no_duplicate_keys = len(as_dict) == len(as_list) if no_duplicate_keys: return f"{class_name}({as_dict!r}{encoding_str})" return f"{class_name}({as_list!r}{encoding_str})" class BaseRequest: def __init__( self, method: str, url: typing.Union[str, URL], *, params: QueryParamTypes = None, headers: HeaderTypes = None, cookies: CookieTypes = None, ): self.method = method.upper() self.url = URL(url, params=params) self.headers = Headers(headers) if cookies: self._cookies = Cookies(cookies) self._cookies.set_cookie_header(self) def encode_data( self, data: dict = None, files: RequestFiles = None, json: typing.Any = None ) -> typing.Tuple[bytes, str]: if json is not None: content = jsonlib.dumps(json).encode("utf-8") content_type = "application/json" elif files is not None: content, content_type = multipart_encode(data or {}, files) elif data is not None: content = urlencode(data, 
doseq=True).encode("utf-8") content_type = "application/x-www-form-urlencoded" else: content = b"" content_type = "" return content, content_type def prepare(self) -> None: content: typing.Optional[bytes] = getattr(self, "content", None) is_streaming = getattr(self, "is_streaming", False) auto_headers: typing.List[typing.Tuple[bytes, bytes]] = [] has_host = "host" in self.headers has_user_agent = "user-agent" in self.headers has_accept = "accept" in self.headers has_content_length = ( "content-length" in self.headers or "transfer-encoding" in self.headers ) has_accept_encoding = "accept-encoding" in self.headers has_connection = "connection" in self.headers if not has_host: url = self.url if url.userinfo: url = url.copy_with(username=None, password=None) auto_headers.append((b"host", url.authority.encode("ascii"))) if not has_user_agent: auto_headers.append((b"user-agent", USER_AGENT.encode("ascii"))) if not has_accept: auto_headers.append((b"accept", b"*/*")) if not has_content_length: if is_streaming: auto_headers.append((b"transfer-encoding", b"chunked")) elif content: content_length = str(len(content)).encode() auto_headers.append((b"content-length", content_length)) if not has_accept_encoding: auto_headers.append((b"accept-encoding", ACCEPT_ENCODING.encode())) if not has_connection: auto_headers.append((b"connection", b"keep-alive")) for item in reversed(auto_headers): self.headers.raw.insert(0, item) @property def cookies(self) -> "Cookies": if not hasattr(self, "_cookies"): self._cookies = Cookies() return self._cookies def __repr__(self) -> str: class_name = self.__class__.__name__ url = str(self.url) return f"<{class_name}({self.method!r}, {url!r})>" class AsyncRequest(BaseRequest): def __init__( self, method: str, url: typing.Union[str, URL], *, params: QueryParamTypes = None, headers: HeaderTypes = None, cookies: CookieTypes = None, data: AsyncRequestData = None, files: RequestFiles = None, json: typing.Any = None, ): super().__init__( method=method, 
url=url, params=params, headers=headers, cookies=cookies ) if data is None or isinstance(data, dict): content, content_type = self.encode_data(data, files, json) self.is_streaming = False self.content = content if content_type: self.headers["Content-Type"] = content_type elif isinstance(data, (str, bytes)): data = data.encode("utf-8") if isinstance(data, str) else data self.is_streaming = False self.content = data else: assert hasattr(data, "__aiter__") self.is_streaming = True self.content_aiter = data self.prepare() async def read(self) -> bytes: if not hasattr(self, "content"): self.content = b"".join([part async for part in self.stream()]) return self.content async def stream(self) -> typing.AsyncIterator[bytes]: if self.is_streaming: async for part in self.content_aiter: yield part elif self.content: yield self.content class Request(BaseRequest): def __init__( self, method: str, url: typing.Union[str, URL], *, params: QueryParamTypes = None, headers: HeaderTypes = None, cookies: CookieTypes = None, data: RequestData = None, files: RequestFiles = None, json: typing.Any = None, ): super().__init__( method=method, url=url, params=params, headers=headers, cookies=cookies ) if data is None or isinstance(data, dict): content, content_type = self.encode_data(data, files, json) self.is_streaming = False self.content = content if content_type: self.headers["Content-Type"] = content_type elif isinstance(data, (str, bytes)): data = data.encode("utf-8") if isinstance(data, str) else data self.is_streaming = False self.content = data else: assert hasattr(data, "__iter__") self.is_streaming = True self.content_iter = data self.prepare() def read(self) -> bytes: if not hasattr(self, "content"): self.content = b"".join([part for part in self.stream()]) return self.content def stream(self) -> typing.Iterator[bytes]: if self.is_streaming: for part in self.content_iter: yield part elif self.content: yield self.content class BaseResponse: def __init__( self, status_code: int, *, 
http_version: str = None, headers: HeaderTypes = None, request: BaseRequest = None, on_close: typing.Callable = None, elapsed: datetime.timedelta = None, ): self.status_code = status_code self.http_version = http_version self.headers = Headers(headers) self.request = request self.on_close = on_close self.elapsed = datetime.timedelta(0) if elapsed is None else elapsed self.call_next: typing.Optional[typing.Callable] = None @property def reason_phrase(self) -> str: return StatusCode.get_reason_phrase(self.status_code) @property def url(self) -> typing.Optional[URL]: return None if self.request is None else self.request.url @property def content(self) -> bytes: if not hasattr(self, "_content"): if hasattr(self, "_raw_content"): raw_content = self._raw_content # type: ignore content = self.decoder.decode(raw_content) content += self.decoder.flush() self._content = content else: raise ResponseNotRead() return self._content @property def text(self) -> str: if not hasattr(self, "_text"): content = self.content if not content: self._text = "" else: encoding = self.encoding self._text = content.decode(encoding, errors="replace") return self._text @property def encoding(self) -> str: if not hasattr(self, "_encoding"): encoding = self.charset_encoding if encoding is None or not is_known_encoding(encoding): encoding = self.apparent_encoding if encoding is None or not is_known_encoding(encoding): encoding = "utf-8" self._encoding = encoding return self._encoding @encoding.setter def encoding(self, value: str) -> None: self._encoding = value @property def charset_encoding(self) -> typing.Optional[str]: content_type = self.headers.get("Content-Type") if content_type is None: return None parsed = cgi.parse_header(content_type) media_type, params = parsed[0], parsed[-1] if "charset" in params: return params["charset"].strip("'\"") # RFC 2616 specifies that 'iso-8859-1' should be used as the default # for 'text/*' media types, if no charset is provided. 
# See: https://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1 if media_type.startswith("text/"): return "iso-8859-1" return None @property def apparent_encoding(self) -> typing.Optional[str]: return chardet.detect(self.content)["encoding"] @property def decoder(self) -> Decoder: if not hasattr(self, "_decoder"): decoders: typing.List[Decoder] = [] values = self.headers.getlist("content-encoding", split_commas=True) for value in values: value = value.strip().lower() try: decoder_cls = SUPPORTED_DECODERS[value] decoders.append(decoder_cls()) except KeyError: continue if len(decoders) == 1: self._decoder = decoders[0] elif len(decoders) > 1: self._decoder = MultiDecoder(decoders) else: self._decoder = IdentityDecoder() return self._decoder @property def is_redirect(self) -> bool: return StatusCode.is_redirect(self.status_code) and "location" in self.headers def raise_for_status(self) -> None: message = ( "{0.status_code} {error_type}: {0.reason_phrase} for url: {0.url}\n" "For more information check: https://httpstatuses.com/{0.status_code}" ) if StatusCode.is_client_error(self.status_code): message = message.format(self, error_type="Client Error") elif StatusCode.is_server_error(self.status_code): message = message.format(self, error_type="Server Error") else: message = "" if message: raise HTTPError(message, response=self) def json(self, **kwargs: typing.Any) -> typing.Union[dict, list]: if self.charset_encoding is None and self.content and len(self.content) > 3: encoding = guess_json_utf(self.content) if encoding is not None: try: return jsonlib.loads(self.content.decode(encoding), **kwargs) except UnicodeDecodeError: pass return jsonlib.loads(self.text, **kwargs) @property def cookies(self) -> "Cookies": if not hasattr(self, "_cookies"): assert self.request is not None self._cookies = Cookies() self._cookies.extract_cookies(self) return self._cookies @property def links(self) -> typing.Dict[typing.Optional[str], typing.Dict[str, str]]: header = 
self.headers.get("link") ldict = {} if header: links = parse_header_links(header) for link in links: key = link.get("rel") or link.get("url") ldict[key] = link return ldict def __repr__(self) -> str: return f"<Response [{self.status_code} {self.reason_phrase}]>" class AsyncResponse(BaseResponse): def __init__( self, status_code: int, *, http_version: str = None, headers: HeaderTypes = None, content: AsyncResponseContent = None, on_close: typing.Callable = None, request: AsyncRequest = None, history: typing.List["BaseResponse"] = None, elapsed: datetime.timedelta = None, ): super().__init__( status_code=status_code, http_version=http_version, headers=headers, request=request, on_close=on_close, elapsed=elapsed, ) self.history = [] if history is None else list(history) if content is None or isinstance(content, bytes): self.is_closed = True self.is_stream_consumed = True self._raw_content = content or b"" else: self.is_closed = False self.is_stream_consumed = False self._raw_stream = content async def read(self) -> bytes: if not hasattr(self, "_content"): self._content = b"".join([part async for part in self.stream()]) return self._content async def stream(self) -> typing.AsyncIterator[bytes]: if hasattr(self, "_content"): yield self._content else: async for chunk in self.raw(): yield self.decoder.decode(chunk) yield self.decoder.flush() async def stream_text(self) -> typing.AsyncIterator[str]: decoder = TextDecoder(encoding=self.charset_encoding) async for chunk in self.stream(): yield decoder.decode(chunk) yield decoder.flush() async def raw(self) -> typing.AsyncIterator[bytes]: if hasattr(self, "_raw_content"): yield self._raw_content else: if self.is_stream_consumed: raise StreamConsumed() if self.is_closed: raise ResponseClosed() self.is_stream_consumed = True async for part in self._raw_stream: yield part await self.close() async def next(self) -> "AsyncResponse": if not self.is_redirect: raise NotRedirectResponse() assert self.call_next is not None return await 
self.call_next() async def close(self) -> None: if not self.is_closed: self.is_closed = True if self.on_close is not None: await self.on_close() class Response(BaseResponse): def __init__( self, status_code: int, *, http_version: str = None, headers: HeaderTypes = None, content: ResponseContent = None, on_close: typing.Callable = None, request: Request = None, history: typing.List["BaseResponse"] = None, elapsed: datetime.timedelta = None, ): super().__init__( status_code=status_code, http_version=http_version, headers=headers, request=request, on_close=on_close, elapsed=elapsed, ) self.history = [] if history is None else list(history) if content is None or isinstance(content, bytes): self.is_closed = True self.is_stream_consumed = True self._raw_content = content or b"" else: self.is_closed = False self.is_stream_consumed = False self._raw_stream = content def read(self) -> bytes: if not hasattr(self, "_content"): self._content = b"".join([part for part in self.stream()]) return self._content def stream(self) -> typing.Iterator[bytes]: if hasattr(self, "_content"): yield self._content else: for chunk in self.raw(): yield self.decoder.decode(chunk) yield self.decoder.flush() def stream_text(self) -> typing.Iterator[str]: decoder = TextDecoder(encoding=self.charset_encoding) for chunk in self.stream(): yield decoder.decode(chunk) yield decoder.flush() def raw(self) -> typing.Iterator[bytes]: if hasattr(self, "_raw_content"): yield self._raw_content else: if self.is_stream_consumed: raise StreamConsumed() if self.is_closed: raise ResponseClosed() self.is_stream_consumed = True for part in self._raw_stream: yield part self.close() def close(self) -> None: if not self.is_closed: self.is_closed = True if self.on_close is not None: self.on_close() class Cookies(MutableMapping): def __init__(self, cookies: CookieTypes = None) -> None: if cookies is None or isinstance(cookies, dict): self.jar = CookieJar() if isinstance(cookies, dict): for key, value in cookies.items(): 
self.set(key, value) elif isinstance(cookies, Cookies): self.jar = CookieJar() for cookie in cookies.jar: self.jar.set_cookie(cookie) else: self.jar = cookies def extract_cookies(self, response: BaseResponse) -> None: assert response.request is not None urlib_response = self._CookieCompatResponse(response) urllib_request = self._CookieCompatRequest(response.request) self.jar.extract_cookies(urlib_response, urllib_request) # type: ignore def set_cookie_header(self, request: BaseRequest) -> None: urllib_request = self._CookieCompatRequest(request) self.jar.add_cookie_header(urllib_request) def set(self, name: str, value: str, domain: str = "", path: str = "/") -> None: kwargs = { "version": 0, "name": name, "value": value, "port": None, "port_specified": False, "domain": domain, "domain_specified": bool(domain), "domain_initial_dot": domain.startswith("."), "path": path, "path_specified": bool(path), "secure": False, "expires": None, "discard": True, "comment": None, "comment_url": None, "rest": {"HttpOnly": None}, "rfc2109": False, } cookie = Cookie(**kwargs) # type: ignore self.jar.set_cookie(cookie) def get( # type: ignore self, name: str, default: str = None, domain: str = None, path: str = None ) -> typing.Optional[str]: value = None for cookie in self.jar: if cookie.name == name: if domain is None or cookie.domain == domain: # type: ignore if path is None or cookie.path == path: if value is not None: message = f"Multiple cookies exist with name={name}" raise CookieConflict(message) value = cookie.value if value is None: return default return value def delete(self, name: str, domain: str = None, path: str = None) -> None: if domain is not None and path is not None: return self.jar.clear(domain, path, name) remove = [] for cookie in self.jar: if cookie.name == name: if domain is None or cookie.domain == domain: # type: ignore if path is None or cookie.path == path: remove.append(cookie) for cookie in remove: self.jar.clear(cookie.domain, cookie.path, cookie.name) 
# type: ignore def clear(self, domain: str = None, path: str = None) -> None: args = [] if domain is not None: args.append(domain) if path is not None: assert domain is not None args.append(path) self.jar.clear(*args) def update(self, cookies: CookieTypes = None) -> None: # type: ignore cookies = Cookies(cookies) for cookie in cookies.jar: self.jar.set_cookie(cookie) def __setitem__(self, name: str, value: str) -> None: return self.set(name, value) def __getitem__(self, name: str) -> str: value = self.get(name) if value is None: raise KeyError(name) return value def __delitem__(self, name: str) -> None: return self.delete(name) def __len__(self) -> int: return len(self.jar) def __iter__(self) -> typing.Iterator[str]: return (cookie.name for cookie in self.jar) def __bool__(self) -> bool: for _ in self.jar: return True return False class _CookieCompatRequest(urllib.request.Request): def __init__(self, request: BaseRequest) -> None: super().__init__( url=str(request.url), headers=dict(request.headers), method=request.method, ) self.request = request def add_unredirected_header(self, key: str, value: str) -> None: super().add_unredirected_header(key, value) self.request.headers[key] = value class _CookieCompatResponse: def __init__(self, response: BaseResponse): self.response = response def info(self) -> email.message.Message: info = email.message.Message() for key, value in self.response.headers.items(): info[key] = value return info
true
true
f70fdfa9f2c0a52362c0e1de74f04cebad41b2f6
13,269
py
Python
framework/CodeInterfaces/MAMMOTH/MAMMOTHInterface.py
milljm/raven
5f29fe81b75e2ffbeb54a55aa63647e7b2f6457b
[ "Apache-2.0" ]
null
null
null
framework/CodeInterfaces/MAMMOTH/MAMMOTHInterface.py
milljm/raven
5f29fe81b75e2ffbeb54a55aa63647e7b2f6457b
[ "Apache-2.0" ]
null
null
null
framework/CodeInterfaces/MAMMOTH/MAMMOTHInterface.py
milljm/raven
5f29fe81b75e2ffbeb54a55aa63647e7b2f6457b
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 Battelle Energy Alliance, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division, print_function, unicode_literals, absolute_import import warnings warnings.simplefilter('default',DeprecationWarning) import os import copy from subprocess import Popen from CodeInterfaceBaseClass import CodeInterfaceBase from MooseBasedAppInterface import MooseBasedApp from RattlesnakeInterface import Rattlesnake from RELAP7Interface import RELAP7 class MAMMOTHInterface(CodeInterfaceBase): """ This class is used to couple raven with MAMMOTH (A moose based application, can call Rattlesnake, Bison and Relap-7) """ def __init__(self): """ Constructor @ In, None @ Out, None """ CodeInterfaceBase.__init__(self) self.MooseInterface = MooseBasedApp() #used to perturb MAMMOTH input files self.MooseInterface.addDefaultExtension() self.BisonInterface = MooseBasedApp() #used to perturb Bison input files self.BisonInterface.addDefaultExtension() self.RattlesnakeInterface = Rattlesnake() #used to perturb Rattlesnake and Yak input files #FIXME Would like to use RELAP7() as interface, but Distributions block appears to be out of date when running Mammoth #self.Relap7Interface = RELAP7() #used to perturb RELAP7 input files self.Relap7Interface = MooseBasedApp() self.Relap7Interface.addDefaultExtension() def findInps(self,inputFiles): """ Locates the input files required by MAMMOTH @ In, inputFiles, list, list of Files objects @ Out, inputDict, dict, dictionary 
containing MAMMOTH required input files """ inputDict = {} inputDict['MammothInput'] = [] inputDict['BisonInput'] = [] inputDict['RattlesnakeInput'] = [] inputDict['Relap7Input'] = [] inputDict['AncillaryInput'] = [] allowedDriverAppInput = ['bisoninput','rattlesnakeinput','relap7input'] for inputFile in inputFiles: fileType = inputFile.getType() if fileType.strip().lower().split('|')[0] == "mammothinput": inputDict['MammothInput'].append(inputFile) inputDict['DriverAppInput'] = fileType.strip().lower().split('|')[-1] if fileType.strip().lower().split('|')[-1] == "bisoninput": inputDict['BisonInput'].append(inputFile) elif fileType.strip().lower().split('|')[-1] == "rattlesnakeinput" or \ fileType.strip().lower() == "yakxsinput" or \ fileType.strip().lower() == "yakxsaliasinput" or \ fileType.strip().lower() == "instantxsinput" or \ fileType.strip().lower() == "instantxsaliasinput": inputDict['RattlesnakeInput'].append(inputFile) elif fileType.strip().lower().split('|')[-1] == "relap7input": inputDict['Relap7Input'].append(inputFile) elif fileType.strip().lower() == "ancillaryinput": inputDict['AncillaryInput'] = [] # Mammoth input is not found if len(inputDict['MammothInput']) == 0: errorMessage = 'No MAMMOTH input file specified! Please prepend "MAMMOTHInput|" to the driver App input \n' errorMessage += 'file\'s type in the RAVEN input file.' raise IOError(errorMessage) # Multiple mammoth files are found elif len(inputDict['MammothInput']) > 1: raise IOError('Multiple MAMMOTH input files are provided! Please limit the number of input files to one.') # Mammoth input found, but driverAppInput is not in the allowedDriverAppInput list elif len(inputDict['MammothInput']) == 1 and inputDict['DriverAppInput'] not in allowedDriverAppInput: errorMessage = 'A MAMMOTH input file was specified, but the driver app is not currently supported by this\n' errorMessage += 'interface. 
The MAMMOTH input file can only be specified as one of the following types:' for goodDriverAppInput in allowedDriverAppInput: errorMessage += '\nMAMMOTHInput|' + goodDriverAppInput raise IOError(errorMessage) return inputDict def generateCommand(self, inputFiles, executable, clargs=None, fargs=None): """ Generate a command to run Mammoth using an input with sampled variables See base class. Collects all the clargs and the executable to produce the command-line call. Returns tuple of commands and base file name for run. Commands are a list of tuples, indicating parallel/serial and the execution command to use. @ In, inputFiles, list, List of input files (length of the list depends on the number of inputs have been added in the Step is running this code) @ In, executable, string, executable name with absolute path (e.g. /home/path_to_executable/code.exe) @ In, clargs, dict, optional, dictionary containing the command-line flags the user can specify in the input (e.g. under the node < Code >< clargstype = 0 input0arg = 0 i0extension = 0 .inp0/ >< /Code >) @ In, fargs, dict, optional, a dictionary containing the axuiliary input file variables the user can specify in the input (e.g. under the node < Code >< fargstype = 0 input0arg = 0 aux0extension = 0 .aux0/ >< /Code >) @ Out, returnCommand, tuple, tuple containing the generated command. returnCommand[0] is the command to run the code (string), returnCommand[1] is the name of the output root """ inputDict = self.findInps(inputFiles) mammothInput = inputDict['MammothInput'] mooseCommand, mooseOut = self.MooseInterface.generateCommand(mammothInput,executable,clargs,fargs) returnCommand = mooseCommand, mooseOut return returnCommand def createNewInput(self, currentInputFiles, origInputFiles, samplerType, **Kwargs): """ Generates new perturbed input files for Mammoth and associated Moose based applications. 
@ In, currentInputFiles, list, list of current input files @ In, origInputFiles, list, list of the original input files @ In, samplerType, string, Sampler type (e.g. MonteCarlo, Adaptive, etc. see manual Samplers section) @ In, Kwargs, dict, dictionary of parameters. In this dictionary there is another dictionary called "SampledVars" where RAVEN stores the variables that got sampled (e.g. Kwargs['SampledVars'] => {'var1':10,'var2':40}) @ Out, newInputFiles, list, list of new input files (modified or not) """ #split up sampledAars in Kwargs between Bison, Rattlesnake and Relap-7 bisonArgs = copy.deepcopy(Kwargs) bisonArgs['SampledVars'] = {} perturbBison = False rattlesnakeArgs = copy.deepcopy(Kwargs) rattlesnakeArgs['SampledVars'] = {} perturbRattlesnake = False relap7Args = copy.deepcopy(Kwargs) relap7Args['SampledVars'] = {} perturbRelap7 = False foundAlias = False for varName,varValue in Kwargs['SampledVars'].items(): # get the variable's full name if len(varName.split('@')) == 2: appName = varName.split('@')[0].lower() baseVarName = varName.split('@')[-1] elif len(varName.split('@')) == 1: appName = None baseVarName = varName else: errorMessage = 'Variable names passed to the MAMMOTH Code Interface must either\n' errorMessage += 'specifiy to which App input they belong by prepending the App\'s name\n' errorMessage += 'followed by "@" to the base variable\'s name or alias or have no App\n' errorMessage += 'name to signify a passthrough variable. Please check that\n' errorMessage += varName+'\n' errorMessage += 'fits within this syntax specification.' 
raise IOError(errorMessage) # Identify which app's input the variable goes into and separate appArgs if appName == 'bison': bisonArgs['SampledVars'][baseVarName] = varValue perturbBison = True elif appName == 'rattlesnake': rattlesnakeArgs['SampledVars'][baseVarName] = varValue perturbRattlesnake = True elif appName == 'relap7': relap7Args['SampledVars'][baseVarName] = varValue perturbRelap7 = True elif appName == None: # It's a dummy variable. Doesn't need to be added to any argument lists, just continue. pass else: errorMessage = appName+' is not an App supported by the MAMMOTH Code Interface!\n' errorMessage += 'Please specify a supported App in which to send \n' errorMessage += baseVarName+'\n' errorMessage += 'or add the desired App to the MAMMOTH Code Interface.' raise IOError(errorMessage) # Check if the user wants to perturb yak xs libraries for inputFile in currentInputFiles: fileType = inputFile.getType() if fileType.strip().lower() == "yakxsaliasinput": foundAlias = True break elif fileType.strip().lower() == "instantxsaliasinput": foundAlias = True break inputDicts = self.findInps(currentInputFiles) # Bison Interface if perturbBison: bisonInps = inputDicts['BisonInput'] bisonInTypes = [] for bisonIn in bisonInps: bisonInTypes.append(bisonIn.getType().strip().lower().split('|')[-1]) if 'bisoninput' not in bisonInTypes: errorMessage = 'Variable(s):\n' for bisonVarName in bisonArgs['SampledVars'].keys(): errorMessage += bisonVarName + '\n' errorMessage += 'are specified as Bison parameters, but no Bison input file is listed!' raise IOError(errorMessage) elif bisonInTypes.count('bisoninput') > 1: errorMessage = 'Multiple Bison input files specified! This interface currently only\n' errorMessage += 'supports one input for each App utilized.' 
raise IOError(errorMessage) origBisonInps = origInputFiles[currentInputFiles.index(bisonInps[0])] bisonInps = self.BisonInterface.createNewInput(bisonInps,[origBisonInps],samplerType,**bisonArgs) # Rattlesnake Interface if perturbRattlesnake or foundAlias: rattlesnakeInps = inputDicts['RattlesnakeInput'] rattlesnakeInTypes = [] for rattlesnakeIn in rattlesnakeInps: rattlesnakeInTypes.append(rattlesnakeIn.getType().strip().lower().split('|')[-1]) if 'rattlesnakeinput' not in rattlesnakeInTypes: errorMessage = 'Variable(s):\n' for rattlesnakeVarName in rattlesnakeArgs['SampledVars'].keys(): errorMessage += rattlesnakeVarName + '\n' errorMessage += 'are specified as Rattlesnake parameters, but no Rattlesnake input file is listed!' raise IOError(errorMessage) elif rattlesnakeInTypes.count('rattlesnakeinput') > 1: errorMessage = 'Multiple Rattlesnake input files specified! This interface currently only\n' errorMessage += 'supports one input for each App utilized.' raise IOError(errorMessage) origRattlesnakeInps = origInputFiles[currentInputFiles.index(rattlesnakeInps[0])] rattlesnakeInps = self.RattlesnakeInterface.createNewInput(rattlesnakeInps, [origRattlesnakeInps],samplerType,**rattlesnakeArgs) # Relap7 Interface if perturbRelap7: relap7Inps = inputDicts['Relap7Input'] relap7InTypes = [] for relap7In in relap7Inps: relap7InTypes.append(relap7In.getType().strip().lower().split('|')[-1]) if 'relap7input' not in relap7InTypes: errorMessage = 'Variable(s):\n' for relap7VarName in relap7Args['SampledVars'].keys(): errorMessage += relap7VarName + '\n' errorMessage += 'are specified as Relap7 parameters, but no Relap7 input file is listed!' raise IOError(errorMessage) elif relap7InTypes.count('relap7input') > 1: errorMessage = 'Multiple Relap7 input files specified! This interface currently only\n' errorMessage += 'supports one input for each App utilized.' 
raise IOError(errorMessage) origRelap7Inps = origInputFiles[currentInputFiles.index(relap7Inps[0])] relap7Inps = self.Relap7Interface.createNewInput(relap7Inps,[origRelap7Inps],samplerType,**relap7Args) return currentInputFiles def finalizeCodeOutput(self, command, output, workingDir): """ this method is called by the RAVEN code at the end of each run (if the method is present). Cleans up files in the working directory that are not needed after the run @ In, command, string, the command used to run the just ended job @ In, output, string, the Output name root @ In, workingDir, string, current working dir @ Out, output, string, optional, present in case the root of the output file gets changed in this method (in this case None) """ #may need to implement this method, such as remove unused files, ... pass
52.240157
130
0.692064
from __future__ import division, print_function, unicode_literals, absolute_import import warnings warnings.simplefilter('default',DeprecationWarning) import os import copy from subprocess import Popen from CodeInterfaceBaseClass import CodeInterfaceBase from MooseBasedAppInterface import MooseBasedApp from RattlesnakeInterface import Rattlesnake from RELAP7Interface import RELAP7 class MAMMOTHInterface(CodeInterfaceBase): def __init__(self): CodeInterfaceBase.__init__(self) self.MooseInterface = MooseBasedApp() self.MooseInterface.addDefaultExtension() self.BisonInterface = MooseBasedApp() self.BisonInterface.addDefaultExtension() self.RattlesnakeInterface = Rattlesnake() sedApp() self.Relap7Interface.addDefaultExtension() def findInps(self,inputFiles): inputDict = {} inputDict['MammothInput'] = [] inputDict['BisonInput'] = [] inputDict['RattlesnakeInput'] = [] inputDict['Relap7Input'] = [] inputDict['AncillaryInput'] = [] allowedDriverAppInput = ['bisoninput','rattlesnakeinput','relap7input'] for inputFile in inputFiles: fileType = inputFile.getType() if fileType.strip().lower().split('|')[0] == "mammothinput": inputDict['MammothInput'].append(inputFile) inputDict['DriverAppInput'] = fileType.strip().lower().split('|')[-1] if fileType.strip().lower().split('|')[-1] == "bisoninput": inputDict['BisonInput'].append(inputFile) elif fileType.strip().lower().split('|')[-1] == "rattlesnakeinput" or \ fileType.strip().lower() == "yakxsinput" or \ fileType.strip().lower() == "yakxsaliasinput" or \ fileType.strip().lower() == "instantxsinput" or \ fileType.strip().lower() == "instantxsaliasinput": inputDict['RattlesnakeInput'].append(inputFile) elif fileType.strip().lower().split('|')[-1] == "relap7input": inputDict['Relap7Input'].append(inputFile) elif fileType.strip().lower() == "ancillaryinput": inputDict['AncillaryInput'] = [] if len(inputDict['MammothInput']) == 0: errorMessage = 'No MAMMOTH input file specified! 
Please prepend "MAMMOTHInput|" to the driver App input \n' errorMessage += 'file\'s type in the RAVEN input file.' raise IOError(errorMessage) # Multiple mammoth files are found elif len(inputDict['MammothInput']) > 1: raise IOError('Multiple MAMMOTH input files are provided! Please limit the number of input files to one.') # Mammoth input found, but driverAppInput is not in the allowedDriverAppInput list elif len(inputDict['MammothInput']) == 1 and inputDict['DriverAppInput'] not in allowedDriverAppInput: errorMessage = 'A MAMMOTH input file was specified, but the driver app is not currently supported by this\n' errorMessage += 'interface. The MAMMOTH input file can only be specified as one of the following types:' for goodDriverAppInput in allowedDriverAppInput: errorMessage += '\nMAMMOTHInput|' + goodDriverAppInput raise IOError(errorMessage) return inputDict def generateCommand(self, inputFiles, executable, clargs=None, fargs=None): inputDict = self.findInps(inputFiles) mammothInput = inputDict['MammothInput'] mooseCommand, mooseOut = self.MooseInterface.generateCommand(mammothInput,executable,clargs,fargs) returnCommand = mooseCommand, mooseOut return returnCommand def createNewInput(self, currentInputFiles, origInputFiles, samplerType, **Kwargs): #split up sampledAars in Kwargs between Bison, Rattlesnake and Relap-7 bisonArgs = copy.deepcopy(Kwargs) bisonArgs['SampledVars'] = {} perturbBison = False rattlesnakeArgs = copy.deepcopy(Kwargs) rattlesnakeArgs['SampledVars'] = {} perturbRattlesnake = False relap7Args = copy.deepcopy(Kwargs) relap7Args['SampledVars'] = {} perturbRelap7 = False foundAlias = False for varName,varValue in Kwargs['SampledVars'].items(): # get the variable's full name if len(varName.split('@')) == 2: appName = varName.split('@')[0].lower() baseVarName = varName.split('@')[-1] elif len(varName.split('@')) == 1: appName = None baseVarName = varName else: errorMessage = 'Variable names passed to the MAMMOTH Code Interface must either\n' 
errorMessage += 'specifiy to which App input they belong by prepending the App\'s name\n' errorMessage += 'followed by "@" to the base variable\'s name or alias or have no App\n' errorMessage += 'name to signify a passthrough variable. Please check that\n' errorMessage += varName+'\n' errorMessage += 'fits within this syntax specification.' raise IOError(errorMessage) if appName == 'bison': bisonArgs['SampledVars'][baseVarName] = varValue perturbBison = True elif appName == 'rattlesnake': rattlesnakeArgs['SampledVars'][baseVarName] = varValue perturbRattlesnake = True elif appName == 'relap7': relap7Args['SampledVars'][baseVarName] = varValue perturbRelap7 = True elif appName == None: # It's a dummy variable. Doesn't need to be added to any argument lists, just continue. pass else: errorMessage = appName+' is not an App supported by the MAMMOTH Code Interface!\n' errorMessage += 'Please specify a supported App in which to send \n' errorMessage += baseVarName+'\n' errorMessage += 'or add the desired App to the MAMMOTH Code Interface.' raise IOError(errorMessage) # Check if the user wants to perturb yak xs libraries for inputFile in currentInputFiles: fileType = inputFile.getType() if fileType.strip().lower() == "yakxsaliasinput": foundAlias = True break elif fileType.strip().lower() == "instantxsaliasinput": foundAlias = True break inputDicts = self.findInps(currentInputFiles) # Bison Interface if perturbBison: bisonInps = inputDicts['BisonInput'] bisonInTypes = [] for bisonIn in bisonInps: bisonInTypes.append(bisonIn.getType().strip().lower().split('|')[-1]) if 'bisoninput' not in bisonInTypes: errorMessage = 'Variable(s):\n' for bisonVarName in bisonArgs['SampledVars'].keys(): errorMessage += bisonVarName + '\n' errorMessage += 'are specified as Bison parameters, but no Bison input file is listed!' raise IOError(errorMessage) elif bisonInTypes.count('bisoninput') > 1: errorMessage = 'Multiple Bison input files specified! 
This interface currently only\n' errorMessage += 'supports one input for each App utilized.' raise IOError(errorMessage) origBisonInps = origInputFiles[currentInputFiles.index(bisonInps[0])] bisonInps = self.BisonInterface.createNewInput(bisonInps,[origBisonInps],samplerType,**bisonArgs) # Rattlesnake Interface if perturbRattlesnake or foundAlias: rattlesnakeInps = inputDicts['RattlesnakeInput'] rattlesnakeInTypes = [] for rattlesnakeIn in rattlesnakeInps: rattlesnakeInTypes.append(rattlesnakeIn.getType().strip().lower().split('|')[-1]) if 'rattlesnakeinput' not in rattlesnakeInTypes: errorMessage = 'Variable(s):\n' for rattlesnakeVarName in rattlesnakeArgs['SampledVars'].keys(): errorMessage += rattlesnakeVarName + '\n' errorMessage += 'are specified as Rattlesnake parameters, but no Rattlesnake input file is listed!' raise IOError(errorMessage) elif rattlesnakeInTypes.count('rattlesnakeinput') > 1: errorMessage = 'Multiple Rattlesnake input files specified! This interface currently only\n' errorMessage += 'supports one input for each App utilized.' raise IOError(errorMessage) origRattlesnakeInps = origInputFiles[currentInputFiles.index(rattlesnakeInps[0])] rattlesnakeInps = self.RattlesnakeInterface.createNewInput(rattlesnakeInps, [origRattlesnakeInps],samplerType,**rattlesnakeArgs) # Relap7 Interface if perturbRelap7: relap7Inps = inputDicts['Relap7Input'] relap7InTypes = [] for relap7In in relap7Inps: relap7InTypes.append(relap7In.getType().strip().lower().split('|')[-1]) if 'relap7input' not in relap7InTypes: errorMessage = 'Variable(s):\n' for relap7VarName in relap7Args['SampledVars'].keys(): errorMessage += relap7VarName + '\n' errorMessage += 'are specified as Relap7 parameters, but no Relap7 input file is listed!' raise IOError(errorMessage) elif relap7InTypes.count('relap7input') > 1: errorMessage = 'Multiple Relap7 input files specified! This interface currently only\n' errorMessage += 'supports one input for each App utilized.' 
raise IOError(errorMessage) origRelap7Inps = origInputFiles[currentInputFiles.index(relap7Inps[0])] relap7Inps = self.Relap7Interface.createNewInput(relap7Inps,[origRelap7Inps],samplerType,**relap7Args) return currentInputFiles def finalizeCodeOutput(self, command, output, workingDir): #may need to implement this method, such as remove unused files, ... pass
true
true
f70fe063edbc04be9bbe4375c8b98151415f3e24
514
py
Python
1. Python/1. Getting Started with Python/27.explicite_type_conversion.py
theparitoshkumar/Data-Structures-Algorithms-using-python
445b9dee56bca637f21267114cc1686d333ea4c4
[ "Apache-2.0" ]
1
2021-12-05T18:02:15.000Z
2021-12-05T18:02:15.000Z
1. Python/1. Getting Started with Python/27.explicite_type_conversion.py
theparitoshkumar/Data-Structures-Algorithms-using-python
445b9dee56bca637f21267114cc1686d333ea4c4
[ "Apache-2.0" ]
null
null
null
1. Python/1. Getting Started with Python/27.explicite_type_conversion.py
theparitoshkumar/Data-Structures-Algorithms-using-python
445b9dee56bca637f21267114cc1686d333ea4c4
[ "Apache-2.0" ]
null
null
null
#Explicit type conversion from int to float num1 = 10 num2 = 20 num3 = num1 + num2 print(num3) print(type(num3)) num4 = float(num1 + num2) print(num4) print(type(num4)) #Explicit type conversion from float to int num1 = 10.2 num2 = 20.6 num3 = (num1 + num2) print(num3) print(type(num3)) num4 = int(num1 + num2) print(num4) print(type(num4)) #Type Conversion between Numbers and Strings priceIcecream = 25 priceBrownie = 45 totalPrice = priceIcecream + priceBrownie print("The total is Rs." + str(totalPrice) )
19.037037
44
0.727626
num1 = 10 num2 = 20 num3 = num1 + num2 print(num3) print(type(num3)) num4 = float(num1 + num2) print(num4) print(type(num4)) num1 = 10.2 num2 = 20.6 num3 = (num1 + num2) print(num3) print(type(num3)) num4 = int(num1 + num2) print(num4) print(type(num4)) priceIcecream = 25 priceBrownie = 45 totalPrice = priceIcecream + priceBrownie print("The total is Rs." + str(totalPrice) )
true
true
f70fe0a01bae4cc23cfd544120436709046c362b
10,820
py
Python
backend/workers/tasks/data.py
uwer/coco-annotator
03f33aee2f1bb00c3b5f93b299f7c45dd7ab36d4
[ "MIT" ]
null
null
null
backend/workers/tasks/data.py
uwer/coco-annotator
03f33aee2f1bb00c3b5f93b299f7c45dd7ab36d4
[ "MIT" ]
null
null
null
backend/workers/tasks/data.py
uwer/coco-annotator
03f33aee2f1bb00c3b5f93b299f7c45dd7ab36d4
[ "MIT" ]
null
null
null
from database import ( fix_ids, ImageModel, CategoryModel, AnnotationModel, DatasetModel, TaskModel, ExportModel ) # import pycocotools.mask as mask import numpy as np import time import json import os from celery import shared_task from ..socket import create_socket from mongoengine import Q from config import Config from pathlib import PurePath def bbox2seg(bbox): return [bbox[0],bbox[1],bbox[0]+bbox[2],bbox[1],bbox[0]+bbox[2],bbox[1]+bbox[3],bbox[0],bbox[1]+bbox[3]] @shared_task def export_annotations(task_id, dataset_id, categories): task = TaskModel.objects.get(id=task_id) dataset = DatasetModel.objects.get(id=dataset_id) task.update(status="PROGRESS") socket = create_socket() task.info("Beginning Export (COCO Format)") db_categories = CategoryModel.objects(id__in=categories, deleted=False) \ .only(*CategoryModel.COCO_PROPERTIES) db_images = ImageModel.objects( deleted=False, dataset_id=dataset.id).only( *ImageModel.COCO_PROPERTIES) db_annotations = AnnotationModel.objects( deleted=False, category_id__in=categories) total_items = db_categories.count() coco = { 'images': [], 'categories': [], 'annotations': [] } total_items += db_images.count() progress = 0 # iterate though all categoires and upsert category_names = [] for category in fix_ids(db_categories): if len(category.get('keypoint_labels', [])) > 0: category['keypoints'] = category.pop('keypoint_labels', []) category['skeleton'] = category.pop('keypoint_edges', []) else: if 'keypoint_edges' in category: del category['keypoint_edges'] if 'keypoint_labels' in category: del category['keypoint_labels'] task.info(f"Adding category: {category.get('name')}") coco.get('categories').append(category) category_names.append(category.get('name')) progress += 1 task.set_progress((progress / total_items) * 100, socket=socket) total_annotations = db_annotations.count() total_images = db_images.count() for image in db_images: image = fix_ids(image) if Config.EXPORT_RELPATH and 'relpath' in image: image['file_name'] = 
image['relpath'] progress += 1 task.set_progress((progress / total_items) * 100, socket=socket) annotations = db_annotations.filter(image_id=image.get('id'))\ .only(*AnnotationModel.COCO_PROPERTIES) annotations = fix_ids(annotations) if len(annotations) == 0: continue num_annotations = 0 for annotation in annotations: has_keypoints = len(annotation.get('keypoints', [])) > 0 has_segmentation = len(annotation.get('segmentation', [])) > 0 if has_keypoints or has_segmentation: if not has_keypoints: if 'keypoints' in annotation: del annotation['keypoints'] else: arr = np.array(annotation.get('keypoints', [])) arr = arr[2::3] annotation['num_keypoints'] = len(arr[arr > 0]) num_annotations += 1 coco.get('annotations').append(annotation) ''' if num_annotations > 0: image["num_annotations"]=num_annotations image["annotated"]=True ''' task.info( f"Exporting {num_annotations} annotations for image {image.get('id')}") coco.get('images').append(image) task.info( f"Done export {total_annotations} annotations and {total_images} images from {dataset.name}") timestamp = time.time() directory = f"{dataset.directory}.exports/" file_path = f"{directory}coco-{timestamp}.json" if not os.path.exists(directory): os.makedirs(directory) task.info(f"Writing export to file {file_path}") with open(file_path, 'w') as fp: json.dump(coco, fp) task.info("Creating export object") export = ExportModel(dataset_id=dataset.id, path=file_path, tags=[ "COCO", *category_names]) export.save() task.set_progress(100, socket=socket) @shared_task def import_annotations(task_id, dataset_id, coco_json): task = TaskModel.objects.get(id=task_id) dataset = DatasetModel.objects.get(id=dataset_id) # UR added relpath directory = os.path.join(Config.DATASET_DIRECTORY, dataset.name) task.update(status="PROGRESS") socket = create_socket() task.info("Beginning Import") images = ImageModel.objects(dataset_id=dataset.id) categories = CategoryModel.objects coco_images = coco_json.get('images', []) coco_annotations = 
coco_json.get('annotations', []) coco_categories = coco_json.get('categories', []) task.info(f"Importing {len(coco_categories)} categories, " f"{len(coco_images)} images, and " f"{len(coco_annotations)} annotations") total_items = sum([ len(coco_categories), len(coco_annotations), len(coco_images) ]) progress = 0 task.info("===== Importing Categories =====") # category id mapping ( file : database ) categories_id = {} # Create any missing categories for category in coco_categories: category_name = category.get('name') category_id = category.get('id') category_model = categories.filter(name__iexact=category_name).first() if category_model is None: task.warning( f"{category_name} category not found (creating a new one)") new_category = CategoryModel( name=category_name, keypoint_edges=category.get('skeleton', []), keypoint_labels=category.get('keypoints', []) ) new_category.save() category_model = new_category dataset.categories.append(new_category.id) task.info(f"{category_name} category found") # map category ids categories_id[category_id] = category_model.id # update progress progress += 1 task.set_progress((progress / total_items) * 100, socket=socket) dataset.update(set__categories=dataset.categories) task.info("===== Loading Images =====") # image id mapping ( file: database ) images_id = {} categories_by_image = {} # Find all images for image in coco_images: image_id = image.get('id') image_filename = image.get('file_name') # update progress progress += 1 task.set_progress((progress / total_items) * 100, socket=socket) # UR added relpath image_model = images.filter(relpath=image_filename).all() if len(image_model) == 0: task.warning(f"Could not find image {image_filename}") continue if len(image_model) > 1: task.error( f"Too many images found with the same file name: {image_filename}") continue task.info(f"Image {image_filename} found") image_model = image_model[0] images_id[image_id] = image_model categories_by_image[image_id] = list() task.info("===== Import 
Annotations =====") for annotation in coco_annotations: image_id = annotation.get('image_id') category_id = annotation.get('category_id') segmentation = annotation.get('segmentation', []) keypoints = annotation.get('keypoints', []) # is_crowd = annotation.get('iscrowed', False) area = annotation.get('area', 0) bbox = annotation.get('bbox', [0, 0, 0, 0]) isbbox = annotation.get('isbbox', False) progress += 1 task.set_progress((progress / total_items) * 100, socket=socket) has_segmentation = (len(segmentation) > 0 or isbbox) and sum(bbox) > 1 has_keypoints = len(keypoints) > 0 if not has_segmentation and not has_keypoints: task.warning( f"Annotation {annotation.get('id')} has no segmentation, bbox or keypoints") continue try: image_model = images_id[image_id] category_model_id = categories_id[category_id] image_categories = categories_by_image[image_id] except KeyError: task.warning( f"Could not find image assoicated with annotation {annotation.get('id')}") continue annotation_model = AnnotationModel.objects( image_id=image_model.id, category_id=category_model_id, segmentation=segmentation, keypoints=keypoints, bbox = bbox ).first() if annotation_model is None: task.info(f"Creating annotation data ({image_id}, {category_id})") annotation_model = AnnotationModel(image_id=image_model.id) annotation_model.category_id = category_model_id annotation_model.color = annotation.get('color') annotation_model.metadata = annotation.get('metadata', {}) if has_segmentation: if len(segmentation) < 1 or len(segmentation[0]) < 1: ## we have an empty segment with a bbox task.info(f"Creating segment from bbox {bbox}") segmentation = [bbox2seg(bbox)] isbbox = True annotation_model.segmentation = segmentation annotation_model.area = area annotation_model.bbox = bbox if has_keypoints: annotation_model.keypoints = keypoints annotation_model.isbbox = isbbox annotation_model.save() image_categories.append(category_id) else: annotation_model.update(deleted=False, isbbox=isbbox) task.info( 
f"Annotation already exists (i:{image_id}, c:{category_id})") for image_id in images_id: image_model = images_id[image_id] category_ids = categories_by_image[image_id] all_category_ids = list(image_model.category_ids) all_category_ids += category_ids num_annotations = AnnotationModel.objects( Q(image_id=image_id) & Q(deleted=False) & (Q(area__gt=0) | Q(keypoints__size__gt=0)) ).count() image_model.update( set__annotated=True, set__category_ids=list(set(all_category_ids)), set__num_annotations=num_annotations ) task.set_progress(100, socket=socket) __all__ = ["export_annotations", "import_annotations"]
32.492492
109
0.614325
from database import ( fix_ids, ImageModel, CategoryModel, AnnotationModel, DatasetModel, TaskModel, ExportModel ) import numpy as np import time import json import os from celery import shared_task from ..socket import create_socket from mongoengine import Q from config import Config from pathlib import PurePath def bbox2seg(bbox): return [bbox[0],bbox[1],bbox[0]+bbox[2],bbox[1],bbox[0]+bbox[2],bbox[1]+bbox[3],bbox[0],bbox[1]+bbox[3]] @shared_task def export_annotations(task_id, dataset_id, categories): task = TaskModel.objects.get(id=task_id) dataset = DatasetModel.objects.get(id=dataset_id) task.update(status="PROGRESS") socket = create_socket() task.info("Beginning Export (COCO Format)") db_categories = CategoryModel.objects(id__in=categories, deleted=False) \ .only(*CategoryModel.COCO_PROPERTIES) db_images = ImageModel.objects( deleted=False, dataset_id=dataset.id).only( *ImageModel.COCO_PROPERTIES) db_annotations = AnnotationModel.objects( deleted=False, category_id__in=categories) total_items = db_categories.count() coco = { 'images': [], 'categories': [], 'annotations': [] } total_items += db_images.count() progress = 0 category_names = [] for category in fix_ids(db_categories): if len(category.get('keypoint_labels', [])) > 0: category['keypoints'] = category.pop('keypoint_labels', []) category['skeleton'] = category.pop('keypoint_edges', []) else: if 'keypoint_edges' in category: del category['keypoint_edges'] if 'keypoint_labels' in category: del category['keypoint_labels'] task.info(f"Adding category: {category.get('name')}") coco.get('categories').append(category) category_names.append(category.get('name')) progress += 1 task.set_progress((progress / total_items) * 100, socket=socket) total_annotations = db_annotations.count() total_images = db_images.count() for image in db_images: image = fix_ids(image) if Config.EXPORT_RELPATH and 'relpath' in image: image['file_name'] = image['relpath'] progress += 1 task.set_progress((progress / total_items) * 100, 
socket=socket) annotations = db_annotations.filter(image_id=image.get('id'))\ .only(*AnnotationModel.COCO_PROPERTIES) annotations = fix_ids(annotations) if len(annotations) == 0: continue num_annotations = 0 for annotation in annotations: has_keypoints = len(annotation.get('keypoints', [])) > 0 has_segmentation = len(annotation.get('segmentation', [])) > 0 if has_keypoints or has_segmentation: if not has_keypoints: if 'keypoints' in annotation: del annotation['keypoints'] else: arr = np.array(annotation.get('keypoints', [])) arr = arr[2::3] annotation['num_keypoints'] = len(arr[arr > 0]) num_annotations += 1 coco.get('annotations').append(annotation) task.info( f"Exporting {num_annotations} annotations for image {image.get('id')}") coco.get('images').append(image) task.info( f"Done export {total_annotations} annotations and {total_images} images from {dataset.name}") timestamp = time.time() directory = f"{dataset.directory}.exports/" file_path = f"{directory}coco-{timestamp}.json" if not os.path.exists(directory): os.makedirs(directory) task.info(f"Writing export to file {file_path}") with open(file_path, 'w') as fp: json.dump(coco, fp) task.info("Creating export object") export = ExportModel(dataset_id=dataset.id, path=file_path, tags=[ "COCO", *category_names]) export.save() task.set_progress(100, socket=socket) @shared_task def import_annotations(task_id, dataset_id, coco_json): task = TaskModel.objects.get(id=task_id) dataset = DatasetModel.objects.get(id=dataset_id) directory = os.path.join(Config.DATASET_DIRECTORY, dataset.name) task.update(status="PROGRESS") socket = create_socket() task.info("Beginning Import") images = ImageModel.objects(dataset_id=dataset.id) categories = CategoryModel.objects coco_images = coco_json.get('images', []) coco_annotations = coco_json.get('annotations', []) coco_categories = coco_json.get('categories', []) task.info(f"Importing {len(coco_categories)} categories, " f"{len(coco_images)} images, and " f"{len(coco_annotations)} 
annotations") total_items = sum([ len(coco_categories), len(coco_annotations), len(coco_images) ]) progress = 0 task.info("===== Importing Categories =====") categories_id = {} for category in coco_categories: category_name = category.get('name') category_id = category.get('id') category_model = categories.filter(name__iexact=category_name).first() if category_model is None: task.warning( f"{category_name} category not found (creating a new one)") new_category = CategoryModel( name=category_name, keypoint_edges=category.get('skeleton', []), keypoint_labels=category.get('keypoints', []) ) new_category.save() category_model = new_category dataset.categories.append(new_category.id) task.info(f"{category_name} category found") categories_id[category_id] = category_model.id progress += 1 task.set_progress((progress / total_items) * 100, socket=socket) dataset.update(set__categories=dataset.categories) task.info("===== Loading Images =====") images_id = {} categories_by_image = {} for image in coco_images: image_id = image.get('id') image_filename = image.get('file_name') progress += 1 task.set_progress((progress / total_items) * 100, socket=socket) image_model = images.filter(relpath=image_filename).all() if len(image_model) == 0: task.warning(f"Could not find image {image_filename}") continue if len(image_model) > 1: task.error( f"Too many images found with the same file name: {image_filename}") continue task.info(f"Image {image_filename} found") image_model = image_model[0] images_id[image_id] = image_model categories_by_image[image_id] = list() task.info("===== Import Annotations =====") for annotation in coco_annotations: image_id = annotation.get('image_id') category_id = annotation.get('category_id') segmentation = annotation.get('segmentation', []) keypoints = annotation.get('keypoints', []) area = annotation.get('area', 0) bbox = annotation.get('bbox', [0, 0, 0, 0]) isbbox = annotation.get('isbbox', False) progress += 1 task.set_progress((progress / total_items) 
* 100, socket=socket) has_segmentation = (len(segmentation) > 0 or isbbox) and sum(bbox) > 1 has_keypoints = len(keypoints) > 0 if not has_segmentation and not has_keypoints: task.warning( f"Annotation {annotation.get('id')} has no segmentation, bbox or keypoints") continue try: image_model = images_id[image_id] category_model_id = categories_id[category_id] image_categories = categories_by_image[image_id] except KeyError: task.warning( f"Could not find image assoicated with annotation {annotation.get('id')}") continue annotation_model = AnnotationModel.objects( image_id=image_model.id, category_id=category_model_id, segmentation=segmentation, keypoints=keypoints, bbox = bbox ).first() if annotation_model is None: task.info(f"Creating annotation data ({image_id}, {category_id})") annotation_model = AnnotationModel(image_id=image_model.id) annotation_model.category_id = category_model_id annotation_model.color = annotation.get('color') annotation_model.metadata = annotation.get('metadata', {}) if has_segmentation: if len(segmentation) < 1 or len(segmentation[0]) < 1: ing segment from bbox {bbox}") segmentation = [bbox2seg(bbox)] isbbox = True annotation_model.segmentation = segmentation annotation_model.area = area annotation_model.bbox = bbox if has_keypoints: annotation_model.keypoints = keypoints annotation_model.isbbox = isbbox annotation_model.save() image_categories.append(category_id) else: annotation_model.update(deleted=False, isbbox=isbbox) task.info( f"Annotation already exists (i:{image_id}, c:{category_id})") for image_id in images_id: image_model = images_id[image_id] category_ids = categories_by_image[image_id] all_category_ids = list(image_model.category_ids) all_category_ids += category_ids num_annotations = AnnotationModel.objects( Q(image_id=image_id) & Q(deleted=False) & (Q(area__gt=0) | Q(keypoints__size__gt=0)) ).count() image_model.update( set__annotated=True, set__category_ids=list(set(all_category_ids)), set__num_annotations=num_annotations 
) task.set_progress(100, socket=socket) __all__ = ["export_annotations", "import_annotations"]
true
true
f70fe15e691eebb7b57139f7a24cfd21bcde7dd8
516
py
Python
tests/r/test_biomass.py
hajime9652/observations
2c8b1ac31025938cb17762e540f2f592e302d5de
[ "Apache-2.0" ]
199
2017-07-24T01:34:27.000Z
2022-01-29T00:50:55.000Z
tests/r/test_biomass.py
hajime9652/observations
2c8b1ac31025938cb17762e540f2f592e302d5de
[ "Apache-2.0" ]
46
2017-09-05T19:27:20.000Z
2019-01-07T09:47:26.000Z
tests/r/test_biomass.py
hajime9652/observations
2c8b1ac31025938cb17762e540f2f592e302d5de
[ "Apache-2.0" ]
45
2017-07-26T00:10:44.000Z
2022-03-16T20:44:59.000Z
from __future__ import absolute_import from __future__ import division from __future__ import print_function import shutil import sys import tempfile from observations.r.biomass import biomass def test_biomass(): """Test module biomass.py by downloading biomass.csv and testing shape of extracted data has 153 rows and 8 columns """ test_path = tempfile.mkdtemp() x_train, metadata = biomass(test_path) try: assert x_train.shape == (153, 8) except: shutil.rmtree(test_path) raise()
21.5
44
0.753876
from __future__ import absolute_import from __future__ import division from __future__ import print_function import shutil import sys import tempfile from observations.r.biomass import biomass def test_biomass(): test_path = tempfile.mkdtemp() x_train, metadata = biomass(test_path) try: assert x_train.shape == (153, 8) except: shutil.rmtree(test_path) raise()
true
true
f70fe1b1005484c82991bd4f9673404e1958f315
1,236
py
Python
examples/satellite-monitor-example.py
VLiu7/mobileinsight-core
a007f18230e09a5102d8035a929c284de0007938
[ "Apache-2.0" ]
null
null
null
examples/satellite-monitor-example.py
VLiu7/mobileinsight-core
a007f18230e09a5102d8035a929c284de0007938
[ "Apache-2.0" ]
null
null
null
examples/satellite-monitor-example.py
VLiu7/mobileinsight-core
a007f18230e09a5102d8035a929c284de0007938
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # Filename: monitor-example.py import os import sys # Import MobileInsight modules from mobile_insight.monitor import OnlineMonitor from mobile_insight.analyzer import MsgLogger if __name__ == "__main__": if len(sys.argv) < 3: print("Error: please specify physical port name and baudrate.") print((__file__, "SERIAL_PORT_NAME BAUNRATE")) sys.exit(1) # Initialize a 3G/4G monitor src = OnlineMonitor() src.set_serial_port(sys.argv[1]) # the serial port to collect the traces src.set_baudrate(int(sys.argv[2])) # the baudrate of the port # Save the monitoring results as an offline log src.save_log_as("./monitor-example.mi2log") # print("finish config") # Enable 3G/4G messages to be monitored. Here we enable RRC (radio # resource control) monitoring # src.enable_log("LTE_RRC_OTA_Packet") # src.enable_log("WCDMA_RRC_OTA_Packet") # src.enable_log("WCDMA_RRC_Serv_Cell_Info") # print("finish enable") # Dump the messages to std I/O. Comment it if it is not needed. dumper = MsgLogger() dumper.set_source(src) dumper.set_decoding(MsgLogger.XML) # decode the message as xml # Start the monitoring src.run()
32.526316
77
0.703074
import os import sys from mobile_insight.monitor import OnlineMonitor from mobile_insight.analyzer import MsgLogger if __name__ == "__main__": if len(sys.argv) < 3: print("Error: please specify physical port name and baudrate.") print((__file__, "SERIAL_PORT_NAME BAUNRATE")) sys.exit(1) src = OnlineMonitor() src.set_serial_port(sys.argv[1]) src.set_baudrate(int(sys.argv[2])) src.save_log_as("./monitor-example.mi2log") dumper = MsgLogger() dumper.set_source(src) dumper.set_decoding(MsgLogger.XML) src.run()
true
true
f70fe2034157cdb53e2d92c4774b7dc656a4f901
1,572
bzl
Python
bazel/repos.bzl
4rdparty/bazel-rules-libcurl
6cc892f41b93d6f11eb15da981f7ef113baa9242
[ "Apache-2.0" ]
null
null
null
bazel/repos.bzl
4rdparty/bazel-rules-libcurl
6cc892f41b93d6f11eb15da981f7ef113baa9242
[ "Apache-2.0" ]
null
null
null
bazel/repos.bzl
4rdparty/bazel-rules-libcurl
6cc892f41b93d6f11eb15da981f7ef113baa9242
[ "Apache-2.0" ]
null
null
null
"""Adds repostories/archives.""" ######################################################################## # DO NOT EDIT THIS FILE unless you are inside the # https://github.com/3rdparty/bazel-rules-curl repository. If you # encounter it anywhere else it is because it has been copied there in # order to simplify adding transitive dependencies. If you want a # different version of bazel-rules-curl follow the Bazel build # instructions at https://github.com/3rdparty/bazel-rules-curl. ######################################################################## load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") def repos(external = True, repo_mapping = {}): if "rules_foreign_cc" not in native.existing_rules(): http_archive( name = "rules_foreign_cc", url = "https://github.com/bazelbuild/rules_foreign_cc/archive/0.5.1.tar.gz", sha256 = "33a5690733c5cc2ede39cb62ebf89e751f2448e27f20c8b2fbbc7d136b166804", strip_prefix = "rules_foreign_cc-0.5.1", repo_mapping = repo_mapping, ) if external and "com_github_4rdparty_bazel_rules_libcurl" not in native.existing_rules(): http_archive( name = "com_github_4rdparty_bazel_rules_libcurl", url = "https://github.com/4rdparty/bazel-rules-libcurl/archive/refs/tags/libcurl-7_78_0.tar.gz", sha256 = "8de476145536ded8df4aacf98f3d5511721d291f78568c1873bab8a080a4e985", strip_prefix = "bazel-rules-libcurl-libcurl-7_78_0", repo_mapping = repo_mapping, )
49.125
108
0.641221
true
true
f70fe2f85bb409f227eb7abc62201aa095f369f2
863
py
Python
setup.py
rlji/pinsage-pytorch
2f275ca916ce21d0d2c0e022eec664cfd09d06cf
[ "Apache-2.0" ]
null
null
null
setup.py
rlji/pinsage-pytorch
2f275ca916ce21d0d2c0e022eec664cfd09d06cf
[ "Apache-2.0" ]
null
null
null
setup.py
rlji/pinsage-pytorch
2f275ca916ce21d0d2c0e022eec664cfd09d06cf
[ "Apache-2.0" ]
null
null
null
from __future__ import absolute_import, unicode_literals from setuptools import setup, find_packages VERSION = '0.1.0' NAME = 'pinsage-pytorch' DESCRIPTION = 'This is a PinSage pytorch library.' URL = 'https://github.com/rlji/pinsage-pytorch' EMAIL = 'me@example.com' AUTHOR = 'rlji' # What python versions are supported? REQUIRES_PYTHON = ">=3.6" # What packages are required for this module to be executed? REQUIRED = [ 'dgl', 'pandas', 'dask[complete]', 'torch', 'numpy', 'scipy', 'tqdm', ] # What packages are optional? EXTRAS = { } setup( name=NAME, version=VERSION, description=DESCRIPTION, author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL, packages=find_packages(exclude=["tests"]), install_requires=REQUIRED, extras_require=EXTRAS, include_package_data=True, )
21.04878
60
0.701043
from __future__ import absolute_import, unicode_literals from setuptools import setup, find_packages VERSION = '0.1.0' NAME = 'pinsage-pytorch' DESCRIPTION = 'This is a PinSage pytorch library.' URL = 'https://github.com/rlji/pinsage-pytorch' EMAIL = 'me@example.com' AUTHOR = 'rlji' REQUIRES_PYTHON = ">=3.6" REQUIRED = [ 'dgl', 'pandas', 'dask[complete]', 'torch', 'numpy', 'scipy', 'tqdm', ] EXTRAS = { } setup( name=NAME, version=VERSION, description=DESCRIPTION, author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL, packages=find_packages(exclude=["tests"]), install_requires=REQUIRED, extras_require=EXTRAS, include_package_data=True, )
true
true
f70fe33ba97c88acc056615ad526b5b4f292cf98
9,924
py
Python
packit/utils/repo.py
wickdChromosome/packit
ee31b8bbab579679f928a05db8125897bf2cad62
[ "MIT" ]
null
null
null
packit/utils/repo.py
wickdChromosome/packit
ee31b8bbab579679f928a05db8125897bf2cad62
[ "MIT" ]
null
null
null
packit/utils/repo.py
wickdChromosome/packit
ee31b8bbab579679f928a05db8125897bf2cad62
[ "MIT" ]
null
null
null
# Copyright Contributors to the Packit project. # SPDX-License-Identifier: MIT import logging import re import subprocess import tempfile from pathlib import Path from typing import Tuple, Optional, Union, List import git import yaml from ogr.parsing import RepoUrl, parse_git_repo from packit.exceptions import PackitException logger = logging.getLogger(__name__) class RepositoryCache: """ Cache for git repositories base on the reference option of `git clone`. * The cache is located in the specified directory and contains separate git repository for each project. * Project name is used to match the git project in the cache. """ def __init__(self, cache_path: Union[str, Path], add_new=False) -> None: self.cache_path = ( Path(cache_path) if isinstance(cache_path, str) else cache_path ) self.add_new = add_new logger.debug( f"Instantiation of the repository cache at {self.cache_path}. " f"New projects will {'not ' if not self.add_new else ''}be added." ) @property def cached_projects(self) -> List[str]: """Project names we have in the cache.""" if not self.cache_path.is_dir(): self.cache_path.mkdir(parents=True) return [f.name for f in self.cache_path.iterdir() if f.is_dir()] def _clone(self, **kwargs) -> git.Repo: """Wrapper around git function so we are able to check the call in tests more easily.""" return git.repo.Repo.clone_from(**kwargs) def get_repo( self, url: str, directory: Union[Path, str] = None, ) -> git.Repo: """ Clone the repository. * If we have this repository in a cache, use the cached repo as a reference when cloning. * If we don't have this repository in a cache and {add_new} is True, clone the repository to cache first and then use it as a reference. 
:param url: will be used to clone the repo :param directory: target path for cloning the repository :return: cloned repository """ directory = str(directory) if directory else tempfile.mkdtemp() if is_git_repo(directory=directory): logger.debug(f"Repo already exists in {directory}.") return git.repo.Repo(directory) logger.debug( f"Cloning repo {url} -> {directory} using repository cache at {self.cache_path}" ) cached_projects = self.cached_projects cached_projects_str = "\n".join(f"- {project}" for project in cached_projects) logger.debug( f"Repositories in the cache ({len(cached_projects)} project(s)):\n{cached_projects_str}" ) project_name = RepoUrl.parse(url).repo reference_repo = self.cache_path.joinpath(project_name) if project_name not in cached_projects and self.add_new: logger.debug(f"Creating reference repo: {reference_repo}") self._clone(url=url, to_path=str(reference_repo), tags=True) if self.add_new or project_name in cached_projects: logger.debug(f"Using reference repo: {reference_repo}") return self._clone( url=url, to_path=directory, tags=True, reference=str(reference_repo) ) return self._clone(url=url, to_path=directory, tags=True) def is_git_repo(directory: Union[Path, str]) -> bool: """ Test, if the directory is a git repo. (Has .git subdirectory?) """ return Path(directory, ".git").is_dir() def get_repo(url: str, directory: Union[Path, str] = None) -> git.Repo: """ Use directory as a git repo or clone repo to the tempdir. 
""" directory = str(directory) if directory else tempfile.mkdtemp() if is_git_repo(directory=directory): logger.debug(f"Repo already exists in {directory}.") repo = git.repo.Repo(directory) else: logger.debug(f"Cloning repo {url} -> {directory}") repo = git.repo.Repo.clone_from(url=url, to_path=directory, tags=True) return repo def get_namespace_and_repo_name(url: str) -> Tuple[Optional[str], str]: parsed_git_repo = parse_git_repo(url) if parsed_git_repo is None or not parsed_git_repo.repo: raise PackitException( f"Invalid URL format, can't obtain namespace and repository name: {url}" ) return parsed_git_repo.namespace, parsed_git_repo.repo def is_a_git_ref(repo: git.Repo, ref: str) -> bool: try: commit = repo.commit(ref) return bool(commit) except git.BadName: return False def get_default_branch(repository: git.Repo) -> str: """ Returns default branch for newly created repos in the parent directory of passed in repository. Accepts `repository` to ensure the closest override of git configuration is used. Args: repository (git.Repo): Git repository closest to the directory where the configuration is applied. Returns: Default branch for new repos, if not supported or not configured returns `master`. 
""" config = repository.config_reader() return config.get_value("init", "defaultBranch", "master") def git_remote_url_to_https_url(inp: str) -> str: """ turn provided git remote URL to https URL: returns empty string if the input can't be processed """ logger.debug(f"Parsing git remote URL {inp!r} and converting it to https-like URL.") parsed_repo = parse_git_repo(inp) if not parsed_repo or not parsed_repo.hostname: logger.debug(f"{inp!r} is not an URL we recognize.") return "" if inp.startswith(("http", "https")): logger.debug(f"Provided input {inp!r} is an url.") return inp optional_suffix = ".git" if inp.endswith(".git") else "" url_str = "https://{}/{}/{}{}".format( parsed_repo.hostname, parsed_repo.namespace, parsed_repo.repo, optional_suffix ) logger.debug(f"URL {inp!r} turned into HTTPS {url_str!r}") return url_str def get_current_version_command( glob_pattern: str, refs: Optional[str] = "tags" ) -> List[str]: """ Returns command that find latest git reference matching given pattern. :param glob_pattern: pattern that is used to find latest ref :param refs: specifies what kind of ref is used; \ default is `"tags"` that searches through all tags (including non-annotated), \ pass `None` to search only annotated tags or `"all"` to search through \ all refs (including branches and remote refs) :return: command to find latest ref """ return [ "git", "describe", "--abbrev=0", f"--{refs}" if refs else "", "--match", glob_pattern, ] def create_new_repo(cwd: Path, switches: List[str]): subprocess.check_call(["git", "init"] + switches + [str(cwd)]) # TODO: Replace with -b / --initial-branch in `git init` when possible if "--bare" not in switches: subprocess.check_call(["git", "checkout", "-b", "main"], cwd=cwd) else: subprocess.check_call( ["git", "symbolic-ref", "HEAD", "refs/heads/main"], cwd=cwd ) def git_patch_ish(patch: str) -> str: """ Massage patch to look like a Git-style patch, so that it can be passed to 'git patch-id' in order to calculate a patch-id. 
:param patch: Patch to transform. :return: Transformed patch. """ # Prettend as if format is 'diff --git' pattern = re.compile(r"^diff -\w+ ", flags=re.MULTILINE) repl = r"diff --git " patch = re.sub(pattern, repl, patch) # Remove timestamps from comparison lines pattern = re.compile(r"^((---|\+\+\+) .+)\t\d{4}.+$", flags=re.MULTILINE) repl = r"\1" patch = re.sub(pattern, repl, patch) # Add missing 'diff --git' lines if "diff --git " not in patch: # Timestamps (see above) already need to be removed # for this substitution pattern to work. pattern = re.compile(r"(\n--- (.+)\n\+\+\+ (.+)\n)") repl = r"\ndiff --git \2 \3\1" patch = re.sub(pattern, repl, patch) return patch def get_message_from_metadata(metadata: dict, header: Optional[str] = None) -> str: if not isinstance(metadata, dict): raise PackitException( f"We can save only dictionaries to metadata. Not {metadata}" ) content = ( yaml.dump(metadata, indent=4, default_flow_style=False) if metadata else "" ) if not header: return content return f"{header}\n\n{content}" def get_metadata_from_message(commit: git.Commit) -> Optional[dict]: """ Tries to load yaml format from the git message. We are skipping first line until the rest of the content is yaml-loaded to dictionary (yaml object type). If nothing found, we return None. Reference: https://gitpython.readthedocs.io/en/stable/reference.html ?highlight=archive#module-git.objects.commit e.g.: I) key: value another: value -> {"key": "value", "another": "value"} II) On sentence. key: value another: value -> {"key": "value", "another": "value"} III) A lot of text before keys. 
key: value another: value -> {"key": "value", "another": "value"} IV) Other values are supported as well: key: - first - second - third :param commit: git.Commit object :return: dict loaded from message if it satisfies the rules above """ splitted_message = commit.message.split("\n") for i in range(len(splitted_message)): message_part = "\n".join(splitted_message[i:]) try: loaded_part = yaml.safe_load(message_part) except yaml.YAMLError: continue if isinstance(loaded_part, dict): return loaded_part return None
31.305994
100
0.637948
import logging import re import subprocess import tempfile from pathlib import Path from typing import Tuple, Optional, Union, List import git import yaml from ogr.parsing import RepoUrl, parse_git_repo from packit.exceptions import PackitException logger = logging.getLogger(__name__) class RepositoryCache: def __init__(self, cache_path: Union[str, Path], add_new=False) -> None: self.cache_path = ( Path(cache_path) if isinstance(cache_path, str) else cache_path ) self.add_new = add_new logger.debug( f"Instantiation of the repository cache at {self.cache_path}. " f"New projects will {'not ' if not self.add_new else ''}be added." ) @property def cached_projects(self) -> List[str]: if not self.cache_path.is_dir(): self.cache_path.mkdir(parents=True) return [f.name for f in self.cache_path.iterdir() if f.is_dir()] def _clone(self, **kwargs) -> git.Repo: return git.repo.Repo.clone_from(**kwargs) def get_repo( self, url: str, directory: Union[Path, str] = None, ) -> git.Repo: directory = str(directory) if directory else tempfile.mkdtemp() if is_git_repo(directory=directory): logger.debug(f"Repo already exists in {directory}.") return git.repo.Repo(directory) logger.debug( f"Cloning repo {url} -> {directory} using repository cache at {self.cache_path}" ) cached_projects = self.cached_projects cached_projects_str = "\n".join(f"- {project}" for project in cached_projects) logger.debug( f"Repositories in the cache ({len(cached_projects)} project(s)):\n{cached_projects_str}" ) project_name = RepoUrl.parse(url).repo reference_repo = self.cache_path.joinpath(project_name) if project_name not in cached_projects and self.add_new: logger.debug(f"Creating reference repo: {reference_repo}") self._clone(url=url, to_path=str(reference_repo), tags=True) if self.add_new or project_name in cached_projects: logger.debug(f"Using reference repo: {reference_repo}") return self._clone( url=url, to_path=directory, tags=True, reference=str(reference_repo) ) return self._clone(url=url, 
to_path=directory, tags=True) def is_git_repo(directory: Union[Path, str]) -> bool: return Path(directory, ".git").is_dir() def get_repo(url: str, directory: Union[Path, str] = None) -> git.Repo: directory = str(directory) if directory else tempfile.mkdtemp() if is_git_repo(directory=directory): logger.debug(f"Repo already exists in {directory}.") repo = git.repo.Repo(directory) else: logger.debug(f"Cloning repo {url} -> {directory}") repo = git.repo.Repo.clone_from(url=url, to_path=directory, tags=True) return repo def get_namespace_and_repo_name(url: str) -> Tuple[Optional[str], str]: parsed_git_repo = parse_git_repo(url) if parsed_git_repo is None or not parsed_git_repo.repo: raise PackitException( f"Invalid URL format, can't obtain namespace and repository name: {url}" ) return parsed_git_repo.namespace, parsed_git_repo.repo def is_a_git_ref(repo: git.Repo, ref: str) -> bool: try: commit = repo.commit(ref) return bool(commit) except git.BadName: return False def get_default_branch(repository: git.Repo) -> str: config = repository.config_reader() return config.get_value("init", "defaultBranch", "master") def git_remote_url_to_https_url(inp: str) -> str: logger.debug(f"Parsing git remote URL {inp!r} and converting it to https-like URL.") parsed_repo = parse_git_repo(inp) if not parsed_repo or not parsed_repo.hostname: logger.debug(f"{inp!r} is not an URL we recognize.") return "" if inp.startswith(("http", "https")): logger.debug(f"Provided input {inp!r} is an url.") return inp optional_suffix = ".git" if inp.endswith(".git") else "" url_str = "https://{}/{}/{}{}".format( parsed_repo.hostname, parsed_repo.namespace, parsed_repo.repo, optional_suffix ) logger.debug(f"URL {inp!r} turned into HTTPS {url_str!r}") return url_str def get_current_version_command( glob_pattern: str, refs: Optional[str] = "tags" ) -> List[str]: return [ "git", "describe", "--abbrev=0", f"--{refs}" if refs else "", "--match", glob_pattern, ] def create_new_repo(cwd: Path, switches: 
List[str]): subprocess.check_call(["git", "init"] + switches + [str(cwd)]) # TODO: Replace with -b / --initial-branch in `git init` when possible if "--bare" not in switches: subprocess.check_call(["git", "checkout", "-b", "main"], cwd=cwd) else: subprocess.check_call( ["git", "symbolic-ref", "HEAD", "refs/heads/main"], cwd=cwd ) def git_patch_ish(patch: str) -> str: # Prettend as if format is 'diff --git' pattern = re.compile(r"^diff -\w+ ", flags=re.MULTILINE) repl = r"diff --git " patch = re.sub(pattern, repl, patch) # Remove timestamps from comparison lines pattern = re.compile(r"^((---|\+\+\+) .+)\t\d{4}.+$", flags=re.MULTILINE) repl = r"\1" patch = re.sub(pattern, repl, patch) # Add missing 'diff --git' lines if "diff --git " not in patch: # Timestamps (see above) already need to be removed # for this substitution pattern to work. pattern = re.compile(r"(\n--- (.+)\n\+\+\+ (.+)\n)") repl = r"\ndiff --git \2 \3\1" patch = re.sub(pattern, repl, patch) return patch def get_message_from_metadata(metadata: dict, header: Optional[str] = None) -> str: if not isinstance(metadata, dict): raise PackitException( f"We can save only dictionaries to metadata. Not {metadata}" ) content = ( yaml.dump(metadata, indent=4, default_flow_style=False) if metadata else "" ) if not header: return content return f"{header}\n\n{content}" def get_metadata_from_message(commit: git.Commit) -> Optional[dict]: splitted_message = commit.message.split("\n") for i in range(len(splitted_message)): message_part = "\n".join(splitted_message[i:]) try: loaded_part = yaml.safe_load(message_part) except yaml.YAMLError: continue if isinstance(loaded_part, dict): return loaded_part return None
true
true
f70fe4521b71231fe23de14972ca86fefa3f06d0
709
py
Python
returning.py
EnescanAkyuz/Python_Beginner
84c32d39ec0727f39655bd88c57ccdeba1929532
[ "MIT" ]
4
2022-03-16T20:49:07.000Z
2022-03-17T14:55:39.000Z
returning.py
EnescanAkyuz/Python_Beginner
84c32d39ec0727f39655bd88c57ccdeba1929532
[ "MIT" ]
null
null
null
returning.py
EnescanAkyuz/Python_Beginner
84c32d39ec0727f39655bd88c57ccdeba1929532
[ "MIT" ]
null
null
null
# def yetki_sorgula(page): # def inner(role): # if role == "admin" : # print("{0} rolü {1} sayfasına ulaşabilir." .format(page, role)) # else: # print("{0} rolü {1} sayfasına ulaşamaz." .format(page, role)) # return inner # user1 = yetki_sorgula('arayüz') # print(user1("admin")) def islem(islem_adi): def toplam(*args): toplam = 0 for i in args: toplam+=i return toplam def carpma(*args): carpim = 1 for i in args: carpim*=i return carpim if islem_adi == "toplam": return toplam else: return carpma toplama = islem("toplam") print(toplama(1,3,5,7,9))
22.15625
77
0.533145
def islem(islem_adi): def toplam(*args): toplam = 0 for i in args: toplam+=i return toplam def carpma(*args): carpim = 1 for i in args: carpim*=i return carpim if islem_adi == "toplam": return toplam else: return carpma toplama = islem("toplam") print(toplama(1,3,5,7,9))
true
true
f70fe4678dd09d611f534904898182e688634ba5
302
py
Python
utils/all_utils.py
codersatyam/Perceptron
8df4975405020c40aaec0179757f185d9ebbdff8
[ "MIT" ]
null
null
null
utils/all_utils.py
codersatyam/Perceptron
8df4975405020c40aaec0179757f185d9ebbdff8
[ "MIT" ]
null
null
null
utils/all_utils.py
codersatyam/Perceptron
8df4975405020c40aaec0179757f185d9ebbdff8
[ "MIT" ]
null
null
null
import numpy as np import pandas as pd import os import joblib def preparedata(df): x=df.drop("y",axis=1) y=df["y"] return x,y def save_model(model,filename): model_dir="model" os.makedirs(model_dir,exist_ok=True) filepath=os.path.join(model_dir,filename) joblib.dump(model,filepath)
20.133333
43
0.731788
import numpy as np import pandas as pd import os import joblib def preparedata(df): x=df.drop("y",axis=1) y=df["y"] return x,y def save_model(model,filename): model_dir="model" os.makedirs(model_dir,exist_ok=True) filepath=os.path.join(model_dir,filename) joblib.dump(model,filepath)
true
true
f70fe4c61d30fb5146ad4f79eb060ce3710ed779
1,178
py
Python
examples/flask_app.py
cristianMeli/ubatch
fb3c6dccf0a9e25e25f5956e2e91ed70e9ea01ee
[ "Apache-2.0" ]
null
null
null
examples/flask_app.py
cristianMeli/ubatch
fb3c6dccf0a9e25e25f5956e2e91ed70e9ea01ee
[ "Apache-2.0" ]
null
null
null
examples/flask_app.py
cristianMeli/ubatch
fb3c6dccf0a9e25e25f5956e2e91ed70e9ea01ee
[ "Apache-2.0" ]
null
null
null
import random import numpy as np from typing import Dict, List from flask import Flask from flask_restx import Resource, Api # from numpy import genfromtxt from ubatch import ubatch_decorator # from keras.models import load_model from sklearn.datasets import fetch_20newsgroups from sklearn.model_selection import train_test_split from joblib import load ngd = fetch_20newsgroups(subset="all") X = ngd.data y = ngd.target _, X_test, _, _ = train_test_split(X, y, test_size=0.33) model = load("xgbregressor.joblib") # X_test = genfromtxt("xgbregressor_inputs.csv", delimiter=",") app = Flask(__name__) api = Api(app) @ubatch_decorator(max_size=100, timeout=0.01) def predict(data: List[np.array]) -> List[np.float32]: return model.predict(np.array(data)) # type: ignore @api.route("/predict_ubatch") class BatchPredict(Resource): def post(self) -> Dict[str, float]: output = predict.ubatch(random.choice(X_test)) return {"prediction": float(output)} @api.route("/predict") class Predict(Resource): def post(self) -> Dict[str, float]: output = predict([random.choice(X_test)])[0] return {"prediction": float(output)}
23.098039
63
0.724109
import random import numpy as np from typing import Dict, List from flask import Flask from flask_restx import Resource, Api from ubatch import ubatch_decorator from sklearn.datasets import fetch_20newsgroups from sklearn.model_selection import train_test_split from joblib import load ngd = fetch_20newsgroups(subset="all") X = ngd.data y = ngd.target _, X_test, _, _ = train_test_split(X, y, test_size=0.33) model = load("xgbregressor.joblib") app = Flask(__name__) api = Api(app) @ubatch_decorator(max_size=100, timeout=0.01) def predict(data: List[np.array]) -> List[np.float32]: return model.predict(np.array(data)) @api.route("/predict_ubatch") class BatchPredict(Resource): def post(self) -> Dict[str, float]: output = predict.ubatch(random.choice(X_test)) return {"prediction": float(output)} @api.route("/predict") class Predict(Resource): def post(self) -> Dict[str, float]: output = predict([random.choice(X_test)])[0] return {"prediction": float(output)}
true
true
f70fe5076940addc4ee562f410471d9471696de1
178
py
Python
api/geocode/geocode_clear_cache.py
michael-pryor/GeoTweetSearch
cb6d0a7732a0584022f3720e3f696fb709dd45b5
[ "Apache-2.0" ]
1
2016-04-08T08:40:34.000Z
2016-04-08T08:40:34.000Z
api/geocode/geocode_clear_cache.py
watfordxp/GeoTweetSearch
cb6d0a7732a0584022f3720e3f696fb709dd45b5
[ "Apache-2.0" ]
null
null
null
api/geocode/geocode_clear_cache.py
watfordxp/GeoTweetSearch
cb6d0a7732a0584022f3720e3f696fb709dd45b5
[ "Apache-2.0" ]
2
2015-08-28T17:08:26.000Z
2016-12-30T21:59:46.000Z
from api.caching.caching_shared import getDatabase __author__ = 'Michael Pryor' if __name__ == '__main__': db = getDatabase() db.place.remove() db.geocode.remove()
19.777778
50
0.707865
from api.caching.caching_shared import getDatabase __author__ = 'Michael Pryor' if __name__ == '__main__': db = getDatabase() db.place.remove() db.geocode.remove()
true
true
f70fe77c34822d34426ad7b00c5ac679b16a18e2
2,689
py
Python
tests/initialisations/test_bradleyfayyad1998.py
simonharris/pykmeans
256e0c6c7284182aae9c10783cf50778af120514
[ "MIT" ]
1
2021-12-30T01:25:03.000Z
2021-12-30T01:25:03.000Z
tests/initialisations/test_bradleyfayyad1998.py
simonharris/pycluster
4d47eb12a2bbaf1b05d7ccfd0cfc9ccf78ddf86d
[ "MIT" ]
3
2020-11-12T12:36:00.000Z
2021-06-18T12:46:59.000Z
tests/initialisations/test_bradleyfayyad1998.py
simonharris/pycluster
4d47eb12a2bbaf1b05d7ccfd0cfc9ccf78ddf86d
[ "MIT" ]
1
2021-12-30T01:32:32.000Z
2021-12-30T01:32:32.000Z
""" Test for Bradley & Fayyad 1998 initialisation algorithm """ import unittest import numpy as np from datasets import testloader from initialisations import bradley as bfinit import kmeans # pylint: disable=R0201,W0212 class BfTestSuite(unittest.TestCase): """Test suite for B&F""" def test_code_runs(self): """At least prove it runs""" dataset = testloader.load_iris() centroids = bfinit.generate(dataset.data, 3) self.assertEqual((3, 4), centroids.shape) def test_with_hartigan(self): """A tiny dataset which can't possibly work here""" dataset = testloader.load_hartigan() with self.assertRaises(ValueError): bfinit.generate(dataset.data, 3) def test_find_furthest(self): """Find the data point furthest from its cluster center""" distances = np.array([ [1, 2, 3], # 1 [7, 5, 16], # 5 [7, 26, 4], # 4 [19, 20, 21], # 19 [6, 18, 8] # 6 ]) np.testing.assert_equal(bfinit._find_furthest(distances), [3]) np.testing.assert_equal(np.sort(bfinit._find_furthest(distances, 2)), [3, 4]) np.testing.assert_equal(np.sort(bfinit._find_furthest(distances, 3)), [1, 3, 4]) def test_with_1_empty(self): """Seeds and data known to leave one empty cluster after k_means(), and thus trigger k_means_mod() to reassign a centroid""" seeds = np.array([ [5.4, 3.0, 4.5, 1.5], [6.7, 3.0, 5.0, 1.7], [5.1, 3.8, 1.5, 0.3], # Doesn't get any data points assigned ]) data = np.array([ # Assigned to 0 but is furthest, so becomes the new 2 [6.4, 2.9, 4.3, 1.3], [6.3, 3.4, 5.6, 2.4], [6.8, 3.0, 5.5, 2.1], [5.0, 2.0, 3.5, 1.0], [5.8, 2.7, 5.1, 1.9], ]) expected_labels = [2, 1, 1, 0, 0] expected_centroids = [ [5.4, 2.35, 4.3, 1.45], [6.55, 3.2, 5.55, 2.25], [6.4, 2.9, 4.3, 1.3], # The new 2 ] centroids = bfinit._k_means_mod(seeds, data, len(seeds)) labels = kmeans.distance_table(data, centroids).argmin(1) np.testing.assert_array_equal(labels, expected_labels) np.testing.assert_array_equal(centroids, expected_centroids) def _test_with_n_empty(self): """Seeds and data known to leave more than one empty cluster This 
is left as TODO for now, since no way can I force sklearn to give me more than one empty cluster. """
29.877778
77
0.550019
import unittest import numpy as np from datasets import testloader from initialisations import bradley as bfinit import kmeans class BfTestSuite(unittest.TestCase): def test_code_runs(self): dataset = testloader.load_iris() centroids = bfinit.generate(dataset.data, 3) self.assertEqual((3, 4), centroids.shape) def test_with_hartigan(self): dataset = testloader.load_hartigan() with self.assertRaises(ValueError): bfinit.generate(dataset.data, 3) def test_find_furthest(self): distances = np.array([ [1, 2, 3], [7, 5, 16], [7, 26, 4], [19, 20, 21], [6, 18, 8] ]) np.testing.assert_equal(bfinit._find_furthest(distances), [3]) np.testing.assert_equal(np.sort(bfinit._find_furthest(distances, 2)), [3, 4]) np.testing.assert_equal(np.sort(bfinit._find_furthest(distances, 3)), [1, 3, 4]) def test_with_1_empty(self): seeds = np.array([ [5.4, 3.0, 4.5, 1.5], [6.7, 3.0, 5.0, 1.7], [5.1, 3.8, 1.5, 0.3], ]) data = np.array([ # Assigned to 0 but is furthest, so becomes the new 2 [6.4, 2.9, 4.3, 1.3], [6.3, 3.4, 5.6, 2.4], [6.8, 3.0, 5.5, 2.1], [5.0, 2.0, 3.5, 1.0], [5.8, 2.7, 5.1, 1.9], ]) expected_labels = [2, 1, 1, 0, 0] expected_centroids = [ [5.4, 2.35, 4.3, 1.45], [6.55, 3.2, 5.55, 2.25], [6.4, 2.9, 4.3, 1.3], # The new 2 ] centroids = bfinit._k_means_mod(seeds, data, len(seeds)) labels = kmeans.distance_table(data, centroids).argmin(1) np.testing.assert_array_equal(labels, expected_labels) np.testing.assert_array_equal(centroids, expected_centroids) def _test_with_n_empty(self):
true
true
f70fe7c61417fc91a75e5addb50e54ff45c99f58
10,424
py
Python
ultracart/models/item_revguard.py
UltraCart/rest_api_v2_sdk_python
d734ea13fabc7a57872ff68bac06861edb8fd882
[ "Apache-2.0" ]
1
2018-03-15T16:56:23.000Z
2018-03-15T16:56:23.000Z
ultracart/models/item_revguard.py
UltraCart/rest_api_v2_sdk_python
d734ea13fabc7a57872ff68bac06861edb8fd882
[ "Apache-2.0" ]
null
null
null
ultracart/models/item_revguard.py
UltraCart/rest_api_v2_sdk_python
d734ea13fabc7a57872ff68bac06861edb8fd882
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ UltraCart Rest API V2 UltraCart REST API Version 2 # noqa: E501 OpenAPI spec version: 2.0.0 Contact: support@ultracart.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class ItemRevguard(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'revguard_canceled_csr_prompt_group': 'int', 'revguard_canceled_ivr_prompt_group': 'int', 'revguard_canceled_web_prompt_group': 'int', 'revguard_client_brand': 'int', 'revguard_csr_prompt_group': 'int', 'revguard_ivr_prompt_group': 'int', 'revguard_web_prompt_group': 'int' } attribute_map = { 'revguard_canceled_csr_prompt_group': 'revguard_canceled_csr_prompt_group', 'revguard_canceled_ivr_prompt_group': 'revguard_canceled_ivr_prompt_group', 'revguard_canceled_web_prompt_group': 'revguard_canceled_web_prompt_group', 'revguard_client_brand': 'revguard_client_brand', 'revguard_csr_prompt_group': 'revguard_csr_prompt_group', 'revguard_ivr_prompt_group': 'revguard_ivr_prompt_group', 'revguard_web_prompt_group': 'revguard_web_prompt_group' } def __init__(self, revguard_canceled_csr_prompt_group=None, revguard_canceled_ivr_prompt_group=None, revguard_canceled_web_prompt_group=None, revguard_client_brand=None, revguard_csr_prompt_group=None, revguard_ivr_prompt_group=None, revguard_web_prompt_group=None): # noqa: E501 """ItemRevguard - a model defined in Swagger""" # noqa: E501 self._revguard_canceled_csr_prompt_group = None self._revguard_canceled_ivr_prompt_group = None self._revguard_canceled_web_prompt_group = None self._revguard_client_brand = None self._revguard_csr_prompt_group = None self._revguard_ivr_prompt_group = None self._revguard_web_prompt_group = None 
self.discriminator = None if revguard_canceled_csr_prompt_group is not None: self.revguard_canceled_csr_prompt_group = revguard_canceled_csr_prompt_group if revguard_canceled_ivr_prompt_group is not None: self.revguard_canceled_ivr_prompt_group = revguard_canceled_ivr_prompt_group if revguard_canceled_web_prompt_group is not None: self.revguard_canceled_web_prompt_group = revguard_canceled_web_prompt_group if revguard_client_brand is not None: self.revguard_client_brand = revguard_client_brand if revguard_csr_prompt_group is not None: self.revguard_csr_prompt_group = revguard_csr_prompt_group if revguard_ivr_prompt_group is not None: self.revguard_ivr_prompt_group = revguard_ivr_prompt_group if revguard_web_prompt_group is not None: self.revguard_web_prompt_group = revguard_web_prompt_group @property def revguard_canceled_csr_prompt_group(self): """Gets the revguard_canceled_csr_prompt_group of this ItemRevguard. # noqa: E501 Canceled CSR prompt group # noqa: E501 :return: The revguard_canceled_csr_prompt_group of this ItemRevguard. # noqa: E501 :rtype: int """ return self._revguard_canceled_csr_prompt_group @revguard_canceled_csr_prompt_group.setter def revguard_canceled_csr_prompt_group(self, revguard_canceled_csr_prompt_group): """Sets the revguard_canceled_csr_prompt_group of this ItemRevguard. Canceled CSR prompt group # noqa: E501 :param revguard_canceled_csr_prompt_group: The revguard_canceled_csr_prompt_group of this ItemRevguard. # noqa: E501 :type: int """ self._revguard_canceled_csr_prompt_group = revguard_canceled_csr_prompt_group @property def revguard_canceled_ivr_prompt_group(self): """Gets the revguard_canceled_ivr_prompt_group of this ItemRevguard. # noqa: E501 IVR prompt group # noqa: E501 :return: The revguard_canceled_ivr_prompt_group of this ItemRevguard. 
# noqa: E501 :rtype: int """ return self._revguard_canceled_ivr_prompt_group @revguard_canceled_ivr_prompt_group.setter def revguard_canceled_ivr_prompt_group(self, revguard_canceled_ivr_prompt_group): """Sets the revguard_canceled_ivr_prompt_group of this ItemRevguard. IVR prompt group # noqa: E501 :param revguard_canceled_ivr_prompt_group: The revguard_canceled_ivr_prompt_group of this ItemRevguard. # noqa: E501 :type: int """ self._revguard_canceled_ivr_prompt_group = revguard_canceled_ivr_prompt_group @property def revguard_canceled_web_prompt_group(self): """Gets the revguard_canceled_web_prompt_group of this ItemRevguard. # noqa: E501 Canceled web prompt group # noqa: E501 :return: The revguard_canceled_web_prompt_group of this ItemRevguard. # noqa: E501 :rtype: int """ return self._revguard_canceled_web_prompt_group @revguard_canceled_web_prompt_group.setter def revguard_canceled_web_prompt_group(self, revguard_canceled_web_prompt_group): """Sets the revguard_canceled_web_prompt_group of this ItemRevguard. Canceled web prompt group # noqa: E501 :param revguard_canceled_web_prompt_group: The revguard_canceled_web_prompt_group of this ItemRevguard. # noqa: E501 :type: int """ self._revguard_canceled_web_prompt_group = revguard_canceled_web_prompt_group @property def revguard_client_brand(self): """Gets the revguard_client_brand of this ItemRevguard. # noqa: E501 Client brand # noqa: E501 :return: The revguard_client_brand of this ItemRevguard. # noqa: E501 :rtype: int """ return self._revguard_client_brand @revguard_client_brand.setter def revguard_client_brand(self, revguard_client_brand): """Sets the revguard_client_brand of this ItemRevguard. Client brand # noqa: E501 :param revguard_client_brand: The revguard_client_brand of this ItemRevguard. # noqa: E501 :type: int """ self._revguard_client_brand = revguard_client_brand @property def revguard_csr_prompt_group(self): """Gets the revguard_csr_prompt_group of this ItemRevguard. 
# noqa: E501 CSR prompt group # noqa: E501 :return: The revguard_csr_prompt_group of this ItemRevguard. # noqa: E501 :rtype: int """ return self._revguard_csr_prompt_group @revguard_csr_prompt_group.setter def revguard_csr_prompt_group(self, revguard_csr_prompt_group): """Sets the revguard_csr_prompt_group of this ItemRevguard. CSR prompt group # noqa: E501 :param revguard_csr_prompt_group: The revguard_csr_prompt_group of this ItemRevguard. # noqa: E501 :type: int """ self._revguard_csr_prompt_group = revguard_csr_prompt_group @property def revguard_ivr_prompt_group(self): """Gets the revguard_ivr_prompt_group of this ItemRevguard. # noqa: E501 IVR prompt group # noqa: E501 :return: The revguard_ivr_prompt_group of this ItemRevguard. # noqa: E501 :rtype: int """ return self._revguard_ivr_prompt_group @revguard_ivr_prompt_group.setter def revguard_ivr_prompt_group(self, revguard_ivr_prompt_group): """Sets the revguard_ivr_prompt_group of this ItemRevguard. IVR prompt group # noqa: E501 :param revguard_ivr_prompt_group: The revguard_ivr_prompt_group of this ItemRevguard. # noqa: E501 :type: int """ self._revguard_ivr_prompt_group = revguard_ivr_prompt_group @property def revguard_web_prompt_group(self): """Gets the revguard_web_prompt_group of this ItemRevguard. # noqa: E501 Web prompt group # noqa: E501 :return: The revguard_web_prompt_group of this ItemRevguard. # noqa: E501 :rtype: int """ return self._revguard_web_prompt_group @revguard_web_prompt_group.setter def revguard_web_prompt_group(self, revguard_web_prompt_group): """Sets the revguard_web_prompt_group of this ItemRevguard. Web prompt group # noqa: E501 :param revguard_web_prompt_group: The revguard_web_prompt_group of this ItemRevguard. 
# noqa: E501 :type: int """ self._revguard_web_prompt_group = revguard_web_prompt_group def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ItemRevguard, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ItemRevguard): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
36.447552
284
0.684574
import pprint import re import six class ItemRevguard(object): swagger_types = { 'revguard_canceled_csr_prompt_group': 'int', 'revguard_canceled_ivr_prompt_group': 'int', 'revguard_canceled_web_prompt_group': 'int', 'revguard_client_brand': 'int', 'revguard_csr_prompt_group': 'int', 'revguard_ivr_prompt_group': 'int', 'revguard_web_prompt_group': 'int' } attribute_map = { 'revguard_canceled_csr_prompt_group': 'revguard_canceled_csr_prompt_group', 'revguard_canceled_ivr_prompt_group': 'revguard_canceled_ivr_prompt_group', 'revguard_canceled_web_prompt_group': 'revguard_canceled_web_prompt_group', 'revguard_client_brand': 'revguard_client_brand', 'revguard_csr_prompt_group': 'revguard_csr_prompt_group', 'revguard_ivr_prompt_group': 'revguard_ivr_prompt_group', 'revguard_web_prompt_group': 'revguard_web_prompt_group' } def __init__(self, revguard_canceled_csr_prompt_group=None, revguard_canceled_ivr_prompt_group=None, revguard_canceled_web_prompt_group=None, revguard_client_brand=None, revguard_csr_prompt_group=None, revguard_ivr_prompt_group=None, revguard_web_prompt_group=None): self._revguard_canceled_csr_prompt_group = None self._revguard_canceled_ivr_prompt_group = None self._revguard_canceled_web_prompt_group = None self._revguard_client_brand = None self._revguard_csr_prompt_group = None self._revguard_ivr_prompt_group = None self._revguard_web_prompt_group = None self.discriminator = None if revguard_canceled_csr_prompt_group is not None: self.revguard_canceled_csr_prompt_group = revguard_canceled_csr_prompt_group if revguard_canceled_ivr_prompt_group is not None: self.revguard_canceled_ivr_prompt_group = revguard_canceled_ivr_prompt_group if revguard_canceled_web_prompt_group is not None: self.revguard_canceled_web_prompt_group = revguard_canceled_web_prompt_group if revguard_client_brand is not None: self.revguard_client_brand = revguard_client_brand if revguard_csr_prompt_group is not None: self.revguard_csr_prompt_group = revguard_csr_prompt_group if 
revguard_ivr_prompt_group is not None: self.revguard_ivr_prompt_group = revguard_ivr_prompt_group if revguard_web_prompt_group is not None: self.revguard_web_prompt_group = revguard_web_prompt_group @property def revguard_canceled_csr_prompt_group(self): return self._revguard_canceled_csr_prompt_group @revguard_canceled_csr_prompt_group.setter def revguard_canceled_csr_prompt_group(self, revguard_canceled_csr_prompt_group): self._revguard_canceled_csr_prompt_group = revguard_canceled_csr_prompt_group @property def revguard_canceled_ivr_prompt_group(self): return self._revguard_canceled_ivr_prompt_group @revguard_canceled_ivr_prompt_group.setter def revguard_canceled_ivr_prompt_group(self, revguard_canceled_ivr_prompt_group): self._revguard_canceled_ivr_prompt_group = revguard_canceled_ivr_prompt_group @property def revguard_canceled_web_prompt_group(self): return self._revguard_canceled_web_prompt_group @revguard_canceled_web_prompt_group.setter def revguard_canceled_web_prompt_group(self, revguard_canceled_web_prompt_group): self._revguard_canceled_web_prompt_group = revguard_canceled_web_prompt_group @property def revguard_client_brand(self): return self._revguard_client_brand @revguard_client_brand.setter def revguard_client_brand(self, revguard_client_brand): self._revguard_client_brand = revguard_client_brand @property def revguard_csr_prompt_group(self): return self._revguard_csr_prompt_group @revguard_csr_prompt_group.setter def revguard_csr_prompt_group(self, revguard_csr_prompt_group): self._revguard_csr_prompt_group = revguard_csr_prompt_group @property def revguard_ivr_prompt_group(self): return self._revguard_ivr_prompt_group @revguard_ivr_prompt_group.setter def revguard_ivr_prompt_group(self, revguard_ivr_prompt_group): self._revguard_ivr_prompt_group = revguard_ivr_prompt_group @property def revguard_web_prompt_group(self): return self._revguard_web_prompt_group @revguard_web_prompt_group.setter def revguard_web_prompt_group(self, 
revguard_web_prompt_group): self._revguard_web_prompt_group = revguard_web_prompt_group def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ItemRevguard, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, ItemRevguard): return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
true
true
f70fe7e6b28f7caeabf6ae69e9564f3ee8605915
4,249
py
Python
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_shellutil_filesystem_oper.py
tkamata-test/ydk-py
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_shellutil_filesystem_oper.py
tkamata-test/ydk-py
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_shellutil_filesystem_oper.py
tkamata-test/ydk-py
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
import re import collections from enum import Enum from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION from ydk.errors import YPYError, YPYModelError from ydk.providers._importer import _yang_ns _meta_table = { 'FileSystem.Node.FileSystem_' : { 'meta_info' : _MetaInfoClass('FileSystem.Node.FileSystem_', False, [ _MetaInfoClassMember('flags', ATTRIBUTE, 'str' , None, None, [], [], ''' Flags of file system ''', 'flags', 'Cisco-IOS-XR-shellutil-filesystem-oper', False), _MetaInfoClassMember('free', ATTRIBUTE, 'str' , None, None, [], [], ''' Free space in the file system in bytes ''', 'free', 'Cisco-IOS-XR-shellutil-filesystem-oper', False), _MetaInfoClassMember('prefixes', ATTRIBUTE, 'str' , None, None, [], [], ''' Prefixes of file system ''', 'prefixes', 'Cisco-IOS-XR-shellutil-filesystem-oper', False), _MetaInfoClassMember('size', ATTRIBUTE, 'str' , None, None, [], [], ''' Size of the file system in bytes ''', 'size', 'Cisco-IOS-XR-shellutil-filesystem-oper', False), _MetaInfoClassMember('type', ATTRIBUTE, 'str' , None, None, [], [], ''' Type of file system ''', 'type', 'Cisco-IOS-XR-shellutil-filesystem-oper', False), ], 'Cisco-IOS-XR-shellutil-filesystem-oper', 'file-system', _yang_ns._namespaces['Cisco-IOS-XR-shellutil-filesystem-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper' ), }, 'FileSystem.Node' : { 'meta_info' : _MetaInfoClass('FileSystem.Node', False, [ _MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None, [], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'], ''' Node name ''', 'node_name', 'Cisco-IOS-XR-shellutil-filesystem-oper', True), _MetaInfoClassMember('file-system', REFERENCE_LIST, 'FileSystem_' , 
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper', 'FileSystem.Node.FileSystem_', [], [], ''' Available file systems ''', 'file_system', 'Cisco-IOS-XR-shellutil-filesystem-oper', False), ], 'Cisco-IOS-XR-shellutil-filesystem-oper', 'node', _yang_ns._namespaces['Cisco-IOS-XR-shellutil-filesystem-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper' ), }, 'FileSystem' : { 'meta_info' : _MetaInfoClass('FileSystem', False, [ _MetaInfoClassMember('node', REFERENCE_LIST, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper', 'FileSystem.Node', [], [], ''' Node ID ''', 'node', 'Cisco-IOS-XR-shellutil-filesystem-oper', False), ], 'Cisco-IOS-XR-shellutil-filesystem-oper', 'file-system', _yang_ns._namespaces['Cisco-IOS-XR-shellutil-filesystem-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper' ), }, } _meta_table['FileSystem.Node.FileSystem_']['meta_info'].parent =_meta_table['FileSystem.Node']['meta_info'] _meta_table['FileSystem.Node']['meta_info'].parent =_meta_table['FileSystem']['meta_info']
42.069307
183
0.534243
import re import collections from enum import Enum from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION from ydk.errors import YPYError, YPYModelError from ydk.providers._importer import _yang_ns _meta_table = { 'FileSystem.Node.FileSystem_' : { 'meta_info' : _MetaInfoClass('FileSystem.Node.FileSystem_', False, [ _MetaInfoClassMember('flags', ATTRIBUTE, 'str' , None, None, [], [], ''' Flags of file system ''', 'flags', 'Cisco-IOS-XR-shellutil-filesystem-oper', False), _MetaInfoClassMember('free', ATTRIBUTE, 'str' , None, None, [], [], ''' Free space in the file system in bytes ''', 'free', 'Cisco-IOS-XR-shellutil-filesystem-oper', False), _MetaInfoClassMember('prefixes', ATTRIBUTE, 'str' , None, None, [], [], ''' Prefixes of file system ''', 'prefixes', 'Cisco-IOS-XR-shellutil-filesystem-oper', False), _MetaInfoClassMember('size', ATTRIBUTE, 'str' , None, None, [], [], ''' Size of the file system in bytes ''', 'size', 'Cisco-IOS-XR-shellutil-filesystem-oper', False), _MetaInfoClassMember('type', ATTRIBUTE, 'str' , None, None, [], [], ''' Type of file system ''', 'type', 'Cisco-IOS-XR-shellutil-filesystem-oper', False), ], 'Cisco-IOS-XR-shellutil-filesystem-oper', 'file-system', _yang_ns._namespaces['Cisco-IOS-XR-shellutil-filesystem-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper' ), }, 'FileSystem.Node' : { 'meta_info' : _MetaInfoClass('FileSystem.Node', False, [ _MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None, [], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'], ''' Node name ''', 'node_name', 'Cisco-IOS-XR-shellutil-filesystem-oper', True), _MetaInfoClassMember('file-system', REFERENCE_LIST, 'FileSystem_' , 
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper', 'FileSystem.Node.FileSystem_', [], [], ''' Available file systems ''', 'file_system', 'Cisco-IOS-XR-shellutil-filesystem-oper', False), ], 'Cisco-IOS-XR-shellutil-filesystem-oper', 'node', _yang_ns._namespaces['Cisco-IOS-XR-shellutil-filesystem-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper' ), }, 'FileSystem' : { 'meta_info' : _MetaInfoClass('FileSystem', False, [ _MetaInfoClassMember('node', REFERENCE_LIST, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper', 'FileSystem.Node', [], [], ''' Node ID ''', 'node', 'Cisco-IOS-XR-shellutil-filesystem-oper', False), ], 'Cisco-IOS-XR-shellutil-filesystem-oper', 'file-system', _yang_ns._namespaces['Cisco-IOS-XR-shellutil-filesystem-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper' ), }, } _meta_table['FileSystem.Node.FileSystem_']['meta_info'].parent =_meta_table['FileSystem.Node']['meta_info'] _meta_table['FileSystem.Node']['meta_info'].parent =_meta_table['FileSystem']['meta_info']
true
true
f70fe802d4a25dbb1db4f6162b56cf2586d5e4a1
22,011
py
Python
codalab/worker/bundle_manager.py
ethancaballero/codalab-cli
b9dbe569ef12e22242bf3ad8c1961443f1b736f4
[ "Apache-2.0" ]
null
null
null
codalab/worker/bundle_manager.py
ethancaballero/codalab-cli
b9dbe569ef12e22242bf3ad8c1961443f1b736f4
[ "Apache-2.0" ]
null
null
null
codalab/worker/bundle_manager.py
ethancaballero/codalab-cli
b9dbe569ef12e22242bf3ad8c1961443f1b736f4
[ "Apache-2.0" ]
null
null
null
import datetime import logging import os import random import re import sys import threading import time import traceback from codalab.objects.permission import check_bundles_have_read_permission from codalab.common import PermissionError from codalab.lib import bundle_util, formatting, path_util from codalabworker.file_util import remove_path from codalabworker.bundle_state import State logger = logging.getLogger(__name__) WORKER_TIMEOUT_SECONDS = 60 class BundleManager(object): """ Assigns run bundles to workers and makes make bundles. """ @staticmethod def create(codalab_manager): config = codalab_manager.config.get('workers') if not config: print >>sys.stderr, 'config.json file missing a workers section.' exit(1) from codalab.worker.default_bundle_manager import DefaultBundleManager self = DefaultBundleManager() self._model = codalab_manager.model() self._worker_model = codalab_manager.worker_model() self._bundle_store = codalab_manager.bundle_store() self._upload_manager = codalab_manager.upload_manager() self._exiting_lock = threading.Lock() self._exiting = False self._make_uuids_lock = threading.Lock() self._make_uuids = set() def parse(to_value, field): return to_value(config[field]) if field in config else None self._max_request_time = parse(formatting.parse_duration, 'max_request_time') self._max_request_memory = parse(formatting.parse_size, 'max_request_memory') self._max_request_disk = parse(formatting.parse_size, 'max_request_disk') self._default_cpu_image = config.get('default_cpu_image') self._default_gpu_image = config.get('default_gpu_image') logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) return self def run(self, sleep_time): logger.info('Bundle manager running.') while not self._is_exiting(): try: self._run_iteration() except Exception: traceback.print_exc() time.sleep(sleep_time) while self._is_making_bundles(): time.sleep(sleep_time) def signal(self): with self._exiting_lock: self._exiting = True def 
_is_exiting(self): with self._exiting_lock: return self._exiting def _run_iteration(self): self._stage_bundles() self._make_bundles() self._schedule_run_bundles() def _schedule_run_bundles(self): """ Sub classes should implement this. See DefaultBundleManager """ raise NotImplementedError def _stage_bundles(self): """ Stages bundles by: 1) Failing any bundles that have any missing or failed dependencies. 2) Staging any bundles that have all ready dependencies. """ bundles = self._model.batch_get_bundles(state=State.CREATED) parent_uuids = set(dep.parent_uuid for bundle in bundles for dep in bundle.dependencies) parents = self._model.batch_get_bundles(uuid=parent_uuids) all_parent_states = {parent.uuid: parent.state for parent in parents} all_parent_uuids = set(all_parent_states) bundles_to_fail = [] bundles_to_stage = [] for bundle in bundles: parent_uuids = set(dep.parent_uuid for dep in bundle.dependencies) try: check_bundles_have_read_permission( self._model, self._model.get_user(bundle.owner_id), parent_uuids ) except PermissionError as e: bundles_to_fail.append((bundle, str(e))) continue missing_uuids = parent_uuids - all_parent_uuids if missing_uuids: bundles_to_fail.append( (bundle, 'Missing parent bundles: %s' % ', '.join(missing_uuids)) ) continue parent_states = {uuid: all_parent_states[uuid] for uuid in parent_uuids} acceptable_states = [State.READY] if bundle.metadata.allow_failed_dependencies: acceptable_states.append(State.FAILED) acceptable_states.append(State.KILLED) else: failed_uuids = [ uuid for uuid, state in parent_states.iteritems() if state == State.FAILED ] killed_uuids = [ uuid for uuid, state in parent_states.iteritems() if state == State.KILLED ] failure_message = '' if failed_uuids: failure_message += ' Parent bundles failed: %s' % ', '.join(failed_uuids) if killed_uuids: failure_message += ' Parent bundles were killed: %s' % ', '.join(killed_uuids) if failure_message: failure_message += ' (Please use the --allow-failed-dependencies flag 
to depend on results fo failed or killed bundles)' bundles_to_fail.append((bundle, failure_message)) continue if all(state in acceptable_states for state in parent_states.itervalues()): bundles_to_stage.append(bundle) for bundle, failure_message in bundles_to_fail: logger.info('Failing bundle %s: %s', bundle.uuid, failure_message) self._model.update_bundle( bundle, {'state': State.FAILED, 'metadata': {'failure_message': failure_message}} ) for bundle in bundles_to_stage: logger.info('Staging %s', bundle.uuid) self._model.update_bundle(bundle, {'state': State.STAGED}) def _make_bundles(self): # Re-stage any stuck bundles. This would happen if the bundle manager # died. for bundle in self._model.batch_get_bundles(state=State.MAKING, bundle_type='make'): if not self._is_making_bundle(bundle.uuid): logger.info('Re-staging make bundle %s', bundle.uuid) self._model.update_bundle(bundle, {'state': State.STAGED}) for bundle in self._model.batch_get_bundles(state=State.STAGED, bundle_type='make'): logger.info('Making bundle %s', bundle.uuid) self._model.update_bundle(bundle, {'state': State.MAKING}) with self._make_uuids_lock: self._make_uuids.add(bundle.uuid) # Making a bundle could take time, so do the work in a separate # thread to ensure quick scheduling. 
threading.Thread(target=BundleManager._make_bundle, args=[self, bundle]).start() def _is_making_bundles(self): with self._make_uuids_lock: return bool(self._make_uuids) def _is_making_bundle(self, uuid): with self._make_uuids_lock: return uuid in self._make_uuids def _make_bundle(self, bundle): try: path = os.path.normpath(self._bundle_store.get_bundle_location(bundle.uuid)) deps = [] for dep in bundle.dependencies: parent_bundle_path = os.path.normpath( self._bundle_store.get_bundle_location(dep.parent_uuid) ) dependency_path = os.path.normpath( os.path.join(parent_bundle_path, dep.parent_path) ) if not dependency_path.startswith(parent_bundle_path) or ( not os.path.islink(dependency_path) and not os.path.exists(dependency_path) ): raise Exception( 'Invalid dependency %s' % (path_util.safe_join(dep.parent_uuid, dep.parent_path)) ) child_path = os.path.normpath(os.path.join(path, dep.child_path)) if not child_path.startswith(path): raise Exception('Invalid key for dependency: %s' % (dep.child_path)) deps.append((dependency_path, child_path)) remove_path(path) if len(deps) == 1 and deps[0][1] == path: path_util.copy(deps[0][0], path, follow_symlinks=False) else: os.mkdir(path) for dependency_path, child_path in deps: path_util.copy(dependency_path, child_path, follow_symlinks=False) self._upload_manager.update_metadata_and_save(bundle, enforce_disk_quota=True) logger.info('Finished making bundle %s', bundle.uuid) self._model.update_bundle(bundle, {'state': State.READY}) except Exception as e: logger.info('Failing bundle %s: %s', bundle.uuid, str(e)) self._model.update_bundle( bundle, {'state': State.FAILED, 'metadata': {'failure_message': str(e)}} ) finally: with self._make_uuids_lock: self._make_uuids.remove(bundle.uuid) def _cleanup_dead_workers(self, workers, callback=None): """ Clean-up workers that we haven't heard from for more than WORKER_TIMEOUT_SECONDS seconds. Such workers probably died without checking out properly. 
""" for worker in workers.workers(): if datetime.datetime.now() - worker['checkin_time'] > datetime.timedelta( seconds=WORKER_TIMEOUT_SECONDS ): logger.info( 'Cleaning up dead worker (%s, %s)', worker['user_id'], worker['worker_id'] ) self._worker_model.worker_cleanup(worker['user_id'], worker['worker_id']) workers.remove(worker) if callback is not None: callback(worker) def _restage_stuck_starting_bundles(self, workers): """ Moves bundles that got stuck in the STARTING state back to the STAGED state so that they can be scheduled to run again. """ for bundle in self._model.batch_get_bundles(state=State.STARTING, bundle_type='run'): if ( not workers.is_running(bundle.uuid) or time.time() - bundle.metadata.last_updated > 5 * 60 ): # Run message went missing. logger.info('Re-staging run bundle %s', bundle.uuid) if self._model.restage_bundle(bundle): workers.restage(bundle.uuid) def _acknowledge_recently_finished_bundles(self, workers): """ Acknowledges recently finished bundles to workers so they can discard run information """ for bundle in self._model.batch_get_bundles(state=State.FINALIZING, bundle_type='run'): worker = workers.get_bundle_worker(bundle.uuid) if worker is None: logger.info( 'Bringing bundle offline %s: %s', bundle.uuid, 'No worker claims bundle' ) self._model.set_offline_bundle(bundle) elif self._worker_model.send_json_message( worker['socket_id'], {'type': 'mark_finalized', 'uuid': bundle.uuid}, 0.2 ): logger.info('Acknowleded finalization of run bundle %s', bundle.uuid) self._model.finish_bundle(bundle) def _bring_offline_stuck_running_bundles(self, workers): """ Make bundles that got stuck in the RUNNING or PREPARING state into WORKER_OFFLINE state. Bundles in WORKER_OFFLINE state can be moved back to the RUNNING or PREPARING state if a worker resumes the bundle indicating that it's still in one of those states. 
""" active_bundles = self._model.batch_get_bundles( state=State.RUNNING, bundle_type='run' ) + self._model.batch_get_bundles(state=State.PREPARING, bundle_type='run') now = time.time() for bundle in active_bundles: failure_message = None if not workers.is_running(bundle.uuid): failure_message = 'No worker claims bundle' if now - bundle.metadata.last_updated > WORKER_TIMEOUT_SECONDS: failure_message = 'Worker offline' if failure_message is not None: logger.info('Bringing bundle offline %s: %s', bundle.uuid, failure_message) self._model.set_offline_bundle(bundle) def _schedule_run_bundles_on_workers(self, workers, user_owned): """ Schedules STAGED bundles to run on the given workers. If user_owned is True, then schedules on workers run by the owner of each bundle. Otherwise, uses CodaLab-owned workers, which have user ID root_user_id. """ for bundle in self._model.batch_get_bundles(state=State.STAGED, bundle_type='run'): if user_owned: workers_list = workers.user_owned_workers(bundle.owner_id) else: workers_list = workers.user_owned_workers(self._model.root_user_id) workers_list = self._filter_and_sort_workers(workers_list, bundle) for worker in workers_list: if self._try_start_bundle(workers, worker, bundle): break else: continue # Try the next worker. def _deduct_worker_resources(self, workers_list): """ From each worker, subtract resources used by running bundles. Modifies the list. """ for worker in workers_list: for uuid in worker['run_uuids']: bundle = self._model.get_bundle(uuid) worker['cpus'] -= self._compute_request_cpus(bundle) worker['gpus'] -= self._compute_request_gpus(bundle) worker['memory_bytes'] -= self._compute_request_memory(bundle) def _filter_and_sort_workers(self, workers_list, bundle): """ Filters the workers to those that can run the given bundle and returns the list sorted in order of preference for running the bundle. 
""" # keep track of which workers have GPUs has_gpu = {} for worker in workers_list: worker_id = worker['worker_id'] has_gpu[worker_id] = worker['gpus'] > 0 # deduct worker resources based on running bundles self._deduct_worker_resources(workers_list) # Filter by CPUs. request_cpus = self._compute_request_cpus(bundle) if request_cpus: workers_list = filter(lambda worker: worker['cpus'] >= request_cpus, workers_list) # Filter by GPUs. request_gpus = self._compute_request_gpus(bundle) if request_gpus: workers_list = filter(lambda worker: worker['gpus'] >= request_gpus, workers_list) # Filter by memory. request_memory = self._compute_request_memory(bundle) if request_memory: workers_list = filter( lambda worker: worker['memory_bytes'] >= request_memory, workers_list ) # Filter by tag. request_queue = bundle.metadata.request_queue if request_queue: tagm = re.match('tag=(.+)', request_queue) if tagm: workers_list = filter(lambda worker: worker['tag'] == tagm.group(1), workers_list) else: # We don't know how to handle this type of request queue # argument. return [] # Sort workers list according to these keys in the following succession: # - whether the worker is a CPU-only worker, if the bundle doesn't request GPUs # - number of dependencies available, descending # - number of free cpus, descending # - random key # # Breaking ties randomly is important, since multiple workers frequently # have the same number of dependencies and free CPUs for a given bundle # (in particular, bundles with no dependencies) and we may end up # selecting the same worker over and over again for new jobs. While this # is not a problem for the performance of the jobs themselves, this can # cause one worker to collect a disproportionate number of dependencies # in its cache. 
needed_deps = set(map(lambda dep: (dep.parent_uuid, dep.parent_path), bundle.dependencies)) def get_sort_key(worker): deps = set(worker['dependencies']) worker_id = worker['worker_id'] # if the bundle doesn't request GPUs (only request CPUs), prioritize workers that don't have GPUs gpu_priority = self._compute_request_gpus(bundle) or not has_gpu[worker_id] return (gpu_priority, len(needed_deps & deps), worker['cpus'], random.random()) workers_list.sort(key=get_sort_key, reverse=True) return workers_list def _try_start_bundle(self, workers, worker, bundle): """ Tries to start running the bundle on the given worker, returning False if that failed. """ if self._model.set_starting_bundle(bundle, worker['user_id'], worker['worker_id']): workers.set_starting(bundle.uuid, worker) if ( self._worker_model.shared_file_system and worker['user_id'] == self._model.root_user_id ): # On a shared file system we create the path here to avoid NFS # directory cache issues. path = self._bundle_store.get_bundle_location(bundle.uuid) remove_path(path) os.mkdir(path) if self._worker_model.send_json_message( worker['socket_id'], self._construct_run_message(worker, bundle), 0.2 ): logger.info('Starting run bundle %s', bundle.uuid) return True else: self._model.restage_bundle(bundle) workers.restage(bundle.uuid) return False else: return False def _compute_request_cpus(self, bundle): """ Compute the CPU limit used for scheduling the run. The default of 1 is for backwards compatibilty for runs from before when we added client-side defaults """ if not bundle.metadata.request_cpus: return 1 return bundle.metadata.request_cpus def _compute_request_gpus(self, bundle): """ Compute the GPU limit used for scheduling the run. 
The default of 0 is for backwards compatibilty for runs from before when we added client-side defaults """ if bundle.metadata.request_gpus is None: return 0 return bundle.metadata.request_gpus def _compute_request_memory(self, bundle): """ Compute the memory limit used for scheduling the run. The default of 2g is for backwards compatibilty for runs from before when we added client-side defaults """ if not bundle.metadata.request_memory: return formatting.parse_size('2g') return formatting.parse_size(bundle.metadata.request_memory) def _compute_request_disk(self, bundle): """ Compute the disk limit used for scheduling the run. The default is min(disk quota the user has left, global max) """ if not bundle.metadata.request_disk: return min( self._model.get_user_disk_quota_left(bundle.owner_id) - 1, self._max_request_disk ) return formatting.parse_size(bundle.metadata.request_disk) def _compute_request_time(self, bundle): """ Compute the time limit used for scheduling the run. The default is min(time quota the user has left, global max) """ if not bundle.metadata.request_time: return min( self._model.get_user_time_quota_left(bundle.owner_id) - 1, self._max_request_time ) return formatting.parse_duration(bundle.metadata.request_time) def _get_docker_image(self, bundle): """ Set docker image to be the default if not specified Unlike other metadata fields this can actually be None from client """ if not bundle.metadata.request_docker_image: if bundle.metadata.request_gpus: return self._default_gpu_image else: return self._default_cpu_image return bundle.metadata.request_docker_image def _construct_run_message(self, worker, bundle): """ Constructs the run message that is sent to the given worker to tell it to run the given bundle. 
""" message = {} message['type'] = 'run' message['bundle'] = bundle_util.bundle_to_bundle_info(self._model, bundle) if self._worker_model.shared_file_system and worker['user_id'] == self._model.root_user_id: message['bundle']['location'] = self._bundle_store.get_bundle_location(bundle.uuid) for dependency in message['bundle']['dependencies']: dependency['location'] = self._bundle_store.get_bundle_location( dependency['parent_uuid'] ) # Figure out the resource requirements. resources = message['resources'] = {} resources['request_cpus'] = self._compute_request_cpus(bundle) resources['request_gpus'] = self._compute_request_gpus(bundle) resources['docker_image'] = self._get_docker_image(bundle) resources['request_time'] = self._compute_request_time(bundle) resources['request_memory'] = self._compute_request_memory(bundle) resources['request_disk'] = self._compute_request_disk(bundle) resources['request_network'] = bundle.metadata.request_network return message
41.846008
140
0.621008
import datetime import logging import os import random import re import sys import threading import time import traceback from codalab.objects.permission import check_bundles_have_read_permission from codalab.common import PermissionError from codalab.lib import bundle_util, formatting, path_util from codalabworker.file_util import remove_path from codalabworker.bundle_state import State logger = logging.getLogger(__name__) WORKER_TIMEOUT_SECONDS = 60 class BundleManager(object): @staticmethod def create(codalab_manager): config = codalab_manager.config.get('workers') if not config: print >>sys.stderr, 'config.json file missing a workers section.' exit(1) from codalab.worker.default_bundle_manager import DefaultBundleManager self = DefaultBundleManager() self._model = codalab_manager.model() self._worker_model = codalab_manager.worker_model() self._bundle_store = codalab_manager.bundle_store() self._upload_manager = codalab_manager.upload_manager() self._exiting_lock = threading.Lock() self._exiting = False self._make_uuids_lock = threading.Lock() self._make_uuids = set() def parse(to_value, field): return to_value(config[field]) if field in config else None self._max_request_time = parse(formatting.parse_duration, 'max_request_time') self._max_request_memory = parse(formatting.parse_size, 'max_request_memory') self._max_request_disk = parse(formatting.parse_size, 'max_request_disk') self._default_cpu_image = config.get('default_cpu_image') self._default_gpu_image = config.get('default_gpu_image') logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) return self def run(self, sleep_time): logger.info('Bundle manager running.') while not self._is_exiting(): try: self._run_iteration() except Exception: traceback.print_exc() time.sleep(sleep_time) while self._is_making_bundles(): time.sleep(sleep_time) def signal(self): with self._exiting_lock: self._exiting = True def _is_exiting(self): with self._exiting_lock: return self._exiting def 
_run_iteration(self): self._stage_bundles() self._make_bundles() self._schedule_run_bundles() def _schedule_run_bundles(self): raise NotImplementedError def _stage_bundles(self): bundles = self._model.batch_get_bundles(state=State.CREATED) parent_uuids = set(dep.parent_uuid for bundle in bundles for dep in bundle.dependencies) parents = self._model.batch_get_bundles(uuid=parent_uuids) all_parent_states = {parent.uuid: parent.state for parent in parents} all_parent_uuids = set(all_parent_states) bundles_to_fail = [] bundles_to_stage = [] for bundle in bundles: parent_uuids = set(dep.parent_uuid for dep in bundle.dependencies) try: check_bundles_have_read_permission( self._model, self._model.get_user(bundle.owner_id), parent_uuids ) except PermissionError as e: bundles_to_fail.append((bundle, str(e))) continue missing_uuids = parent_uuids - all_parent_uuids if missing_uuids: bundles_to_fail.append( (bundle, 'Missing parent bundles: %s' % ', '.join(missing_uuids)) ) continue parent_states = {uuid: all_parent_states[uuid] for uuid in parent_uuids} acceptable_states = [State.READY] if bundle.metadata.allow_failed_dependencies: acceptable_states.append(State.FAILED) acceptable_states.append(State.KILLED) else: failed_uuids = [ uuid for uuid, state in parent_states.iteritems() if state == State.FAILED ] killed_uuids = [ uuid for uuid, state in parent_states.iteritems() if state == State.KILLED ] failure_message = '' if failed_uuids: failure_message += ' Parent bundles failed: %s' % ', '.join(failed_uuids) if killed_uuids: failure_message += ' Parent bundles were killed: %s' % ', '.join(killed_uuids) if failure_message: failure_message += ' (Please use the --allow-failed-dependencies flag to depend on results fo failed or killed bundles)' bundles_to_fail.append((bundle, failure_message)) continue if all(state in acceptable_states for state in parent_states.itervalues()): bundles_to_stage.append(bundle) for bundle, failure_message in bundles_to_fail: logger.info('Failing 
bundle %s: %s', bundle.uuid, failure_message) self._model.update_bundle( bundle, {'state': State.FAILED, 'metadata': {'failure_message': failure_message}} ) for bundle in bundles_to_stage: logger.info('Staging %s', bundle.uuid) self._model.update_bundle(bundle, {'state': State.STAGED}) def _make_bundles(self): for bundle in self._model.batch_get_bundles(state=State.MAKING, bundle_type='make'): if not self._is_making_bundle(bundle.uuid): logger.info('Re-staging make bundle %s', bundle.uuid) self._model.update_bundle(bundle, {'state': State.STAGED}) for bundle in self._model.batch_get_bundles(state=State.STAGED, bundle_type='make'): logger.info('Making bundle %s', bundle.uuid) self._model.update_bundle(bundle, {'state': State.MAKING}) with self._make_uuids_lock: self._make_uuids.add(bundle.uuid) threading.Thread(target=BundleManager._make_bundle, args=[self, bundle]).start() def _is_making_bundles(self): with self._make_uuids_lock: return bool(self._make_uuids) def _is_making_bundle(self, uuid): with self._make_uuids_lock: return uuid in self._make_uuids def _make_bundle(self, bundle): try: path = os.path.normpath(self._bundle_store.get_bundle_location(bundle.uuid)) deps = [] for dep in bundle.dependencies: parent_bundle_path = os.path.normpath( self._bundle_store.get_bundle_location(dep.parent_uuid) ) dependency_path = os.path.normpath( os.path.join(parent_bundle_path, dep.parent_path) ) if not dependency_path.startswith(parent_bundle_path) or ( not os.path.islink(dependency_path) and not os.path.exists(dependency_path) ): raise Exception( 'Invalid dependency %s' % (path_util.safe_join(dep.parent_uuid, dep.parent_path)) ) child_path = os.path.normpath(os.path.join(path, dep.child_path)) if not child_path.startswith(path): raise Exception('Invalid key for dependency: %s' % (dep.child_path)) deps.append((dependency_path, child_path)) remove_path(path) if len(deps) == 1 and deps[0][1] == path: path_util.copy(deps[0][0], path, follow_symlinks=False) else: os.mkdir(path) 
for dependency_path, child_path in deps: path_util.copy(dependency_path, child_path, follow_symlinks=False) self._upload_manager.update_metadata_and_save(bundle, enforce_disk_quota=True) logger.info('Finished making bundle %s', bundle.uuid) self._model.update_bundle(bundle, {'state': State.READY}) except Exception as e: logger.info('Failing bundle %s: %s', bundle.uuid, str(e)) self._model.update_bundle( bundle, {'state': State.FAILED, 'metadata': {'failure_message': str(e)}} ) finally: with self._make_uuids_lock: self._make_uuids.remove(bundle.uuid) def _cleanup_dead_workers(self, workers, callback=None): for worker in workers.workers(): if datetime.datetime.now() - worker['checkin_time'] > datetime.timedelta( seconds=WORKER_TIMEOUT_SECONDS ): logger.info( 'Cleaning up dead worker (%s, %s)', worker['user_id'], worker['worker_id'] ) self._worker_model.worker_cleanup(worker['user_id'], worker['worker_id']) workers.remove(worker) if callback is not None: callback(worker) def _restage_stuck_starting_bundles(self, workers): for bundle in self._model.batch_get_bundles(state=State.STARTING, bundle_type='run'): if ( not workers.is_running(bundle.uuid) or time.time() - bundle.metadata.last_updated > 5 * 60 ): logger.info('Re-staging run bundle %s', bundle.uuid) if self._model.restage_bundle(bundle): workers.restage(bundle.uuid) def _acknowledge_recently_finished_bundles(self, workers): for bundle in self._model.batch_get_bundles(state=State.FINALIZING, bundle_type='run'): worker = workers.get_bundle_worker(bundle.uuid) if worker is None: logger.info( 'Bringing bundle offline %s: %s', bundle.uuid, 'No worker claims bundle' ) self._model.set_offline_bundle(bundle) elif self._worker_model.send_json_message( worker['socket_id'], {'type': 'mark_finalized', 'uuid': bundle.uuid}, 0.2 ): logger.info('Acknowleded finalization of run bundle %s', bundle.uuid) self._model.finish_bundle(bundle) def _bring_offline_stuck_running_bundles(self, workers): active_bundles = 
self._model.batch_get_bundles( state=State.RUNNING, bundle_type='run' ) + self._model.batch_get_bundles(state=State.PREPARING, bundle_type='run') now = time.time() for bundle in active_bundles: failure_message = None if not workers.is_running(bundle.uuid): failure_message = 'No worker claims bundle' if now - bundle.metadata.last_updated > WORKER_TIMEOUT_SECONDS: failure_message = 'Worker offline' if failure_message is not None: logger.info('Bringing bundle offline %s: %s', bundle.uuid, failure_message) self._model.set_offline_bundle(bundle) def _schedule_run_bundles_on_workers(self, workers, user_owned): for bundle in self._model.batch_get_bundles(state=State.STAGED, bundle_type='run'): if user_owned: workers_list = workers.user_owned_workers(bundle.owner_id) else: workers_list = workers.user_owned_workers(self._model.root_user_id) workers_list = self._filter_and_sort_workers(workers_list, bundle) for worker in workers_list: if self._try_start_bundle(workers, worker, bundle): break else: continue def _deduct_worker_resources(self, workers_list): for worker in workers_list: for uuid in worker['run_uuids']: bundle = self._model.get_bundle(uuid) worker['cpus'] -= self._compute_request_cpus(bundle) worker['gpus'] -= self._compute_request_gpus(bundle) worker['memory_bytes'] -= self._compute_request_memory(bundle) def _filter_and_sort_workers(self, workers_list, bundle): has_gpu = {} for worker in workers_list: worker_id = worker['worker_id'] has_gpu[worker_id] = worker['gpus'] > 0 self._deduct_worker_resources(workers_list) request_cpus = self._compute_request_cpus(bundle) if request_cpus: workers_list = filter(lambda worker: worker['cpus'] >= request_cpus, workers_list) request_gpus = self._compute_request_gpus(bundle) if request_gpus: workers_list = filter(lambda worker: worker['gpus'] >= request_gpus, workers_list) request_memory = self._compute_request_memory(bundle) if request_memory: workers_list = filter( lambda worker: worker['memory_bytes'] >= request_memory, 
workers_list ) request_queue = bundle.metadata.request_queue if request_queue: tagm = re.match('tag=(.+)', request_queue) if tagm: workers_list = filter(lambda worker: worker['tag'] == tagm.group(1), workers_list) else: # argument. return [] # Sort workers list according to these keys in the following succession: # - whether the worker is a CPU-only worker, if the bundle doesn't request GPUs needed_deps = set(map(lambda dep: (dep.parent_uuid, dep.parent_path), bundle.dependencies)) def get_sort_key(worker): deps = set(worker['dependencies']) worker_id = worker['worker_id'] gpu_priority = self._compute_request_gpus(bundle) or not has_gpu[worker_id] return (gpu_priority, len(needed_deps & deps), worker['cpus'], random.random()) workers_list.sort(key=get_sort_key, reverse=True) return workers_list def _try_start_bundle(self, workers, worker, bundle): if self._model.set_starting_bundle(bundle, worker['user_id'], worker['worker_id']): workers.set_starting(bundle.uuid, worker) if ( self._worker_model.shared_file_system and worker['user_id'] == self._model.root_user_id ): path = self._bundle_store.get_bundle_location(bundle.uuid) remove_path(path) os.mkdir(path) if self._worker_model.send_json_message( worker['socket_id'], self._construct_run_message(worker, bundle), 0.2 ): logger.info('Starting run bundle %s', bundle.uuid) return True else: self._model.restage_bundle(bundle) workers.restage(bundle.uuid) return False else: return False def _compute_request_cpus(self, bundle): if not bundle.metadata.request_cpus: return 1 return bundle.metadata.request_cpus def _compute_request_gpus(self, bundle): if bundle.metadata.request_gpus is None: return 0 return bundle.metadata.request_gpus def _compute_request_memory(self, bundle): if not bundle.metadata.request_memory: return formatting.parse_size('2g') return formatting.parse_size(bundle.metadata.request_memory) def _compute_request_disk(self, bundle): if not bundle.metadata.request_disk: return min( 
self._model.get_user_disk_quota_left(bundle.owner_id) - 1, self._max_request_disk ) return formatting.parse_size(bundle.metadata.request_disk) def _compute_request_time(self, bundle): if not bundle.metadata.request_time: return min( self._model.get_user_time_quota_left(bundle.owner_id) - 1, self._max_request_time ) return formatting.parse_duration(bundle.metadata.request_time) def _get_docker_image(self, bundle): if not bundle.metadata.request_docker_image: if bundle.metadata.request_gpus: return self._default_gpu_image else: return self._default_cpu_image return bundle.metadata.request_docker_image def _construct_run_message(self, worker, bundle): message = {} message['type'] = 'run' message['bundle'] = bundle_util.bundle_to_bundle_info(self._model, bundle) if self._worker_model.shared_file_system and worker['user_id'] == self._model.root_user_id: message['bundle']['location'] = self._bundle_store.get_bundle_location(bundle.uuid) for dependency in message['bundle']['dependencies']: dependency['location'] = self._bundle_store.get_bundle_location( dependency['parent_uuid'] ) resources = message['resources'] = {} resources['request_cpus'] = self._compute_request_cpus(bundle) resources['request_gpus'] = self._compute_request_gpus(bundle) resources['docker_image'] = self._get_docker_image(bundle) resources['request_time'] = self._compute_request_time(bundle) resources['request_memory'] = self._compute_request_memory(bundle) resources['request_disk'] = self._compute_request_disk(bundle) resources['request_network'] = bundle.metadata.request_network return message
true
true
f70fe8cd388780589f821fdeb68e298ecd437657
2,365
py
Python
azure-servicemanagement-legacy/setup.py
HydAu/AzureSDKForPython
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
[ "Apache-2.0" ]
null
null
null
azure-servicemanagement-legacy/setup.py
HydAu/AzureSDKForPython
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
[ "Apache-2.0" ]
null
null
null
azure-servicemanagement-legacy/setup.py
HydAu/AzureSDKForPython
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python #------------------------------------------------------------------------- # Copyright (c) Microsoft. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #-------------------------------------------------------------------------- from setuptools import setup # azure v0.x is not compatible with this package # azure v0.x used to have a __version__ attribute (newer versions don't) try: import azure try: ver = azure.__version__ raise Exception( 'This package is incompatible with azure=={}. '.format(ver) + 'Uninstall it with "pip uninstall azure".' 
) except AttributeError: pass except ImportError: pass setup( name='azure-servicemanagement-legacy', version='0.20.3', description='Microsoft Azure Legacy Service Management Client Library for Python', long_description=open('README.rst', 'r').read(), license='Apache License 2.0', author='Microsoft Corporation', author_email='ptvshelp@microsoft.com', url='https://github.com/Azure/azure-sdk-for-python', classifiers=[ 'Development Status :: 4 - Beta', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'License :: OSI Approved :: Apache Software License', ], zip_safe=False, packages=[ 'azure', 'azure.servicemanagement', 'azure.servicemanagement._http', ], install_requires=[ 'azure-common', 'requests', ], extras_require = { 'get_certificate_from_publish_settings' : ['pyopenssl'] }, )
34.275362
86
0.614799
from setuptools import setup try: import azure try: ver = azure.__version__ raise Exception( 'This package is incompatible with azure=={}. '.format(ver) + 'Uninstall it with "pip uninstall azure".' ) except AttributeError: pass except ImportError: pass setup( name='azure-servicemanagement-legacy', version='0.20.3', description='Microsoft Azure Legacy Service Management Client Library for Python', long_description=open('README.rst', 'r').read(), license='Apache License 2.0', author='Microsoft Corporation', author_email='ptvshelp@microsoft.com', url='https://github.com/Azure/azure-sdk-for-python', classifiers=[ 'Development Status :: 4 - Beta', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'License :: OSI Approved :: Apache Software License', ], zip_safe=False, packages=[ 'azure', 'azure.servicemanagement', 'azure.servicemanagement._http', ], install_requires=[ 'azure-common', 'requests', ], extras_require = { 'get_certificate_from_publish_settings' : ['pyopenssl'] }, )
true
true
f70fe8e5aa412881ec6f288fac376593ff84e297
74
py
Python
tests/unit/test_version.py
HoverHell/python-gron
21977c36b5fafde6be351b5488673e97a7cb4aeb
[ "MIT" ]
10
2018-06-23T11:32:14.000Z
2021-12-15T09:45:53.000Z
tests/unit/test_version.py
HoverHell/python-gron
21977c36b5fafde6be351b5488673e97a7cb4aeb
[ "MIT" ]
null
null
null
tests/unit/test_version.py
HoverHell/python-gron
21977c36b5fafde6be351b5488673e97a7cb4aeb
[ "MIT" ]
1
2021-04-06T10:56:37.000Z
2021-04-06T10:56:37.000Z
import gron def test_version(): assert hasattr(gron, '__VERSION__')
12.333333
39
0.716216
import gron def test_version(): assert hasattr(gron, '__VERSION__')
true
true
f70fe9543b5a7ed3a240ddb45d836fbfea8bd6f1
2,147
py
Python
podaac/tests/oceancolor_test.py
wongvh07/SPAC4C
d8186bd4dab25472f3a45a7b0464aa95553c92f9
[ "Apache-2.0" ]
null
null
null
podaac/tests/oceancolor_test.py
wongvh07/SPAC4C
d8186bd4dab25472f3a45a7b0464aa95553c92f9
[ "Apache-2.0" ]
null
null
null
podaac/tests/oceancolor_test.py
wongvh07/SPAC4C
d8186bd4dab25472f3a45a7b0464aa95553c92f9
[ "Apache-2.0" ]
null
null
null
# Copyright 2016-2019 California Institute of Technology. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..oceancolor import OceanColor import os from nose.tools import assert_raises import unittest class TestOceanColor(unittest.TestCase): @classmethod def setUp(self): self.oceancolor = OceanColor() # test case for the function file_search() def test_file_search(self): data = self.oceancolor.file_search(sensor='octs', sdate='1996-11-01', edate='1997-01-01', dtype='L3b', add_url='1', results_as_file='1', search='*DAY_CHL*') assert data != None print(data) assert type(data) is type(u'') assert len(data) != 0 # must have a valid sensor AND either 'search' OR 'sub-id' data2 = self.oceancolor.file_search(sensor='octs', sub_id='2218') assert data2 != None assert_raises(Exception, self.oceancolor.file_search, sensor='random') assert_raises(Exception, self.oceancolor.file_search, sdate='1996-11-01', edate='1997-01-01', dtype='L3b', add_url='1', results_as_file='1', search='*DAY_CHL*') # test case for the function get_file(() def test_get_file(self): url = 'https://oceandata.sci.gsfc.nasa.gov/cgi/getfile/O1996307.L3b_DAY_CHL.nc' path = os.path.dirname(os.path.abspath(__file__)) granule_name = self.oceancolor.get_file(url, path) assert granule_name != None assert_raises(Exception, self.oceancolor.get_file, url='ABCDEF') path = os.path.join(os.path.dirname(__file__), granule_name) os.remove(path)
37.666667
101
0.681416
from ..oceancolor import OceanColor import os from nose.tools import assert_raises import unittest class TestOceanColor(unittest.TestCase): @classmethod def setUp(self): self.oceancolor = OceanColor() def test_file_search(self): data = self.oceancolor.file_search(sensor='octs', sdate='1996-11-01', edate='1997-01-01', dtype='L3b', add_url='1', results_as_file='1', search='*DAY_CHL*') assert data != None print(data) assert type(data) is type(u'') assert len(data) != 0 data2 = self.oceancolor.file_search(sensor='octs', sub_id='2218') assert data2 != None assert_raises(Exception, self.oceancolor.file_search, sensor='random') assert_raises(Exception, self.oceancolor.file_search, sdate='1996-11-01', edate='1997-01-01', dtype='L3b', add_url='1', results_as_file='1', search='*DAY_CHL*') def test_get_file(self): url = 'https://oceandata.sci.gsfc.nasa.gov/cgi/getfile/O1996307.L3b_DAY_CHL.nc' path = os.path.dirname(os.path.abspath(__file__)) granule_name = self.oceancolor.get_file(url, path) assert granule_name != None assert_raises(Exception, self.oceancolor.get_file, url='ABCDEF') path = os.path.join(os.path.dirname(__file__), granule_name) os.remove(path)
true
true
f70fe95b85d78d30cacb9a1b024c50b672432093
591
py
Python
PokeType/compiler/data_types.py
Daggy1234/PokeType
a79c8115ca9bb13e24c4fd4db4931b3094a96549
[ "MIT" ]
2
2021-11-06T14:09:40.000Z
2021-11-14T21:24:56.000Z
PokeType/compiler/data_types.py
Daggy1234/PokeType
a79c8115ca9bb13e24c4fd4db4931b3094a96549
[ "MIT" ]
null
null
null
PokeType/compiler/data_types.py
Daggy1234/PokeType
a79c8115ca9bb13e24c4fd4db4931b3094a96549
[ "MIT" ]
null
null
null
from rply import ParserGenerator from poketype.ast import Number, Boolean, NegNumber class DataTypes(): def __init__(self, pg: ParserGenerator) -> None: @pg.production('expression : NUMBER') def expression_number(p): return Number(int(p[0].getstr())) @pg.production('expression : BOOLEAN') def expression_number(p): b_val = p[0].getstr() if b_val == "true": return Boolean(True) else: return Boolean(False) @pg.production('expression : NEG NUMBER') def expression_number_neg(p): b_val = p[1].getstr() return NegNumber(int(p[1].getstr()) * -1)
24.625
51
0.681895
from rply import ParserGenerator from poketype.ast import Number, Boolean, NegNumber class DataTypes(): def __init__(self, pg: ParserGenerator) -> None: @pg.production('expression : NUMBER') def expression_number(p): return Number(int(p[0].getstr())) @pg.production('expression : BOOLEAN') def expression_number(p): b_val = p[0].getstr() if b_val == "true": return Boolean(True) else: return Boolean(False) @pg.production('expression : NEG NUMBER') def expression_number_neg(p): b_val = p[1].getstr() return NegNumber(int(p[1].getstr()) * -1)
true
true
f70fea670ad876700ffca4a80a4ba82548c929e5
1,300
py
Python
cinder/brick/initiator/executor.py
hopem/cinder
7df656ff0be9fef34a4e19f7b83a0cae554db1e7
[ "Apache-2.0" ]
null
null
null
cinder/brick/initiator/executor.py
hopem/cinder
7df656ff0be9fef34a4e19f7b83a0cae554db1e7
[ "Apache-2.0" ]
null
null
null
cinder/brick/initiator/executor.py
hopem/cinder
7df656ff0be9fef34a4e19f7b83a0cae554db1e7
[ "Apache-2.0" ]
null
null
null
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Generic exec utility that allows us to set the execute and root_helper attributes for putils. Some projects need their own execute wrapper and root_helper settings, so this provides that hook. """ from cinder.openstack.common import processutils as putils class Executor(object): def __init__(self, execute=putils.execute, root_helper="sudo", *args, **kwargs): self.set_execute(execute) self.set_root_helper(root_helper) def set_execute(self, execute): self._execute = execute def set_root_helper(self, helper): self._root_helper = helper
35.135135
78
0.718462
from cinder.openstack.common import processutils as putils class Executor(object): def __init__(self, execute=putils.execute, root_helper="sudo", *args, **kwargs): self.set_execute(execute) self.set_root_helper(root_helper) def set_execute(self, execute): self._execute = execute def set_root_helper(self, helper): self._root_helper = helper
true
true
f70feab366fbb1f7eb3273b9e7e6a5b34188a5d9
8,461
py
Python
doc/oldcode/swhlab/core/memtest.py
swharden/SWHLab
a86c3c65323cec809a4bd4f81919644927094bf5
[ "MIT" ]
15
2017-03-09T03:08:32.000Z
2021-11-16T11:31:55.000Z
doc/oldcode/swhlab/core/memtest.py
swharden/SWHLab
a86c3c65323cec809a4bd4f81919644927094bf5
[ "MIT" ]
2
2016-12-06T16:27:54.000Z
2017-11-04T23:48:49.000Z
doc/oldcode/swhlab/core/memtest.py
swharden/SWHLab
a86c3c65323cec809a4bd4f81919644927094bf5
[ "MIT" ]
9
2016-10-19T13:32:10.000Z
2020-04-01T21:53:40.000Z
""" Membrane test routines for voltage clamp experiments. creates abf.MTs[sweep]={} #with keys like Ih, Ra, Rm, etc Example usage: abf=swhlab.ABF('../abfs/group/16701010.abf') swhlab.memtest.memtest(abf) #performs memtest on all sweeps swhlab.memtest.checkSweep(abf) #lets you eyeball check how it did pylab.show() """ import os import sys import pylab import numpy as np import time import swhlab import swhlab.core.common as cm exampleABF=swhlab.ABF() def memtestSweepVC(abf=exampleABF): """ perform memtest on current sweep in VC mode. Return Ih, Ra, Rm, etc. All variable names are explained in /swhlab/docs/memtest.ppt """ if abf.protoSeqY[1]>abf.protoSeqY[0] or len(abf.protoSeqY)<3: return "protocol doesn't step down and back up" TA,TB=int(abf.protoSeqX[1]),int(abf.protoSeqX[2]) dT=int(TB-TA) T1A=int(TA+.5*dT) T1B=int(TA+.9*dT) T2A=T1A+dT T2B=T1B+dT P1=np.average(abf.dataY[T1A:T1B]) P2=np.average(abf.dataY[T2A:T2B]) dI=P2-P1 dV=abf.protoSeqY[2]-abf.protoSeqY[1] PP=np.max(abf.dataY[TB:TB+100])# peak found within first 100 points TP=np.where(abf.dataY[TB:TB+150]==PP)[0][0]+TB dP=PP-P1 dTC=PP-P2 PCA=P2+.9*dTC # upper fraction for Cm detection PCB=P2+.1*dTC # upper fraction for Cm detection PCtau=P2+.37*dTC # crossing point of theoretical tau TCA=np.where(abf.dataY[TP:T2A]<PCA)[0][0]+TP TCB=np.where(abf.dataY[TP:T2A]<PCB)[0][0]+TP dTCT=TCB-TCA #number of points available for fitting Ih=P2 Ra=(dV*10**3)/(PP-P2) #MOhm=uV/pA Rm=(dV*10**3)/(P2-P1) #MOhm=uV/pA fitM,fitT,fitB,fitTau=cm.fit_exp(abf.dataY[TCA:TCB]) #same units as given fitTau=fitTau*1000/abf.rate #time constant convert to ms units Tv=fitTau #time constant of extrinsic voltage clamp Cm=Tv/Ra*1000 #us/MOhm is pF Tm=Rm*Cm/1000 #time constant of cell membrane (intrinsic voltage clamp) del abf return locals() def memtestIC(abf=exampleABF): """ IC memtest is different. Make an average sweep, then curve fit it. This only RETURNS the memtest, it does not assign it. 
""" if abf.protoSeqY[1]>abf.protoSeqY[0] or len(abf.protoSeqY)<3: return "protocol doesn't step down and back up" abf.baseline=[abf.protoSeqX[1]/abf.rate*.75,abf.protoSeqX[1]/abf.rate] T1A,T1B=np.array(abf.baseline)*abf.rate Xs,Ys,Er=abf.average_sweep() T2A=abf.protoSeqX[2]-abf.protoSeqX[1] T2B=abf.protoSeqX[2] M2=np.average(Ys[T2A:T2B]) MCA=.1*M2 # set 90% here MCB=.9*M2 # set 10% here TCA=np.where(Ys<MCA)[0][0] TCB=np.where(Ys<MCB)[0][0] m,t,b,tc=cm.fit_exp(Ys[TCA:TCB]) #do the fit! dI=abs(abf.protoSeqY[2]-abf.protoSeqY[1]) #pA dV=abs(M2) #mV Rm=dV/dI*1000 #uV/pA = MOhm Cm=tc/Rm #ms/MOhm del abf,Ys,Xs,Er return locals() #convert to structured array def memtest(abf=exampleABF,firstSweepOnly=False,plotToo=False,saveToo=True): """perform memtest on all sweeps.""" timeStart=time.clock() if abf.units=="mV": abf.MTs = memtestIC(abf) else: abf.MTs=[None]*abf.sweeps for sweep in range(abf.sweeps): abf.setSweep(sweep) result=memtestSweepVC(abf) if type(result) is dict: abf.MTs[abf.currentSweep]=result else: print("MEMTEST FAILED - sweep %d -"%sweep,result) if firstSweepOnly: return abf.MTs = cm.matrixfromDicts(abf.MTs) #convert to structured array took=time.clock()-timeStart print(" -- memtest performed on %d sweeps in %.02f ms"%(abf.sweeps,took*1000)) if saveToo: abf.saveThing(abf.MTs,"MTs") def plot_standard4(abf=exampleABF): """make a standard memtest plot showing Ih, Ra, etc. 
with time.""" if abf.sweeps<2: return swhlab.plot.new(abf) Xs=np.arange(abf.sweeps)*abf.sweepInterval/60 subplots=[221,222,223,224] features=['Ih','Ra','Rm','Cm'] units=['pA','MOhm','MOhm','pF'] for subplot,feature,unit in zip(subplots,features,units): pylab.subplot(subplot) pylab.grid(alpha=.5) #pylab.title(feature) pylab.plot(Xs,cm.dictVals(abf.MTs,feature),'.-',alpha=.5) pylab.xlabel(None) pylab.ylabel("%s (%s)"%(feature,unit)) swhlab.plot.comments(abf,True) pylab.margins(0,.1) def checkSweepIC(abf=exampleABF,sweep=0): """Produce an eyeball-ready indication how the MT was calculated in IC.""" _keys = abf.MTs.dtype.names for key in _keys: globals()[key]=abf.MTs[key] # only global for this module, that's fine fitted=cm.algo_exp(np.arange(TCB-TCA),m,t,b) swhlab.plot.new(abf,forceNewFigure=True) Xs,Ys,Er=abf.average_sweep() for subplot in [121,122]: pylab.subplot(subplot) pylab.axhline(0,color='b',lw=2,alpha=.5,ls="--") pylab.axhline(M2,color='b',lw=2,alpha=.5,ls="--") swhlab.plot.sweep(abf,'all',rainbow=False,color='#CCCCCC',alpha=.5) pylab.plot(Xs,Ys,color='k',alpha=.5) pylab.plot(Xs[T1A:T1B],Ys[T1A:T1B],color='b',lw=2) pylab.plot(Xs[T2A:T2B],Ys[T2A:T2B],color='b',lw=2) pylab.plot(abf.dataX[TCA:TCB],fitted,color='r',lw=2,ls='--') pylab.axis([(TCA-100)/abf.rate,(TCB+100)/abf.rate,None,None]) pylab.tight_layout() msg="tau: %.02f ms\n"%(tc/abf.rate*1000) msg+="Rm: %.02f MOhm\n"%(Rm) msg+="Cm: %.02f pF"%(Cm) pylab.annotate(msg,(.75,.95),ha='left',va='top',weight='bold',family='monospace', xycoords='figure fraction',size=12,color='g') swhlab.plot.annotate(abf) return def checkSweep(abf=exampleABF,sweep=0): """Produce an eyeball-ready indication how the MT was calculated in VC.""" if abf.units=="mV": return checkSweepIC(abf,sweep) if abf.MTs[sweep] is None: return False #no memtest data even found _keys = abf.MTs[sweep].dtype.names for key in _keys: globals()[key]=abf.MTs[sweep][key] # only global for this module, that's fine. 
_msg2="Average (n=%d)\n"%abf.sweeps _msg="" for i in range(len(_keys)): _msg+="%s=%s\n"%(_keys[i],abf.MTs[sweep][i]) if _keys[i] in ['Ih','Ra','Rm','Cm','Tv','Tm']: _msg2+="%s=%.02f\n"%(_keys[i],abf.MTs[sweep][i]) fitted=cm.algo_exp(np.arange(TCB-TCA),fitM,fitT,fitB) pylab.figure(figsize=(8,8)) for subplot in [211,212]: pylab.subplot(subplot) #pylab.plot(abf.dataX,abf.dataY,alpha=.2,color='k',lw=2) pylab.plot(abf.dataX[:TCA],abf.dataY[:TCA],alpha=.2,color='k',lw=2) pylab.plot(abf.dataX[TCB:],abf.dataY[TCB:],alpha=.2,color='k',lw=2) pylab.plot(abf.dataX[TCA:TCB],abf.dataY[TCA:TCB],'o',alpha=.5,lw=4,mfc='none',mec='r') pylab.plot(abf.dataX[T1A:T1B],abf.dataY[T1A:T1B],alpha=.4,color='b') pylab.plot(abf.dataX[T2A:T2B],abf.dataY[T2A:T2B],alpha=.4,color='b') pylab.plot(abf.dataX[TCA:TCB],fitted,color='k',lw=2,ls="--") for i in [TA, TB]: pylab.axvline(i/abf.rate,color='k',ls='--',alpha=.4) for i in [P1,P2]: pylab.axhline(i,color='b',ls="--",alpha=.5) for i in [PCA,PCB,PP]: pylab.axhline(i,color='g',ls="--",alpha=.5) pylab.tight_layout() pylab.subplots_adjust(right=0.75) pylab.annotate(_msg,(.8,.75),ha='left',va='top',alpha=.5, xycoords='figure fraction',family='monospace',size=10) pylab.annotate(_msg2,(.8,.95),ha='left',va='top',weight='bold',family='monospace', xycoords='figure fraction',size=12,color='g') pylab.subplot(211) pylab.axis([None,abf.dataX[T2B]+.05,None,None]) pylab.subplot(212) pylab.axis([(TB-20)/abf.rate,(TCB+20)/abf.rate,P1-20,PP+20]) swhlab.plot.annotate(abf) for key in _keys: del key #be clean about screwing with globals() return def test(): """voltage clamp MT.""" abf=swhlab.ABF(r'C:\Apps\pythonModules\abfs\16701010.abf') swhlab.memtest.memtest(abf) #performs memtest on all sweeps swhlab.memtest.checkSweep(abf) #lets you eyeball check how it did pylab.show() def test2(): """current clamp MT.""" abf=swhlab.ABF(r'C:\Apps\pythonModules\abfs\16701006.abf') swhlab.memtest.memtest(abf) #performs memtest on all sweeps swhlab.memtest.checkSweep(abf) #lets you 
eyeball check how it did pylab.show() if __name__=="__main__": #test() #test2() test3() print("DONE")
38.811927
94
0.630304
import os import sys import pylab import numpy as np import time import swhlab import swhlab.core.common as cm exampleABF=swhlab.ABF() def memtestSweepVC(abf=exampleABF): if abf.protoSeqY[1]>abf.protoSeqY[0] or len(abf.protoSeqY)<3: return "protocol doesn't step down and back up" TA,TB=int(abf.protoSeqX[1]),int(abf.protoSeqX[2]) dT=int(TB-TA) T1A=int(TA+.5*dT) T1B=int(TA+.9*dT) T2A=T1A+dT T2B=T1B+dT P1=np.average(abf.dataY[T1A:T1B]) P2=np.average(abf.dataY[T2A:T2B]) dI=P2-P1 dV=abf.protoSeqY[2]-abf.protoSeqY[1] PP=np.max(abf.dataY[TB:TB+100])# peak found within first 100 points TP=np.where(abf.dataY[TB:TB+150]==PP)[0][0]+TB dP=PP-P1 dTC=PP-P2 PCA=P2+.9*dTC # upper fraction for Cm detection PCB=P2+.1*dTC # upper fraction for Cm detection PCtau=P2+.37*dTC # crossing point of theoretical tau TCA=np.where(abf.dataY[TP:T2A]<PCA)[0][0]+TP TCB=np.where(abf.dataY[TP:T2A]<PCB)[0][0]+TP dTCT=TCB-TCA #number of points available for fitting Ih=P2 Ra=(dV*10**3)/(PP-P2) #MOhm=uV/pA Rm=(dV*10**3)/(P2-P1) #MOhm=uV/pA fitM,fitT,fitB,fitTau=cm.fit_exp(abf.dataY[TCA:TCB]) #same units as given fitTau=fitTau*1000/abf.rate #time constant convert to ms units Tv=fitTau #time constant of extrinsic voltage clamp Cm=Tv/Ra*1000 #us/MOhm is pF Tm=Rm*Cm/1000 #time constant of cell membrane (intrinsic voltage clamp) del abf return locals() def memtestIC(abf=exampleABF): if abf.protoSeqY[1]>abf.protoSeqY[0] or len(abf.protoSeqY)<3: return "protocol doesn't step down and back up" abf.baseline=[abf.protoSeqX[1]/abf.rate*.75,abf.protoSeqX[1]/abf.rate] T1A,T1B=np.array(abf.baseline)*abf.rate Xs,Ys,Er=abf.average_sweep() T2A=abf.protoSeqX[2]-abf.protoSeqX[1] T2B=abf.protoSeqX[2] M2=np.average(Ys[T2A:T2B]) MCA=.1*M2 MCB=.9*M2 TCA=np.where(Ys<MCA)[0][0] TCB=np.where(Ys<MCB)[0][0] m,t,b,tc=cm.fit_exp(Ys[TCA:TCB]) dI=abs(abf.protoSeqY[2]-abf.protoSeqY[1]) dV=abs(M2) Rm=dV/dI*1000 Cm=tc/Rm del abf,Ys,Xs,Er return locals() def memtest(abf=exampleABF,firstSweepOnly=False,plotToo=False,saveToo=True): 
timeStart=time.clock() if abf.units=="mV": abf.MTs = memtestIC(abf) else: abf.MTs=[None]*abf.sweeps for sweep in range(abf.sweeps): abf.setSweep(sweep) result=memtestSweepVC(abf) if type(result) is dict: abf.MTs[abf.currentSweep]=result else: print("MEMTEST FAILED - sweep %d -"%sweep,result) if firstSweepOnly: return abf.MTs = cm.matrixfromDicts(abf.MTs) took=time.clock()-timeStart print(" -- memtest performed on %d sweeps in %.02f ms"%(abf.sweeps,took*1000)) if saveToo: abf.saveThing(abf.MTs,"MTs") def plot_standard4(abf=exampleABF): if abf.sweeps<2: return swhlab.plot.new(abf) Xs=np.arange(abf.sweeps)*abf.sweepInterval/60 subplots=[221,222,223,224] features=['Ih','Ra','Rm','Cm'] units=['pA','MOhm','MOhm','pF'] for subplot,feature,unit in zip(subplots,features,units): pylab.subplot(subplot) pylab.grid(alpha=.5) pylab.plot(Xs,cm.dictVals(abf.MTs,feature),'.-',alpha=.5) pylab.xlabel(None) pylab.ylabel("%s (%s)"%(feature,unit)) swhlab.plot.comments(abf,True) pylab.margins(0,.1) def checkSweepIC(abf=exampleABF,sweep=0): _keys = abf.MTs.dtype.names for key in _keys: globals()[key]=abf.MTs[key] fitted=cm.algo_exp(np.arange(TCB-TCA),m,t,b) swhlab.plot.new(abf,forceNewFigure=True) Xs,Ys,Er=abf.average_sweep() for subplot in [121,122]: pylab.subplot(subplot) pylab.axhline(0,color='b',lw=2,alpha=.5,ls="--") pylab.axhline(M2,color='b',lw=2,alpha=.5,ls="--") swhlab.plot.sweep(abf,'all',rainbow=False,color=' pylab.plot(Xs,Ys,color='k',alpha=.5) pylab.plot(Xs[T1A:T1B],Ys[T1A:T1B],color='b',lw=2) pylab.plot(Xs[T2A:T2B],Ys[T2A:T2B],color='b',lw=2) pylab.plot(abf.dataX[TCA:TCB],fitted,color='r',lw=2,ls='--') pylab.axis([(TCA-100)/abf.rate,(TCB+100)/abf.rate,None,None]) pylab.tight_layout() msg="tau: %.02f ms\n"%(tc/abf.rate*1000) msg+="Rm: %.02f MOhm\n"%(Rm) msg+="Cm: %.02f pF"%(Cm) pylab.annotate(msg,(.75,.95),ha='left',va='top',weight='bold',family='monospace', xycoords='figure fraction',size=12,color='g') swhlab.plot.annotate(abf) return def checkSweep(abf=exampleABF,sweep=0): 
if abf.units=="mV": return checkSweepIC(abf,sweep) if abf.MTs[sweep] is None: return False #no memtest data even found _keys = abf.MTs[sweep].dtype.names for key in _keys: globals()[key]=abf.MTs[sweep][key] # only global for this module, that's fine. _msg2="Average (n=%d)\n"%abf.sweeps _msg="" for i in range(len(_keys)): _msg+="%s=%s\n"%(_keys[i],abf.MTs[sweep][i]) if _keys[i] in ['Ih','Ra','Rm','Cm','Tv','Tm']: _msg2+="%s=%.02f\n"%(_keys[i],abf.MTs[sweep][i]) fitted=cm.algo_exp(np.arange(TCB-TCA),fitM,fitT,fitB) pylab.figure(figsize=(8,8)) for subplot in [211,212]: pylab.subplot(subplot) pylab.plot(abf.dataX[:TCA],abf.dataY[:TCA],alpha=.2,color='k',lw=2) pylab.plot(abf.dataX[TCB:],abf.dataY[TCB:],alpha=.2,color='k',lw=2) pylab.plot(abf.dataX[TCA:TCB],abf.dataY[TCA:TCB],'o',alpha=.5,lw=4,mfc='none',mec='r') pylab.plot(abf.dataX[T1A:T1B],abf.dataY[T1A:T1B],alpha=.4,color='b') pylab.plot(abf.dataX[T2A:T2B],abf.dataY[T2A:T2B],alpha=.4,color='b') pylab.plot(abf.dataX[TCA:TCB],fitted,color='k',lw=2,ls="--") for i in [TA, TB]: pylab.axvline(i/abf.rate,color='k',ls='--',alpha=.4) for i in [P1,P2]: pylab.axhline(i,color='b',ls="--",alpha=.5) for i in [PCA,PCB,PP]: pylab.axhline(i,color='g',ls="--",alpha=.5) pylab.tight_layout() pylab.subplots_adjust(right=0.75) pylab.annotate(_msg,(.8,.75),ha='left',va='top',alpha=.5, xycoords='figure fraction',family='monospace',size=10) pylab.annotate(_msg2,(.8,.95),ha='left',va='top',weight='bold',family='monospace', xycoords='figure fraction',size=12,color='g') pylab.subplot(211) pylab.axis([None,abf.dataX[T2B]+.05,None,None]) pylab.subplot(212) pylab.axis([(TB-20)/abf.rate,(TCB+20)/abf.rate,P1-20,PP+20]) swhlab.plot.annotate(abf) for key in _keys: del key return def test(): abf=swhlab.ABF(r'C:\Apps\pythonModules\abfs\16701010.abf') swhlab.memtest.memtest(abf) swhlab.memtest.checkSweep(abf) pylab.show() def test2(): abf=swhlab.ABF(r'C:\Apps\pythonModules\abfs\16701006.abf') swhlab.memtest.memtest(abf) swhlab.memtest.checkSweep(abf) 
pylab.show() if __name__=="__main__": test3() print("DONE")
true
true
f70feac9370b5f157bc2c9998745353b5bf8bb67
17,023
py
Python
lenstronomy/LensModel/MultiPlane/multi_plane_base.py
JelleAalbers/lenstronomy
6db785667ff099fa8338e972b66253b2901b2827
[ "MIT" ]
null
null
null
lenstronomy/LensModel/MultiPlane/multi_plane_base.py
JelleAalbers/lenstronomy
6db785667ff099fa8338e972b66253b2901b2827
[ "MIT" ]
null
null
null
lenstronomy/LensModel/MultiPlane/multi_plane_base.py
JelleAalbers/lenstronomy
6db785667ff099fa8338e972b66253b2901b2827
[ "MIT" ]
null
null
null
import numpy as np from lenstronomy.Cosmo.background import Background from lenstronomy.LensModel.profile_list_base import ProfileListBase import lenstronomy.Util.constants as const __all__ = ['MultiPlaneBase'] class MultiPlaneBase(ProfileListBase): """ Multi-plane lensing class The lens model deflection angles are in units of reduced deflections from the specified redshift of the lens to the source redshift of the class instance. """ def __init__(self, lens_model_list, lens_redshift_list, z_source_convention, cosmo=None, numerical_alpha_class=None, cosmo_interp=False, z_interp_stop=None, num_z_interp=100): """ A description of the recursive multi-plane formalism can be found e.g. here: https://arxiv.org/abs/1312.1536 :param lens_model_list: list of lens model strings :param lens_redshift_list: list of floats with redshifts of the lens models indicated in lens_model_list :param z_source_convention: float, redshift of a source to define the reduced deflection angles of the lens models. If None, 'z_source' is used. :param cosmo: instance of astropy.cosmology :param numerical_alpha_class: an instance of a custom class for use in NumericalAlpha() lens model (see documentation in Profiles/numerical_alpha) """ if z_interp_stop is None: z_interp_stop = z_source_convention self._cosmo_bkg = Background(cosmo, interp=cosmo_interp, z_stop=z_interp_stop, num_interp=num_z_interp) self._z_source_convention = z_source_convention if len(lens_redshift_list) > 0: z_lens_max = np.max(lens_redshift_list) if z_lens_max >= z_source_convention: raise ValueError('deflector redshifts higher or equal the source redshift convention (%s >= %s for the reduced lens' ' model quantities not allowed (leads to negative reduced deflection angles!' 
% (z_lens_max, z_source_convention)) if not len(lens_model_list) == len(lens_redshift_list): raise ValueError("The length of lens_model_list does not correspond to redshift_list") self._lens_redshift_list = lens_redshift_list super(MultiPlaneBase, self).__init__(lens_model_list, numerical_alpha_class=numerical_alpha_class, lens_redshift_list=lens_redshift_list, z_source_convention=z_source_convention) if len(lens_model_list) < 1: self._sorted_redshift_index = [] else: self._sorted_redshift_index = self._index_ordering(lens_redshift_list) z_before = 0 T_z = 0 self._T_ij_list = [] self._T_z_list = [] # Sort redshift for vectorized reduced2physical factor calculation if len(lens_model_list)<1: self._reduced2physical_factor = [] else: z_sort = np.array(self._lens_redshift_list)[self._sorted_redshift_index] z_source_array = np.ones(z_sort.shape)*z_source_convention self._reduced2physical_factor = self._cosmo_bkg.d_xy(0, z_source_convention) / self._cosmo_bkg.d_xy(z_sort, z_source_array) for idex in self._sorted_redshift_index: z_lens = self._lens_redshift_list[idex] if z_before == z_lens: delta_T = 0 else: T_z = self._cosmo_bkg.T_xy(0, z_lens) delta_T = self._cosmo_bkg.T_xy(z_before, z_lens) self._T_ij_list.append(delta_T) self._T_z_list.append(T_z) z_before = z_lens def ray_shooting_partial(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_lens, include_z_start=False, T_ij_start=None, T_ij_end=None): """ ray-tracing through parts of the coin, starting with (x,y) co-moving distances and angles (alpha_x, alpha_y) at redshift z_start and then backwards to redshift z_stop :param x: co-moving position [Mpc] :param y: co-moving position [Mpc] :param alpha_x: ray angle at z_start [arcsec] :param alpha_y: ray angle at z_start [arcsec] :param z_start: redshift of start of computation :param z_stop: redshift where output is computed :param kwargs_lens: lens model keyword argument list :param include_z_start: bool, if True, includes the computation of the deflection angle at 
the same redshift as the start of the ray-tracing. ATTENTION: deflection angles at the same redshift as z_stop will be computed always! This can lead to duplications in the computation of deflection angles. :param T_ij_start: transverse angular distance between the starting redshift to the first lens plane to follow. If not set, will compute the distance each time this function gets executed. :param T_ij_end: transverse angular distance between the last lens plane being computed and z_end. If not set, will compute the distance each time this function gets executed. :return: co-moving position and angles at redshift z_stop """ x = np.array(x, dtype=float) y = np.array(y, dtype=float) alpha_x = np.array(alpha_x) alpha_y = np.array(alpha_y) z_lens_last = z_start first_deflector = True for i, idex in enumerate(self._sorted_redshift_index): z_lens = self._lens_redshift_list[idex] if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop: if first_deflector is True: if T_ij_start is None: if z_start == 0: delta_T = self._T_ij_list[0] else: delta_T = self._cosmo_bkg.T_xy(z_start, z_lens) else: delta_T = T_ij_start first_deflector = False else: delta_T = self._T_ij_list[i] x, y = self._ray_step_add(x, y, alpha_x, alpha_y, delta_T) alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i) z_lens_last = z_lens if T_ij_end is None: if z_lens_last == z_stop: delta_T = 0 else: delta_T = self._cosmo_bkg.T_xy(z_lens_last, z_stop) else: delta_T = T_ij_end x, y = self._ray_step_add(x, y, alpha_x, alpha_y, delta_T) return x, y, alpha_x, alpha_y def transverse_distance_start_stop(self, z_start, z_stop, include_z_start=False): """ computes the transverse distance (T_ij) that is required by the ray-tracing between the starting redshift and the first deflector afterwards and the last deflector before the end of the ray-tracing. 
:param z_start: redshift of the start of the ray-tracing :param z_stop: stop of ray-tracing :param include_z_start: boolean, if True includes the computation of the starting position if the first deflector is at z_start :return: T_ij_start, T_ij_end """ z_lens_last = z_start first_deflector = True T_ij_start = None for i, idex in enumerate(self._sorted_redshift_index): z_lens = self._lens_redshift_list[idex] if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop: if first_deflector is True: T_ij_start = self._cosmo_bkg.T_xy(z_start, z_lens) first_deflector = False z_lens_last = z_lens T_ij_end = self._cosmo_bkg.T_xy(z_lens_last, z_stop) return T_ij_start, T_ij_end def geo_shapiro_delay(self, theta_x, theta_y, kwargs_lens, z_stop, T_z_stop=None, T_ij_end=None): """ geometric and Shapiro (gravitational) light travel time relative to a straight path through the coordinate (0,0) Negative sign means earlier arrival time :param theta_x: angle in x-direction on the image :param theta_y: angle in y-direction on the image :param kwargs_lens: lens model keyword argument list :param z_stop: redshift of the source to stop the backwards ray-tracing :param T_z_stop: optional, transversal angular distance from z=0 to z_stop :param T_ij_end: optional, transversal angular distance between the last lensing plane and the source plane :return: dt_geo, dt_shapiro, [days] """ dt_grav = np.zeros_like(theta_x, dtype=float) dt_geo = np.zeros_like(theta_x, dtype=float) x = np.zeros_like(theta_x, dtype=float) y = np.zeros_like(theta_y, dtype=float) alpha_x = np.array(theta_x, dtype=float) alpha_y = np.array(theta_y, dtype=float) i = 0 z_lens_last = 0 for i, index in enumerate(self._sorted_redshift_index): z_lens = self._lens_redshift_list[index] if z_lens <= z_stop: T_ij = self._T_ij_list[i] x_new, y_new = self._ray_step(x, y, alpha_x, alpha_y, T_ij) if i == 0: pass elif T_ij > 0: T_j = self._T_z_list[i] T_i = self._T_z_list[i - 1] beta_i_x, beta_i_y = x / T_i, y / 
T_i beta_j_x, beta_j_y = x_new / T_j, y_new / T_j dt_geo_new = self._geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij) dt_geo += dt_geo_new x, y = x_new, y_new dt_grav_new = self._gravitational_delay(x, y, kwargs_lens, i, z_lens) alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i) dt_grav += dt_grav_new z_lens_last = z_lens if T_ij_end is None: T_ij_end = self._cosmo_bkg.T_xy(z_lens_last, z_stop) T_ij = T_ij_end x_new, y_new = self._ray_step(x, y, alpha_x, alpha_y, T_ij) if T_z_stop is None: T_z_stop = self._cosmo_bkg.T_xy(0, z_stop) T_j = T_z_stop T_i = self._T_z_list[i] beta_i_x, beta_i_y = x / T_i, y / T_i beta_j_x, beta_j_y = x_new / T_j, y_new / T_j dt_geo_new = self._geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij) dt_geo += dt_geo_new return dt_geo, dt_grav @staticmethod def _index_ordering(redshift_list): """ :param redshift_list: list of redshifts :return: indexes in ascending order to be evaluated (from z=0 to z=z_source) """ redshift_list = np.array(redshift_list) #sort_index = np.argsort(redshift_list[redshift_list < z_source]) sort_index = np.argsort(redshift_list) #if len(sort_index) < 1: # Warning("There is no lens object between observer at z=0 and source at z=%s" % z_source) return sort_index def _reduced2physical_deflection(self, alpha_reduced, index_lens): """ alpha_reduced = D_ds/Ds alpha_physical :param alpha_reduced: reduced deflection angle :param index_lens: integer, index of the deflector plane :return: physical deflection angle """ factor = self._reduced2physical_factor[index_lens] return alpha_reduced * factor def _gravitational_delay(self, x, y, kwargs_lens, index, z_lens): """ :param x: co-moving coordinate at the lens plane :param y: co-moving coordinate at the lens plane :param kwargs_lens: lens model keyword arguments :param z_lens: redshift of the deflector :param index: index of the lens model in sorted redshfit convention :return: gravitational delay in 
units of days as seen at z=0 """ theta_x, theta_y = self._co_moving2angle(x, y, index) k = self._sorted_redshift_index[index] potential = self.func_list[k].function(theta_x, theta_y, **kwargs_lens[k]) delay_days = self._lensing_potential2time_delay(potential, z_lens, z_source=self._z_source_convention) return -delay_days @staticmethod def _geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij): """ :param beta_i_x: angle on the sky at plane i :param beta_i_y: angle on the sky at plane i :param beta_j_x: angle on the sky at plane j :param beta_j_y: angle on the sky at plane j :param T_i: transverse diameter distance to z_i :param T_j: transverse diameter distance to z_j :param T_ij: transverse diameter distance from z_i to z_j :return: excess delay relative to a straight line """ d_beta_x = beta_j_x - beta_i_x d_beta_y = beta_j_y - beta_i_y tau_ij = T_i * T_j / T_ij * const.Mpc / const.c / const.day_s * const.arcsec**2 return tau_ij * (d_beta_x ** 2 + d_beta_y ** 2) / 2 def _lensing_potential2time_delay(self, potential, z_lens, z_source): """ transforms the lensing potential (in units arcsec^2) to a gravitational time-delay as measured at z=0 :param potential: lensing potential :param z_lens: redshift of the deflector :param z_source: redshift of source for the definition of the lensing quantities :return: gravitational time-delay in units of days """ D_dt = self._cosmo_bkg.ddt(z_lens, z_source) delay_days = const.delay_arcsec2days(potential, D_dt) return delay_days def _co_moving2angle(self, x, y, index): """ transforms co-moving distances Mpc into angles on the sky (radian) :param x: co-moving distance :param y: co-moving distance :param index: index of plane :return: angles on the sky """ T_z = self._T_z_list[index] theta_x = x / T_z theta_y = y / T_z return theta_x, theta_y @staticmethod def _ray_step(x, y, alpha_x, alpha_y, delta_T): """ ray propagation with small angle approximation :param x: co-moving x-position :param y: co-moving 
y-position :param alpha_x: deflection angle in x-direction at (x, y) :param alpha_y: deflection angle in y-direction at (x, y) :param delta_T: transverse angular diameter distance to the next step :return: co-moving position at the next step (backwards) """ x_ = x + alpha_x * delta_T y_ = y + alpha_y * delta_T return x_, y_ @staticmethod def _ray_step_add(x, y, alpha_x, alpha_y, delta_T): """ ray propagation with small angle approximation :param x: co-moving x-position :param y: co-moving y-position :param alpha_x: deflection angle in x-direction at (x, y) :param alpha_y: deflection angle in y-direction at (x, y) :param delta_T: transverse angular diameter distance to the next step :return: co-moving position at the next step (backwards) """ x += alpha_x * delta_T y += alpha_y * delta_T return x, y def _add_deflection(self, x, y, alpha_x, alpha_y, kwargs_lens, index): """ adds the physical deflection angle of a single lens plane to the deflection field :param x: co-moving distance at the deflector plane :param y: co-moving distance at the deflector plane :param alpha_x: physical angle (radian) before the deflector plane :param alpha_y: physical angle (radian) before the deflector plane :param kwargs_lens: lens model parameter kwargs :param index: index of the lens model to be added in sorted redshift list convention :param idex_lens: redshift of the deflector plane :return: updated physical deflection after deflector plane (in a backwards ray-tracing perspective) """ theta_x, theta_y = self._co_moving2angle(x, y, index) k = self._sorted_redshift_index[index] alpha_x_red, alpha_y_red = self.func_list[k].derivatives(theta_x, theta_y, **kwargs_lens[k]) alpha_x_phys = self._reduced2physical_deflection(alpha_x_red, index) alpha_y_phys = self._reduced2physical_deflection(alpha_y_red, index) return alpha_x - alpha_x_phys, alpha_y - alpha_y_phys @staticmethod def _start_condition(inclusive, z_lens, z_start): """ :param inclusive: boolean, if True selects z_lens including 
z_start, else only selects z_lens > z_start :param z_lens: deflector redshift :param z_start: starting redshift (lowest redshift) :return: boolean of condition """ if inclusive: return z_lens >= z_start else: return z_lens > z_start
46.638356
135
0.641661
import numpy as np from lenstronomy.Cosmo.background import Background from lenstronomy.LensModel.profile_list_base import ProfileListBase import lenstronomy.Util.constants as const __all__ = ['MultiPlaneBase'] class MultiPlaneBase(ProfileListBase): def __init__(self, lens_model_list, lens_redshift_list, z_source_convention, cosmo=None, numerical_alpha_class=None, cosmo_interp=False, z_interp_stop=None, num_z_interp=100): if z_interp_stop is None: z_interp_stop = z_source_convention self._cosmo_bkg = Background(cosmo, interp=cosmo_interp, z_stop=z_interp_stop, num_interp=num_z_interp) self._z_source_convention = z_source_convention if len(lens_redshift_list) > 0: z_lens_max = np.max(lens_redshift_list) if z_lens_max >= z_source_convention: raise ValueError('deflector redshifts higher or equal the source redshift convention (%s >= %s for the reduced lens' ' model quantities not allowed (leads to negative reduced deflection angles!' % (z_lens_max, z_source_convention)) if not len(lens_model_list) == len(lens_redshift_list): raise ValueError("The length of lens_model_list does not correspond to redshift_list") self._lens_redshift_list = lens_redshift_list super(MultiPlaneBase, self).__init__(lens_model_list, numerical_alpha_class=numerical_alpha_class, lens_redshift_list=lens_redshift_list, z_source_convention=z_source_convention) if len(lens_model_list) < 1: self._sorted_redshift_index = [] else: self._sorted_redshift_index = self._index_ordering(lens_redshift_list) z_before = 0 T_z = 0 self._T_ij_list = [] self._T_z_list = [] if len(lens_model_list)<1: self._reduced2physical_factor = [] else: z_sort = np.array(self._lens_redshift_list)[self._sorted_redshift_index] z_source_array = np.ones(z_sort.shape)*z_source_convention self._reduced2physical_factor = self._cosmo_bkg.d_xy(0, z_source_convention) / self._cosmo_bkg.d_xy(z_sort, z_source_array) for idex in self._sorted_redshift_index: z_lens = self._lens_redshift_list[idex] if z_before == z_lens: delta_T = 0 else: 
T_z = self._cosmo_bkg.T_xy(0, z_lens) delta_T = self._cosmo_bkg.T_xy(z_before, z_lens) self._T_ij_list.append(delta_T) self._T_z_list.append(T_z) z_before = z_lens def ray_shooting_partial(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_lens, include_z_start=False, T_ij_start=None, T_ij_end=None): x = np.array(x, dtype=float) y = np.array(y, dtype=float) alpha_x = np.array(alpha_x) alpha_y = np.array(alpha_y) z_lens_last = z_start first_deflector = True for i, idex in enumerate(self._sorted_redshift_index): z_lens = self._lens_redshift_list[idex] if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop: if first_deflector is True: if T_ij_start is None: if z_start == 0: delta_T = self._T_ij_list[0] else: delta_T = self._cosmo_bkg.T_xy(z_start, z_lens) else: delta_T = T_ij_start first_deflector = False else: delta_T = self._T_ij_list[i] x, y = self._ray_step_add(x, y, alpha_x, alpha_y, delta_T) alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i) z_lens_last = z_lens if T_ij_end is None: if z_lens_last == z_stop: delta_T = 0 else: delta_T = self._cosmo_bkg.T_xy(z_lens_last, z_stop) else: delta_T = T_ij_end x, y = self._ray_step_add(x, y, alpha_x, alpha_y, delta_T) return x, y, alpha_x, alpha_y def transverse_distance_start_stop(self, z_start, z_stop, include_z_start=False): z_lens_last = z_start first_deflector = True T_ij_start = None for i, idex in enumerate(self._sorted_redshift_index): z_lens = self._lens_redshift_list[idex] if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop: if first_deflector is True: T_ij_start = self._cosmo_bkg.T_xy(z_start, z_lens) first_deflector = False z_lens_last = z_lens T_ij_end = self._cosmo_bkg.T_xy(z_lens_last, z_stop) return T_ij_start, T_ij_end def geo_shapiro_delay(self, theta_x, theta_y, kwargs_lens, z_stop, T_z_stop=None, T_ij_end=None): dt_grav = np.zeros_like(theta_x, dtype=float) dt_geo = np.zeros_like(theta_x, dtype=float) x = 
np.zeros_like(theta_x, dtype=float) y = np.zeros_like(theta_y, dtype=float) alpha_x = np.array(theta_x, dtype=float) alpha_y = np.array(theta_y, dtype=float) i = 0 z_lens_last = 0 for i, index in enumerate(self._sorted_redshift_index): z_lens = self._lens_redshift_list[index] if z_lens <= z_stop: T_ij = self._T_ij_list[i] x_new, y_new = self._ray_step(x, y, alpha_x, alpha_y, T_ij) if i == 0: pass elif T_ij > 0: T_j = self._T_z_list[i] T_i = self._T_z_list[i - 1] beta_i_x, beta_i_y = x / T_i, y / T_i beta_j_x, beta_j_y = x_new / T_j, y_new / T_j dt_geo_new = self._geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij) dt_geo += dt_geo_new x, y = x_new, y_new dt_grav_new = self._gravitational_delay(x, y, kwargs_lens, i, z_lens) alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i) dt_grav += dt_grav_new z_lens_last = z_lens if T_ij_end is None: T_ij_end = self._cosmo_bkg.T_xy(z_lens_last, z_stop) T_ij = T_ij_end x_new, y_new = self._ray_step(x, y, alpha_x, alpha_y, T_ij) if T_z_stop is None: T_z_stop = self._cosmo_bkg.T_xy(0, z_stop) T_j = T_z_stop T_i = self._T_z_list[i] beta_i_x, beta_i_y = x / T_i, y / T_i beta_j_x, beta_j_y = x_new / T_j, y_new / T_j dt_geo_new = self._geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij) dt_geo += dt_geo_new return dt_geo, dt_grav @staticmethod def _index_ordering(redshift_list): redshift_list = np.array(redshift_list) sort_index = np.argsort(redshift_list) return sort_index def _reduced2physical_deflection(self, alpha_reduced, index_lens): factor = self._reduced2physical_factor[index_lens] return alpha_reduced * factor def _gravitational_delay(self, x, y, kwargs_lens, index, z_lens): theta_x, theta_y = self._co_moving2angle(x, y, index) k = self._sorted_redshift_index[index] potential = self.func_list[k].function(theta_x, theta_y, **kwargs_lens[k]) delay_days = self._lensing_potential2time_delay(potential, z_lens, z_source=self._z_source_convention) return 
-delay_days @staticmethod def _geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij): d_beta_x = beta_j_x - beta_i_x d_beta_y = beta_j_y - beta_i_y tau_ij = T_i * T_j / T_ij * const.Mpc / const.c / const.day_s * const.arcsec**2 return tau_ij * (d_beta_x ** 2 + d_beta_y ** 2) / 2 def _lensing_potential2time_delay(self, potential, z_lens, z_source): D_dt = self._cosmo_bkg.ddt(z_lens, z_source) delay_days = const.delay_arcsec2days(potential, D_dt) return delay_days def _co_moving2angle(self, x, y, index): T_z = self._T_z_list[index] theta_x = x / T_z theta_y = y / T_z return theta_x, theta_y @staticmethod def _ray_step(x, y, alpha_x, alpha_y, delta_T): x_ = x + alpha_x * delta_T y_ = y + alpha_y * delta_T return x_, y_ @staticmethod def _ray_step_add(x, y, alpha_x, alpha_y, delta_T): x += alpha_x * delta_T y += alpha_y * delta_T return x, y def _add_deflection(self, x, y, alpha_x, alpha_y, kwargs_lens, index): theta_x, theta_y = self._co_moving2angle(x, y, index) k = self._sorted_redshift_index[index] alpha_x_red, alpha_y_red = self.func_list[k].derivatives(theta_x, theta_y, **kwargs_lens[k]) alpha_x_phys = self._reduced2physical_deflection(alpha_x_red, index) alpha_y_phys = self._reduced2physical_deflection(alpha_y_red, index) return alpha_x - alpha_x_phys, alpha_y - alpha_y_phys @staticmethod def _start_condition(inclusive, z_lens, z_start): if inclusive: return z_lens >= z_start else: return z_lens > z_start
true
true
f70fec6a4bb9f9aa6a836ce5a5311f9889db362b
3,307
py
Python
Friendly/LaTeX/figures/cosinecomparison.py
benvcutilli/CountingPlusFriendly
1947e2a765e20c87e080da22b4ecc4da1f272b02
[ "MIT" ]
null
null
null
Friendly/LaTeX/figures/cosinecomparison.py
benvcutilli/CountingPlusFriendly
1947e2a765e20c87e080da22b4ecc4da1f272b02
[ "MIT" ]
null
null
null
Friendly/LaTeX/figures/cosinecomparison.py
benvcutilli/CountingPlusFriendly
1947e2a765e20c87e080da22b4ecc4da1f272b02
[ "MIT" ]
null
null
null
# The Plotly^^^plotly^^^ package import plotly # Importing ^^^numpy^^^ import numpy def sigmoid(x): return (1 + numpy.exp(-x)) ** -1 samplesPerDimension = 500 # Using numpy.linspace to create x and y values is from somewhere on ^^^plotly^^^'s website, most # likely. It is a convenient way to do this, so that's why. evaluationRange = numpy.linspace([-5, -5], [5, 5], samplesPerDimension, axis=1) # Using the technique that I used from networkcomponents.py (PairwiseDifference) where one dimension # is on the first axis and the other is on the second axis so that they can broadcast to create all # permutations between the array of x values and the array of y values. Before broadcasting, we need # to add a dimension to both the x vector and y vector, but at the beginning and end of them, # respectively, which is also what happens in PairwiseDifference. However, this code doesn't # actually broadcast, but it mimics broadcasting with the .repeat(...) calls. #################################################################################################### # # x = numpy.expand_dims(evaluationRange[0], 0).repeat(samplesPerDimension, 0) y = numpy.expand_dims(evaluationRange[1], 1).repeat(samplesPerDimension, 1) evaluationPairs = numpy.stack([x, y], 2) # # #################################################################################################### weights = numpy.array([1, 1]) constant = 1.0 # Calculating every combination for the three functions dotProduct = numpy.dot(evaluationPairs, weights) cosine = dotProduct \ / \ ( numpy.linalg.norm(weights) * numpy.linalg.norm(evaluationPairs, axis=2) ) softenedCosine = dotProduct \ / \ ( numpy.linalg.norm(weights) * numpy.linalg.norm(evaluationPairs, axis=2) + constant) dotProductSurface = plotly.graph_objects.Surface( x=evaluationRange[0], y=evaluationRange[1], z=sigmoid(dotProduct) ) cosineSurface = plotly.graph_objects.Surface( x=evaluationRange[0], y=evaluationRange[1], z=cosine ) softenedCosineSurface = 
plotly.graph_objects.Surface( x=evaluationRange[0], y=evaluationRange[1], z=softenedCosine ) figure = plotly.graph_objects.Figure( softenedCosineSurface, layout={ "scene": { "aspectmode": "data" } } ) # "validate" left as True partially because I trust the default value listed in # ^^^plotlyfigureshow^^^ figure.show(renderer="firefox") #figure.write_image("graph.png", "png", 1200, 900, 1.0, True, "kaleido")
40.329268
100
0.498639
import plotly import numpy def sigmoid(x): return (1 + numpy.exp(-x)) ** -1 samplesPerDimension = 500 # likely. It is a convenient way to do this, so that's why. evaluationRange = numpy.linspace([-5, -5], [5, 5], samplesPerDimension, axis=1) # actually broadcast, but it mimics broadcasting with the .repeat(...) calls. #################################################################################################### # # x = numpy.expand_dims(evaluationRange[0], 0).repeat(samplesPerDimension, 0) y = numpy.expand_dims(evaluationRange[1], 1).repeat(samplesPerDimension, 1) evaluationPairs = numpy.stack([x, y], 2) # # #################################################################################################### weights = numpy.array([1, 1]) constant = 1.0 # Calculating every combination for the three functions dotProduct = numpy.dot(evaluationPairs, weights) cosine = dotProduct \ / \ ( numpy.linalg.norm(weights) * numpy.linalg.norm(evaluationPairs, axis=2) ) softenedCosine = dotProduct \ / \ ( numpy.linalg.norm(weights) * numpy.linalg.norm(evaluationPairs, axis=2) + constant) dotProductSurface = plotly.graph_objects.Surface( x=evaluationRange[0], y=evaluationRange[1], z=sigmoid(dotProduct) ) cosineSurface = plotly.graph_objects.Surface( x=evaluationRange[0], y=evaluationRange[1], z=cosine ) softenedCosineSurface = plotly.graph_objects.Surface( x=evaluationRange[0], y=evaluationRange[1], z=softenedCosine ) figure = plotly.graph_objects.Figure( softenedCosineSurface, layout={ "scene": { "aspectmode": "data" } } ) # "validate" left as True partially because I trust the default value listed in # ^^^plotlyfigureshow^^^ figure.show(renderer="firefox") #figure.write_image("graph.png", "png", 1200, 900, 1.0, True, "kaleido")
true
true
f70fec80d037fd24ce2c291102597e64ad2034bd
2,186
py
Python
python/maya/site-packages/pymel-1.0.5/extras/completion/py/maya/app/general/adjustBackgroundImageWin.py
CountZer0/PipelineConstructionSet
0aa73a8a63c72989b2d1c677efd78dad4388d335
[ "BSD-3-Clause" ]
21
2015-04-27T05:01:36.000Z
2021-11-22T13:45:14.000Z
python/maya/site-packages/pymel-1.0.5/extras/completion/py/maya/app/general/adjustBackgroundImageWin.py
0xb1dd1e/PipelineConstructionSet
621349da1b6d1437e95d0c9e48ee9f36d59f19fd
[ "BSD-3-Clause" ]
null
null
null
python/maya/site-packages/pymel-1.0.5/extras/completion/py/maya/app/general/adjustBackgroundImageWin.py
0xb1dd1e/PipelineConstructionSet
621349da1b6d1437e95d0c9e48ee9f36d59f19fd
[ "BSD-3-Clause" ]
7
2015-04-11T11:37:19.000Z
2020-05-22T09:49:04.000Z
import maya.mel as mel import maya import maya.cmds as cmds import sys class TselectionWin(object): """ Base class for a dialog which works on the user's selection """ def __del__(self): pass def __init__(self, title, selectionFilter='<function <lambda>>', objects=[]): """ selectionFilter - function which returns True if object is selectable """ pass def activate(self, window): """ Call this method once the window is created """ pass def close(self): pass def getWindowTitle(self): pass def onSelectionChanged(self, *args): """ Called anytime the selection list changes, self.objects is updated and window title is updated. """ pass __dict__ = None __weakref__ = None class TadjustBackgroundImageWin(TselectionWin): """ Adjust the background image for a container Dialog """ def __init__(self, editor): pass def hyperGraphCmd(self, *args, **kwargs): pass def loadImage(self, theFile): pass def onAdjustImagePositionHorizontal(self, val): pass def onAdjustImagePositionVertical(self, val): pass def onAdjustImageScale(self, val): pass def onFitToHeight(self, arg): pass def onFitToWidth(self, arg): pass def onImageFieldChange(self, val): pass def onLoadImage(self): pass def onSelectionChanged(self, *args): """ override selection callback """ pass def show(self): """ Build and show the dialog """ pass def update(self): """ update the ui after something has changed """ pass def adjustBackgroundImageWin(editor): """ Main entry point. Create and show the adjust-background-image dialog. """ pass
16.073529
81
0.524245
import maya.mel as mel import maya import maya.cmds as cmds import sys class TselectionWin(object): def __del__(self): pass def __init__(self, title, selectionFilter='<function <lambda>>', objects=[]): pass def activate(self, window): pass def close(self): pass def getWindowTitle(self): pass def onSelectionChanged(self, *args): pass __dict__ = None __weakref__ = None class TadjustBackgroundImageWin(TselectionWin): def __init__(self, editor): pass def hyperGraphCmd(self, *args, **kwargs): pass def loadImage(self, theFile): pass def onAdjustImagePositionHorizontal(self, val): pass def onAdjustImagePositionVertical(self, val): pass def onAdjustImageScale(self, val): pass def onFitToHeight(self, arg): pass def onFitToWidth(self, arg): pass def onImageFieldChange(self, val): pass def onLoadImage(self): pass def onSelectionChanged(self, *args): pass def show(self): pass def update(self): pass def adjustBackgroundImageWin(editor): pass
true
true
f70feca6b4336b31b7b0568bb1b1409e4206f656
1,454
py
Python
modules/runtime/runtime.py
Lizhu-Chen/bark
fad029f658e462eb1772c28c2c0971faf5176dc1
[ "MIT" ]
null
null
null
modules/runtime/runtime.py
Lizhu-Chen/bark
fad029f658e462eb1772c28c2c0971faf5176dc1
[ "MIT" ]
null
null
null
modules/runtime/runtime.py
Lizhu-Chen/bark
fad029f658e462eb1772c28c2c0971faf5176dc1
[ "MIT" ]
1
2020-08-12T17:09:05.000Z
2020-08-12T17:09:05.000Z
# Copyright (c) 2020 Julian Bernhard, Klemens Esterle, Patrick Hart and # Tobias Kessler # # This work is licensed under the terms of the MIT license. # For a copy, see <https://opensource.org/licenses/MIT>. from bark.world.opendrive import * from bark.world import * from bark.geometry import * from bark.runtime import PyRuntime class Runtime(PyRuntime): def __init__(self, step_time, viewer, scenario_generator=None, render=False): self._step_time = step_time self._viewer = viewer self._scenario_generator = scenario_generator self._scenario_idx = None self._scenario = None self._render = render self._reset_has_been_called = False def reset(self, scenario=None): if scenario: self._scenario = scenario else: self._scenario, self._scenario_idx = \ self._scenario_generator.get_next_scenario() self._world = self._scenario.GetWorldState() self._reset_has_been_called = True self._viewer.Reset() def step(self): assert(self._reset_has_been_called==True) self._world.Step(self._step_time) if self._render: self.render() def render(self): # self._viewer.clear() self._viewer.drawWorld( self._world, self._scenario._eval_agent_ids, scenario_idx=self._scenario_idx) self._viewer.clear() def run(self, steps): for step_count in range(steps): self.Step()
27.433962
71
0.683631
from bark.world.opendrive import * from bark.world import * from bark.geometry import * from bark.runtime import PyRuntime class Runtime(PyRuntime): def __init__(self, step_time, viewer, scenario_generator=None, render=False): self._step_time = step_time self._viewer = viewer self._scenario_generator = scenario_generator self._scenario_idx = None self._scenario = None self._render = render self._reset_has_been_called = False def reset(self, scenario=None): if scenario: self._scenario = scenario else: self._scenario, self._scenario_idx = \ self._scenario_generator.get_next_scenario() self._world = self._scenario.GetWorldState() self._reset_has_been_called = True self._viewer.Reset() def step(self): assert(self._reset_has_been_called==True) self._world.Step(self._step_time) if self._render: self.render() def render(self): self._viewer.drawWorld( self._world, self._scenario._eval_agent_ids, scenario_idx=self._scenario_idx) self._viewer.clear() def run(self, steps): for step_count in range(steps): self.Step()
true
true
f70fecd8c30479e0a7ac5310738add8af3fd85d6
14,799
py
Python
salt/payload.py
Noah-Huppert/salt
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
[ "Apache-2.0" ]
19
2016-01-29T14:37:52.000Z
2022-03-30T18:08:01.000Z
salt/payload.py
Noah-Huppert/salt
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
[ "Apache-2.0" ]
223
2016-03-02T16:39:41.000Z
2022-03-03T12:26:35.000Z
salt/payload.py
Noah-Huppert/salt
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
[ "Apache-2.0" ]
64
2016-02-04T19:45:26.000Z
2021-12-15T02:02:31.000Z
# -*- coding: utf-8 -*- """ Many aspects of the salt payload need to be managed, from the return of encrypted keys to general payload dynamics and packaging, these happen in here """ # Import python libs from __future__ import absolute_import, print_function, unicode_literals import datetime import gc # import sys # Use if sys is commented out below import logging # Import salt libs import salt.log import salt.transport.frame import salt.utils.immutabletypes as immutabletypes import salt.utils.msgpack import salt.utils.stringutils from salt.exceptions import SaltDeserializationError, SaltReqTimeoutError # Import third party libs from salt.ext import six from salt.utils.data import CaseInsensitiveDict try: import zmq except ImportError: # No need for zeromq in local mode pass log = logging.getLogger(__name__) def package(payload): """ This method for now just wraps msgpack.dumps, but it is here so that we can make the serialization a custom option in the future with ease. """ return salt.utils.msgpack.dumps(payload) def unpackage(package_): """ Unpackages a payload """ return salt.utils.msgpack.loads(package_, use_list=True) def format_payload(enc, **kwargs): """ Pass in the required arguments for a payload, the enc type and the cmd, then a list of keyword args to generate the body of the load dict. """ payload = {"enc": enc} load = {} for key in kwargs: load[key] = kwargs[key] payload["load"] = load return package(payload) class Serial(object): """ Create a serialization object, this object manages all message serialization in Salt """ def __init__(self, opts): if isinstance(opts, dict): self.serial = opts.get("serial", "msgpack") elif isinstance(opts, six.string_types): self.serial = opts else: self.serial = "msgpack" def loads(self, msg, encoding=None, raw=False): """ Run the correct loads serialization format :param encoding: Useful for Python 3 support. 
If the msgpack data was encoded using "use_bin_type=True", this will differentiate between the 'bytes' type and the 'str' type by decoding contents with 'str' type to what the encoding was set as. Recommended encoding is 'utf-8' when using Python 3. If the msgpack data was not encoded using "use_bin_type=True", it will try to decode all 'bytes' and 'str' data (the distinction has been lost in this case) to what the encoding is set as. In this case, it will fail if any of the contents cannot be converted. """ try: def ext_type_decoder(code, data): if code == 78: data = salt.utils.stringutils.to_unicode(data) return datetime.datetime.strptime(data, "%Y%m%dT%H:%M:%S.%f") return data gc.disable() # performance optimization for msgpack loads_kwargs = {"use_list": True, "ext_hook": ext_type_decoder} if salt.utils.msgpack.version >= (0, 4, 0): # msgpack only supports 'encoding' starting in 0.4.0. # Due to this, if we don't need it, don't pass it at all so # that under Python 2 we can still work with older versions # of msgpack. if salt.utils.msgpack.version >= (0, 5, 2): if encoding is None: loads_kwargs["raw"] = True else: loads_kwargs["raw"] = False else: loads_kwargs["encoding"] = encoding try: ret = salt.utils.msgpack.unpackb(msg, **loads_kwargs) except UnicodeDecodeError: # msg contains binary data loads_kwargs.pop("raw", None) loads_kwargs.pop("encoding", None) ret = salt.utils.msgpack.loads(msg, **loads_kwargs) else: ret = salt.utils.msgpack.loads(msg, **loads_kwargs) if six.PY3 and encoding is None and not raw: ret = salt.transport.frame.decode_embedded_strs(ret) except Exception as exc: # pylint: disable=broad-except log.critical( "Could not deserialize msgpack message. This often happens " "when trying to read a file not in binary mode. " "To see message payload, enable debug logging and retry. 
" "Exception: %s", exc, ) log.debug("Msgpack deserialization failure on message: %s", msg) gc.collect() raise six.raise_from( SaltDeserializationError( "Could not deserialize msgpack message." " See log for more info." ), exc, ) finally: gc.enable() return ret def load(self, fn_): """ Run the correct serialization to load a file """ data = fn_.read() fn_.close() if data: if six.PY3: return self.loads(data, encoding="utf-8") else: return self.loads(data) def dumps(self, msg, use_bin_type=False): """ Run the correct dumps serialization format :param use_bin_type: Useful for Python 3 support. Tells msgpack to differentiate between 'str' and 'bytes' types by encoding them differently. Since this changes the wire protocol, this option should not be used outside of IPC. """ def ext_type_encoder(obj): if isinstance(obj, six.integer_types): # msgpack can't handle the very long Python longs for jids # Convert any very long longs to strings return six.text_type(obj) elif isinstance(obj, (datetime.datetime, datetime.date)): # msgpack doesn't support datetime.datetime and datetime.date datatypes. # So here we have converted these types to custom datatype # This is msgpack Extended types numbered 78 return salt.utils.msgpack.ExtType( 78, salt.utils.stringutils.to_bytes(obj.strftime("%Y%m%dT%H:%M:%S.%f")), ) # The same for immutable types elif isinstance(obj, immutabletypes.ImmutableDict): return dict(obj) elif isinstance(obj, immutabletypes.ImmutableList): return list(obj) elif isinstance(obj, (set, immutabletypes.ImmutableSet)): # msgpack can't handle set so translate it to tuple return tuple(obj) elif isinstance(obj, CaseInsensitiveDict): return dict(obj) # Nothing known exceptions found. Let msgpack raise its own. 
return obj try: return salt.utils.msgpack.packb( msg, default=ext_type_encoder, use_bin_type=use_bin_type ) except (OverflowError, salt.utils.msgpack.exceptions.PackValueError): # msgpack<=0.4.6 don't call ext encoder on very long integers raising the error instead. # Convert any very long longs to strings and call dumps again. def verylong_encoder(obj, context): # Make sure we catch recursion here. objid = id(obj) # This instance list needs to correspond to the types recursed # in the below if/elif chain. Also update # tests/unit/test_payload.py if objid in context and isinstance(obj, (dict, list, tuple)): return "<Recursion on {} with id={}>".format( type(obj).__name__, id(obj) ) context.add(objid) # The isinstance checks in this if/elif chain need to be # kept in sync with the above recursion check. if isinstance(obj, dict): for key, value in six.iteritems(obj.copy()): obj[key] = verylong_encoder(value, context) return dict(obj) elif isinstance(obj, (list, tuple)): obj = list(obj) for idx, entry in enumerate(obj): obj[idx] = verylong_encoder(entry, context) return obj # A value of an Integer object is limited from -(2^63) upto (2^64)-1 by MessagePack # spec. Here we care only of JIDs that are positive integers. if isinstance(obj, six.integer_types) and obj >= pow(2, 64): return six.text_type(obj) else: return obj msg = verylong_encoder(msg, set()) return salt.utils.msgpack.packb( msg, default=ext_type_encoder, use_bin_type=use_bin_type ) def dump(self, msg, fn_): """ Serialize the correct data into the named file object """ if six.PY2: fn_.write(self.dumps(msg)) else: # When using Python 3, write files in such a way # that the 'bytes' and 'str' types are distinguishable # by using "use_bin_type=True". fn_.write(self.dumps(msg, use_bin_type=True)) fn_.close() class SREQ(object): """ Create a generic interface to wrap salt zeromq req calls. 
""" def __init__(self, master, id_="", serial="msgpack", linger=0, opts=None): self.master = master self.id_ = id_ self.serial = Serial(serial) self.linger = linger self.context = zmq.Context() self.poller = zmq.Poller() self.opts = opts @property def socket(self): """ Lazily create the socket. """ if not hasattr(self, "_socket"): # create a new one self._socket = self.context.socket(zmq.REQ) if hasattr(zmq, "RECONNECT_IVL_MAX"): self._socket.setsockopt(zmq.RECONNECT_IVL_MAX, 5000) self._set_tcp_keepalive() if self.master.startswith("tcp://["): # Hint PF type if bracket enclosed IPv6 address if hasattr(zmq, "IPV6"): self._socket.setsockopt(zmq.IPV6, 1) elif hasattr(zmq, "IPV4ONLY"): self._socket.setsockopt(zmq.IPV4ONLY, 0) self._socket.linger = self.linger if self.id_: self._socket.setsockopt(zmq.IDENTITY, self.id_) self._socket.connect(self.master) return self._socket def _set_tcp_keepalive(self): if hasattr(zmq, "TCP_KEEPALIVE") and self.opts: if "tcp_keepalive" in self.opts: self._socket.setsockopt(zmq.TCP_KEEPALIVE, self.opts["tcp_keepalive"]) if "tcp_keepalive_idle" in self.opts: self._socket.setsockopt( zmq.TCP_KEEPALIVE_IDLE, self.opts["tcp_keepalive_idle"] ) if "tcp_keepalive_cnt" in self.opts: self._socket.setsockopt( zmq.TCP_KEEPALIVE_CNT, self.opts["tcp_keepalive_cnt"] ) if "tcp_keepalive_intvl" in self.opts: self._socket.setsockopt( zmq.TCP_KEEPALIVE_INTVL, self.opts["tcp_keepalive_intvl"] ) def clear_socket(self): """ delete socket if you have it """ if hasattr(self, "_socket"): if isinstance(self.poller.sockets, dict): sockets = list(self.poller.sockets.keys()) for socket in sockets: log.trace("Unregistering socket: %s", socket) self.poller.unregister(socket) else: for socket in self.poller.sockets: log.trace("Unregistering socket: %s", socket) self.poller.unregister(socket[0]) del self._socket def send(self, enc, load, tries=1, timeout=60): """ Takes two arguments, the encryption type and the base payload """ payload = {"enc": enc} payload["load"] = 
load pkg = self.serial.dumps(payload) self.socket.send(pkg) self.poller.register(self.socket, zmq.POLLIN) tried = 0 while True: polled = self.poller.poll(timeout * 1000) tried += 1 if polled: break if tries > 1: log.info( "SaltReqTimeoutError: after %s seconds. (Try %s of %s)", timeout, tried, tries, ) if tried >= tries: self.clear_socket() raise SaltReqTimeoutError( "SaltReqTimeoutError: after {0} seconds, ran {1} " "tries".format(timeout * tried, tried) ) return self.serial.loads(self.socket.recv()) def send_auto(self, payload, tries=1, timeout=60): """ Detect the encryption type based on the payload """ enc = payload.get("enc", "clear") load = payload.get("load", {}) return self.send(enc, load, tries, timeout) def destroy(self): if isinstance(self.poller.sockets, dict): sockets = list(self.poller.sockets.keys()) for socket in sockets: if socket.closed is False: socket.setsockopt(zmq.LINGER, 1) socket.close() self.poller.unregister(socket) else: for socket in self.poller.sockets: if socket[0].closed is False: socket[0].setsockopt(zmq.LINGER, 1) socket[0].close() self.poller.unregister(socket[0]) if self.socket.closed is False: self.socket.setsockopt(zmq.LINGER, 1) self.socket.close() if self.context.closed is False: self.context.term() # pylint: disable=W1701 def __del__(self): self.destroy() # pylint: enable=W1701
38.043702
100
0.547064
from __future__ import absolute_import, print_function, unicode_literals import datetime import gc mport salt.transport.frame import salt.utils.immutabletypes as immutabletypes import salt.utils.msgpack import salt.utils.stringutils from salt.exceptions import SaltDeserializationError, SaltReqTimeoutError from salt.ext import six from salt.utils.data import CaseInsensitiveDict try: import zmq except ImportError: pass log = logging.getLogger(__name__) def package(payload): return salt.utils.msgpack.dumps(payload) def unpackage(package_): return salt.utils.msgpack.loads(package_, use_list=True) def format_payload(enc, **kwargs): payload = {"enc": enc} load = {} for key in kwargs: load[key] = kwargs[key] payload["load"] = load return package(payload) class Serial(object): def __init__(self, opts): if isinstance(opts, dict): self.serial = opts.get("serial", "msgpack") elif isinstance(opts, six.string_types): self.serial = opts else: self.serial = "msgpack" def loads(self, msg, encoding=None, raw=False): try: def ext_type_decoder(code, data): if code == 78: data = salt.utils.stringutils.to_unicode(data) return datetime.datetime.strptime(data, "%Y%m%dT%H:%M:%S.%f") return data gc.disable() loads_kwargs = {"use_list": True, "ext_hook": ext_type_decoder} if salt.utils.msgpack.version >= (0, 4, 0): if salt.utils.msgpack.version >= (0, 5, 2): if encoding is None: loads_kwargs["raw"] = True else: loads_kwargs["raw"] = False else: loads_kwargs["encoding"] = encoding try: ret = salt.utils.msgpack.unpackb(msg, **loads_kwargs) except UnicodeDecodeError: loads_kwargs.pop("raw", None) loads_kwargs.pop("encoding", None) ret = salt.utils.msgpack.loads(msg, **loads_kwargs) else: ret = salt.utils.msgpack.loads(msg, **loads_kwargs) if six.PY3 and encoding is None and not raw: ret = salt.transport.frame.decode_embedded_strs(ret) except Exception as exc: log.critical( "Could not deserialize msgpack message. This often happens " "when trying to read a file not in binary mode. 
" "To see message payload, enable debug logging and retry. " "Exception: %s", exc, ) log.debug("Msgpack deserialization failure on message: %s", msg) gc.collect() raise six.raise_from( SaltDeserializationError( "Could not deserialize msgpack message." " See log for more info." ), exc, ) finally: gc.enable() return ret def load(self, fn_): data = fn_.read() fn_.close() if data: if six.PY3: return self.loads(data, encoding="utf-8") else: return self.loads(data) def dumps(self, msg, use_bin_type=False): def ext_type_encoder(obj): if isinstance(obj, six.integer_types): # Convert any very long longs to strings return six.text_type(obj) elif isinstance(obj, (datetime.datetime, datetime.date)): # msgpack doesn't support datetime.datetime and datetime.date datatypes. return salt.utils.msgpack.ExtType( 78, salt.utils.stringutils.to_bytes(obj.strftime("%Y%m%dT%H:%M:%S.%f")), ) elif isinstance(obj, immutabletypes.ImmutableDict): return dict(obj) elif isinstance(obj, immutabletypes.ImmutableList): return list(obj) elif isinstance(obj, (set, immutabletypes.ImmutableSet)): return tuple(obj) elif isinstance(obj, CaseInsensitiveDict): return dict(obj) # Nothing known exceptions found. Let msgpack raise its own. return obj try: return salt.utils.msgpack.packb( msg, default=ext_type_encoder, use_bin_type=use_bin_type ) except (OverflowError, salt.utils.msgpack.exceptions.PackValueError): # msgpack<=0.4.6 don't call ext encoder on very long integers raising the error instead. 
def verylong_encoder(obj, context): objid = id(obj) if objid in context and isinstance(obj, (dict, list, tuple)): return "<Recursion on {} with id={}>".format( type(obj).__name__, id(obj) ) context.add(objid) if isinstance(obj, dict): for key, value in six.iteritems(obj.copy()): obj[key] = verylong_encoder(value, context) return dict(obj) elif isinstance(obj, (list, tuple)): obj = list(obj) for idx, entry in enumerate(obj): obj[idx] = verylong_encoder(entry, context) return obj if isinstance(obj, six.integer_types) and obj >= pow(2, 64): return six.text_type(obj) else: return obj msg = verylong_encoder(msg, set()) return salt.utils.msgpack.packb( msg, default=ext_type_encoder, use_bin_type=use_bin_type ) def dump(self, msg, fn_): if six.PY2: fn_.write(self.dumps(msg)) else: fn_.write(self.dumps(msg, use_bin_type=True)) fn_.close() class SREQ(object): def __init__(self, master, id_="", serial="msgpack", linger=0, opts=None): self.master = master self.id_ = id_ self.serial = Serial(serial) self.linger = linger self.context = zmq.Context() self.poller = zmq.Poller() self.opts = opts @property def socket(self): if not hasattr(self, "_socket"): self._socket = self.context.socket(zmq.REQ) if hasattr(zmq, "RECONNECT_IVL_MAX"): self._socket.setsockopt(zmq.RECONNECT_IVL_MAX, 5000) self._set_tcp_keepalive() if self.master.startswith("tcp://["): if hasattr(zmq, "IPV6"): self._socket.setsockopt(zmq.IPV6, 1) elif hasattr(zmq, "IPV4ONLY"): self._socket.setsockopt(zmq.IPV4ONLY, 0) self._socket.linger = self.linger if self.id_: self._socket.setsockopt(zmq.IDENTITY, self.id_) self._socket.connect(self.master) return self._socket def _set_tcp_keepalive(self): if hasattr(zmq, "TCP_KEEPALIVE") and self.opts: if "tcp_keepalive" in self.opts: self._socket.setsockopt(zmq.TCP_KEEPALIVE, self.opts["tcp_keepalive"]) if "tcp_keepalive_idle" in self.opts: self._socket.setsockopt( zmq.TCP_KEEPALIVE_IDLE, self.opts["tcp_keepalive_idle"] ) if "tcp_keepalive_cnt" in self.opts: 
self._socket.setsockopt( zmq.TCP_KEEPALIVE_CNT, self.opts["tcp_keepalive_cnt"] ) if "tcp_keepalive_intvl" in self.opts: self._socket.setsockopt( zmq.TCP_KEEPALIVE_INTVL, self.opts["tcp_keepalive_intvl"] ) def clear_socket(self): if hasattr(self, "_socket"): if isinstance(self.poller.sockets, dict): sockets = list(self.poller.sockets.keys()) for socket in sockets: log.trace("Unregistering socket: %s", socket) self.poller.unregister(socket) else: for socket in self.poller.sockets: log.trace("Unregistering socket: %s", socket) self.poller.unregister(socket[0]) del self._socket def send(self, enc, load, tries=1, timeout=60): payload = {"enc": enc} payload["load"] = load pkg = self.serial.dumps(payload) self.socket.send(pkg) self.poller.register(self.socket, zmq.POLLIN) tried = 0 while True: polled = self.poller.poll(timeout * 1000) tried += 1 if polled: break if tries > 1: log.info( "SaltReqTimeoutError: after %s seconds. (Try %s of %s)", timeout, tried, tries, ) if tried >= tries: self.clear_socket() raise SaltReqTimeoutError( "SaltReqTimeoutError: after {0} seconds, ran {1} " "tries".format(timeout * tried, tried) ) return self.serial.loads(self.socket.recv()) def send_auto(self, payload, tries=1, timeout=60): enc = payload.get("enc", "clear") load = payload.get("load", {}) return self.send(enc, load, tries, timeout) def destroy(self): if isinstance(self.poller.sockets, dict): sockets = list(self.poller.sockets.keys()) for socket in sockets: if socket.closed is False: socket.setsockopt(zmq.LINGER, 1) socket.close() self.poller.unregister(socket) else: for socket in self.poller.sockets: if socket[0].closed is False: socket[0].setsockopt(zmq.LINGER, 1) socket[0].close() self.poller.unregister(socket[0]) if self.socket.closed is False: self.socket.setsockopt(zmq.LINGER, 1) self.socket.close() if self.context.closed is False: self.context.term() def __del__(self): self.destroy()
true
true
f70fed361316e184c2d287967078dd0420d8ce66
6,658
py
Python
enquiry/migrations/0007_auto__add_field_enquiry_is_published.py
bitlabstudio/django-enquiry
2e82b68ea0631ae824d88282daf38b8d28d8f0ee
[ "MIT" ]
null
null
null
enquiry/migrations/0007_auto__add_field_enquiry_is_published.py
bitlabstudio/django-enquiry
2e82b68ea0631ae824d88282daf38b8d28d8f0ee
[ "MIT" ]
2
2020-02-11T22:02:26.000Z
2020-06-05T16:58:26.000Z
enquiry/migrations/0007_auto__add_field_enquiry_is_published.py
bitlabstudio/django-enquiry
2e82b68ea0631ae824d88282daf38b8d28d8f0ee
[ "MIT" ]
null
null
null
# flake8: noqa # -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Enquiry.is_published' db.add_column('enquiry_enquiry', 'is_published', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False) def backwards(self, orm): # Deleting field 'Enquiry.is_published' db.delete_column('enquiry_enquiry', 'is_published') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', 
[], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'enquiry.answer': { 'Meta': {'object_name': 'Answer'}, 'enquiry': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['enquiry.Enquiry']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'enquiry.answertrans': { 'Meta': {'object_name': 'AnswerTrans'}, 'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['enquiry.Answer']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'enquiry.enquiry': { 'Meta': {'object_name': 'Enquiry'}, 'allow_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'created_by': 
('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'enquiries'", 'null': 'True', 'to': "orm['auth.User']"}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 10, 23, 0, 0)', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 10, 16, 0, 0)'}) }, 'enquiry.enquirytrans': { 'Meta': {'object_name': 'EnquiryTrans'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'enquiry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['enquiry.Enquiry']"}), 'extra_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'question': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'enquiry.vote': { 'Meta': {'object_name': 'Vote'}, 'answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['enquiry.Answer']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'session_key': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'votes'", 'null': 'True', 'to': "orm['auth.User']"}) } } complete_apps = ['enquiry']
65.920792
182
0.560829
import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): db.add_column('enquiry_enquiry', 'is_published', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False) def backwards(self, orm): db.delete_column('enquiry_enquiry', 'is_published') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'enquiry.answer': { 'Meta': {'object_name': 'Answer'}, 'enquiry': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['enquiry.Enquiry']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'enquiry.answertrans': { 'Meta': {'object_name': 'AnswerTrans'}, 'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['enquiry.Answer']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'enquiry.enquiry': { 'Meta': {'object_name': 'Enquiry'}, 'allow_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'enquiries'", 'null': 
'True', 'to': "orm['auth.User']"}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 10, 23, 0, 0)', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 10, 16, 0, 0)'}) }, 'enquiry.enquirytrans': { 'Meta': {'object_name': 'EnquiryTrans'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'enquiry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['enquiry.Enquiry']"}), 'extra_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'question': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'enquiry.vote': { 'Meta': {'object_name': 'Vote'}, 'answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['enquiry.Answer']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'session_key': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'votes'", 'null': 'True', 'to': "orm['auth.User']"}) } } complete_apps = ['enquiry']
true
true
f70fed5e21d5b67955d3f6a157574fc6d76f5cfd
3,434
py
Python
examples/sac.py
RushikeshJoshi4/rlkit
85db399565a45d8151f95c855f608ea9d827a0d0
[ "MIT" ]
null
null
null
examples/sac.py
RushikeshJoshi4/rlkit
85db399565a45d8151f95c855f608ea9d827a0d0
[ "MIT" ]
null
null
null
examples/sac.py
RushikeshJoshi4/rlkit
85db399565a45d8151f95c855f608ea9d827a0d0
[ "MIT" ]
null
null
null
from gym.envs.mujoco import HalfCheetahEnv import rlkit.torch.pytorch_util as ptu from rlkit.data_management.env_replay_buffer import EnvReplayBuffer from rlkit.envs.wrappers import NormalizedBoxEnv from rlkit.launchers.launcher_util import setup_logger from rlkit.samplers.data_collector import MdpPathCollector from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic from rlkit.torch.sac.sac import SACTrainer from rlkit.torch.networks import FlattenMlp from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm def experiment(variant): expl_env = NormalizedBoxEnv(HalfCheetahEnv()) eval_env = NormalizedBoxEnv(HalfCheetahEnv()) obs_dim = expl_env.observation_space.low.size action_dim = eval_env.action_space.low.size M = variant['layer_size'] qf1 = FlattenMlp( input_size=obs_dim + action_dim, output_size=1, hidden_sizes=[M, M], ) qf2 = FlattenMlp( input_size=obs_dim + action_dim, output_size=1, hidden_sizes=[M, M], ) target_qf1 = FlattenMlp( input_size=obs_dim + action_dim, output_size=1, hidden_sizes=[M, M], ) target_qf2 = FlattenMlp( input_size=obs_dim + action_dim, output_size=1, hidden_sizes=[M, M], ) policy = TanhGaussianPolicy( obs_dim=obs_dim, action_dim=action_dim, hidden_sizes=[M, M], ) eval_policy = MakeDeterministic(policy) eval_path_collector = MdpPathCollector( eval_env, eval_policy, ) expl_path_collector = MdpPathCollector( expl_env, policy, ) replay_buffer = EnvReplayBuffer( variant['replay_buffer_size'], expl_env, ) trainer = SACTrainer( env=eval_env, policy=policy, qf1=qf1, qf2=qf2, target_qf1=target_qf1, target_qf2=target_qf2, **variant['trainer_kwargs'] ) algorithm = TorchBatchRLAlgorithm( trainer=trainer, exploration_env=expl_env, evaluation_env=eval_env, exploration_data_collector=expl_path_collector, evaluation_data_collector=eval_path_collector, replay_buffer=replay_buffer, **variant['algorithm_kwargs'] ) algorithm.to(ptu.device) algorithm.train(start_epoch=0) if __name__ == "__main__": # parser = argparse.ArgumentParser() 
# parser.add_argument('--initial_epoch', action='store_true') # args = parser.parse_args() # noinspection PyTypeChecker variant = dict( algorithm="SAC", version="normal", layer_size=256, replay_buffer_size=int(1E6), algorithm_kwargs=dict( num_epochs=3000, num_eval_steps_per_epoch=5000, num_trains_per_train_loop=1000, num_expl_steps_per_train_loop=1000, min_num_steps_before_training=1000, max_path_length=1000, batch_size=256, initial_epoch=None ), trainer_kwargs=dict( discount=0.99, soft_target_tau=5e-3, target_update_period=1, policy_lr=3E-4, qf_lr=3E-4, reward_scale=1, use_automatic_entropy_tuning=True ), ) setup_logger('name-of-experiment', variant=variant) ptu.set_gpu_mode(True) # optionally set the GPU (default=False) experiment(variant)
29.350427
74
0.657833
from gym.envs.mujoco import HalfCheetahEnv import rlkit.torch.pytorch_util as ptu from rlkit.data_management.env_replay_buffer import EnvReplayBuffer from rlkit.envs.wrappers import NormalizedBoxEnv from rlkit.launchers.launcher_util import setup_logger from rlkit.samplers.data_collector import MdpPathCollector from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic from rlkit.torch.sac.sac import SACTrainer from rlkit.torch.networks import FlattenMlp from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm def experiment(variant): expl_env = NormalizedBoxEnv(HalfCheetahEnv()) eval_env = NormalizedBoxEnv(HalfCheetahEnv()) obs_dim = expl_env.observation_space.low.size action_dim = eval_env.action_space.low.size M = variant['layer_size'] qf1 = FlattenMlp( input_size=obs_dim + action_dim, output_size=1, hidden_sizes=[M, M], ) qf2 = FlattenMlp( input_size=obs_dim + action_dim, output_size=1, hidden_sizes=[M, M], ) target_qf1 = FlattenMlp( input_size=obs_dim + action_dim, output_size=1, hidden_sizes=[M, M], ) target_qf2 = FlattenMlp( input_size=obs_dim + action_dim, output_size=1, hidden_sizes=[M, M], ) policy = TanhGaussianPolicy( obs_dim=obs_dim, action_dim=action_dim, hidden_sizes=[M, M], ) eval_policy = MakeDeterministic(policy) eval_path_collector = MdpPathCollector( eval_env, eval_policy, ) expl_path_collector = MdpPathCollector( expl_env, policy, ) replay_buffer = EnvReplayBuffer( variant['replay_buffer_size'], expl_env, ) trainer = SACTrainer( env=eval_env, policy=policy, qf1=qf1, qf2=qf2, target_qf1=target_qf1, target_qf2=target_qf2, **variant['trainer_kwargs'] ) algorithm = TorchBatchRLAlgorithm( trainer=trainer, exploration_env=expl_env, evaluation_env=eval_env, exploration_data_collector=expl_path_collector, evaluation_data_collector=eval_path_collector, replay_buffer=replay_buffer, **variant['algorithm_kwargs'] ) algorithm.to(ptu.device) algorithm.train(start_epoch=0) if __name__ == "__main__": variant = dict( algorithm="SAC", 
version="normal", layer_size=256, replay_buffer_size=int(1E6), algorithm_kwargs=dict( num_epochs=3000, num_eval_steps_per_epoch=5000, num_trains_per_train_loop=1000, num_expl_steps_per_train_loop=1000, min_num_steps_before_training=1000, max_path_length=1000, batch_size=256, initial_epoch=None ), trainer_kwargs=dict( discount=0.99, soft_target_tau=5e-3, target_update_period=1, policy_lr=3E-4, qf_lr=3E-4, reward_scale=1, use_automatic_entropy_tuning=True ), ) setup_logger('name-of-experiment', variant=variant) ptu.set_gpu_mode(True) experiment(variant)
true
true
f70fed9caefe330512d53a8d653194961fe8edf4
412
py
Python
data_structure/recursion/example/example2.py
russellgao/algorithm
ad5e724d20a8492b8eba03fc0f24e4ff5964b3ea
[ "Apache-2.0" ]
3
2020-05-18T00:47:18.000Z
2020-06-08T01:57:13.000Z
data_structure/recursion/example/example2.py
russellgao/algorithm
ad5e724d20a8492b8eba03fc0f24e4ff5964b3ea
[ "Apache-2.0" ]
null
null
null
data_structure/recursion/example/example2.py
russellgao/algorithm
ad5e724d20a8492b8eba03fc0f24e4ff5964b3ea
[ "Apache-2.0" ]
null
null
null
def reverseLists(list1) : """ 原地用递归的方法反转list :param list1: :return: """ def helper(list1,left,right) : if left < right : list1[left] , list1[right] = list1[right] , list1[left] helper(list1,left + 1 , right -1) helper(list1,0,len(list1) - 1) if __name__ == "__main__" : list1 = ["a", "b", "c" , "d" , "d","e"] reverseLists(list1) print()
22.888889
67
0.524272
def reverseLists(list1) : def helper(list1,left,right) : if left < right : list1[left] , list1[right] = list1[right] , list1[left] helper(list1,left + 1 , right -1) helper(list1,0,len(list1) - 1) if __name__ == "__main__" : list1 = ["a", "b", "c" , "d" , "d","e"] reverseLists(list1) print()
true
true
f70fee4c9a0ab1f2c9ea59edc727df7573e770f1
2,531
py
Python
jina/clients/base/websocket.py
Satya2234/jina
bc4fa6ed8b2bdbc4885bf4a8f4b44722f0fc2deb
[ "Apache-2.0" ]
15,179
2020-04-28T10:23:56.000Z
2022-03-31T14:35:25.000Z
jina/clients/base/websocket.py
manavshah123/jina
f18b04eb82d18a3c554e2892bbae4b95fc0cb13e
[ "Apache-2.0" ]
3,912
2020-04-28T13:01:29.000Z
2022-03-31T14:36:46.000Z
jina/clients/base/websocket.py
manavshah123/jina
f18b04eb82d18a3c554e2892bbae4b95fc0cb13e
[ "Apache-2.0" ]
1,955
2020-04-28T10:50:49.000Z
2022-03-31T12:28:34.000Z
"""A module for the websockets-based Client for Jina.""" from typing import Callable, Optional from contextlib import nullcontext, AsyncExitStack from ..helper import callback_exec from .helper import WebsocketClientlet from ...importer import ImportExtensions from ..base import BaseClient, InputType from ...logging.profile import ProgressBar from ...peapods.stream.client import WebsocketClientStreamer class WebSocketBaseClient(BaseClient): """A Websocket Client.""" async def _get_results( self, inputs: InputType, on_done: Callable, on_error: Optional[Callable] = None, on_always: Optional[Callable] = None, **kwargs, ): """ :param inputs: the callable :param on_done: the callback for on_done :param on_error: the callback for on_error :param on_always: the callback for on_always :param kwargs: kwargs for _get_task_name and _get_requests :yields: generator over results """ with ImportExtensions(required=True): import aiohttp self.inputs = inputs request_iterator = self._get_requests(**kwargs) async with AsyncExitStack() as stack: try: cm1 = ( ProgressBar(total_length=self._inputs_length) if self.show_progress else nullcontext() ) p_bar = stack.enter_context(cm1) proto = 'wss' if self.args.https else 'ws' url = f'{proto}://{self.args.host}:{self.args.port}/' iolet = await stack.enter_async_context( WebsocketClientlet(url=url, logger=self.logger) ) streamer = WebsocketClientStreamer(self.args, iolet=iolet) async for response in streamer.stream(request_iterator): callback_exec( response=response, on_error=on_error, on_done=on_done, on_always=on_always, continue_on_error=self.continue_on_error, logger=self.logger, ) if self.show_progress: p_bar.update() yield response except aiohttp.ClientError as e: self.logger.error( f'Error while streaming response from websocket server {e!r}' )
35.647887
81
0.568945
from typing import Callable, Optional from contextlib import nullcontext, AsyncExitStack from ..helper import callback_exec from .helper import WebsocketClientlet from ...importer import ImportExtensions from ..base import BaseClient, InputType from ...logging.profile import ProgressBar from ...peapods.stream.client import WebsocketClientStreamer class WebSocketBaseClient(BaseClient): async def _get_results( self, inputs: InputType, on_done: Callable, on_error: Optional[Callable] = None, on_always: Optional[Callable] = None, **kwargs, ): with ImportExtensions(required=True): import aiohttp self.inputs = inputs request_iterator = self._get_requests(**kwargs) async with AsyncExitStack() as stack: try: cm1 = ( ProgressBar(total_length=self._inputs_length) if self.show_progress else nullcontext() ) p_bar = stack.enter_context(cm1) proto = 'wss' if self.args.https else 'ws' url = f'{proto}://{self.args.host}:{self.args.port}/' iolet = await stack.enter_async_context( WebsocketClientlet(url=url, logger=self.logger) ) streamer = WebsocketClientStreamer(self.args, iolet=iolet) async for response in streamer.stream(request_iterator): callback_exec( response=response, on_error=on_error, on_done=on_done, on_always=on_always, continue_on_error=self.continue_on_error, logger=self.logger, ) if self.show_progress: p_bar.update() yield response except aiohttp.ClientError as e: self.logger.error( f'Error while streaming response from websocket server {e!r}' )
true
true
f70fee9cb2b1dd8ce5770618cf71782ee9238674
367
py
Python
2_dictionaries/3_storing_dict_in_binary_file.py
nicholasjamesbaker/asd-class
c524971f800d649f4e18cc1e555c348029f6af1b
[ "MIT" ]
2
2022-01-17T13:13:23.000Z
2022-03-02T18:25:24.000Z
2_dictionaries/3_storing_dict_in_binary_file.py
nicholasjamesbaker/asd-class
c524971f800d649f4e18cc1e555c348029f6af1b
[ "MIT" ]
null
null
null
2_dictionaries/3_storing_dict_in_binary_file.py
nicholasjamesbaker/asd-class
c524971f800d649f4e18cc1e555c348029f6af1b
[ "MIT" ]
3
2022-01-12T17:58:44.000Z
2022-01-16T15:17:58.000Z
import pickle pranjal = { 'first_name': 'Pranjal', 'last_name': 'Patra', 'age': 35, 'NetWorth': 420.69, 'Vaccinated': True } with open("pranjal.bin", 'wb') as pranjal_file: pickle.dump(pranjal, pranjal_file) with open('pranjal.bin', 'rb') as pranjal_file: pranjal_from_bin_file = pickle.load(pranjal_file) print(pranjal_from_bin_file)
20.388889
53
0.675749
import pickle pranjal = { 'first_name': 'Pranjal', 'last_name': 'Patra', 'age': 35, 'NetWorth': 420.69, 'Vaccinated': True } with open("pranjal.bin", 'wb') as pranjal_file: pickle.dump(pranjal, pranjal_file) with open('pranjal.bin', 'rb') as pranjal_file: pranjal_from_bin_file = pickle.load(pranjal_file) print(pranjal_from_bin_file)
true
true
f70feefa39ebf9ab6d2dcc47e911d388f5415e72
855
py
Python
mysite/urls.py
kmayank39/kmayank
6ba102d239443949a2aa2e6f05fa8a977d92bc1e
[ "Apache-2.0" ]
null
null
null
mysite/urls.py
kmayank39/kmayank
6ba102d239443949a2aa2e6f05fa8a977d92bc1e
[ "Apache-2.0" ]
null
null
null
mysite/urls.py
kmayank39/kmayank
6ba102d239443949a2aa2e6f05fa8a977d92bc1e
[ "Apache-2.0" ]
null
null
null
"""mysite URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path from django.conf.urls import include urlpatterns = [ path('admin/', admin.site.urls), path('kumarmayank', include('home.urls')), ]
35.625
78
0.691228
from django.contrib import admin from django.urls import path from django.conf.urls import include urlpatterns = [ path('admin/', admin.site.urls), path('kumarmayank', include('home.urls')), ]
true
true
f70fef6228bd5c8669c5f12614b1af75010ac45f
1,255
py
Python
model/ops.py
530824679/YOLOv2
eff9ddbab58da970e7fb449cd1974fb810fd6023
[ "MIT" ]
null
null
null
model/ops.py
530824679/YOLOv2
eff9ddbab58da970e7fb449cd1974fb810fd6023
[ "MIT" ]
null
null
null
model/ops.py
530824679/YOLOv2
eff9ddbab58da970e7fb449cd1974fb810fd6023
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # -------------------------------------- # @Time : 2020/11/01 # @Author : Oscar Chen # @Email : 530824679@qq.com # @File : ops.py # Description :base operators. # -------------------------------------- import tensorflow as tf def leaky_relu(x): return tf.nn.leaky_relu(x, alpha=0.1, name='leaky_relu') def conv2d(inputs, filters_num, filters_size, pad_size=0, stride=1, batch_normalize=True, activation=leaky_relu, use_bias=False, is_train=True, name='conv2d'): if pad_size > 0: inputs = tf.pad(inputs, [[0,0], [pad_size, pad_size], [pad_size, pad_size],[0,0]]) out = tf.layers.conv2d(inputs, filters=filters_num, kernel_size=filters_size, strides=stride, padding='VALID', activation=None, use_bias=use_bias, name=name) if batch_normalize: out = tf.layers.batch_normalization(out, axis=-1, momentum=0.9, training=is_train, name=name+'_bn') if activation: out = activation(out) return out def maxpool(inputs, size=2, stride=2, name='maxpool'): with tf.name_scope(name): out = tf.layers.max_pooling2d(inputs, pool_size=size, strides=stride, padding='SAME') return out def reorg(inputs, stride): return tf.space_to_depth(inputs, block_size=stride)
35.857143
161
0.647012
import tensorflow as tf def leaky_relu(x): return tf.nn.leaky_relu(x, alpha=0.1, name='leaky_relu') def conv2d(inputs, filters_num, filters_size, pad_size=0, stride=1, batch_normalize=True, activation=leaky_relu, use_bias=False, is_train=True, name='conv2d'): if pad_size > 0: inputs = tf.pad(inputs, [[0,0], [pad_size, pad_size], [pad_size, pad_size],[0,0]]) out = tf.layers.conv2d(inputs, filters=filters_num, kernel_size=filters_size, strides=stride, padding='VALID', activation=None, use_bias=use_bias, name=name) if batch_normalize: out = tf.layers.batch_normalization(out, axis=-1, momentum=0.9, training=is_train, name=name+'_bn') if activation: out = activation(out) return out def maxpool(inputs, size=2, stride=2, name='maxpool'): with tf.name_scope(name): out = tf.layers.max_pooling2d(inputs, pool_size=size, strides=stride, padding='SAME') return out def reorg(inputs, stride): return tf.space_to_depth(inputs, block_size=stride)
true
true
f70ff03cf96fe013976425cf2112c255fa1ca390
15,840
py
Python
src/sentry/plugins/bases/issue2.py
AlexWayfer/sentry
ef935cda2b2e960bd602fda590540882d1b0712d
[ "BSD-3-Clause" ]
null
null
null
src/sentry/plugins/bases/issue2.py
AlexWayfer/sentry
ef935cda2b2e960bd602fda590540882d1b0712d
[ "BSD-3-Clause" ]
null
null
null
src/sentry/plugins/bases/issue2.py
AlexWayfer/sentry
ef935cda2b2e960bd602fda590540882d1b0712d
[ "BSD-3-Clause" ]
null
null
null
from __future__ import absolute_import import six from rest_framework.response import Response from social_auth.models import UserSocialAuth from django.conf import settings from django.conf.urls import url from django.core.urlresolvers import reverse from django.utils.html import format_html from sentry.api.serializers.models.plugin import PluginSerializer # api compat from sentry.exceptions import PluginError # NOQA from sentry.models import Activity, Event, GroupMeta from sentry.plugins import Plugin from sentry.plugins.base.configuration import react_plugin_config from sentry.plugins.endpoints import PluginGroupEndpoint from sentry.signals import issue_tracker_used from sentry.utils.auth import get_auth_providers from sentry.utils.http import absolute_uri from sentry.utils.safe import safe_execute # TODO(dcramer): remove this in favor of GroupEndpoint class IssueGroupActionEndpoint(PluginGroupEndpoint): view_method_name = None plugin = None def _handle(self, request, group, *args, **kwargs): GroupMeta.objects.populate_cache([group]) return getattr(self.plugin, self.view_method_name)(request, group, *args, **kwargs) class IssueTrackingPlugin2(Plugin): auth_provider = None allowed_actions = ('create', 'link', 'unlink') # we default this to None to support legacy integrations, but newer style # should explicitly call out what is stored issue_fields = None # issue_fields = frozenset(['id', 'title', 'url']) def configure(self, project, request): return react_plugin_config(self, project, request) def get_plugin_type(self): return 'issue-tracking' def has_project_conf(self): return True def get_group_body(self, request, group, event, **kwargs): result = [] for interface in six.itervalues(event.interfaces): output = safe_execute(interface.to_string, event, _with_transaction=False) if output: result.append(output) return '\n\n'.join(result) def get_group_description(self, request, group, event): output = [ absolute_uri(group.get_absolute_url()), ] body = 
self.get_group_body(request, group, event) if body: output.extend([ '', '```', body, '```', ]) return '\n'.join(output) def get_group_title(self, request, group, event): return event.error() def is_configured(self, request, project, **kwargs): raise NotImplementedError def get_group_urls(self): _urls = [] for action in self.allowed_actions: view_method_name = 'view_%s' % action _urls.append( url( r'^%s/' % action, PluginGroupEndpoint.as_view( view=getattr(self, view_method_name), ), ) ) return _urls def get_auth_for_user(self, user, **kwargs): """ Return a ``UserSocialAuth`` object for the given user based on this plugins ``auth_provider``. """ assert self.auth_provider, 'There is no auth provider configured for this plugin.' if not user.is_authenticated(): return None try: return UserSocialAuth.objects.filter(user=user, provider=self.auth_provider)[0] except IndexError: return None def needs_auth(self, request, project, **kwargs): """ Return ``True`` if the authenticated user needs to associate an auth service before performing actions with this plugin. 
""" if self.auth_provider is None: return False if not request.user.is_authenticated(): return True return not UserSocialAuth.objects.filter( user=request.user, provider=self.auth_provider ).exists() def get_new_issue_fields(self, request, group, event, **kwargs): """ If overriding, supported properties include 'readonly': true """ return [ { 'name': 'title', 'label': 'Title', 'default': self.get_group_title(request, group, event), 'type': 'text' }, { 'name': 'description', 'label': 'Description', 'default': self.get_group_description(request, group, event), 'type': 'textarea' } ] def get_link_existing_issue_fields(self, request, group, event, **kwargs): return [] def _get_issue_url_compat(self, group, issue, **kwargs): if self.issue_fields is None: return self.get_issue_url(group, issue['id']) return self.get_issue_url(group, issue) def _get_issue_label_compat(self, group, issue, **kwargs): if self.issue_fields is None: return self.get_issue_label(group, issue['id']) return self.get_issue_label(group, issue) def get_issue_url(self, group, issue, **kwargs): """ Given an issue context (issue_id string or issue dict) return an absolute URL to the issue's details page. """ raise NotImplementedError def get_issue_label(self, group, issue, **kwargs): """ Given an issue context (issue_id string or issue dict) return a string representing the issue. e.g. GitHub represents issues as GH-XXX """ if isinstance(issue, dict): return u'#{}'.format(issue['id']) return u'#{}'.format(issue) def create_issue(self, request, group, form_data, **kwargs): """ Creates the issue on the remote service and returns an issue ID. Returns ``{'id': '1', 'title': issue_title}`` """ raise NotImplementedError def link_issue(self, request, group, form_data, **kwargs): """ Can be overridden for any actions needed when linking issues (like adding a comment to an existing issue). 
Returns ``{'id': '1', 'title': issue_title}`` """ pass def has_auth_configured(self, **kwargs): if not self.auth_provider: return True return self.auth_provider in get_auth_providers() def validate_form(self, fields, form_data): errors = {} for field in fields: if field.get('required', True) and not field.get('readonly'): value = form_data.get(field['name']) if value is None or value == '': errors[field['name']] = u'%s is a required field.' % field['label'] return errors def get_issue_field_map(self): # XXX(dcramer): legacy support conf_key = self.get_conf_key() if self.issue_fields is None: return { 'id': u'{}:tid'.format(conf_key) } return { key: u'{}:issue_{}'.format( conf_key, key, ) for key in self.issue_fields } def build_issue(self, group): issue_field_map = self.get_issue_field_map() issue = {} for key, meta_name in six.iteritems(issue_field_map): issue[key] = GroupMeta.objects.get_value(group, meta_name, None) if not any(issue.values()): return None return issue def has_linked_issue(self, group): return bool(self.build_issue(group)) def unlink_issue(self, request, group, issue, **kwargs): issue_field_map = self.get_issue_field_map() for meta_name in six.itervalues(issue_field_map): GroupMeta.objects.unset_value(group, meta_name) return self.redirect(group.get_absolute_url()) def view_create(self, request, group, **kwargs): auth_errors = self.check_config_and_auth(request, group) if auth_errors: return Response(auth_errors, status=400) event = group.get_latest_event() if event is None: return Response({ 'message': 'Unable to create issues: there are ' 'no events associated with this group', }, status=400) Event.objects.bind_nodes([event], 'data') try: fields = self.get_new_issue_fields(request, group, event, **kwargs) except Exception as e: return self.handle_api_error(e) if request.method == 'GET': return Response(fields) errors = self.validate_form(fields, request.DATA) if errors: return Response({'error_type': 'validation', 'errors': errors}, status=400) 
try: issue = self.create_issue( group=group, form_data=request.DATA, request=request, ) except Exception as e: return self.handle_api_error(e) if not isinstance(issue, dict): issue = {'id': issue} issue_field_map = self.get_issue_field_map() for key, meta_name in six.iteritems(issue_field_map): if key in issue: GroupMeta.objects.set_value(group, meta_name, issue[key]) else: GroupMeta.objects.unset_value(group, meta_name) issue_information = { 'title': issue.get('title') or request.DATA.get('title') or self._get_issue_label_compat(group, issue), 'provider': self.get_title(), 'location': self._get_issue_url_compat(group, issue), 'label': self._get_issue_label_compat(group, issue), } Activity.objects.create( project=group.project, group=group, type=Activity.CREATE_ISSUE, user=request.user, data=issue_information, ) issue_tracker_used.send_robust( plugin=self, project=group.project, user=request.user, sender=type(self) ) return Response({'issue_url': self.get_issue_url(group, issue), 'link': self._get_issue_url_compat(group, issue), 'label': self._get_issue_label_compat(group, issue), 'id': issue['id']}) def view_link(self, request, group, **kwargs): auth_errors = self.check_config_and_auth(request, group) if auth_errors: return Response(auth_errors, status=400) event = group.get_latest_event() if event is None: return Response({ 'message': 'Unable to create issues: there are ' 'no events associated with this group', }, status=400) Event.objects.bind_nodes([event], 'data') try: fields = self.get_link_existing_issue_fields(request, group, event, **kwargs) except Exception as e: return self.handle_api_error(e) if request.method == 'GET': return Response(fields) errors = self.validate_form(fields, request.DATA) if errors: return Response({'error_type': 'validation', 'errors': errors}, status=400) try: issue = self.link_issue( group=group, form_data=request.DATA, request=request, ) or {} except Exception as e: return self.handle_api_error(e) # HACK(dcramer): maintain data 
for legacy issues if 'id' not in issue and 'issue_id' in request.DATA: issue['id'] = request.DATA['issue_id'] issue_field_map = self.get_issue_field_map() for key, meta_name in six.iteritems(issue_field_map): if key in issue: GroupMeta.objects.set_value(group, meta_name, issue[key]) else: GroupMeta.objects.unset_value(group, meta_name) issue_information = { 'title': issue.get('title') or self._get_issue_label_compat(group, issue), 'provider': self.get_title(), 'location': self._get_issue_url_compat(group, issue), 'label': self._get_issue_label_compat(group, issue), } Activity.objects.create( project=group.project, group=group, type=Activity.CREATE_ISSUE, user=request.user, data=issue_information, ) return Response({'message': 'Successfully linked issue.', 'link': self._get_issue_url_compat(group, issue), 'label': self._get_issue_label_compat(group, issue), 'id': issue['id']}) def view_unlink(self, request, group, **kwargs): auth_errors = self.check_config_and_auth(request, group) if auth_errors: return Response(auth_errors, status=400) issue = self.build_issue(group) if issue and 'unlink' in self.allowed_actions: self.unlink_issue(request, group, issue) return Response({'message': 'Successfully unlinked issue.'}) return Response({'message': 'No issues to unlink.'}, status=400) def plugin_issues(self, request, group, plugin_issues, **kwargs): if not self.is_configured(request=request, project=group.project): return plugin_issues item = { 'slug': self.slug, 'allowed_actions': self.allowed_actions, 'title': self.get_title() } issue = self.build_issue(group) if issue: item['issue'] = { 'issue_id': issue.get('id'), 'url': self._get_issue_url_compat(group, issue), 'label': self._get_issue_label_compat(group, issue), } item.update(PluginSerializer(group.project).serialize(self, None, request.user)) plugin_issues.append(item) return plugin_issues def get_config(self, *args, **kwargs): # TODO(dcramer): update existing plugins to just use get_config # TODO(dcramer): remove 
request kwarg after sentry-plugins has been # updated kwargs.setdefault('request', None) return self.get_configure_plugin_fields(*args, **kwargs) def check_config_and_auth(self, request, group): has_auth_configured = self.has_auth_configured() if not (has_auth_configured and self.is_configured( project=group.project, request=request)): if self.auth_provider: required_auth_settings = settings.AUTH_PROVIDERS[self.auth_provider] else: required_auth_settings = None return { 'error_type': 'config', 'has_auth_configured': has_auth_configured, 'auth_provider': self.auth_provider, 'required_auth_settings': required_auth_settings, } if self.needs_auth(project=group.project, request=request): return { 'error_type': 'auth', 'auth_url': reverse('socialauth_associate', args=[self.auth_provider]) } # TODO: should we get rid of this (move it to react?) def tags(self, request, group, tag_list, **kwargs): if not self.is_configured(request=request, project=group.project): return tag_list issue = self.build_issue(group) if not issue: return tag_list tag_list.append( format_html( '<a href="{}">{}</a>', self._get_issue_url_compat(group, issue), self._get_issue_label_compat(group, issue), ) ) return tag_list IssuePlugin2 = IssueTrackingPlugin2
35.357143
115
0.594192
from __future__ import absolute_import import six from rest_framework.response import Response from social_auth.models import UserSocialAuth from django.conf import settings from django.conf.urls import url from django.core.urlresolvers import reverse from django.utils.html import format_html from sentry.api.serializers.models.plugin import PluginSerializer from sentry.exceptions import PluginError from sentry.models import Activity, Event, GroupMeta from sentry.plugins import Plugin from sentry.plugins.base.configuration import react_plugin_config from sentry.plugins.endpoints import PluginGroupEndpoint from sentry.signals import issue_tracker_used from sentry.utils.auth import get_auth_providers from sentry.utils.http import absolute_uri from sentry.utils.safe import safe_execute class IssueGroupActionEndpoint(PluginGroupEndpoint): view_method_name = None plugin = None def _handle(self, request, group, *args, **kwargs): GroupMeta.objects.populate_cache([group]) return getattr(self.plugin, self.view_method_name)(request, group, *args, **kwargs) class IssueTrackingPlugin2(Plugin): auth_provider = None allowed_actions = ('create', 'link', 'unlink') issue_fields = None def configure(self, project, request): return react_plugin_config(self, project, request) def get_plugin_type(self): return 'issue-tracking' def has_project_conf(self): return True def get_group_body(self, request, group, event, **kwargs): result = [] for interface in six.itervalues(event.interfaces): output = safe_execute(interface.to_string, event, _with_transaction=False) if output: result.append(output) return '\n\n'.join(result) def get_group_description(self, request, group, event): output = [ absolute_uri(group.get_absolute_url()), ] body = self.get_group_body(request, group, event) if body: output.extend([ '', '```', body, '```', ]) return '\n'.join(output) def get_group_title(self, request, group, event): return event.error() def is_configured(self, request, project, **kwargs): raise 
NotImplementedError def get_group_urls(self): _urls = [] for action in self.allowed_actions: view_method_name = 'view_%s' % action _urls.append( url( r'^%s/' % action, PluginGroupEndpoint.as_view( view=getattr(self, view_method_name), ), ) ) return _urls def get_auth_for_user(self, user, **kwargs): assert self.auth_provider, 'There is no auth provider configured for this plugin.' if not user.is_authenticated(): return None try: return UserSocialAuth.objects.filter(user=user, provider=self.auth_provider)[0] except IndexError: return None def needs_auth(self, request, project, **kwargs): if self.auth_provider is None: return False if not request.user.is_authenticated(): return True return not UserSocialAuth.objects.filter( user=request.user, provider=self.auth_provider ).exists() def get_new_issue_fields(self, request, group, event, **kwargs): return [ { 'name': 'title', 'label': 'Title', 'default': self.get_group_title(request, group, event), 'type': 'text' }, { 'name': 'description', 'label': 'Description', 'default': self.get_group_description(request, group, event), 'type': 'textarea' } ] def get_link_existing_issue_fields(self, request, group, event, **kwargs): return [] def _get_issue_url_compat(self, group, issue, **kwargs): if self.issue_fields is None: return self.get_issue_url(group, issue['id']) return self.get_issue_url(group, issue) def _get_issue_label_compat(self, group, issue, **kwargs): if self.issue_fields is None: return self.get_issue_label(group, issue['id']) return self.get_issue_label(group, issue) def get_issue_url(self, group, issue, **kwargs): raise NotImplementedError def get_issue_label(self, group, issue, **kwargs): if isinstance(issue, dict): return u'#{}'.format(issue['id']) return u'#{}'.format(issue) def create_issue(self, request, group, form_data, **kwargs): raise NotImplementedError def link_issue(self, request, group, form_data, **kwargs): pass def has_auth_configured(self, **kwargs): if not self.auth_provider: return True return 
self.auth_provider in get_auth_providers() def validate_form(self, fields, form_data): errors = {} for field in fields: if field.get('required', True) and not field.get('readonly'): value = form_data.get(field['name']) if value is None or value == '': errors[field['name']] = u'%s is a required field.' % field['label'] return errors def get_issue_field_map(self): conf_key = self.get_conf_key() if self.issue_fields is None: return { 'id': u'{}:tid'.format(conf_key) } return { key: u'{}:issue_{}'.format( conf_key, key, ) for key in self.issue_fields } def build_issue(self, group): issue_field_map = self.get_issue_field_map() issue = {} for key, meta_name in six.iteritems(issue_field_map): issue[key] = GroupMeta.objects.get_value(group, meta_name, None) if not any(issue.values()): return None return issue def has_linked_issue(self, group): return bool(self.build_issue(group)) def unlink_issue(self, request, group, issue, **kwargs): issue_field_map = self.get_issue_field_map() for meta_name in six.itervalues(issue_field_map): GroupMeta.objects.unset_value(group, meta_name) return self.redirect(group.get_absolute_url()) def view_create(self, request, group, **kwargs): auth_errors = self.check_config_and_auth(request, group) if auth_errors: return Response(auth_errors, status=400) event = group.get_latest_event() if event is None: return Response({ 'message': 'Unable to create issues: there are ' 'no events associated with this group', }, status=400) Event.objects.bind_nodes([event], 'data') try: fields = self.get_new_issue_fields(request, group, event, **kwargs) except Exception as e: return self.handle_api_error(e) if request.method == 'GET': return Response(fields) errors = self.validate_form(fields, request.DATA) if errors: return Response({'error_type': 'validation', 'errors': errors}, status=400) try: issue = self.create_issue( group=group, form_data=request.DATA, request=request, ) except Exception as e: return self.handle_api_error(e) if not isinstance(issue, 
dict): issue = {'id': issue} issue_field_map = self.get_issue_field_map() for key, meta_name in six.iteritems(issue_field_map): if key in issue: GroupMeta.objects.set_value(group, meta_name, issue[key]) else: GroupMeta.objects.unset_value(group, meta_name) issue_information = { 'title': issue.get('title') or request.DATA.get('title') or self._get_issue_label_compat(group, issue), 'provider': self.get_title(), 'location': self._get_issue_url_compat(group, issue), 'label': self._get_issue_label_compat(group, issue), } Activity.objects.create( project=group.project, group=group, type=Activity.CREATE_ISSUE, user=request.user, data=issue_information, ) issue_tracker_used.send_robust( plugin=self, project=group.project, user=request.user, sender=type(self) ) return Response({'issue_url': self.get_issue_url(group, issue), 'link': self._get_issue_url_compat(group, issue), 'label': self._get_issue_label_compat(group, issue), 'id': issue['id']}) def view_link(self, request, group, **kwargs): auth_errors = self.check_config_and_auth(request, group) if auth_errors: return Response(auth_errors, status=400) event = group.get_latest_event() if event is None: return Response({ 'message': 'Unable to create issues: there are ' 'no events associated with this group', }, status=400) Event.objects.bind_nodes([event], 'data') try: fields = self.get_link_existing_issue_fields(request, group, event, **kwargs) except Exception as e: return self.handle_api_error(e) if request.method == 'GET': return Response(fields) errors = self.validate_form(fields, request.DATA) if errors: return Response({'error_type': 'validation', 'errors': errors}, status=400) try: issue = self.link_issue( group=group, form_data=request.DATA, request=request, ) or {} except Exception as e: return self.handle_api_error(e) if 'id' not in issue and 'issue_id' in request.DATA: issue['id'] = request.DATA['issue_id'] issue_field_map = self.get_issue_field_map() for key, meta_name in six.iteritems(issue_field_map): if key 
in issue: GroupMeta.objects.set_value(group, meta_name, issue[key]) else: GroupMeta.objects.unset_value(group, meta_name) issue_information = { 'title': issue.get('title') or self._get_issue_label_compat(group, issue), 'provider': self.get_title(), 'location': self._get_issue_url_compat(group, issue), 'label': self._get_issue_label_compat(group, issue), } Activity.objects.create( project=group.project, group=group, type=Activity.CREATE_ISSUE, user=request.user, data=issue_information, ) return Response({'message': 'Successfully linked issue.', 'link': self._get_issue_url_compat(group, issue), 'label': self._get_issue_label_compat(group, issue), 'id': issue['id']}) def view_unlink(self, request, group, **kwargs): auth_errors = self.check_config_and_auth(request, group) if auth_errors: return Response(auth_errors, status=400) issue = self.build_issue(group) if issue and 'unlink' in self.allowed_actions: self.unlink_issue(request, group, issue) return Response({'message': 'Successfully unlinked issue.'}) return Response({'message': 'No issues to unlink.'}, status=400) def plugin_issues(self, request, group, plugin_issues, **kwargs): if not self.is_configured(request=request, project=group.project): return plugin_issues item = { 'slug': self.slug, 'allowed_actions': self.allowed_actions, 'title': self.get_title() } issue = self.build_issue(group) if issue: item['issue'] = { 'issue_id': issue.get('id'), 'url': self._get_issue_url_compat(group, issue), 'label': self._get_issue_label_compat(group, issue), } item.update(PluginSerializer(group.project).serialize(self, None, request.user)) plugin_issues.append(item) return plugin_issues def get_config(self, *args, **kwargs): kwargs.setdefault('request', None) return self.get_configure_plugin_fields(*args, **kwargs) def check_config_and_auth(self, request, group): has_auth_configured = self.has_auth_configured() if not (has_auth_configured and self.is_configured( project=group.project, request=request)): if 
self.auth_provider: required_auth_settings = settings.AUTH_PROVIDERS[self.auth_provider] else: required_auth_settings = None return { 'error_type': 'config', 'has_auth_configured': has_auth_configured, 'auth_provider': self.auth_provider, 'required_auth_settings': required_auth_settings, } if self.needs_auth(project=group.project, request=request): return { 'error_type': 'auth', 'auth_url': reverse('socialauth_associate', args=[self.auth_provider]) } def tags(self, request, group, tag_list, **kwargs): if not self.is_configured(request=request, project=group.project): return tag_list issue = self.build_issue(group) if not issue: return tag_list tag_list.append( format_html( '<a href="{}">{}</a>', self._get_issue_url_compat(group, issue), self._get_issue_label_compat(group, issue), ) ) return tag_list IssuePlugin2 = IssueTrackingPlugin2
true
true
f70ff08456cae98acf4a012e994d88495cb533a7
2,886
py
Python
nova/api/openstack/compute/schemas/volumes.py
ebalduf/nova-backports
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
[ "Apache-2.0" ]
5
2016-04-28T16:20:38.000Z
2021-04-25T11:19:03.000Z
nova/api/openstack/compute/schemas/volumes.py
ebalduf/nova-backports
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
[ "Apache-2.0" ]
11
2017-06-19T01:28:55.000Z
2017-06-23T02:01:47.000Z
nova/api/openstack/compute/schemas/volumes.py
ebalduf/nova-backports
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
[ "Apache-2.0" ]
5
2020-04-08T20:24:45.000Z
2020-10-05T19:02:13.000Z
# Copyright 2014 IBM Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'volume': { 'type': 'object', 'properties': { 'volume_type': {'type': 'string'}, 'metadata': {'type': 'object'}, 'snapshot_id': {'type': 'string'}, 'size': { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', 'minimum': 1 }, 'availability_zone': {'type': 'string'}, 'display_name': {'type': 'string'}, 'display_description': {'type': 'string'}, }, 'required': ['size'], 'additionalProperties': False, }, }, 'required': ['volume'], 'additionalProperties': False, } snapshot_create = { 'type': 'object', 'properties': { 'snapshot': { 'type': 'object', 'properties': { 'volume_id': {'type': 'string'}, 'force': parameter_types.boolean, 'display_name': {'type': 'string'}, 'display_description': {'type': 'string'}, }, 'required': ['volume_id'], 'additionalProperties': False, }, }, 'required': ['snapshot'], 'additionalProperties': False, } create_volume_attachment = { 'type': 'object', 'properties': { 'volumeAttachment': { 'type': 'object', 'properties': { 'volumeId': parameter_types.volume_id, 'device': { 'type': ['string', 'null'], # NOTE: The validation pattern from match_device() in # nova/block_device.py. 
'pattern': '(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$' } }, 'required': ['volumeId'], 'additionalProperties': False, }, }, 'required': ['volumeAttachment'], 'additionalProperties': False, } update_volume_attachment = copy.deepcopy(create_volume_attachment) del update_volume_attachment['properties']['volumeAttachment'][ 'properties']['device']
32.066667
78
0.517672
import copy from nova.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'volume': { 'type': 'object', 'properties': { 'volume_type': {'type': 'string'}, 'metadata': {'type': 'object'}, 'snapshot_id': {'type': 'string'}, 'size': { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', 'minimum': 1 }, 'availability_zone': {'type': 'string'}, 'display_name': {'type': 'string'}, 'display_description': {'type': 'string'}, }, 'required': ['size'], 'additionalProperties': False, }, }, 'required': ['volume'], 'additionalProperties': False, } snapshot_create = { 'type': 'object', 'properties': { 'snapshot': { 'type': 'object', 'properties': { 'volume_id': {'type': 'string'}, 'force': parameter_types.boolean, 'display_name': {'type': 'string'}, 'display_description': {'type': 'string'}, }, 'required': ['volume_id'], 'additionalProperties': False, }, }, 'required': ['snapshot'], 'additionalProperties': False, } create_volume_attachment = { 'type': 'object', 'properties': { 'volumeAttachment': { 'type': 'object', 'properties': { 'volumeId': parameter_types.volume_id, 'device': { 'type': ['string', 'null'], 'pattern': '(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$' } }, 'required': ['volumeId'], 'additionalProperties': False, }, }, 'required': ['volumeAttachment'], 'additionalProperties': False, } update_volume_attachment = copy.deepcopy(create_volume_attachment) del update_volume_attachment['properties']['volumeAttachment'][ 'properties']['device']
true
true
f70ff0a6e57c3271138f94f558d78ab6c1ab11ea
37,879
py
Python
safe_control_gym/controllers/mpc/gp_mpc_hexa.py
thaipduong/safe-control-gym
69f8f627d232d50813a7fff6113dd6d5caccf930
[ "MIT" ]
null
null
null
safe_control_gym/controllers/mpc/gp_mpc_hexa.py
thaipduong/safe-control-gym
69f8f627d232d50813a7fff6113dd6d5caccf930
[ "MIT" ]
null
null
null
safe_control_gym/controllers/mpc/gp_mpc_hexa.py
thaipduong/safe-control-gym
69f8f627d232d50813a7fff6113dd6d5caccf930
[ "MIT" ]
null
null
null
"""Model Predictive Control with a Gaussian Process model. Based on: * L. Hewing, J. Kabzan and M. N. Zeilinger, "Cautious Model Predictive Control Using Gaussian Process Regression," in IEEE Transactions on Control Systems Technology, vol. 28, no. 6, pp. 2736-2743, Nov. 2020, doi: 10.1109/TCST.2019.2949757. Implementation details: 1. The previous time step MPC solution is used to compute the set constraints and GP dynamics rollout. Here, the dynamics are rolled out using the Mean Equivelence method, the fastest, but least accurate. 2. The GP is approximated using the Fully Independent Training Conditional (FITC) outlined in * J. Quinonero-Candela, C. E. Rasmussen, and R. Herbrich, “A unifying view of sparse approximate Gaussian process regression,” Journal of Machine Learning Research, vol. 6, pp. 1935–1959, 2005. https://www.jmlr.org/papers/volume6/quinonero-candela05a/quinonero-candela05a.pdf * E. Snelson and Z. Ghahramani, “Sparse gaussian processes using pseudo-inputs,” in Advances in Neural Information Processing Systems, Y. Weiss, B. Scholkopf, and J. C. Platt, Eds., 2006, pp. 1257–1264. and the inducing points are the previous MPC solution. 3. Each dimension of the learned error dynamics is an independent Zero Mean SE Kernel GP. """ import scipy import numpy as np import casadi as cs import time import torch import gpytorch from copy import deepcopy from skopt.sampler import Lhs from functools import partial from sklearn.model_selection import train_test_split from safe_control_gym.controllers.mpc.linear_mpc import LinearMPC, MPC from safe_control_gym.controllers.mpc.mpc_utils import discretize_linear_system from safe_control_gym.controllers.mpc.gp_utils import GaussianProcessCollection, ZeroMeanIndependentGPModel, covSEard from safe_control_gym.envs.benchmark_env import Task class GPMPC(MPC): """MPC with Gaussian Process as dynamics residual. 
""" def __init__( self, env_func, seed: int = 1337, horizon: int = 5, q_mpc: list = [1], r_mpc: list = [1], additional_constraints: list = None, use_prev_start: bool = True, train_iterations: int = 800, validation_iterations: int = 200, optimization_iterations: list = None, learning_rate: list = None, normalize_training_data: bool = False, use_gpu: bool = False, gp_model_path: str = None, prob: float = 0.955, initial_rollout_std: float = 0.005, input_mask: list = None, target_mask: list = None, gp_approx: str = 'mean_eq', sparse_gp: bool = False, online_learning: bool = False, inertial_prop: list = [1.0], prior_param_coeff: float = 1.0, output_dir: str = "results/temp", **kwargs ): """Initialize GP-MPC. Args: env_func (gym.Env): functionalized initialization of the environment. seed (int): random seed. horizon (int): MPC planning horizon. Q, R (np.array): cost weight matrix. use_prev_start (bool): Warmstart mpc with the previous solution. train_iterations (int): the number of training examples to use for each dimension of the GP. validation_iterations (int): the number of points to use use for the test set during training. optimization_iterations (list): the number of optimization iterations for each dimension of the GP. learning_rate (list): the learning rate for training each dimension of the GP. normalize_training_data (bool): Normalize the training data. use_gpu (bool): use GPU while training the gp. gp_model_path (str): path to a pretrained GP model. If None, will train a new one. output_dir (str): directory to store model and results. prob (float): desired probabilistic safety level. initial_rollout_std (float): the initial std (across all states) for the mean_eq rollout. inertial_prop (list): to initialize the inertial properties of the prior model. prior_param_coeff (float): constant multiplying factor to adjust the prior model intertial properties. input_mask (list): list of which input dimensions to use in GP model. If None, all are used. 
target_mask (list): list of which output dimensions to use in the GP model. If None, all are used. gp_approx (str): 'mean_eq' used mean equivalence rollout for the GP dynamics. Only one that works currently. online_learning (bool): if true, GP kernel values will be updated using past trajectory values. additional_constraints (list): list of Constraint objects defining additional constraints to be used. """ print("############################################### GP-MPC hexa ###########################################") self.prior_env_func = partial(env_func, inertial_prop=np.array(inertial_prop)*prior_param_coeff) self.prior_param_coeff = prior_param_coeff # Initialize the method using linear MPC. self.prior_ctrl = LinearMPC( self.prior_env_func, horizon=horizon, q_mpc=q_mpc, r_mpc=r_mpc, use_prev_start=use_prev_start, output_dir=output_dir, additional_constraints=additional_constraints, ) self.prior_ctrl.reset() super().__init__( self.prior_env_func, horizon=horizon, q_mpc=q_mpc, r_mpc=r_mpc, use_prev_start=use_prev_start, output_dir=output_dir, additional_constraints=additional_constraints, **kwargs) # Setup environments. self.env_func = env_func self.env = env_func(randomized_init=False) self.env_training = env_func(randomized_init=True) # No training data accumulated yet so keep the dynamics function as linear prior. self.train_data = None self.prior_dynamics_func = self.prior_ctrl.linear_dynamics_func # GP and training parameters. 
self.gaussian_process = None self.train_iterations = train_iterations self.validation_iterations = validation_iterations self.optimization_iterations = optimization_iterations self.learning_rate = learning_rate self.gp_model_path = gp_model_path self.normalize_training_data = normalize_training_data self.use_gpu = use_gpu self.seed = seed self.prob = prob self.sparse_gp = sparse_gp if input_mask is None: self.input_mask = np.arange(self.model.nx + self.model.nu).tolist() else: self.input_mask = input_mask if target_mask is None: self.target_mask = np.arange(self.model.nx).tolist() else: self.target_mask = target_mask Bd = np.eye(self.model.nx) self.Bd = Bd[:, self.target_mask] self.gp_approx = gp_approx self.online_learning = online_learning self.last_obs = None self.last_action = None self.initial_rollout_std = initial_rollout_std def setup_prior_dynamics(self): """Computes the LQR gain used for propograting GP uncertainty from the prior model dynamics. """ # Determine the LQR gain K to propogate the input uncertainty (doing this at each timestep will increase complexity). A, B = discretize_linear_system(self.prior_ctrl.dfdx, self.prior_ctrl.dfdu, self.dt) Q_lqr = self.Q R_lqr = self.R P = scipy.linalg.solve_discrete_are(A, B, Q_lqr, R_lqr) btp = np.dot(B.T, P) self.lqr_gain = -np.dot(np.linalg.inv(self.R + np.dot(btp, B)), np.dot(btp, A)) self.discrete_dfdx = A self.discrete_dfdu = B def set_gp_dynamics_func(self): """Updates symbolic dynamics. With actual control frequency, initialize GP model and add to the combined dynamics. """ self.setup_prior_dynamics() # Compute the probabilistic constraint inverse CDF according to section III.D.b in Hewing 2019. self.inverse_cdf = scipy.stats.norm.ppf(1 - (1/self.model.nx - (self.prob + 1)/(2*self.model.nx))) self.create_sparse_GP_machinery() def create_sparse_GP_machinery(self): """This setups the gaussian process approximations for FITC formulation. 
""" lengthscales, signal_var, noise_var, gp_K_plus_noise = self.gaussian_process.get_hyperparameters(as_numpy=True) self.length_scales = lengthscales.squeeze() self.signal_var = signal_var.squeeze() self.noise_var = noise_var.squeeze() self.gp_K_plus_noise = gp_K_plus_noise Nx = len(self.input_mask) Ny = len(self.target_mask) N = self.gaussian_process.n_training_samples # Create CasADI function for computing the kernel K_z_zind with parameters for z, z_ind, length scales and signal variance. # We need the CasADI version of this so that it can by symbolically differentiated in in the MPC optimization. z1 = cs.SX.sym('z1', Nx) z2 = cs.SX.sym('z2', Nx) ell_s = cs.SX.sym('ell', Nx) sf2_s = cs.SX.sym('sf2') z_ind = cs.SX.sym('z_ind', self.T, Nx) covSE = cs.Function('covSE', [z1, z2, ell_s, sf2_s], [covSEard(z1, z2, ell_s, sf2_s)]) ks = cs.SX.zeros(1, self.T) for i in range(self.T): ks[i] = covSE(z1, z_ind[i, :], ell_s, sf2_s) ks_func = cs.Function('K_s', [z1, z_ind, ell_s, sf2_s], [ks]) K_z_zind = cs.SX.zeros(Ny, self.T) for i in range(Ny): K_z_zind[i,:] = ks_func(z1, z_ind, self.length_scales[i,:], self.signal_var[i]) # This will be mulitplied by the mean_post_factor computed at every time step to compute the approximate mean. self.K_z_zind_func = cs.Function('K_z_zind', [z1, z_ind],[K_z_zind],['z1', 'z2'],['K']) def preprocess_training_data(self, x_seq, u_seq, x_next_seq ): """Converts trajectory data for GP trianing. Args: x_seq (list): state sequence of np.array (nx,). u_seq (list): action sequence of np.array (nu,). x_next_seq (list): next state sequence of np.array (nx,). Returns: np.array: inputs for GP training, (N, nx+nu). np.array: targets for GP training, (N, nx). """ # Get the predicted dynamics. This is a linear prior, thus we need to account for the fact that # it is linearized about an eq using self.X_GOAL and self.U_GOAL. 
x_pred_seq = self.prior_dynamics_func(x0=x_seq.T - self.prior_ctrl.X_LIN[:, None], p=u_seq.T - self.prior_ctrl.U_LIN[:,None])['xf'].toarray() targets = (x_next_seq.T - (x_pred_seq+self.prior_ctrl.X_LIN[:,None])).transpose() # (N, nx). inputs = np.hstack([x_seq, u_seq]) # (N, nx+nu). return inputs, targets def precompute_probabilistic_limits(self, print_sets=True ): """This updates the constraint value limits to account for the uncertainty in the dynamics rollout. Args: print_sets (bool): True to print out the sets for debugging purposes. """ nx, nu = self.model.nx, self.model.nu T = self.T state_covariances = np.zeros((self.T+1, nx, nx)) input_covariances = np.zeros((self.T, nu, nu)) # Initilize lists for the tightening of each constraint. state_constraint_set = [] for state_constraint in self.constraints.state_constraints: state_constraint_set.append(np.zeros((state_constraint.num_constraints, T+1))) input_constraint_set = [] for input_constraint in self.constraints.input_constraints: input_constraint_set.append(np.zeros((input_constraint.num_constraints, T))) if self.x_prev is not None and self.u_prev is not None: cov_x = np.diag([self.initial_rollout_std**2]*nx) for i in range(T): state_covariances[i] = cov_x cov_u = self.lqr_gain @ cov_x @ self.lqr_gain.T input_covariances[i] = cov_u cov_xu = cov_x @ self.lqr_gain.T z = np.hstack((self.x_prev[:,i], self.u_prev[:,i])) if self.gp_approx == 'taylor': raise NotImplementedError("Taylor GP approximation is currently not working.") elif self.gp_approx == 'mean_eq': _, cov_d_tensor = self.gaussian_process.predict(z[None,:], return_pred=False) cov_d = cov_d_tensor.detach().numpy() else: raise NotImplementedError('gp_approx method is incorrect or not implemented') # Loop through input constraints and tighten by the required ammount. 
for ui, input_constraint in enumerate(self.constraints.input_constraints): input_constraint_set[ui][:, i] = -1*self.inverse_cdf * \ np.absolute(input_constraint.A) @ np.sqrt(np.diag(cov_u)) for si, state_constraint in enumerate(self.constraints.state_constraints): state_constraint_set[si][:, i] = -1*self.inverse_cdf * \ np.absolute(state_constraint.A) @ np.sqrt(np.diag(cov_x)) if self.gp_approx == 'taylor': raise NotImplementedError("Taylor GP rollout not implemented.") elif self.gp_approx == 'mean_eq': # Compute the next step propogated state covariance using mean equivilence. cov_x = self.discrete_dfdx @ cov_x @ self.discrete_dfdx.T + \ self.discrete_dfdx @ cov_xu @ self.discrete_dfdu.T + \ self.discrete_dfdu @ cov_xu.T @ self.discrete_dfdx.T + \ self.discrete_dfdu @ cov_u @ self.discrete_dfdu.T + \ self.Bd @ cov_d @ self.Bd.T else: raise NotImplementedError('gp_approx method is incorrect or not implemented') # Udate Final covariance. for si, state_constraint in enumerate(self.constraints.state_constraints): state_constraint_set[si][:,-1] = -1 * self.inverse_cdf * \ np.absolute(state_constraint.A) @ np.sqrt(np.diag(cov_x)) state_covariances[-1] = cov_x if print_sets: print("Probabilistic State Constraint values along Horizon:") print(state_constraint_set) print("Probabilistic Input Constraint values along Horizon:") print(input_constraint_set) self.results_dict['input_constraint_set'].append(input_constraint_set) self.results_dict['state_constraint_set'].append(state_constraint_set) self.results_dict['state_horizon_cov'].append(state_covariances) self.results_dict['input_horizon_cov'].append(input_covariances) return state_constraint_set, input_constraint_set def precompute_sparse_gp_values(self): """Uses the last MPC solution to precomupte values associated with the FITC GP approximation. 
""" n_data_points = self.gaussian_process.n_training_samples dim_gp_inputs = len(self.input_mask) dim_gp_outputs = len(self.target_mask) inputs = self.train_data['train_inputs'] targets = self.train_data['train_targets'] # Get the inducing points. if self.x_prev is not None and self.u_prev is not None: # Use the previous MPC solution as in Hewing 2019. z_ind = np.hstack((self.x_prev[:,:-1].T, self.u_prev.T)) z_ind = z_ind[:,self.input_mask] else: # If there is no previous solution. Choose T random training set points. inds = self.env.np_random.choice(range(n_data_points), size=self.T) #z_ind = self.data_inputs[inds][:, self.input_mask] z_ind = inputs[inds][:, self.input_mask] K_zind_zind = self.gaussian_process.kernel(torch.Tensor(z_ind).double()) K_zind_zind_inv = self.gaussian_process.kernel_inv(torch.Tensor(z_ind).double()) K_x_zind = self.gaussian_process.kernel(torch.from_numpy(inputs[:, self.input_mask]).double(), torch.Tensor(z_ind).double()) Q_X_X = K_x_zind @ K_zind_zind_inv @ K_x_zind.transpose(1,2) Gamma = torch.diagonal(self.gaussian_process.K_plus_noise + Q_X_X, 0, 1, 2) Gamma_inv = torch.diag_embed(1/Gamma) Sigma = torch.pinverse(K_zind_zind + K_x_zind.transpose(1,2) @ Gamma_inv @ K_x_zind) mean_post_factor = torch.zeros((dim_gp_outputs, self.T)) for i in range(dim_gp_outputs): mean_post_factor[i] = Sigma[i] @ K_x_zind[i].T @ Gamma_inv[i] @ \ torch.from_numpy(targets[:,self.target_mask[i]]).double() return mean_post_factor.detach().numpy(), Sigma.detach().numpy(), K_zind_zind_inv.detach().numpy(), z_ind def setup_gp_optimizer(self): """Sets up nonlinear optimization problem including cost objective, variable bounds and dynamics constraints. """ nx, nu = self.model.nx, self.model.nu T = self.T # Define optimizer and variables. opti = cs.Opti() # States. x_var = opti.variable(nx, T + 1) # Inputs. u_var = opti.variable(nu, T) # Initial state. x_init = opti.parameter(nx, 1) # Reference (equilibrium point or trajectory, last step for terminal cost). 
x_ref = opti.parameter(nx, T + 1) # Chance constraint limits. state_constraint_set = [] for state_constraint in self.constraints.state_constraints: state_constraint_set.append(opti.parameter(state_constraint.num_constraints, T+1)) input_constraint_set = [] for input_constraint in self.constraints.input_constraints: input_constraint_set.append(opti.parameter(input_constraint.num_constraints, T)) # Sparse GP mean postfactor matrix. mean_post_factor = opti.parameter(len(self.target_mask), T) # Sparse GP inducing points. z_ind = opti.parameter(T, len(self.input_mask)) # Cost (cumulative). cost = 0 cost_func = self.model.loss for i in range(T): cost += cost_func(x=x_var[:, i], u=u_var[:, i], Xr=x_ref[:, i], Ur=np.zeros((nu, 1)), Q=self.Q, R=self.R)["l"] # Terminal cost. cost += cost_func(x=x_var[:, -1], u=np.zeros((nu, 1)), Xr=x_ref[:, -1], Ur=np.zeros((nu, 1)), Q=self.Q, R=self.R)["l"] opti.minimize(cost) z = cs.vertcat(x_var[:,:-1], u_var) z = z[self.input_mask,:] for i in range(self.T): # Dynamics constraints using the dynamics of the prior and the mean of the GP. # This follows the tractable dynamics formulation in Section III.B in Hewing 2019. # Note that for the GP approximation, we are purposely using elementwise multiplication *. if self.sparse_gp: next_state = self.prior_dynamics_func(x0=x_var[:, i]-self.prior_ctrl.X_LIN[:,None], p=u_var[:, i]-self.prior_ctrl.U_LIN[:,None])['xf'] + \ self.prior_ctrl.X_LIN[:,None]+ self.Bd @ cs.sum2(self.K_z_zind_func(z1=z[:,i].T, z2=z_ind)['K'] * mean_post_factor) else: # Sparse GP approximation doesn't always work well, thus, use Exact GP regression. This is much slower, # but for unstable systems, make performance much better. 
next_state = self.prior_dynamics_func(x0=x_var[:, i]-self.prior_ctrl.X_LIN[:,None], p=u_var[:, i]-self.prior_ctrl.U_LIN[:,None])['xf'] + \ self.prior_ctrl.X_LIN[:,None]+ self.Bd @ self.gaussian_process.casadi_predict(z=z[:,i])['mean'] opti.subject_to(x_var[:, i + 1] == next_state) # Probabilistic state and input constraints according to Hewing 2019 constraint tightening. for s_i, state_constraint in enumerate(self.state_constraints_sym): opti.subject_to(state_constraint(x_var[:, i]) <= state_constraint_set[s_i][:,i]) for u_i, input_constraint in enumerate(self.input_constraints_sym): opti.subject_to(input_constraint(u_var[:, i]) <= input_constraint_set[u_i][:,i]) # Final state constraints. for s_i, state_constraint in enumerate(self.state_constraints_sym): opti.subject_to(state_constraint(x_var[:, -1]) <= state_constraint_set[s_i][:,-1]) # Initial condition constraints. opti.subject_to(x_var[:, 0] == x_init) # Create solver (IPOPT solver in this version). opts = {"ipopt.print_level": 4, "ipopt.sb": "yes", "ipopt.max_iter": 100, #100, "print_time": 1} opti.solver('ipopt', opts) self.opti_dict = { "opti": opti, "x_var": x_var, "u_var": u_var, "x_init": x_init, "x_ref": x_ref, "state_constraint_set": state_constraint_set, "input_constraint_set": input_constraint_set, "mean_post_factor": mean_post_factor, "z_ind": z_ind, "cost": cost } def select_action_with_gp(self, obs ): """Solves nonlinear MPC problem to get next action. Args: obs (np.array): current state/observation. Returns: np.array: input/action to the task/env. """ opti_dict = self.opti_dict opti = opti_dict["opti"] x_var = opti_dict["x_var"] u_var = opti_dict["u_var"] x_init = opti_dict["x_init"] x_ref = opti_dict["x_ref"] state_constraint_set = opti_dict["state_constraint_set"] input_constraint_set = opti_dict["input_constraint_set"] mean_post_factor = opti_dict["mean_post_factor"] z_ind = opti_dict["z_ind"] cost = opti_dict["cost"] # Assign the initial state. 
opti.set_value(x_init, obs) # Assign reference trajectory within horizon. goal_states = self.get_references() opti.set_value(x_ref, goal_states) if self.mode == "tracking": self.traj_step += 1 # Set the probabilistic state and input constraint set limits. state_constraint_set_prev, input_constraint_set_prev = self.precompute_probabilistic_limits() for si in range(len(self.constraints.state_constraints)): opti.set_value(state_constraint_set[si], state_constraint_set_prev[si]) for ui in range(len(self.constraints.input_constraints)): opti.set_value(input_constraint_set[ui], input_constraint_set_prev[ui]) mean_post_factor_val, Sigma, K_zind_zind_inv, z_ind_val = self.precompute_sparse_gp_values() opti.set_value(mean_post_factor, mean_post_factor_val) opti.set_value(z_ind, z_ind_val) # Initial guess for the optimization problem. if self.warmstart and self.x_prev is not None and self.u_prev is not None: # shift previous solutions by 1 step x_guess = deepcopy(self.x_prev) u_guess = deepcopy(self.u_prev) x_guess[:, :-1] = x_guess[:, 1:] u_guess[:-1] = u_guess[1:] opti.set_initial(x_var, x_guess) opti.set_initial(u_var, u_guess) # Solve the optimization problem. 
try: sol = opti.solve() x_val, u_val = sol.value(x_var), sol.value(u_var) except RuntimeError: x_val, u_val = opti.debug.value(x_var), opti.debug.value(u_var) u_val = np.atleast_2d(u_val) self.x_prev = x_val self.u_prev = u_val self.results_dict['horizon_states'].append(deepcopy(self.x_prev)) self.results_dict['horizon_inputs'].append(deepcopy(self.u_prev)) zi = np.hstack((x_val[:,0], u_val[:,0])) zi = zi[self.input_mask] gp_contribution = np.sum(self.K_z_zind_func(z1=zi, z2=z_ind_val)['K'].toarray() * mean_post_factor_val,axis=1) print("GP Mean eq Contribution: %s" % gp_contribution) zi = np.hstack((x_val[:,0], u_val[:,0])) pred, _, _ = self.gaussian_process.predict(zi[None,:]) print("True GP value: %s" % pred.numpy()) lin_pred = self.prior_dynamics_func(x0=x_val[:,0]-self.prior_ctrl.X_LIN, p=u_val[:, 0]-self.prior_ctrl.U_LIN)['xf'].toarray() + \ self.prior_ctrl.X_LIN[:,None] self.results_dict['linear_pred'].append(lin_pred) self.results_dict['gp_mean_eq_pred'].append(gp_contribution) self.results_dict['gp_pred'].append(pred.numpy()) # Take the first one from solved action sequence. if u_val.ndim > 1: action = u_val[:, 0] else: action = np.array([u_val[0]]) self.prev_action = action, return action def learn(self, input_data=None, target_data=None, gp_model=None, plot=False ): """Performs GP training. Args: input_data, target_data (optiona, np.array): data to use for training gp_model (str): if not None, this is the path to pretrained models to use instead of training new ones. plot (bool): to plot validation trajectories or not. Returns: training_results (dict): Dictionary of the training results. 
""" if gp_model is None: gp_model = self.gp_model_path self.prior_ctrl.remove_constraints(self.prior_ctrl.additional_constraints) self.reset() if self.online_learning: input_data = np.zeros((self.train_iterations, len(self.input_mask))) target_data = np.zeros((self.train_iterations, len(self.target_mask))) if input_data is None and target_data is None: train_inputs = [] train_targets = [] train_info = [] ############ # Use Latin Hypercube Sampling to generate states withing environment bounds. lhs_sampler = Lhs(lhs_type='classic', criterion='maximin') limits = [(self.env.INIT_STATE_RAND_INFO[key].low, self.env.INIT_STATE_RAND_INFO[key].high) for key in self.env.INIT_STATE_RAND_INFO] # todo: parameterize this if we actually want it. num_eq_samples = 0 samples = lhs_sampler.generate(limits, self.train_iterations + self.validation_iterations - num_eq_samples, random_state=self.seed) # todo: choose if we want eq samples or not. delta = 0.01 eq_limits = [(self.prior_ctrl.X_LIN[eq]-delta, self.prior_ctrl.X_LIN[eq]+delta) for eq in range(self.model.nx)] if num_eq_samples > 0: eq_samples = lhs_sampler.generate(eq_limits, num_eq_samples, random_state=self.seed) #samples = samples.append(eq_samples) init_state_samples = np.array(samples + eq_samples) else: init_state_samples = np.array(samples) input_limits = np.vstack((self.constraints.input_constraints[0].lower_bounds, self.constraints.input_constraints[0].upper_bounds)).T input_samples = lhs_sampler.generate(input_limits, self.train_iterations + self.validation_iterations, random_state=self.seed) input_samples = np.array(input_samples) # not being used currently seeds = self.env.np_random.randint(0,99999, size=self.train_iterations + self.validation_iterations) load_from_file = False if load_from_file: gpmpc_data = np.load("/home/erl/repos/journal_zhichao/safe-control-gym/experiments/annual_reviews/figure6/data/small_drone/statecontroldata_rand_good1.npz") x_seq_all = gpmpc_data["x_seq_all"] x_next_seq_all = 
gpmpc_data["x_next_seq_all"] u_seq_all = gpmpc_data["u_seq_all"] else: x_seq_all = [] u_seq_all = [] x_next_seq_all = [] for i in range(self.train_iterations + self.validation_iterations): if load_from_file: x_seq = x_seq_all[i] x_next_seq = x_next_seq_all[i] u_seq = u_seq_all[i] else: # For random initial state training. init_state = init_state_samples[i,:] # Collect data with prior controller. run_env = self.env_func(init_state=init_state, randomized_init=False, seed=int(seeds[i])) episode_results = self.prior_ctrl.run(env=run_env, max_steps=1, gp_training = True) run_env.close() x_obs = episode_results['obs'][-3:,:] u_seq = episode_results['action'][-1:,:] run_env.close() x_seq = x_obs[:-1,:] x_next_seq = x_obs[1:,:] x_seq_all.append(x_seq) x_next_seq_all.append(x_next_seq) u_seq_all.append(u_seq) train_inputs_i, train_targets_i = self.preprocess_training_data(x_seq, u_seq, x_next_seq) train_inputs.append(train_inputs_i) train_targets.append(train_targets_i) np.savez("/home/erl/repos/journal_zhichao/safe-control-gym/experiments/annual_reviews/figure6/data/small_drone/statecontroldata_rand.npz", x_seq_all = x_seq_all, x_next_seq_all = x_next_seq_all, u_seq_all = u_seq_all) ########### else: train_inputs = input_data train_targets = target_data # assign all data train_inputs = np.vstack(train_inputs) train_targets = np.vstack(train_targets) self.data_inputs = train_inputs self.data_targets = train_targets train_idx, test_idx = train_test_split( #list(range(self.train_iterations + self.validation_iterations)), list(range(train_inputs.shape[0])), test_size=self.validation_iterations/(self.train_iterations+self.validation_iterations), random_state=self.seed ) train_inputs = self.data_inputs[train_idx, :] train_targets = self.data_targets[train_idx, :] self.train_data = {'train_inputs': train_inputs, 'train_targets': train_targets} test_inputs = self.data_inputs[test_idx, :] test_targets = self.data_targets[test_idx, :] self.test_data = {'test_inputs': test_inputs, 
'test_targets': test_targets} train_inputs_tensor = torch.Tensor(train_inputs).double() train_targets_tensor = torch.Tensor(train_targets).double() test_inputs_tensor = torch.Tensor(test_inputs).double() test_targets_tensor = torch.Tensor(test_targets).double() if plot: init_state = np.array([-1.0, 0.0, 0.0, 0.0, 0.0, 0.0]) valid_env = self.env_func(init_state=init_state, randomized_init=False) validation_results = self.prior_ctrl.run(env=valid_env, max_steps=40) valid_env.close() x_obs = validation_results['obs'] u_seq = validation_results['action'] x_seq = x_obs[:-1, :] x_next_seq = x_obs[1:, :] # Define likelihood. likelihood = gpytorch.likelihoods.GaussianLikelihood( noise_constraint=gpytorch.constraints.GreaterThan(1e-6), ).double() self.gaussian_process = GaussianProcessCollection(ZeroMeanIndependentGPModel, likelihood, len(self.target_mask), input_mask=self.input_mask, target_mask=self.target_mask, normalize=self.normalize_training_data ) if gp_model: self.gaussian_process.init_with_hyperparam(train_inputs_tensor, train_targets_tensor, gp_model) else: # Train the GP. self.gaussian_process.train(train_inputs_tensor, train_targets_tensor, test_inputs_tensor, test_targets_tensor, n_train=self.optimization_iterations, learning_rate=self.learning_rate, gpu=self.use_gpu, dir=self.output_dir) # Plot validation. if plot: validation_inputs, validation_targets = self.preprocess_training_data(x_seq, u_seq, x_next_seq) fig_count = 0 fig_count = self.gaussian_process.plot_trained_gp(torch.Tensor(validation_inputs).double(), torch.Tensor(validation_targets).double(), fig_count=fig_count) self.set_gp_dynamics_func() self.setup_gp_optimizer() self.prior_ctrl.add_constraints(self.prior_ctrl.additional_constraints) self.prior_ctrl.reset() # Collect training results. 
training_results = {} training_results['train_targets'] = train_targets training_results['train_inputs'] = train_inputs try: training_results['info'] = train_info except UnboundLocalError: training_results['info'] = None return training_results def select_action(self, obs ): """Select the action based on the given observation. Args: obs (np.array): current observed state. Returns: action (np.array): desired policy action. """ if self.gaussian_process is None: action = self.prior_ctrl.select_action(obs) else: if(self.last_obs is not None and self.last_action is not None and self.online_learning): print("[ERROR]: Not yet supported.") exit() t1 = time.perf_counter() action = self.select_action_with_gp(obs) t2 = time.perf_counter() print("GP SELECT ACTION TIME: %s" %(t2 - t1)) self.last_obs = obs self.last_action = action return action def close(self): """Clean up. """ self.env_training.close() self.env.close() def reset_results_dict(self): """ """ "Result the results_dict before running." super().reset_results_dict() self.results_dict['input_constraint_set'] = [] self.results_dict['state_constraint_set'] = [] self.results_dict['state_horizon_cov'] = [] self.results_dict['input_horizon_cov'] = [] self.results_dict['gp_mean_eq_pred'] = [] self.results_dict['gp_pred'] = [] self.results_dict['linear_pred'] = [] def reset(self): """Reset the controller before running. """ # Setup reference input. if self.env.TASK == Task.STABILIZATION: self.mode = "stabilization" self.x_goal = self.env.X_GOAL elif self.env.TASK == Task.TRAJ_TRACKING: self.mode = "tracking" self.traj = self.env.X_GOAL.T self.traj_step = 0 # Dynamics model. if self.gaussian_process is not None: self.set_gp_dynamics_func() # CasADi optimizer. self.setup_gp_optimizer() self.prior_ctrl.reset() # Previously solved states & inputs, useful for warm start. self.x_prev = None self.u_prev = None
50.237401
229
0.590248
import scipy import numpy as np import casadi as cs import time import torch import gpytorch from copy import deepcopy from skopt.sampler import Lhs from functools import partial from sklearn.model_selection import train_test_split from safe_control_gym.controllers.mpc.linear_mpc import LinearMPC, MPC from safe_control_gym.controllers.mpc.mpc_utils import discretize_linear_system from safe_control_gym.controllers.mpc.gp_utils import GaussianProcessCollection, ZeroMeanIndependentGPModel, covSEard from safe_control_gym.envs.benchmark_env import Task class GPMPC(MPC): def __init__( self, env_func, seed: int = 1337, horizon: int = 5, q_mpc: list = [1], r_mpc: list = [1], additional_constraints: list = None, use_prev_start: bool = True, train_iterations: int = 800, validation_iterations: int = 200, optimization_iterations: list = None, learning_rate: list = None, normalize_training_data: bool = False, use_gpu: bool = False, gp_model_path: str = None, prob: float = 0.955, initial_rollout_std: float = 0.005, input_mask: list = None, target_mask: list = None, gp_approx: str = 'mean_eq', sparse_gp: bool = False, online_learning: bool = False, inertial_prop: list = [1.0], prior_param_coeff: float = 1.0, output_dir: str = "results/temp", **kwargs ): print("############################################### GP-MPC hexa ###########################################") self.prior_env_func = partial(env_func, inertial_prop=np.array(inertial_prop)*prior_param_coeff) self.prior_param_coeff = prior_param_coeff self.prior_ctrl = LinearMPC( self.prior_env_func, horizon=horizon, q_mpc=q_mpc, r_mpc=r_mpc, use_prev_start=use_prev_start, output_dir=output_dir, additional_constraints=additional_constraints, ) self.prior_ctrl.reset() super().__init__( self.prior_env_func, horizon=horizon, q_mpc=q_mpc, r_mpc=r_mpc, use_prev_start=use_prev_start, output_dir=output_dir, additional_constraints=additional_constraints, **kwargs) self.env_func = env_func self.env = env_func(randomized_init=False) 
self.env_training = env_func(randomized_init=True) self.train_data = None self.prior_dynamics_func = self.prior_ctrl.linear_dynamics_func self.gaussian_process = None self.train_iterations = train_iterations self.validation_iterations = validation_iterations self.optimization_iterations = optimization_iterations self.learning_rate = learning_rate self.gp_model_path = gp_model_path self.normalize_training_data = normalize_training_data self.use_gpu = use_gpu self.seed = seed self.prob = prob self.sparse_gp = sparse_gp if input_mask is None: self.input_mask = np.arange(self.model.nx + self.model.nu).tolist() else: self.input_mask = input_mask if target_mask is None: self.target_mask = np.arange(self.model.nx).tolist() else: self.target_mask = target_mask Bd = np.eye(self.model.nx) self.Bd = Bd[:, self.target_mask] self.gp_approx = gp_approx self.online_learning = online_learning self.last_obs = None self.last_action = None self.initial_rollout_std = initial_rollout_std def setup_prior_dynamics(self): A, B = discretize_linear_system(self.prior_ctrl.dfdx, self.prior_ctrl.dfdu, self.dt) Q_lqr = self.Q R_lqr = self.R P = scipy.linalg.solve_discrete_are(A, B, Q_lqr, R_lqr) btp = np.dot(B.T, P) self.lqr_gain = -np.dot(np.linalg.inv(self.R + np.dot(btp, B)), np.dot(btp, A)) self.discrete_dfdx = A self.discrete_dfdu = B def set_gp_dynamics_func(self): self.setup_prior_dynamics() self.inverse_cdf = scipy.stats.norm.ppf(1 - (1/self.model.nx - (self.prob + 1)/(2*self.model.nx))) self.create_sparse_GP_machinery() def create_sparse_GP_machinery(self): lengthscales, signal_var, noise_var, gp_K_plus_noise = self.gaussian_process.get_hyperparameters(as_numpy=True) self.length_scales = lengthscales.squeeze() self.signal_var = signal_var.squeeze() self.noise_var = noise_var.squeeze() self.gp_K_plus_noise = gp_K_plus_noise Nx = len(self.input_mask) Ny = len(self.target_mask) N = self.gaussian_process.n_training_samples z1 = cs.SX.sym('z1', Nx) z2 = cs.SX.sym('z2', Nx) ell_s = 
cs.SX.sym('ell', Nx) sf2_s = cs.SX.sym('sf2') z_ind = cs.SX.sym('z_ind', self.T, Nx) covSE = cs.Function('covSE', [z1, z2, ell_s, sf2_s], [covSEard(z1, z2, ell_s, sf2_s)]) ks = cs.SX.zeros(1, self.T) for i in range(self.T): ks[i] = covSE(z1, z_ind[i, :], ell_s, sf2_s) ks_func = cs.Function('K_s', [z1, z_ind, ell_s, sf2_s], [ks]) K_z_zind = cs.SX.zeros(Ny, self.T) for i in range(Ny): K_z_zind[i,:] = ks_func(z1, z_ind, self.length_scales[i,:], self.signal_var[i]) self.K_z_zind_func = cs.Function('K_z_zind', [z1, z_ind],[K_z_zind],['z1', 'z2'],['K']) def preprocess_training_data(self, x_seq, u_seq, x_next_seq ): x_pred_seq = self.prior_dynamics_func(x0=x_seq.T - self.prior_ctrl.X_LIN[:, None], p=u_seq.T - self.prior_ctrl.U_LIN[:,None])['xf'].toarray() targets = (x_next_seq.T - (x_pred_seq+self.prior_ctrl.X_LIN[:,None])).transpose() inputs = np.hstack([x_seq, u_seq]) return inputs, targets def precompute_probabilistic_limits(self, print_sets=True ): nx, nu = self.model.nx, self.model.nu T = self.T state_covariances = np.zeros((self.T+1, nx, nx)) input_covariances = np.zeros((self.T, nu, nu)) state_constraint_set = [] for state_constraint in self.constraints.state_constraints: state_constraint_set.append(np.zeros((state_constraint.num_constraints, T+1))) input_constraint_set = [] for input_constraint in self.constraints.input_constraints: input_constraint_set.append(np.zeros((input_constraint.num_constraints, T))) if self.x_prev is not None and self.u_prev is not None: cov_x = np.diag([self.initial_rollout_std**2]*nx) for i in range(T): state_covariances[i] = cov_x cov_u = self.lqr_gain @ cov_x @ self.lqr_gain.T input_covariances[i] = cov_u cov_xu = cov_x @ self.lqr_gain.T z = np.hstack((self.x_prev[:,i], self.u_prev[:,i])) if self.gp_approx == 'taylor': raise NotImplementedError("Taylor GP approximation is currently not working.") elif self.gp_approx == 'mean_eq': _, cov_d_tensor = self.gaussian_process.predict(z[None,:], return_pred=False) cov_d = 
cov_d_tensor.detach().numpy() else: raise NotImplementedError('gp_approx method is incorrect or not implemented') for ui, input_constraint in enumerate(self.constraints.input_constraints): input_constraint_set[ui][:, i] = -1*self.inverse_cdf * \ np.absolute(input_constraint.A) @ np.sqrt(np.diag(cov_u)) for si, state_constraint in enumerate(self.constraints.state_constraints): state_constraint_set[si][:, i] = -1*self.inverse_cdf * \ np.absolute(state_constraint.A) @ np.sqrt(np.diag(cov_x)) if self.gp_approx == 'taylor': raise NotImplementedError("Taylor GP rollout not implemented.") elif self.gp_approx == 'mean_eq': cov_x = self.discrete_dfdx @ cov_x @ self.discrete_dfdx.T + \ self.discrete_dfdx @ cov_xu @ self.discrete_dfdu.T + \ self.discrete_dfdu @ cov_xu.T @ self.discrete_dfdx.T + \ self.discrete_dfdu @ cov_u @ self.discrete_dfdu.T + \ self.Bd @ cov_d @ self.Bd.T else: raise NotImplementedError('gp_approx method is incorrect or not implemented') for si, state_constraint in enumerate(self.constraints.state_constraints): state_constraint_set[si][:,-1] = -1 * self.inverse_cdf * \ np.absolute(state_constraint.A) @ np.sqrt(np.diag(cov_x)) state_covariances[-1] = cov_x if print_sets: print("Probabilistic State Constraint values along Horizon:") print(state_constraint_set) print("Probabilistic Input Constraint values along Horizon:") print(input_constraint_set) self.results_dict['input_constraint_set'].append(input_constraint_set) self.results_dict['state_constraint_set'].append(state_constraint_set) self.results_dict['state_horizon_cov'].append(state_covariances) self.results_dict['input_horizon_cov'].append(input_covariances) return state_constraint_set, input_constraint_set def precompute_sparse_gp_values(self): n_data_points = self.gaussian_process.n_training_samples dim_gp_inputs = len(self.input_mask) dim_gp_outputs = len(self.target_mask) inputs = self.train_data['train_inputs'] targets = self.train_data['train_targets'] if self.x_prev is not None and 
self.u_prev is not None: z_ind = np.hstack((self.x_prev[:,:-1].T, self.u_prev.T)) z_ind = z_ind[:,self.input_mask] else: inds = self.env.np_random.choice(range(n_data_points), size=self.T) z_ind = inputs[inds][:, self.input_mask] K_zind_zind = self.gaussian_process.kernel(torch.Tensor(z_ind).double()) K_zind_zind_inv = self.gaussian_process.kernel_inv(torch.Tensor(z_ind).double()) K_x_zind = self.gaussian_process.kernel(torch.from_numpy(inputs[:, self.input_mask]).double(), torch.Tensor(z_ind).double()) Q_X_X = K_x_zind @ K_zind_zind_inv @ K_x_zind.transpose(1,2) Gamma = torch.diagonal(self.gaussian_process.K_plus_noise + Q_X_X, 0, 1, 2) Gamma_inv = torch.diag_embed(1/Gamma) Sigma = torch.pinverse(K_zind_zind + K_x_zind.transpose(1,2) @ Gamma_inv @ K_x_zind) mean_post_factor = torch.zeros((dim_gp_outputs, self.T)) for i in range(dim_gp_outputs): mean_post_factor[i] = Sigma[i] @ K_x_zind[i].T @ Gamma_inv[i] @ \ torch.from_numpy(targets[:,self.target_mask[i]]).double() return mean_post_factor.detach().numpy(), Sigma.detach().numpy(), K_zind_zind_inv.detach().numpy(), z_ind def setup_gp_optimizer(self): nx, nu = self.model.nx, self.model.nu T = self.T opti = cs.Opti() x_var = opti.variable(nx, T + 1) u_var = opti.variable(nu, T) x_init = opti.parameter(nx, 1) x_ref = opti.parameter(nx, T + 1) state_constraint_set = [] for state_constraint in self.constraints.state_constraints: state_constraint_set.append(opti.parameter(state_constraint.num_constraints, T+1)) input_constraint_set = [] for input_constraint in self.constraints.input_constraints: input_constraint_set.append(opti.parameter(input_constraint.num_constraints, T)) mean_post_factor = opti.parameter(len(self.target_mask), T) z_ind = opti.parameter(T, len(self.input_mask)) cost = 0 cost_func = self.model.loss for i in range(T): cost += cost_func(x=x_var[:, i], u=u_var[:, i], Xr=x_ref[:, i], Ur=np.zeros((nu, 1)), Q=self.Q, R=self.R)["l"] cost += cost_func(x=x_var[:, -1], u=np.zeros((nu, 1)), Xr=x_ref[:, -1], 
Ur=np.zeros((nu, 1)), Q=self.Q, R=self.R)["l"] opti.minimize(cost) z = cs.vertcat(x_var[:,:-1], u_var) z = z[self.input_mask,:] for i in range(self.T): if self.sparse_gp: next_state = self.prior_dynamics_func(x0=x_var[:, i]-self.prior_ctrl.X_LIN[:,None], p=u_var[:, i]-self.prior_ctrl.U_LIN[:,None])['xf'] + \ self.prior_ctrl.X_LIN[:,None]+ self.Bd @ cs.sum2(self.K_z_zind_func(z1=z[:,i].T, z2=z_ind)['K'] * mean_post_factor) else: # but for unstable systems, make performance much better. next_state = self.prior_dynamics_func(x0=x_var[:, i]-self.prior_ctrl.X_LIN[:,None], p=u_var[:, i]-self.prior_ctrl.U_LIN[:,None])['xf'] + \ self.prior_ctrl.X_LIN[:,None]+ self.Bd @ self.gaussian_process.casadi_predict(z=z[:,i])['mean'] opti.subject_to(x_var[:, i + 1] == next_state) # Probabilistic state and input constraints according to Hewing 2019 constraint tightening. for s_i, state_constraint in enumerate(self.state_constraints_sym): opti.subject_to(state_constraint(x_var[:, i]) <= state_constraint_set[s_i][:,i]) for u_i, input_constraint in enumerate(self.input_constraints_sym): opti.subject_to(input_constraint(u_var[:, i]) <= input_constraint_set[u_i][:,i]) # Final state constraints. for s_i, state_constraint in enumerate(self.state_constraints_sym): opti.subject_to(state_constraint(x_var[:, -1]) <= state_constraint_set[s_i][:,-1]) # Initial condition constraints. opti.subject_to(x_var[:, 0] == x_init) # Create solver (IPOPT solver in this version). 
opts = {"ipopt.print_level": 4, "ipopt.sb": "yes", "ipopt.max_iter": 100, #100, "print_time": 1} opti.solver('ipopt', opts) self.opti_dict = { "opti": opti, "x_var": x_var, "u_var": u_var, "x_init": x_init, "x_ref": x_ref, "state_constraint_set": state_constraint_set, "input_constraint_set": input_constraint_set, "mean_post_factor": mean_post_factor, "z_ind": z_ind, "cost": cost } def select_action_with_gp(self, obs ): opti_dict = self.opti_dict opti = opti_dict["opti"] x_var = opti_dict["x_var"] u_var = opti_dict["u_var"] x_init = opti_dict["x_init"] x_ref = opti_dict["x_ref"] state_constraint_set = opti_dict["state_constraint_set"] input_constraint_set = opti_dict["input_constraint_set"] mean_post_factor = opti_dict["mean_post_factor"] z_ind = opti_dict["z_ind"] cost = opti_dict["cost"] # Assign the initial state. opti.set_value(x_init, obs) # Assign reference trajectory within horizon. goal_states = self.get_references() opti.set_value(x_ref, goal_states) if self.mode == "tracking": self.traj_step += 1 # Set the probabilistic state and input constraint set limits. state_constraint_set_prev, input_constraint_set_prev = self.precompute_probabilistic_limits() for si in range(len(self.constraints.state_constraints)): opti.set_value(state_constraint_set[si], state_constraint_set_prev[si]) for ui in range(len(self.constraints.input_constraints)): opti.set_value(input_constraint_set[ui], input_constraint_set_prev[ui]) mean_post_factor_val, Sigma, K_zind_zind_inv, z_ind_val = self.precompute_sparse_gp_values() opti.set_value(mean_post_factor, mean_post_factor_val) opti.set_value(z_ind, z_ind_val) # Initial guess for the optimization problem. 
if self.warmstart and self.x_prev is not None and self.u_prev is not None: # shift previous solutions by 1 step x_guess = deepcopy(self.x_prev) u_guess = deepcopy(self.u_prev) x_guess[:, :-1] = x_guess[:, 1:] u_guess[:-1] = u_guess[1:] opti.set_initial(x_var, x_guess) opti.set_initial(u_var, u_guess) # Solve the optimization problem. try: sol = opti.solve() x_val, u_val = sol.value(x_var), sol.value(u_var) except RuntimeError: x_val, u_val = opti.debug.value(x_var), opti.debug.value(u_var) u_val = np.atleast_2d(u_val) self.x_prev = x_val self.u_prev = u_val self.results_dict['horizon_states'].append(deepcopy(self.x_prev)) self.results_dict['horizon_inputs'].append(deepcopy(self.u_prev)) zi = np.hstack((x_val[:,0], u_val[:,0])) zi = zi[self.input_mask] gp_contribution = np.sum(self.K_z_zind_func(z1=zi, z2=z_ind_val)['K'].toarray() * mean_post_factor_val,axis=1) print("GP Mean eq Contribution: %s" % gp_contribution) zi = np.hstack((x_val[:,0], u_val[:,0])) pred, _, _ = self.gaussian_process.predict(zi[None,:]) print("True GP value: %s" % pred.numpy()) lin_pred = self.prior_dynamics_func(x0=x_val[:,0]-self.prior_ctrl.X_LIN, p=u_val[:, 0]-self.prior_ctrl.U_LIN)['xf'].toarray() + \ self.prior_ctrl.X_LIN[:,None] self.results_dict['linear_pred'].append(lin_pred) self.results_dict['gp_mean_eq_pred'].append(gp_contribution) self.results_dict['gp_pred'].append(pred.numpy()) # Take the first one from solved action sequence. 
if u_val.ndim > 1: action = u_val[:, 0] else: action = np.array([u_val[0]]) self.prev_action = action, return action def learn(self, input_data=None, target_data=None, gp_model=None, plot=False ): if gp_model is None: gp_model = self.gp_model_path self.prior_ctrl.remove_constraints(self.prior_ctrl.additional_constraints) self.reset() if self.online_learning: input_data = np.zeros((self.train_iterations, len(self.input_mask))) target_data = np.zeros((self.train_iterations, len(self.target_mask))) if input_data is None and target_data is None: train_inputs = [] train_targets = [] train_info = [] ############ # Use Latin Hypercube Sampling to generate states withing environment bounds. lhs_sampler = Lhs(lhs_type='classic', criterion='maximin') limits = [(self.env.INIT_STATE_RAND_INFO[key].low, self.env.INIT_STATE_RAND_INFO[key].high) for key in self.env.INIT_STATE_RAND_INFO] # todo: parameterize this if we actually want it. num_eq_samples = 0 samples = lhs_sampler.generate(limits, self.train_iterations + self.validation_iterations - num_eq_samples, random_state=self.seed) # todo: choose if we want eq samples or not. 
delta = 0.01 eq_limits = [(self.prior_ctrl.X_LIN[eq]-delta, self.prior_ctrl.X_LIN[eq]+delta) for eq in range(self.model.nx)] if num_eq_samples > 0: eq_samples = lhs_sampler.generate(eq_limits, num_eq_samples, random_state=self.seed) #samples = samples.append(eq_samples) init_state_samples = np.array(samples + eq_samples) else: init_state_samples = np.array(samples) input_limits = np.vstack((self.constraints.input_constraints[0].lower_bounds, self.constraints.input_constraints[0].upper_bounds)).T input_samples = lhs_sampler.generate(input_limits, self.train_iterations + self.validation_iterations, random_state=self.seed) input_samples = np.array(input_samples) # not being used currently seeds = self.env.np_random.randint(0,99999, size=self.train_iterations + self.validation_iterations) load_from_file = False if load_from_file: gpmpc_data = np.load("/home/erl/repos/journal_zhichao/safe-control-gym/experiments/annual_reviews/figure6/data/small_drone/statecontroldata_rand_good1.npz") x_seq_all = gpmpc_data["x_seq_all"] x_next_seq_all = gpmpc_data["x_next_seq_all"] u_seq_all = gpmpc_data["u_seq_all"] else: x_seq_all = [] u_seq_all = [] x_next_seq_all = [] for i in range(self.train_iterations + self.validation_iterations): if load_from_file: x_seq = x_seq_all[i] x_next_seq = x_next_seq_all[i] u_seq = u_seq_all[i] else: # For random initial state training. init_state = init_state_samples[i,:] # Collect data with prior controller. 
run_env = self.env_func(init_state=init_state, randomized_init=False, seed=int(seeds[i])) episode_results = self.prior_ctrl.run(env=run_env, max_steps=1, gp_training = True) run_env.close() x_obs = episode_results['obs'][-3:,:] u_seq = episode_results['action'][-1:,:] run_env.close() x_seq = x_obs[:-1,:] x_next_seq = x_obs[1:,:] x_seq_all.append(x_seq) x_next_seq_all.append(x_next_seq) u_seq_all.append(u_seq) train_inputs_i, train_targets_i = self.preprocess_training_data(x_seq, u_seq, x_next_seq) train_inputs.append(train_inputs_i) train_targets.append(train_targets_i) np.savez("/home/erl/repos/journal_zhichao/safe-control-gym/experiments/annual_reviews/figure6/data/small_drone/statecontroldata_rand.npz", x_seq_all = x_seq_all, x_next_seq_all = x_next_seq_all, u_seq_all = u_seq_all) ########### else: train_inputs = input_data train_targets = target_data # assign all data train_inputs = np.vstack(train_inputs) train_targets = np.vstack(train_targets) self.data_inputs = train_inputs self.data_targets = train_targets train_idx, test_idx = train_test_split( #list(range(self.train_iterations + self.validation_iterations)), list(range(train_inputs.shape[0])), test_size=self.validation_iterations/(self.train_iterations+self.validation_iterations), random_state=self.seed ) train_inputs = self.data_inputs[train_idx, :] train_targets = self.data_targets[train_idx, :] self.train_data = {'train_inputs': train_inputs, 'train_targets': train_targets} test_inputs = self.data_inputs[test_idx, :] test_targets = self.data_targets[test_idx, :] self.test_data = {'test_inputs': test_inputs, 'test_targets': test_targets} train_inputs_tensor = torch.Tensor(train_inputs).double() train_targets_tensor = torch.Tensor(train_targets).double() test_inputs_tensor = torch.Tensor(test_inputs).double() test_targets_tensor = torch.Tensor(test_targets).double() if plot: init_state = np.array([-1.0, 0.0, 0.0, 0.0, 0.0, 0.0]) valid_env = self.env_func(init_state=init_state, randomized_init=False) 
validation_results = self.prior_ctrl.run(env=valid_env, max_steps=40) valid_env.close() x_obs = validation_results['obs'] u_seq = validation_results['action'] x_seq = x_obs[:-1, :] x_next_seq = x_obs[1:, :] # Define likelihood. likelihood = gpytorch.likelihoods.GaussianLikelihood( noise_constraint=gpytorch.constraints.GreaterThan(1e-6), ).double() self.gaussian_process = GaussianProcessCollection(ZeroMeanIndependentGPModel, likelihood, len(self.target_mask), input_mask=self.input_mask, target_mask=self.target_mask, normalize=self.normalize_training_data ) if gp_model: self.gaussian_process.init_with_hyperparam(train_inputs_tensor, train_targets_tensor, gp_model) else: # Train the GP. self.gaussian_process.train(train_inputs_tensor, train_targets_tensor, test_inputs_tensor, test_targets_tensor, n_train=self.optimization_iterations, learning_rate=self.learning_rate, gpu=self.use_gpu, dir=self.output_dir) # Plot validation. if plot: validation_inputs, validation_targets = self.preprocess_training_data(x_seq, u_seq, x_next_seq) fig_count = 0 fig_count = self.gaussian_process.plot_trained_gp(torch.Tensor(validation_inputs).double(), torch.Tensor(validation_targets).double(), fig_count=fig_count) self.set_gp_dynamics_func() self.setup_gp_optimizer() self.prior_ctrl.add_constraints(self.prior_ctrl.additional_constraints) self.prior_ctrl.reset() # Collect training results. 
training_results = {} training_results['train_targets'] = train_targets training_results['train_inputs'] = train_inputs try: training_results['info'] = train_info except UnboundLocalError: training_results['info'] = None return training_results def select_action(self, obs ): if self.gaussian_process is None: action = self.prior_ctrl.select_action(obs) else: if(self.last_obs is not None and self.last_action is not None and self.online_learning): print("[ERROR]: Not yet supported.") exit() t1 = time.perf_counter() action = self.select_action_with_gp(obs) t2 = time.perf_counter() print("GP SELECT ACTION TIME: %s" %(t2 - t1)) self.last_obs = obs self.last_action = action return action def close(self): self.env_training.close() self.env.close() def reset_results_dict(self): super().reset_results_dict() self.results_dict['input_constraint_set'] = [] self.results_dict['state_constraint_set'] = [] self.results_dict['state_horizon_cov'] = [] self.results_dict['input_horizon_cov'] = [] self.results_dict['gp_mean_eq_pred'] = [] self.results_dict['gp_pred'] = [] self.results_dict['linear_pred'] = [] def reset(self): # Setup reference input. if self.env.TASK == Task.STABILIZATION: self.mode = "stabilization" self.x_goal = self.env.X_GOAL elif self.env.TASK == Task.TRAJ_TRACKING: self.mode = "tracking" self.traj = self.env.X_GOAL.T self.traj_step = 0 # Dynamics model. if self.gaussian_process is not None: self.set_gp_dynamics_func() # CasADi optimizer. self.setup_gp_optimizer() self.prior_ctrl.reset() # Previously solved states & inputs, useful for warm start. self.x_prev = None self.u_prev = None
true
true
f70ff12f7fd786d101d3916da05e94855ad2e161
3,381
py
Python
test/Base/FrontEndTest.py
PLOS/rhino
e9a0b595421664aabd21f0f8117cf04f4456db95
[ "MIT" ]
5
2017-03-04T13:35:27.000Z
2018-10-30T20:46:03.000Z
test/Base/FrontEndTest.py
PLOS/rhino
e9a0b595421664aabd21f0f8117cf04f4456db95
[ "MIT" ]
65
2017-03-14T20:36:38.000Z
2021-06-01T21:57:50.000Z
test/Base/FrontEndTest.py
PLOS/rhino
e9a0b595421664aabd21f0f8117cf04f4456db95
[ "MIT" ]
1
2017-02-10T00:29:21.000Z
2017-02-10T00:29:21.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright (c) 2017 Public Library of Science # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. import unittest import random from teamcity import is_running_under_teamcity from teamcity.unittestpy import TeamcityTestRunner from .WebDriverFactory import WebDriverFactory __author__ = 'jkrzemien@plos.org' class FrontEndTest(unittest.TestCase): """ Base class to provide Front End tests with desired WebDriver instances, as defined in [[Config.py]]. It inherits from `TestCase` in order to count as a test suite for Python's `unittest` framework. """ # This defines any `FrontEndTest` derived class as able to be run by Nose in a parallel way. 
# Requires Nose's `MultiProcess` plugin to be *enabled* _multiprocess_can_split_ = True # Will contain a single driver instance for the current test _driver = None # Will contain a list of driver (not instantiated) for the current test variations # (for all browsers) _injected_drivers = [] # Factory object to instantiate drivers factory = WebDriverFactory() def setUp(self): pass def tearDown(self): """ Method in charge of destroying the WebDriver/Proxy instances once the test finished running (even upon test failure). """ if self._driver: self._driver.quit() else: self.factory.teardown_webdriver() def getDriver(self): """ Simple method to retrieve the WebDriver/Proxy instances for this class to test method. """ if not self._driver: if len(self._injected_drivers) > 0: self._driver = self.factory.setup_remote_webdriver(self._injected_drivers.pop()) else: self._driver = self.factory.setup_webdriver() return self._driver @staticmethod def _run_tests_randomly(): """ *Static* method for every test suite inheriting this class to be able to run its tests in, at least, a non linear fashion. """ unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: random.choice([-1, 1]) if is_running_under_teamcity(): runner = TeamcityTestRunner() else: runner = unittest.TextTestRunner() unittest.main(testRunner=runner)
35.589474
100
0.697723
import unittest import random from teamcity import is_running_under_teamcity from teamcity.unittestpy import TeamcityTestRunner from .WebDriverFactory import WebDriverFactory __author__ = 'jkrzemien@plos.org' class FrontEndTest(unittest.TestCase): _multiprocess_can_split_ = True # Will contain a single driver instance for the current test _driver = None # Will contain a list of driver (not instantiated) for the current test variations # (for all browsers) _injected_drivers = [] # Factory object to instantiate drivers factory = WebDriverFactory() def setUp(self): pass def tearDown(self): if self._driver: self._driver.quit() else: self.factory.teardown_webdriver() def getDriver(self): if not self._driver: if len(self._injected_drivers) > 0: self._driver = self.factory.setup_remote_webdriver(self._injected_drivers.pop()) else: self._driver = self.factory.setup_webdriver() return self._driver @staticmethod def _run_tests_randomly(): unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: random.choice([-1, 1]) if is_running_under_teamcity(): runner = TeamcityTestRunner() else: runner = unittest.TextTestRunner() unittest.main(testRunner=runner)
true
true
f70ff1e700306a5c895e23510f6547d0391e8b9a
1,253
py
Python
configs/selfsup/_base_/datasets/imagenet_rotation-pred.py
mitming/mmselfsup
5b5cb474776291cfcb9a1140afd11b696e11fcab
[ "Apache-2.0" ]
355
2021-12-16T04:32:49.000Z
2022-03-31T22:15:23.000Z
configs/selfsup/_base_/datasets/imagenet_rotation-pred.py
mitming/mmselfsup
5b5cb474776291cfcb9a1140afd11b696e11fcab
[ "Apache-2.0" ]
89
2021-12-16T05:15:42.000Z
2022-03-31T10:57:39.000Z
configs/selfsup/_base_/datasets/imagenet_rotation-pred.py
mitming/mmselfsup
5b5cb474776291cfcb9a1140afd11b696e11fcab
[ "Apache-2.0" ]
74
2021-12-16T04:40:02.000Z
2022-03-31T08:40:32.000Z
# dataset settings data_source = 'ImageNet' dataset_type = 'RotationPredDataset' img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) train_pipeline = [ dict(type='RandomResizedCrop', size=224), dict(type='RandomHorizontalFlip'), ] test_pipeline = [ dict(type='Resize', size=256), dict(type='CenterCrop', size=224), ] # prefetch prefetch = False if not prefetch: train_pipeline.extend( [dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)]) test_pipeline.extend( [dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)]) # dataset summary data = dict( samples_per_gpu=16, # (16*4) x 8 = 512 workers_per_gpu=2, train=dict( type=dataset_type, data_source=dict( type=data_source, data_prefix='data/imagenet/train', ann_file='data/imagenet/meta/train.txt', ), pipeline=train_pipeline, prefetch=prefetch), val=dict( type=dataset_type, data_source=dict( type=data_source, data_prefix='data/imagenet/val', ann_file='data/imagenet/meta/val.txt', ), pipeline=test_pipeline, prefetch=prefetch))
27.23913
74
0.614525
data_source = 'ImageNet' dataset_type = 'RotationPredDataset' img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) train_pipeline = [ dict(type='RandomResizedCrop', size=224), dict(type='RandomHorizontalFlip'), ] test_pipeline = [ dict(type='Resize', size=256), dict(type='CenterCrop', size=224), ] prefetch = False if not prefetch: train_pipeline.extend( [dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)]) test_pipeline.extend( [dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)]) data = dict( samples_per_gpu=16, workers_per_gpu=2, train=dict( type=dataset_type, data_source=dict( type=data_source, data_prefix='data/imagenet/train', ann_file='data/imagenet/meta/train.txt', ), pipeline=train_pipeline, prefetch=prefetch), val=dict( type=dataset_type, data_source=dict( type=data_source, data_prefix='data/imagenet/val', ann_file='data/imagenet/meta/val.txt', ), pipeline=test_pipeline, prefetch=prefetch))
true
true
f70ff2005d248bf2316ed76965ea609b3718b1e9
1,643
py
Python
annotate_txt.py
MuhammedAshraf2020/yolo_using_pytorch
c6de8fd6c725db13792239bc7e4b4d4a71d3cd50
[ "MIT" ]
1
2021-08-03T13:33:35.000Z
2021-08-03T13:33:35.000Z
annotate_txt.py
MuhammedAshraf2020/yolo_using_pytorch
c6de8fd6c725db13792239bc7e4b4d4a71d3cd50
[ "MIT" ]
null
null
null
annotate_txt.py
MuhammedAshraf2020/yolo_using_pytorch
c6de8fd6c725db13792239bc7e4b4d4a71d3cd50
[ "MIT" ]
null
null
null
import argparse from xml.etree import ElementTree as ET import os from pickle import dump from tqdm import tqdm parser = argparse.ArgumentParser() parser.add_argument("dir") parser.add_argument("save") args = parser.parse_args() path = os.path.join(args.dir) classes_nums = {"cat" : 0 , "dog" : 1} keys = list(classes_nums.keys()) try: os.mkdir(args.save) except: print("Folder is already exist !") def ToMidPoint(x1 , y1 , x2 , y2 , size): dw = 1.0 / size[0] dh = 1.0 / size[1] h = y2 - y1 w = x2 - x1 x = (x1 + (w/2)) y = (y1 + (h/2)) return x * dw , y * dh , w * dw , h * dh for File in tqdm(os.listdir(path)): obj_list = 0 xml_path = os.path.join(path , File) file_name = "{}/{}".format(args.save , File.replace("xml" , "txt")) tree = ET.parse(xml_path) root = tree.getroot() size = root.find('size') w_img = int(size.find('width').text) h_img = int(size.find('height').text) with open(file_name , "w") as F : for obj in root.iter("object"): class_name = obj.find("name").text if class_name not in keys: continue obj_list += 1 class_id = classes_nums[class_name] xml_box = obj.find("bndbox") nedded = ["xmin" , "ymin" , "xmax" , "ymax"] x1 , y1 = float(xml_box.find(nedded[0]).text) , float(xml_box.find(nedded[1]).text) x2 , y2 = float(xml_box.find(nedded[2]).text) , float(xml_box.find(nedded[3]).text) x , y , w , h = ToMidPoint(x1 , y1 , x2 , y2 , (w_img , h_img)) F.write("{} {} {} {} {}\n".format(class_id , x , y , w , h)) if obj_list == 0: os.remove(file_name)
29.872727
95
0.584297
import argparse from xml.etree import ElementTree as ET import os from pickle import dump from tqdm import tqdm parser = argparse.ArgumentParser() parser.add_argument("dir") parser.add_argument("save") args = parser.parse_args() path = os.path.join(args.dir) classes_nums = {"cat" : 0 , "dog" : 1} keys = list(classes_nums.keys()) try: os.mkdir(args.save) except: print("Folder is already exist !") def ToMidPoint(x1 , y1 , x2 , y2 , size): dw = 1.0 / size[0] dh = 1.0 / size[1] h = y2 - y1 w = x2 - x1 x = (x1 + (w/2)) y = (y1 + (h/2)) return x * dw , y * dh , w * dw , h * dh for File in tqdm(os.listdir(path)): obj_list = 0 xml_path = os.path.join(path , File) file_name = "{}/{}".format(args.save , File.replace("xml" , "txt")) tree = ET.parse(xml_path) root = tree.getroot() size = root.find('size') w_img = int(size.find('width').text) h_img = int(size.find('height').text) with open(file_name , "w") as F : for obj in root.iter("object"): class_name = obj.find("name").text if class_name not in keys: continue obj_list += 1 class_id = classes_nums[class_name] xml_box = obj.find("bndbox") nedded = ["xmin" , "ymin" , "xmax" , "ymax"] x1 , y1 = float(xml_box.find(nedded[0]).text) , float(xml_box.find(nedded[1]).text) x2 , y2 = float(xml_box.find(nedded[2]).text) , float(xml_box.find(nedded[3]).text) x , y , w , h = ToMidPoint(x1 , y1 , x2 , y2 , (w_img , h_img)) F.write("{} {} {} {} {}\n".format(class_id , x , y , w , h)) if obj_list == 0: os.remove(file_name)
true
true
f70ff2c39f33dd888d631eafd160e6a0bc3e455e
29,175
py
Python
Plotters/Results/Plots_Paper_One.py
PouyaREZ/Wastewater_Energy_Optimization
ead604b715337dc8c76871910d38965d1b8b1856
[ "MIT" ]
2
2021-02-18T19:36:18.000Z
2021-05-20T03:32:20.000Z
Plotters/Results/Plots_Paper_One.py
PouyaREZ/Wastewater_Energy_Optimization
ead604b715337dc8c76871910d38965d1b8b1856
[ "MIT" ]
null
null
null
Plotters/Results/Plots_Paper_One.py
PouyaREZ/Wastewater_Energy_Optimization
ead604b715337dc8c76871910d38965d1b8b1856
[ "MIT" ]
1
2022-01-21T18:39:45.000Z
2022-01-21T18:39:45.000Z
# -*- coding: utf-8 -*- """ Created on Fri Feb 4 2020 @Author: PouyaRZ ____________________________________________________ Plots to produce: 1. LCC of equipment for each scenario for all the individuals 2, SCC of equipment for each scenario for all the individuals 3. SCC vs LCC scatter plot. 4. SCC vs chiller type 5. SCC vs CHP type, 6. LCC vs chiller type 7. SCC vs CHP type 8. Traces of building types across all the runs ____________________________________________________ """ import pandas as pd import numpy as np from matplotlib import pyplot as plt def DF_Filter(filename): file = np.loadtxt(filename, dtype='float') inputDF = pd.DataFrame(file) error_tol = 1.15 # print('GFA stats:') # print(inputDF.iloc[:,38].describe()) print('+++++ processing %s +++++\n'%(filename)) print('Count duplicates:') condition1 = inputDF.duplicated()==True print(inputDF[condition1][38].count()) print('Count under the min GFA:') # Count non-trivial neighborhoods condition2 = inputDF[38] <= 1/error_tol#<=647497/10 print(inputDF[condition2][38].count()) print('Count over the max GFA:') condition3 = inputDF[38]>=647497*5*error_tol print(inputDF[condition3][38].count()) print('Count over the max Site GFA:') condition4 = inputDF[38]/inputDF[36]>=647497*error_tol print(inputDF[condition4][38].count()) print('Count valid answers:') print(len(inputDF) - inputDF[condition1 | condition2 | condition3 | condition4][38].count()) # print('------------------') # Filtering the inadmissible results Filtered = ~(condition1 | condition2 | condition3 | condition4) inputDF = inputDF[Filtered] inputDF.reset_index(inplace=True, drop=True) # print('Annual energy demand stats (MWh):') inputDF[26] /= inputDF[38] # Normalizing LCC ($/m2) inputDF[27] /= inputDF[38] # Normalizing SCC ($/m2) inputDF[39] /= inputDF[38] # Normalizing CO2 (Tonnes/m2) inputDF[40] /= (10**3*inputDF[38]) # Normalizing total energy demand (MWh/m2) inputDF[41] /= inputDF[38] # Normalizing total wwater treatment demand (L/m2) for i in 
range(29,36): # Converting percent areas to integer % inputDF[i] = inputDF[i] * 100 # print(inputDF[40].describe()) return inputDF ### MAIN FUNCTION print('loading data') filenames = ['../RQ1_W_CWWTP_ModConsts_Feb17/SDO_LHS_TestRuns288_Constraint_SF_Test.txt', '../RQ1_WO_CWWTP_ModConsts_Feb17/SDO_LHS_TestRuns288_Constraint_SF_Test.txt'] DFNames = ['CCHP|CWWTP','CCHP+WWT'] DFs = {} for i in range(2): DFs[DFNames[i]] = DF_Filter(filenames[i]) plt.style.use('ggplot') colors_rb = {DFNames[0]:'r', DFNames[1]:'b'} # ============================================================================= ## CHP/Chiller/Solar Types used in the individual neighborhood CHP_Types = {} CHP_Types[1] = 'Gas_1' CHP_Types[2] = 'Gas_2' CHP_Types[3] = 'Gas_3' CHP_Types[4] = 'Gas_4' CHP_Types[5] = 'Gas_5' CHP_Types[6] = 'Micro_1' CHP_Types[7] = 'Micro_2' CHP_Types[8] = 'Micro_3' CHP_Types[9] = 'Recipro_1' CHP_Types[10] = 'Recipro_2' CHP_Types[11] = 'Recipro_3' CHP_Types[12] = 'Recipro_4' CHP_Types[13] = 'Recipro_5' CHP_Types[14] = 'Steam_1' CHP_Types[15] = 'Steam_2' CHP_Types[16] = 'Steam_3' CHP_Types[17] = 'Fuel_Cell_1' CHP_Types[18] = 'Fuel_Cell_2' CHP_Types[19] = 'Fuel_Cell_3' CHP_Types[20] = 'Fuel_Cell_4' CHP_Types[21] = 'Fuel_Cell_5' CHP_Types[22] = 'Fuel_Cell_6' CHP_Types[23] = 'Bio_1' CHP_Types[24] = 'Bio_2' CHP_Types[25] = 'Bio_3' CHP_Types[26] = 'Bio_4' CHP_Types[27] = 'Bio_5' CHP_Types[28] = 'Bio_6' CHP_Types[29] = 'Bio_7' CHP_Types[30] = 'Bio_8' CHP_Types[31] = 'Bio_9' CHP_Types[32] = 'Bio_10' Chiller_Types = {} Chiller_Types[1] = 'Electric_1' Chiller_Types[2] = 'Electric_2' Chiller_Types[3] = 'Electric_3' Chiller_Types[4] = 'Electric_4' Chiller_Types[5] = 'Electric_5' Chiller_Types[6] = 'Electric_6' Chiller_Types[7] = 'Electric_7' Chiller_Types[8] = 'Electric_8' Chiller_Types[9] = 'Electric_9' Chiller_Types[10] = 'Absorp_1' Chiller_Types[11] = 'Absorp_2' Chiller_Types[12] = 'Absorp_3' Chiller_Types[13] = 'Absorp_4' Chiller_Types[14] = 'Absorp_5' Chiller_Types[15] = 'Absorp_6' 
Chiller_Types[16] = 'Absorp_7' Chiller_Types[17] = 'Absorp_8' WWT_Types = {} WWT_Types[1] = "FO_MD" WWT_Types[2] = "FO_RO" WWT_Types[3] = "CWWTP" ## CHP, Chiller and WWT name assignments # CHP = {} # Chiller = {} # WWT = {} for DFName in DFNames: # CHP[DFName] = np.array([CHP_Types[int(i)] for i in DFs[DFName][21]]) # Making strings of CHP names instead of integers DFs[DFName][21] = np.array([CHP_Types[int(i)] for i in DFs[DFName][21]]) # Making strings of CHP names instead of integers # Chiller[DFName] = np.array([Chiller_Types[int(i)] for i in DFs[DFName][22]]) # Making strings of Chiller names instead of integers DFs[DFName][22] = np.array([Chiller_Types[int(i)] for i in DFs[DFName][22]]) # Making strings of Chiller names instead of integers # WWT[DFName] = np.array([WWT_Types[int(i)] for i in DFs[DFName][24]]) # Making strings of WWT module names instead of integers DFs[DFName][24] = np.array([WWT_Types[int(i)] for i in DFs[DFName][24]]) # Making strings of WWT module names instead of integers # ============================================================================= ######################## PLOTS ########################## ############################################# print('plotting overall LCC and SCC graphs') # LCC plt.figure(figsize=(10,5)) for DFName in DFNames: sortedDF = DFs[DFName].sort_values(by=26, ascending=True).reset_index(drop=True) plt.scatter(x=sortedDF.index,y=(sortedDF[26]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName]) # (DFs[DFName][0][26]/10**6).plot(label=DFName) plt.xlabel('Rank') plt.ylabel(r'LCC (k\$/$m^2$)') # plt.title('LCC') plt.legend() plt.savefig('LCC_Ascending.png', dpi=400, bbox_inches='tight') # SCC plt.figure(figsize=(10,5)) for DFName in DFNames: sortedDF = DFs[DFName].sort_values(by=27, ascending=True).reset_index(drop=True) plt.scatter(x=sortedDF.index,y=(sortedDF[27]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName]) # (DFs[DFName][0][26]/10**6).plot(label=DFName) plt.xlabel('Rank') plt.ylabel(r'SCC 
(k\$/$m^2$)') # plt.title('SCC') plt.legend() plt.savefig('SCC_Ascending.png', dpi=400, bbox_inches='tight') plt.close('all') ############################################# print('plotting LCC and SCC box plots') print('\n#############################################') print('Stats of LCC ($/m2) for Disintegrated Case:\n',(DFs[DFNames[0]][26]).describe()) print('Stats of LCC ($/m2) for Integrated Case:\n',(DFs[DFNames[1]][26]).describe()) print('Stats of SCC ($/m2) for Disintegrated Case:\n',(DFs[DFNames[0]][27]).describe()) print('Stats of SCC ($/m2) for Integrated Case:\n',(DFs[DFNames[1]][27]).describe()) print('#############################################\n') # ============================================================================= # # LCC # plt.figure(figsize=(10,5)) # # for DFName in DFNames: # plt.boxplot(x=[(DFs[DFNames[0]][26]/10**3), (DFs[DFNames[1]][26]/10**3)]) # # (DFs[DFName][0][26]/10**6).plot(label=DFName) # # plt.xlabel('Rank') # plt.ylabel(r'LCC (k\$/$m^2$)') # plt.xticks([1,2],[DFNames[0],DFNames[1]]) # # plt.title('LCC') # plt.savefig('LCC_Boxplot.png', dpi=400, bbox_inches='tight') # # # # # SCC # plt.figure(figsize=(10,5)) # # for DFName in DFNames: # plt.boxplot(x=[(DFs[DFNames[0]][27]/10**3), (DFs[DFNames[1]][27]/10**3)]) # # (DFs[DFName][0][26]/10**6).plot(label=DFName) # # plt.xlabel('Rank') # plt.ylabel(r'SCC (k\$/$m^2$)') # plt.xticks([1,2],[DFNames[0],DFNames[1]]) # # plt.title('LCC') # plt.savefig('SCC_Boxplot.png', dpi=400, bbox_inches='tight') # # plt.close('all') # ============================================================================= ''' ############################################# print('plotting LCC/SCC vs total neighborhood energy and ww graphs') print('\n#############################################') print('Stats of Total Energy Demand (MWh/m2) for Disintegrated Case:\n',(DFs[DFNames[0]][40]).describe()) print('Stats of Total Energy Demand (MWh/m2) for Integrated Case:\n',(DFs[DFNames[1]][40]).describe()) 
print('Stats of Total Wastewater Treatment Demand (m3/m2) for Disintegrated Case:\n',(DFs[DFNames[0]][41]/10**3).describe()) print('Stats of Total Wastewater Treatment Demand (m3/m2) for Integrated Case:\n',(DFs[DFNames[1]][41]/10**3).describe()) print('#############################################\n') # LCC vs Neighborhood's Total Energy Use plt.figure(figsize=(10,5)) for DFName in DFNames: sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True) plt.scatter(x=(sortedDF[40]),y=(sortedDF[26]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName]) # (DFs[DFName][0][26]/10**6).plot(label=DFName) plt.xlabel(r'Total Energy Demand (MWh/$m^2$)') plt.ylabel(r'LCC (k\$/$m^2$)') # plt.title('LCC') plt.legend() plt.savefig('LCC_vs_Energy_Demand.png', dpi=400, bbox_inches='tight') # LCC vs Neighborhood's Total WWater Demand plt.figure(figsize=(10,5)) for DFName in DFNames: sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True) plt.scatter(x=(sortedDF[41]/10**3),y=(sortedDF[26]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName]) # (DFs[DFName][0][26]/10**6).plot(label=DFName) plt.xlabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)') plt.ylabel(r'LCC (k\$/$m^2$)') # plt.title('LCC') plt.legend() plt.savefig('LCC_vs_WWater_Demand.png', dpi=400, bbox_inches='tight') # SCC vs Neighborhood's Total Energy Use plt.figure(figsize=(10,5)) for DFName in DFNames: sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True) plt.scatter(x=(sortedDF[40]),y=(sortedDF[27]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName]) # (DFs[DFName][0][26]/10**6).plot(label=DFName) plt.xlabel(r'Total Energy Demand (MWh/$m^2$)') plt.ylabel(r'SCC (k\$/$m^2$)') # plt.title('LCC') plt.legend() plt.savefig('SCC_vs_Energy_Demand.png', dpi=400, bbox_inches='tight') # SCC vs Neighborhood's Total WWater Demand plt.figure(figsize=(10,5)) for DFName in DFNames: sortedDF = DFs[DFName].sort_values(by=41, 
ascending=True).reset_index(drop=True) plt.scatter(x=(sortedDF[41]/10**3),y=(sortedDF[27]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName]) # (DFs[DFName][0][26]/10**6).plot(label=DFName) plt.xlabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)') plt.ylabel(r'SCC (k\$/$m^2$)') # plt.title('LCC') plt.legend() plt.savefig('SCC_vs_WWater_Demand.png', dpi=400, bbox_inches='tight') plt.close('all') ############################################# print('plotting building mix vs neighborhood energy and ww graphs') # Building Mix vs Neighborhood's Total WWater Demand (integrated) DFName = 'CCHP+WWT' bldg_types = ['Res','Off','Com','Ind','Hos','Med','Edu'] colors = ['m','b','c','g','y','orange','r'] columns = list(range(29,36)) plt.figure(figsize=(10,5)) sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True) for i in range(len(bldg_types)): plt.scatter(x=(sortedDF[41]/10**3),y=DFs[DFName].iloc[:,columns[i]], s=0.5, label=bldg_types[i], c=colors[i], alpha=0.5) # (DFs[DFName][0][26]/10**6).plot(label=DFName) plt.xlabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)') plt.ylabel('Percent of Total GFA (%)') plt.ylim(0, 100) plt.xlim(0,11) # plt.title('LCC') plt.legend() plt.savefig('Bldg_Mix_vs_WWater_Demand_Integ.png', dpi=400, bbox_inches='tight') # Building Mix vs Neighborhood's Total WWater Demand (Disintegrated) DFName = 'CCHP|CWWTP' bldg_types = ['Res','Off','Com','Ind','Hos','Med','Edu'] colors = ['m','b','c','g','y','orange','r'] columns = list(range(29,36)) plt.figure(figsize=(10,5)) sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True) for i in range(len(bldg_types)): plt.scatter(x=(sortedDF[41]/10**3),y=DFs[DFName].iloc[:,columns[i]], s=0.5, label=bldg_types[i], c=colors[i], alpha=0.5) # (DFs[DFName][0][26]/10**6).plot(label=DFName) plt.xlabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)') plt.ylabel('Percent of Total GFA (%)') # plt.title('LCC') plt.ylim(0, 100) plt.xlim(0,11) plt.legend() 
plt.savefig('Bldg_Mix_vs_WWater_Demand_Disinteg.png', dpi=400, bbox_inches='tight') # Building Mix vs Neighborhood's Total Energy Demand (integrated) DFName = 'CCHP+WWT' bldg_types = ['Res','Off','Com','Ind','Hos','Med','Edu'] colors = ['m','b','c','g','y','orange','r'] columns = list(range(29,36)) plt.figure(figsize=(10,5)) sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True) for i in range(len(bldg_types)): plt.scatter(x=(sortedDF[40]),y=DFs[DFName].iloc[:,columns[i]], s=0.5, label=bldg_types[i], c=colors[i], alpha=0.5) # (DFs[DFName][0][26]/10**6).plot(label=DFName) plt.xlabel(r'Total Energy Demand (MWh/$m^2$)') plt.ylabel('Percent of Total GFA (%)') # plt.title('LCC') plt.ylim(0, 100) plt.xlim(0,1) plt.legend() plt.savefig('Bldg_Mix_vs_Energy_Demand_Integ.png', dpi=400, bbox_inches='tight') # Building Mix vs Neighborhood's Total Energy Demand (Disintegrated) DFName = 'CCHP|CWWTP' bldg_types = ['Res','Off','Com','Ind','Hos','Med','Edu'] colors = ['m','b','c','g','y','orange','r'] columns = list(range(29,36)) plt.figure(figsize=(10,5)) sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True) for i in range(len(bldg_types)): plt.scatter(x=(sortedDF[40]),y=DFs[DFName].iloc[:,columns[i]], s=0.5, label=bldg_types[i], c=colors[i], alpha=0.5) # (DFs[DFName][0][26]/10**6).plot(label=DFName) plt.xlabel(r'Total Energy Demand (MWh/$m^2$)') plt.ylabel('Percent of Total GFA (%)') # plt.title('LCC') plt.ylim(0, 100) plt.xlim(0,1) plt.legend() plt.savefig('Bldg_Mix_vs_Energy_Demand_Disinteg.png', dpi=400, bbox_inches='tight') plt.close('all') ############################################# print('plotting Supply type vs total neighborhood energy and ww graphs') # Total Energy Demand vs CHP plt.figure(figsize=(10,5)) for DFName in DFNames: sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True) plt.scatter(x=DFs[DFName][21],y=(sortedDF[40]),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName]) 
plt.xlabel(r'CHP Type') plt.ylabel(r'Total Energy Demand (MWh/$m^2$)') plt.legend() plt.savefig('Total_Energy_vs_CHP.png', dpi=400, bbox_inches='tight') # Total WWater Demand vs CHP plt.figure(figsize=(10,5)) for DFName in DFNames: sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True) plt.scatter(x=DFs[DFName][21],y=(sortedDF[41]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName]) plt.xlabel(r'CHP Type') plt.ylabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)') plt.legend() plt.savefig('Total_WWater_vs_CHP.png', dpi=400, bbox_inches='tight') # Total Energy Demand vs Chiller plt.figure(figsize=(10,5)) for DFName in DFNames: sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True) plt.scatter(x=DFs[DFName][22],y=(sortedDF[40]),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName]) plt.xlabel(r'Chiller Type') plt.ylabel(r'Total Energy Demand (MWh/$m^2$)') plt.legend() plt.savefig('Total_Energy_vs_Chiller.png', dpi=400, bbox_inches='tight') # Total WWater Demand vs Chiller plt.figure(figsize=(10,5)) for DFName in DFNames: sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True) plt.scatter(x=DFs[DFName][22],y=(sortedDF[41]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName]) plt.xlabel(r'Chiller Type') plt.ylabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)') plt.legend() plt.savefig('Total_WWater_vs_Chiller.png', dpi=400, bbox_inches='tight') # Total Energy Demand vs WWT (integrated) plt.figure(figsize=(10,5)) DFName = 'CCHP+WWT' sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True) plt.scatter(x=DFs[DFName][24],y=(sortedDF[40]),s=2, c=colors_rb[DFName]) plt.xlabel(r'WWT Type') plt.ylabel(r'Total Energy Demand (MWh/$m^2$)') plt.legend() plt.savefig('Total_Energy_vs_WWT_Integ.png', dpi=400, bbox_inches='tight') # Total WWater Demand vs WWT (integrated) plt.figure(figsize=(10,5)) DFName = 'CCHP+WWT' sortedDF = DFs[DFName].sort_values(by=41, 
ascending=True).reset_index(drop=True) plt.scatter(x=DFs[DFName][24],y=(sortedDF[41]/10**3), s=2, c=colors_rb[DFName]) plt.xlabel(r'WWT Type') plt.ylabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)') plt.savefig('Total_Wwater_vs_WWT_Integ.png', dpi=400, bbox_inches='tight') ''' plt.close('all') ############################################# print('plotting pareto fronts') # LCC vs CO2 plt.figure(figsize=(10,5)) for DFName in DFNames: plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][39],label=DFName, s=2, alpha=0.5, c=colors_rb[DFName]) plt.xlabel(r'LCC (k\$/$m^2$)') plt.ylabel(r'Lifecycle $CO_{2e}$ (T/$m^2$)') plt.legend() plt.savefig('CO2_vs_LCC.png', dpi=400, bbox_inches='tight') ############################################# # LCC vs SCC plt.figure(figsize=(10,5)) for DFName in DFNames: plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][27]/10**3,label=DFName, s=2, alpha=0.5, c=colors_rb[DFName]) plt.xlabel(r'LCC (k\$/$m^2$)') plt.ylabel(r'SCC (k\$/$m^2$)') plt.legend() plt.savefig('SCC_vs_LCC.png', dpi=400, bbox_inches='tight') # LCC vs SCC w Generation-based transparency plt.figure(figsize=(10,5)) for DFName in DFNames: alphas = np.linspace(0.1, 1, len(DFs[DFName])) rgba_colors = np.zeros((len(DFs[DFName]),4)) if DFName == DFNames[0]: rgba_colors[:,0] = 1.0 # red else: rgba_colors[:,2] = 1.0 # blue rgba_colors[:,3] = alphas plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][27]/10**3,label=DFName, s=1, c=rgba_colors) plt.xlabel(r'LCC (k\$/$m^2$)') plt.ylabel(r'SCC (k\$/$m^2$)') plt.legend() plt.savefig('SCC_vs_LCC_Gen_Colorcoded.png', dpi=400, bbox_inches='tight') # LCC vs SCC w Generation-based transparency and elite-filtered plt.figure(figsize=(10,5)) for DFName in DFNames: DF = DFs[DFName][DFs[DFName][26]/10**3 <= 500] DF = DF[DFs[DFName][27]/10**3 <= 0.1] alphas = np.linspace(0.1, 1, len(DF)) rgba_colors = np.zeros((len(DF),4)) if DFName == DFNames[0]: rgba_colors[:,0] = 1.0 # red else: rgba_colors[:,2] = 1.0 # blue rgba_colors[:,3] = alphas 
plt.scatter(x=DF[26]/10**3,y=DF[27]/10**3,label=DFName, s=1, c=rgba_colors) plt.xlabel(r'LCC (k\$/$m^2$)') plt.ylabel(r'SCC (k\$/$m^2$)') plt.legend() plt.savefig('SCC_vs_LCC_Gen_Colorcoded_Filtered.png', dpi=400, bbox_inches='tight') # ============================================================================= # # LCC vs SCC (integrated) # plt.figure(figsize=(10,5)) # DFName = 'CCHP+WWT' # plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][27]/10**3, s=2) # plt.xlabel(r'LCC (k\$/$m^2$)') # plt.ylabel(r'SCC (k\$/$m^2$)') # plt.savefig('SCC_vs_LCC_Integ.png', dpi=400, bbox_inches='tight') # # # # LCC vs SCC (disintegrated) # plt.figure(figsize=(10,5)) # DFName = 'CCHP|CWWTP' # plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][27]/10**3, s=2) # # (DFs[DFName][0][26]/10**6).plot(label=DFName) # plt.xlabel(r'LCC (k\$/$m^2$)') # plt.ylabel(r'SCC (k\$/$m^2$)') # # plt.title('LCC') # plt.savefig('SCC_vs_LCC_Disinteg.png', dpi=400, bbox_inches='tight') # # ============================================================================= ############################################# print('plotting Supply type vs opt objectives') print('\n#############################################') Disinteg_Grpd_by_CHP_meanLCC = DFs[DFNames[0]].groupby(21)[26].mean() Disnteg_Grpd_by_CHP_medLCC = DFs[DFNames[0]].groupby(21)[26].median() Disnteg_Grpd_by_CHP_meanSCC = DFs[DFNames[0]].groupby(21)[27].mean() Disnteg_Grpd_by_CHP_medSCC = DFs[DFNames[0]].groupby(21)[27].median() Integ_Grpd_by_CHP_meanLCC = DFs[DFNames[1]].groupby(21)[26].mean() Integ_Grpd_by_CHP_medLCC = DFs[DFNames[1]].groupby(21)[26].median() Integ_Grpd_by_CHP_meanSCC = DFs[DFNames[1]].groupby(21)[27].mean() Integ_Grpd_by_CHP_medSCC = DFs[DFNames[1]].groupby(21)[27].median() items = [Disinteg_Grpd_by_CHP_meanLCC, Disnteg_Grpd_by_CHP_medLCC, Disnteg_Grpd_by_CHP_meanSCC, Disnteg_Grpd_by_CHP_medSCC, Integ_Grpd_by_CHP_meanLCC, Integ_Grpd_by_CHP_medLCC, Integ_Grpd_by_CHP_meanSCC, Integ_Grpd_by_CHP_medSCC] items_names = 
['Disinteg_Grpd_by_CHP_meanLCC', 'Disnteg_Grpd_by_CHP_medLCC', 'Disnteg_Grpd_by_CHP_meanSCC', 'Disnteg_Grpd_by_CHP_medSCC', 'Integ_Grpd_by_CHP_meanLCC', 'Integ_Grpd_by_CHP_medLCC', 'Integ_Grpd_by_CHP_meanSCC', 'Integ_Grpd_by_CHP_medSCC'] for i in range(len(items)): print(items_names[i], items[i]) print('#############################################\n') # shapes = {DFNames[0]: '+', DFNames[1]: 'x'} # LCC vs CHP for DFName in DFNames: plt.figure(figsize=(10,5)) DF = DFs[DFName].sort_values(by=21) plt.scatter(x=DF[21], y=DF[26]/10**3,label=DFName, s=2, alpha=0.5)#, c=colors_rb[DFName])#, marker=shapes[DFName]) plt.xlabel(r'CHP Type') plt.xticks(rotation=75) plt.ylabel(r'LCC (k\$/$m^2$)') plt.ylim(-5, 500) # plt.legend() if DFName == 'CCHP|CWWTP': plt.savefig('LCC_vs_CHP_disinteg.png', dpi=400, bbox_inches='tight') else: plt.savefig('LCC_vs_CHP_integ.png', dpi=400, bbox_inches='tight') # SCC vs CHP for DFName in DFNames: plt.figure(figsize=(10,5)) DF = DFs[DFName].sort_values(by=21) plt.scatter(x=DF[21], y=DF[27]/10**3,label=DFName, s=2, alpha=0.5)#, c=colors_rb[DFName]) plt.xlabel(r'CHP Type') plt.xticks(rotation=75) plt.ylabel(r'SCC (k\$/$m^2$)') plt.ylim(-0.01, 0.1) # plt.legend() if DFName == 'CCHP|CWWTP': plt.savefig('SCC_vs_CHP_disinteg.png', dpi=400, bbox_inches='tight') else: plt.savefig('SCC_vs_CHP_integ.png', dpi=400, bbox_inches='tight') # SCC vs CHP with LCC-oriented transparency for DFName in DFNames: plt.figure(figsize=(10,5)) DF = DFs[DFName].sort_values(by=21) DF = DF[(DF[26]<=100) & (DF[27]<=100)] print('number of indivs plotted: ', len(DF)) alphas = 1.2 - DF[26]/DF[26].max() # Normalized LCCs (lowest LCC: 1; highest LCC: 0) # alphas = np.linspace(0.1, 1, len(DFs[DFName])) rgba_colors = np.zeros((len(DF),4)) rgba_colors[:,3] = alphas plt.scatter(x=DF[21],y=DF[27]/10**3,label=DFName, s=1, c=rgba_colors) plt.xlabel(r'CHP Type') plt.xticks(rotation=75) plt.ylabel(r'SCC (k\$/$m^2$)') plt.ylim(-0.01, 0.1) # plt.legend() if DFName == 'CCHP|CWWTP': 
plt.savefig('SCC_vs_CHP_disinteg_colorCoded.png', dpi=400, bbox_inches='tight') else: plt.savefig('SCC_vs_CHP_integ_colorCoded.png', dpi=400, bbox_inches='tight') # ============================================================================= # # LCC vs CHP (integrated) # plt.figure(figsize=(10,5)) # DFName = 'CCHP+WWT' # plt.scatter(x=DFs[DFName][21], y=DFs[DFName][26]/10**3, s=2) # plt.xlabel(r'CHP Type') # plt.ylabel(r'LCC (k\$/$m^2$)') # plt.savefig('LCC_vs_CHP_Integ.png', dpi=400, bbox_inches='tight') # # # # LCC vs CHP (disintegrated) # plt.figure(figsize=(10,5)) # DFName = 'CCHP|CWWTP' # plt.scatter(x=DFs[DFName][21], y=DFs[DFName][26]/10**3, s=2) # plt.xlabel(r'CHP Type') # plt.ylabel(r'LCC (k\$/$m^2$)') # plt.savefig('LCC_vs_CHP_Disinteg.png', dpi=400, bbox_inches='tight') # ============================================================================= # LCC vs Chiller for DFName in DFNames: plt.figure(figsize=(10,5)) DF = DFs[DFName].sort_values(by=22) plt.scatter(x=DF[22], y=DF[26]/10**3,label=DFName, s=2, alpha=0.5)#, c=colors_rb[DFName]) plt.xlabel(r'Chiller Type') plt.xticks(rotation=75) plt.ylabel(r'LCC (k\$/$m^2$)') plt.ylim(-5, 500) # plt.legend() if DFName == 'CCHP|CWWTP': plt.savefig('LCC_vs_Chiller_disinteg.png', dpi=400, bbox_inches='tight') else: plt.savefig('LCC_vs_Chiller_integ.png', dpi=400, bbox_inches='tight') # SCC vs Chiller for DFName in DFNames: plt.figure(figsize=(10,5)) DF = DFs[DFName].sort_values(by=22) plt.scatter(x=DF[22], y=DF[27]/10**3,label=DFName, s=2, alpha=0.5)#, c=colors_rb[DFName]) plt.xlabel(r'Chiller Type') plt.xticks(rotation=75) plt.ylabel(r'SCC (k\$/$m^2$)') plt.ylim(-0.01, 0.1) # plt.legend() if DFName == 'CCHP|CWWTP': plt.savefig('SCC_vs_Chiller_disinteg.png', dpi=400, bbox_inches='tight') else: plt.savefig('SCC_vs_Chiller_integ.png', dpi=400, bbox_inches='tight') # SCC vs Chiller with LCC-oriented transparency for DFName in DFNames: plt.figure(figsize=(10,5)) DF = DFs[DFName].sort_values(by=22) DF = 
DF[(DF[26]<=100) & (DF[27]<=0.5)] print('number of indivs plotted: ', len(DF)) alphas = 1 - DF[26]/DF[26].max() # Normalized LCCs (lowest LCC: 1; highest LCC: 0) # alphas = np.linspace(0.1, 1, len(DFs[DFName])) rgba_colors = np.zeros((len(DF),4)) rgba_colors[:,3] = alphas plt.scatter(x=DF[22],y=DF[27]/10**3,label=DFName, s=1, c=rgba_colors) plt.xlabel(r'Chiller Type') plt.xticks(rotation=75) plt.ylabel(r'SCC (k\$/$m^2$)') plt.ylim(-0.01, 0.1) # plt.legend() if DFName == 'CCHP|CWWTP': plt.savefig('SCC_vs_Chiller_disinteg_colorCoded.png', dpi=400, bbox_inches='tight') else: plt.savefig('SCC_vs_Chiller_integ_colorCoded.png', dpi=400, bbox_inches='tight') # ============================================================================= # # LCC vs Chiller (integrated) # plt.figure(figsize=(10,5)) # DFName = 'CCHP+WWT' # plt.scatter(x=DFs[DFName][22], y=DFs[DFName][26]/10**3, s=2) # plt.xlabel(r'Chiller Type') # plt.ylabel(r'LCC (k\$/$m^2$)') # plt.savefig('LCC_vs_Chiller_Integ.png', dpi=400, bbox_inches='tight') # # # # LCC vs Chiller (disintegrated) # plt.figure(figsize=(10,5)) # DFName = 'CCHP|CWWTP' # plt.scatter(x=DFs[DFName][22], y=DFs[DFName][26]/10**3, s=2) # plt.xlabel(r'Chiller Type') # plt.ylabel(r'LCC (k\$/$m^2$)') # plt.savefig('LCC_vs_Chiller_Disinteg.png', dpi=400, bbox_inches='tight') # ============================================================================= # LCC vs WWT (integrated) plt.figure(figsize=(10,5)) DFName = 'CCHP+WWT' DF = DFs[DFName].sort_values(by=24) plt.scatter(x=DF[24], y=DF[26]/10**3, s=2)#, c=colors_rb[DFName]) plt.xlabel(r'WWT Type') plt.xticks(rotation=75) plt.ylabel(r'LCC (k\$/$m^2$)') plt.ylim(-5, 500) plt.savefig('LCC_vs_WWT_Integ.png', dpi=400, bbox_inches='tight') # SCC vs WWT (integrated) plt.figure(figsize=(10,5)) DFName = 'CCHP+WWT' DF = DFs[DFName].sort_values(by=24) plt.scatter(x=DF[24], y=DF[27]/10**3, s=2)#, c=colors_rb[DFName]) plt.xlabel(r'WWT Type') plt.xticks(rotation=75) plt.ylabel(r'SCC (k\$/$m^2$)') 
plt.ylim(-0.01, 0.1) plt.savefig('SCC_vs_WWT_Integ.png', dpi=400, bbox_inches='tight') # SCC vs WWT with LCC-oriented transparency (integrated) plt.figure(figsize=(10,5)) DFName = 'CCHP+WWT' DF = DFs[DFName].sort_values(by=24) DF = DF[(DF[26]<=100) & (DF[27]<=0.5)] print('number of indivs plotted: ', len(DF)) alphas = 1 - DF[26]/DF[26].max() # Normalized LCCs (lowest LCC: 1; highest LCC: 0) # alphas = np.linspace(0.1, 1, len(DFs[DFName])) rgba_colors = np.zeros((len(DF),4)) rgba_colors[:,3] = alphas plt.scatter(x=DF[24],y=DF[27]/10**3,s=1, c=rgba_colors) plt.xlabel(r'WWT Type') plt.xticks(rotation=75) plt.ylabel(r'SCC (k\$/$m^2$)') plt.ylim(-0.01, 0.1) plt.savefig('SCC_vs_WWT_Integ_colorCoded.png', dpi=400, bbox_inches='tight') plt.close('all') ############################################# ''' print('plotting building mix traces') # Building Mix trace plots DFName = 'CCHP+WWT' plt.figure(figsize=(10,5)) fig = plt.figure(figsize=(10,5)) ax = fig.add_subplot(111) Num_Individuals = len(DFs[DFName]) cm = plt.get_cmap('rainbow') ax.set_prop_cycle(color=[cm(1.*i/Num_Individuals) for i in range(Num_Individuals)])#ax.set_color_cycle([cm(1.*i/Num_Individuals) for i in range(Num_Individuals)]) for i in range(Num_Individuals): ax.plot(['Res','Off','Com','Ind','Hos','Med','Edu'], DFs[DFName].iloc[i,29:36],linewidth=0.2, alpha=0.5) ax.set_xlabel('Building-Use') ax.set_ylabel('Percent of Total GFA (%)') plt.ylim(0, 100) fig.savefig('Uses_Integ.png', dpi=400, bbox_inches='tight') DFName = 'CCHP|CWWTP' fig = plt.figure(figsize=(10,5)) ax = fig.add_subplot(111) Num_Individuals = len(DFs[DFName]) cm = plt.get_cmap('rainbow') ax.set_prop_cycle(color=[cm(1.*i/Num_Individuals) for i in range(Num_Individuals)])#ax.set_color_cycle([cm(1.*i/Num_Individuals) for i in range(Num_Individuals)]) y_array = np.array(DFs[DFName].iloc[:,29:36]) for i in range(Num_Individuals): ax.plot(['Res','Off','Com','Ind','Hos','Med','Edu'], DFs[DFName].iloc[i,29:36],linewidth=0.2, alpha=0.5) 
ax.set_xlabel('Building-Use') ax.set_ylabel('Percent of Total GFA (%)') plt.ylim(0, 100) fig.savefig('Uses_Disinteg.png', dpi=400, bbox_inches='tight') plt.close('all') '''
34.982014
162
0.637943
import pandas as pd import numpy as np from matplotlib import pyplot as plt def DF_Filter(filename): file = np.loadtxt(filename, dtype='float') inputDF = pd.DataFrame(file) error_tol = 1.15 print('+++++ processing %s +++++\n'%(filename)) print('Count duplicates:') condition1 = inputDF.duplicated()==True print(inputDF[condition1][38].count()) print('Count under the min GFA:') condition2 = inputDF[38] <= 1/error_tol print(inputDF[condition2][38].count()) print('Count over the max GFA:') condition3 = inputDF[38]>=647497*5*error_tol print(inputDF[condition3][38].count()) print('Count over the max Site GFA:') condition4 = inputDF[38]/inputDF[36]>=647497*error_tol print(inputDF[condition4][38].count()) print('Count valid answers:') print(len(inputDF) - inputDF[condition1 | condition2 | condition3 | condition4][38].count()) Filtered = ~(condition1 | condition2 | condition3 | condition4) inputDF = inputDF[Filtered] inputDF.reset_index(inplace=True, drop=True) inputDF[26] /= inputDF[38] inputDF[27] /= inputDF[38] inputDF[39] /= inputDF[38] inputDF[40] /= (10**3*inputDF[38]) inputDF[41] /= inputDF[38] for i in range(29,36): inputDF[i] = inputDF[i] * 100 return inputDF s = ['../RQ1_W_CWWTP_ModConsts_Feb17/SDO_LHS_TestRuns288_Constraint_SF_Test.txt', '../RQ1_WO_CWWTP_ModConsts_Feb17/SDO_LHS_TestRuns288_Constraint_SF_Test.txt'] DFNames = ['CCHP|CWWTP','CCHP+WWT'] DFs = {} for i in range(2): DFs[DFNames[i]] = DF_Filter(filenames[i]) plt.style.use('ggplot') colors_rb = {DFNames[0]:'r', DFNames[1]:'b'} CHP_Types[3] = 'Gas_3' CHP_Types[4] = 'Gas_4' CHP_Types[5] = 'Gas_5' CHP_Types[6] = 'Micro_1' CHP_Types[7] = 'Micro_2' CHP_Types[8] = 'Micro_3' CHP_Types[9] = 'Recipro_1' CHP_Types[10] = 'Recipro_2' CHP_Types[11] = 'Recipro_3' CHP_Types[12] = 'Recipro_4' CHP_Types[13] = 'Recipro_5' CHP_Types[14] = 'Steam_1' CHP_Types[15] = 'Steam_2' CHP_Types[16] = 'Steam_3' CHP_Types[17] = 'Fuel_Cell_1' CHP_Types[18] = 'Fuel_Cell_2' CHP_Types[19] = 'Fuel_Cell_3' CHP_Types[20] = 'Fuel_Cell_4' 
CHP_Types[21] = 'Fuel_Cell_5' CHP_Types[22] = 'Fuel_Cell_6' CHP_Types[23] = 'Bio_1' CHP_Types[24] = 'Bio_2' CHP_Types[25] = 'Bio_3' CHP_Types[26] = 'Bio_4' CHP_Types[27] = 'Bio_5' CHP_Types[28] = 'Bio_6' CHP_Types[29] = 'Bio_7' CHP_Types[30] = 'Bio_8' CHP_Types[31] = 'Bio_9' CHP_Types[32] = 'Bio_10' Chiller_Types = {} Chiller_Types[1] = 'Electric_1' Chiller_Types[2] = 'Electric_2' Chiller_Types[3] = 'Electric_3' Chiller_Types[4] = 'Electric_4' Chiller_Types[5] = 'Electric_5' Chiller_Types[6] = 'Electric_6' Chiller_Types[7] = 'Electric_7' Chiller_Types[8] = 'Electric_8' Chiller_Types[9] = 'Electric_9' Chiller_Types[10] = 'Absorp_1' Chiller_Types[11] = 'Absorp_2' Chiller_Types[12] = 'Absorp_3' Chiller_Types[13] = 'Absorp_4' Chiller_Types[14] = 'Absorp_5' Chiller_Types[15] = 'Absorp_6' Chiller_Types[16] = 'Absorp_7' Chiller_Types[17] = 'Absorp_8' WWT_Types = {} WWT_Types[1] = "FO_MD" WWT_Types[2] = "FO_RO" WWT_Types[3] = "CWWTP" in DFs[DFName][21]]) ] for i in DFs[DFName][22]]) in DFs[DFName][24]]) 0**3,label=DFName, s=2, alpha=0.5) plt.xlabel(r'Chiller Type') plt.xticks(rotation=75) plt.ylabel(r'SCC (k\$/$m^2$)') plt.ylim(-0.01, 0.1) if DFName == 'CCHP|CWWTP': plt.savefig('SCC_vs_Chiller_disinteg.png', dpi=400, bbox_inches='tight') else: plt.savefig('SCC_vs_Chiller_integ.png', dpi=400, bbox_inches='tight') for DFName in DFNames: plt.figure(figsize=(10,5)) DF = DFs[DFName].sort_values(by=22) DF = DF[(DF[26]<=100) & (DF[27]<=0.5)] print('number of indivs plotted: ', len(DF)) alphas = 1 - DF[26]/DF[26].max() rgba_colors = np.zeros((len(DF),4)) rgba_colors[:,3] = alphas plt.scatter(x=DF[22],y=DF[27]/10**3,label=DFName, s=1, c=rgba_colors) plt.xlabel(r'Chiller Type') plt.xticks(rotation=75) plt.ylabel(r'SCC (k\$/$m^2$)') plt.ylim(-0.01, 0.1) if DFName == 'CCHP|CWWTP': plt.savefig('SCC_vs_Chiller_disinteg_colorCoded.png', dpi=400, bbox_inches='tight') else: plt.savefig('SCC_vs_Chiller_integ_colorCoded.png', dpi=400, bbox_inches='tight') P+WWT' DF = 
DFs[DFName].sort_values(by=24) plt.scatter(x=DF[24], y=DF[26]/10**3, s=2) plt.xlabel(r'WWT Type') plt.xticks(rotation=75) plt.ylabel(r'LCC (k\$/$m^2$)') plt.ylim(-5, 500) plt.savefig('LCC_vs_WWT_Integ.png', dpi=400, bbox_inches='tight') plt.figure(figsize=(10,5)) DFName = 'CCHP+WWT' DF = DFs[DFName].sort_values(by=24) plt.scatter(x=DF[24], y=DF[27]/10**3, s=2) plt.xlabel(r'WWT Type') plt.xticks(rotation=75) plt.ylabel(r'SCC (k\$/$m^2$)') plt.ylim(-0.01, 0.1) plt.savefig('SCC_vs_WWT_Integ.png', dpi=400, bbox_inches='tight') plt.figure(figsize=(10,5)) DFName = 'CCHP+WWT' DF = DFs[DFName].sort_values(by=24) DF = DF[(DF[26]<=100) & (DF[27]<=0.5)] print('number of indivs plotted: ', len(DF)) alphas = 1 - DF[26]/DF[26].max() rgba_colors = np.zeros((len(DF),4)) rgba_colors[:,3] = alphas plt.scatter(x=DF[24],y=DF[27]/10**3,s=1, c=rgba_colors) plt.xlabel(r'WWT Type') plt.xticks(rotation=75) plt.ylabel(r'SCC (k\$/$m^2$)') plt.ylim(-0.01, 0.1) plt.savefig('SCC_vs_WWT_Integ_colorCoded.png', dpi=400, bbox_inches='tight') plt.close('all')
true
true
f70ff38ed88161f522ef82bebd021101b440decd
35,221
py
Python
lib/galaxy/tool_shed/util/repository_util.py
reid-wagner/galaxy
52175e60d52ae64a5c8e428fa277538c8cbddd7f
[ "CC-BY-3.0" ]
null
null
null
lib/galaxy/tool_shed/util/repository_util.py
reid-wagner/galaxy
52175e60d52ae64a5c8e428fa277538c8cbddd7f
[ "CC-BY-3.0" ]
null
null
null
lib/galaxy/tool_shed/util/repository_util.py
reid-wagner/galaxy
52175e60d52ae64a5c8e428fa277538c8cbddd7f
[ "CC-BY-3.0" ]
null
null
null
import logging import os import re import shutil from urllib.error import HTTPError from markupsafe import escape from sqlalchemy import ( and_, false, or_, ) from sqlalchemy.orm import joinedload from galaxy import util from galaxy import web from galaxy.tool_shed.util import basic_util from galaxy.util.tool_shed import common_util, encoding_util log = logging.getLogger(__name__) VALID_REPOSITORYNAME_RE = re.compile(r"^[a-z0-9\_]+$") def check_for_updates(app, model, repository_id=None): message = '' status = 'ok' if repository_id is None: success_count = 0 repository_names_not_updated = [] updated_count = 0 for repository in model.context.query(model.ToolShedRepository) \ .filter(model.ToolShedRepository.table.c.deleted == false()): ok, updated = \ check_or_update_tool_shed_status_for_installed_repository(app, repository) if ok: success_count += 1 else: repository_names_not_updated.append(f'<b>{escape(str(repository.name))}</b>') if updated: updated_count += 1 message = "Checked the status in the tool shed for %d repositories. " % success_count message += "Updated the tool shed status for %d repositories. " % updated_count if repository_names_not_updated: message += "Unable to retrieve status from the tool shed for the following repositories:\n" message += ", ".join(repository_names_not_updated) else: repository = get_tool_shed_repository_by_id(app, repository_id) ok, updated = \ check_or_update_tool_shed_status_for_installed_repository(app, repository) if ok: if updated: message = f"The tool shed status for repository <b>{escape(str(repository.name))}</b> has been updated." else: message = f"The status has not changed in the tool shed for repository <b>{escape(str(repository.name))}</b>." else: message = f"Unable to retrieve status from the tool shed for repository <b>{escape(str(repository.name))}</b>." 
status = 'error' return message, status def check_or_update_tool_shed_status_for_installed_repository(app, repository): updated = False tool_shed_status_dict = get_tool_shed_status_for_installed_repository(app, repository) if tool_shed_status_dict: ok = True if tool_shed_status_dict != repository.tool_shed_status: repository.tool_shed_status = tool_shed_status_dict app.install_model.context.add(repository) app.install_model.context.flush() updated = True else: ok = False return ok, updated def create_or_update_tool_shed_repository(app, name, description, installed_changeset_revision, ctx_rev, repository_clone_url, status, metadata_dict=None, current_changeset_revision=None, owner='', dist_to_shed=False): """ Update a tool shed repository record in the Galaxy database with the new information received. If a record defined by the received tool shed, repository name and owner does not exist, create a new record with the received information. """ metadata_dict = metadata_dict or {} # The received value for dist_to_shed will be True if the ToolMigrationManager is installing a repository # that contains tools or datatypes that used to be in the Galaxy distribution, but have been moved # to the main Galaxy tool shed. if current_changeset_revision is None: # The current_changeset_revision is not passed if a repository is being installed for the first # time. If a previously installed repository was later uninstalled, this value should be received # as the value of that change set to which the repository had been updated just prior to it being # uninstalled. 
current_changeset_revision = installed_changeset_revision context = app.install_model.context tool_shed = get_tool_shed_from_clone_url(repository_clone_url) if not owner: owner = get_repository_owner_from_clone_url(repository_clone_url) includes_datatypes = 'datatypes' in metadata_dict if status in [app.install_model.ToolShedRepository.installation_status.DEACTIVATED]: deleted = True uninstalled = False elif status in [app.install_model.ToolShedRepository.installation_status.UNINSTALLED]: deleted = True uninstalled = True else: deleted = False uninstalled = False tool_shed_repository = \ get_installed_repository(app, tool_shed=tool_shed, name=name, owner=owner, installed_changeset_revision=installed_changeset_revision) if tool_shed_repository: log.debug("Updating an existing row for repository '%s' in the tool_shed_repository table, status set to '%s'.", name, status) tool_shed_repository.description = description tool_shed_repository.changeset_revision = current_changeset_revision tool_shed_repository.ctx_rev = ctx_rev tool_shed_repository.metadata_ = metadata_dict tool_shed_repository.includes_datatypes = includes_datatypes tool_shed_repository.deleted = deleted tool_shed_repository.uninstalled = uninstalled tool_shed_repository.status = status else: log.debug("Adding new row for repository '%s' in the tool_shed_repository table, status set to '%s'.", name, status) tool_shed_repository = \ app.install_model.ToolShedRepository(tool_shed=tool_shed, name=name, description=description, owner=owner, installed_changeset_revision=installed_changeset_revision, changeset_revision=current_changeset_revision, ctx_rev=ctx_rev, metadata_=metadata_dict, includes_datatypes=includes_datatypes, dist_to_shed=dist_to_shed, deleted=deleted, uninstalled=uninstalled, status=status) context.add(tool_shed_repository) context.flush() return tool_shed_repository def extract_components_from_tuple(repository_components_tuple): '''Extract the repository components from the provided tuple in 
a backward-compatible manner.''' toolshed = repository_components_tuple[0] name = repository_components_tuple[1] owner = repository_components_tuple[2] changeset_revision = repository_components_tuple[3] components_list = [toolshed, name, owner, changeset_revision] if len(repository_components_tuple) == 5: toolshed, name, owner, changeset_revision, prior_installation_required = repository_components_tuple components_list = [toolshed, name, owner, changeset_revision, prior_installation_required] elif len(repository_components_tuple) == 6: toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = repository_components_tuple components_list = [toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td] return components_list def generate_tool_shed_repository_install_dir(repository_clone_url, changeset_revision): """ Generate a repository installation directory that guarantees repositories with the same name will always be installed in different directories. 
The tool path will be of the form: <tool shed url>/repos/<repository owner>/<repository name>/<installed changeset revision> """ tmp_url = common_util.remove_protocol_and_user_from_clone_url(repository_clone_url) # Now tmp_url is something like: bx.psu.edu:9009/repos/some_username/column items = tmp_url.split('/repos/') tool_shed_url = items[0] repo_path = items[1] tool_shed_url = common_util.remove_port_from_tool_shed_url(tool_shed_url) return '/'.join((tool_shed_url, 'repos', repo_path, changeset_revision)) def get_absolute_path_to_file_in_repository(repo_files_dir, file_name): """Return the absolute path to a specified disk file contained in a repository.""" stripped_file_name = basic_util.strip_path(file_name) file_path = None for root, _, files in os.walk(repo_files_dir): if root.find('.hg') < 0: for name in files: if name == stripped_file_name: return os.path.abspath(os.path.join(root, name)) return file_path def get_ids_of_tool_shed_repositories_being_installed(app, as_string=False): installing_repository_ids = [] new_status = app.install_model.ToolShedRepository.installation_status.NEW cloning_status = app.install_model.ToolShedRepository.installation_status.CLONING setting_tool_versions_status = app.install_model.ToolShedRepository.installation_status.SETTING_TOOL_VERSIONS installing_dependencies_status = app.install_model.ToolShedRepository.installation_status.INSTALLING_TOOL_DEPENDENCIES loading_datatypes_status = app.install_model.ToolShedRepository.installation_status.LOADING_PROPRIETARY_DATATYPES for tool_shed_repository in \ app.install_model.context.query(app.install_model.ToolShedRepository) \ .filter(or_(app.install_model.ToolShedRepository.status == new_status, app.install_model.ToolShedRepository.status == cloning_status, app.install_model.ToolShedRepository.status == setting_tool_versions_status, app.install_model.ToolShedRepository.status == installing_dependencies_status, app.install_model.ToolShedRepository.status == 
loading_datatypes_status)): installing_repository_ids.append(app.security.encode_id(tool_shed_repository.id)) if as_string: return ','.join(installing_repository_ids) return installing_repository_ids def get_installed_repository(app, tool_shed=None, name=None, owner=None, changeset_revision=None, installed_changeset_revision=None, repository_id=None, from_cache=False): """ Return a tool shed repository database record defined by the combination of a toolshed, repository name, repository owner and either current or originally installed changeset_revision. """ # We store the port, if one exists, in the database. tool_shed = common_util.remove_protocol_from_tool_shed_url(tool_shed) if from_cache: tsr_cache = getattr(app, 'tool_shed_repository_cache', None) if tsr_cache: return app.tool_shed_repository_cache.get_installed_repository( tool_shed=tool_shed, name=name, owner=owner, installed_changeset_revision=installed_changeset_revision, changeset_revision=changeset_revision, repository_id=repository_id ) query = app.install_model.context.query(app.install_model.ToolShedRepository) if repository_id: clause_list = [app.install_model.ToolShedRepository.table.c.id == repository_id] else: clause_list = [app.install_model.ToolShedRepository.table.c.tool_shed == tool_shed, app.install_model.ToolShedRepository.table.c.name == name, app.install_model.ToolShedRepository.table.c.owner == owner] if changeset_revision is not None: clause_list.append(app.install_model.ToolShedRepository.table.c.changeset_revision == changeset_revision) if installed_changeset_revision is not None: clause_list.append(app.install_model.ToolShedRepository.table.c.installed_changeset_revision == installed_changeset_revision) return query.filter(and_(*clause_list)).first() def get_installed_tool_shed_repository(app, id): """Get a tool shed repository record from the Galaxy database defined by the id.""" rval = [] if isinstance(id, list): return_list = True else: id = [id] return_list = False repository_ids 
= [app.security.decode_id(i) for i in id] rval = [get_installed_repository(app=app, repository_id=repo_id, from_cache=False) for repo_id in repository_ids] if return_list: return rval return rval[0] def get_prior_import_or_install_required_dict(app, tsr_ids, repo_info_dicts): """ This method is used in the Tool Shed when exporting a repository and its dependencies, and in Galaxy when a repository and its dependencies are being installed. Return a dictionary whose keys are the received tsr_ids and whose values are a list of tsr_ids, each of which is contained in the received list of tsr_ids and whose associated repository must be imported or installed prior to the repository associated with the tsr_id key. """ # Initialize the dictionary. prior_import_or_install_required_dict = {} for tsr_id in tsr_ids: prior_import_or_install_required_dict[tsr_id] = [] # Inspect the repository dependencies for each repository about to be installed and populate the dictionary. for repo_info_dict in repo_info_dicts: repository, repository_dependencies = get_repository_and_repository_dependencies_from_repo_info_dict(app, repo_info_dict) if repository: encoded_repository_id = app.security.encode_id(repository.id) if encoded_repository_id in tsr_ids: # We've located the database table record for one of the repositories we're about to install, so find out if it has any repository # dependencies that require prior installation. 
prior_import_or_install_ids = get_repository_ids_requiring_prior_import_or_install(app, tsr_ids, repository_dependencies) prior_import_or_install_required_dict[encoded_repository_id] = prior_import_or_install_ids return prior_import_or_install_required_dict def get_repo_info_tuple_contents(repo_info_tuple): """Take care in handling the repo_info_tuple as it evolves over time as new tool shed features are introduced.""" if len(repo_info_tuple) == 6: description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, tool_dependencies = repo_info_tuple repository_dependencies = None elif len(repo_info_tuple) == 7: description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = repo_info_tuple return description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies def get_repository_admin_role_name(repository_name, repository_owner): return f'{str(repository_name)}_{str(repository_owner)}_admin' def get_repository_and_repository_dependencies_from_repo_info_dict(app, repo_info_dict): """Return a tool_shed_repository or repository record defined by the information in the received repo_info_dict.""" repository_name = list(repo_info_dict.keys())[0] repo_info_tuple = repo_info_dict[repository_name] description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = \ get_repo_info_tuple_contents(repo_info_tuple) if hasattr(app, "install_model"): # In a tool shed client (Galaxy, or something install repositories like Galaxy) tool_shed = get_tool_shed_from_clone_url(repository_clone_url) repository = get_repository_for_dependency_relationship(app, tool_shed, repository_name, repository_owner, changeset_revision) else: # We're in the tool shed. 
repository = get_repository_by_name_and_owner(app, repository_name, repository_owner) return repository, repository_dependencies def get_repository_by_id(app, id): """Get a repository from the database via id.""" if is_tool_shed_client(app): return app.install_model.context.query(app.install_model.ToolShedRepository).get(app.security.decode_id(id)) else: sa_session = app.model.session return sa_session.query(app.model.Repository).get(app.security.decode_id(id)) def get_repository_by_name_and_owner(app, name, owner, eagerload_columns=None): """Get a repository from the database via name and owner""" repository_query = get_repository_query(app) if is_tool_shed_client(app): return repository_query \ .filter(and_(app.install_model.ToolShedRepository.table.c.name == name, app.install_model.ToolShedRepository.table.c.owner == owner)) \ .first() # We're in the tool shed. q = repository_query.filter( and_( app.model.Repository.table.c.name == name, app.model.User.table.c.username == owner, app.model.Repository.table.c.user_id == app.model.User.table.c.id ) ) if eagerload_columns: q = q.options(joinedload(*eagerload_columns)) return q.first() def get_repository_by_name(app, name): """Get a repository from the database via name.""" return get_repository_query(app).filter_by(name=name).first() def get_repository_dependency_types(repository_dependencies): """ Inspect the received list of repository_dependencies tuples and return boolean values for has_repository_dependencies and has_repository_dependencies_only_if_compiling_contained_td. """ # Set has_repository_dependencies, which will be True only if at least one repository_dependency # is defined with the value of # only_if_compiling_contained_td as False. 
has_repository_dependencies = False for rd_tup in repository_dependencies: tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \ common_util.parse_repository_dependency_tuple(rd_tup) if not util.asbool(only_if_compiling_contained_td): has_repository_dependencies = True break # Set has_repository_dependencies_only_if_compiling_contained_td, which will be True only if at # least one repository_dependency is defined with the value of only_if_compiling_contained_td as True. has_repository_dependencies_only_if_compiling_contained_td = False for rd_tup in repository_dependencies: tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \ common_util.parse_repository_dependency_tuple(rd_tup) if util.asbool(only_if_compiling_contained_td): has_repository_dependencies_only_if_compiling_contained_td = True break return has_repository_dependencies, has_repository_dependencies_only_if_compiling_contained_td def get_repository_for_dependency_relationship(app, tool_shed, name, owner, changeset_revision): """ Return an installed tool_shed_repository database record that is defined by either the current changeset revision or the installed_changeset_revision. """ # This method is used only in Galaxy, not the Tool Shed. We store the port (if one exists) in the database. 
tool_shed = common_util.remove_protocol_from_tool_shed_url(tool_shed) if tool_shed is None or name is None or owner is None or changeset_revision is None: message = "Unable to retrieve the repository record from the database because one or more of the following " message += "required parameters is None: tool_shed: %s, name: %s, owner: %s, changeset_revision: %s " % \ (str(tool_shed), str(name), str(owner), str(changeset_revision)) raise Exception(message) repository = get_installed_repository(app=app, tool_shed=tool_shed, name=name, owner=owner, installed_changeset_revision=changeset_revision) if not repository: repository = get_installed_repository(app=app, tool_shed=tool_shed, name=name, owner=owner, changeset_revision=changeset_revision) if not repository: tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, tool_shed) repository_clone_url = os.path.join(tool_shed_url, 'repos', owner, name) repo_info_tuple = (None, repository_clone_url, changeset_revision, None, owner, None, None) repository, pcr = repository_was_previously_installed(app, tool_shed_url, name, repo_info_tuple) if not repository: # The received changeset_revision is no longer installable, so get the next changeset_revision # in the repository's changelog in the tool shed that is associated with repository_metadata. 
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, tool_shed) params = dict(name=name, owner=owner, changeset_revision=changeset_revision) pathspec = ['repository', 'next_installable_changeset_revision'] text = util.url_get(tool_shed_url, auth=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params) if text: repository = get_installed_repository(app=app, tool_shed=tool_shed, name=name, owner=owner, changeset_revision=text) return repository def get_repository_ids_requiring_prior_import_or_install(app, tsr_ids, repository_dependencies): """ This method is used in the Tool Shed when exporting a repository and its dependencies, and in Galaxy when a repository and its dependencies are being installed. Inspect the received repository_dependencies and determine if the encoded id of each required repository is in the received tsr_ids. If so, then determine whether that required repository should be imported / installed prior to its dependent repository. Return a list of encoded repository ids, each of which is contained in the received list of tsr_ids, and whose associated repositories must be imported / installed prior to the dependent repository associated with the received repository_dependencies. """ prior_tsr_ids = [] if repository_dependencies: for key, rd_tups in repository_dependencies.items(): if key in ['description', 'root_key']: continue for rd_tup in rd_tups: tool_shed, \ name, \ owner, \ changeset_revision, \ prior_installation_required, \ only_if_compiling_contained_td = \ common_util.parse_repository_dependency_tuple(rd_tup) # If only_if_compiling_contained_td is False, then the repository dependency # is not required to be installed prior to the dependent repository even if # prior_installation_required is True. This is because the only meaningful # content of the repository dependency is its contained tool dependency, which # is required in order to compile the dependent repository's tool dependency. 
# In the scenario where the repository dependency is not installed prior to the # dependent repository's tool dependency compilation process, the tool dependency # compilation framework will install the repository dependency prior to compilation # of the dependent repository's tool dependency. if not util.asbool(only_if_compiling_contained_td): if util.asbool(prior_installation_required): if is_tool_shed_client(app): # We store the port, if one exists, in the database. tool_shed = common_util.remove_protocol_from_tool_shed_url(tool_shed) repository = get_repository_for_dependency_relationship(app, tool_shed, name, owner, changeset_revision) else: repository = get_repository_by_name_and_owner(app, name, owner) if repository: encoded_repository_id = app.security.encode_id(repository.id) if encoded_repository_id in tsr_ids: prior_tsr_ids.append(encoded_repository_id) return prior_tsr_ids def get_repository_in_tool_shed(app, id, eagerload_columns=None): """Get a repository on the tool shed side from the database via id.""" q = get_repository_query(app) if eagerload_columns: q = q.options(joinedload(*eagerload_columns)) return q.get(app.security.decode_id(id)) def get_repository_owner(cleaned_repository_url): """Gvien a "cleaned" repository clone URL, return the owner of the repository.""" items = cleaned_repository_url.split('/repos/') repo_path = items[1] if repo_path.startswith('/'): repo_path = repo_path.replace('/', '', 1) return repo_path.lstrip('/').split('/')[0] def get_repository_owner_from_clone_url(repository_clone_url): """Given a repository clone URL, return the owner of the repository.""" tmp_url = common_util.remove_protocol_and_user_from_clone_url(repository_clone_url) return get_repository_owner(tmp_url) def get_repository_query(app): if is_tool_shed_client(app): query = app.install_model.context.query(app.install_model.ToolShedRepository) else: query = app.model.context.query(app.model.Repository) return query def get_role_by_id(app, role_id): """Get a 
Role from the database by id.""" sa_session = app.model.session return sa_session.query(app.model.Role).get(app.security.decode_id(role_id)) def get_tool_shed_from_clone_url(repository_clone_url): tmp_url = common_util.remove_protocol_and_user_from_clone_url(repository_clone_url) return tmp_url.split('/repos/')[0].rstrip('/') def get_tool_shed_repository_by_id(app, repository_id): """Return a tool shed repository database record defined by the id.""" # This method is used only in Galaxy, not the tool shed. return app.install_model.context.query(app.install_model.ToolShedRepository) \ .filter(app.install_model.ToolShedRepository.table.c.id == app.security.decode_id(repository_id)) \ .first() def get_tool_shed_status_for_installed_repository(app, repository): """ Send a request to the tool shed to retrieve information about newer installable repository revisions, current revision updates, whether the repository revision is the latest downloadable revision, and whether the repository has been deprecated in the tool shed. The received repository is a ToolShedRepository object from Galaxy. """ tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, str(repository.tool_shed)) params = dict(name=repository.name, owner=repository.owner, changeset_revision=repository.changeset_revision) pathspec = ['repository', 'status_for_installed_repository'] try: encoded_tool_shed_status_dict = util.url_get(tool_shed_url, auth=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params) tool_shed_status_dict = encoding_util.tool_shed_decode(encoded_tool_shed_status_dict) return tool_shed_status_dict except HTTPError as e: # This should handle backward compatility to the Galaxy 12/20/12 release. We used to only handle updates for an installed revision # using a boolean value. 
log.debug("Error attempting to get tool shed status for installed repository %s: %s\nAttempting older 'check_for_updates' method.\n" % (str(repository.name), str(e))) pathspec = ['repository', 'check_for_updates'] params['from_update_manager'] = True try: # The value of text will be 'true' or 'false', depending upon whether there is an update available for the installed revision. text = util.url_get(tool_shed_url, auth=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params) return dict(revision_update=text) except Exception: # The required tool shed may be unavailable, so default the revision_update value to 'false'. return dict(revision_update='false') except Exception: log.exception("Error attempting to get tool shed status for installed repository %s", str(repository.name)) return {} def is_tool_shed_client(app): """ The tool shed and clients to the tool (i.e. Galaxy) require a lot of similar functionality in this file but with small differences. This method should determine if the app performing the action is the tool shed or a client of the tool shed. """ return hasattr(app, "install_model") def repository_was_previously_installed(app, tool_shed_url, repository_name, repo_info_tuple, from_tip=False): """ Find out if a repository is already installed into Galaxy - there are several scenarios where this is necessary. For example, this method will handle the case where the repository was previously installed using an older changeset_revsion, but later the repository was updated in the tool shed and now we're trying to install the latest changeset revision of the same repository instead of updating the one that was previously installed. We'll look in the database instead of on disk since the repository may be currently uninstalled. 
""" tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, tool_shed_url) description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = \ get_repo_info_tuple_contents(repo_info_tuple) tool_shed = get_tool_shed_from_clone_url(repository_clone_url) # See if we can locate the repository using the value of changeset_revision. tool_shed_repository = get_installed_repository(app, tool_shed=tool_shed, name=repository_name, owner=repository_owner, installed_changeset_revision=changeset_revision) if tool_shed_repository: return tool_shed_repository, changeset_revision # Get all previous changeset revisions from the tool shed for the repository back to, but excluding, # the previous valid changeset revision to see if it was previously installed using one of them. params = dict(galaxy_url=web.url_for('/', qualified=True), name=repository_name, owner=repository_owner, changeset_revision=changeset_revision, from_tip=str(from_tip)) pathspec = ['repository', 'previous_changeset_revisions'] text = util.url_get(tool_shed_url, auth=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params) if text: changeset_revisions = util.listify(text) for previous_changeset_revision in changeset_revisions: tool_shed_repository = get_installed_repository(app, tool_shed=tool_shed, name=repository_name, owner=repository_owner, installed_changeset_revision=previous_changeset_revision) if tool_shed_repository: return tool_shed_repository, previous_changeset_revision return None, None def set_repository_attributes(app, repository, status, error_message, deleted, uninstalled, remove_from_disk=False): if remove_from_disk: relative_install_dir = repository.repo_path(app) if relative_install_dir: clone_dir = os.path.abspath(relative_install_dir) try: shutil.rmtree(clone_dir) log.debug("Removed repository installation directory: %s", clone_dir) except Exception as e: log.debug("Error removing 
repository installation directory %s: %s", clone_dir, util.unicodify(e)) repository.error_message = error_message repository.status = status repository.deleted = deleted repository.uninstalled = uninstalled app.install_model.context.add(repository) app.install_model.context.flush() __all__ = ( 'check_for_updates', 'check_or_update_tool_shed_status_for_installed_repository', 'create_or_update_tool_shed_repository', 'extract_components_from_tuple', 'generate_tool_shed_repository_install_dir', 'get_absolute_path_to_file_in_repository', 'get_ids_of_tool_shed_repositories_being_installed', 'get_installed_repository', 'get_installed_tool_shed_repository', 'get_prior_import_or_install_required_dict', 'get_repo_info_tuple_contents', 'get_repository_admin_role_name', 'get_repository_and_repository_dependencies_from_repo_info_dict', 'get_repository_by_id', 'get_repository_by_name', 'get_repository_by_name_and_owner', 'get_repository_dependency_types', 'get_repository_for_dependency_relationship', 'get_repository_ids_requiring_prior_import_or_install', 'get_repository_in_tool_shed', 'get_repository_owner', 'get_repository_owner_from_clone_url', 'get_repository_query', 'get_role_by_id', 'get_tool_shed_from_clone_url', 'get_tool_shed_repository_by_id', 'get_tool_shed_status_for_installed_repository', 'is_tool_shed_client', 'repository_was_previously_installed', 'set_repository_attributes', )
54.186154
171
0.691406
import logging import os import re import shutil from urllib.error import HTTPError from markupsafe import escape from sqlalchemy import ( and_, false, or_, ) from sqlalchemy.orm import joinedload from galaxy import util from galaxy import web from galaxy.tool_shed.util import basic_util from galaxy.util.tool_shed import common_util, encoding_util log = logging.getLogger(__name__) VALID_REPOSITORYNAME_RE = re.compile(r"^[a-z0-9\_]+$") def check_for_updates(app, model, repository_id=None): message = '' status = 'ok' if repository_id is None: success_count = 0 repository_names_not_updated = [] updated_count = 0 for repository in model.context.query(model.ToolShedRepository) \ .filter(model.ToolShedRepository.table.c.deleted == false()): ok, updated = \ check_or_update_tool_shed_status_for_installed_repository(app, repository) if ok: success_count += 1 else: repository_names_not_updated.append(f'<b>{escape(str(repository.name))}</b>') if updated: updated_count += 1 message = "Checked the status in the tool shed for %d repositories. " % success_count message += "Updated the tool shed status for %d repositories. " % updated_count if repository_names_not_updated: message += "Unable to retrieve status from the tool shed for the following repositories:\n" message += ", ".join(repository_names_not_updated) else: repository = get_tool_shed_repository_by_id(app, repository_id) ok, updated = \ check_or_update_tool_shed_status_for_installed_repository(app, repository) if ok: if updated: message = f"The tool shed status for repository <b>{escape(str(repository.name))}</b> has been updated." else: message = f"The status has not changed in the tool shed for repository <b>{escape(str(repository.name))}</b>." else: message = f"Unable to retrieve status from the tool shed for repository <b>{escape(str(repository.name))}</b>." 
status = 'error' return message, status def check_or_update_tool_shed_status_for_installed_repository(app, repository): updated = False tool_shed_status_dict = get_tool_shed_status_for_installed_repository(app, repository) if tool_shed_status_dict: ok = True if tool_shed_status_dict != repository.tool_shed_status: repository.tool_shed_status = tool_shed_status_dict app.install_model.context.add(repository) app.install_model.context.flush() updated = True else: ok = False return ok, updated def create_or_update_tool_shed_repository(app, name, description, installed_changeset_revision, ctx_rev, repository_clone_url, status, metadata_dict=None, current_changeset_revision=None, owner='', dist_to_shed=False): metadata_dict = metadata_dict or {} if current_changeset_revision is None: current_changeset_revision = installed_changeset_revision context = app.install_model.context tool_shed = get_tool_shed_from_clone_url(repository_clone_url) if not owner: owner = get_repository_owner_from_clone_url(repository_clone_url) includes_datatypes = 'datatypes' in metadata_dict if status in [app.install_model.ToolShedRepository.installation_status.DEACTIVATED]: deleted = True uninstalled = False elif status in [app.install_model.ToolShedRepository.installation_status.UNINSTALLED]: deleted = True uninstalled = True else: deleted = False uninstalled = False tool_shed_repository = \ get_installed_repository(app, tool_shed=tool_shed, name=name, owner=owner, installed_changeset_revision=installed_changeset_revision) if tool_shed_repository: log.debug("Updating an existing row for repository '%s' in the tool_shed_repository table, status set to '%s'.", name, status) tool_shed_repository.description = description tool_shed_repository.changeset_revision = current_changeset_revision tool_shed_repository.ctx_rev = ctx_rev tool_shed_repository.metadata_ = metadata_dict tool_shed_repository.includes_datatypes = includes_datatypes tool_shed_repository.deleted = deleted 
tool_shed_repository.uninstalled = uninstalled tool_shed_repository.status = status else: log.debug("Adding new row for repository '%s' in the tool_shed_repository table, status set to '%s'.", name, status) tool_shed_repository = \ app.install_model.ToolShedRepository(tool_shed=tool_shed, name=name, description=description, owner=owner, installed_changeset_revision=installed_changeset_revision, changeset_revision=current_changeset_revision, ctx_rev=ctx_rev, metadata_=metadata_dict, includes_datatypes=includes_datatypes, dist_to_shed=dist_to_shed, deleted=deleted, uninstalled=uninstalled, status=status) context.add(tool_shed_repository) context.flush() return tool_shed_repository def extract_components_from_tuple(repository_components_tuple): toolshed = repository_components_tuple[0] name = repository_components_tuple[1] owner = repository_components_tuple[2] changeset_revision = repository_components_tuple[3] components_list = [toolshed, name, owner, changeset_revision] if len(repository_components_tuple) == 5: toolshed, name, owner, changeset_revision, prior_installation_required = repository_components_tuple components_list = [toolshed, name, owner, changeset_revision, prior_installation_required] elif len(repository_components_tuple) == 6: toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = repository_components_tuple components_list = [toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td] return components_list def generate_tool_shed_repository_install_dir(repository_clone_url, changeset_revision): tmp_url = common_util.remove_protocol_and_user_from_clone_url(repository_clone_url) items = tmp_url.split('/repos/') tool_shed_url = items[0] repo_path = items[1] tool_shed_url = common_util.remove_port_from_tool_shed_url(tool_shed_url) return '/'.join((tool_shed_url, 'repos', repo_path, changeset_revision)) def 
get_absolute_path_to_file_in_repository(repo_files_dir, file_name): stripped_file_name = basic_util.strip_path(file_name) file_path = None for root, _, files in os.walk(repo_files_dir): if root.find('.hg') < 0: for name in files: if name == stripped_file_name: return os.path.abspath(os.path.join(root, name)) return file_path def get_ids_of_tool_shed_repositories_being_installed(app, as_string=False): installing_repository_ids = [] new_status = app.install_model.ToolShedRepository.installation_status.NEW cloning_status = app.install_model.ToolShedRepository.installation_status.CLONING setting_tool_versions_status = app.install_model.ToolShedRepository.installation_status.SETTING_TOOL_VERSIONS installing_dependencies_status = app.install_model.ToolShedRepository.installation_status.INSTALLING_TOOL_DEPENDENCIES loading_datatypes_status = app.install_model.ToolShedRepository.installation_status.LOADING_PROPRIETARY_DATATYPES for tool_shed_repository in \ app.install_model.context.query(app.install_model.ToolShedRepository) \ .filter(or_(app.install_model.ToolShedRepository.status == new_status, app.install_model.ToolShedRepository.status == cloning_status, app.install_model.ToolShedRepository.status == setting_tool_versions_status, app.install_model.ToolShedRepository.status == installing_dependencies_status, app.install_model.ToolShedRepository.status == loading_datatypes_status)): installing_repository_ids.append(app.security.encode_id(tool_shed_repository.id)) if as_string: return ','.join(installing_repository_ids) return installing_repository_ids def get_installed_repository(app, tool_shed=None, name=None, owner=None, changeset_revision=None, installed_changeset_revision=None, repository_id=None, from_cache=False): tool_shed = common_util.remove_protocol_from_tool_shed_url(tool_shed) if from_cache: tsr_cache = getattr(app, 'tool_shed_repository_cache', None) if tsr_cache: return app.tool_shed_repository_cache.get_installed_repository( tool_shed=tool_shed, 
name=name, owner=owner, installed_changeset_revision=installed_changeset_revision, changeset_revision=changeset_revision, repository_id=repository_id ) query = app.install_model.context.query(app.install_model.ToolShedRepository) if repository_id: clause_list = [app.install_model.ToolShedRepository.table.c.id == repository_id] else: clause_list = [app.install_model.ToolShedRepository.table.c.tool_shed == tool_shed, app.install_model.ToolShedRepository.table.c.name == name, app.install_model.ToolShedRepository.table.c.owner == owner] if changeset_revision is not None: clause_list.append(app.install_model.ToolShedRepository.table.c.changeset_revision == changeset_revision) if installed_changeset_revision is not None: clause_list.append(app.install_model.ToolShedRepository.table.c.installed_changeset_revision == installed_changeset_revision) return query.filter(and_(*clause_list)).first() def get_installed_tool_shed_repository(app, id): rval = [] if isinstance(id, list): return_list = True else: id = [id] return_list = False repository_ids = [app.security.decode_id(i) for i in id] rval = [get_installed_repository(app=app, repository_id=repo_id, from_cache=False) for repo_id in repository_ids] if return_list: return rval return rval[0] def get_prior_import_or_install_required_dict(app, tsr_ids, repo_info_dicts): prior_import_or_install_required_dict = {} for tsr_id in tsr_ids: prior_import_or_install_required_dict[tsr_id] = [] for repo_info_dict in repo_info_dicts: repository, repository_dependencies = get_repository_and_repository_dependencies_from_repo_info_dict(app, repo_info_dict) if repository: encoded_repository_id = app.security.encode_id(repository.id) if encoded_repository_id in tsr_ids: prior_import_or_install_ids = get_repository_ids_requiring_prior_import_or_install(app, tsr_ids, repository_dependencies) prior_import_or_install_required_dict[encoded_repository_id] = prior_import_or_install_ids return prior_import_or_install_required_dict def 
get_repo_info_tuple_contents(repo_info_tuple): if len(repo_info_tuple) == 6: description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, tool_dependencies = repo_info_tuple repository_dependencies = None elif len(repo_info_tuple) == 7: description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = repo_info_tuple return description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies def get_repository_admin_role_name(repository_name, repository_owner): return f'{str(repository_name)}_{str(repository_owner)}_admin' def get_repository_and_repository_dependencies_from_repo_info_dict(app, repo_info_dict): repository_name = list(repo_info_dict.keys())[0] repo_info_tuple = repo_info_dict[repository_name] description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = \ get_repo_info_tuple_contents(repo_info_tuple) if hasattr(app, "install_model"): tool_shed = get_tool_shed_from_clone_url(repository_clone_url) repository = get_repository_for_dependency_relationship(app, tool_shed, repository_name, repository_owner, changeset_revision) else: repository = get_repository_by_name_and_owner(app, repository_name, repository_owner) return repository, repository_dependencies def get_repository_by_id(app, id): if is_tool_shed_client(app): return app.install_model.context.query(app.install_model.ToolShedRepository).get(app.security.decode_id(id)) else: sa_session = app.model.session return sa_session.query(app.model.Repository).get(app.security.decode_id(id)) def get_repository_by_name_and_owner(app, name, owner, eagerload_columns=None): repository_query = get_repository_query(app) if is_tool_shed_client(app): return repository_query \ .filter(and_(app.install_model.ToolShedRepository.table.c.name == name, app.install_model.ToolShedRepository.table.c.owner == owner)) \ 
.first() # We're in the tool shed. q = repository_query.filter( and_( app.model.Repository.table.c.name == name, app.model.User.table.c.username == owner, app.model.Repository.table.c.user_id == app.model.User.table.c.id ) ) if eagerload_columns: q = q.options(joinedload(*eagerload_columns)) return q.first() def get_repository_by_name(app, name): return get_repository_query(app).filter_by(name=name).first() def get_repository_dependency_types(repository_dependencies): has_repository_dependencies = False for rd_tup in repository_dependencies: tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \ common_util.parse_repository_dependency_tuple(rd_tup) if not util.asbool(only_if_compiling_contained_td): has_repository_dependencies = True break has_repository_dependencies_only_if_compiling_contained_td = False for rd_tup in repository_dependencies: tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \ common_util.parse_repository_dependency_tuple(rd_tup) if util.asbool(only_if_compiling_contained_td): has_repository_dependencies_only_if_compiling_contained_td = True break return has_repository_dependencies, has_repository_dependencies_only_if_compiling_contained_td def get_repository_for_dependency_relationship(app, tool_shed, name, owner, changeset_revision): tool_shed = common_util.remove_protocol_from_tool_shed_url(tool_shed) if tool_shed is None or name is None or owner is None or changeset_revision is None: message = "Unable to retrieve the repository record from the database because one or more of the following " message += "required parameters is None: tool_shed: %s, name: %s, owner: %s, changeset_revision: %s " % \ (str(tool_shed), str(name), str(owner), str(changeset_revision)) raise Exception(message) repository = get_installed_repository(app=app, tool_shed=tool_shed, name=name, owner=owner, installed_changeset_revision=changeset_revision) if not 
repository: repository = get_installed_repository(app=app, tool_shed=tool_shed, name=name, owner=owner, changeset_revision=changeset_revision) if not repository: tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, tool_shed) repository_clone_url = os.path.join(tool_shed_url, 'repos', owner, name) repo_info_tuple = (None, repository_clone_url, changeset_revision, None, owner, None, None) repository, pcr = repository_was_previously_installed(app, tool_shed_url, name, repo_info_tuple) if not repository: tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, tool_shed) params = dict(name=name, owner=owner, changeset_revision=changeset_revision) pathspec = ['repository', 'next_installable_changeset_revision'] text = util.url_get(tool_shed_url, auth=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params) if text: repository = get_installed_repository(app=app, tool_shed=tool_shed, name=name, owner=owner, changeset_revision=text) return repository def get_repository_ids_requiring_prior_import_or_install(app, tsr_ids, repository_dependencies): prior_tsr_ids = [] if repository_dependencies: for key, rd_tups in repository_dependencies.items(): if key in ['description', 'root_key']: continue for rd_tup in rd_tups: tool_shed, \ name, \ owner, \ changeset_revision, \ prior_installation_required, \ only_if_compiling_contained_td = \ common_util.parse_repository_dependency_tuple(rd_tup) # If only_if_compiling_contained_td is False, then the repository dependency # is not required to be installed prior to the dependent repository even if # prior_installation_required is True. This is because the only meaningful # content of the repository dependency is its contained tool dependency, which # is required in order to compile the dependent repository's tool dependency. # compilation framework will install the repository dependency prior to compilation # of the dependent repository's tool dependency. 
if not util.asbool(only_if_compiling_contained_td): if util.asbool(prior_installation_required): if is_tool_shed_client(app): tool_shed = common_util.remove_protocol_from_tool_shed_url(tool_shed) repository = get_repository_for_dependency_relationship(app, tool_shed, name, owner, changeset_revision) else: repository = get_repository_by_name_and_owner(app, name, owner) if repository: encoded_repository_id = app.security.encode_id(repository.id) if encoded_repository_id in tsr_ids: prior_tsr_ids.append(encoded_repository_id) return prior_tsr_ids def get_repository_in_tool_shed(app, id, eagerload_columns=None): q = get_repository_query(app) if eagerload_columns: q = q.options(joinedload(*eagerload_columns)) return q.get(app.security.decode_id(id)) def get_repository_owner(cleaned_repository_url): items = cleaned_repository_url.split('/repos/') repo_path = items[1] if repo_path.startswith('/'): repo_path = repo_path.replace('/', '', 1) return repo_path.lstrip('/').split('/')[0] def get_repository_owner_from_clone_url(repository_clone_url): tmp_url = common_util.remove_protocol_and_user_from_clone_url(repository_clone_url) return get_repository_owner(tmp_url) def get_repository_query(app): if is_tool_shed_client(app): query = app.install_model.context.query(app.install_model.ToolShedRepository) else: query = app.model.context.query(app.model.Repository) return query def get_role_by_id(app, role_id): sa_session = app.model.session return sa_session.query(app.model.Role).get(app.security.decode_id(role_id)) def get_tool_shed_from_clone_url(repository_clone_url): tmp_url = common_util.remove_protocol_and_user_from_clone_url(repository_clone_url) return tmp_url.split('/repos/')[0].rstrip('/') def get_tool_shed_repository_by_id(app, repository_id): return app.install_model.context.query(app.install_model.ToolShedRepository) \ .filter(app.install_model.ToolShedRepository.table.c.id == app.security.decode_id(repository_id)) \ .first() def 
get_tool_shed_status_for_installed_repository(app, repository): tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, str(repository.tool_shed)) params = dict(name=repository.name, owner=repository.owner, changeset_revision=repository.changeset_revision) pathspec = ['repository', 'status_for_installed_repository'] try: encoded_tool_shed_status_dict = util.url_get(tool_shed_url, auth=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params) tool_shed_status_dict = encoding_util.tool_shed_decode(encoded_tool_shed_status_dict) return tool_shed_status_dict except HTTPError as e: log.debug("Error attempting to get tool shed status for installed repository %s: %s\nAttempting older 'check_for_updates' method.\n" % (str(repository.name), str(e))) pathspec = ['repository', 'check_for_updates'] params['from_update_manager'] = True try: text = util.url_get(tool_shed_url, auth=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params) return dict(revision_update=text) except Exception: return dict(revision_update='false') except Exception: log.exception("Error attempting to get tool shed status for installed repository %s", str(repository.name)) return {} def is_tool_shed_client(app): return hasattr(app, "install_model") def repository_was_previously_installed(app, tool_shed_url, repository_name, repo_info_tuple, from_tip=False): tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, tool_shed_url) description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = \ get_repo_info_tuple_contents(repo_info_tuple) tool_shed = get_tool_shed_from_clone_url(repository_clone_url) tool_shed_repository = get_installed_repository(app, tool_shed=tool_shed, name=repository_name, owner=repository_owner, installed_changeset_revision=changeset_revision) if tool_shed_repository: return tool_shed_repository, changeset_revision params = 
dict(galaxy_url=web.url_for('/', qualified=True), name=repository_name, owner=repository_owner, changeset_revision=changeset_revision, from_tip=str(from_tip)) pathspec = ['repository', 'previous_changeset_revisions'] text = util.url_get(tool_shed_url, auth=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params) if text: changeset_revisions = util.listify(text) for previous_changeset_revision in changeset_revisions: tool_shed_repository = get_installed_repository(app, tool_shed=tool_shed, name=repository_name, owner=repository_owner, installed_changeset_revision=previous_changeset_revision) if tool_shed_repository: return tool_shed_repository, previous_changeset_revision return None, None def set_repository_attributes(app, repository, status, error_message, deleted, uninstalled, remove_from_disk=False): if remove_from_disk: relative_install_dir = repository.repo_path(app) if relative_install_dir: clone_dir = os.path.abspath(relative_install_dir) try: shutil.rmtree(clone_dir) log.debug("Removed repository installation directory: %s", clone_dir) except Exception as e: log.debug("Error removing repository installation directory %s: %s", clone_dir, util.unicodify(e)) repository.error_message = error_message repository.status = status repository.deleted = deleted repository.uninstalled = uninstalled app.install_model.context.add(repository) app.install_model.context.flush() __all__ = ( 'check_for_updates', 'check_or_update_tool_shed_status_for_installed_repository', 'create_or_update_tool_shed_repository', 'extract_components_from_tuple', 'generate_tool_shed_repository_install_dir', 'get_absolute_path_to_file_in_repository', 'get_ids_of_tool_shed_repositories_being_installed', 'get_installed_repository', 'get_installed_tool_shed_repository', 'get_prior_import_or_install_required_dict', 'get_repo_info_tuple_contents', 'get_repository_admin_role_name', 'get_repository_and_repository_dependencies_from_repo_info_dict', 'get_repository_by_id', 
'get_repository_by_name', 'get_repository_by_name_and_owner', 'get_repository_dependency_types', 'get_repository_for_dependency_relationship', 'get_repository_ids_requiring_prior_import_or_install', 'get_repository_in_tool_shed', 'get_repository_owner', 'get_repository_owner_from_clone_url', 'get_repository_query', 'get_role_by_id', 'get_tool_shed_from_clone_url', 'get_tool_shed_repository_by_id', 'get_tool_shed_status_for_installed_repository', 'is_tool_shed_client', 'repository_was_previously_installed', 'set_repository_attributes', )
true
true
f70ff3f6f11cf8baea1e696d4186195c3535c42f
2,126
py
Python
src/sequence_generator.py
Felihong/wikidata-sequence-analysis
1d86ad9812c90864eb2c9ab72e5e61474d439f1e
[ "MIT" ]
null
null
null
src/sequence_generator.py
Felihong/wikidata-sequence-analysis
1d86ad9812c90864eb2c9ab72e5e61474d439f1e
[ "MIT" ]
1
2019-11-04T12:45:02.000Z
2019-11-04T12:45:02.000Z
src/sequence_generator.py
Felihong/wikidata-sequence-analysis
1d86ad9812c90864eb2c9ab72e5e61474d439f1e
[ "MIT" ]
null
null
null
import pandas as pd from itertools import groupby from operator import itemgetter class SequenceGenerator: def __init__(self, csvfile, jsThreshold): self.datafile = csvfile self.jsThreshold = jsThreshold """ Convert the input csv file into dataframe """ def _csv2df(self): return pd.read_csv(self.datafile, dtype={'item_id':int, 'user_id':str}) """ Generate database by selecting the non-null sequences satisfying the js-distance threshold """ def generate_db(self): db = self._csv2df()[['item_id', 'user_id', 'edit_type', 'rev_timestamp', 'js_distance']].sort_values(by=['item_id','rev_timestamp']) filter = db.loc[db['js_distance'] >= self.jsThreshold][['item_id', 'user_id', 'edit_type']] return filter[filter.user_id.notnull()] def generate_dev_db(self, dev): db = self._csv2df()[['item_id', 'user_id', 'edit_type', 'rev_timestamp', 'prediction', 'js_distance']].sort_values(by=['item_id', 'rev_timestamp']) filter = db.loc[(db['js_distance']>=self.jsThreshold) & (db['prediction']==dev)][['item_id', 'user_id', 'edit_type']] return filter[filter.user_id.notnull()] """ Generate the sequence database by integrating all edits conducted upon one article in a list, where the serial edits from the same editor are collapsed into one sub-list Args: csv file of scheme: article_id : int editor_id : int edit_type : string Return: A list of list [[a], [b]], where a and b are collapsed edit types """ def generate_sequence(self): db = self.generate_db() df = db.groupby(['item_id', 'user_id']).agg({'edit_type': list}) result = df.groupby(['item_id']).agg({'edit_type': list}) tmp = [] for ls in result.values.tolist(): tmp.append(ls[0]) return tmp def generate_dev_sequence(self, dev): db = self.generate_dev_db(dev=dev) df = db.groupby(['item_id', 'user_id']).agg({'edit_type': list}) return df.values.tolist()
40.884615
155
0.625118
import pandas as pd from itertools import groupby from operator import itemgetter class SequenceGenerator: def __init__(self, csvfile, jsThreshold): self.datafile = csvfile self.jsThreshold = jsThreshold def _csv2df(self): return pd.read_csv(self.datafile, dtype={'item_id':int, 'user_id':str}) def generate_db(self): db = self._csv2df()[['item_id', 'user_id', 'edit_type', 'rev_timestamp', 'js_distance']].sort_values(by=['item_id','rev_timestamp']) filter = db.loc[db['js_distance'] >= self.jsThreshold][['item_id', 'user_id', 'edit_type']] return filter[filter.user_id.notnull()] def generate_dev_db(self, dev): db = self._csv2df()[['item_id', 'user_id', 'edit_type', 'rev_timestamp', 'prediction', 'js_distance']].sort_values(by=['item_id', 'rev_timestamp']) filter = db.loc[(db['js_distance']>=self.jsThreshold) & (db['prediction']==dev)][['item_id', 'user_id', 'edit_type']] return filter[filter.user_id.notnull()] def generate_sequence(self): db = self.generate_db() df = db.groupby(['item_id', 'user_id']).agg({'edit_type': list}) result = df.groupby(['item_id']).agg({'edit_type': list}) tmp = [] for ls in result.values.tolist(): tmp.append(ls[0]) return tmp def generate_dev_sequence(self, dev): db = self.generate_dev_db(dev=dev) df = db.groupby(['item_id', 'user_id']).agg({'edit_type': list}) return df.values.tolist()
true
true
f70ff59c20d1954f8016e360c1dc77483ec2a266
586
py
Python
actions/read_kv.py
RyanCopley/stackstorm-vault
0b7f611bc34423a4a853daa99ec6c754d2169cd2
[ "Apache-2.0" ]
null
null
null
actions/read_kv.py
RyanCopley/stackstorm-vault
0b7f611bc34423a4a853daa99ec6c754d2169cd2
[ "Apache-2.0" ]
null
null
null
actions/read_kv.py
RyanCopley/stackstorm-vault
0b7f611bc34423a4a853daa99ec6c754d2169cd2
[ "Apache-2.0" ]
null
null
null
from lib import action class VaultReadAction(action.VaultBaseAction): def run(self, path, kv_version, mount_point, version): value = None if kv_version == 1: value = self.vault.kv.v1.read_secret(path=path, mount_point=mount_point) elif kv_version == 2: value = self.vault.kv.v2.read_secret_version(path=path, mount_point=mount_point, version=version) if value: return value['data'] else: raise KeyError("Key was not found in Vault")
32.555556
92
0.578498
from lib import action class VaultReadAction(action.VaultBaseAction): def run(self, path, kv_version, mount_point, version): value = None if kv_version == 1: value = self.vault.kv.v1.read_secret(path=path, mount_point=mount_point) elif kv_version == 2: value = self.vault.kv.v2.read_secret_version(path=path, mount_point=mount_point, version=version) if value: return value['data'] else: raise KeyError("Key was not found in Vault")
true
true
f70ff5a1f8ceb3f00386cd39ca25c564aab1d602
2,730
py
Python
SERVER/control.py
azzhu/deeps
dda178497be3d62067a2f2a7a0a5aa1d793a89bc
[ "MIT" ]
1
2022-03-04T10:18:05.000Z
2022-03-04T10:18:05.000Z
SERVER/control.py
azzhu/deeps
dda178497be3d62067a2f2a7a0a5aa1d793a89bc
[ "MIT" ]
null
null
null
SERVER/control.py
azzhu/deeps
dda178497be3d62067a2f2a7a0a5aa1d793a89bc
[ "MIT" ]
null
null
null
#!/home/zhuqingjie/env/py3_tf_low/bin/python ''' @Time : 07.26 0026 下午 01:19 @Author : zhuqingjie @User : zhu @FileName: control.py @Software: PyCharm ''' ''' 总的控制逻辑 1,control只向外部暴露一个端口,外部向control发请求,control根据mode来去调用其他server模块 2,同时还解决了外部不能直接访问ai节点的问题。主服务跑在ai节点,control服务跑在登陆节点,这样外部就能访问了 ''' import json, os, requests, sys, time from flask import Flask, request # param ai01_ip = '10.11.1.81' ai02_ip = '10.11.1.82' ai03_ip = '10.11.1.83' ai04_ip = '10.11.1.84' ai05_ip = '10.11.1.85' IP = ai05_ip # 主服务的IP地址 app = Flask(__name__) print_ = lambda x: print(f"--> [{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}]: {x}") printc = lambda s: print(f"\033[1;35m{s}\033[0m") mode_list = ['1', '2', '21', '22', '3', '4', '5', '51', '6'] def do_request(port, body): url = f'http://{IP}:{port}' printc(url) printc(body) response = requests.post(url, data=body) printc('do_request ok') return response.text @app.route('/', methods=['POST']) def handle(): print('\n') print('-' * 50) print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) # 读取参数 dic_url = request.form print_(f'\n\tparams: {dic_url}') error_param = 'error_param' mode = dic_url.get('mode', error_param) if mode == error_param: return json.dumps({ 'status': -1, 'info': 'param error: not find "mode"!', 'dst_path': 'null', }) elif mode not in mode_list: return json.dumps({ 'status': -1, 'info': 'param error: "mode" must in 1-6!', 'dst_path': 'null', }) elif mode == '1': return do_request(9001, dic_url) elif mode == '2': return do_request(9002, dic_url) elif mode == '21': return do_request(9021, dic_url) elif mode == '22': return do_request(9022, dic_url) elif mode == '3': return do_request(9003, dic_url) elif mode == '4': return do_request(9004, dic_url) elif mode == '5': return do_request(9005, dic_url) elif mode == '51': return do_request(9051, dic_url) elif mode == '6': return do_request(9006, dic_url) # elif mode in ['10', '11']: # return do_request(9010, dic_url) else: return json.dumps({ 
'status': 2, 'info': 'error: An impossible error.', 'dst_path': 'null', }) if __name__ == '__main__': # app.run(host='0.0.0.0', port='7006') body = { 'mode': '1', 'donotsave': '0', 'userID': 'zhuqingj', 'src_path': '/home/zhangli_lab/zhuqingjie/prj/tunet/res_test/0x.bmp', } res = do_request(9001, body) print(res)
25.754717
105
0.567399
import json, os, requests, sys, time from flask import Flask, request ai01_ip = '10.11.1.81' ai02_ip = '10.11.1.82' ai03_ip = '10.11.1.83' ai04_ip = '10.11.1.84' ai05_ip = '10.11.1.85' IP = ai05_ip app = Flask(__name__) print_ = lambda x: print(f"--> [{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}]: {x}") printc = lambda s: print(f"\033[1;35m{s}\033[0m") mode_list = ['1', '2', '21', '22', '3', '4', '5', '51', '6'] def do_request(port, body): url = f'http://{IP}:{port}' printc(url) printc(body) response = requests.post(url, data=body) printc('do_request ok') return response.text @app.route('/', methods=['POST']) def handle(): print('\n') print('-' * 50) print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) dic_url = request.form print_(f'\n\tparams: {dic_url}') error_param = 'error_param' mode = dic_url.get('mode', error_param) if mode == error_param: return json.dumps({ 'status': -1, 'info': 'param error: not find "mode"!', 'dst_path': 'null', }) elif mode not in mode_list: return json.dumps({ 'status': -1, 'info': 'param error: "mode" must in 1-6!', 'dst_path': 'null', }) elif mode == '1': return do_request(9001, dic_url) elif mode == '2': return do_request(9002, dic_url) elif mode == '21': return do_request(9021, dic_url) elif mode == '22': return do_request(9022, dic_url) elif mode == '3': return do_request(9003, dic_url) elif mode == '4': return do_request(9004, dic_url) elif mode == '5': return do_request(9005, dic_url) elif mode == '51': return do_request(9051, dic_url) elif mode == '6': return do_request(9006, dic_url) else: return json.dumps({ 'status': 2, 'info': 'error: An impossible error.', 'dst_path': 'null', }) if __name__ == '__main__': body = { 'mode': '1', 'donotsave': '0', 'userID': 'zhuqingj', 'src_path': '/home/zhangli_lab/zhuqingjie/prj/tunet/res_test/0x.bmp', } res = do_request(9001, body) print(res)
true
true
f70ff636a28fe34d71cffa801e0ffe548d58bc39
26,074
py
Python
src/openfermion/resource_estimates/molecule/pyscf_utils.py
cvmxn1/OpenFermion
cf53c063d0f124a02ff8776bb7f8afb110d4bde6
[ "Apache-2.0" ]
null
null
null
src/openfermion/resource_estimates/molecule/pyscf_utils.py
cvmxn1/OpenFermion
cf53c063d0f124a02ff8776bb7f8afb110d4bde6
[ "Apache-2.0" ]
null
null
null
src/openfermion/resource_estimates/molecule/pyscf_utils.py
cvmxn1/OpenFermion
cf53c063d0f124a02ff8776bb7f8afb110d4bde6
[ "Apache-2.0" ]
null
null
null
#coverage:ignore """ Drivers for various PySCF electronic structure routines """ from typing import Tuple, Optional import sys import h5py import numpy as np from pyscf import gto, scf, ao2mo, mcscf, lo, tools, cc from pyscf.mcscf import avas def stability(pyscf_mf): """ Test wave function stability and re-optimize SCF. Args: pyscf_mf: PySCF mean field object (e.g. `scf.RHF()`) Returns: pyscf_mf: Updated PySCF mean field object """ new_orbitals = pyscf_mf.stability()[0] new_1rdm = pyscf_mf.make_rdm1(new_orbitals, pyscf_mf.mo_occ) pyscf_mf = pyscf_mf.run(new_1rdm) return pyscf_mf def localize(pyscf_mf, loc_type='pm', verbose=0): """ Localize orbitals given a PySCF mean-field object Args: pyscf_mf: PySCF mean field object loc_type (str): localization type; Pipek-Mezey ('pm') or Edmiston-Rudenberg ('er') verbose (int): print level during localization Returns: pyscf_mf: Updated PySCF mean field object with localized orbitals """ # Note: After loading with `load_casfile_to_pyscf()` you can quiet message # by resetting mf.mol, i.e., mf.mol = gto.M(...) # but this assumes you have the *exact* molecular specification on hand. # I've gotten acceptable results by restoring mf.mol this way (usually # followed by calling mf.kernel()). But consistent localization is not a # given (not unique) despite restoring data this way, hence the message. if len(pyscf_mf.mol.atom) == 0: sys.exit("`localize()` requires atom loc. and atomic basis to be" + \ " defined.\n " + \ "It also can be sensitive to the initial guess and MO" + \ " coefficients.\n " + \ "Best to try re-creating the PySCF molecule and doing the" + \ " SCF, rather than\n " + \ "try to load the mean-field object with" + \ " `load_casfile_to_pyscf()`. 
You can \n " + \ "try to provide the missing information, but consistency" + \ " cannot be guaranteed!") # Split-localize (localize DOCC, SOCC, and virtual separately) docc_idx = np.where(np.isclose(pyscf_mf.mo_occ, 2.))[0] socc_idx = np.where(np.isclose(pyscf_mf.mo_occ, 1.))[0] virt_idx = np.where(np.isclose(pyscf_mf.mo_occ, 0.))[0] # Pipek-Mezey if loc_type.lower() == 'pm': print("Localizing doubly occupied ... ", end="") loc_docc_mo = lo.PM( pyscf_mf.mol, pyscf_mf.mo_coeff[:, docc_idx]).kernel(verbose=verbose) print("singly occupied ... ", end="") loc_socc_mo = lo.PM( pyscf_mf.mol, pyscf_mf.mo_coeff[:, socc_idx]).kernel(verbose=verbose) print("virtual ... ", end="") loc_virt_mo = lo.PM( pyscf_mf.mol, pyscf_mf.mo_coeff[:, virt_idx]).kernel(verbose=verbose) print("DONE") # Edmiston-Rudenberg elif loc_type.lower() == 'er': print("Localizing doubly occupied ... ", end="") loc_docc_mo = lo.ER( pyscf_mf.mol, pyscf_mf.mo_coeff[:, docc_idx]).kernel(verbose=verbose) print("singly occupied ... ", end="") loc_socc_mo = lo.ER( pyscf_mf.mol, pyscf_mf.mo_coeff[:, socc_idx]).kernel(verbose=verbose) print("virtual ... ", end="") loc_virt_mo = lo.ER( pyscf_mf.mol, pyscf_mf.mo_coeff[:, virt_idx]).kernel(verbose=verbose) print("DONE") # overwrite orbitals with localized orbitals pyscf_mf.mo_coeff[:, docc_idx] = loc_docc_mo.copy() pyscf_mf.mo_coeff[:, socc_idx] = loc_socc_mo.copy() pyscf_mf.mo_coeff[:, virt_idx] = loc_virt_mo.copy() return pyscf_mf def avas_active_space(pyscf_mf, ao_list=None, molden_fname='avas_localized_orbitals', **kwargs): """ Return AVAS active space as PySCF molecule and mean-field object Args: pyscf_mf: PySCF mean field object Kwargs: ao_list: list of strings of AOs (print mol.ao_labels() to see options) Example: ao_list = ['H 1s', 'O 2p', 'O 2s'] for water verbose (bool): do additional print molden_fname (str): MOLDEN filename to save AVAS active space orbitals. 
Default is to save to 'avas_localized_orbitals.molden' **kwargs: other keyworded arguments to pass into avas.avas() Returns: pyscf_active_space_mol: Updated PySCF molecule object from AVAS-selected active space pyscf_active_space_mf: Updated PySCF mean field object from AVAS-selected active space """ # Note: requires openshell_option = 3 for this to work, which keeps all # singly occupied in CAS # we also require canonicalize = False so that we don't destroy local orbs avas_output = avas.avas(pyscf_mf, ao_list, canonicalize=False, openshell_option=3, **kwargs) active_norb, active_ne, reordered_orbitals = avas_output active_alpha, _ = get_num_active_alpha_beta(pyscf_mf, active_ne) if molden_fname is not None: # save set of localized orbitals for active space if isinstance(pyscf_mf, scf.rohf.ROHF): frozen_alpha = pyscf_mf.nelec[0] - active_alpha assert frozen_alpha >= 0 else: frozen_alpha = pyscf_mf.mol.nelectron // 2 - active_alpha assert frozen_alpha >= 0 active_space_idx = slice(frozen_alpha, frozen_alpha + active_norb) active_mos = reordered_orbitals[:, active_space_idx] tools.molden.from_mo(pyscf_mf.mol, molden_fname + '.molden', mo_coeff=active_mos) # Choosing an active space changes the molecule ("freezing" electrons, # for example), so we # form the active space tensors first, then re-form the PySCF objects to # ensure consistency pyscf_active_space_mol, pyscf_active_space_mf = cas_to_pyscf( *pyscf_to_cas(pyscf_mf, cas_orbitals=active_norb, cas_electrons=active_ne, avas_orbs=reordered_orbitals)) return pyscf_active_space_mol, pyscf_active_space_mf def cas_to_pyscf(h1, eri, ecore, num_alpha, num_beta): """ Return a PySCF molecule and mean-field object from pre-computed CAS Ham Args: h1 (ndarray) - 2D matrix containing one-body terms (MO basis) eri (ndarray) - 4D tensor containing two-body terms (MO basis) ecore (float) - frozen core electronic energy + nuclear repulsion energy num_alpha (int) - number of spin up electrons in CAS space num_beta (int) - number 
of spin down electrons in CAS space Returns: pyscf_mol: PySCF molecule object pyscf_mf: PySCF mean field object """ n_orb = len(h1) # number orbitals assert [n_orb] * 4 == [*eri.shape] # check dims are consistent pyscf_mol = gto.M() pyscf_mol.nelectron = num_alpha + num_beta n_orb = h1.shape[0] alpha_diag = [1] * num_alpha + [0] * (n_orb - num_alpha) beta_diag = [1] * num_beta + [0] * (n_orb - num_beta) # Assumes Hamiltonian is either RHF or ROHF ... should be OK since UHF will # have two h1s, etc. if num_alpha == num_beta: pyscf_mf = scf.RHF(pyscf_mol) scf_energy = ecore + \ 2*np.einsum('ii', h1[:num_alpha,:num_alpha]) + \ 2*np.einsum('iijj', eri[:num_alpha,:num_alpha,:num_alpha,:num_alpha]) - \ np.einsum('ijji', eri[:num_alpha,:num_alpha,:num_alpha,:num_alpha]) else: pyscf_mf = scf.ROHF(pyscf_mol) pyscf_mf.nelec = (num_alpha, num_beta) # grab singly and doubly occupied orbitals (assume high-spin open shell) docc = slice(None, min(num_alpha, num_beta)) socc = slice(min(num_alpha, num_beta), max(num_alpha, num_beta)) scf_energy = ecore + \ 2.0*np.einsum('ii',h1[docc, docc]) + \ np.einsum('ii',h1[socc, socc]) + \ 2.0*np.einsum('iijj',eri[docc, docc, docc, docc]) - \ np.einsum('ijji',eri[docc, docc, docc, docc]) + \ np.einsum('iijj',eri[socc, socc, docc, docc]) - \ 0.5*np.einsum('ijji',eri[socc, docc, docc, socc]) + \ np.einsum('iijj',eri[docc, docc, socc, socc]) - \ 0.5*np.einsum('ijji',eri[docc, socc, socc, docc]) + \ 0.5*np.einsum('iijj',eri[socc, socc, socc, socc]) - \ 0.5*np.einsum('ijji',eri[socc, socc, socc, socc]) pyscf_mf.get_hcore = lambda *args: np.asarray(h1) pyscf_mf.get_ovlp = lambda *args: np.eye(h1.shape[0]) pyscf_mf.energy_nuc = lambda *args: ecore pyscf_mf._eri = eri # ao2mo.restore('8', np.zeros((8, 8, 8, 8)), 8) pyscf_mf.e_tot = scf_energy pyscf_mf.init_guess = '1e' pyscf_mf.mo_coeff = np.eye(n_orb) pyscf_mf.mo_occ = np.array(alpha_diag) + np.array(beta_diag) pyscf_mf.mo_energy, _ = np.linalg.eigh(pyscf_mf.get_fock()) return pyscf_mol, pyscf_mf 
def pyscf_to_cas(pyscf_mf, cas_orbitals: Optional[int] = None, cas_electrons: Optional[int] = None, avas_orbs=None): """ Return CAS Hamiltonian tensors from a PySCF mean-field object Args: pyscf_mf: PySCF mean field object cas_orbitals (int, optional): number of orbitals in CAS space, default all orbitals cas_electrons (int, optional): number of electrons in CAS space, default all electrons avas_orbs (ndarray, optional): orbitals selected by AVAS in PySCF Returns: h1 (ndarray) - 2D matrix containing one-body terms (MO basis) eri (ndarray) - 4D tensor containing two-body terms (MO basis) ecore (float) - frozen core electronic energy + nuclear repulsion energy num_alpha (int) - number of spin up electrons in CAS space num_beta (int) - number of spin down electrons in CAS space """ # Only RHF or ROHF possible with mcscf.CASCI assert isinstance(pyscf_mf, scf.rhf.RHF) # ROHF is child of RHF class if cas_orbitals is None: cas_orbitals = len(pyscf_mf.mo_coeff) if cas_electrons is None: cas_electrons = pyscf_mf.mol.nelectron cas = mcscf.CASCI(pyscf_mf, ncas=cas_orbitals, nelecas=cas_electrons) h1, ecore = cas.get_h1eff(mo_coeff=avas_orbs) eri = cas.get_h2cas(mo_coeff=avas_orbs) eri = ao2mo.restore('s1', eri, h1.shape[0]) # chemist convention (11|22) ecore = float(ecore) num_alpha, num_beta = get_num_active_alpha_beta(pyscf_mf, cas_electrons) return h1, eri, ecore, num_alpha, num_beta def get_num_active_alpha_beta(pyscf_mf, cas_electrons): """ Return number of alpha and beta electrons in the active space given number of CAS electrons This assumes that all the unpaired electrons are in the active space Args: pyscf_mf: PySCF mean field object cas_orbitals (int): number of electrons in CAS space, Returns: num_alpha (int): number of alpha (spin-up) electrons in active space num_beta (int): number of beta (spin-down) electrons in active space """ # Sanity checks and active space info total_electrons = pyscf_mf.mol.nelectron frozen_electrons = total_electrons - cas_electrons 
assert frozen_electrons % 2 == 0 # ROHF == RHF but RHF != ROHF, and we only do either RHF or ROHF if isinstance(pyscf_mf, scf.rohf.ROHF): frozen_alpha = frozen_electrons // 2 frozen_beta = frozen_electrons // 2 num_alpha = pyscf_mf.nelec[0] - frozen_alpha num_beta = pyscf_mf.nelec[1] - frozen_beta assert np.isclose(num_beta + num_alpha, cas_electrons) else: assert cas_electrons % 2 == 0 num_alpha = cas_electrons // 2 num_beta = cas_electrons // 2 return num_alpha, num_beta def load_casfile_to_pyscf(fname, num_alpha: Optional[int] = None, num_beta: Optional[int] = None): """ Load CAS Hamiltonian from pre-computed HD5 file into a PySCF molecule and mean-field object Args: fname (str): path to hd5 file to be created containing CAS one and two body terms num_alpha (int, optional): number of spin up electrons in CAS space num_beta (int, optional): number of spin down electrons in CAS space Returns: pyscf_mol: PySCF molecule object pyscf_mf: PySCF mean field object """ with h5py.File(fname, "r") as f: eri = np.asarray(f['eri'][()]) # h1 one body elements are sometimes called different things. Try a few. try: h1 = np.asarray(f['h0'][()]) except KeyError: try: h1 = np.asarray(f['hcore'][()]) except KeyError: try: h1 = np.asarray(f['h1'][()]) except KeyError: raise KeyError("Could not find 1-electron Hamiltonian") # ecore sometimes exists, and sometimes as enuc (no frozen electrons) try: ecore = float(f['ecore'][()]) except KeyError: try: ecore = float(f['enuc'][()]) except KeyError: ecore = 0.0 # read the number of spin up and spin down electrons if not input if (num_alpha is None) or (num_beta is None): try: num_alpha = int(f['active_nalpha'][()]) except KeyError: sys.exit("In `load_casfile_to_pyscf()`: \n" + \ " No values found on file for num_alpha " + \ "(key: 'active_nalpha' in h5). 
" + \ " Try passing in a value for num_alpha, or" + \ " re-check integral file.") try: num_beta = int(f['active_nbeta'][()]) except KeyError: sys.exit("In `load_casfile_to_pyscf()`: \n" + \ " No values found on file for num_beta " + \ "(key: 'active_nbeta' in h5). " + \ " Try passing in a value for num_beta, or" + \ " re-check integral file.") pyscf_mol, pyscf_mf = cas_to_pyscf(h1, eri, ecore, num_alpha, num_beta) return pyscf_mol, pyscf_mf def save_pyscf_to_casfile(fname, pyscf_mf, cas_orbitals: Optional[int] = None, cas_electrons: Optional[int] = None, avas_orbs=None): """ Save CAS Hamiltonian from a PySCF mean-field object to an HD5 file Args: fname (str): path to hd5 file to be created containing CAS terms pyscf_mf: PySCF mean field object cas_orbitals (int, optional): number of orb in CAS space, default all cas_electrons (int, optional): number of elec in CAS, default all elec avas_orbs (ndarray, optional): orbitals selected by AVAS in PySCF """ h1, eri, ecore, num_alpha, num_beta = \ pyscf_to_cas(pyscf_mf, cas_orbitals, cas_electrons, avas_orbs) with h5py.File(fname, 'w') as fid: fid.create_dataset('ecore', data=float(ecore), dtype=float) fid.create_dataset( 'h0', data=h1) # note the name change to be consistent with THC paper fid.create_dataset('eri', data=eri) fid.create_dataset('active_nalpha', data=int(num_alpha), dtype=int) fid.create_dataset('active_nbeta', data=int(num_beta), dtype=int) def factorized_ccsd_t(pyscf_mf, eri_rr = None, use_kernel = True,\ no_triples=False) -> Tuple[float, float, float]: """ Compute CCSD(T) energy using rank-reduced ERIs Args: pyscf_mf - PySCF mean field object eri_rr (ndarray) - rank-reduced ERIs, or use full ERIs from pyscf_mf use_kernel (bool) - re-do SCF, using canonical orbitals for one-body? no_triples (bool) - skip the perturbative triples correction? (CCSD) Returns: e_scf (float) - SCF energy e_cor (float) - Correlation energy from CCSD(T) e_tot (float) - Total energy; i.e. 
SCF + Corr energy from CCSD(T) """ h1, eri_full, ecore, num_alpha, num_beta = pyscf_to_cas(pyscf_mf) # If no rank-reduced ERIs, use the full (possibly local) ERIs from pyscf_mf if eri_rr is None: eri_rr = eri_full e_scf, e_cor, e_tot = ccsd_t(h1, eri_rr, ecore, num_alpha, num_beta,\ eri_full, use_kernel, no_triples) return e_scf, e_cor, e_tot def ccsd_t(h1, eri, ecore, num_alpha: int, num_beta: int, eri_full = None,\ use_kernel=True, no_triples=False) -> Tuple[float, float, float]: """ Helper function to do CCSD(T) on set of one- and two-body Hamil elems Args: h1 (ndarray) - 2D matrix containing one-body terms (MO basis) eri (ndarray) - 4D tensor containing two-body terms (MO basis) may be from integral factorization (e.g. SF/DF/THC) ecore (float) - frozen core electronic energy + nuclear repulsion energy num_alpha (int) - number of spin alpha electrons in Hamiltonian num_beta (int) - number of spin beta electrons in Hamiltonian eri_full (ndarray) - optional 4D tensor containing full two-body terms (MO basis) for the SCF procedure only use_kernel (bool) - re-run SCF prior to doing CCSD(T)? no_triples (bool) - skip the perturbative triples correction? (CCSD) Returns: e_scf (float) - SCF energy e_cor (float) - Correlation energy from CCSD(T) e_tot (float) - Total energy; i.e. SCF + Corr energy from CCSD(T) """ mol = gto.M() mol.nelectron = num_alpha + num_beta n_orb = h1.shape[0] alpha_diag = [1] * num_alpha + [0] * (n_orb - num_alpha) beta_diag = [1] * num_beta + [0] * (n_orb - num_beta) # If eri_full not provided, use (possibly rank-reduced) ERIs for check if eri_full is None: eri_full = eri # either RHF or ROHF ... should be OK since UHF will have two h1s, etc. 
if num_alpha == num_beta: mf = scf.RHF(mol) scf_energy = ecore + \ 2*np.einsum('ii',h1[:num_alpha,:num_alpha]) + \ 2*np.einsum('iijj',eri_full[:num_alpha,\ :num_alpha,\ :num_alpha,\ :num_alpha]) - \ np.einsum('ijji',eri_full[:num_alpha,\ :num_alpha,\ :num_alpha,\ :num_alpha]) else: mf = scf.ROHF(mol) mf.nelec = (num_alpha, num_beta) # grab singly and doubly occupied orbitals (assume high-spin open shell) docc = slice(None, min(num_alpha, num_beta)) socc = slice(min(num_alpha, num_beta), max(num_alpha, num_beta)) scf_energy = ecore + \ 2.0*np.einsum('ii',h1[docc, docc]) + \ np.einsum('ii',h1[socc, socc]) + \ 2.0*np.einsum('iijj',eri_full[docc, docc, docc, docc]) - \ np.einsum('ijji',eri_full[docc, docc, docc, docc]) + \ np.einsum('iijj',eri_full[socc, socc, docc, docc]) - \ 0.5*np.einsum('ijji',eri_full[socc, docc, docc, socc]) + \ np.einsum('iijj',eri_full[docc, docc, socc, socc]) - \ 0.5*np.einsum('ijji',eri_full[docc, socc, socc, docc]) + \ 0.5*np.einsum('iijj',eri_full[socc, socc, socc, socc]) - \ 0.5*np.einsum('ijji',eri_full[socc, socc, socc, socc]) mf.get_hcore = lambda *args: np.asarray(h1) mf.get_ovlp = lambda *args: np.eye(h1.shape[0]) mf.energy_nuc = lambda *args: ecore mf._eri = eri_full # ao2mo.restore('8', np.zeros((8, 8, 8, 8)), 8) mf.init_guess = '1e' mf.mo_coeff = np.eye(n_orb) mf.mo_occ = np.array(alpha_diag) + np.array(beta_diag) w, _ = np.linalg.eigh(mf.get_fock()) mf.mo_energy = w # Rotate the interaction tensors into the canonical basis. 
# Reiher and Li tensors, for example, are read-in in the local MO basis, # which is not optimal for the CCSD(T) calculation (canonical gives better # energy estimate whereas QPE is invariant to choice of basis) if use_kernel: mf.conv_tol = 1e-7 mf.init_guess = '1e' mf.verbose = 4 mf.diis_space = 24 mf.level_shift = 0.5 mf.conv_check = False mf.max_cycle = 800 mf.kernel(mf.make_rdm1(mf.mo_coeff, mf.mo_occ)) # use MO info to generate guess mf = stability(mf) mf = stability(mf) mf = stability(mf) # Check if SCF has changed by doing restart, and print warning if so try: assert np.isclose(scf_energy, mf.e_tot, rtol=1e-14) except AssertionError: print( "WARNING: E(SCF) from input integrals does not match E(SCF)" + \ " from mf.kernel()") print(" Will use E(SCF) = {:12.6f} from mf.kernel going forward.". format(mf.e_tot)) print("E(SCF, ints) = {:12.6f} whereas E(SCF) = {:12.6f}".format( scf_energy, mf.e_tot)) # New SCF energy and orbitals for CCSD(T) scf_energy = mf.e_tot # Now re-set the eri's to the (possibly rank-reduced) ERIs mf._eri = eri mf.mol.incore_anyway = True mycc = cc.CCSD(mf) mycc.max_cycle = 800 mycc.conv_tol = 1E-8 mycc.conv_tol_normt = 1E-4 mycc.diis_space = 24 mycc.verbose = 4 mycc.kernel() if no_triples: et = 0.0 else: et = mycc.ccsd_t() e_scf = scf_energy # may be read-in value or 'fresh' SCF value e_cor = mycc.e_corr + et e_tot = e_scf + e_cor print("E(SCF): ", e_scf) print("E(cor): ", e_cor) print("Total energy: ", e_tot) return e_scf, e_cor, e_tot def open_shell_t1_d1(t1a, t1b, mo_occ, nalpha, nbeta): """ T1-diagnostic for open-shell is defined w.r.t Sx eigenfunction of T1 where reference is ROHF. 
given i double occ, c unoccupied, x is single occuplied The T1 amps (high spin) in Sz basis are: T1 = t_{ia}^{ca}(ca^ ia) + t_{ib}^{cb}(cb^ ib) + t_{xa}^{ca}(ca^ xa) + t_{ib}^{xb}(xb^ ib) T1 in the Sx basis are T1 = f_{i}^{c}E_{ci} + v_{i}^{c}A_{ci} + sqrt(2)f_{x}^{c}(ca^ xa) + sqrt(2)f_{i}^{x}(xb^ ib) where E_{ci} = ca^ ia + cb^ ib and A_{ci} = ca^ ia - cb^ ib. See: The Journal of Chemical Physics 98, 9734 (1993); doi: 10.1063/1.464352 Chemical Physics Letters 372 (2003) 362–367; doi:10.1016/S0009-2614(03)00435-4 based on these and two papers from Lee the T1-openshell diagnostic is sqrt(sum_{ia}(f_{ia})^2 + 2sum_{xa}(t_{xa}^{ca})^2 + 2 sum_{ix}(t_{ib}^{xb})^2) / 2 sqrt{N} To get this relate eqs 3-7 from Chemical Physics Letters 372 (2003) 362–367 to Eqs. 45, 46, and 51 from Journal of Chemical Physics 98, 9734 (1993); doi: 10.1063/1.464352. """ # compute t1-diagnostic docc_idx = np.where(np.isclose(mo_occ, 2.))[0] socc_idx = np.where(np.isclose(mo_occ, 1.))[0] virt_idx = np.where(np.isclose(mo_occ, 0.))[0] t1a_docc = t1a[docc_idx, :] # double occ-> virtual t1b_docc = t1b[docc_idx, :][:, -len(virt_idx):] # double occ-> virtual if len(socc_idx) > 0: t1_xa = t1a[socc_idx, :] # single occ -> virtual t1_ix = t1b[docc_idx, :][:, :len(socc_idx)] # double occ -> single occ else: t1_xa = np.array(()) t1_ix = np.array(()) if nalpha - nbeta + len(virt_idx) != t1b.shape[1]: raise ValueError( "Inconsistent shapes na {}, nb {}, t1b.shape {},{}".format( nalpha, nbeta, t1b.shape[0], t1b.shape[1])) if t1a_docc.shape != (len(docc_idx), len(virt_idx)): raise ValueError("T1a_ia does not have the right shape") if t1b_docc.shape != (len(docc_idx), len(virt_idx)): raise ValueError("T1b_ia does not have the right shape") if len(socc_idx) > 0: if t1_ix.shape != (len(docc_idx), len(socc_idx)): raise ValueError("T1_ix does not have the right shape") if t1_xa.shape != (len(socc_idx), len(virt_idx)): raise ValueError("T1_xa does not have the right shape") t1_diagnostic = np.sqrt( 
np.sum((t1a_docc + t1b_docc)**2) + 2 * np.sum(t1_xa**2) + 2 * np.sum(t1_ix**2)) / (2 * np.sqrt(nalpha + nbeta)) # compute D1-diagnostic f_ia = 0.5 * (t1a_docc + t1b_docc) s_f_ia_2, _ = np.linalg.eigh(f_ia @ f_ia.T) s_f_ia_2_norm = np.sqrt(np.max(s_f_ia_2, initial=0)) if len(socc_idx) > 0: f_xa = np.sqrt(1 / 2) * t1_xa f_ix = np.sqrt(1 / 2) * t1_ix s_f_xa_2, _ = np.linalg.eigh(f_xa @ f_xa.T) s_f_ix_2, _ = np.linalg.eigh(f_ix @ f_ix.T) else: s_f_xa_2 = np.array(()) s_f_ix_2 = np.array(()) s_f_xa_2_norm = np.sqrt(np.max(s_f_xa_2, initial=0)) s_f_ix_2_norm = np.sqrt(np.max(s_f_ix_2, initial=0)) d1_diagnostic = np.max( np.array([s_f_ia_2_norm, s_f_xa_2_norm, s_f_ix_2_norm])) return t1_diagnostic, d1_diagnostic
40.677067
80
0.586523
from typing import Tuple, Optional import sys import h5py import numpy as np from pyscf import gto, scf, ao2mo, mcscf, lo, tools, cc from pyscf.mcscf import avas def stability(pyscf_mf): new_orbitals = pyscf_mf.stability()[0] new_1rdm = pyscf_mf.make_rdm1(new_orbitals, pyscf_mf.mo_occ) pyscf_mf = pyscf_mf.run(new_1rdm) return pyscf_mf def localize(pyscf_mf, loc_type='pm', verbose=0): # followed by calling mf.kernel()). But consistent localization is not a # given (not unique) despite restoring data this way, hence the message. if len(pyscf_mf.mol.atom) == 0: sys.exit("`localize()` requires atom loc. and atomic basis to be" + \ " defined.\n " + \ "It also can be sensitive to the initial guess and MO" + \ " coefficients.\n " + \ "Best to try re-creating the PySCF molecule and doing the" + \ " SCF, rather than\n " + \ "try to load the mean-field object with" + \ " `load_casfile_to_pyscf()`. You can \n " + \ "try to provide the missing information, but consistency" + \ " cannot be guaranteed!") # Split-localize (localize DOCC, SOCC, and virtual separately) docc_idx = np.where(np.isclose(pyscf_mf.mo_occ, 2.))[0] socc_idx = np.where(np.isclose(pyscf_mf.mo_occ, 1.))[0] virt_idx = np.where(np.isclose(pyscf_mf.mo_occ, 0.))[0] # Pipek-Mezey if loc_type.lower() == 'pm': print("Localizing doubly occupied ... ", end="") loc_docc_mo = lo.PM( pyscf_mf.mol, pyscf_mf.mo_coeff[:, docc_idx]).kernel(verbose=verbose) print("singly occupied ... ", end="") loc_socc_mo = lo.PM( pyscf_mf.mol, pyscf_mf.mo_coeff[:, socc_idx]).kernel(verbose=verbose) print("virtual ... ", end="") loc_virt_mo = lo.PM( pyscf_mf.mol, pyscf_mf.mo_coeff[:, virt_idx]).kernel(verbose=verbose) print("DONE") # Edmiston-Rudenberg elif loc_type.lower() == 'er': print("Localizing doubly occupied ... ", end="") loc_docc_mo = lo.ER( pyscf_mf.mol, pyscf_mf.mo_coeff[:, docc_idx]).kernel(verbose=verbose) print("singly occupied ... 
", end="") loc_socc_mo = lo.ER( pyscf_mf.mol, pyscf_mf.mo_coeff[:, socc_idx]).kernel(verbose=verbose) print("virtual ... ", end="") loc_virt_mo = lo.ER( pyscf_mf.mol, pyscf_mf.mo_coeff[:, virt_idx]).kernel(verbose=verbose) print("DONE") # overwrite orbitals with localized orbitals pyscf_mf.mo_coeff[:, docc_idx] = loc_docc_mo.copy() pyscf_mf.mo_coeff[:, socc_idx] = loc_socc_mo.copy() pyscf_mf.mo_coeff[:, virt_idx] = loc_virt_mo.copy() return pyscf_mf def avas_active_space(pyscf_mf, ao_list=None, molden_fname='avas_localized_orbitals', **kwargs): # Note: requires openshell_option = 3 for this to work, which keeps all # singly occupied in CAS # we also require canonicalize = False so that we don't destroy local orbs avas_output = avas.avas(pyscf_mf, ao_list, canonicalize=False, openshell_option=3, **kwargs) active_norb, active_ne, reordered_orbitals = avas_output active_alpha, _ = get_num_active_alpha_beta(pyscf_mf, active_ne) if molden_fname is not None: if isinstance(pyscf_mf, scf.rohf.ROHF): frozen_alpha = pyscf_mf.nelec[0] - active_alpha assert frozen_alpha >= 0 else: frozen_alpha = pyscf_mf.mol.nelectron // 2 - active_alpha assert frozen_alpha >= 0 active_space_idx = slice(frozen_alpha, frozen_alpha + active_norb) active_mos = reordered_orbitals[:, active_space_idx] tools.molden.from_mo(pyscf_mf.mol, molden_fname + '.molden', mo_coeff=active_mos) pyscf_active_space_mol, pyscf_active_space_mf = cas_to_pyscf( *pyscf_to_cas(pyscf_mf, cas_orbitals=active_norb, cas_electrons=active_ne, avas_orbs=reordered_orbitals)) return pyscf_active_space_mol, pyscf_active_space_mf def cas_to_pyscf(h1, eri, ecore, num_alpha, num_beta): n_orb = len(h1) assert [n_orb] * 4 == [*eri.shape] pyscf_mol = gto.M() pyscf_mol.nelectron = num_alpha + num_beta n_orb = h1.shape[0] alpha_diag = [1] * num_alpha + [0] * (n_orb - num_alpha) beta_diag = [1] * num_beta + [0] * (n_orb - num_beta) if num_alpha == num_beta: pyscf_mf = scf.RHF(pyscf_mol) scf_energy = ecore + \ 2*np.einsum('ii', 
h1[:num_alpha,:num_alpha]) + \ 2*np.einsum('iijj', eri[:num_alpha,:num_alpha,:num_alpha,:num_alpha]) - \ np.einsum('ijji', eri[:num_alpha,:num_alpha,:num_alpha,:num_alpha]) else: pyscf_mf = scf.ROHF(pyscf_mol) pyscf_mf.nelec = (num_alpha, num_beta) docc = slice(None, min(num_alpha, num_beta)) socc = slice(min(num_alpha, num_beta), max(num_alpha, num_beta)) scf_energy = ecore + \ 2.0*np.einsum('ii',h1[docc, docc]) + \ np.einsum('ii',h1[socc, socc]) + \ 2.0*np.einsum('iijj',eri[docc, docc, docc, docc]) - \ np.einsum('ijji',eri[docc, docc, docc, docc]) + \ np.einsum('iijj',eri[socc, socc, docc, docc]) - \ 0.5*np.einsum('ijji',eri[socc, docc, docc, socc]) + \ np.einsum('iijj',eri[docc, docc, socc, socc]) - \ 0.5*np.einsum('ijji',eri[docc, socc, socc, docc]) + \ 0.5*np.einsum('iijj',eri[socc, socc, socc, socc]) - \ 0.5*np.einsum('ijji',eri[socc, socc, socc, socc]) pyscf_mf.get_hcore = lambda *args: np.asarray(h1) pyscf_mf.get_ovlp = lambda *args: np.eye(h1.shape[0]) pyscf_mf.energy_nuc = lambda *args: ecore pyscf_mf._eri = eri pyscf_mf.e_tot = scf_energy pyscf_mf.init_guess = '1e' pyscf_mf.mo_coeff = np.eye(n_orb) pyscf_mf.mo_occ = np.array(alpha_diag) + np.array(beta_diag) pyscf_mf.mo_energy, _ = np.linalg.eigh(pyscf_mf.get_fock()) return pyscf_mol, pyscf_mf def pyscf_to_cas(pyscf_mf, cas_orbitals: Optional[int] = None, cas_electrons: Optional[int] = None, avas_orbs=None): assert isinstance(pyscf_mf, scf.rhf.RHF) if cas_orbitals is None: cas_orbitals = len(pyscf_mf.mo_coeff) if cas_electrons is None: cas_electrons = pyscf_mf.mol.nelectron cas = mcscf.CASCI(pyscf_mf, ncas=cas_orbitals, nelecas=cas_electrons) h1, ecore = cas.get_h1eff(mo_coeff=avas_orbs) eri = cas.get_h2cas(mo_coeff=avas_orbs) eri = ao2mo.restore('s1', eri, h1.shape[0]) ecore = float(ecore) num_alpha, num_beta = get_num_active_alpha_beta(pyscf_mf, cas_electrons) return h1, eri, ecore, num_alpha, num_beta def get_num_active_alpha_beta(pyscf_mf, cas_electrons): total_electrons = pyscf_mf.mol.nelectron 
frozen_electrons = total_electrons - cas_electrons assert frozen_electrons % 2 == 0 if isinstance(pyscf_mf, scf.rohf.ROHF): frozen_alpha = frozen_electrons // 2 frozen_beta = frozen_electrons // 2 num_alpha = pyscf_mf.nelec[0] - frozen_alpha num_beta = pyscf_mf.nelec[1] - frozen_beta assert np.isclose(num_beta + num_alpha, cas_electrons) else: assert cas_electrons % 2 == 0 num_alpha = cas_electrons // 2 num_beta = cas_electrons // 2 return num_alpha, num_beta def load_casfile_to_pyscf(fname, num_alpha: Optional[int] = None, num_beta: Optional[int] = None): with h5py.File(fname, "r") as f: eri = np.asarray(f['eri'][()]) try: h1 = np.asarray(f['h0'][()]) except KeyError: try: h1 = np.asarray(f['hcore'][()]) except KeyError: try: h1 = np.asarray(f['h1'][()]) except KeyError: raise KeyError("Could not find 1-electron Hamiltonian") try: ecore = float(f['ecore'][()]) except KeyError: try: ecore = float(f['enuc'][()]) except KeyError: ecore = 0.0 if (num_alpha is None) or (num_beta is None): try: num_alpha = int(f['active_nalpha'][()]) except KeyError: sys.exit("In `load_casfile_to_pyscf()`: \n" + \ " No values found on file for num_alpha " + \ "(key: 'active_nalpha' in h5). " + \ " Try passing in a value for num_alpha, or" + \ " re-check integral file.") try: num_beta = int(f['active_nbeta'][()]) except KeyError: sys.exit("In `load_casfile_to_pyscf()`: \n" + \ " No values found on file for num_beta " + \ "(key: 'active_nbeta' in h5). 
" + \ " Try passing in a value for num_beta, or" + \ " re-check integral file.") pyscf_mol, pyscf_mf = cas_to_pyscf(h1, eri, ecore, num_alpha, num_beta) return pyscf_mol, pyscf_mf def save_pyscf_to_casfile(fname, pyscf_mf, cas_orbitals: Optional[int] = None, cas_electrons: Optional[int] = None, avas_orbs=None): h1, eri, ecore, num_alpha, num_beta = \ pyscf_to_cas(pyscf_mf, cas_orbitals, cas_electrons, avas_orbs) with h5py.File(fname, 'w') as fid: fid.create_dataset('ecore', data=float(ecore), dtype=float) fid.create_dataset( 'h0', data=h1) fid.create_dataset('eri', data=eri) fid.create_dataset('active_nalpha', data=int(num_alpha), dtype=int) fid.create_dataset('active_nbeta', data=int(num_beta), dtype=int) def factorized_ccsd_t(pyscf_mf, eri_rr = None, use_kernel = True,\ no_triples=False) -> Tuple[float, float, float]: h1, eri_full, ecore, num_alpha, num_beta = pyscf_to_cas(pyscf_mf) if eri_rr is None: eri_rr = eri_full e_scf, e_cor, e_tot = ccsd_t(h1, eri_rr, ecore, num_alpha, num_beta,\ eri_full, use_kernel, no_triples) return e_scf, e_cor, e_tot def ccsd_t(h1, eri, ecore, num_alpha: int, num_beta: int, eri_full = None,\ use_kernel=True, no_triples=False) -> Tuple[float, float, float]: mol = gto.M() mol.nelectron = num_alpha + num_beta n_orb = h1.shape[0] alpha_diag = [1] * num_alpha + [0] * (n_orb - num_alpha) beta_diag = [1] * num_beta + [0] * (n_orb - num_beta) if eri_full is None: eri_full = eri if num_alpha == num_beta: mf = scf.RHF(mol) scf_energy = ecore + \ 2*np.einsum('ii',h1[:num_alpha,:num_alpha]) + \ 2*np.einsum('iijj',eri_full[:num_alpha,\ :num_alpha,\ :num_alpha,\ :num_alpha]) - \ np.einsum('ijji',eri_full[:num_alpha,\ :num_alpha,\ :num_alpha,\ :num_alpha]) else: mf = scf.ROHF(mol) mf.nelec = (num_alpha, num_beta) docc = slice(None, min(num_alpha, num_beta)) socc = slice(min(num_alpha, num_beta), max(num_alpha, num_beta)) scf_energy = ecore + \ 2.0*np.einsum('ii',h1[docc, docc]) + \ np.einsum('ii',h1[socc, socc]) + \ 
2.0*np.einsum('iijj',eri_full[docc, docc, docc, docc]) - \ np.einsum('ijji',eri_full[docc, docc, docc, docc]) + \ np.einsum('iijj',eri_full[socc, socc, docc, docc]) - \ 0.5*np.einsum('ijji',eri_full[socc, docc, docc, socc]) + \ np.einsum('iijj',eri_full[docc, docc, socc, socc]) - \ 0.5*np.einsum('ijji',eri_full[docc, socc, socc, docc]) + \ 0.5*np.einsum('iijj',eri_full[socc, socc, socc, socc]) - \ 0.5*np.einsum('ijji',eri_full[socc, socc, socc, socc]) mf.get_hcore = lambda *args: np.asarray(h1) mf.get_ovlp = lambda *args: np.eye(h1.shape[0]) mf.energy_nuc = lambda *args: ecore mf._eri = eri_full mf.init_guess = '1e' mf.mo_coeff = np.eye(n_orb) mf.mo_occ = np.array(alpha_diag) + np.array(beta_diag) w, _ = np.linalg.eigh(mf.get_fock()) mf.mo_energy = w if use_kernel: mf.conv_tol = 1e-7 mf.init_guess = '1e' mf.verbose = 4 mf.diis_space = 24 mf.level_shift = 0.5 mf.conv_check = False mf.max_cycle = 800 mf.kernel(mf.make_rdm1(mf.mo_coeff, mf.mo_occ)) mf = stability(mf) mf = stability(mf) mf = stability(mf) try: assert np.isclose(scf_energy, mf.e_tot, rtol=1e-14) except AssertionError: print( "WARNING: E(SCF) from input integrals does not match E(SCF)" + \ " from mf.kernel()") print(" Will use E(SCF) = {:12.6f} from mf.kernel going forward.". 
format(mf.e_tot)) print("E(SCF, ints) = {:12.6f} whereas E(SCF) = {:12.6f}".format( scf_energy, mf.e_tot)) scf_energy = mf.e_tot mf._eri = eri mf.mol.incore_anyway = True mycc = cc.CCSD(mf) mycc.max_cycle = 800 mycc.conv_tol = 1E-8 mycc.conv_tol_normt = 1E-4 mycc.diis_space = 24 mycc.verbose = 4 mycc.kernel() if no_triples: et = 0.0 else: et = mycc.ccsd_t() e_scf = scf_energy # may be read-in value or 'fresh' SCF value e_cor = mycc.e_corr + et e_tot = e_scf + e_cor print("E(SCF): ", e_scf) print("E(cor): ", e_cor) print("Total energy: ", e_tot) return e_scf, e_cor, e_tot def open_shell_t1_d1(t1a, t1b, mo_occ, nalpha, nbeta): # compute t1-diagnostic docc_idx = np.where(np.isclose(mo_occ, 2.))[0] socc_idx = np.where(np.isclose(mo_occ, 1.))[0] virt_idx = np.where(np.isclose(mo_occ, 0.))[0] t1a_docc = t1a[docc_idx, :] # double occ-> virtual t1b_docc = t1b[docc_idx, :][:, -len(virt_idx):] # double occ-> virtual if len(socc_idx) > 0: t1_xa = t1a[socc_idx, :] # single occ -> virtual t1_ix = t1b[docc_idx, :][:, :len(socc_idx)] # double occ -> single occ else: t1_xa = np.array(()) t1_ix = np.array(()) if nalpha - nbeta + len(virt_idx) != t1b.shape[1]: raise ValueError( "Inconsistent shapes na {}, nb {}, t1b.shape {},{}".format( nalpha, nbeta, t1b.shape[0], t1b.shape[1])) if t1a_docc.shape != (len(docc_idx), len(virt_idx)): raise ValueError("T1a_ia does not have the right shape") if t1b_docc.shape != (len(docc_idx), len(virt_idx)): raise ValueError("T1b_ia does not have the right shape") if len(socc_idx) > 0: if t1_ix.shape != (len(docc_idx), len(socc_idx)): raise ValueError("T1_ix does not have the right shape") if t1_xa.shape != (len(socc_idx), len(virt_idx)): raise ValueError("T1_xa does not have the right shape") t1_diagnostic = np.sqrt( np.sum((t1a_docc + t1b_docc)**2) + 2 * np.sum(t1_xa**2) + 2 * np.sum(t1_ix**2)) / (2 * np.sqrt(nalpha + nbeta)) # compute D1-diagnostic f_ia = 0.5 * (t1a_docc + t1b_docc) s_f_ia_2, _ = np.linalg.eigh(f_ia @ f_ia.T) s_f_ia_2_norm = 
np.sqrt(np.max(s_f_ia_2, initial=0)) if len(socc_idx) > 0: f_xa = np.sqrt(1 / 2) * t1_xa f_ix = np.sqrt(1 / 2) * t1_ix s_f_xa_2, _ = np.linalg.eigh(f_xa @ f_xa.T) s_f_ix_2, _ = np.linalg.eigh(f_ix @ f_ix.T) else: s_f_xa_2 = np.array(()) s_f_ix_2 = np.array(()) s_f_xa_2_norm = np.sqrt(np.max(s_f_xa_2, initial=0)) s_f_ix_2_norm = np.sqrt(np.max(s_f_ix_2, initial=0)) d1_diagnostic = np.max( np.array([s_f_ia_2_norm, s_f_xa_2_norm, s_f_ix_2_norm])) return t1_diagnostic, d1_diagnostic
true
true