code
stringlengths
22
1.05M
apis
listlengths
1
3.31k
extract_api
stringlengths
75
3.25M
#!/usr/bin/python3 import json import sys from pprint import pprint import requests from config import database import MySQLdb try: db = MySQLdb.connect(database["host"], database["user"], database["passwd"], database["db"]) cur = db.cursor() payload = { "data": ( '[out:json][timeout:25];' 'area(3600109166)->.searchArea;' 'node["amenity"="bicycle_rental"]["network"="Citybike Wien"](area.searchArea);' 'out body;>;out skel qt;' ) } print("Overpass Abfrage") r = requests.get('https://overpass-api.de/api/interpreter', params=payload) data = r.json() print("erfolgreich") i = 0 for station in data["elements"]: if station["type"] == "node": tags = station["tags"] cur.execute("REPLACE INTO stationen (ref, lon, lat, name) VALUES (%s,%s,%s,%s)", (tags["ref"], station["lon"], station["lat"], tags["name"])) i += 1 db.commit() print("%s Stationen importiert" % i) db.close() except MySQLdb.Error as e: print("Error %d: %s" % (e.args[0], e.args[1])) sys.exit(1)
[ "MySQLdb.connect", "requests.get", "sys.exit" ]
[((146, 237), 'MySQLdb.connect', 'MySQLdb.connect', (["database['host']", "database['user']", "database['passwd']", "database['db']"], {}), "(database['host'], database['user'], database['passwd'],\n database['db'])\n", (161, 237), False, 'import MySQLdb\n'), ((635, 706), 'requests.get', 'requests.get', (['"""https://overpass-api.de/api/interpreter"""'], {'params': 'payload'}), "('https://overpass-api.de/api/interpreter', params=payload)\n", (647, 706), False, 'import requests\n'), ((1225, 1236), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1233, 1236), False, 'import sys\n')]
import numpy as np def compute_intensity(pos, pos_list, radius): return (norm(np.array(pos_list) - np.array(pos), axis=1) < radius).sum() def compute_colours(all_pos): colours = [compute_intensity(pos, all_pos, 1e-4) for pos in all_pos] colours /= max(colours) return colours
[ "numpy.array" ]
[((84, 102), 'numpy.array', 'np.array', (['pos_list'], {}), '(pos_list)\n', (92, 102), True, 'import numpy as np\n'), ((105, 118), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (113, 118), True, 'import numpy as np\n')]
#prints hello world letter by letter on windows system import os,time def slowhello(): s = 'Hello World!' for i in range(len(s)): os.system('cls') print (s[:i+1]) time.sleep(0.5) slowhello()
[ "os.system", "time.sleep" ]
[((135, 151), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (144, 151), False, 'import os, time\n'), ((172, 187), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (182, 187), False, 'import os, time\n')]
"""Parser Configuration Reader""" import typing from json import JSONDecodeError from pathlib import Path from nltk.corpus import stopwords from ..constants import ( BUILTIN, DEFAULT_IDEAL_LENGTH, DEFAULT_IDIOM, DEFAULT_LANGUAGE, DEFAULT_NLTK_STOPS, DEFAULT_USER_STOPS) from ..io import load_json from ..repr_able import ReprAble from ..typings import DictOfAny, StringList IdiomData = typing.Tuple[int, str, StringList] def get_config_path(root: str, idiom: str) -> Path: """Get path to idiom config Arguments: root {str} -- root directory of idiom config idiom {str} -- basename of idiom config Returns: Tuple[Path, Path] -- pathlib.Path to file """ root_path = Path(root) file_name = '{}.json'.format(idiom) return root_path.joinpath(file_name) def get_stop_words(idiom_spec: DictOfAny, language: str) -> set: """List stop words based on idiom configuration Returns: set -- list of stop words """ stop_cfg = { 'nltk': DEFAULT_NLTK_STOPS, 'user': DEFAULT_USER_STOPS, } # type: DictOfAny stop_cfg.update(idiom_spec.get('stop_words', {})) use_nltk = bool(stop_cfg['nltk']) use_user = isinstance(stop_cfg['user'], list) nltk = stopwords.words(language) if use_nltk else [] user = [str(word) for word in stop_cfg['user']] if use_user else [] return set(nltk + user) def parse_config(path: Path) -> typing.Tuple[int, str, StringList]: """Load defaults, override with loaded data Arguments: path {Path} -- path to config file Raises: ValueError -- unable to read file Returns: typing.Tuple[int, str, StringList] -- ideal length, NLTK language, stop words """ try: idiom_spec = load_json(path.absolute()) ideal = idiom_spec.get( 'ideal', DEFAULT_IDEAL_LENGTH) # type: int language = idiom_spec.get( 'language', DEFAULT_LANGUAGE) # type: str stop_words = get_stop_words( idiom_spec, language) # type: typing.Set[str] except (JSONDecodeError, AttributeError): raise ValueError('invalid config file: {!r}'.format(path)) return int(ideal), str(language), list(stop_words) def load_idiom( root: str = BUILTIN, 
idiom: str = DEFAULT_IDIOM) -> IdiomData: """Get class initialization data from `root`/`idiom`.json Arguments: root {str} -- root directory of idiom data (default: {parser.BUILTIN}) idiom {str} -- basename of idiom file (default: {parser.DEFAULT_IDIOM}) Raises: PermissionError -- directory traversal via idiom JSONDecodeError -- unable to load JSON from file Returns: IDIOM_DATA -- tuple(ideal length, NLTK language, stop words) """ root_path = Path(root) cfg_path = get_config_path(root, idiom) try: # pylint: disable=no-member cfg_path.resolve().relative_to(root_path.resolve()) except (ValueError, OSError): raise PermissionError('directory traversal in idiom: ' + idiom) config = parse_config(cfg_path) return config class ParserConfig(ReprAble): # pylint: disable=too-few-public-methods """Parser configuration data""" def __init__(self, root: str, idiom: str) -> None: ideal, language, stop_words = load_idiom(root, idiom) self.ideal_sentence_length = ideal # type: int self.language = language # type: str self.stop_words = stop_words # type: StringList
[ "nltk.corpus.stopwords.words", "pathlib.Path" ]
[((723, 733), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (727, 733), False, 'from pathlib import Path\n'), ((2870, 2880), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (2874, 2880), False, 'from pathlib import Path\n'), ((1255, 1280), 'nltk.corpus.stopwords.words', 'stopwords.words', (['language'], {}), '(language)\n', (1270, 1280), False, 'from nltk.corpus import stopwords\n')]
""" This is module implementing fixed point numbers for python. For a motivation, description and some examples, we refer to the docstring of the FixedPoint class. """ from __future__ import annotations import numbers from secrets import randbelow from typing import Optional, Tuple, Union # Add numpy support, if available. try: import numpy as np SUPPORT_NUMPY = True except ImportError: SUPPORT_NUMPY = False FxpInputType = Union["FixedPoint", numbers.Integral, str, float] class FixedPoint: r""" Outline: 1. Motivation 2. Description 3. Examples 1. Motivation Encryption schemes generally work with (scaled) fixed-point numbers (or integers), while such numbers are represented by floating points in python. This causes a discrepancy between the number that is encrypted and the respective decryption. This results in difficulties, e.g., when you want a random additive plaintext mask. This module addresses that discrepancy and provides arbitrary-precision fixed-point numbers including simple arithmetic such as addition, subtraction, multiplication and comparison operators. 2. Description A fixed-point number is defined by 2 integers: - value: an arbitrary-precision integer - precision: an integer indicating the position of the dot which separates the integer part from the fractional part (counting from the right) Fixed-point numbers can be instantiated from strings, integers, floats and other fixed-points. using the class method FixedPoint.fxp. 3. Examples: - fxp("10.001234") -> value = 10001234 precision = 6 (this represents 10.001234) - fxp("1234e-2") -> value = 1234 precision = 2 (this represents 12.34) - fxp(42) -> value = 42 precision = 0 (this represents 42) - fxp(-123, 2) -> value = -12300 precision = 2 (this represents -123.00) - fxp(123.) 
-> value = 123 precision = 0 (This represents 123) - fxp(-1.23) -> value = -123 precision = 2 (this represents -1.23) - fxp(-0.123, 5) -> value = -12300 precision = 5 (this represents -0.12300) - fxp(1234e-2) -> value = 1234 precision = 2 (this represents 12.34) """ __slots__ = ( "value", "precision", ) DEFAULT_FLOAT_PRECISION = 16 @classmethod def fxp( cls, input_value: FxpInputType, target_precision: Optional[int] = None, ) -> FixedPoint: """ Create a fixed-point number from a string, int, float or fixed-point number with a specified precision. If no precision is provided, it is deduced from the input_value. If precision is provided but it contradicts the precision of the input_value value (too large or too small), the input_value value is either truncated or trailing zeroes are added. Legitimate input values: - str: a string containing numbers in the range [0-9]. This can be point-separated to represent an integer part (before the full stop) and a fractional part (after the full stop). - int: an arbitrarily large integer. By default,it will be converted to a fixed-point with a precision of 0, but if a precision is provided, then the fixed point represents the input_value value times 10**-precision. - float: a floating-point number. The default precision is 16 bits. The floating point is scaled and the value is extracted according to the floating point number and the precision. - FixedPoint: another fixed-point number. If no precision is provided, all values are copied. If a precision is provided, the fixed-point number is either truncated or trailing zeroes are added to attain the new precision. :param input_value: the number to be converted to a fixed-point. :param target_precision: The desired precision of the resulting fixed-point number. 
:return: A fixed point version of the provided input :raises TypeError: Raised if the input value is not an integer, float, fixed point or string """ # minimal precision is 0 if target_precision is not None: assert target_precision >= 0 if isinstance(input_value, str): return FixedPoint.initiate_from_string(input_value, target_precision) if isinstance(input_value, numbers.Integral): return FixedPoint.initiate_from_int(input_value, target_precision) if isinstance(input_value, float): return FixedPoint.initiate_from_float(input_value, target_precision) if SUPPORT_NUMPY and isinstance(input_value, np.floating): return FixedPoint.initiate_from_float(input_value, target_precision) if isinstance(input_value, FixedPoint): return FixedPoint.initiate_from_fxp(input_value, target_precision) raise TypeError("the input_value is not of type int, float, fixed-point or str") def __init__(self, value: int, precision: int) -> None: """ Initialise the fixed point number. :param value: The arbitrary-precision integer value representing the fixed point number :param precision: The location of the dot, counting from the right """ self.value = value assert precision >= 0 self.precision = precision @staticmethod def initiate_from_string( input_value: str, precision: Optional[int] = None ) -> FixedPoint: """ This is the most reliable way to instantiate a fixed point, as the string accurately represents how the fixed point will be represented. The input is parsed as <integer>.<fractional> or <integer> or <integer>e<integer>. the precision is extracted automatically. If a target precision is provided then zeroes are added if the target precision is higher and the number is rounded towards the right precision if the target precision is lower. :param input_value: string of decimals, possibly separated by a full stop :param precision: desired precision. This has precedence over the implicit string precision. 
:return: the resulting fixed-point number :raises ValueError: Raised if the provided string does not fit the parsing format. """ if " " in input_value: raise ValueError("It is not allowed to have spaces in the input") if "e" in input_value: e_split = input_value.split("e") left_side = FixedPoint.initiate_from_string(e_split[0]) power = int(e_split[1]) if power < 0: result = FixedPoint(left_side.value, left_side.precision - power) else: result = FixedPoint(left_side.value * 10 ** power, left_side.precision) return FixedPoint.fxp(result, precision) split = input_value.split(".") left = split[0] nr_entries = len(split) # If the format is <integer>, parse the input as an int and call the initiation function # for integers. if nr_entries == 1: try: value = int(left) except ValueError as format_error: raise ValueError( 'The input value does not conform to the expect format "x.y" or "x" for ' "integers x and y" ) from format_error if precision is None: return FixedPoint(value, 0) return FixedPoint(value * 10 ** precision, precision) # If the format is <integer>.<fractional>, determine whether the fractional part gives the # right precision. Correct the value if the precisions do not match. 
if nr_entries == 2: right = split[1] # combine the integer and fractional part into 1 big integer value_str = left + right # determine the precision of the fractional part input_precision = len(right) if precision is not None: dot_from_right = precision assert precision >= 0 difference = precision - input_precision if difference >= 0: # add (precision - input_precision) trailing zeroes value_str += "0" * difference value = int(value_str) else: # truncate the last (input_precision - precision) digits and round if necessary value_int = int(value_str) value = FixedPoint.round_to_precision( value_int, input_precision, precision ) else: dot_from_right = input_precision value = int(value_str) return FixedPoint(value, dot_from_right) # Raise an error if the input_value does not have the right format raise ValueError( 'The input value does not conform to the expect format "x.y" or "x" ' "for integers x and y" ) @staticmethod def initiate_from_int( input_value: numbers.Integral, precision: Optional[int] = None ) -> FixedPoint: """ If the input_value is an integer, we set the integer value to the input_value and decimal to zero. :param input_value: the input_value integer :param precision: position of the dot, counting from the right :return: the resulting fixed-point number """ if precision is None: return FixedPoint(int(input_value), 0) return FixedPoint(int(input_value * 10 ** precision), precision) @staticmethod def initiate_from_float( input_value: float, target_precision: Optional[int] = None ) -> FixedPoint: """ if the input value is a float, we multiply it by a power of 10 to create a scaled floating point number and then extract an integer to represent the fixed point number. If no precision is provided, the precision is extracted from the string representation. 
:param input_value: the input_value integer :param target_precision: desired precision :return: the resulting fixed-point number """ return FixedPoint.initiate_from_string(str(input_value), target_precision) @staticmethod def initiate_from_fxp( input_value: FixedPoint, target_precision: Optional[int] = None ) -> FixedPoint: """ If the input value is another fixed point, correct the value with respect to the target precision. :param input_value: the input_value fixed-point number :param target_precision: desired precision :return: the resulting fixed-point number """ if target_precision is None: return FixedPoint(input_value.value, input_value.precision) assert target_precision >= 0 if target_precision >= input_value.precision: value = input_value.value * 10 ** (target_precision - input_value.precision) else: value = FixedPoint.round_to_precision( input_value.value, input_value.precision, target_precision ) return FixedPoint(value, target_precision) # endregion @staticmethod def calibrate(*fixed_points: FixedPoint) -> Tuple[int, Tuple[FixedPoint, ...]]: """ Function that determines that maximum precision among all the fixed points and scales the fixed points according to the maximum precision. :param fixed_points: fixed point numbers :return: A tuple where the first entry is the maximum precision and the subsequent entries are the given fixed points scaled to this maximum precision. """ max_precision: int = max(fixed_point.precision for fixed_point in fixed_points) calibrated_fxps = tuple( FixedPoint.fxp(fixed_point, max_precision) for fixed_point in fixed_points ) return max_precision, calibrated_fxps def __repr__(self) -> str: """ Function that determines the representation of a fixed point object :return: string containing a representation of the fixed point object """ return str(self) def __str__(self) -> str: """ Function that casts a fixed point object to a string. 
First a representation without a dot is found and then the dot is inserted in the right place if the fixed point is not integer. :return: A string representing the fixed point object """ is_neg = self.value < 0 sign = "-" if is_neg else "" pos_string = str(abs(self.value)) string_len = len(pos_string) if self.precision == 0: return sign + pos_string if string_len <= self.precision: diff = self.precision - string_len pos_string = "0" * (diff + 1) + pos_string return ( sign + pos_string[: -self.precision] + "." + pos_string[-self.precision :] ) def __bool__(self) -> bool: """ Function that casts a fixed point object to a boolean. :return: A bool representing whether the fixed point object is unequal to zero. """ return bool(self.value) def __int__(self) -> int: """ Function that casts a fixed point object to an integer. The function uses rounding instead of downward truncation. :return: An integer representing the rounded fixed point object """ return int(FixedPoint.round_to_precision(self.value, self.precision, 0)) def __float__(self) -> float: """ Function that cases a fixed point object to a float. If the fixed point number is too large, the float might return an error. :return: A floating point number representing the fixed point object """ return float(self.value) / float(10 ** self.precision) def __eq__(self, other: object) -> bool: """ Function that determines whether the fixed point object is equal to another object. The other object does not have to be a fixed point object. Additionally, this is a 'weak' equality in the sense that we first cast the other object to a fixed point number if it is not already and then check for equality. The precision value does not need to be equal, as long as the calibrated fixed point objects are equal. For strong equality, use strong_eq. 
For example: - fxp("100.0") == fxp("100.0000") -> True - fxp("12.34") == 12.34 -> True - fxp("0.012") == fxp("0.01") -> False :param other: Fixed point object, integer, string or float :return: whether self and the other object are (weakly) equal :raise NotImplementedError: If the other object does not have a compatible type. """ if not isinstance(other, (str, numbers.Integral, float, FixedPoint)): raise NotImplementedError( "The compatible object types are string, integer, float and FixedPoint." ) if isinstance(other, FixedPoint): if self.precision != other.precision: _, (cal_self, cal_other) = FixedPoint.calibrate(self, other) return cal_self.value == cal_other.value return self.value == other.value return self == FixedPoint.fxp(other, self.precision) @staticmethod def strong_eq(fxp_1: FixedPoint, fxp_2: FixedPoint) -> bool: """ Function that determines whether two fixed points are exactly equal :param fxp_1: Fixed point number :param fxp_2: Fixed point number :return: Whether the values and precision of the fixed point objects are equal """ return fxp_1.value == fxp_2.value and fxp_1.precision == fxp_2.precision def __neg__(self) -> FixedPoint: """ Function that returns a fixed point number that represents the negation of the fixed point number. :return: negation of the fixed point number """ return FixedPoint(-self.value, self.precision) def __gt__(self, other: object) -> bool: """ Function that returns whether this fixed pont number is greater than another compatible data type instance. :param other: fixed point number, integer, string or float :return: whether self is greater than the fixed point version of other :raise NotImplementedError: If the other object does not have a compatible type. """ if not isinstance(other, (str, numbers.Integral, float, FixedPoint)): raise NotImplementedError( "The compatible object types are string, integer, float and FixedPoint." 
) other_ = FixedPoint.fxp(other) _, (cal_self, cal_other) = FixedPoint.calibrate(self, other_) return cal_self.value > cal_other.value def __ge__(self, other: object) -> bool: """ Function that returns whether this fixed pont number is greater than or equal to another compatible data type instance. :param other: fixed point number, integer, string or float :return: whether self is greater than or equal to the fixed point version of other :raise NotImplementedError: If the other object does not have a compatible type. """ if not isinstance(other, (str, numbers.Integral, float, FixedPoint)): raise NotImplementedError( "The compatible object types are string, integer, float and FixedPoint." ) other_ = FixedPoint.fxp(other) _, (cal_self, cal_other) = FixedPoint.calibrate(self, other_) return cal_self.value >= cal_other.value def __lt__(self, other: object) -> bool: """ Function that returns whether this fixed pont number is less than another compatible data type instance. :param other: fixed point number, integer, string or float :return: whether self is less than the fixed point version of other :raise NotImplementedError: If the other object does not have a compatible type. """ if not isinstance(other, (str, numbers.Integral, float, FixedPoint)): raise NotImplementedError( "The compatible object types are string, integer, float and FixedPoint." ) other_ = FixedPoint.fxp(other) _, (cal_self, cal_other) = FixedPoint.calibrate(self, other_) return cal_self.value < cal_other.value def __le__(self, other: object) -> bool: """ Function that returns whether this fixed pont number is less than or equal to another compatible data type instance. :param other: fixed point number, integer, string or float :return: whether self is less than or equal to the fixed point version of other :raise NotImplementedError: If the other object does not have a compatible type. 
""" if not isinstance(other, (str, numbers.Integral, float, FixedPoint)): raise NotImplementedError( "The compatible object types are string, integer, float and FixedPoint." ) other_ = FixedPoint.fxp(other) _, (cal_self, cal_other) = FixedPoint.calibrate(self, other_) return cal_self.value <= cal_other.value def __abs__(self) -> FixedPoint: """ Function that returns a fixed point number that represents the absolute value of the fixed point number. :return: absolute value of the fixed point number """ return FixedPoint(abs(self.value), self.precision) def __sub__(self, other: object) -> FixedPoint: """ Subtract another fixed point number (or type convertible to a fixed point number) from self. :param other: a fixed point number, integer, string or float :return: the result of subtracting the other value from self :raise NotImplementedError: If the other object does not have a compatible type. """ if not isinstance(other, (str, numbers.Integral, float, FixedPoint)): raise NotImplementedError( "The compatible object types are string, integer, float and FixedPoint." ) other_ = FixedPoint.fxp(other) max_precision, (cal_self, cal_other) = FixedPoint.calibrate(self, other_) return FixedPoint(cal_self.value - cal_other.value, max_precision) def __rsub__(self, other: object) -> FixedPoint: """ Subtract self from an object of a type convertible to a fixed point number :param other: a fixed point number, integer, string or float :return: the result of subtracting self from the other value :raise NotImplementedError: If the other object does not have a compatible type. """ if not isinstance(other, (str, numbers.Integral, float, FixedPoint)): raise NotImplementedError( "The compatible object types are string, integer, float and FixedPoint." ) other_ = FixedPoint.fxp(other, self.precision) return other_ - self def __add__(self, other: object) -> FixedPoint: """ Add another fixed point number (or type convertible to a fixed point number) to self. 
:param other: a fixed pont number, integer, string or float :return: The addition of self to other :raise NotImplementedError: If the other object does not have a compatible type. """ if not isinstance(other, (str, numbers.Integral, float, FixedPoint)): raise NotImplementedError( "The compatible object types are string, integer, float and FixedPoint." ) other_ = FixedPoint.fxp(other) max_precision, (cal_self, cal_other) = FixedPoint.calibrate(self, other_) return FixedPoint(cal_self.value + cal_other.value, max_precision) __radd__ = __add__ @staticmethod def round_to_precision( value: int, current_precision: int, target_precision: int ) -> int: """ Function that takes a fixed point representation (value, precision) and changes the value to obtain the right precision for the fixed point representation. It uses rounding when the target precision is lower than the current precision. :param value: An integer representing the value :param current_precision: An integer representing the precision of the given value :param target_precision: The desired precision :return: A new value that represents a (rounded) fixed point number with the target precision :raise TypeError: If value, current_precision or target_precision is not an int """ if current_precision <= target_precision: return_value: int = value * 10 ** int(target_precision - current_precision) return return_value sign = int(value >= 0) * 2 - 1 abs_value: int = abs(value) to_reduce_by: int = current_precision - target_precision # to_reduce_by > 0, because current_precision > target_precision pre_scaled_value: int = abs_value // 10 ** (to_reduce_by - 1) last_digit: int = pre_scaled_value % 10 round_away_from_zero: bool = last_digit >= 5 scaled_value: int = pre_scaled_value // 10 # if we only truncate zeroes, we do not need any corrections correction: int = int(round_away_from_zero) if scaled_value * 10 ** to_reduce_by == abs_value: correction = 0 rounded_scaled_value = sign * (scaled_value + correction) return 
rounded_scaled_value def __mul__(self, other: object) -> FixedPoint: """ Multiply another fixed point number (or type convertible to a fixed point number) with self. Note that the result is calculated first with arbitrary precision and then rounded to obtain the maximum precision of the two inputs. For example: - fxp("0.1") * fxp("0.5") = fxp("0.1") - fxp("0.1") * fxp("0.4") = fxp("0.0") :param other: a fixed point number or other type convertible to a fixed point number. :return: a * b :raise NotImplementedError: If the other object does not have a compatible type. """ if not isinstance(other, (str, numbers.Integral, float, FixedPoint)): raise NotImplementedError( "The compatible object types are string, integer, float and FixedPoint." ) other_ = FixedPoint.fxp(other) max_precision = max(self.precision, other_.precision) # The multiplication value is simply the multiplied values mult = self.value * other_.value # This has precision self.precision + other_.precision new_precision = self.precision + other_.precision # scale down, such that is has a precision of max_precision scaled_mult = FixedPoint.round_to_precision( value=mult, current_precision=new_precision, target_precision=max_precision ) return FixedPoint(scaled_mult, max_precision) __rmul__ = __mul__ def __truediv__(self, other: object) -> FixedPoint: """ Divide self with another fixed point number (or type convertible to a fixed point number). Note that the result is calculated first with arbitrary precision and then rounded to obtain the maximum precision of the two inputs. For example: - fxp("0.2") / fxp("3.0") = fxp("0.7") - fxp("0.1") / fxp("2.1") = fxp("0.0") :param other: a fixed point number or other type convertible to a fixed point number. :return: a / b :raise NotImplementedError: If the other object does not have a compatible type. 
""" if not isinstance(other, (str, numbers.Integral, float, FixedPoint)): raise NotImplementedError( "The compatible object types are string, integer, float and FixedPoint." ) other_ = FixedPoint.fxp(other) max_precision = max(self.precision, other_.precision) # To divide we first determine a scaling factor (for higher precision) scale_factor = 10 ** (self.precision + 2 * other_.precision + 1) # use proper rounding div = (scale_factor * self.value + other_.value // 2) // other_.value # This has precision self.precision + other.precision new_precision = 2 * self.precision + other_.precision + 1 # scale down, such that is has a precision of max_precision scaled_div = FixedPoint.round_to_precision( value=div, current_precision=new_precision, target_precision=max_precision ) return FixedPoint(scaled_div, max_precision) def __rtruediv__(self, other: object) -> FixedPoint: """ Divide self with another fixed point number (or type convertible to a fixed point number). Note that the result is calculated first with arbitrary precision and then rounded to obtain the maximum precision of the two inputs. For example: - fxp("0.2") / fxp("3.0") = fxp("0.7") - fxp("0.1") / fxp("2.1") = fxp("0.0") :param other: a fixed point number or other type convertible to a fixed point number. :return: a / b :raise NotImplementedError: If the other object does not have a compatible type. """ if not isinstance(other, (str, numbers.Integral, float, FixedPoint)): raise NotImplementedError( "The compatible object types are string, integer, float and FixedPoint." ) return FixedPoint.fxp(other).__truediv__(self) @staticmethod def random_range( lower_bound: FixedPoint, upper_bound: FixedPoint, signed: bool = False, ) -> FixedPoint: r""" Return a uniformly random fixed-point in the interval [lower_bound, upper_bound). If signed is True, the interval becomes [lower_bound, upper_bound) $\cup$ (-upper_bound, lower_bound]. 
:param lower_bound: integer lower bound (inclusive) :param upper_bound: integer upper bound (exclusive) :param signed: whether the random fixed-point number should have a random sign or just be positive :return: a uniformly random fixed-point in the specified interval """ assert ( lower_bound < upper_bound ), "the upper bound needs to be larger than the lower bound" max_precision, (cal_lower_bound, cal_upper_bound) = FixedPoint.calibrate( lower_bound, upper_bound ) value = ( randbelow(cal_upper_bound.value - cal_lower_bound.value) + cal_lower_bound.value ) if signed: sign = randbelow(2) * 2 - 1 value *= sign return FixedPoint(value, max_precision)
[ "secrets.randbelow" ]
[((29141, 29197), 'secrets.randbelow', 'randbelow', (['(cal_upper_bound.value - cal_lower_bound.value)'], {}), '(cal_upper_bound.value - cal_lower_bound.value)\n', (29150, 29197), False, 'from secrets import randbelow\n'), ((29282, 29294), 'secrets.randbelow', 'randbelow', (['(2)'], {}), '(2)\n', (29291, 29294), False, 'from secrets import randbelow\n')]
from Point import Point class Rectangle: def __init__(self, p, w, h): self.CornerPoint = p self.width = w self.height = h def __str__(self): return f"Rectangle {self.width} by {self.height}, at {self.CornerPoint}." def transpose(self): self.width, self.height = self.height, self.width @property def diagonal(self): return (self.width**2 + self.height**2) ** 0.5 def contains(self, p): x_bound = self.CornerPoint.x + self.width y_bound = self.CornerPoint.y + self.height if self.CornerPoint.x <= p.x <= x_bound and self.CornerPoint.y <= p.y <= y_bound: return True else: return False @property def corners(self): cornersLst = [] cornersLst.append(self.CornerPoint) cornersLst.append(Point(self.CornerPoint.x + self.width, self.CornerPoint.y)) cornersLst.append(Point(self.CornerPoint.x + self.width, self.CornerPoint.y + self.height)) cornersLst.append(Point(self.CornerPoint.x, self.CornerPoint.y + self.height)) return cornersLst def P(self): return 2 * (self.width + self.height) def S(self): return self.width * self.height def isColliding(self, target): for corner in target.corners: if self.contains(corner): return True return False def t1(): r1 = Rectangle(Point(3, 4), 5, 6) print(r1) print(r1.P()) print(r1.S()) r1.transpose() print(r1) print(r1.diagonal) def t2(): r = Rectangle(Point(0, 0), 10, 5) r.contains(Point(0, 0)), #True r.contains(Point(3, 3)), #True r.contains(Point(3, 7)), #False r.contains(Point(3, 5)), #False r.contains(Point(3, 4.99999)), #True r.contains(Point(-3, -3)), #False print(r.diagonal) def t3(): r = Rectangle(Point(0, 0), 1, 1) r.contains(Point(0, 0)) r.contains(Point(1, 0)) r.contains(Point(0, 1)) r.contains(Point(1, 1)) [print(corner) for corner in r.corners] def t3(): r = Rectangle(Point(0, 0), 3, 3) #True [print(corner) for corner in r.corners] def t4(): r = Rectangle(Point(0, 0), 3, 3) #True k = Rectangle(Point(0, 0), 3, 3) #True s = Rectangle(Point(-2, 2), 3, 3) #True t = Rectangle(Point(3, 3), 3, 3) #True p = Rectangle(Point(-2, 2), 3, 3) #True 
v = Rectangle(Point(-2, -2), 1, 1) #False rects = [k, s, t, p, v] for rect in rects: print(r.isColliding(rect)) def main(): #t1() #t2() #t3() t4() if __name__ == "__main__": main()
[ "Point.Point" ]
[((1448, 1459), 'Point.Point', 'Point', (['(3)', '(4)'], {}), '(3, 4)\n', (1453, 1459), False, 'from Point import Point\n'), ((1605, 1616), 'Point.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (1610, 1616), False, 'from Point import Point\n'), ((1898, 1909), 'Point.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (1903, 1909), False, 'from Point import Point\n'), ((1932, 1943), 'Point.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (1937, 1943), False, 'from Point import Point\n'), ((1960, 1971), 'Point.Point', 'Point', (['(1)', '(0)'], {}), '(1, 0)\n', (1965, 1971), False, 'from Point import Point\n'), ((1988, 1999), 'Point.Point', 'Point', (['(0)', '(1)'], {}), '(0, 1)\n', (1993, 1999), False, 'from Point import Point\n'), ((2016, 2027), 'Point.Point', 'Point', (['(1)', '(1)'], {}), '(1, 1)\n', (2021, 2027), False, 'from Point import Point\n'), ((2103, 2114), 'Point.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (2108, 2114), False, 'from Point import Point\n'), ((2206, 2217), 'Point.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (2211, 2217), False, 'from Point import Point\n'), ((2249, 2260), 'Point.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (2254, 2260), False, 'from Point import Point\n'), ((2292, 2304), 'Point.Point', 'Point', (['(-2)', '(2)'], {}), '(-2, 2)\n', (2297, 2304), False, 'from Point import Point\n'), ((2336, 2347), 'Point.Point', 'Point', (['(3)', '(3)'], {}), '(3, 3)\n', (2341, 2347), False, 'from Point import Point\n'), ((2379, 2391), 'Point.Point', 'Point', (['(-2)', '(2)'], {}), '(-2, 2)\n', (2384, 2391), False, 'from Point import Point\n'), ((2423, 2436), 'Point.Point', 'Point', (['(-2)', '(-2)'], {}), '(-2, -2)\n', (2428, 2436), False, 'from Point import Point\n'), ((857, 915), 'Point.Point', 'Point', (['(self.CornerPoint.x + self.width)', 'self.CornerPoint.y'], {}), '(self.CornerPoint.x + self.width, self.CornerPoint.y)\n', (862, 915), False, 'from Point import Point\n'), ((943, 1015), 'Point.Point', 'Point', 
(['(self.CornerPoint.x + self.width)', '(self.CornerPoint.y + self.height)'], {}), '(self.CornerPoint.x + self.width, self.CornerPoint.y + self.height)\n', (948, 1015), False, 'from Point import Point\n'), ((1043, 1102), 'Point.Point', 'Point', (['self.CornerPoint.x', '(self.CornerPoint.y + self.height)'], {}), '(self.CornerPoint.x, self.CornerPoint.y + self.height)\n', (1048, 1102), False, 'from Point import Point\n'), ((1640, 1651), 'Point.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (1645, 1651), False, 'from Point import Point\n'), ((1675, 1686), 'Point.Point', 'Point', (['(3)', '(3)'], {}), '(3, 3)\n', (1680, 1686), False, 'from Point import Point\n'), ((1710, 1721), 'Point.Point', 'Point', (['(3)', '(7)'], {}), '(3, 7)\n', (1715, 1721), False, 'from Point import Point\n'), ((1746, 1757), 'Point.Point', 'Point', (['(3)', '(5)'], {}), '(3, 5)\n', (1751, 1757), False, 'from Point import Point\n'), ((1782, 1799), 'Point.Point', 'Point', (['(3)', '(4.99999)'], {}), '(3, 4.99999)\n', (1787, 1799), False, 'from Point import Point\n'), ((1823, 1836), 'Point.Point', 'Point', (['(-3)', '(-3)'], {}), '(-3, -3)\n', (1828, 1836), False, 'from Point import Point\n')]
# flake8: noqa from construct import * from xbox.sg.utils.struct import XStruct from xbox.sg.utils.adapters import XSwitch, XEnum, PrefixedBytes from xbox.nano.enum import ControlPayloadType, ControllerEvent """ ControlProtocol Streamer Messages """ session_init = XStruct( 'unk3' / GreedyBytes ) session_create = XStruct( 'guid' / Bytes(16), 'unk3' / PrefixedBytes(Int32ul) ) session_create_response = XStruct( 'guid' / Bytes(16) ) session_destroy = XStruct( 'unk3' / Float32l, 'unk5' / PrefixedBytes(Int32ul) ) video_statistics = XStruct( 'unk3' / Float32l, 'unk4' / Float32l, 'unk5' / Float32l, 'unk6' / Float32l, 'unk7' / Float32l, 'unk8' / Float32l ) realtime_telemetry = XStruct( 'data' / PrefixedArray(Int16ul, Struct( 'key' / Int16ul, 'value' / Int64ul )) ) change_video_quality = XStruct( 'unk3' / Int32ul, 'unk4' / Int32ul, 'unk5' / Int32ul, 'unk6' / Int32ul, 'unk7' / Int32ul, 'unk8' / Int32ul ) initiate_network_test = XStruct( 'guid' / Bytes(16) ) network_information = XStruct( 'guid' / Bytes(16), 'unk4' / Int64ul, 'unk5' / Int8ul, 'unk6' / Float32l ) network_test_response = XStruct( 'guid' / Bytes(16), 'unk3' / Float32l, 'unk4' / Float32l, 'unk5' / Float32l, 'unk6' / Float32l, 'unk7' / Float32l, 'unk8' / Int64ul, 'unk9' / Int64ul, 'unk10' / Float32l ) controller_event = XStruct( 'event' / XEnum(Int8ul, ControllerEvent), 'controller_num' / Int8ul ) control_packet = XStruct( 'prev_seq_dup' / Int32ul, 'unk1' / Int16ul, 'unk2' / Int16ul, 'opcode' / XEnum(Int16ul, ControlPayloadType), 'payload' / XSwitch( this.opcode, { ControlPayloadType.SessionInit: session_init, ControlPayloadType.SessionCreate: session_create, ControlPayloadType.SessionCreateResponse: session_create_response, ControlPayloadType.SessionDestroy: session_destroy, ControlPayloadType.VideoStatistics: video_statistics, ControlPayloadType.RealtimeTelemetry: realtime_telemetry, ControlPayloadType.ChangeVideoQuality: change_video_quality, ControlPayloadType.InitiateNetworkTest: initiate_network_test, 
ControlPayloadType.NetworkInformation: network_information, ControlPayloadType.NetworkTestResponse: network_test_response, ControlPayloadType.ControllerEvent: controller_event } ) )
[ "xbox.sg.utils.adapters.XEnum", "xbox.sg.utils.struct.XStruct", "xbox.sg.utils.adapters.XSwitch", "xbox.sg.utils.adapters.PrefixedBytes" ]
[((268, 297), 'xbox.sg.utils.struct.XStruct', 'XStruct', (["('unk3' / GreedyBytes)"], {}), "('unk3' / GreedyBytes)\n", (275, 297), False, 'from xbox.sg.utils.struct import XStruct\n'), ((567, 692), 'xbox.sg.utils.struct.XStruct', 'XStruct', (["('unk3' / Float32l)", "('unk4' / Float32l)", "('unk5' / Float32l)", "('unk6' / Float32l)", "('unk7' / Float32l)", "('unk8' / Float32l)"], {}), "('unk3' / Float32l, 'unk4' / Float32l, 'unk5' / Float32l, 'unk6' /\n Float32l, 'unk7' / Float32l, 'unk8' / Float32l)\n", (574, 692), False, 'from xbox.sg.utils.struct import XStruct\n'), ((876, 995), 'xbox.sg.utils.struct.XStruct', 'XStruct', (["('unk3' / Int32ul)", "('unk4' / Int32ul)", "('unk5' / Int32ul)", "('unk6' / Int32ul)", "('unk7' / Int32ul)", "('unk8' / Int32ul)"], {}), "('unk3' / Int32ul, 'unk4' / Int32ul, 'unk5' / Int32ul, 'unk6' /\n Int32ul, 'unk7' / Int32ul, 'unk8' / Int32ul)\n", (883, 995), False, 'from xbox.sg.utils.struct import XStruct\n'), ((369, 391), 'xbox.sg.utils.adapters.PrefixedBytes', 'PrefixedBytes', (['Int32ul'], {}), '(Int32ul)\n', (382, 391), False, 'from xbox.sg.utils.adapters import XSwitch, XEnum, PrefixedBytes\n'), ((521, 543), 'xbox.sg.utils.adapters.PrefixedBytes', 'PrefixedBytes', (['Int32ul'], {}), '(Int32ul)\n', (534, 543), False, 'from xbox.sg.utils.adapters import XSwitch, XEnum, PrefixedBytes\n'), ((1489, 1519), 'xbox.sg.utils.adapters.XEnum', 'XEnum', (['Int8ul', 'ControllerEvent'], {}), '(Int8ul, ControllerEvent)\n', (1494, 1519), False, 'from xbox.sg.utils.adapters import XSwitch, XEnum, PrefixedBytes\n'), ((1670, 1704), 'xbox.sg.utils.adapters.XEnum', 'XEnum', (['Int16ul', 'ControlPayloadType'], {}), '(Int16ul, ControlPayloadType)\n', (1675, 1704), False, 'from xbox.sg.utils.adapters import XSwitch, XEnum, PrefixedBytes\n'), ((1722, 2414), 'xbox.sg.utils.adapters.XSwitch', 'XSwitch', (['this.opcode', '{ControlPayloadType.SessionInit: session_init, ControlPayloadType.\n SessionCreate: session_create, 
ControlPayloadType.SessionCreateResponse:\n session_create_response, ControlPayloadType.SessionDestroy:\n session_destroy, ControlPayloadType.VideoStatistics: video_statistics,\n ControlPayloadType.RealtimeTelemetry: realtime_telemetry,\n ControlPayloadType.ChangeVideoQuality: change_video_quality,\n ControlPayloadType.InitiateNetworkTest: initiate_network_test,\n ControlPayloadType.NetworkInformation: network_information,\n ControlPayloadType.NetworkTestResponse: network_test_response,\n ControlPayloadType.ControllerEvent: controller_event}'], {}), '(this.opcode, {ControlPayloadType.SessionInit: session_init,\n ControlPayloadType.SessionCreate: session_create, ControlPayloadType.\n SessionCreateResponse: session_create_response, ControlPayloadType.\n SessionDestroy: session_destroy, ControlPayloadType.VideoStatistics:\n video_statistics, ControlPayloadType.RealtimeTelemetry:\n realtime_telemetry, ControlPayloadType.ChangeVideoQuality:\n change_video_quality, ControlPayloadType.InitiateNetworkTest:\n initiate_network_test, ControlPayloadType.NetworkInformation:\n network_information, ControlPayloadType.NetworkTestResponse:\n network_test_response, ControlPayloadType.ControllerEvent:\n controller_event})\n', (1729, 2414), False, 'from xbox.sg.utils.adapters import XSwitch, XEnum, PrefixedBytes\n')]
import cProfile import compas from compas.datastructures import Mesh, mesh_transform from compas.geometry import Frame, Point, Transformation, Vector print('compas.__version__ : ', compas.__version__) f1 = Frame([2, 2, 2], [0.12, 0.58, 0.81], [-0.80, 0.53, -0.26]) f2 = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15]) T = Transformation.from_frame_to_frame(f1, f2) mesh = Mesh.from_ply(compas.get_bunny()) print('mesh.number_of_vertices() : ', mesh.number_of_vertices()) def transforamtion_test(): for _ in range(1): mesh_transform(mesh, T) cProfile.run('transforamtion_test()', sort=2)
[ "compas.geometry.Frame", "compas.datastructures.mesh_transform", "cProfile.run", "compas.get_bunny", "compas.geometry.Transformation.from_frame_to_frame" ]
[((209, 266), 'compas.geometry.Frame', 'Frame', (['[2, 2, 2]', '[0.12, 0.58, 0.81]', '[-0.8, 0.53, -0.26]'], {}), '([2, 2, 2], [0.12, 0.58, 0.81], [-0.8, 0.53, -0.26])\n', (214, 266), False, 'from compas.geometry import Frame, Point, Transformation, Vector\n'), ((273, 331), 'compas.geometry.Frame', 'Frame', (['[1, 1, 1]', '[0.68, 0.68, 0.27]', '[-0.67, 0.73, -0.15]'], {}), '([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])\n', (278, 331), False, 'from compas.geometry import Frame, Point, Transformation, Vector\n'), ((336, 378), 'compas.geometry.Transformation.from_frame_to_frame', 'Transformation.from_frame_to_frame', (['f1', 'f2'], {}), '(f1, f2)\n', (370, 378), False, 'from compas.geometry import Frame, Point, Transformation, Vector\n'), ((572, 617), 'cProfile.run', 'cProfile.run', (['"""transforamtion_test()"""'], {'sort': '(2)'}), "('transforamtion_test()', sort=2)\n", (584, 617), False, 'import cProfile\n'), ((400, 418), 'compas.get_bunny', 'compas.get_bunny', ([], {}), '()\n', (416, 418), False, 'import compas\n'), ((546, 569), 'compas.datastructures.mesh_transform', 'mesh_transform', (['mesh', 'T'], {}), '(mesh, T)\n', (560, 569), False, 'from compas.datastructures import Mesh, mesh_transform\n')]
#!/usr/bin/python3 # find_conserved_blocks_v*.py ########################### # Overview: # Script analyses DNA multi sequence alignment data and searches for areas # (blocks) which are conserved (i.e. identical or very similar). Required # input is any valid FASTA file with multiple sequences. # User can adjust matching fraction (1 = identical match) and length of # matching region. Computational progress shown in console and text file # generated at end showing results. # # Requirements: # biopython library: >> pip/pip3 install biopython # # License: # Public domain (Creative Commons Zero v1.0 Universal License) # # Imported libraries ########################### from Bio.Alphabet import IUPAC from Bio import AlignIO from Bio.Align import MultipleSeqAlignment import time import os # Measure execution time of script start_time = time.time() # Program settings ########################### matchingFraction = 0.85 # 1 = identical (no mismatches) minLength = 200 # minimum number of base pairs matching in sequence minSequences = 7 # minimum number of sequences in a block with matching sequence # Input file data ########################### inputFileName = "file.fasta" inputFileType = "fasta" alphabetFormat = IUPAC.unambiguous_dna gapCharacter = "-" inputFilePath = os.path.dirname( os.path.realpath(__file__)) + "/" + inputFileName outputFilePath = inputFilePath + "_(" + str(matchingFraction) + "-" + \ str(minLength) + "-" + str(minSequences) + ").txt" ########################### alignment = AlignIO.read(inputFilePath, inputFileType, alphabet=alphabetFormat) sequenceLength = alignment.get_alignment_length() numSequences = alignment.__len__() # Error checking for user input if matchingFraction < 0 or matchingFraction > 1: print('Error: \'matchingFraction\' must be between 0 and 1') exit() if minLength >= sequenceLength or minLength <= 0: print('Error: \'minLength\' must be a positive number less than the sequence length in input file') exit() if minSequences >= numSequences 
or minSequences <= 0: print('Error: \'minSequences\' must be a positive number less than the number of sequences in input file') exit() def isBlockConserved(blockSequence): maxAllowedMismatchedColumns = blockSequence.get_alignment_length( ) - int(blockSequence.get_alignment_length() * matchingFraction) mismatchedColumns = 0 for column in range(blockSequence.get_alignment_length()): # First, check if entire first column is filled with gap characters # If so, return False if column == 0: gapCharacterCount = 0 for row in range(blockSequence.__len__()): if blockSequence[row][column] == gapCharacter: gapCharacterCount += 1 if gapCharacterCount == blockSequence.__len__(): return False # Find the first non-gap character in column firstNonGapCharacter = "" for row in range(blockSequence.__len__()): if blockSequence[row][column] != gapCharacter and firstNonGapCharacter == "": firstNonGapCharacter = blockSequence[row][column] break # Next, count number of columns with non-identical values or gap characters # If count increases above threshold, return False for row in range(blockSequence.__len__()): if blockSequence[row][column] == gapCharacter or blockSequence[row][column] != firstNonGapCharacter: mismatchedColumns += 1 if mismatchedColumns > maxAllowedMismatchedColumns: return False break # If loop executes completely to end, return True return True def isBlockAlreadyCounted(startRow, startColumn, endRow, endColumn): for block in conservedBlocksInfo: if startRow >= block[0] and startColumn >= block[1] and endRow <= block[2] and endColumn <= block[3]: return True return False def removeTrailingBlankColumns(blockSequence): removedColumns = 0 blankColumn = True while blankColumn: gapCharacterCount = 0 for row in range(blockSequence.__len__()): if blockSequence[row][-1] == gapCharacter: gapCharacterCount += 1 if gapCharacterCount == blockSequence.__len__(): # Remove last column, as it contains only gap characters blockSequence = blockSequence[:, :-1] removedColumns += 1 else: 
blankColumn = False return blockSequence, removedColumns def getNumberBlankColumns(blockSequence): numberColumns = 0 for column in range(blockSequence.get_alignment_length()): gapCharacterCount = 0 for row in range(blockSequence.__len__()): if blockSequence[row][column] == gapCharacter: gapCharacterCount += 1 if gapCharacterCount == blockSequence.__len__(): numberColumns += 1 return numberColumns def getMismatchString(blockSequence): mismatchString = "" for column in range(blockSequence.get_alignment_length()): mismatch = False firstNonGapCharacter = "" for row in range(blockSequence.__len__()): if blockSequence[row][column] != gapCharacter and firstNonGapCharacter == "": firstNonGapCharacter = blockSequence[row][column] break for row in range(blockSequence.__len__()): if blockSequence[row][column] != firstNonGapCharacter: mismatchString += "X" mismatch = True break if not mismatch: mismatchString += "-" return mismatchString def getNumberPositionString(length, start, multiple): returnString = "" i = start while i < length + start: if i % multiple == 0: returnString += str(i) # Move counter forward by number of extra characters used to print string i += (len(str(i)) - 1) else: returnString += " " i += 1 return returnString print("Input file = " + inputFilePath) print("\'matchingFraction\' = " + str(matchingFraction)) print("\'minLength\' = " + str(minLength)) print("\'minSequences\' = " + str(minSequences)) print("") conservedBlocks = [] conservedBlocksInfo = [] for startSequence in range(numSequences - minSequences + 1): current_time = time.time() - start_time print("Start of sequence #" + str(startSequence + 1) + " (current time = " + str(int(current_time)) + "s)") for index in range(sequenceLength - minLength + 1): length = minLength sequences = minSequences # Only test block if it (or larger version of it) not already added if not isBlockAlreadyCounted(startSequence, index, startSequence + sequences, index + length): largestConservedBlock = 
MultipleSeqAlignment([]) # Empty object consensusRight = True consensusDown = True while consensusRight: block = alignment[startSequence:( startSequence + sequences), index:(index + length)] if (index + length) > sequenceLength: # Reached end of sequence consensusRight = False elif not isBlockConserved(block): consensusRight = False if length == minLength: consensusDown = False # Don't bother searching down, as start block is not conserved else: length -= 1 else: length += 1 largestConservedBlock = block while consensusDown: block = alignment[startSequence:( startSequence + sequences), index:(index + length)] if (startSequence + sequences) > numSequences: # Reached end of sequence list consensusDown = False elif not isBlockConserved(block): consensusDown = False sequences -= 1 else: sequences += 1 largestConservedBlock = block if index != sequenceLength and length > minLength: # Before saving data, remove any columns of gap characters at end of block largestConservedBlock, removedColumns = removeTrailingBlankColumns( largestConservedBlock) # Save block data in list conservedBlocks.append(largestConservedBlock) conservedBlocksInfo.append( [startSequence, index, startSequence + sequences, index + length]) # Print information about saved data print("Block found: " + str(conservedBlocks[-1].__len__()) + " sequences, " + str( conservedBlocks[-1].get_alignment_length()) + " long (from position " + str(index + 1) + " to " + str(index + length + 1) + ")") # Print output to text file textFile = open(outputFilePath, "w") textFile.write("Python script = " + os.path.basename(__file__) + "\n") textFile.write("Input file = " + inputFilePath + "\n") textFile.write("\'matchingFraction\' = " + str(matchingFraction) + "\n") textFile.write("\'minLength\' = " + str(minLength) + "\n") textFile.write("\'minSequences\' = " + str(minSequences) + "\n\n") i = 0 for block in conservedBlocks: textFile.write("Number blank columns = " + str(getNumberBlankColumns(block)) + "\n") 
textFile.write("Length = " + str(block.get_alignment_length() - getNumberBlankColumns(block)) + " / Length with gaps = " + str(block.get_alignment_length() ) + " (position " + str(conservedBlocksInfo[i][1] + 1) + " to " + str(conservedBlocksInfo[i][1] + block.get_alignment_length() + 1) + ")\n") textFile.write("Number of sequences = " + str(block.__len__()) + "\n") for row in range(block.__len__()): textFile.write("[" + str(conservedBlocksInfo[i][0] + row + 1) + "] " + block[row].name + "\n") textFile.write(getNumberPositionString( block.get_alignment_length(), 1, 50) + "\n") textFile.write(getMismatchString(block) + "\n") for row in range(block.__len__()): textFile.write(str(block[row, :].seq) + "\n") textFile.write("\n") i += 1 textFile.close() # Print execution time of script end_time = time.time() total_time = end_time - start_time print("Program execution time: " + str(int(total_time)) + "s")
[ "Bio.AlignIO.read", "os.path.realpath", "os.path.basename", "Bio.Align.MultipleSeqAlignment", "time.time" ]
[((859, 870), 'time.time', 'time.time', ([], {}), '()\n', (868, 870), False, 'import time\n'), ((1559, 1626), 'Bio.AlignIO.read', 'AlignIO.read', (['inputFilePath', 'inputFileType'], {'alphabet': 'alphabetFormat'}), '(inputFilePath, inputFileType, alphabet=alphabetFormat)\n', (1571, 1626), False, 'from Bio import AlignIO\n'), ((10675, 10686), 'time.time', 'time.time', ([], {}), '()\n', (10684, 10686), False, 'import time\n'), ((6450, 6461), 'time.time', 'time.time', ([], {}), '()\n', (6459, 6461), False, 'import time\n'), ((1339, 1365), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1355, 1365), False, 'import os\n'), ((6928, 6952), 'Bio.Align.MultipleSeqAlignment', 'MultipleSeqAlignment', (['[]'], {}), '([])\n', (6948, 6952), False, 'from Bio.Align import MultipleSeqAlignment\n'), ((9217, 9243), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (9233, 9243), False, 'import os\n')]
__author__ = "<NAME>" __email__ = "<EMAIL>" """ Baseline parallel BFS implementation. Algorithm 1 Parallel BFS algorithm: High-level overview [1] was implemented. Reference: [1] https://www.researchgate.net/publication/220782745_Scalable_Graph_Exploration_on_Multicore_Processors """ import numpy as np from multiprocessing import Pool import multiprocessing as mp import time from src.load_graph import get_graph, gen_balanced_tree from functools import partial P_ARR = [] def get_adjacent_nodes(G, x): idx_lst = [] adj_list = G[x] for idx, val in enumerate(adj_list): if val == 1: idx_lst.append(idx) return idx_lst def get_neighbour(u, G, target): nq = [] # For each v adjacent to u # print(u) found_node = False for v in get_adjacent_nodes(G, u): if v == target: found_node = True if P_ARR[v] == np.inf: P_ARR[v] = u nq.append(v) return nq, found_node def bfs_parallel(G, target): r = 0 CQ = [] # Init all values in P to inf for i in range(G.shape[0]): P_ARR.append(np.inf) # Set root node P_ARR[r] = 0 # Enqueue r CQ.append(r) while len(CQ) != 0: print(f"CQ: {CQ}") # Parallel Dequeue num_cpu = mp.cpu_count() with Pool(num_cpu) as pool: results = pool.map(partial(get_neighbour, G=G, target=target), CQ) nq_tmp = [x for (x,y) in results] for (x,y) in results: if y: return True # print(nq_tmp) NQ = list(np.concatenate(nq_tmp).ravel()) # Swap CQ and NQ CQ = NQ return False def main(): start_time = time.time() G = gen_balanced_tree(3, 4, directed=True) # G = get_graph() find_node = bfs_parallel(G, target=10000) print("--- %s seconds ---" % (time.time() - start_time)) if find_node: print(f"Node Found") else: print(f"Node not Found") if __name__=='__main__': main()
[ "multiprocessing.cpu_count", "src.load_graph.gen_balanced_tree", "functools.partial", "multiprocessing.Pool", "numpy.concatenate", "time.time" ]
[((1822, 1833), 'time.time', 'time.time', ([], {}), '()\n', (1831, 1833), False, 'import time\n'), ((1844, 1882), 'src.load_graph.gen_balanced_tree', 'gen_balanced_tree', (['(3)', '(4)'], {'directed': '(True)'}), '(3, 4, directed=True)\n', (1861, 1882), False, 'from src.load_graph import get_graph, gen_balanced_tree\n'), ((1370, 1384), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (1382, 1384), True, 'import multiprocessing as mp\n'), ((1399, 1412), 'multiprocessing.Pool', 'Pool', (['num_cpu'], {}), '(num_cpu)\n', (1403, 1412), False, 'from multiprocessing import Pool\n'), ((1454, 1496), 'functools.partial', 'partial', (['get_neighbour'], {'G': 'G', 'target': 'target'}), '(get_neighbour, G=G, target=target)\n', (1461, 1496), False, 'from functools import partial\n'), ((1994, 2005), 'time.time', 'time.time', ([], {}), '()\n', (2003, 2005), False, 'import time\n'), ((1686, 1708), 'numpy.concatenate', 'np.concatenate', (['nq_tmp'], {}), '(nq_tmp)\n', (1700, 1708), True, 'import numpy as np\n')]
from django.conf.urls import url from . import views urlpatterns = [ # generic profile endpoint url(r'^profile/(?P<username>\w+)/', views.profile, name='profile-api'), # current user profile url(r'^profile/', views.profile, name='profile-api'), ]
[ "django.conf.urls.url" ]
[((106, 176), 'django.conf.urls.url', 'url', (['"""^profile/(?P<username>\\\\w+)/"""', 'views.profile'], {'name': '"""profile-api"""'}), "('^profile/(?P<username>\\\\w+)/', views.profile, name='profile-api')\n", (109, 176), False, 'from django.conf.urls import url\n'), ((209, 260), 'django.conf.urls.url', 'url', (['"""^profile/"""', 'views.profile'], {'name': '"""profile-api"""'}), "('^profile/', views.profile, name='profile-api')\n", (212, 260), False, 'from django.conf.urls import url\n')]
from torch.utils.data import Dataset import torch import os import pandas as pd from PIL import Image class Dataset(Dataset): def __init__(self, csv_file, image_dir, mask_dir, img_col='image', mask_col='mask', transform=None, batch_size=32): """ Args: csv_file (Pandas dataframe): Path to the csv file with list of images in the dataset. image_dir (string): Directory with all the images. mask_dir (string): Directory with all the masks. col_filename (string): column name containing images names. transform_img (callable, optional): Optional transform to be applied on images only. transform_img_mask (callable, optional): Optional transform to be applied on images and masks simultaneously. """ self.image_names = pd.read_csv(csv_file)[img_col] self.mask_names = pd.read_csv(csv_file)[mask_col] self.image_dir = image_dir self.mask_dir = mask_dir self.transform = transform self.batch_size = batch_size def __len__(self): return len(self.image_names) def __getitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() # get image data filename = self.image_names.iloc[idx] img_name = os.path.join(self.image_dir, filename) img = Image.open(img_name).convert('RGB') # get mask_data filename = self.mask_names.iloc[idx] mask_name = os.path.join(self.mask_dir, filename) mask = Image.open(mask_name).convert('RGB') if self.transform is not None: img, mask = self.transform(img, mask) return img, mask
[ "torch.is_tensor", "PIL.Image.open", "os.path.join", "pandas.read_csv" ]
[((1195, 1215), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (1210, 1215), False, 'import torch\n'), ((1338, 1376), 'os.path.join', 'os.path.join', (['self.image_dir', 'filename'], {}), '(self.image_dir, filename)\n', (1350, 1376), False, 'import os\n'), ((1525, 1562), 'os.path.join', 'os.path.join', (['self.mask_dir', 'filename'], {}), '(self.mask_dir, filename)\n', (1537, 1562), False, 'import os\n'), ((849, 870), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (860, 870), True, 'import pandas as pd\n'), ((906, 927), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (917, 927), True, 'import pandas as pd\n'), ((1391, 1411), 'PIL.Image.open', 'Image.open', (['img_name'], {}), '(img_name)\n', (1401, 1411), False, 'from PIL import Image\n'), ((1578, 1599), 'PIL.Image.open', 'Image.open', (['mask_name'], {}), '(mask_name)\n', (1588, 1599), False, 'from PIL import Image\n')]
#!/usr/bin/python3 # count_mkdir.py # Author: Guochao # Created on 17-01-2022 # Print timing of mkdir from bcc import BPF program = r""" #include <uapi/linux/ptrace.h> BPF_HASH(count); int do_trace(struct pt_regs *ctx) { u64 c1 = 1, *cnt, delta, key = 1; cnt = count.lookup(&key); if (cnt != NULL) { c1 = *cnt + 1; } count.update(&key, &c1); bpf_trace_printk("%d\n", c1); return 0; } """ b = BPF(text=program) b.attach_kprobe(event=b.get_syscall_fnname("mkdir"), fn_name="do_trace") start = 0 while 1: (task, pid, cpu, flags, ts, cnt) = b.trace_fields() if start == 0: start = ts ts = ts - start cnt = cnt.decode("utf8") print("At time %.2f s: mkdir detected, count: %s" % (ts, cnt))
[ "bcc.BPF" ]
[((440, 457), 'bcc.BPF', 'BPF', ([], {'text': 'program'}), '(text=program)\n', (443, 457), False, 'from bcc import BPF\n')]
import numpy as np import numpy.testing as npt import pytest from openscm_units import unit_registry as ur from test_model_base import TwoLayerVariantTester from openscm_twolayermodel import ImpulseResponseModel, TwoLayerModel from openscm_twolayermodel.base import _calculate_geoffroy_helper_parameters from openscm_twolayermodel.constants import DENSITY_WATER, HEAT_CAPACITY_WATER class TestImpulseResponseModel(TwoLayerVariantTester): tmodel = ImpulseResponseModel parameters = dict( q1=0.33 * ur("delta_degC/(W/m^2)"), q2=0.41 * ur("delta_degC/(W/m^2)"), d1=239.0 * ur("yr"), d2=4.1 * ur("yr"), efficacy=1.0 * ur("dimensionless"), delta_t=1 * ur("yr"), ) def test_init(self): init_kwargs = dict( q1=0.3 * ur("delta_degC/(W/m^2)"), q2=0.4 * ur("delta_degC/(W/m^2)"), d1=25.0 * ur("yr"), d2=300 * ur("yr"), efficacy=1.1 * ur("dimensionless"), delta_t=1 / 12 * ur("yr"), ) res = self.tmodel(**init_kwargs) for k, v in init_kwargs.items(): assert getattr(res, k) == v, "{} not set properly".format(k) assert np.isnan(res.erf) assert np.isnan(res._temp1_mag) assert np.isnan(res._temp2_mag) assert np.isnan(res._rndt_mag) def test_init_backwards_timescales_error(self): init_kwargs = dict(d1=250.0 * ur("yr"), d2=3 * ur("yr"),) error_msg = "The short-timescale must be d1" with pytest.raises(ValueError, match=error_msg): self.tmodel(**init_kwargs) def test_calculate_next_temp(self, check_same_unit): tdelta_t = 30 * 24 * 60 * 60 ttemp = 0.1 tq = 0.4 td = 35.0 tf = 1.2 res = self.tmodel._calculate_next_temp(tdelta_t, ttemp, tq, td, tf) expected = ttemp * np.exp(-tdelta_t / td) + tf * tq * ( 1 - np.exp(-tdelta_t / td) ) npt.assert_equal(res, expected) check_same_unit(self.tmodel._temp1_unit, self.tmodel._temp2_unit) check_same_unit(self.tmodel._q1_unit, self.tmodel._q2_unit) check_same_unit(self.tmodel._delta_t_unit, self.tmodel._d1_unit) check_same_unit(self.tmodel._delta_t_unit, self.tmodel._d2_unit) check_same_unit( self.tmodel._temp1_unit, (1.0 * ur(self.tmodel._erf_unit) * 1.0 * 
ur(self.tmodel._q1_unit)).units, ) def test_calculate_next_rndt(self, check_same_unit): ttemp1 = 1.1 ttemp_2 = 0.6 tq1 = 0.5 tq2 = 0.3 td1 = 30 td2 = 600 terf = 1.2 tefficacy = 1.13 helper = self.tmodel( q1=tq1 * ur("delta_degC/(W/m^2)"), q2=tq2 * ur("delta_degC/(W/m^2)"), d1=td1 * ur("yr"), d2=td2 * ur("yr"), efficacy=tefficacy * ur("dimensionless"), ) helper_twolayer = TwoLayerModel(**helper.get_two_layer_parameters()) gh = _calculate_geoffroy_helper_parameters( helper_twolayer.du, helper_twolayer.dl, helper_twolayer.lambda0, helper_twolayer.efficacy, helper_twolayer.eta, ) # see notebook for discussion of why this is so efficacy_term = ( helper_twolayer.eta * (helper_twolayer.efficacy - 1) * ( ((1 - gh["phi1"]) * ttemp1 * ur("delta_degC")) + ((1 - gh["phi2"]) * ttemp_2 * ur("delta_degC")) ) ) expected = ( terf * ur(helper._erf_unit) - ((ttemp1 + ttemp_2) * ur(helper._temp1_unit)) * helper_twolayer.lambda0 - efficacy_term ) assert str(expected.units) == "watt / meter ** 2" res = helper._calculate_next_rndt(ttemp1, ttemp_2, terf, tefficacy) npt.assert_allclose(res, expected.magnitude) # check internal units make sense check_same_unit(self.tmodel._q1_unit, self.tmodel._q2_unit) check_same_unit( helper_twolayer._lambda0_unit, (1.0 * ur(self.tmodel._q2_unit) ** -1) ) check_same_unit( self.tmodel._erf_unit, ( ( 1.0 * ur(self.tmodel._temp1_unit) / (1.0 * ur(self.tmodel._q1_unit)) ).units ), ) check_same_unit( self.tmodel._erf_unit, efficacy_term.units, ) def test_step(self): # move to integration tests terf = np.array([3, 4, 5, 6, 7]) * ur("W/m^2") model = self.tmodel() model.set_drivers(terf) model.reset() model.step() assert model._timestep_idx == 0 npt.assert_equal(model._temp1_mag[model._timestep_idx], 0) npt.assert_equal(model._temp2_mag[model._timestep_idx], 0) npt.assert_equal(model._rndt_mag[model._timestep_idx], 0) model.step() model.step() model.step() assert model._timestep_idx == 3 npt.assert_equal( model._temp1_mag[model._timestep_idx], 
model._calculate_next_temp( model._delta_t_mag, model._temp1_mag[model._timestep_idx - 1], model._q1_mag, model._d1_mag, model._erf_mag[model._timestep_idx - 1], ), ) npt.assert_equal( model._temp2_mag[model._timestep_idx], model._calculate_next_temp( model._delta_t_mag, model._temp2_mag[model._timestep_idx - 1], model._q2_mag, model._d2_mag, model._erf_mag[model._timestep_idx - 1], ), ) npt.assert_equal( model._rndt_mag[model._timestep_idx], model._calculate_next_rndt( model._temp1_mag[model._timestep_idx - 1], model._temp2_mag[model._timestep_idx - 1], model._erf_mag[model._timestep_idx - 1], model._efficacy_mag, ), ) def test_reset(self): terf = np.array([0, 1, 2]) * ur("W/m^2") model = self.tmodel() model.set_drivers(terf) def assert_is_nan_and_erf_shape(inp): assert np.isnan(inp).all() assert inp.shape == terf.shape model.reset() # after reset, we are not in any timestep assert np.isnan(model._timestep_idx) assert_is_nan_and_erf_shape(model._temp1_mag) assert_is_nan_and_erf_shape(model._temp2_mag) assert_is_nan_and_erf_shape(model._rndt_mag) def test_reset_run_reset(self): # move to integration tests terf = np.array([0, 1, 2, 3, 4, 5]) * ur("W/m^2") model = self.tmodel() model.set_drivers(terf) def assert_is_nan_and_erf_shape(inp): assert np.isnan(inp).all() assert inp.shape == terf.shape model.reset() assert_is_nan_and_erf_shape(model._temp1_mag) assert_is_nan_and_erf_shape(model._temp2_mag) assert_is_nan_and_erf_shape(model._rndt_mag) def assert_ge_zero_and_erf_shape(inp): assert not (inp < 0).any() assert inp.shape == terf.shape model.run() assert_ge_zero_and_erf_shape(model._temp1_mag) assert_ge_zero_and_erf_shape(model._temp2_mag) assert_ge_zero_and_erf_shape(model._rndt_mag) model.reset() assert_is_nan_and_erf_shape(model._temp1_mag) assert_is_nan_and_erf_shape(model._temp2_mag) assert_is_nan_and_erf_shape(model._rndt_mag) def test_get_two_layer_model_parameters(self, check_equal_pint): tq1 = 0.3 * ur("delta_degC/(W/m^2)") tq2 = 0.4 * 
ur("delta_degC/(W/m^2)") td1 = 3 * ur("yr") td2 = 300.0 * ur("yr") tefficacy = 1.2 * ur("dimensionless") start_paras = dict(d1=td1, d2=td2, q1=tq1, q2=tq2, efficacy=tefficacy,) mod_instance = self.tmodel(**start_paras) # for explanation of what is going on, see # impulse-response-equivalence.ipynb efficacy = tefficacy lambda0 = 1 / (tq1 + tq2) C = (td1 * td2) / (tq1 * td2 + tq2 * td1) a1 = lambda0 * tq1 a2 = lambda0 * tq2 tau1 = td1 tau2 = td2 C_D = (lambda0 * (tau1 * a1 + tau2 * a2) - C) / efficacy eta = C_D / (tau1 * a2 + tau2 * a1) expected = { "lambda0": lambda0, "du": C / (DENSITY_WATER * HEAT_CAPACITY_WATER), "dl": C_D / (DENSITY_WATER * HEAT_CAPACITY_WATER), "eta": eta, "efficacy": efficacy, } res = mod_instance.get_two_layer_parameters() assert res == expected # check circularity circular_params = TwoLayerModel(**res).get_impulse_response_parameters() for k, v in circular_params.items(): check_equal_pint(v, start_paras[k])
[ "numpy.testing.assert_equal", "openscm_units.unit_registry", "numpy.testing.assert_allclose", "openscm_twolayermodel.base._calculate_geoffroy_helper_parameters", "numpy.exp", "numpy.array", "openscm_twolayermodel.TwoLayerModel", "numpy.isnan", "pytest.raises" ]
[((1204, 1221), 'numpy.isnan', 'np.isnan', (['res.erf'], {}), '(res.erf)\n', (1212, 1221), True, 'import numpy as np\n'), ((1237, 1261), 'numpy.isnan', 'np.isnan', (['res._temp1_mag'], {}), '(res._temp1_mag)\n', (1245, 1261), True, 'import numpy as np\n'), ((1277, 1301), 'numpy.isnan', 'np.isnan', (['res._temp2_mag'], {}), '(res._temp2_mag)\n', (1285, 1301), True, 'import numpy as np\n'), ((1317, 1340), 'numpy.isnan', 'np.isnan', (['res._rndt_mag'], {}), '(res._rndt_mag)\n', (1325, 1340), True, 'import numpy as np\n'), ((1977, 2008), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (1993, 2008), True, 'import numpy.testing as npt\n'), ((3014, 3168), 'openscm_twolayermodel.base._calculate_geoffroy_helper_parameters', '_calculate_geoffroy_helper_parameters', (['helper_twolayer.du', 'helper_twolayer.dl', 'helper_twolayer.lambda0', 'helper_twolayer.efficacy', 'helper_twolayer.eta'], {}), '(helper_twolayer.du, helper_twolayer.\n dl, helper_twolayer.lambda0, helper_twolayer.efficacy, helper_twolayer.eta)\n', (3051, 3168), False, 'from openscm_twolayermodel.base import _calculate_geoffroy_helper_parameters\n'), ((3893, 3937), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res', 'expected.magnitude'], {}), '(res, expected.magnitude)\n', (3912, 3937), True, 'import numpy.testing as npt\n'), ((4759, 4817), 'numpy.testing.assert_equal', 'npt.assert_equal', (['model._temp1_mag[model._timestep_idx]', '(0)'], {}), '(model._temp1_mag[model._timestep_idx], 0)\n', (4775, 4817), True, 'import numpy.testing as npt\n'), ((4826, 4884), 'numpy.testing.assert_equal', 'npt.assert_equal', (['model._temp2_mag[model._timestep_idx]', '(0)'], {}), '(model._temp2_mag[model._timestep_idx], 0)\n', (4842, 4884), True, 'import numpy.testing as npt\n'), ((4893, 4950), 'numpy.testing.assert_equal', 'npt.assert_equal', (['model._rndt_mag[model._timestep_idx]', '(0)'], {}), '(model._rndt_mag[model._timestep_idx], 0)\n', (4909, 4950), True, 
'import numpy.testing as npt\n'), ((6479, 6508), 'numpy.isnan', 'np.isnan', (['model._timestep_idx'], {}), '(model._timestep_idx)\n', (6487, 6508), True, 'import numpy as np\n'), ((1527, 1569), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'error_msg'}), '(ValueError, match=error_msg)\n', (1540, 1569), False, 'import pytest\n'), ((4564, 4589), 'numpy.array', 'np.array', (['[3, 4, 5, 6, 7]'], {}), '([3, 4, 5, 6, 7])\n', (4572, 4589), True, 'import numpy as np\n'), ((4592, 4603), 'openscm_units.unit_registry', 'ur', (['"""W/m^2"""'], {}), "('W/m^2')\n", (4594, 4603), True, 'from openscm_units import unit_registry as ur\n'), ((6165, 6184), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (6173, 6184), True, 'import numpy as np\n'), ((6187, 6198), 'openscm_units.unit_registry', 'ur', (['"""W/m^2"""'], {}), "('W/m^2')\n", (6189, 6198), True, 'from openscm_units import unit_registry as ur\n'), ((6758, 6786), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (6766, 6786), True, 'import numpy as np\n'), ((6789, 6800), 'openscm_units.unit_registry', 'ur', (['"""W/m^2"""'], {}), "('W/m^2')\n", (6791, 6800), True, 'from openscm_units import unit_registry as ur\n'), ((7766, 7790), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (7768, 7790), True, 'from openscm_units import unit_registry as ur\n'), ((7811, 7835), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (7813, 7835), True, 'from openscm_units import unit_registry as ur\n'), ((7854, 7862), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (7856, 7862), True, 'from openscm_units import unit_registry as ur\n'), ((7885, 7893), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (7887, 7893), True, 'from openscm_units import unit_registry as ur\n'), ((7920, 7939), 'openscm_units.unit_registry', 'ur', 
(['"""dimensionless"""'], {}), "('dimensionless')\n", (7922, 7939), True, 'from openscm_units import unit_registry as ur\n'), ((517, 541), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (519, 541), True, 'from openscm_units import unit_registry as ur\n'), ((561, 585), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (563, 585), True, 'from openscm_units import unit_registry as ur\n'), ((606, 614), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (608, 614), True, 'from openscm_units import unit_registry as ur\n'), ((633, 641), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (635, 641), True, 'from openscm_units import unit_registry as ur\n'), ((666, 685), 'openscm_units.unit_registry', 'ur', (['"""dimensionless"""'], {}), "('dimensionless')\n", (668, 685), True, 'from openscm_units import unit_registry as ur\n'), ((707, 715), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (709, 715), True, 'from openscm_units import unit_registry as ur\n'), ((1882, 1904), 'numpy.exp', 'np.exp', (['(-tdelta_t / td)'], {}), '(-tdelta_t / td)\n', (1888, 1904), True, 'import numpy as np\n'), ((8873, 8893), 'openscm_twolayermodel.TwoLayerModel', 'TwoLayerModel', ([], {}), '(**res)\n', (8886, 8893), False, 'from openscm_twolayermodel import ImpulseResponseModel, TwoLayerModel\n'), ((798, 822), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (800, 822), True, 'from openscm_units import unit_registry as ur\n'), ((845, 869), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (847, 869), True, 'from openscm_units import unit_registry as ur\n'), ((893, 901), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (895, 901), True, 'from openscm_units import unit_registry as ur\n'), ((924, 932), 
'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (926, 932), True, 'from openscm_units import unit_registry as ur\n'), ((961, 980), 'openscm_units.unit_registry', 'ur', (['"""dimensionless"""'], {}), "('dimensionless')\n", (963, 980), True, 'from openscm_units import unit_registry as ur\n'), ((1011, 1019), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (1013, 1019), True, 'from openscm_units import unit_registry as ur\n'), ((1432, 1440), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (1434, 1440), True, 'from openscm_units import unit_registry as ur\n'), ((1449, 1457), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (1451, 1457), True, 'from openscm_units import unit_registry as ur\n'), ((1935, 1957), 'numpy.exp', 'np.exp', (['(-tdelta_t / td)'], {}), '(-tdelta_t / td)\n', (1941, 1957), True, 'import numpy as np\n'), ((2413, 2437), 'openscm_units.unit_registry', 'ur', (['self.tmodel._q1_unit'], {}), '(self.tmodel._q1_unit)\n', (2415, 2437), True, 'from openscm_units import unit_registry as ur\n'), ((2724, 2748), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (2726, 2748), True, 'from openscm_units import unit_registry as ur\n'), ((2771, 2795), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (2773, 2795), True, 'from openscm_units import unit_registry as ur\n'), ((2818, 2826), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (2820, 2826), True, 'from openscm_units import unit_registry as ur\n'), ((2849, 2857), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (2851, 2857), True, 'from openscm_units import unit_registry as ur\n'), ((2892, 2911), 'openscm_units.unit_registry', 'ur', (['"""dimensionless"""'], {}), "('dimensionless')\n", (2894, 2911), True, 'from openscm_units import unit_registry as ur\n'), ((3455, 3471), 
'openscm_units.unit_registry', 'ur', (['"""delta_degC"""'], {}), "('delta_degC')\n", (3457, 3471), True, 'from openscm_units import unit_registry as ur\n'), ((3521, 3537), 'openscm_units.unit_registry', 'ur', (['"""delta_degC"""'], {}), "('delta_degC')\n", (3523, 3537), True, 'from openscm_units import unit_registry as ur\n'), ((3604, 3624), 'openscm_units.unit_registry', 'ur', (['helper._erf_unit'], {}), '(helper._erf_unit)\n', (3606, 3624), True, 'from openscm_units import unit_registry as ur\n'), ((4124, 4148), 'openscm_units.unit_registry', 'ur', (['self.tmodel._q2_unit'], {}), '(self.tmodel._q2_unit)\n', (4126, 4148), True, 'from openscm_units import unit_registry as ur\n'), ((6328, 6341), 'numpy.isnan', 'np.isnan', (['inp'], {}), '(inp)\n', (6336, 6341), True, 'import numpy as np\n'), ((6930, 6943), 'numpy.isnan', 'np.isnan', (['inp'], {}), '(inp)\n', (6938, 6943), True, 'import numpy as np\n'), ((3661, 3683), 'openscm_units.unit_registry', 'ur', (['helper._temp1_unit'], {}), '(helper._temp1_unit)\n', (3663, 3683), True, 'from openscm_units import unit_registry as ur\n'), ((4284, 4311), 'openscm_units.unit_registry', 'ur', (['self.tmodel._temp1_unit'], {}), '(self.tmodel._temp1_unit)\n', (4286, 4311), True, 'from openscm_units import unit_registry as ur\n'), ((4321, 4345), 'openscm_units.unit_registry', 'ur', (['self.tmodel._q1_unit'], {}), '(self.tmodel._q1_unit)\n', (4323, 4345), True, 'from openscm_units import unit_registry as ur\n'), ((2379, 2404), 'openscm_units.unit_registry', 'ur', (['self.tmodel._erf_unit'], {}), '(self.tmodel._erf_unit)\n', (2381, 2404), True, 'from openscm_units import unit_registry as ur\n')]
import pandas as pd from util import StockAnalysis, AllStocks import talib import os import numpy as np class FilterEma: def __init__(self, barCount, showtCount=None, longCount=None): self.sa = StockAnalysis() self.jsonData = self.sa.GetJson self.trendLength = int(os.getenv('FILTER_TREND_LENGTH', '30')) self.trendAt = int(os.getenv('FILTER_TREND_AT', '5')) self.nearPercent = 0.05 self.setBarCount(barCount) self.shortCount = int(os.getenv('FILTER_EMA_SHORT_COUNT', '14')) self.longCount = int(os.getenv('FILTER_EMA_LONG_COUNT', '50')) def setSymbol(self, symbol): self.symbol = symbol def setBarCount(self, barCount): self.barCount = barCount switcher = { 20: 'ema20', 50: 'ema50', 200: 'ema200' } self.filterName = switcher.get(barCount, 'ema20') self.trendLength = 30 def FilterOn(self, closes, outputShort, outputLong): # create dataframe with close and output idx = 0 repeatCount = 0 lastState = 0 longs = iter(outputLong) prices = iter(closes) for short in outputShort: idx += 1 long = next(longs) price = next(prices) if np.isnan(short) or np.isnan(long) or np.isnan(price): break if price > short and short > long: thisState = 1 elif price < short and short < long: thisState = -1 elif idx <= 5: thisState = None lastState = 0 repeatCount = 0 else: break if lastState == 0 or lastState == thisState: repeatCount += 1 else: break return repeatCount def isNearEma(self, close, open, ema): isNear = True if abs(close - ema) / close <= self.nearPercent else False if isNear: return True return True if abs(open - ema) / open <= self.nearPercent else False def Run(self, symbol): isLoaded, tp = AllStocks.GetDailyStockData(symbol) if isLoaded: try: self.setSymbol(symbol) close = tp.Close.to_numpy() open = tp.Open.to_numpy() output = talib.EMA(close[::-1], timeperiod=self.barCount) self.sa.UpdateFilter( self.jsonData, self.symbol, self.filterName, self.isNearEma(close[0], open[0], output[-1])) except Exception as e: print('filterEma.Run() {}'.format(e)) self.sa.UpdateFilter( 
self.jsonData, self.symbol, self.filterName, False) return False def Trending(self, symbol): isLoaded, tp = AllStocks.GetDailyStockData(symbol) if isLoaded: try: close = tp.Close.to_numpy() outputShort = talib.EMA( close[::-1], timeperiod=self.shortCount) outputLong = talib.EMA(close[::-1], timeperiod=self.longCount) trendingDays = self.FilterOn( close, outputShort[::-1], outputLong[::-1]) self.sa.UpdateFilter(self.jsonData, symbol, 'td', trendingDays) except Exception as e: print('filterEma.Run() {}'.format(e)) self.sa.UpdateFilter(self.jsonData, symbol, 'td', 0) return False def WriteFilter(self): self.sa.WriteJson(self.jsonData) @staticmethod def All(): filter = FilterEma(20) AllStocks.Run(filter.Run, False) filter.setBarCount(50) AllStocks.Run(filter.Run, False) filter.setBarCount(200) AllStocks.Run(filter.Run, False) AllStocks.Run(filter.Trending, False) filter.WriteFilter() if __name__ == '__main__': FilterEma.All() print('---------- done ----------') # filter = FilterEma(symbol='AAPL', barCount=20) # up, down = filter.Run(filter.symbol) # print(up, down)
[ "talib.EMA", "os.getenv", "util.StockAnalysis", "util.AllStocks.GetDailyStockData", "numpy.isnan", "util.AllStocks.Run" ]
[((207, 222), 'util.StockAnalysis', 'StockAnalysis', ([], {}), '()\n', (220, 222), False, 'from util import StockAnalysis, AllStocks\n'), ((2150, 2185), 'util.AllStocks.GetDailyStockData', 'AllStocks.GetDailyStockData', (['symbol'], {}), '(symbol)\n', (2177, 2185), False, 'from util import StockAnalysis, AllStocks\n'), ((2849, 2884), 'util.AllStocks.GetDailyStockData', 'AllStocks.GetDailyStockData', (['symbol'], {}), '(symbol)\n', (2876, 2884), False, 'from util import StockAnalysis, AllStocks\n'), ((3659, 3691), 'util.AllStocks.Run', 'AllStocks.Run', (['filter.Run', '(False)'], {}), '(filter.Run, False)\n', (3672, 3691), False, 'from util import StockAnalysis, AllStocks\n'), ((3731, 3763), 'util.AllStocks.Run', 'AllStocks.Run', (['filter.Run', '(False)'], {}), '(filter.Run, False)\n', (3744, 3763), False, 'from util import StockAnalysis, AllStocks\n'), ((3804, 3836), 'util.AllStocks.Run', 'AllStocks.Run', (['filter.Run', '(False)'], {}), '(filter.Run, False)\n', (3817, 3836), False, 'from util import StockAnalysis, AllStocks\n'), ((3845, 3882), 'util.AllStocks.Run', 'AllStocks.Run', (['filter.Trending', '(False)'], {}), '(filter.Trending, False)\n', (3858, 3882), False, 'from util import StockAnalysis, AllStocks\n'), ((294, 332), 'os.getenv', 'os.getenv', (['"""FILTER_TREND_LENGTH"""', '"""30"""'], {}), "('FILTER_TREND_LENGTH', '30')\n", (303, 332), False, 'import os\n'), ((361, 394), 'os.getenv', 'os.getenv', (['"""FILTER_TREND_AT"""', '"""5"""'], {}), "('FILTER_TREND_AT', '5')\n", (370, 394), False, 'import os\n'), ((493, 534), 'os.getenv', 'os.getenv', (['"""FILTER_EMA_SHORT_COUNT"""', '"""14"""'], {}), "('FILTER_EMA_SHORT_COUNT', '14')\n", (502, 534), False, 'import os\n'), ((565, 605), 'os.getenv', 'os.getenv', (['"""FILTER_EMA_LONG_COUNT"""', '"""50"""'], {}), "('FILTER_EMA_LONG_COUNT', '50')\n", (574, 605), False, 'import os\n'), ((1302, 1317), 'numpy.isnan', 'np.isnan', (['short'], {}), '(short)\n', (1310, 1317), True, 'import numpy as np\n'), ((1321, 
1335), 'numpy.isnan', 'np.isnan', (['long'], {}), '(long)\n', (1329, 1335), True, 'import numpy as np\n'), ((1339, 1354), 'numpy.isnan', 'np.isnan', (['price'], {}), '(price)\n', (1347, 1354), True, 'import numpy as np\n'), ((2374, 2422), 'talib.EMA', 'talib.EMA', (['close[::-1]'], {'timeperiod': 'self.barCount'}), '(close[::-1], timeperiod=self.barCount)\n', (2383, 2422), False, 'import talib\n'), ((2997, 3047), 'talib.EMA', 'talib.EMA', (['close[::-1]'], {'timeperiod': 'self.shortCount'}), '(close[::-1], timeperiod=self.shortCount)\n', (3006, 3047), False, 'import talib\n'), ((3098, 3147), 'talib.EMA', 'talib.EMA', (['close[::-1]'], {'timeperiod': 'self.longCount'}), '(close[::-1], timeperiod=self.longCount)\n', (3107, 3147), False, 'import talib\n')]
import os import sys import inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(os.path.dirname(currentdir)) os.sys.path.insert(0,parentdir) import os sys.path.append(os.getcwd()+'/ReinforcementLearning') import MyGym import gym import argparse import pybullet as p import pybullet_envs import time def random_action_test(args): count = 0 if args.render: env = gym.make(args.env, render=True) else: env = gym.make(args.env) env.reset() sample = env.action_space.sample() action = sample count = 0 for i in range(args.steps): action = env.action_space.sample() #action = [0]*len(action) obs, rewards, done, _ = env.step(action) if done: obs = env.reset() # time.sleep(1./240.) time.sleep(0.016) def computation_time_test(args): t_0 = time.time() count = 0 env = gym.make(args.env) env.reset() sample = env.action_space.sample() t_1 = time.time() print("initialization time : ", t_1 - t_0) for i in range(args.steps): action = env.action_space.sample() action = [0]*len(action) obs, rewards, done, _ = env.step(action) # if done: # obs = env.reset() t_2 = time.time() print("computation time : ", t_2 - t_1) def main(): import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--env', help='environment ID') parser.add_argument('--render', help='OpenGL Visualizer', type=bool, default=True) parser.add_argument('--steps', help='Number of steps', type=int, default=5000) args = parser.parse_args() ### TEST random_action_test(args) # computation_time_test(args) if __name__ == '__main__': main()
[ "argparse.ArgumentParser", "inspect.currentframe", "os.sys.path.insert", "os.getcwd", "time.sleep", "os.path.dirname", "time.time", "gym.make" ]
[((180, 212), 'os.sys.path.insert', 'os.sys.path.insert', (['(0)', 'parentdir'], {}), '(0, parentdir)\n', (198, 212), False, 'import os\n'), ((151, 178), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (166, 178), False, 'import os\n'), ((918, 929), 'time.time', 'time.time', ([], {}), '()\n', (927, 929), False, 'import time\n'), ((954, 972), 'gym.make', 'gym.make', (['args.env'], {}), '(args.env)\n', (962, 972), False, 'import gym\n'), ((1038, 1049), 'time.time', 'time.time', ([], {}), '()\n', (1047, 1049), False, 'import time\n'), ((1315, 1326), 'time.time', 'time.time', ([], {}), '()\n', (1324, 1326), False, 'import time\n'), ((1417, 1496), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (1440, 1496), False, 'import argparse\n'), ((238, 249), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (247, 249), False, 'import os\n'), ((449, 480), 'gym.make', 'gym.make', (['args.env'], {'render': '(True)'}), '(args.env, render=True)\n', (457, 480), False, 'import gym\n'), ((505, 523), 'gym.make', 'gym.make', (['args.env'], {}), '(args.env)\n', (513, 523), False, 'import gym\n'), ((856, 873), 'time.sleep', 'time.sleep', (['(0.016)'], {}), '(0.016)\n', (866, 873), False, 'import time\n'), ((97, 119), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (117, 119), False, 'import inspect\n')]
from src.trainer import train_by_config, test_by_config import os # train_by_config(os.path.join('settings', 'training_settings_7.ini')) # test_by_config(os.path.join('test_settings', 'test_settings.ini'))
[ "os.path.join" ]
[((160, 210), 'os.path.join', 'os.path.join', (['"""test_settings"""', '"""test_settings.ini"""'], {}), "('test_settings', 'test_settings.ini')\n", (172, 210), False, 'import os\n')]
from collections import deque from itertools import islice def test_1(): list = ['a', 'b', 'c'] d = deque(list) assert 'a' == d[0] and 'b' == d[1] and 'c' == d[2], 'queue error' def test_2(): data = islice(['a', 'b', 'c'],None) d = deque(data) assert 'a' == d[0] and 'b' == d[1] and 'c' == d[2], 'queue error'
[ "itertools.islice", "collections.deque" ]
[((110, 121), 'collections.deque', 'deque', (['list'], {}), '(list)\n', (115, 121), False, 'from collections import deque\n'), ((219, 248), 'itertools.islice', 'islice', (["['a', 'b', 'c']", 'None'], {}), "(['a', 'b', 'c'], None)\n", (225, 248), False, 'from itertools import islice\n'), ((256, 267), 'collections.deque', 'deque', (['data'], {}), '(data)\n', (261, 267), False, 'from collections import deque\n')]
import numpy as np import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline from IPython import get_ipython get_ipython().run_line_magic('matplotlib', 'inline') sns.set() def gl_confmatrix_2_confmatrix(sf,number_label=3): Nlabels=max(len(sf['target_label'].unique()),len(sf['predicted_label'].unique())) matrix=np.zeros([number_label,number_label],dtype=np.float) for i in sf: matrix[i['target_label'],i['predicted_label']]=i['count'] sum row_sums = matrix.sum(axis=1) matrix=matrix / row_sums[:, np.newaxis] matrix*=100 plt.figure(figsize=(number_label, number_label)) dims = (8,8) fig, ax = plt.subplots(figsize=dims) sns.heatmap(matrix, annot=True, fmt='.2f', xticklabels=['0' ,'1','2'], yticklabels=['0' ,'1','2']); plt.title('Confusion Matrix'); plt.xlabel('Predicted label') plt.ylabel('True label') return matrix conf_matrix_train=gl.evaluation.confusion_matrix(train_data['label2'],model.predict(train_data)) conf_matrix_test=gl.evaluation.confusion_matrix(test_data['label2'],model.predict(test_data)) gl_confmatrix_2_confmatrix(conf_matrix_train) gl_confmatrix_2_confmatrix(conf_matrix_test) model.coefficients.sort('value').show() model=gl.logistic_classifier.create(train_data,'label1',features_to_train,class_weights='auto') conf_matrix_train=gl.evaluation.confusion_matrix(train_data['label1'],model.predict(train_data)) conf_matrix_test=gl.evaluation.confusion_matrix(test_data['label1'],model.predict(test_data)) gl_confmatrix_2_confmatrix(conf_matrix_train,number_label=2) gl_confmatrix_2_confmatrix(conf_matrix_test,number_label=2) model=gl.random_forest_classifier.create(train_data,'label2',features_to_train,class_weights='auto',num_trees=50) conf_matrix_train=gl.evaluation.confusion_matrix(train_data['label2'],model.predict(train_data)) conf_matrix_test=gl.evaluation.confusion_matrix(test_data['label2'],model.predict(test_data)) gl_confmatrix_2_confmatrix(conf_matrix_train) gl_confmatrix_2_confmatrix(conf_matrix_test) 
model=gl.boosted_trees_classifier.create(train_data,'label2',features_to_train,class_weights='auto') conf_matrix_train=gl.evaluation.confusion_matrix(train_data['label2'],model.predict(train_data)) conf_matrix_test=gl.evaluation.confusion_matrix(test_data['label2'],model.predict(test_data)) gl_confmatrix_2_confmatrix(conf_matrix_train) gl_confmatrix_2_confmatrix(conf_matrix_test)
[ "IPython.get_ipython", "seaborn.set", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "seaborn.heatmap", "matplotlib.pyplot.figure", "numpy.zeros", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots" ]
[((182, 191), 'seaborn.set', 'sns.set', ([], {}), '()\n', (189, 191), True, 'import seaborn as sns\n'), ((342, 396), 'numpy.zeros', 'np.zeros', (['[number_label, number_label]'], {'dtype': 'np.float'}), '([number_label, number_label], dtype=np.float)\n', (350, 396), True, 'import numpy as np\n'), ((595, 643), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(number_label, number_label)'}), '(figsize=(number_label, number_label))\n', (605, 643), True, 'import matplotlib.pyplot as plt\n'), ((675, 701), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'dims'}), '(figsize=dims)\n', (687, 701), True, 'import matplotlib.pyplot as plt\n'), ((706, 810), 'seaborn.heatmap', 'sns.heatmap', (['matrix'], {'annot': '(True)', 'fmt': '""".2f"""', 'xticklabels': "['0', '1', '2']", 'yticklabels': "['0', '1', '2']"}), "(matrix, annot=True, fmt='.2f', xticklabels=['0', '1', '2'],\n yticklabels=['0', '1', '2'])\n", (717, 810), True, 'import seaborn as sns\n'), ((811, 840), 'matplotlib.pyplot.title', 'plt.title', (['"""Confusion Matrix"""'], {}), "('Confusion Matrix')\n", (820, 840), True, 'import matplotlib.pyplot as plt\n'), ((846, 875), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (856, 875), True, 'import matplotlib.pyplot as plt\n'), ((880, 904), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (890, 904), True, 'import matplotlib.pyplot as plt\n'), ((127, 140), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (138, 140), False, 'from IPython import get_ipython\n')]
"""empty message Revision ID: 2d70b2b7f421 Revises: <PASSWORD> Create Date: 2017-01-07 15:40:46.326596 """ # revision identifiers, used by Alembic. revision = '2d70b2b7f421' down_revision = '<PASSWORD>' from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_column('company', 'com_number') op.drop_column('company', 'tax_number') ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('company', sa.Column('tax_number', sa.INTEGER(), autoincrement=False, nullable=True)) op.add_column('company', sa.Column('com_number', sa.INTEGER(), autoincrement=False, nullable=True)) ### end Alembic commands ###
[ "sqlalchemy.INTEGER", "alembic.op.drop_column" ]
[((339, 378), 'alembic.op.drop_column', 'op.drop_column', (['"""company"""', '"""com_number"""'], {}), "('company', 'com_number')\n", (353, 378), False, 'from alembic import op\n'), ((383, 422), 'alembic.op.drop_column', 'op.drop_column', (['"""company"""', '"""tax_number"""'], {}), "('company', 'tax_number')\n", (397, 422), False, 'from alembic import op\n'), ((592, 604), 'sqlalchemy.INTEGER', 'sa.INTEGER', ([], {}), '()\n', (602, 604), True, 'import sqlalchemy as sa\n'), ((696, 708), 'sqlalchemy.INTEGER', 'sa.INTEGER', ([], {}), '()\n', (706, 708), True, 'import sqlalchemy as sa\n')]
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Intel Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Boolean, Column, DateTime from sqlalchemy import Integer, MetaData, String from sqlalchemy import Table, Index, ForeignKey from sqlalchemy.engine.base import Engine from migrate.changeset.constraint import ForeignKeyConstraint from sqlalchemy.engine import reflection from sqlalchemy import create_engine def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata if migrate_engine.name == 'sqlite': return storage_pools = 'storage_pools' storage_groups = 'storage_groups' col = '' insp = reflection.Inspector.from_engine(migrate_engine) foreign_keys = insp.get_foreign_keys(storage_pools) for key in foreign_keys: if storage_groups == key['referred_table']: sql_str = "ALTER TABLE %s DROP FOREIGN KEY %s;" % (storage_pools, key['name']) ret = migrate_engine.execute(sql_str) def downgrade(migrate_engine): if migrate_engine.name == 'sqlite': return #meta = MetaData() #meta.bind = migrate_engine #storage_group = Table('storage_groups', # meta, # autoload=True) #column_status = Column('status', String(255), default="OUT", nullable=False) try: #storage_group.drop_column(column_status) pass except Exception: raise
[ "sqlalchemy.engine.reflection.Inspector.from_engine" ]
[((1291, 1339), 'sqlalchemy.engine.reflection.Inspector.from_engine', 'reflection.Inspector.from_engine', (['migrate_engine'], {}), '(migrate_engine)\n', (1323, 1339), False, 'from sqlalchemy.engine import reflection\n')]
import argparse parser = argparse.ArgumentParser(description='This script takes a dihedral trajectory and detects change points using SIMPLE (simultaneous Penalized Likelihood Estimation, see Fan et al. P. Natl. Acad. Sci, 2015, 112, 7454-7459). Two parameters alpha and lambda are controlling the extent of simultaneous changes and total number of changes detected, respectively. alpha -> 1 means more simultaneous changes (0<alpha<1), and smaller lambda gives more changes.') parser.add_argument('shifteddihed', default='shifted_dihedral.dat', help='input shifted dihedral file') parser.add_argument('--alpha', type=float, default=0.7, help='extent of simultaneous changes, 0.7 by default suggested by the author if no prior ') parser.add_argument('--lam', type=float, default=10, help='sensitivity of detecting changes, 10 by default') args = parser.parse_args() import numpy as np from SIMPLEchangepoint import ComputeChanges import collections inputfile=args.shifteddihed lam=args.lam alpha=args.alpha outputfile=inputfile[:-4]+".lam"+str(lam)+"alpha"+str(alpha)+".transitionProba.dat" outputfile2=inputfile[:-4]+".lam"+str(lam)+"alpha"+str(alpha)+".transitionSummary.dat" alldata=np.loadtxt(inputfile).T time=alldata[0] data=alldata[1:] CPDresults = ComputeChanges(data,lam,alpha,lam_min=0,parallel=False) def changeORnot(con_set,size): x=[0]*size for i in con_set: x[i] = 1 return ' '.join(map(str,x)) od = collections.OrderedDict(sorted(CPDresults.items())) with open(outputfile,'w') as fo: for t in range(len(time)): if t not in od.keys(): fo.write(str(time[t])+' '+' '.join(map(str,[0]*len(data)))+'\n') else: fo.write(str(time[t])+' '+changeORnot(od[t],len(data))+'\n') def strplus1(x): return str(x+1) with open(outputfile2,'w') as fo2: for k, v in od.iteritems(): fo2.write('{:10.1f} {:5d} {:s}\n'.format(time[k],len(v),','.join(map(strplus1,v))))
[ "numpy.loadtxt", "SIMPLEchangepoint.ComputeChanges", "argparse.ArgumentParser" ]
[((25, 487), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""This script takes a dihedral trajectory and detects change points using SIMPLE (simultaneous Penalized Likelihood Estimation, see Fan et al. P. Natl. Acad. Sci, 2015, 112, 7454-7459). Two parameters alpha and lambda are controlling the extent of simultaneous changes and total number of changes detected, respectively. alpha -> 1 means more simultaneous changes (0<alpha<1), and smaller lambda gives more changes."""'}), "(description=\n 'This script takes a dihedral trajectory and detects change points using SIMPLE (simultaneous Penalized Likelihood Estimation, see Fan et al. P. Natl. Acad. Sci, 2015, 112, 7454-7459). Two parameters alpha and lambda are controlling the extent of simultaneous changes and total number of changes detected, respectively. alpha -> 1 means more simultaneous changes (0<alpha<1), and smaller lambda gives more changes.'\n )\n", (48, 487), False, 'import argparse\n'), ((1261, 1320), 'SIMPLEchangepoint.ComputeChanges', 'ComputeChanges', (['data', 'lam', 'alpha'], {'lam_min': '(0)', 'parallel': '(False)'}), '(data, lam, alpha, lam_min=0, parallel=False)\n', (1275, 1320), False, 'from SIMPLEchangepoint import ComputeChanges\n'), ((1190, 1211), 'numpy.loadtxt', 'np.loadtxt', (['inputfile'], {}), '(inputfile)\n', (1200, 1211), True, 'import numpy as np\n')]
import unittest from context import html2md from assertions import assertEq class DefinitioListTests(unittest.TestCase): def test_basic(self): in_html = u''' <dl> <dt>Apple</dt> <dd>Pomaceous fruit of plants of the genus Malus in the family Rosaceae.</dd> <dt>Orange</dt> <dd>The fruit of an evergreen tree of the genus Citrus.</dd> </dl>''' out_md = u''' Apple : Pomaceous fruit of plants of the genus Malus in the family Rosaceae. Orange : The fruit of an evergreen tree of the genus Citrus. '''.strip() assertEq(out_md, html2md.html2md(in_html, def_list=True)) def test_multi_dd(self): in_html = u''' <dl> <dt>Apple</dt> <dd>Pomaceous fruit of plants of the genus Malus in the family Rosaceae.</dd> <dd>An American computer company.</dd> <dt>Orange</dt> <dd>The fruit of an evergreen tree of the genus Citrus.</dd> </dl>''' out_md = u''' Apple : Pomaceous fruit of plants of the genus Malus in the family Rosaceae. : An American computer company. Orange : The fruit of an evergreen tree of the genus Citrus.''' assertEq(out_md, html2md.html2md(in_html, def_list=True)) def test_multi_dt(self): in_html = u''' <dl> <dt>Term 1</dt> <dt>Term 2</dt> <dd>Definition a</dd> <dt>Term 3</dt> <dd>Definition b</dd> </dl>''' out_md = u''' Term 1 Term 2 : Definition a Term 3 : Definition b '''.strip() assertEq(out_md, html2md.html2md(in_html, def_list=True)) def not_supported_test_paragraph_dd(self): in_html = u''' <dl> <dt>Apple</dt> <dd><p>Pomaceous fruit of plants of the genus Malus in the family Rosaceae.</p></dd> <dt>Orange</dt> <dd><p>The fruit of an evergreen tree of the genus Citrus.</p></dd> </dl>''' out_md = u''' Apple : Pomaceous fruit of plants of the genus Malus in the family Rosaceae. Orange : The fruit of an evergreen tree of the genus Citrus. ''' assertEq(out_md, html2md.html2md(in_html, def_list=True)) def suite(): return unittest.TestLoader().loadTestsFromTestCase(DefinitioListTests) if __name__ == '__main__': unittest.main()
[ "unittest.main", "context.html2md.html2md", "unittest.TestLoader" ]
[((2109, 2124), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2122, 2124), False, 'import unittest\n'), ((565, 604), 'context.html2md.html2md', 'html2md.html2md', (['in_html'], {'def_list': '(True)'}), '(in_html, def_list=True)\n', (580, 604), False, 'from context import html2md\n'), ((1115, 1154), 'context.html2md.html2md', 'html2md.html2md', (['in_html'], {'def_list': '(True)'}), '(in_html, def_list=True)\n', (1130, 1154), False, 'from context import html2md\n'), ((1432, 1471), 'context.html2md.html2md', 'html2md.html2md', (['in_html'], {'def_list': '(True)'}), '(in_html, def_list=True)\n', (1447, 1471), False, 'from context import html2md\n'), ((1945, 1984), 'context.html2md.html2md', 'html2md.html2md', (['in_html'], {'def_list': '(True)'}), '(in_html, def_list=True)\n', (1960, 1984), False, 'from context import html2md\n'), ((2012, 2033), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (2031, 2033), False, 'import unittest\n')]
import gym from gym.spaces.box import Box class TransposeImagesIfRequired(gym.ObservationWrapper): """ When environment observations are images, this wrapper transposes the axis. It is useful when the images have shape (W,H,C), as they can be transposed "on the fly" to (C,W,H) for PyTorch convolutions to be applied. Parameters ---------- env : gym.Env Original Gym environment, previous to applying the wrapper. op : list New axis ordering. """ def __init__(self, env=None, op=[2, 0, 1]): """Transpose observation space for images""" super(TransposeImagesIfRequired, self).__init__(env) self.op = op if isinstance(self.observation_space, gym.spaces.Box) and \ len(self.observation_space.shape) == 3: obs_shape = self.observation_space.shape self.observation_space = Box( self.observation_space.low[0, 0, 0], self.observation_space.high[0, 0, 0], [obs_shape[self.op[0]], obs_shape[self.op[1]], obs_shape[self.op[2]]], dtype=self.observation_space.dtype) elif isinstance(self.observation_space, gym.spaces.Dict): for k in self.observation_space.spaces: if isinstance(self.observation_space[k], gym.spaces.Box) and \ len(self.observation_space[k].shape) == 3: obs_shape = self.observation_space[k].shape self.observation_space[k] = Box( self.observation_space[k].low[0, 0, 0], self.observation_space[k].high[0, 0, 0], [obs_shape[self.op[0]], obs_shape[self.op[1]], obs_shape[self.op[2]]], dtype=self.observation_space.dtype) def observation(self, ob): """Transpose observation""" if isinstance(ob, dict): for k in ob: if len(ob[k].shape) == 3: ob[k] = ob[k].transpose(self.op[0], self.op[1], self.op[2]) else: if len(ob.shape) == 3: ob = ob.transpose(self.op[0], self.op[1], self.op[2]) return ob
[ "gym.spaces.box.Box" ]
[((902, 1097), 'gym.spaces.box.Box', 'Box', (['self.observation_space.low[0, 0, 0]', 'self.observation_space.high[0, 0, 0]', '[obs_shape[self.op[0]], obs_shape[self.op[1]], obs_shape[self.op[2]]]'], {'dtype': 'self.observation_space.dtype'}), '(self.observation_space.low[0, 0, 0], self.observation_space.high[0, 0, \n 0], [obs_shape[self.op[0]], obs_shape[self.op[1]], obs_shape[self.op[2]\n ]], dtype=self.observation_space.dtype)\n', (905, 1097), False, 'from gym.spaces.box import Box\n'), ((1530, 1731), 'gym.spaces.box.Box', 'Box', (['self.observation_space[k].low[0, 0, 0]', 'self.observation_space[k].high[0, 0, 0]', '[obs_shape[self.op[0]], obs_shape[self.op[1]], obs_shape[self.op[2]]]'], {'dtype': 'self.observation_space.dtype'}), '(self.observation_space[k].low[0, 0, 0], self.observation_space[k].high[\n 0, 0, 0], [obs_shape[self.op[0]], obs_shape[self.op[1]], obs_shape[self\n .op[2]]], dtype=self.observation_space.dtype)\n', (1533, 1731), False, 'from gym.spaces.box import Box\n')]
# File: guess.py # Author: <NAME> # Date: 11/21/2019 '''A guessing game. This is the classic game where someone thinks of a secret number and someone else tries to guess it. In this case, the computer thinks of the number and we (the user) have to guess what it is. ''' # LEARN: Python has a standard module for dealing with random numbers import random LOWEST = 1 # we change change this number to make the game easier (e.g. set to 10) or more # difficult (e.g. set to 1000) HIGHEST = 100 # This is the secret number. It will be different each time the program runs # because we are using a random number generator. NUMBER = random.randint(1, HIGHEST) correct = False # This variable tells us if the we guessed the correct number count = 0 # This variable keeps track of how many guesses have been made # LEARN: The `while` keyword is used to create a loop in the program. It # repeats the code block as long as the condition is true. # LEARN: The `not` keyword is a logic function that changes false to true and # true to false. # Since our program starts with correct = False, `not correct` is true and the # code block inside the loop will run. It will keep repeating until `not correct` # is false - in other words, it will keep repeating until correct is true. while not correct: # LEARN: `try/except` blocks are used to handle errors without crashing the # program. In this case, if the user types in something that is not an # integer, the int() function will raise a ValueError exception. We can # catch this exception and handle the error so that our program doesn't # crash if the user types in 'bob', for example. try: # REVIEW: We can use `+` to add strings together. But we can't add # strings and numbers, so we use `str()` to convert numbers to strings # before adding them to other strings. answer = int(input('Guess a number between ' + str(LOWEST) + ' and ' + str(HIGHEST) + ': ')) # LEARN: If an error occurred above, then the following line is skipped. 
# Instead, the program flow jumps to the `except ValueError` line. count += 1 except ValueError: # LEARN: This block of code will run only if there was a ValueError # after the `try:` and before `except ValueError:` print('That is not a number. Try again.') # REVIEW: the continue keyword makes the program flow jump to the # beginning of the loop (the `while not correct:` line in this program) # instead of continuing with everything else below. continue # REVIEW: We use `if` to run blocks of code if a condition is true or skip # blocks of code if a condition is false. # LEARN: In addition to `if` and `else`, we can have an `elif` statement # (short for "else if"). We can use these to chain test conditions. Only # the blocks of code after the first condition that is true will run. All # other blocks will be skipped, even if they are also true. if answer < LOWEST or answer > HIGHEST: # This will run if the answer is less than the lowest possible number or # greater than the highest possible number print('That number is out of range') elif answer == NUMBER: # this will run only when the answer is the secret number correct = True print('You guessed correct!') print('It took', count, 'guesses') elif answer < NUMBER: # This will only run if the answer is less than the secret number and # greater than or equal to the lowest number. We didn't have to write # the second part (`answer >= LOWEST`) because of the `elif`. print('Too low. Try again.') else: # This will only run if the answer is greather than the secret number # and less than or equal to the highest possible number. We don't have # to write `elif answer > NUMBER or answer <= HIGHEST:` because it is # the only possibility left after all of the other test cases above. print('Too high. Try again.')
[ "random.randint" ]
[((632, 658), 'random.randint', 'random.randint', (['(1)', 'HIGHEST'], {}), '(1, HIGHEST)\n', (646, 658), False, 'import random\n')]
# Copyright (c) 2021 ComputerAlgorithmsGroupAtKyotoU # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import graphillion from digraphillion import DiGraphSet import unittest from graphillion import GraphSet """ 1 <-> 2 <-> 3 ^ ^ ^ | | | v v v 4 <-> 5 <-> 6 """ e1 = (1, 2) e2 = (1, 4) e3 = (2, 3) e4 = (2, 5) e5 = (3, 6) e6 = (4, 5) e7 = (5, 6) e8 = (2, 1) e9 = (4, 1) e10 = (3, 2) e11 = (5, 2) e12 = (6, 3) e13 = (5, 4) e14 = (6, 5) universe_edges = [e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14] class TestDigraphillion(unittest.TestCase): def test_init(self): gs = DiGraphSet() self.assertEqual(len(gs), 0) def test_directed_cycles(self): DiGraphSet.set_universe(universe_edges) gs = DiGraphSet.directed_cycles() self.assertEqual(len(gs), 2 * (2 + 1) + len(universe_edges) / 2) self.assertTrue([(2, 3), (3, 2)] in gs) self.assertTrue([(1, 2), (2, 3), (3, 6), (6, 5), (5, 4), (4, 1)] in gs) self.assertTrue([(4, 5), (5, 2), (2, 1), (1, 4)] in gs) self.assertTrue([(5, 4), (4, 1), (1, 2), (2, 5)] in gs) self.assertTrue([(1, 2), (2, 5), (5, 4), (1, 4)] not in gs) self.assertTrue([(1, 4), (4, 5), (5, 4), (4, 1)] not in gs) def test_directed_hamiltonian_cycles(self): DiGraphSet.set_universe(universe_edges) gs = DiGraphSet.directed_hamiltonian_cycles() cycles = DiGraphSet.directed_cycles() self.assertEqual(len(gs), 2) for c in gs: self.assertTrue(c in cycles) self.assertTrue([(1, 2), (2, 3), (3, 6), (6, 5), (5, 4), (4, 1)] in gs) self.assertTrue([(1, 2), (2, 3)] not in gs) def test_directed_st_paths(self): DiGraphSet.set_universe(universe_edges) s, t = 1, 6 gs = DiGraphSet.directed_st_paths(s, t, False) self.assertTrue([(1, 4), (4, 5), (5, 6)] in gs) self.assertTrue([(1, 4), (4, 5), (5, 2), (2, 3), (3, 6)] in gs) self.assertTrue([(1, 4), (4, 5)] not in gs) self.assertEqual(len(gs), 4) def test_directed_st_paths_hack1(self): edges = [(1, 2), (1, 3), (1, 4), (2, 4), (3, 1)] DiGraphSet.set_universe(edges) s, t = 2, 4 gs = DiGraphSet.directed_st_paths(s, t, False) self.assertTrue([(2, 4)] in gs) self.assertEqual(len(gs), 1) def test_directed_st_paths_hack2(self): edges = 
[(1, 2)] DiGraphSet.set_universe(edges) s, t = 1, 2 gs = DiGraphSet.directed_st_paths(s, t) self.assertEqual(len(gs), 1) def test_directed_st_hamiltonian_paths(self): DiGraphSet.set_universe(universe_edges) s, t = 1, 6 s_to_t = [(1, 4), (4, 5), (5, 2), (2, 3), (3, 6)] t_to_s = [(6, 3), (3, 2), (2, 5), (5, 4), (4, 1)] gs = DiGraphSet.directed_st_paths(s, t, True) self.assertTrue(s_to_t in gs) self.assertTrue(t_to_s not in gs) self.assertEqual(len(gs), 1) gs = DiGraphSet.directed_st_paths(t, s, True) self.assertTrue(s_to_t not in gs) self.assertTrue(t_to_s in gs) self.assertEqual(len(gs), 1) def test_directed_st_hamiltonian_paths_hack1(self): edges = [(1, 2), (3, 4)] DiGraphSet.set_universe(edges) s, t = 1, 2 gs = DiGraphSet.directed_st_paths(s, t, is_hamiltonian=True) for gg in gs: print(gg) self.assertEqual(len(gs), 0) def test_hamiltonian_path_in_paths(self): DiGraphSet.set_universe(universe_edges) for s in range(1, 7): for t in range(1, 7): path = DiGraphSet.directed_st_paths(s, t, False) hamiltonian = DiGraphSet.directed_st_paths(s, t, True) self.assertTrue(hamiltonian.issubset(path)) def test_rooted_forests(self): DiGraphSet.set_universe(universe_edges) gs = DiGraphSet.rooted_forests() self.assertTrue([(1, 2), (2, 3), (3, 6), (6, 5), (5, 4)] in gs) self.assertTrue([(5, 4), (4, 1), (5, 6), (6, 3)] in gs) self.assertTrue([(4, 1), (4, 5), (2, 3), (3, 6)] in gs) self.assertTrue([(2, 1), (2, 5), (2, 3)] in gs) self.assertTrue([(5, 2), (6, 3)] in gs) self.assertTrue([(1, 4)] in gs) self.assertTrue([] in gs) self.assertTrue([(2, 1), (4, 1)] not in gs) self.assertTrue([(1, 2), (2, 5), (5, 2), (2, 1)] not in gs) self.assertTrue([(1, 2), (2, 1)] not in gs) roots = [1, 2, 3] gs = DiGraphSet.rooted_forests(roots) self.assertEqual(len(gs), 1) self.assertTrue([(1, 4), (2, 5), (3, 6)] in gs) roots = [1, 4] gs = DiGraphSet.rooted_forests(roots) self.assertEqual(len(gs), 6) self.assertTrue([(1, 2), (4, 5)] in gs) self.assertTrue([(1, 2), (2, 3), (3, 6), (4, 5)] in gs) 
self.assertTrue([(1, 2), (5, 4)] not in gs) self.assertTrue([(1, 2), (2, 5)] not in gs) roots = [2] gs = DiGraphSet.rooted_forests(roots) self.assertTrue([(2, 1), (1, 4), (4, 5), (5, 6), (6, 3)] in gs) self.assertTrue([(2, 1), (2, 3), (2, 5)] in gs) self.assertTrue([(3, 6), (6, 5), (5, 4)] not in gs) self.assertTrue([(1, 2)] not in gs) def test_rooted_spanning_forests(self): DiGraphSet.set_universe(universe_edges) is_spanning = True roots = [1, 4] gs = DiGraphSet.rooted_forests(roots, is_spanning) self.assertEqual(len(gs), 3) self.assertTrue([(1, 2), (4, 5), (2, 3), (5, 6)] in gs) self.assertTrue([(1, 2), (4, 5), (2, 3), (3, 6)] in gs) self.assertTrue([(1, 2), (4, 5), (6, 3), (5, 6)] in gs) roots = [3, 4] gs = DiGraphSet.rooted_forests(roots, is_spanning) self.assertTrue([(3, 6), (6, 5), (5, 2), (4, 1)] in gs) self.assertTrue([(3, 6), (6, 5), (5, 2), (1, 4)] not in gs) def test_rooted_spanning_trees(self): DiGraphSet.set_universe(universe_edges) root = 1 is_spanning = True gs = DiGraphSet.rooted_trees(root, is_spanning) self.assertEqual(len(gs), 15) # det(L) self.assertTrue([(1, 2), (2, 3), (1, 4), (2, 5), (3, 6)] in gs) self.assertTrue([(1, 2), (2, 3), (4, 1), (2, 5), (3, 6)] not in gs) for rooted_tree in gs: self.assertEqual(len(rooted_tree), 5) self.assertTrue((1, 2) in rooted_tree or (1, 4) in rooted_tree) def test_rooted_trees(self): DiGraphSet.set_universe(universe_edges) root = 1 is_spanning = False gs = DiGraphSet.rooted_trees(root, is_spanning) gs.issubset(DiGraphSet.rooted_trees(root, True)) self.assertEqual(len(gs), 45) self.assertTrue([] in gs) self.assertTrue([(1, 2)] in gs) self.assertTrue([(1, 2), (1, 4)] in gs) self.assertTrue([(1, 2), (1, 4), (4, 5)] in gs) self.assertTrue([(1, 2), (1, 4), (4, 5), (5, 6)] in gs) self.assertTrue([(1, 2), (1, 4), (4, 5), (5, 6), (6, 3)] in gs) self.assertTrue([(4, 1)] not in gs) self.assertTrue([(2, 3)] not in gs) self.assertTrue([(1, 2), (2, 5), (5, 4), (4, 1)] not in gs) def 
test_rooted_spanning_forests_and_trees(self): DiGraphSet.set_universe(universe_edges) is_spanning = True for root in range(1, 7): roots = [root] gs = DiGraphSet.rooted_forests(roots, is_spanning) spanning_trees = DiGraphSet.rooted_trees(root, is_spanning) self.assertEqual(gs, spanning_trees) def test_graphs(self): DiGraphSet.set_universe(universe_edges) gs = DiGraphSet.graphs() self.assertEqual(len(gs), 2**len(universe_edges)) def test_degree_constraints(self): DiGraphSet.set_universe(universe_edges) in_dc = {} out_dc = {} # cycles for v in DiGraphSet._vertices: in_dc[v] = out_dc[v] = range(1, 2) gs = DiGraphSet.graphs(in_degree_constraints=in_dc, out_degree_constraints=out_dc) self.assertEqual(len(gs), 9) self.assertTrue([(1, 2), (2, 3), (3, 6), (6, 5), (5, 4), (4, 1)] in gs) self.assertTrue([(1, 4), (4, 5), (5, 6), (6, 3), (3, 2), (2, 1)] in gs) self.assertTrue([(1, 2), (2, 5), (5, 4), (4, 1)] not in gs) in_dc = {} out_dc = {} # all subgraphs for v in DiGraphSet._vertices: in_dc[v] = out_dc[v] = range(0, 4) gs = DiGraphSet.graphs(in_degree_constraints=in_dc, out_degree_constraints=out_dc) self.assertEqual(len(gs), 2**len(universe_edges)) in_dc = {} for v in DiGraphSet._vertices: in_dc[v] = range(1, 2) gs = DiGraphSet.graphs(in_degree_constraints=in_dc) self.assertEqual(len(gs), 2**4 * 3**2) in_dc = {} for v in DiGraphSet._vertices: in_dc[v] = range(1, 4) gs = DiGraphSet.graphs(in_degree_constraints=in_dc) self.assertEqual(len(gs), 3**4 * 7**2) out_dc = {} for v in DiGraphSet._vertices: out_dc[v] = range(1, 2) gs = DiGraphSet.graphs(out_degree_constraints=out_dc) self.assertEqual(len(gs), 2**4 * 3**2) out_dc = {} for v in DiGraphSet._vertices: out_dc[v] = range(1, 4) gs = DiGraphSet.graphs(out_degree_constraints=out_dc) self.assertEqual(len(gs), 3**4 * 7**2) def test_trees_in_graphs(self): DiGraphSet.set_universe(universe_edges) root = 1 trees = DiGraphSet.rooted_trees(root, is_spanning=True) in_dc = {} out_dc = {} for v in range(1, len(DiGraphSet._vertices)+1): 
if v == root: in_dc[v] = range(0, 1) out_dc[v] = range(1, len(DiGraphSet._vertices)) else: in_dc[v] = range(1, 2) gs = DiGraphSet.graphs(in_degree_constraints=in_dc, out_degree_constraints=out_dc) self.assertTrue(trees.issubset(gs)) def test_with_graphillion(self): graphillion_universe = [e1, e2, e3, e4, e5, e6, e7] GraphSet.set_universe(graphillion_universe) DiGraphSet.set_universe(graphillion_universe) root = 1 trees_g = GraphSet.trees(root) trees_dg = DiGraphSet.rooted_trees(root) self.assertEqual(len(trees_g), 45) self.assertTrue([] in trees_g) self.assertTrue([(1, 2), (1, 4), (2, 5), (4, 5)] not in trees_g) self.assertTrue([(1, 2), (1, 4), (4, 5), (5, 6), (2, 3)] in trees_dg) self.assertTrue([(1, 2), (2, 5), (5, 4), (4, 1)] not in trees_dg) if __name__ == '__main__': unittest.main()
[ "digraphillion.DiGraphSet.graphs", "digraphillion.DiGraphSet.directed_st_paths", "digraphillion.DiGraphSet.rooted_forests", "digraphillion.DiGraphSet.rooted_trees", "graphillion.GraphSet.set_universe", "digraphillion.DiGraphSet.directed_cycles", "unittest.main", "digraphillion.DiGraphSet.directed_hami...
[((11825, 11840), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11838, 11840), False, 'import unittest\n'), ((1650, 1662), 'digraphillion.DiGraphSet', 'DiGraphSet', ([], {}), '()\n', (1660, 1662), False, 'from digraphillion import DiGraphSet\n'), ((1745, 1784), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['universe_edges'], {}), '(universe_edges)\n', (1768, 1784), False, 'from digraphillion import DiGraphSet\n'), ((1798, 1826), 'digraphillion.DiGraphSet.directed_cycles', 'DiGraphSet.directed_cycles', ([], {}), '()\n', (1824, 1826), False, 'from digraphillion import DiGraphSet\n'), ((2351, 2390), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['universe_edges'], {}), '(universe_edges)\n', (2374, 2390), False, 'from digraphillion import DiGraphSet\n'), ((2404, 2444), 'digraphillion.DiGraphSet.directed_hamiltonian_cycles', 'DiGraphSet.directed_hamiltonian_cycles', ([], {}), '()\n', (2442, 2444), False, 'from digraphillion import DiGraphSet\n'), ((2462, 2490), 'digraphillion.DiGraphSet.directed_cycles', 'DiGraphSet.directed_cycles', ([], {}), '()\n', (2488, 2490), False, 'from digraphillion import DiGraphSet\n'), ((2769, 2808), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['universe_edges'], {}), '(universe_edges)\n', (2792, 2808), False, 'from digraphillion import DiGraphSet\n'), ((2842, 2883), 'digraphillion.DiGraphSet.directed_st_paths', 'DiGraphSet.directed_st_paths', (['s', 't', '(False)'], {}), '(s, t, False)\n', (2870, 2883), False, 'from digraphillion import DiGraphSet\n'), ((3211, 3241), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['edges'], {}), '(edges)\n', (3234, 3241), False, 'from digraphillion import DiGraphSet\n'), ((3276, 3317), 'digraphillion.DiGraphSet.directed_st_paths', 'DiGraphSet.directed_st_paths', (['s', 't', '(False)'], {}), '(s, t, False)\n', (3304, 3317), False, 'from digraphillion import DiGraphSet\n'), ((3474, 3504), 
'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['edges'], {}), '(edges)\n', (3497, 3504), False, 'from digraphillion import DiGraphSet\n'), ((3539, 3573), 'digraphillion.DiGraphSet.directed_st_paths', 'DiGraphSet.directed_st_paths', (['s', 't'], {}), '(s, t)\n', (3567, 3573), False, 'from digraphillion import DiGraphSet\n'), ((3670, 3709), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['universe_edges'], {}), '(universe_edges)\n', (3693, 3709), False, 'from digraphillion import DiGraphSet\n'), ((3860, 3900), 'digraphillion.DiGraphSet.directed_st_paths', 'DiGraphSet.directed_st_paths', (['s', 't', '(True)'], {}), '(s, t, True)\n', (3888, 3900), False, 'from digraphillion import DiGraphSet\n'), ((4032, 4072), 'digraphillion.DiGraphSet.directed_st_paths', 'DiGraphSet.directed_st_paths', (['t', 's', '(True)'], {}), '(t, s, True)\n', (4060, 4072), False, 'from digraphillion import DiGraphSet\n'), ((4288, 4318), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['edges'], {}), '(edges)\n', (4311, 4318), False, 'from digraphillion import DiGraphSet\n'), ((4353, 4408), 'digraphillion.DiGraphSet.directed_st_paths', 'DiGraphSet.directed_st_paths', (['s', 't'], {'is_hamiltonian': '(True)'}), '(s, t, is_hamiltonian=True)\n', (4381, 4408), False, 'from digraphillion import DiGraphSet\n'), ((4545, 4584), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['universe_edges'], {}), '(universe_edges)\n', (4568, 4584), False, 'from digraphillion import DiGraphSet\n'), ((4889, 4928), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['universe_edges'], {}), '(universe_edges)\n', (4912, 4928), False, 'from digraphillion import DiGraphSet\n'), ((4942, 4969), 'digraphillion.DiGraphSet.rooted_forests', 'DiGraphSet.rooted_forests', ([], {}), '()\n', (4967, 4969), False, 'from digraphillion import DiGraphSet\n'), ((5562, 5594), 'digraphillion.DiGraphSet.rooted_forests', 
'DiGraphSet.rooted_forests', (['roots'], {}), '(roots)\n', (5587, 5594), False, 'from digraphillion import DiGraphSet\n'), ((5725, 5757), 'digraphillion.DiGraphSet.rooted_forests', 'DiGraphSet.rooted_forests', (['roots'], {}), '(roots)\n', (5750, 5757), False, 'from digraphillion import DiGraphSet\n'), ((6045, 6077), 'digraphillion.DiGraphSet.rooted_forests', 'DiGraphSet.rooted_forests', (['roots'], {}), '(roots)\n', (6070, 6077), False, 'from digraphillion import DiGraphSet\n'), ((6363, 6402), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['universe_edges'], {}), '(universe_edges)\n', (6386, 6402), False, 'from digraphillion import DiGraphSet\n'), ((6467, 6512), 'digraphillion.DiGraphSet.rooted_forests', 'DiGraphSet.rooted_forests', (['roots', 'is_spanning'], {}), '(roots, is_spanning)\n', (6492, 6512), False, 'from digraphillion import DiGraphSet\n'), ((6779, 6824), 'digraphillion.DiGraphSet.rooted_forests', 'DiGraphSet.rooted_forests', (['roots', 'is_spanning'], {}), '(roots, is_spanning)\n', (6804, 6824), False, 'from digraphillion import DiGraphSet\n'), ((7008, 7047), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['universe_edges'], {}), '(universe_edges)\n', (7031, 7047), False, 'from digraphillion import DiGraphSet\n'), ((7106, 7148), 'digraphillion.DiGraphSet.rooted_trees', 'DiGraphSet.rooted_trees', (['root', 'is_spanning'], {}), '(root, is_spanning)\n', (7129, 7148), False, 'from digraphillion import DiGraphSet\n'), ((7544, 7583), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['universe_edges'], {}), '(universe_edges)\n', (7567, 7583), False, 'from digraphillion import DiGraphSet\n'), ((7643, 7685), 'digraphillion.DiGraphSet.rooted_trees', 'DiGraphSet.rooted_trees', (['root', 'is_spanning'], {}), '(root, is_spanning)\n', (7666, 7685), False, 'from digraphillion import DiGraphSet\n'), ((8317, 8356), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['universe_edges'], 
{}), '(universe_edges)\n', (8340, 8356), False, 'from digraphillion import DiGraphSet\n'), ((8665, 8704), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['universe_edges'], {}), '(universe_edges)\n', (8688, 8704), False, 'from digraphillion import DiGraphSet\n'), ((8719, 8738), 'digraphillion.DiGraphSet.graphs', 'DiGraphSet.graphs', ([], {}), '()\n', (8736, 8738), False, 'from digraphillion import DiGraphSet\n'), ((8845, 8884), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['universe_edges'], {}), '(universe_edges)\n', (8868, 8884), False, 'from digraphillion import DiGraphSet\n'), ((9041, 9118), 'digraphillion.DiGraphSet.graphs', 'DiGraphSet.graphs', ([], {'in_degree_constraints': 'in_dc', 'out_degree_constraints': 'out_dc'}), '(in_degree_constraints=in_dc, out_degree_constraints=out_dc)\n', (9058, 9118), False, 'from digraphillion import DiGraphSet\n'), ((9578, 9655), 'digraphillion.DiGraphSet.graphs', 'DiGraphSet.graphs', ([], {'in_degree_constraints': 'in_dc', 'out_degree_constraints': 'out_dc'}), '(in_degree_constraints=in_dc, out_degree_constraints=out_dc)\n', (9595, 9655), False, 'from digraphillion import DiGraphSet\n'), ((9852, 9898), 'digraphillion.DiGraphSet.graphs', 'DiGraphSet.graphs', ([], {'in_degree_constraints': 'in_dc'}), '(in_degree_constraints=in_dc)\n', (9869, 9898), False, 'from digraphillion import DiGraphSet\n'), ((10053, 10099), 'digraphillion.DiGraphSet.graphs', 'DiGraphSet.graphs', ([], {'in_degree_constraints': 'in_dc'}), '(in_degree_constraints=in_dc)\n', (10070, 10099), False, 'from digraphillion import DiGraphSet\n'), ((10256, 10304), 'digraphillion.DiGraphSet.graphs', 'DiGraphSet.graphs', ([], {'out_degree_constraints': 'out_dc'}), '(out_degree_constraints=out_dc)\n', (10273, 10304), False, 'from digraphillion import DiGraphSet\n'), ((10461, 10509), 'digraphillion.DiGraphSet.graphs', 'DiGraphSet.graphs', ([], {'out_degree_constraints': 'out_dc'}), '(out_degree_constraints=out_dc)\n', 
(10478, 10509), False, 'from digraphillion import DiGraphSet\n'), ((10602, 10641), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['universe_edges'], {}), '(universe_edges)\n', (10625, 10641), False, 'from digraphillion import DiGraphSet\n'), ((10676, 10723), 'digraphillion.DiGraphSet.rooted_trees', 'DiGraphSet.rooted_trees', (['root'], {'is_spanning': '(True)'}), '(root, is_spanning=True)\n', (10699, 10723), False, 'from digraphillion import DiGraphSet\n'), ((11019, 11096), 'digraphillion.DiGraphSet.graphs', 'DiGraphSet.graphs', ([], {'in_degree_constraints': 'in_dc', 'out_degree_constraints': 'out_dc'}), '(in_degree_constraints=in_dc, out_degree_constraints=out_dc)\n', (11036, 11096), False, 'from digraphillion import DiGraphSet\n'), ((11279, 11322), 'graphillion.GraphSet.set_universe', 'GraphSet.set_universe', (['graphillion_universe'], {}), '(graphillion_universe)\n', (11300, 11322), False, 'from graphillion import GraphSet\n'), ((11331, 11376), 'digraphillion.DiGraphSet.set_universe', 'DiGraphSet.set_universe', (['graphillion_universe'], {}), '(graphillion_universe)\n', (11354, 11376), False, 'from digraphillion import DiGraphSet\n'), ((11413, 11433), 'graphillion.GraphSet.trees', 'GraphSet.trees', (['root'], {}), '(root)\n', (11427, 11433), False, 'from graphillion import GraphSet\n'), ((11453, 11482), 'digraphillion.DiGraphSet.rooted_trees', 'DiGraphSet.rooted_trees', (['root'], {}), '(root)\n', (11476, 11482), False, 'from digraphillion import DiGraphSet\n'), ((7706, 7741), 'digraphillion.DiGraphSet.rooted_trees', 'DiGraphSet.rooted_trees', (['root', '(True)'], {}), '(root, True)\n', (7729, 7741), False, 'from digraphillion import DiGraphSet\n'), ((8462, 8507), 'digraphillion.DiGraphSet.rooted_forests', 'DiGraphSet.rooted_forests', (['roots', 'is_spanning'], {}), '(roots, is_spanning)\n', (8487, 8507), False, 'from digraphillion import DiGraphSet\n'), ((8537, 8579), 'digraphillion.DiGraphSet.rooted_trees', 'DiGraphSet.rooted_trees', 
(['root', 'is_spanning'], {}), '(root, is_spanning)\n', (8560, 8579), False, 'from digraphillion import DiGraphSet\n'), ((4672, 4713), 'digraphillion.DiGraphSet.directed_st_paths', 'DiGraphSet.directed_st_paths', (['s', 't', '(False)'], {}), '(s, t, False)\n', (4700, 4713), False, 'from digraphillion import DiGraphSet\n'), ((4744, 4784), 'digraphillion.DiGraphSet.directed_st_paths', 'DiGraphSet.directed_st_paths', (['s', 't', '(True)'], {}), '(s, t, True)\n', (4772, 4784), False, 'from digraphillion import DiGraphSet\n')]
from pathlib import Path import matplotlib import matplotlib.pyplot as plt import numpy as np with open(Path('tmp.txt'), 'r') as f: lines_read = f.readlines() lines = list() for line in lines_read: lines.append(line.split()) labels = list() naive_time = list() kapra_time = list() for index, line in enumerate(lines): if index % 2 == 0: # naive labels.append(line[1]) naive_time.append(float(line[2])) else: # kapra kapra_time.append(float(line[2])) x = np.arange(len(labels)) # the label locations width = 0.35 # the width of the bars fig, ax = plt.subplots() rects1 = ax.bar(x - width/2, naive_time, width, label='Naive') rects2 = ax.bar(x + width/2, kapra_time, width, label='Kapra') # Add some text for labels, title and custom x-axis tick labels, etc. ax.set_ylabel('Time (s)') ax.set_xlabel('Number of instances') ax.set_title('Time efficiency') ax.set_xticks(x) ax.set_xticklabels(labels) ax.legend() def autolabel(rects): """Attach a text label above each bar in *rects*, displaying its height.""" for rect in rects: height = rect.get_height() ax.annotate('{}'.format(height), xy=(rect.get_x() + rect.get_width() / 2, height), xytext=(0, 3), # 3 points vertical offset textcoords="offset points", ha='center', va='bottom') autolabel(rects1) autolabel(rects2) fig.tight_layout() #plt.show() plt.savefig('stat.png')
[ "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots", "pathlib.Path" ]
[((595, 609), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (607, 609), True, 'import matplotlib.pyplot as plt\n'), ((1459, 1482), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""stat.png"""'], {}), "('stat.png')\n", (1470, 1482), True, 'import matplotlib.pyplot as plt\n'), ((105, 120), 'pathlib.Path', 'Path', (['"""tmp.txt"""'], {}), "('tmp.txt')\n", (109, 120), False, 'from pathlib import Path\n')]
from datetime import date from datetime import datetime from datetime import timedelta from datetime import timezone class CommonDate(object): DEFAULT_ZONE = timezone(offset=timedelta(hours=8)) @staticmethod def today(tz=DEFAULT_ZONE): return date.today() @staticmethod def today_time(tz=DEFAULT_ZONE): return datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) @staticmethod def yday(tz=DEFAULT_ZONE): return date.today() - timedelta(days=1) @staticmethod def yday_time(tz=DEFAULT_ZONE): yday_tmp = datetime.now() - timedelta(days=1) return yday_tmp.replace(hour=0, minute=0, second=0, microsecond=0) @staticmethod def today_ymd(tz=DEFAULT_ZONE): return CommonDate.today().strftime('%Y%m%d') @staticmethod def today_iso(tz=DEFAULT_ZONE): return CommonDate.today().strftime('%Y-%m-%d') @staticmethod def yday_ymd(tz=DEFAULT_ZONE): return CommonDate.yday().strftime('%Y%m%d') @staticmethod def yday_iso(tz=DEFAULT_ZONE): return CommonDate.yday().strftime('%Y-%m-%d') @staticmethod def today_ts(tz=DEFAULT_ZONE): """Today's 0:0:0 timestamp :return integer """ return int(CommonDate.today().strftime('%s')) @staticmethod def yday_ts(tz=DEFAULT_ZONE): """Yesterday's 0:0:0 timestamp :return integer """ return int(CommonDate.yday().strftime('%s')) @staticmethod def now_ts(tz=DEFAULT_ZONE): """Now's timestamp :return integer """ return int(datetime.timestamp(datetime.now())) @staticmethod def last_hour_start_ts(tz=DEFAULT_ZONE): """Last hour start timestamp :return integer """ t = datetime.now().replace(minute=0, second=0, microsecond=0) - timedelta(hours=1) return int(datetime.timestamp(t)) @staticmethod def last_hour_end_ts(tz=DEFAULT_ZONE): """Last hour end timestamp :return integer """ t = datetime.now().replace(minute=0, second=0, microsecond=0) return int(datetime.timestamp(t)) @staticmethod def now(tz=DEFAULT_ZONE): return datetime.now() @staticmethod def now_iso(tz=DEFAULT_ZONE): return CommonDate.now().strftime('%Y-%m-%d %H:%M:%S') @staticmethod def mts_2_iso(mts, 
tz=DEFAULT_ZONE): return datetime.fromtimestamp(mts/1000).strftime('%Y-%m-%d %H:%M:%S') @staticmethod def ts_2_iso(ts, tz=DEFAULT_ZONE): return datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
[ "datetime.datetime.fromtimestamp", "datetime.timedelta", "datetime.datetime.now", "datetime.datetime.timestamp", "datetime.date.today" ]
[((266, 278), 'datetime.date.today', 'date.today', ([], {}), '()\n', (276, 278), False, 'from datetime import date\n'), ((2239, 2253), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2251, 2253), False, 'from datetime import datetime\n'), ((180, 198), 'datetime.timedelta', 'timedelta', ([], {'hours': '(8)'}), '(hours=8)\n', (189, 198), False, 'from datetime import timedelta\n'), ((481, 493), 'datetime.date.today', 'date.today', ([], {}), '()\n', (491, 493), False, 'from datetime import date\n'), ((496, 513), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (505, 513), False, 'from datetime import timedelta\n'), ((588, 602), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (600, 602), False, 'from datetime import datetime\n'), ((605, 622), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (614, 622), False, 'from datetime import timedelta\n'), ((1869, 1887), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (1878, 1887), False, 'from datetime import timedelta\n'), ((1907, 1928), 'datetime.datetime.timestamp', 'datetime.timestamp', (['t'], {}), '(t)\n', (1925, 1928), False, 'from datetime import datetime\n'), ((2152, 2173), 'datetime.datetime.timestamp', 'datetime.timestamp', (['t'], {}), '(t)\n', (2170, 2173), False, 'from datetime import datetime\n'), ((350, 364), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (362, 364), False, 'from datetime import datetime\n'), ((1643, 1657), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1655, 1657), False, 'from datetime import datetime\n'), ((2075, 2089), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2087, 2089), False, 'from datetime import datetime\n'), ((2444, 2478), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(mts / 1000)'], {}), '(mts / 1000)\n', (2466, 2478), False, 'from datetime import datetime\n'), ((2580, 2606), 'datetime.datetime.fromtimestamp', 
'datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (2602, 2606), False, 'from datetime import datetime\n'), ((1809, 1823), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1821, 1823), False, 'from datetime import datetime\n')]
#!/usr/bin/env python # encoding: utf-8 """ @author:nikan @file: project.py @time: 2018/5/14 下午4:20 """ import datetime from sqlalchemy import text from sqlalchemy.dialects import mysql from PyScraper.server.extensions import db class Project(db.Model): __tablename__ = "project" project_id = db.Column(db.Integer, autoincrement=True, primary_key=True, doc="自增id") project_name = db.Column(db.String(191), nullable=False, doc="项目名称") setting = db.Column(mysql.JSON, doc="项目配置") cron_config = db.Column(mysql.JSON, doc="项目调度配置") tag = db.Column(db.String(191), doc="项目标签") status = db.Column(db.String(191), nullable=False, default='stop', doc="项目状态") is_deleted = db.Column(db.Boolean, default=0, doc="项目是否删除的标记") update_timestamp = db.Column(db.TIMESTAMP, server_default=text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'), doc="更新时间") create_timestamp = db.Column(db.TIMESTAMP, default=datetime.datetime.now, doc="创建时间")
[ "PyScraper.server.extensions.db.Column", "PyScraper.server.extensions.db.String", "sqlalchemy.text" ]
[((309, 380), 'PyScraper.server.extensions.db.Column', 'db.Column', (['db.Integer'], {'autoincrement': '(True)', 'primary_key': '(True)', 'doc': '"""自增id"""'}), "(db.Integer, autoincrement=True, primary_key=True, doc='自增id')\n", (318, 380), False, 'from PyScraper.server.extensions import db\n'), ((468, 501), 'PyScraper.server.extensions.db.Column', 'db.Column', (['mysql.JSON'], {'doc': '"""项目配置"""'}), "(mysql.JSON, doc='项目配置')\n", (477, 501), False, 'from PyScraper.server.extensions import db\n'), ((525, 560), 'PyScraper.server.extensions.db.Column', 'db.Column', (['mysql.JSON'], {'doc': '"""项目调度配置"""'}), "(mysql.JSON, doc='项目调度配置')\n", (534, 560), False, 'from PyScraper.server.extensions import db\n'), ((709, 758), 'PyScraper.server.extensions.db.Column', 'db.Column', (['db.Boolean'], {'default': '(0)', 'doc': '"""项目是否删除的标记"""'}), "(db.Boolean, default=0, doc='项目是否删除的标记')\n", (718, 758), False, 'from PyScraper.server.extensions import db\n'), ((911, 977), 'PyScraper.server.extensions.db.Column', 'db.Column', (['db.TIMESTAMP'], {'default': 'datetime.datetime.now', 'doc': '"""创建时间"""'}), "(db.TIMESTAMP, default=datetime.datetime.now, doc='创建时间')\n", (920, 977), False, 'from PyScraper.server.extensions import db\n'), ((410, 424), 'PyScraper.server.extensions.db.String', 'db.String', (['(191)'], {}), '(191)\n', (419, 424), False, 'from PyScraper.server.extensions import db\n'), ((581, 595), 'PyScraper.server.extensions.db.String', 'db.String', (['(191)'], {}), '(191)\n', (590, 595), False, 'from PyScraper.server.extensions import db\n'), ((632, 646), 'PyScraper.server.extensions.db.String', 'db.String', (['(191)'], {}), '(191)\n', (641, 646), False, 'from PyScraper.server.extensions import db\n'), ((821, 874), 'sqlalchemy.text', 'text', (['"""CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"""'], {}), "('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP')\n", (825, 874), False, 'from sqlalchemy import text\n')]
import os from bottle import Bottle, static_file, run HERE = os.path.abspath(os.path.dirname(__file__)) STATIC = os.path.join(HERE, 'static') app = Bottle() @app.route('/') @app.route('/<filename:path>') def serve(filename='index.html'): return static_file(filename, root=STATIC) if __name__ == '__main__': run(app=app, host='localhost', port=8080)
[ "bottle.static_file", "bottle.Bottle", "os.path.join", "os.path.dirname", "bottle.run" ]
[((114, 142), 'os.path.join', 'os.path.join', (['HERE', '"""static"""'], {}), "(HERE, 'static')\n", (126, 142), False, 'import os\n'), ((150, 158), 'bottle.Bottle', 'Bottle', ([], {}), '()\n', (156, 158), False, 'from bottle import Bottle, static_file, run\n'), ((78, 103), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (93, 103), False, 'import os\n'), ((253, 287), 'bottle.static_file', 'static_file', (['filename'], {'root': 'STATIC'}), '(filename, root=STATIC)\n', (264, 287), False, 'from bottle import Bottle, static_file, run\n'), ((321, 362), 'bottle.run', 'run', ([], {'app': 'app', 'host': '"""localhost"""', 'port': '(8080)'}), "(app=app, host='localhost', port=8080)\n", (324, 362), False, 'from bottle import Bottle, static_file, run\n')]
import unittest from model.game import Game class GameTest(unittest.TestCase): def setUp(self): self.game = Game() def test_next_player(self): first_player = self.game.next_player self.game.turn(0, 0) second_player = self.game.next_player self.game.turn(1, 0) self.assertEqual(first_player, self.game.next_player) self.game.turn(0, 1) self.assertEqual(second_player, self.game.next_player) def test_turn_input_checked(self): with self.assertRaises(AssertionError): self.game.turn(-1, -1) with self.assertRaises(AssertionError): self.game.turn(3, 3) def test_turn_cell_occupied(self): self.game.turn(0, 0) with self.assertRaises(AssertionError): self.game.turn(0, 0)
[ "model.game.Game" ]
[((122, 128), 'model.game.Game', 'Game', ([], {}), '()\n', (126, 128), False, 'from model.game import Game\n')]
# -*-coding:utf-8 -*- import pymongo from pyramid.config import Configurator from pyramid.events import subscriber from pyramid.events import NewRequest from pyramid.request import Request from pyramid.authentication import AuthTktAuthenticationPolicy from pyramid.authorization import ACLAuthorizationPolicy from wf_blog.security import groupfinder from wf_blog.model import User, RootFactory from wf_blog import routers from pyramid.threadlocal import get_current_registry from pyramid.decorator import reify from pyramid.request import Request from pyramid.security import unauthenticated_userid from pyramid_beaker import session_factory_from_settings class RequestWithUserAttribute(Request): @reify def user(self): userid = unauthenticated_userid(self) if userid is not None: settings = get_current_registry().settings user = User.get_user(self.mongodb, userid) return user def main(global_config, **settings): """ This function returns a WSGI application. """ # auth settings = dict(settings) settings.setdefault('jinja2.i18n.domain', 'wf_blog') authn_policy = AuthTktAuthenticationPolicy(settings['security'], callback=groupfinder) authz_policy = ACLAuthorizationPolicy() session_factory = session_factory_from_settings(settings) config = Configurator(session_factory=session_factory, settings=settings, root_factory=RootFactory) config.set_authentication_policy(authn_policy) config.set_authorization_policy(authz_policy) # jinja2 from pyramid_jinja2 import renderer_factory config.include('pyramid_jinja2') config.add_jinja2_search_path("templates") config.add_renderer('.html', renderer_factory) config.add_static_view('static', 'wf_blog:static', cache_max_age=3600) # bind user to request config.set_request_factory(RequestWithUserAttribute) # set routers and views routers.includeme(config) config.scan("wf_blog.views") # MongoDB def add_mongo_db(event): settings = event.request.registry.settings url = settings['mongodb.url'] db_name = settings['mongodb.db_name'] db = 
settings['mongodb_conn'][db_name] db.authenticate(settings['mongodb.db_user'], settings['mongodb.passwd']) event.request.mongodb = db db_uri = settings['mongodb.url'] MongoDB = pymongo.Connection conn = MongoDB(db_uri) config.registry.settings['mongodb_conn'] = conn config.add_subscriber(add_mongo_db, NewRequest) if 'pyramid_debugtoolbar' in set(settings.values()): class MongoDB(pymongo.Connection): def __html__(self): return 'MongoDB: <b>{}></b>'.format(self) application = config.make_wsgi_app() return application
[ "pyramid.security.unauthenticated_userid", "pyramid.threadlocal.get_current_registry", "pyramid.authentication.AuthTktAuthenticationPolicy", "pyramid_beaker.session_factory_from_settings", "pyramid.config.Configurator", "pyramid.authorization.ACLAuthorizationPolicy", "wf_blog.model.User.get_user", "wf...
[((1157, 1228), 'pyramid.authentication.AuthTktAuthenticationPolicy', 'AuthTktAuthenticationPolicy', (["settings['security']"], {'callback': 'groupfinder'}), "(settings['security'], callback=groupfinder)\n", (1184, 1228), False, 'from pyramid.authentication import AuthTktAuthenticationPolicy\n'), ((1248, 1272), 'pyramid.authorization.ACLAuthorizationPolicy', 'ACLAuthorizationPolicy', ([], {}), '()\n', (1270, 1272), False, 'from pyramid.authorization import ACLAuthorizationPolicy\n'), ((1296, 1335), 'pyramid_beaker.session_factory_from_settings', 'session_factory_from_settings', (['settings'], {}), '(settings)\n', (1325, 1335), False, 'from pyramid_beaker import session_factory_from_settings\n'), ((1349, 1443), 'pyramid.config.Configurator', 'Configurator', ([], {'session_factory': 'session_factory', 'settings': 'settings', 'root_factory': 'RootFactory'}), '(session_factory=session_factory, settings=settings,\n root_factory=RootFactory)\n', (1361, 1443), False, 'from pyramid.config import Configurator\n'), ((1932, 1957), 'wf_blog.routers.includeme', 'routers.includeme', (['config'], {}), '(config)\n', (1949, 1957), False, 'from wf_blog import routers\n'), ((750, 778), 'pyramid.security.unauthenticated_userid', 'unauthenticated_userid', (['self'], {}), '(self)\n', (772, 778), False, 'from pyramid.security import unauthenticated_userid\n'), ((884, 919), 'wf_blog.model.User.get_user', 'User.get_user', (['self.mongodb', 'userid'], {}), '(self.mongodb, userid)\n', (897, 919), False, 'from wf_blog.model import User, RootFactory\n'), ((833, 855), 'pyramid.threadlocal.get_current_registry', 'get_current_registry', ([], {}), '()\n', (853, 855), False, 'from pyramid.threadlocal import get_current_registry\n')]
# #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ MIT License Copyright (c) 2019 magnusoy Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from communication import SerialCommunication from joystick import Joystick # ps4Controller = Joystick() arduino = SerialCommunication(port="COM10", baudrate=115200) if __name__ == "__main__": while(arduino.isConnected()): # ps4Controller.updateRate(20) # buttonStates = ps4Controller.readButtons() #buttonValues = [buttonStates[0], # buttonStates[1], # buttonStates[2], # buttonStates[3], # buttonStates[4]] buttonValues = [1, 1, 0, 0, 0] data = ','.join(map(str, buttonValues)) arduino.sendOutputStream(data) print(arduino.readInputStream())
[ "communication.SerialCommunication" ]
[((1305, 1355), 'communication.SerialCommunication', 'SerialCommunication', ([], {'port': '"""COM10"""', 'baudrate': '(115200)'}), "(port='COM10', baudrate=115200)\n", (1324, 1355), False, 'from communication import SerialCommunication\n')]
# -*- coding: utf-8 -*- # Review Heatmap Add-on for Anki # # Copyright (C) 2016-2018 <NAME>. <https//glutanimate.com/> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version, with the additions # listed at the end of the accompanied license file. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # NOTE: This program is subject to certain additional terms pursuant to # Section 7 of the GNU Affero General Public License. You should have # received a copy of these additional terms immediately following the # terms and conditions of the GNU Affero General Public License which # accompanied this program. # # If not, please request a copy through one of the means of contact # listed here: <https://glutanimate.com/contact/>. # # Any modifications to this file must keep this entire header intact. """ Components related to gathering and analyzing user activity """ from __future__ import (absolute_import, division, print_function, unicode_literals) import datetime from aqt import mw from anki.utils import ids2str from .libaddon.platform import ANKI21 __all__ = ["ActivityReporter"] class ActivityReporter(object): def __init__(self, col, config, whole=False): self.col = col self.config = config # NOTE: Refactor the following instance variables if we # ever decide to persist ActivityReporter objects across # multiple invocations (e.g. 
to cache results) self.whole = whole self.offset = self._getColOffset() self.today = self._getToday(self.offset) # Public API ######################################################################### def getData(self, limhist=None, limfcst=None, mode="reviews"): time_limits = self._getTimeLimits(limhist, limfcst) if mode == "reviews": return self._getActivity(**self._reviewsData(time_limits)) else: raise NotImplementedError( "activity mode {} not implemented".format(mode)) # Activity calculations ######################################################################### # General def _getActivity(self, history, forecast={}): if not history: return None first_day = history[0][0] if history else None last_day = forecast[-1][0] if forecast else None # Stats: cumulative activity and streaks streak_max = streak_cur = streak_last = 0 current = total = 0 for idx, item in enumerate(history): current += 1 timestamp, activity = item try: next_timestamp = history[idx+1][0] except IndexError: # last item streak_last = current next_timestamp = None if timestamp + 86400 != next_timestamp: # >1 day gap. streak over. if current > streak_max: streak_max = current current = 0 total += activity days_learned = idx # Stats: current streak if history[-1][0] in (self.today, self.today - 86400): # last recorded date today or yesterday? streak_cur = streak_last # Stats: average count on days with activity avg_cur = int(round(total / max(days_learned, 1))) # Stats: percentage of days with activity # # NOTE: days_total is based on first recorded revlog entry, i.e. it is # not the grand total of days since collection creation date / whatever # history limits the user might have set. This value seems more # desirable and motivating than the raw percentage of days learned # in the date inclusion period. 
days_total = (self.today - first_day) / 86400 if days_total == 0: pdays = 100 # review history only extends to yesterday else: pdays = int(round((days_learned / days_total) * 100)) # Compose activity data activity = dict(history + forecast) if history[-1][0] == self.today: # history takes precedence for today activity[self.today] = history[-1][1] return { "activity": activity, # individual cal-heatmap dates need to be in ms: "start": first_day * 1000 if first_day else None, "stop": last_day * 1000 if last_day else None, "today": self.today * 1000, "offset": self.offset, "stats": { "streak_max": { "type": "streak", "value": streak_max }, "streak_cur": { "type": "streak", "value": streak_cur }, "pct_days_active": { "type": "percentage", "value": pdays }, "activity_daily_avg": { "type": "cards", "value": avg_cur } } } # Mode-specific def _reviewsData(self, time_limits): return { "history": self._cardsDone(start=time_limits[0]), "forecast": self._cardsDue(start=self.today, stop=time_limits[1]) } # Collection properties ######################################################################### def _getColOffset(self): """ Return daily scheduling cutoff time in hours """ if ANKI21 and self.col.schedVer() == 2: return self.col.conf.get("rollover", 4) start_date = datetime.datetime.fromtimestamp(self.col.crt) return start_date.hour @staticmethod def daystartEpoch(timestr, is_timestamp=True, offset=0): """ Convert strftime date string into unix timestamp of 00:00 UTC """ # Use db query instead of Python time-related modules to guarantee # consistency with rest of activity data (also: Anki does not seem # to ship 'pytz' by default, and 'calendar' might be removed from # packaging at some point, as Anki's code does not directly depend # on it) offset = " '-{} hours', ".format(offset) if offset else "" unixepoch = " 'unixepoch', " if is_timestamp else "" cmd = """ SELECT CAST(STRFTIME('%s', '{timestr}', {unixepoch} {offset} 'localtime', 'start of day') AS 
int)""".format(timestr=timestr, unixepoch=unixepoch, offset=offset) return mw.col.db.scalar(cmd) def _getToday(self, offset): """ Return unix epoch timestamp in seconds for today (00:00 UTC) """ return self.daystartEpoch("now", is_timestamp=False, offset=offset) # Time limits ######################################################################### def _getTimeLimits(self, limhist=None, limfcst=None): conf = self.config["synced"] if limhist is not None: history_start = self._daysFromToday(-limhist) else: history_start = self._getConfHistoryLimit( conf["limhist"], conf["limdate"]) if limfcst is not None: forecast_stop = self._daysFromToday(limfcst) else: forecast_stop = self._getConfForecastLimit( conf["limfcst"]) return (history_start, forecast_stop) def _getConfHistoryLimit(self, limit_days, limit_date): if limit_days is None and limit_date is None: return None if limit_days: limit_days_date = self._daysFromToday(-limit_days) else: limit_days_date = 0 limit_date = self.daystartEpoch(limit_date) if limit_date else None if (not limit_date or limit_date == self.daystartEpoch(self.col.crt)): # ignore zero value or default value limit_date = 0 else: limit_date = limit_date # choose most restricting limit return max(limit_days_date, limit_date) or None def _getConfForecastLimit(self, limit_days): if not limit_days: return None return self._daysFromToday(limit_days) def _daysFromToday(self, days): return self.today + 86400 * days # Deck limits ######################################################################### def _validDecks(self, excluded): all_excluded = [] for did in excluded: children = [d[1] for d in self.col.decks.children(did)] all_excluded.extend(children) all_excluded.extend(excluded) return [d['id'] for d in self.col.decks.all() if d['id'] not in all_excluded] def _didLimit(self): excluded_dids = self.config["synced"]["limdecks"] if self.whole: if excluded_dids: dids = self._validDecks(excluded_dids) else: dids = [d['id'] for d in self.col.decks.all()] 
else: dids = self.col.decks.active() return ids2str(dids) def _revlogLimit(self): excluded_dids = self.config["synced"]["limdecks"] ignore_deleted = self.config["synced"]["limcdel"] if self.whole: if excluded_dids: dids = self._validDecks(excluded_dids) elif ignore_deleted: # Limiting log entries to cids with assigned dids automatically # excludes deleted entries. In cases where we do not use a deck # limit we specify the following instead: return "cid IN (SELECT id FROM cards)" else: return "" else: dids = self.col.decks.active() return ("cid IN (SELECT id FROM cards WHERE did IN %s)" % ids2str(dids)) # Database queries for user activity ######################################################################### def _cardsDue(self, start=None, stop=None): # start, stop: timestamps in seconds. Set to None for unlimited. # start: inclusive; stop: exclusive lim = "" if start is not None: lim += " AND day >= {}".format(start) if stop is not None: lim += " AND day < {}".format(stop) cmd = """ SELECT STRFTIME('%s', 'now', 'localtime', 'start of day') + (due - :today) * 86400 AS day, -COUNT() -- negative to support heatmap legend FROM cards WHERE did IN {} AND queue IN (2,3) {} GROUP BY day ORDER BY day""".format(self._didLimit(), lim) return self.col.db.all(cmd, today=self.col.sched.today) def _cardsDone(self, start=None): """ start: timestamp in seconds to start reporting from Group revlog entries by day while taking local timezone and DST settings into account. Return as unix timestamps of UTC day start (00:00:00 UTC+0 of each day) We perform the grouping here instead of passing the raw data on to cal-heatmap because of performance reasons (user revlogs can easily reach >100K entries). Grouping-by-day needs to be timezone-aware to assign the recorded timestamps to the correct day. 
For that reason we include the 'localtime' strftime modifier, even though it does come at a performance penalty """ offset = self.offset * 3600 lims = [] if start is not None: lims.append("day >= {}".format(start)) deck_limit = self._revlogLimit() if deck_limit: lims.append(deck_limit) lim = "WHERE " + " AND ".join(lims) if lims else "" cmd = """ SELECT CAST(STRFTIME('%s', id / 1000 - {}, 'unixepoch', 'localtime', 'start of day') AS int) AS day, COUNT() FROM revlog {} GROUP BY day ORDER BY day""".format(offset, lim) return self.col.db.all(cmd)
[ "aqt.mw.col.db.scalar", "anki.utils.ids2str", "datetime.datetime.fromtimestamp" ]
[((6205, 6250), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['self.col.crt'], {}), '(self.col.crt)\n', (6236, 6250), False, 'import datetime\n'), ((7189, 7210), 'aqt.mw.col.db.scalar', 'mw.col.db.scalar', (['cmd'], {}), '(cmd)\n', (7205, 7210), False, 'from aqt import mw\n'), ((9756, 9769), 'anki.utils.ids2str', 'ids2str', (['dids'], {}), '(dids)\n', (9763, 9769), False, 'from anki.utils import ids2str\n'), ((10512, 10525), 'anki.utils.ids2str', 'ids2str', (['dids'], {}), '(dids)\n', (10519, 10525), False, 'from anki.utils import ids2str\n')]
import os import pandas as pd import numpy as np from collections import Counter, defaultdict def train_from_file(dir_path, leap_limit=15): file_list = os.listdir(dir_path) pig_format = [ "id", "onset", "offset", "pitch", "onsetvel", "offsetvel", "hand", "fingernum", ] right_init = Counter() right_transition_count = Counter() right_emission = defaultdict(Counter) left_init = Counter() left_transition_count = Counter() left_emission = defaultdict(Counter) for idx, file in enumerate(file_list): path = dir_path + "/" + file data_size = len(file_list) print(f"Processing: {path} ({idx + 1}/{data_size})") data = pd.read_csv(path, sep="\t", header=0, names=pig_format) if data.fingernum.dtype == object: data.fingernum = data.fingernum.apply( lambda x: x.split("_")[0] ).astype("int") left_hand = data[data.fingernum < 0] right_hand = data[data.fingernum > 0] init, transition, emission = count_fingering( right_hand, limit=leap_limit ) right_init += init right_transition_count += transition for k, counter in emission.items(): right_emission[k].update(counter) init, transition, emission = count_fingering( left_hand, limit=leap_limit ) left_init += init left_transition_count += transition for k, counter in emission.items(): left_emission[k].update(counter) return (right_init, right_transition_count, right_emission, left_init, left_transition_count, left_emission) def pitch_to_key(pitch: str): posx = {"C": 0, "D": 1, "E": 2, "F": 3, "G": 4, "A": 5, "B": 6}[pitch[0]] posy = 0 if pitch[1].isdigit(): posx += (int(pitch[1]) - 4) * 7 elif pitch[1] == "#": if pitch[2] == "#": posx += (int(pitch[3]) - 4) * 7 + 1 else: posy = 1 posx += (int(pitch[2]) - 4) * 7 elif pitch[1] == "b" or pitch[1] == "-": if pitch[2] == "b" or pitch[2] == "-": posx += (int(pitch[3]) - 4) * 7 else: posy = 1 posx += (int(pitch[2]) - 4) * 7 - 1 return (posx, posy) def note_to_diff(fingering_data, limit=15): pos_x, pos_y = zip(*fingering_data.pitch.map(pitch_to_key)) series_x = pd.Series(pos_x) series_y = 
pd.Series(pos_y) diffs = list( zip( series_x.diff() .fillna(0, downcast="infer") .apply(lambda x: limit if x > limit else x) .apply(lambda x: -limit if x < -limit else x), series_y.diff().fillna(0, downcast="infer"), ) ) return diffs def count_fingering(fingering_data, limit=15): hidden_state = list( zip( fingering_data.fingernum.shift(fill_value=0), fingering_data.fingernum, ) ) pos_x, pos_y = zip(*fingering_data.pitch.map(pitch_to_key)) model = pd.DataFrame( {"hidden_state": hidden_state, "pos_x": pos_x, "pos_y": pos_y} ) model["pos_diff"] = list( zip( model.pos_x.diff() .fillna(0, downcast="infer") .apply(lambda x: limit if x > limit else x) .apply(lambda x: -limit if x < -limit else x), model.pos_y.diff().fillna(0, downcast="infer"), ) ) # First observation only init = Counter([model.hidden_state[0][1]]) # Without first observation transition = Counter(model.hidden_state[1:]) # Emission emission = { state: Counter(model[model.hidden_state == state].pos_diff) for state in set(model.hidden_state[1:]) } return (init, transition, Counter(emission)) def normalize(v): return v / v.sum(axis=0) def init_count_to_prob(init_count): init_prob = np.zeros(5) for key, value in init_count.items(): if key < 0: init_prob[-key - 1] = value else: init_prob[key - 1] = value return normalize(init_prob) def transition_count_to_prob(transition_count): transition_prob = np.zeros((5, 5)) for key, value in transition_count.items(): if key[0] < 0 and key[1] < 0: transition_prob[-key[0] - 1, -key[1] - 1] = value else: transition_prob[key[0] - 1, key[1] - 1] = value return np.apply_along_axis(normalize, axis=1, arr=transition_prob) def series_to_matrix(emission_prob): out_prob = np.zeros((5, 5)) for key, value in emission_prob.items(): if key[0] < 0 and key[1] < 0: out_prob[-key[0] - 1, -key[1] - 1] = value else: out_prob[key[0] - 1, key[1] - 1] = value return out_prob def emission_count_to_prob(emission_count): prob_df = ( pd.DataFrame.from_dict(emission_count).fillna(0, downcast="infer") 
+ 1 ).apply(normalize, axis=0) prob_dict = { out: series_to_matrix(prob_df.loc[out]) for out in prob_df.index } return prob_dict def decoding(init_prob, transition, out_prob, observations, hand): n_state = len(init_prob) obs_len = len(observations) delta = np.zeros((n_state, obs_len + 1)) psi = np.zeros((n_state, obs_len), dtype=int) delta[:, 0] = np.log(init_prob) for i, (pitch, time) in enumerate( zip(observations.pitch_diff, observations.time_diff) ): delta_mat = np.tile(delta[:, i], (n_state, 1)).transpose() prod = delta_mat + np.log(transition) + np.log(out_prob[pitch]) if time < 0.03: if hand == "R": if pitch[0] > 0: prod[np.tril_indices(n_state)] -= 5 else: prod[np.triu_indices(n_state)] -= 5 else: if pitch[0] > 0: prod[np.triu_indices(n_state)] -= 5 else: prod[np.tril_indices(n_state)] -= 5 delta[:, i + 1] = np.amax(prod, axis=0) psi[:, i] = prod.argmax(axis=0) + 1 opt_path = [np.argmax(delta[:, obs_len]) + 1] for i in range(obs_len - 1, -1, -1): opt_path.append(psi[opt_path[-1] - 1, i]) return opt_path[::-1]
[ "pandas.Series", "numpy.tile", "os.listdir", "numpy.triu_indices", "pandas.read_csv", "numpy.log", "numpy.argmax", "pandas.DataFrame.from_dict", "collections.Counter", "numpy.zeros", "numpy.apply_along_axis", "collections.defaultdict", "pandas.DataFrame", "numpy.tril_indices", "numpy.ama...
[((158, 178), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (168, 178), False, 'import os\n'), ((367, 376), 'collections.Counter', 'Counter', ([], {}), '()\n', (374, 376), False, 'from collections import Counter, defaultdict\n'), ((406, 415), 'collections.Counter', 'Counter', ([], {}), '()\n', (413, 415), False, 'from collections import Counter, defaultdict\n'), ((437, 457), 'collections.defaultdict', 'defaultdict', (['Counter'], {}), '(Counter)\n', (448, 457), False, 'from collections import Counter, defaultdict\n'), ((475, 484), 'collections.Counter', 'Counter', ([], {}), '()\n', (482, 484), False, 'from collections import Counter, defaultdict\n'), ((513, 522), 'collections.Counter', 'Counter', ([], {}), '()\n', (520, 522), False, 'from collections import Counter, defaultdict\n'), ((543, 563), 'collections.defaultdict', 'defaultdict', (['Counter'], {}), '(Counter)\n', (554, 563), False, 'from collections import Counter, defaultdict\n'), ((2458, 2474), 'pandas.Series', 'pd.Series', (['pos_x'], {}), '(pos_x)\n', (2467, 2474), True, 'import pandas as pd\n'), ((2490, 2506), 'pandas.Series', 'pd.Series', (['pos_y'], {}), '(pos_y)\n', (2499, 2506), True, 'import pandas as pd\n'), ((3089, 3165), 'pandas.DataFrame', 'pd.DataFrame', (["{'hidden_state': hidden_state, 'pos_x': pos_x, 'pos_y': pos_y}"], {}), "({'hidden_state': hidden_state, 'pos_x': pos_x, 'pos_y': pos_y})\n", (3101, 3165), True, 'import pandas as pd\n'), ((3528, 3563), 'collections.Counter', 'Counter', (['[model.hidden_state[0][1]]'], {}), '([model.hidden_state[0][1]])\n', (3535, 3563), False, 'from collections import Counter, defaultdict\n'), ((3614, 3645), 'collections.Counter', 'Counter', (['model.hidden_state[1:]'], {}), '(model.hidden_state[1:])\n', (3621, 3645), False, 'from collections import Counter, defaultdict\n'), ((3955, 3966), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (3963, 3966), True, 'import numpy as np\n'), ((4226, 4242), 'numpy.zeros', 'np.zeros', (['(5, 5)'], 
{}), '((5, 5))\n', (4234, 4242), True, 'import numpy as np\n'), ((4477, 4536), 'numpy.apply_along_axis', 'np.apply_along_axis', (['normalize'], {'axis': '(1)', 'arr': 'transition_prob'}), '(normalize, axis=1, arr=transition_prob)\n', (4496, 4536), True, 'import numpy as np\n'), ((4591, 4607), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (4599, 4607), True, 'import numpy as np\n'), ((5269, 5301), 'numpy.zeros', 'np.zeros', (['(n_state, obs_len + 1)'], {}), '((n_state, obs_len + 1))\n', (5277, 5301), True, 'import numpy as np\n'), ((5312, 5351), 'numpy.zeros', 'np.zeros', (['(n_state, obs_len)'], {'dtype': 'int'}), '((n_state, obs_len), dtype=int)\n', (5320, 5351), True, 'import numpy as np\n'), ((5370, 5387), 'numpy.log', 'np.log', (['init_prob'], {}), '(init_prob)\n', (5376, 5387), True, 'import numpy as np\n'), ((758, 813), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': '"""\t"""', 'header': '(0)', 'names': 'pig_format'}), "(path, sep='\\t', header=0, names=pig_format)\n", (769, 813), True, 'import pandas as pd\n'), ((3694, 3746), 'collections.Counter', 'Counter', (['model[model.hidden_state == state].pos_diff'], {}), '(model[model.hidden_state == state].pos_diff)\n', (3701, 3746), False, 'from collections import Counter, defaultdict\n'), ((3833, 3850), 'collections.Counter', 'Counter', (['emission'], {}), '(emission)\n', (3840, 3850), False, 'from collections import Counter, defaultdict\n'), ((6066, 6087), 'numpy.amax', 'np.amax', (['prod'], {'axis': '(0)'}), '(prod, axis=0)\n', (6073, 6087), True, 'import numpy as np\n'), ((5611, 5634), 'numpy.log', 'np.log', (['out_prob[pitch]'], {}), '(out_prob[pitch])\n', (5617, 5634), True, 'import numpy as np\n'), ((6149, 6177), 'numpy.argmax', 'np.argmax', (['delta[:, obs_len]'], {}), '(delta[:, obs_len])\n', (6158, 6177), True, 'import numpy as np\n'), ((5516, 5550), 'numpy.tile', 'np.tile', (['delta[:, i]', '(n_state, 1)'], {}), '(delta[:, i], (n_state, 1))\n', (5523, 5550), True, 'import numpy as 
np\n'), ((5590, 5608), 'numpy.log', 'np.log', (['transition'], {}), '(transition)\n', (5596, 5608), True, 'import numpy as np\n'), ((4904, 4942), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['emission_count'], {}), '(emission_count)\n', (4926, 4942), True, 'import pandas as pd\n'), ((5745, 5769), 'numpy.tril_indices', 'np.tril_indices', (['n_state'], {}), '(n_state)\n', (5760, 5769), True, 'import numpy as np\n'), ((5823, 5847), 'numpy.triu_indices', 'np.triu_indices', (['n_state'], {}), '(n_state)\n', (5838, 5847), True, 'import numpy as np\n'), ((5930, 5954), 'numpy.triu_indices', 'np.triu_indices', (['n_state'], {}), '(n_state)\n', (5945, 5954), True, 'import numpy as np\n'), ((6008, 6032), 'numpy.tril_indices', 'np.tril_indices', (['n_state'], {}), '(n_state)\n', (6023, 6032), True, 'import numpy as np\n')]
import cocos import pyglet from world import Terrain TERRAIN_TEXTURES = {Terrain.GRASS: 'grass.png', Terrain.DIRT: 'dirt.png', Terrain.WATER: 'water.png', Terrain.MOUNTAIN: 'mountain.png'} CELL_SIZE = 32 DEFAULT_CHARACTER = pyglet.resource.image('res/img/dummy.png') class WorldMap(cocos.tiles.RectMapLayer): def __init__(self, world, id=None): self.world = world cells = self._generate_cells(world, cell_size=CELL_SIZE) super(WorldMap, self).__init__(id=None, tw=CELL_SIZE, th=CELL_SIZE, cells=cells) def _generate_cells(self, world, cell_size): tileset = cocos.tiles.TileSet(0, None) for cell_type in list(Terrain): image_path = 'res/img/tiles/{}'.format(TERRAIN_TEXTURES[cell_type]) image = pyglet.resource.image(image_path) tileset.add(image=image, id=cell_type, properties={}) cells = [] for i in range(world.width): column = [] for j in range(world.height): tile = tileset[world.cells[i][j]] cell = cocos.tiles.RectCell(i, j, cell_size, cell_size, properties={}, tile=tile) column.append(cell) cells.append(column) return cells class CharacterView2(cocos.layer.ScrollableLayer): def __init__(self, character): super(CharacterView2, self).__init__() self.character = character self.add(cocos.sprite.Sprite(DEFAULT_CHARACTER, position=(character.x * CELL_SIZE, character.y * CELL_SIZE), anchor=(CELL_SIZE / 2, CELL_SIZE / 2) )) # All the views below are from the old pyglet version and should be removed. 
class WorldView: def __init__(self, window, world): self.window = window self.world = world self.textures = {terrain_type: pyglet.resource.image('res/img/tiles/{}'.format(TERRAIN_TEXTURES[terrain_type])) for terrain_type in list(Terrain)} self.cursor_position = None self.selected_cell = None self.map_sprites = [] self.batch = pyglet.graphics.Batch() def set_cursor_position(self, x, y): self.cursor_position = (x, y) self.selected_cell = (x // CELL_SIZE, y // CELL_SIZE) def generate_sprites(self): min_x = max(0, self.window.x // CELL_SIZE) min_y = max(0, self.window.y // CELL_SIZE) view_cell_width = (self.window.width // CELL_SIZE) + 1 view_cell_height = (self.window.height // CELL_SIZE) + 1 max_x = min(min_x + view_cell_width, self.world.width - 1) max_y = min(min_y + view_cell_height, self.world.height - 1) map_sprites = [] for x in range(min_x, max_x + 1): for y in range(min_y, max_y + 1): sprite = pyglet.sprite.Sprite( self.textures[self.world.cells[x][y]], x=x * CELL_SIZE, y=y * CELL_SIZE, batch=self.batch) map_sprites.append(sprite) return map_sprites def draw(self): # This generates the sprites using the current batch self.map_sprites = self.generate_sprites() self.batch.draw() def draw_selected_cell(self): # TODO: make this use a batch? 
# Drawing rectangle around the currently selected cell if self.selected_cell is not None: x, y = self.selected_cell[0] * CELL_SIZE, self.selected_cell[1] * CELL_SIZE dx = dy = CELL_SIZE vertices = [x, y, x + dx, y, x + dx, y + dy, x, y + dy] pyglet.graphics.draw( 4, pyglet.gl.GL_LINE_LOOP, ('v2f', vertices), ('c3B', (0, 0, 255, 0, 255, 0, 255, 0, 0, 255, 255, 255))) class CharacterView: def __init__(self, window, batch, character): self.window = window self.character = character self.sprite = pyglet.sprite.Sprite( img=DEFAULT_CHARACTER, x=self.character.x, y=self.character.y, batch=batch) def draw(self): x = self.character.x * CELL_SIZE y = self.character.y * CELL_SIZE self.sprite.x = x self.sprite.y = y if self.character.goal is not None: goal_x = self.character.goal[0] * CELL_SIZE goal_y = self.character.goal[1] * CELL_SIZE vertices = [x, y, goal_x, goal_y] vertices = [v + CELL_SIZE / 2 for v in vertices] pyglet.graphics.draw( 2, pyglet.gl.GL_LINES, ('v2f', vertices), ('c4B', (255, 255, 255, 125) * 2)) class CharactersView: def __init__(self, window, game): self.window = window self.batch = pyglet.graphics.Batch() self.game = game self.cur_characters = set(self.game.characters) self.views = self._generate_views(self.cur_characters) def draw(self): if self.cur_characters != set(self.game.characters): self.cur_characters = set(self.game.characters) self.character_views = self._generate_views(self.cur_characters) for view in self.views: view.draw() self.batch.draw() def _generate_views(self, characters): return [CharacterView(self.window, self.batch, c) for c in characters]
[ "pyglet.resource.image", "cocos.tiles.TileSet", "cocos.tiles.RectCell", "pyglet.graphics.Batch", "cocos.sprite.Sprite", "pyglet.sprite.Sprite", "pyglet.graphics.draw" ]
[((246, 288), 'pyglet.resource.image', 'pyglet.resource.image', (['"""res/img/dummy.png"""'], {}), "('res/img/dummy.png')\n", (267, 288), False, 'import pyglet\n'), ((623, 651), 'cocos.tiles.TileSet', 'cocos.tiles.TileSet', (['(0)', 'None'], {}), '(0, None)\n', (642, 651), False, 'import cocos\n'), ((2231, 2254), 'pyglet.graphics.Batch', 'pyglet.graphics.Batch', ([], {}), '()\n', (2252, 2254), False, 'import pyglet\n'), ((4017, 4118), 'pyglet.sprite.Sprite', 'pyglet.sprite.Sprite', ([], {'img': 'DEFAULT_CHARACTER', 'x': 'self.character.x', 'y': 'self.character.y', 'batch': 'batch'}), '(img=DEFAULT_CHARACTER, x=self.character.x, y=self.\n character.y, batch=batch)\n', (4037, 4118), False, 'import pyglet\n'), ((4785, 4808), 'pyglet.graphics.Batch', 'pyglet.graphics.Batch', ([], {}), '()\n', (4806, 4808), False, 'import pyglet\n'), ((792, 825), 'pyglet.resource.image', 'pyglet.resource.image', (['image_path'], {}), '(image_path)\n', (813, 825), False, 'import pyglet\n'), ((1439, 1582), 'cocos.sprite.Sprite', 'cocos.sprite.Sprite', (['DEFAULT_CHARACTER'], {'position': '(character.x * CELL_SIZE, character.y * CELL_SIZE)', 'anchor': '(CELL_SIZE / 2, CELL_SIZE / 2)'}), '(DEFAULT_CHARACTER, position=(character.x * CELL_SIZE, \n character.y * CELL_SIZE), anchor=(CELL_SIZE / 2, CELL_SIZE / 2))\n', (1458, 1582), False, 'import cocos\n'), ((3697, 3826), 'pyglet.graphics.draw', 'pyglet.graphics.draw', (['(4)', 'pyglet.gl.GL_LINE_LOOP', "('v2f', vertices)", "('c3B', (0, 0, 255, 0, 255, 0, 255, 0, 0, 255, 255, 255))"], {}), "(4, pyglet.gl.GL_LINE_LOOP, ('v2f', vertices), ('c3B',\n (0, 0, 255, 0, 255, 0, 255, 0, 0, 255, 255, 255)))\n", (3717, 3826), False, 'import pyglet\n'), ((4557, 4658), 'pyglet.graphics.draw', 'pyglet.graphics.draw', (['(2)', 'pyglet.gl.GL_LINES', "('v2f', vertices)", "('c4B', (255, 255, 255, 125) * 2)"], {}), "(2, pyglet.gl.GL_LINES, ('v2f', vertices), ('c4B', (255,\n 255, 255, 125) * 2))\n", (4577, 4658), False, 'import pyglet\n'), ((1088, 1162), 
'cocos.tiles.RectCell', 'cocos.tiles.RectCell', (['i', 'j', 'cell_size', 'cell_size'], {'properties': '{}', 'tile': 'tile'}), '(i, j, cell_size, cell_size, properties={}, tile=tile)\n', (1108, 1162), False, 'import cocos\n'), ((2935, 3050), 'pyglet.sprite.Sprite', 'pyglet.sprite.Sprite', (['self.textures[self.world.cells[x][y]]'], {'x': '(x * CELL_SIZE)', 'y': '(y * CELL_SIZE)', 'batch': 'self.batch'}), '(self.textures[self.world.cells[x][y]], x=x * CELL_SIZE,\n y=y * CELL_SIZE, batch=self.batch)\n', (2955, 3050), False, 'import pyglet\n')]
import pymongo import numpy as np from tqdm import tqdm from datetime import datetime, timedelta def mongo_query(**kwargs): """Create a MongoDB query based on a set of conditions.""" query = {} if 'start_date' in kwargs: if not ('CreationDate' in query): query['CreationDate'] = {} query['CreationDate']['$gte'] = kwargs['start_date'] if 'end_date' in kwargs: if not ('CreationDate' in query): query['CreationDate'] = {} query['CreationDate']['$lt'] = kwargs['end_date'] if 'exclude_closed' in kwargs: query['Closed'] = kwargs['exclude_closed'] return query def year_range_query(start_year, end_year, exclude_closed=True): """Returns a MongoDB query returning all posts for a given year.""" query = mongo_query(start_date=datetime(start_year, 1, 1), end_date=datetime(end_year + 1, 1, 1), exclude_closed=exclude_closed) return query def single_day_query(day, month, year, exclude_closed=True): """Returns a MongoDB query returning all posts for a given day.""" start_date = datetime(year, month, day) query = mongo_query(start_date=start_date, end_date=start_date + timedelta(days=10), exclude_closed=exclude_closed) return query class MongoDataset: """Interface between MongoDB and the rest of the Python code.""" def __init__(self, forum='overflow'): try: client = pymongo.MongoClient() except Exception as e: message = """Could not connect to MongoDB client. Make sure to start it by executing: sudo systemctl start mongod """ print(message) raise e self.collection = client.titlewave[f'{forum}.posts'] def get_mongo_ids(self, query): """Fetches the ids of documents matching a query.""" result = self.collection.find(query, {'_id': True}) ids = [row['_id'] for row in result] return ids def batch_update(self, ids, command, batch_size=256, progress_bar=True): """ Execute an update_many command in batches. Parameters: ids - The document ids in the Mongo collection of the documents to be updated. command - The update command to be executed on each document. 
batch_size - The number of documents to update in a single call of update_many. progress_bar - Whether to display a progress bar. """ num_batches = len(ids) // batch_size # Split the array into batches of the specified size, and typecast the ids back to Python integers with tolist. splits = np.array_split(ids, num_batches).tolist() if progress_bar: splits = tqdm(splits) for batch_ids in splits: self.collection.update_many({'_id': {'$in': batch_ids}}, command) def get_partition(self, partition, projection): """ Fetches all documents in a specified partition of the dataset. Parameters: partition - The name of the partition (e.g., "classifier_train") projection - Indicates which fields of the documents to return. """ cursor = self.collection.find({'partition': partition}, projection) return list(cursor) def reset_partitions(self): """Remove the partition field from all documents in the collection.""" self.collection.update_many({'partition': {'$exists': True}}, {'$unset': {'partition': 1}})
[ "datetime.datetime", "tqdm.tqdm", "numpy.array_split", "pymongo.MongoClient", "datetime.timedelta" ]
[((1136, 1162), 'datetime.datetime', 'datetime', (['year', 'month', 'day'], {}), '(year, month, day)\n', (1144, 1162), False, 'from datetime import datetime, timedelta\n'), ((822, 848), 'datetime.datetime', 'datetime', (['start_year', '(1)', '(1)'], {}), '(start_year, 1, 1)\n', (830, 848), False, 'from datetime import datetime, timedelta\n'), ((883, 911), 'datetime.datetime', 'datetime', (['(end_year + 1)', '(1)', '(1)'], {}), '(end_year + 1, 1, 1)\n', (891, 911), False, 'from datetime import datetime, timedelta\n'), ((1516, 1537), 'pymongo.MongoClient', 'pymongo.MongoClient', ([], {}), '()\n', (1535, 1537), False, 'import pymongo\n'), ((2805, 2817), 'tqdm.tqdm', 'tqdm', (['splits'], {}), '(splits)\n', (2809, 2817), False, 'from tqdm import tqdm\n'), ((1256, 1274), 'datetime.timedelta', 'timedelta', ([], {'days': '(10)'}), '(days=10)\n', (1265, 1274), False, 'from datetime import datetime, timedelta\n'), ((2717, 2749), 'numpy.array_split', 'np.array_split', (['ids', 'num_batches'], {}), '(ids, num_batches)\n', (2731, 2749), True, 'import numpy as np\n')]
import sys import os import subprocess import text try: from setuptools import setup, find_packages except ImportError: from distutils.core import setup from distutils.util import convert_path def _find_packages(where='.', exclude=()): """Return a list all Python packages found within directory 'where' 'where' should be supplied as a "cross-platform" (i.e. URL-style) path; it will be converted to the appropriate local path syntax. 'exclude' is a sequence of package names to exclude; '*' can be used as a wildcard in the names, such that 'foo.*' will exclude all subpackages of 'foo' (but not 'foo' itself). """ out = [] stack = [(convert_path(where), '')] while stack: where, prefix = stack.pop(0) for name in os.listdir(where): fn = os.path.join(where, name) if ('.' not in name and os.path.isdir(fn) and os.path.isfile(os.path.join(fn, '__init__.py'))): out.append(prefix+name) stack.append((fn, prefix + name + '.')) for pat in list(exclude)+['ez_setup', 'distribute_setup']: from fnmatch import fnmatchcase out = [item for item in out if not fnmatchcase(item, pat)] return out find_packages = _find_packages PUBLISH_CMD = "python setup.py register sdist bdist_wheel upload" TEST_PUBLISH_CMD = 'python setup.py register -r test sdist bdist_wheel upload -r test' TEST_CMD = 'python run_tests.py' if 'publish' in sys.argv: try: __import__('wheel') except ImportError: print("wheel required. Run `pip install wheel`.") sys.exit(1) status = subprocess.call(PUBLISH_CMD, shell=True) sys.exit(status) if 'publish_test' in sys.argv: try: __import__('wheel') except ImportError: print("wheel required. Run `pip install wheel`.") sys.exit(1) status = subprocess.call(TEST_PUBLISH_CMD, shell=True) sys.exit() if 'run_tests' in sys.argv: try: __import__('nose') except ImportError: print('nose required. 
Run `pip install nose`.') sys.exit(1) status = subprocess.call(TEST_CMD, shell=True) sys.exit(status) def read(fname): with open(fname) as fp: content = fp.read() return content setup( name='textblob', version=text.__version__, description='Simple, Pythonic text processing. Sentiment analysis, ' 'POS tagging, noun phrase parsing, and more.', long_description=(read("README.rst") + '\n\n' + read("HISTORY.rst")), license=read("LICENSE"), author='<NAME>', author_email='<EMAIL>', url='https://github.com/sloria/TextBlob', install_requires=['PyYAML'], packages=find_packages(exclude=('test*', 'text.nltk.test')), package_data={ "text": ["*.txt", "*.xml"], }, classifiers=( 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Natural Language :: English', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.3', "Topic :: Text Processing :: Linguistic", ), tests_require=['nose'], )
[ "os.listdir", "fnmatch.fnmatchcase", "distutils.util.convert_path", "setuptools.find_packages", "os.path.join", "os.path.isdir", "subprocess.call", "sys.exit" ]
[((1748, 1788), 'subprocess.call', 'subprocess.call', (['PUBLISH_CMD'], {'shell': '(True)'}), '(PUBLISH_CMD, shell=True)\n', (1763, 1788), False, 'import subprocess\n'), ((1793, 1809), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (1801, 1809), False, 'import sys\n'), ((1994, 2039), 'subprocess.call', 'subprocess.call', (['TEST_PUBLISH_CMD'], {'shell': '(True)'}), '(TEST_PUBLISH_CMD, shell=True)\n', (2009, 2039), False, 'import subprocess\n'), ((2044, 2054), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2052, 2054), False, 'import sys\n'), ((2234, 2271), 'subprocess.call', 'subprocess.call', (['TEST_CMD'], {'shell': '(True)'}), '(TEST_CMD, shell=True)\n', (2249, 2271), False, 'import subprocess\n'), ((2276, 2292), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (2284, 2292), False, 'import sys\n'), ((2849, 2899), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "('test*', 'text.nltk.test')"}), "(exclude=('test*', 'text.nltk.test'))\n", (2862, 2899), False, 'from setuptools import setup, find_packages\n'), ((1723, 1734), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1731, 1734), False, 'import sys\n'), ((1969, 1980), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1977, 1980), False, 'import sys\n'), ((2208, 2219), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2216, 2219), False, 'import sys\n'), ((839, 856), 'os.listdir', 'os.listdir', (['where'], {}), '(where)\n', (849, 856), False, 'import os\n'), ((727, 746), 'distutils.util.convert_path', 'convert_path', (['where'], {}), '(where)\n', (739, 746), False, 'from distutils.util import convert_path\n'), ((879, 904), 'os.path.join', 'os.path.join', (['where', 'name'], {}), '(where, name)\n', (891, 904), False, 'import os\n'), ((945, 962), 'os.path.isdir', 'os.path.isdir', (['fn'], {}), '(fn)\n', (958, 962), False, 'import os\n'), ((1006, 1037), 'os.path.join', 'os.path.join', (['fn', '"""__init__.py"""'], {}), "(fn, '__init__.py')\n", (1018, 1037), False, 'import os\n'), ((1303, 
1325), 'fnmatch.fnmatchcase', 'fnmatchcase', (['item', 'pat'], {}), '(item, pat)\n', (1314, 1325), False, 'from fnmatch import fnmatchcase\n')]
from __future__ import absolute_import, unicode_literals import warnings from unittest import TestCase from immutable import Immutable, ImmutableFactory warnings.filterwarnings("ignore") class TestImmutableObjectFactory(TestCase): def test_create_empty(self): # unlike a namedtuple, you don't even need a name for this (simpler) obj = ImmutableFactory.create() self.assertEqual(obj.__class__.__name__, 'Immutable') def test_instantiation(self): # you can create via a tuple, an unpacked dict, or both. factory = ImmutableFactory() tup_instantiation = factory.create((('blah', 10),)) self.assertEqual(tup_instantiation.blah, 10) dict_instantiation = factory.create(**{'okie': 'dokie'}) self.assertEqual(dict_instantiation.okie, 'dokie') both_instantiation = factory.create((('blah', 10),), **{'okie': 'dokie'}) self.assertEqual(both_instantiation.items, (('blah', 10), ('okie', 'dokie'))) def test_keys(self): # the keys attr should be included by default. factory = ImmutableFactory() attributes = {'one': 1, 'two': 2} obj = factory.create(**attributes) self.assertEqual(set(obj.keys), {'one', 'two'}) # if given as a tuple, they should keep the given order attributes = (('one', 1), ('two', 2)) obj = factory.create(attributes) self.assertEqual(obj.keys, ('one', 'two')) attributes = {'one': 1, 'two': 2} obj = factory.create(keys=False, **attributes) with self.assertRaises(AttributeError): obj.keys def test_values(self): # the keys attr should be included by default. factory = ImmutableFactory() attributes = {'one': 1, 'two': 2} obj = factory.create(**attributes) self.assertEqual(set(obj.values), {1, 2}) # if given as a tuple, they should keep the given order attributes = (('one', 1), ('two', 2)) obj = factory.create(attributes) self.assertEqual(obj.values, (1, 2)) attributes = {'one': 1, 'two': 2} obj = factory.create(values=False, **attributes) with self.assertRaises(AttributeError): obj.values def test_items(self): # the keys attr should be included by default. 
factory = ImmutableFactory() attributes = {'one': 1, 'two': 2} obj = factory.create(**attributes) self.assertEqual(set(obj.items), {('one', 1), ('two', 2)}) # if given as a tuple, they should keep the given order attributes = (('one', 1), ('two', 2)) obj = factory.create(attributes) self.assertEqual(obj.items, (('one', 1), ('two', 2))) attributes = {'one': 1, 'two': 2} obj = factory.create(items=False, **attributes) with self.assertRaises(AttributeError): obj.items def test_attribute_access(self): # you should be able to access via the dot-operator, or via index # note that keys, values, and items are [-3], [-2], and [-1] factory = ImmutableFactory() attributes = {'hi': 'there'} obj = factory.create(**attributes) self.assertEqual(obj.hi, 'there') self.assertEqual(obj[0], 'there') self.assertEqual(obj[-3], ('hi',)) self.assertEqual(obj[-2], ('there',)) self.assertEqual(obj[-1], (('hi', 'there'),)) def test_change_attribute(self): # you can't! factory = ImmutableFactory() obj = factory.create(**{'change': 'me'}) with self.assertRaisesRegexp(AttributeError, "can't set attribute"): obj.change = 'to this' with self.assertRaisesRegexp(TypeError, "does not support item assignment"): obj[0] = 'to this' class TestImmutable(TestCase): def setUp(self): self.unordered_immutable = Immutable(black='black', white='white', red='red', blue='blue') self.ordered_immutable = Immutable( ('zero', 0), ('one', 1), ('two', 2), ('three', 3) ) def test_create_empty(self): # should be able to instantiate without args. obj = Immutable() self.assertEqual(obj.__class__.__name__, 'Immutable') def test_instantiation(self): # you can create via a tuple, an unpacked dict, or both. 
tup_instantiation = Immutable(('blah', 10)) self.assertEqual(tup_instantiation.blah, 10) dict_instantiation = Immutable(**{'okie': 'dokie'}) self.assertEqual(dict_instantiation.okie, 'dokie') both_instantiation = Immutable(('blah', 10), **{'okie': 'dokie'}) self.assertEqual(list(both_instantiation.items()), [('blah', 10), ('okie', 'dokie')]) def test_keys(self): # the keys() api should be preserved self.assertEqual(set(self.unordered_immutable.keys()), {'black', 'white', 'red', 'blue'}) # if given as a tuple, they should keep the given order self.assertEqual(list(self.ordered_immutable.keys()), ['zero', 'one', 'two', 'three']) def test_values(self): # the values() api should be preserved self.assertEqual(set(self.unordered_immutable.values()), {'black', 'white', 'red', 'blue'}) # if given as a tuple, they should keep the given order self.assertEqual(list(self.ordered_immutable.values()), [0, 1, 2, 3]) def test_items(self): # the items() api should be preserved self.assertEqual(set(self.unordered_immutable.items()), {('black', 'black'), ('white', 'white'), ('red', 'red'), ('blue', 'blue')}) # if given as a tuple, they should keep the given order self.assertEqual(list(self.ordered_immutable.items()), [('zero', 0), ('one', 1), ('two', 2), ('three', 3)]) def test_index(self): # the index() api should be preserved (it acts on the values). self.assertEqual(self.ordered_immutable.index(0), 0) self.assertEqual(self.ordered_immutable.index(1), 1) def test_count(self): # the count() api should be preserved (it acts on the values). obj = Immutable(**{k: 'blah' for k in 'abcdefg'}) self.assertEqual(obj.count('blah'), 7) self.assertEqual(obj.count('nope'), 0) def test_containment(self): # we don't support containment because it's ambiguous regex = 'Containment not implemented.' 
with self.assertRaisesRegexp(Immutable.ImmutableError, regex): 'white' in self.unordered_immutable def test_reversal(self): # we don't support reversal because it's ambiguous regex = 'Reversal not implemented.' with self.assertRaisesRegexp(Immutable.ImmutableError, regex): reversed(self.unordered_immutable) def test_equality(self): # equality works by hashing two immutable instances self.assertFalse(self.ordered_immutable == self.unordered_immutable) args = [('a', 1), ('b', 2)] imm_0 = Immutable(*args) imm_1 = Immutable(*args) self.assertFalse(imm_0 == args) self.assertTrue(imm_0 == imm_1) def test_non_equality_comparisons(self): # equality works by hashing two immutable instances self.assertTrue(self.ordered_immutable != self.unordered_immutable) args = [('a', 1), ('b', 2)] imm_0 = Immutable(*args) imm_1 = Immutable(*args) self.assertTrue(imm_0 != args) self.assertFalse(imm_0 != imm_1) def test_attribute_access(self): # You should be able to access via both getitem or getattr with indices # or keys. self.assertEqual(self.ordered_immutable.zero, 0) self.assertEqual(self.ordered_immutable['zero'], 0) self.assertEqual(self.ordered_immutable[0], 0) self.assertEqual(self.ordered_immutable[-4], 0) def test_change_attribute(self): # you can't! # should not be able to change with key with self.assertRaisesRegexp(Immutable.ImmutableError, 'Cannot set items on Immutable.'): self.ordered_immutable['zero'] = 10 # should not be able to change with index with self.assertRaisesRegexp(Immutable.ImmutableError, 'Cannot set items on Immutable.'): self.ordered_immutable[0] = 10 # should not be able to change with negative index with self.assertRaisesRegexp(Immutable.ImmutableError, 'Cannot set items on Immutable.'): self.ordered_immutable[-4] = 10 # should not be able to change with '.' access with self.assertRaisesRegexp(Immutable.ImmutableError, 'Cannot set attributes on Immutable.'): self.ordered_immutable.zero = 10 # finally, show how we *can* change it! NEVER DO THIS! 
self.ordered_immutable._ordered_dict['zero'] = 10 self.assertEqual(self.ordered_immutable['zero'], 10) def test___dir__(self): # __dir__ should return a list of the keys. dir_list = self.ordered_immutable.__dir__() expected_dir_list = ['zero', 'one', 'two', 'three'] self.assertEqual(dir_list, expected_dir_list)
[ "immutable.ImmutableFactory", "immutable.ImmutableFactory.create", "immutable.Immutable", "warnings.filterwarnings" ]
[((156, 189), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (179, 189), False, 'import warnings\n'), ((363, 388), 'immutable.ImmutableFactory.create', 'ImmutableFactory.create', ([], {}), '()\n', (386, 388), False, 'from immutable import Immutable, ImmutableFactory\n'), ((571, 589), 'immutable.ImmutableFactory', 'ImmutableFactory', ([], {}), '()\n', (587, 589), False, 'from immutable import Immutable, ImmutableFactory\n'), ((1168, 1186), 'immutable.ImmutableFactory', 'ImmutableFactory', ([], {}), '()\n', (1184, 1186), False, 'from immutable import Immutable, ImmutableFactory\n'), ((1802, 1820), 'immutable.ImmutableFactory', 'ImmutableFactory', ([], {}), '()\n', (1818, 1820), False, 'from immutable import Immutable, ImmutableFactory\n'), ((2427, 2445), 'immutable.ImmutableFactory', 'ImmutableFactory', ([], {}), '()\n', (2443, 2445), False, 'from immutable import Immutable, ImmutableFactory\n'), ((3183, 3201), 'immutable.ImmutableFactory', 'ImmutableFactory', ([], {}), '()\n', (3199, 3201), False, 'from immutable import Immutable, ImmutableFactory\n'), ((3589, 3607), 'immutable.ImmutableFactory', 'ImmutableFactory', ([], {}), '()\n', (3605, 3607), False, 'from immutable import Immutable, ImmutableFactory\n'), ((4014, 4077), 'immutable.Immutable', 'Immutable', ([], {'black': '"""black"""', 'white': '"""white"""', 'red': '"""red"""', 'blue': '"""blue"""'}), "(black='black', white='white', red='red', blue='blue')\n", (4023, 4077), False, 'from immutable import Immutable, ImmutableFactory\n'), ((4156, 4216), 'immutable.Immutable', 'Immutable', (["('zero', 0)", "('one', 1)", "('two', 2)", "('three', 3)"], {}), "(('zero', 0), ('one', 1), ('two', 2), ('three', 3))\n", (4165, 4216), False, 'from immutable import Immutable, ImmutableFactory\n'), ((4343, 4354), 'immutable.Immutable', 'Immutable', ([], {}), '()\n', (4352, 4354), False, 'from immutable import Immutable, ImmutableFactory\n'), ((4547, 4570), 'immutable.Immutable', 
'Immutable', (["('blah', 10)"], {}), "(('blah', 10))\n", (4556, 4570), False, 'from immutable import Immutable, ImmutableFactory\n'), ((4654, 4684), 'immutable.Immutable', 'Immutable', ([], {}), "(**{'okie': 'dokie'})\n", (4663, 4684), False, 'from immutable import Immutable, ImmutableFactory\n'), ((4774, 4818), 'immutable.Immutable', 'Immutable', (["('blah', 10)"], {}), "(('blah', 10), **{'okie': 'dokie'})\n", (4783, 4818), False, 'from immutable import Immutable, ImmutableFactory\n'), ((6472, 6515), 'immutable.Immutable', 'Immutable', ([], {}), "(**{k: 'blah' for k in 'abcdefg'})\n", (6481, 6515), False, 'from immutable import Immutable, ImmutableFactory\n'), ((7348, 7364), 'immutable.Immutable', 'Immutable', (['*args'], {}), '(*args)\n', (7357, 7364), False, 'from immutable import Immutable, ImmutableFactory\n'), ((7381, 7397), 'immutable.Immutable', 'Immutable', (['*args'], {}), '(*args)\n', (7390, 7397), False, 'from immutable import Immutable, ImmutableFactory\n'), ((7716, 7732), 'immutable.Immutable', 'Immutable', (['*args'], {}), '(*args)\n', (7725, 7732), False, 'from immutable import Immutable, ImmutableFactory\n'), ((7749, 7765), 'immutable.Immutable', 'Immutable', (['*args'], {}), '(*args)\n', (7758, 7765), False, 'from immutable import Immutable, ImmutableFactory\n')]
# Generated by Django 2.2.6 on 2019-10-15 19:17 import uuid from django.db import migrations, models class Migration(migrations.Migration): dependencies = [("children", "0001_initial")] operations = [ migrations.RenameField(model_name="child", old_name="uuid", new_name="id"), migrations.RemoveField(model_name="child", name="social_security_number_hash"), migrations.AlterField( model_name="child", name="id", field=models.UUIDField( default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name="UUID", ), ), ]
[ "django.db.migrations.RemoveField", "django.db.migrations.RenameField", "django.db.models.UUIDField" ]
[((222, 296), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""child"""', 'old_name': '"""uuid"""', 'new_name': '"""id"""'}), "(model_name='child', old_name='uuid', new_name='id')\n", (244, 296), False, 'from django.db import migrations, models\n'), ((306, 384), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""child"""', 'name': '"""social_security_number_hash"""'}), "(model_name='child', name='social_security_number_hash')\n", (328, 384), False, 'from django.db import migrations, models\n'), ((490, 602), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""UUID"""'}), "(default=uuid.uuid4, editable=False, primary_key=True,\n serialize=False, verbose_name='UUID')\n", (506, 602), False, 'from django.db import migrations, models\n')]
#!/usr/bin/env python3 # Foundations of Python Network Programming, Third Edition # https://github.com/brandon-rhodes/fopnp/blob/m/py3/chapter17/recursedl.py from ftplib import FTP, error_perm def walk_dir(ftp, dirpath): original_dir = ftp.pwd() try: ftp.cwd(dirpath) except error_perm: return # ignore non-directores and ones we cannot enter print(dirpath) names = sorted(ftp.nlst()) for name in names: walk_dir(ftp, dirpath + '/' + name) ftp.cwd(original_dir) # return to cwd of our caller def main(): ftp = FTP('ftp.kernel.org') ftp.login() walk_dir(ftp, '/pub/linux/kernel/Historic/old-versions') ftp.quit() if __name__ == '__main__': main()
[ "ftplib.FTP" ]
[((571, 592), 'ftplib.FTP', 'FTP', (['"""ftp.kernel.org"""'], {}), "('ftp.kernel.org')\n", (574, 592), False, 'from ftplib import FTP, error_perm\n')]
from allauth.socialaccount import providers from django import template register = template.Library() @register.simple_tag def get_user_social_providers(user): user_providers = set() for account in user.socialaccount_set.all(): user_providers.add(account.get_provider()) return list(user_providers) @register.simple_tag def get_other_social_providers(user): user_providers = get_user_social_providers(user) user_provider_names = [p.name.lower() for p in user_providers] other_providers = [] for provider in providers.registry.get_list(): if provider.name.lower() not in user_provider_names: other_providers.append(provider) return other_providers @register.simple_tag def get_number_unconnected_providers(user): return len(get_other_social_providers(user)) @register.simple_tag def user_providers(user): return user.socialaccount_set.values_list("provider", flat=True)
[ "django.template.Library", "allauth.socialaccount.providers.registry.get_list" ]
[((84, 102), 'django.template.Library', 'template.Library', ([], {}), '()\n', (100, 102), False, 'from django import template\n'), ((548, 577), 'allauth.socialaccount.providers.registry.get_list', 'providers.registry.get_list', ([], {}), '()\n', (575, 577), False, 'from allauth.socialaccount import providers\n')]
from uninas.utils.args import Argument from uninas.register import Register from uninas.methods.abstract import AbstractBiOptimizationMethod from uninas.methods.strategies.manager import StrategyManager from uninas.methods.strategies.differentiable import DifferentiableStrategy @Register.method(search=True) class AsapSearchMethod(AbstractBiOptimizationMethod): """ Executes all choices, learns how to weights them in a weighted sum, anneals the softmax temperature to enforce convergence and prunes the options that are weighted below a threshold """ @classmethod def args_to_add(cls, index=None) -> [Argument]: """ list arguments to add to argparse when this class (or a child class) is chosen """ return super().args_to_add(index) + [ Argument('tau_0', default=1.6, type=float, help='initial tau value for the softmax temperature'), Argument('tau_grace', default=1.0, type=float, help='no arc training/pruning until tau is smaller'), Argument('beta', default=0.95, type=float, help='beta value to anneal tau0'), ] def setup_strategy(self) -> StrategyManager: """ set up the strategy for architecture weights """ tau_0 = self._parsed_argument('tau_0', self.hparams) return StrategyManager().add_strategy(DifferentiableStrategy(self.max_epochs, tau=tau_0, use_mask=True)) def _on_epoch_start(self) -> dict: log_dict = super()._on_epoch_start() tau_0, tau_grace, beta = self._parsed_arguments(['tau_0', 'tau_grace', 'beta'], self.hparams) for strategy in StrategyManager().get_strategies_list(): strategy.tau = tau_0 * beta ** self.current_epoch log_dict = self._add_to_dict(log_dict, dict(tau=strategy.tau)) self.update_architecture_weights = strategy.tau < tau_grace if self.update_architecture_weights: strategy.mask_all_weights_below(0.4, div_by_numel=True) log_dict.update(strategy.get_masks_log_dict(prefix='asap/masks')) self.set_loader_multiples((1, 1)) else: self.set_loader_multiples((1, 0)) return log_dict
[ "uninas.methods.strategies.differentiable.DifferentiableStrategy", "uninas.register.Register.method", "uninas.utils.args.Argument", "uninas.methods.strategies.manager.StrategyManager" ]
[((282, 310), 'uninas.register.Register.method', 'Register.method', ([], {'search': '(True)'}), '(search=True)\n', (297, 310), False, 'from uninas.register import Register\n'), ((1323, 1388), 'uninas.methods.strategies.differentiable.DifferentiableStrategy', 'DifferentiableStrategy', (['self.max_epochs'], {'tau': 'tau_0', 'use_mask': '(True)'}), '(self.max_epochs, tau=tau_0, use_mask=True)\n', (1345, 1388), False, 'from uninas.methods.strategies.differentiable import DifferentiableStrategy\n'), ((794, 895), 'uninas.utils.args.Argument', 'Argument', (['"""tau_0"""'], {'default': '(1.6)', 'type': 'float', 'help': '"""initial tau value for the softmax temperature"""'}), "('tau_0', default=1.6, type=float, help=\n 'initial tau value for the softmax temperature')\n", (802, 895), False, 'from uninas.utils.args import Argument\n'), ((904, 1008), 'uninas.utils.args.Argument', 'Argument', (['"""tau_grace"""'], {'default': '(1.0)', 'type': 'float', 'help': '"""no arc training/pruning until tau is smaller"""'}), "('tau_grace', default=1.0, type=float, help=\n 'no arc training/pruning until tau is smaller')\n", (912, 1008), False, 'from uninas.utils.args import Argument\n'), ((1017, 1093), 'uninas.utils.args.Argument', 'Argument', (['"""beta"""'], {'default': '(0.95)', 'type': 'float', 'help': '"""beta value to anneal tau0"""'}), "('beta', default=0.95, type=float, help='beta value to anneal tau0')\n", (1025, 1093), False, 'from uninas.utils.args import Argument\n'), ((1292, 1309), 'uninas.methods.strategies.manager.StrategyManager', 'StrategyManager', ([], {}), '()\n', (1307, 1309), False, 'from uninas.methods.strategies.manager import StrategyManager\n'), ((1601, 1618), 'uninas.methods.strategies.manager.StrategyManager', 'StrategyManager', ([], {}), '()\n', (1616, 1618), False, 'from uninas.methods.strategies.manager import StrategyManager\n')]
from flask import Blueprint, render_template, request from sklearn.datasets import load_iris from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from .models import User from .basilica_service import connection as basilica_connection stats_routes = Blueprint("stats_routes", __name__) @stats_routes.route("/stats/iris") def iris(): # train the model (on the fly, in real-time): X, y = load_iris(return_X_y=True) clf = LogisticRegression(random_state=0, solver="lbfgs", multi_class="multinomial").fit(X, y) # make a prediction: results = str(clf.predict(X[:2, :])) return results @stats_routes.route("/") def twitoff_prediction_form(): return render_template("prediction_form.html") @stats_routes.route("/stats/predict", methods=["POST"]) def twitoff_prediction(): print("FORM DATA:", dict(request.form)) screen_name_a = request.form["screen_name_a"] screen_name_b = request.form["screen_name_b"] tweet_text = request.form["tweet_text"] # # TRAIN THE MODEL # # inputs: embeddings for each tweet # labels: screen name for each tweet model = RandomForestClassifier(random_state=42,max_depth = 16,min_samples_leaf = 14, min_samples_split=8) user_a = User.query.filter(User.screen_name == screen_name_a).one() user_b = User.query.filter(User.screen_name == screen_name_b).one() user_a_tweets = user_a.tweets user_b_tweets = user_b.tweets embeddings = [] # wrapping in a list to make an array labels = [] all_tweets = user_a_tweets + user_b_tweets for tweet in all_tweets: embeddings.append(tweet.embedding) # embedding are our x labels.append(tweet.user.screen_name) # labels are our y model.fit(embeddings, labels) # # MAKE PREDICTION # example_embedding = basilica_connection.embed_sentence(tweet_text, model="twitter") result = model.predict([example_embedding]) print(result) maj_clss = max(set(labels), key=labels.count) y_pred = [maj_clss] * len(embeddings) #predictions = [(value) for value in y_pred] 
screen_name_most_likely = result[0] predictionr = [screen_name_most_likely] * len(embeddings) acc = accuracy_score(y_pred,predictionr) return render_template("prediction_results.html", screen_name_a=screen_name_a, screen_name_b=screen_name_b, screen_name_most_likely = screen_name_most_likely, tweet_text = tweet_text, acc = acc, result = result )
[ "sklearn.datasets.load_iris", "flask.render_template", "sklearn.ensemble.RandomForestClassifier", "sklearn.linear_model.LogisticRegression", "flask.Blueprint", "sklearn.metrics.accuracy_score" ]
[((346, 381), 'flask.Blueprint', 'Blueprint', (['"""stats_routes"""', '__name__'], {}), "('stats_routes', __name__)\n", (355, 381), False, 'from flask import Blueprint, render_template, request\n'), ((491, 517), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (500, 517), False, 'from sklearn.datasets import load_iris\n'), ((769, 808), 'flask.render_template', 'render_template', (['"""prediction_form.html"""'], {}), "('prediction_form.html')\n", (784, 808), False, 'from flask import Blueprint, render_template, request\n'), ((1213, 1312), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(42)', 'max_depth': '(16)', 'min_samples_leaf': '(14)', 'min_samples_split': '(8)'}), '(random_state=42, max_depth=16, min_samples_leaf=14,\n min_samples_split=8)\n', (1235, 1312), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2310, 2345), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_pred', 'predictionr'], {}), '(y_pred, predictionr)\n', (2324, 2345), False, 'from sklearn.metrics import accuracy_score\n'), ((2357, 2562), 'flask.render_template', 'render_template', (['"""prediction_results.html"""'], {'screen_name_a': 'screen_name_a', 'screen_name_b': 'screen_name_b', 'screen_name_most_likely': 'screen_name_most_likely', 'tweet_text': 'tweet_text', 'acc': 'acc', 'result': 'result'}), "('prediction_results.html', screen_name_a=screen_name_a,\n screen_name_b=screen_name_b, screen_name_most_likely=\n screen_name_most_likely, tweet_text=tweet_text, acc=acc, result=result)\n", (2372, 2562), False, 'from flask import Blueprint, render_template, request\n'), ((528, 605), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'solver': '"""lbfgs"""', 'multi_class': '"""multinomial"""'}), "(random_state=0, solver='lbfgs', multi_class='multinomial')\n", (546, 605), False, 'from sklearn.linear_model import LogisticRegression\n')]
import pandas as pd dataset = pd.read_csv('iris.csv') dataset.boxplot(column = 'sepal_width',by = 'species') import matplotlib.pyplot as plt hours_slices = [8,16] activities = ['work','sleep'] colors = ['g','r'] plt.pie(hours_slices,labels=activities,colors=colors,startangle=90,autopct='%.1f%%') plt.show()
[ "matplotlib.pyplot.pie", "pandas.read_csv", "matplotlib.pyplot.show" ]
[((30, 53), 'pandas.read_csv', 'pd.read_csv', (['"""iris.csv"""'], {}), "('iris.csv')\n", (41, 53), True, 'import pandas as pd\n'), ((215, 307), 'matplotlib.pyplot.pie', 'plt.pie', (['hours_slices'], {'labels': 'activities', 'colors': 'colors', 'startangle': '(90)', 'autopct': '"""%.1f%%"""'}), "(hours_slices, labels=activities, colors=colors, startangle=90,\n autopct='%.1f%%')\n", (222, 307), True, 'import matplotlib.pyplot as plt\n'), ((300, 310), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (308, 310), True, 'import matplotlib.pyplot as plt\n')]
# -*- coding: utf-8 -*- ''' Copyright (c) 2021 <NAME>. This file is part of HermesBot. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' from discord.ext import commands from ..helpers import AudioManager, GuildSettings from ..utils import smart_print import asyncio class BotController(commands.Cog): def __init__(self, bot): """Initialize important information.""" self.bot = bot self.audio_manager = AudioManager(bot) async def handle_disconnection(self, guild): await self.audio_manager.clear_audio_player(guild) @commands.command(name='connect', aliases=['join'], help="- Call bot to your voice channel.") async def player_connect(self, ctx, play_quote: bool = True): try: channel = ctx.author.voice.channel except AttributeError: return await smart_print(ctx, 'The bot can only be summoned from a voice channel') # noqa vc = ctx.voice_client # Check if the vc already exists if(vc): if(self.audio_manager.has_player(ctx) is False): voice_state = ctx.guild.voice_client await voice_state.disconnect() await channel.connect() else: # Check if the bot is already in the voice channel if(vc.channel.id == channel.id): print('Already in the same channel') return # Connect to the new voice channel try: print('Moving to channel') await vc.move_to(channel) except asyncio.TimeoutError: return await smart_print(ctx, 'Moving to channel: <%s> timed out.', data=[channel]) # noqa else: # The bot is not in a voice channel. 
Lets join one try: print('Connecting to channel') await channel.connect() except asyncio.TimeoutError: return await smart_print(ctx, 'Connecting to channel: <%s> timed out.', data=[channel]) # noqa if play_quote: await self.audio_manager.on_bot_join_channel(ctx, ctx.guild) await smart_print(ctx, 'Connected to: **%s**', data=[channel]) @commands.command(name='disconnect', aliases=['leave', 'go', 'goaway', 'go-away', 'stop'], help="- Ask the bot to leave the voice channel.") async def player_disconnect(self, ctx): vc = ctx.voice_client if(not vc or not vc.is_connected()): return await smart_print(ctx, 'The bot is not connected to a channel.') # noqa # Clean up the bot, its time for it to go. # await self.audio_manager.clear_audio_player(ctx.guild) await vc.disconnect() @commands.command( name="play", aliases=['p'], help="- <url:string | search:string> : Adds a song to the queue." ) async def play_song(self, ctx, *, search: str = None): await ctx.invoke(self.player_connect) # No search was provided. Maybe we need to resume? if(search is None): if self.audio_manager.can_resume(ctx): return await ctx.invoke(self.resume_song) else: return await smart_print(ctx, 'Command missing arguments. Use .help for additional information.') # noqa # In some cases the bot may have been disconnected # manualy. If this is the case, then the voice_client # exsists but is not conected. Lets handle that now. vc = ctx.voice_client # Check if the bot needs connecting # if (not vc or not ctx.voice_client.is_connected()): vc = ctx.voice_client if(not vc): print('The bot was unable to create a voice client.') return # No resume. This is a new song request. await self.audio_manager.play(ctx, search) @commands.command( name='play_quote', aliases=['pq'], description='- <ID:int> : Plays a quote with the specified ID.' ) async def play_quote(self, ctx, id: int): await ctx.invoke(self.player_connect, play_quote=False) if id is None: return await smart_print(ctx, 'Command missing arguments. 
Use .help for additional information.') # noqa await self.audio_manager.play_quote(ctx, id) @commands.command(name="playnext", aliases=['pn'], help="- Adds a song to the top of the queue.") async def play_song_next(self, ctx, *, search: str = None): # No search was provided. Maybe we need to resume? if(search is None): if self.audio_manager.can_resume(ctx): return await ctx.invoke(self.resume_song) else: return await smart_print(ctx, 'Command missing arguments. Use .help for additional information.') # noqa vc = ctx.voice_client if (not vc): await ctx.invoke(self.player_connect) vc = ctx.voice_client if(not vc): print('The bot was unable to create a voice client.') return await self.audio_manager.play(ctx, search, playnext=True) @commands.command(name='pause', help="- Pause the music.") async def pause_song(self, ctx): await self.audio_manager.pause(ctx) @commands.command(name='resume', help="- Resume the music.") async def resume_song(self, ctx): await self.audio_manager.resume(ctx) @commands.command(name='skip', aliases=['next'], help="- Skips the current song.") async def skip_song(self, ctx): await self.audio_manager.skip(ctx) @commands.command(name='queue', aliases=['q', 'playlist'], help="- Shows the current music queue.") async def play_queue(self, ctx): await self.audio_manager.display_playing_queue(ctx) @commands.command(name='playing', aliases=['now', 'current', 'np'], help="- Display the title of the song playing.") async def play_current(self, ctx): await self.audio_manager.show_song_playing(ctx) @commands.command(name='shuffle', aliases=['sh'], help="- Shuffles the current song queue.") async def shuffle(self, ctx): await self.audio_manager.shuffle(ctx) @commands.command(name='clear', help='- Clear the current queue of songs.') async def clear_queue(self, ctx): await self.audio_manager.clear_queue(ctx) @commands.command(name='music_volume', aliases=['mvol'], help='- <[1-100]: int> Sets the volume for music.') async def set_music_volume(self, ctx, 
volume: float): if volume < 0 or volume > 100: return await smart_print(ctx, 'The volume needs to be in the range of 1-100.') # noqa await GuildSettings.save_music_volume(ctx.guild.id, volume/100) await self.audio_manager.set_volume(ctx, volume) @commands.command(name='quote_volume', aliases=['qvol'], help='- <[1-100]: int> Sets the volume for quotes.') async def set_quote_volume(self, ctx, volume: float): if volume < 0 or volume > 100: return await smart_print(ctx, 'The volume needs to be in the range of 1-100.') # noqa await GuildSettings.save_quote_volume(ctx.guild.id, volume/100) await smart_print(ctx, 'Quote volume set to **%s%**', data=[volume]) def setup(bot): bot.add_cog(BotController(bot))
[ "discord.ext.commands.command" ]
[((1050, 1147), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""connect"""', 'aliases': "['join']", 'help': '"""- Call bot to your voice channel."""'}), "(name='connect', aliases=['join'], help=\n '- Call bot to your voice channel.')\n", (1066, 1147), False, 'from discord.ext import commands\n'), ((2789, 2932), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""disconnect"""', 'aliases': "['leave', 'go', 'goaway', 'go-away', 'stop']", 'help': '"""- Ask the bot to leave the voice channel."""'}), "(name='disconnect', aliases=['leave', 'go', 'goaway',\n 'go-away', 'stop'], help='- Ask the bot to leave the voice channel.')\n", (2805, 2932), False, 'from discord.ext import commands\n'), ((3338, 3454), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""play"""', 'aliases': "['p']", 'help': '"""- <url:string | search:string> : Adds a song to the queue."""'}), "(name='play', aliases=['p'], help=\n '- <url:string | search:string> : Adds a song to the queue.')\n", (3354, 3454), False, 'from discord.ext import commands\n'), ((4490, 4611), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""play_quote"""', 'aliases': "['pq']", 'description': '"""- <ID:int> : Plays a quote with the specified ID."""'}), "(name='play_quote', aliases=['pq'], description=\n '- <ID:int> : Plays a quote with the specified ID.')\n", (4506, 4611), False, 'from discord.ext import commands\n'), ((4949, 5050), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""playnext"""', 'aliases': "['pn']", 'help': '"""- Adds a song to the top of the queue."""'}), "(name='playnext', aliases=['pn'], help=\n '- Adds a song to the top of the queue.')\n", (4965, 5050), False, 'from discord.ext import commands\n'), ((5803, 5860), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""pause"""', 'help': '"""- Pause the music."""'}), "(name='pause', help='- Pause the music.')\n", (5819, 5860), False, 'from 
discord.ext import commands\n'), ((5970, 6029), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""resume"""', 'help': '"""- Resume the music."""'}), "(name='resume', help='- Resume the music.')\n", (5986, 6029), False, 'from discord.ext import commands\n'), ((6141, 6227), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""skip"""', 'aliases': "['next']", 'help': '"""- Skips the current song."""'}), "(name='skip', aliases=['next'], help=\n '- Skips the current song.')\n", (6157, 6227), False, 'from discord.ext import commands\n'), ((6352, 6455), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""queue"""', 'aliases': "['q', 'playlist']", 'help': '"""- Shows the current music queue."""'}), "(name='queue', aliases=['q', 'playlist'], help=\n '- Shows the current music queue.')\n", (6368, 6455), False, 'from discord.ext import commands\n'), ((6598, 6718), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""playing"""', 'aliases': "['now', 'current', 'np']", 'help': '"""- Display the title of the song playing."""'}), "(name='playing', aliases=['now', 'current', 'np'], help=\n '- Display the title of the song playing.')\n", (6614, 6718), False, 'from discord.ext import commands\n'), ((6859, 6955), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""shuffle"""', 'aliases': "['sh']", 'help': '"""- Shuffles the current song queue."""'}), "(name='shuffle', aliases=['sh'], help=\n '- Shuffles the current song queue.')\n", (6875, 6955), False, 'from discord.ext import commands\n'), ((7059, 7133), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""clear"""', 'help': '"""- Clear the current queue of songs."""'}), "(name='clear', help='- Clear the current queue of songs.')\n", (7075, 7133), False, 'from discord.ext import commands\n'), ((7228, 7340), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""music_volume"""', 'aliases': "['mvol']", 'help': 
'"""- <[1-100]: int> Sets the volume for music."""'}), "(name='music_volume', aliases=['mvol'], help=\n '- <[1-100]: int> Sets the volume for music.')\n", (7244, 7340), False, 'from discord.ext import commands\n'), ((7691, 7804), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""quote_volume"""', 'aliases': "['qvol']", 'help': '"""- <[1-100]: int> Sets the volume for quotes."""'}), "(name='quote_volume', aliases=['qvol'], help=\n '- <[1-100]: int> Sets the volume for quotes.')\n", (7707, 7804), False, 'from discord.ext import commands\n')]
from PIL import Image my_image = Image.open("assets/images/splashscreen_background.png") width, height = my_image.size print(height)
[ "PIL.Image.open" ]
[((35, 90), 'PIL.Image.open', 'Image.open', (['"""assets/images/splashscreen_background.png"""'], {}), "('assets/images/splashscreen_background.png')\n", (45, 90), False, 'from PIL import Image\n')]
#! /usr/bin/env python # -*- coding: utf-8 -*- # # Interpreter version: python 2.7 # # Imports ===================================================================== import xmltodict from odictliteral import odict from kwargs_obj import KwargsObj from tools import both_set_and_different # Functions & classes ========================================================= class DigitalInstance(KwargsObj): """ Container used to hold informations about instances of the documents in digital library - this is pointer to document in digital library. Attributes: uid (str): ID of the library. url (str): URL of the library. digital_library_id (str): Id of the digitial library. active (bool, def. None): Is the record active? created (str, def. None): ISO 8601 string with date. deactivated (str, def. None): ISO 8601 string representation of date. format (str, def. None): Format of the book. ``jpg;pdf`` for example. accessibility (str, def. None): Free description of accessibility. """ def __init__(self, url, digital_library_id, **kwargs): self.url = url self.digital_library_id = digital_library_id self.uid = None self.active = None self.format = None self.created = None self.accessibility = None self.deactivated = None self._all_set = True self._kwargs_to_attributes(kwargs) def __eq__(self, other): tests = [ self.uid != other.uid, self.active != other.active, self.digital_library_id != other.digital_library_id, self.url != other.url, self.deactivated != other.deactivated, ] if any(tests): return False return not any([ both_set_and_different(self.format, other.format), both_set_and_different(self.created, other.created), both_set_and_different(self.accessibility, other.accessibility), ]) def __ne__(self, other): return not self.__eq__(other) def to_xml(self): """ Convert self to XML, which can be send to API to register new digital instance. Returns: str: UTF-8 encoded string with XML representation. Raises: AssertionError: If :attr:`url` or :attr:`digital_library_id` is \ not set. 
""" assert self.url, "You have to set .url!" assert self.digital_library_id, "You have to set .digital_library_id!" root = odict[ "digitalInstance": odict[ "@xmlns": "http://resolver.nkp.cz/v3/", "url": self.url, "digitalLibraryId": self.digital_library_id, "format": self.format, "accessibility": self.accessibility, ] ] if not self.format: del root["digitalInstance"]["format"] if not self.accessibility: del root["digitalInstance"]["accessibility"] return xmltodict.unparse(root, pretty=True).encode("utf-8") @staticmethod def instance_from_xmldict(dict_tag): """ Create DigitalInstance from nested dicts (result of xmltodict). Args: dict_tag (dict): Nested dicts. Returns: obj: :class:`DigitalInstance` object. """ return DigitalInstance( uid=dict_tag["@id"], active=dict_tag["@active"].lower() == "true", url=dict_tag.get("url", None), digital_library_id=dict_tag["digitalLibraryId"], format=dict_tag.get("format", None), created=dict_tag.get("created", None), deactivated=dict_tag.get("deactivated", None), accessibility=dict_tag.get("accessibility", None), ) @staticmethod def from_xml(xml): """ Parse `xml` string and DigitalInstances. Args: xml (str): Unicode/utf-8 XML. Returns: list: List of :class:`DigitalInstance` objects. """ xdom = xmltodict.parse(xml) di = xdom["response"]["digitalInstances"] if not di.get("digitalInstance", False): return [] return [ DigitalInstance.instance_from_xmldict(dig_instance_tag) for dig_instance_tag in di["digitalInstance"] ]
[ "tools.both_set_and_different", "xmltodict.parse", "xmltodict.unparse" ]
[((4150, 4170), 'xmltodict.parse', 'xmltodict.parse', (['xml'], {}), '(xml)\n', (4165, 4170), False, 'import xmltodict\n'), ((3091, 3127), 'xmltodict.unparse', 'xmltodict.unparse', (['root'], {'pretty': '(True)'}), '(root, pretty=True)\n', (3108, 3127), False, 'import xmltodict\n'), ((1821, 1870), 'tools.both_set_and_different', 'both_set_and_different', (['self.format', 'other.format'], {}), '(self.format, other.format)\n', (1843, 1870), False, 'from tools import both_set_and_different\n'), ((1884, 1935), 'tools.both_set_and_different', 'both_set_and_different', (['self.created', 'other.created'], {}), '(self.created, other.created)\n', (1906, 1935), False, 'from tools import both_set_and_different\n'), ((1949, 2012), 'tools.both_set_and_different', 'both_set_and_different', (['self.accessibility', 'other.accessibility'], {}), '(self.accessibility, other.accessibility)\n', (1971, 2012), False, 'from tools import both_set_and_different\n')]
"""Test for Simple Content Chunkifyer""" import unittest from PiCN.Layers.ChunkLayer.Chunkifyer import SimpleContentChunkifyer from PiCN.Packets import Content, Name class test_SimpleContentChunkifyer(unittest.TestCase): def setUp(self): self.chunkifyer = SimpleContentChunkifyer() def tearDown(self): pass def test_generate_metadata_no_next(self): """Test generating a simple metadata object""" name = Name("/test/data") res = self.chunkifyer.generate_meta_data(2,4,0, 0,name,300) self.assertEqual(res.name.to_string(), "/test/data") self.assertEqual(res.content, "mdo:300:/test/data/c2;/test/data/c3:") def test_generate_metadata_one_next(self): """Test generating a simple metadata object with one following""" name = Name("/test/data") res = self.chunkifyer.generate_meta_data(2,4,0,1,name,300) self.assertEqual(res.name.to_string(), "/test/data") self.assertEqual(res.content, "mdo:300:/test/data/c2;/test/data/c3:/test/data/m1") def test_generate_metadata_two_next(self): """Test generating a simple metadata object with two following""" name = Name("/test/data") res = self.chunkifyer.generate_meta_data(2,4,1,2, name,300) self.assertEqual(res.name.to_string(), "/test/data/m1") self.assertEqual(res.content, "mdo:300:/test/data/c2;/test/data/c3:/test/data/m2") def test_chunk_single_metadata(self): name = Name("/test/data") string = "A" * 4096 + "B" * 4096 + "C" * 4096 content = Content(name, string) md, content = self.chunkifyer.chunk_data(content) md_name_comp = ['/test/data'] md_data_comp = ['mdo:12288:/test/data/c0;/test/data/c1;/test/data/c2:'] content_name_comp = ['/test/data/c0', '/test/data/c1', '/test/data/c2'] content_data_comp = ["A" * 4096, "B" * 4096, "C" * 4096] for i in range(0, len(md)): self.assertEqual(md[i].name.to_string(), md_name_comp[i]) self.assertEqual(md[i].content, md_data_comp[i]) for i in range(0, len(content)): self.assertEqual(content[i].name.to_string(), content_name_comp[i]) self.assertEqual(content[i].content, content_data_comp[i]) def 
test_chunk_multiple_metadata(self): """Test chunking metadata with three metadata objects and 10 chunks""" name = Name("/test/data") string = "A"*4096 + "B"*4096 + "C"*4096 + "D"*4096 + "E"*4096 + "F"*4096 + "G"*4096 + "H"*4096 \ + "I"*4096 + "J"*4000 content = Content(name, string) md, chunked_content = self.chunkifyer.chunk_data(content) md_name_comp = ['/test/data', '/test/data/m1', '/test/data/m2'] md_data_comp = ['mdo:40864:/test/data/c0;/test/data/c1;/test/data/c2;/test/data/c3:/test/data/m1', 'mdo:40864:/test/data/c4;/test/data/c5;/test/data/c6;/test/data/c7:/test/data/m2', 'mdo:40864:/test/data/c8;/test/data/c9:'] content_name_comp = ['/test/data/c0', '/test/data/c1', '/test/data/c2', '/test/data/c3', '/test/data/c4', '/test/data/c5', '/test/data/c6', '/test/data/c7', '/test/data/c8', '/test/data/c9'] content_data_comp = ["A"*4096, "B"*4096, "C"*4096, "D"*4096, "E"*4096, "F"*4096, "G"*4096, "H"*4096, "I"*4096, "J"*4000] for i in range(0, len(md)): self.assertEqual(md[i].name.to_string(), md_name_comp[i]) self.assertEqual(md[i].content, md_data_comp[i]) for i in range(0, len(chunked_content)): self.assertEqual(chunked_content[i].name.to_string(), content_name_comp[i]) self.assertEqual(chunked_content[i].content, content_data_comp[i]) def test_chunk_multiple_metadata_reassemble(self): """Test chunking metadata with three metadata objects and 10 chunks and reassemble""" name = Name("/test/data") string = "A" * 4096 + "B" * 4096 + "C" * 4096 + "D" * 4096 + "E" * 4096 + "F" * 4096 + "G" * 4096 + "H" * 4096 \ + "I" * 4096 + "J" * 4000 content = Content(name, string) md, chunked_content = self.chunkifyer.chunk_data(content) md_name_comp = ['/test/data', '/test/data/m1', '/test/data/m2'] md_data_comp = ['mdo:40864:/test/data/c0;/test/data/c1;/test/data/c2;/test/data/c3:/test/data/m1', 'mdo:40864:/test/data/c4;/test/data/c5;/test/data/c6;/test/data/c7:/test/data/m2', 'mdo:40864:/test/data/c8;/test/data/c9:'] content_name_comp = ['/test/data/c0', '/test/data/c1', 
'/test/data/c2', '/test/data/c3', '/test/data/c4', '/test/data/c5', '/test/data/c6', '/test/data/c7', '/test/data/c8', '/test/data/c9'] content_data_comp = ["A" * 4096, "B" * 4096, "C" * 4096, "D" * 4096, "E" * 4096, "F" * 4096, "G" * 4096, "H" * 4096, "I" * 4096, "J" * 4000] for i in range(0, len(md)): self.assertEqual(md[i].name.to_string(), md_name_comp[i]) self.assertEqual(md[i].content, md_data_comp[i]) for i in range(0, len(chunked_content)): self.assertEqual(chunked_content[i].name.to_string(), content_name_comp[i]) self.assertEqual(chunked_content[i].content, content_data_comp[i]) reassembled_content = self.chunkifyer.reassamble_data(md[0].name, chunked_content) self.assertEqual(content, reassembled_content) def test_parse_metadata_next(self): """Test parse metadata with next metadata""" md, names, size = self.chunkifyer.parse_meta_data( "mdo:300:/test/data/c0;/test/data/c1;/test/data/c2;/test/data/c3:/test/data/m1") self.assertEqual(Name("/test/data/m1"), md) names_comp = [Name("/test/data/c0"), Name("/test/data/c1"), Name("/test/data/c2"), Name("/test/data/c3")] self.assertEqual(names, names_comp) self.assertEqual(int(size), 300) def test_parse_metadata(self): """Test parse metadata""" md, names, size = self.chunkifyer.parse_meta_data( "mdo:300:/test/data/c0;/test/data/c1;/test/data/c2;/test/data/c3:") self.assertEqual(None, md) names_comp = [Name("/test/data/c0"), Name("/test/data/c1"), Name("/test/data/c2"), Name("/test/data/c3")] self.assertEqual(names, names_comp) self.assertEqual(int(size), 300)
[ "PiCN.Packets.Content", "PiCN.Packets.Name", "PiCN.Layers.ChunkLayer.Chunkifyer.SimpleContentChunkifyer" ]
[((273, 298), 'PiCN.Layers.ChunkLayer.Chunkifyer.SimpleContentChunkifyer', 'SimpleContentChunkifyer', ([], {}), '()\n', (296, 298), False, 'from PiCN.Layers.ChunkLayer.Chunkifyer import SimpleContentChunkifyer\n'), ((454, 472), 'PiCN.Packets.Name', 'Name', (['"""/test/data"""'], {}), "('/test/data')\n", (458, 472), False, 'from PiCN.Packets import Content, Name\n'), ((819, 837), 'PiCN.Packets.Name', 'Name', (['"""/test/data"""'], {}), "('/test/data')\n", (823, 837), False, 'from PiCN.Packets import Content, Name\n'), ((1196, 1214), 'PiCN.Packets.Name', 'Name', (['"""/test/data"""'], {}), "('/test/data')\n", (1200, 1214), False, 'from PiCN.Packets import Content, Name\n'), ((1498, 1516), 'PiCN.Packets.Name', 'Name', (['"""/test/data"""'], {}), "('/test/data')\n", (1502, 1516), False, 'from PiCN.Packets import Content, Name\n'), ((1589, 1610), 'PiCN.Packets.Content', 'Content', (['name', 'string'], {}), '(name, string)\n', (1596, 1610), False, 'from PiCN.Packets import Content, Name\n'), ((2437, 2455), 'PiCN.Packets.Name', 'Name', (['"""/test/data"""'], {}), "('/test/data')\n", (2441, 2455), False, 'from PiCN.Packets import Content, Name\n'), ((2618, 2639), 'PiCN.Packets.Content', 'Content', (['name', 'string'], {}), '(name, string)\n', (2625, 2639), False, 'from PiCN.Packets import Content, Name\n'), ((3999, 4017), 'PiCN.Packets.Name', 'Name', (['"""/test/data"""'], {}), "('/test/data')\n", (4003, 4017), False, 'from PiCN.Packets import Content, Name\n'), ((4200, 4221), 'PiCN.Packets.Content', 'Content', (['name', 'string'], {}), '(name, string)\n', (4207, 4221), False, 'from PiCN.Packets import Content, Name\n'), ((5884, 5905), 'PiCN.Packets.Name', 'Name', (['"""/test/data/m1"""'], {}), "('/test/data/m1')\n", (5888, 5905), False, 'from PiCN.Packets import Content, Name\n'), ((5933, 5954), 'PiCN.Packets.Name', 'Name', (['"""/test/data/c0"""'], {}), "('/test/data/c0')\n", (5937, 5954), False, 'from PiCN.Packets import Content, Name\n'), ((5956, 5977), 
'PiCN.Packets.Name', 'Name', (['"""/test/data/c1"""'], {}), "('/test/data/c1')\n", (5960, 5977), False, 'from PiCN.Packets import Content, Name\n'), ((5979, 6000), 'PiCN.Packets.Name', 'Name', (['"""/test/data/c2"""'], {}), "('/test/data/c2')\n", (5983, 6000), False, 'from PiCN.Packets import Content, Name\n'), ((6002, 6023), 'PiCN.Packets.Name', 'Name', (['"""/test/data/c3"""'], {}), "('/test/data/c3')\n", (6006, 6023), False, 'from PiCN.Packets import Content, Name\n'), ((6377, 6398), 'PiCN.Packets.Name', 'Name', (['"""/test/data/c0"""'], {}), "('/test/data/c0')\n", (6381, 6398), False, 'from PiCN.Packets import Content, Name\n'), ((6400, 6421), 'PiCN.Packets.Name', 'Name', (['"""/test/data/c1"""'], {}), "('/test/data/c1')\n", (6404, 6421), False, 'from PiCN.Packets import Content, Name\n'), ((6423, 6444), 'PiCN.Packets.Name', 'Name', (['"""/test/data/c2"""'], {}), "('/test/data/c2')\n", (6427, 6444), False, 'from PiCN.Packets import Content, Name\n'), ((6446, 6467), 'PiCN.Packets.Name', 'Name', (['"""/test/data/c3"""'], {}), "('/test/data/c3')\n", (6450, 6467), False, 'from PiCN.Packets import Content, Name\n')]
from typing import TYPE_CHECKING if TYPE_CHECKING: from pyspawn._graph.relationship import Relationship from dataclasses import dataclass from typing import Set @dataclass(frozen=False) class Table: """Table class""" schema: str table_name: str def __post_init__(self): self.relationships: Set[Relationship] = set() def get_full_name(self, quote_identifier: str) -> str: """return the fully quoted schema and name representation of the table.""" if self.schema is None: return f"{quote_identifier}{self.table_name}{quote_identifier}" return f"{quote_identifier}{self.schema}{quote_identifier}.{quote_identifier}{self.table_name}{quote_identifier}" def to_string(self): """String representation of schema.table_name for the table.""" return f"{self.schema}.{self.table_name}" def __eq__(self, other): """Overrides the default equality implementation based on object identifiers.""" if isinstance(other, Table): return self.schema == other.schema and self.table_name == other.table_name return False def __hash__(self) -> int: """Overrides the default hash implementation based on object identifiers.""" return hash(self.schema + "-" + self.table_name)
[ "dataclasses.dataclass" ]
[((169, 192), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(False)'}), '(frozen=False)\n', (178, 192), False, 'from dataclasses import dataclass\n')]
# pylint: disable=fixme, line-too-long, import-error, no-name-in-module # # Copyright (c) 2020 Synopsys, Inc. # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # Uncomment for debugging. Can't use localhost. # See: https://www.jetbrains.com/help/pycharm/remote-debugging-with-product.html#remote-debug-config # import pydevd_pycharm # pydevd_pycharm.settrace('<Host IP Address>', port=5002, stdoutToServer=True, stderrToServer=True) """ A script that inspects the pip cache to determine the hierarchy of dependencies Usage: pip-inspector.py --projectname=<project_name> --requirements=<requirements_path> """ from getopt import getopt, GetoptError from os import path import sys from re import split from pkg_resources import working_set, Requirement import pip pip_major_version = int(pip.__version__.split(".")[0]) if pip_major_version >= 20: from pip._internal.req import parse_requirements from pip._internal.network.session import PipSession elif pip_major_version >= 10: from pip._internal.req import parse_requirements from pip._internal.download import PipSession else: from pip.req import parse_requirements from pip.download import PipSession def main(): """Handles commandline args, executes the inspector, and prints the resulting dependency tree""" try: 
opts, __ = getopt(sys.argv[1:], 'p:r:', ['projectname=', 'requirements=']) except GetoptError as error: print(str(error)) print('integration-pip-inspector.py --projectname=<project_name> --requirements=<requirements_path>') sys.exit(2) project_name = None requirements_path = None for opt, arg in opts: if opt in ('-p', '--projectname'): project_name = arg elif opt in ('-r', '--requirements'): requirements_path = arg project_dependency_node = resolve_project_node(project_name) if requirements_path is not None: try: assert path.exists(requirements_path), ("The requirements file %s does not exist." % requirements_path) populate_dependency_tree(project_dependency_node, requirements_path) except AssertionError: print('r?' + requirements_path) print(project_dependency_node.render()) def resolve_project_node(project_name): """Attempts to resolve the root DependencyNode from the user provided --projectname argument. If it can't, produces a DependencyNode with name 'n?' and version 'v?'""" project_dependency_node = None if project_name is not None: project_dependency_node = recursively_resolve_dependencies(project_name, []) if project_dependency_node is None: project_dependency_node = DependencyNode('n?', 'v?') return project_dependency_node def populate_dependency_tree(project_root_node, requirements_path): """Resolves the dependencies of the user-provided requirements.txt and appends them to the dependency tree""" try: # This line is pretty much the only reason why we call the internal pip APIs anymore. We should consider if we # can do this with a more generalized approach. 
# --rotte DEC 2020 parsed_requirements = parse_requirements(requirements_path, session=PipSession()) for parsed_requirement in parsed_requirements: package_name = None # In 20.1 of pip, the requirements object changed if hasattr(parsed_requirement, 'req'): package_name = parsed_requirement.req.name if package_name is None: # Comparators from: https://www.python.org/dev/peps/pep-0508/#grammar # (Last updated November 2020) # # re matches from left to right, so subsets (e.g. ===) should be before supersets (e.g. ==) # See: https://docs.python.org/3/library/re.html # --rotte NOV 2020 package_name = split('===|<=|!=|==|>=|~=|<|>', parsed_requirement.requirement)[0] dependency_node = recursively_resolve_dependencies(package_name, []) if dependency_node is not None: project_root_node.children = project_root_node.children + [dependency_node] else: print('--' + package_name) except: print('p?' + requirements_path) def recursively_resolve_dependencies(package_name, history): """Forms a DependencyNode by recursively resolving its dependencies. Tracks history for cyclic dependencies.""" package = get_package_by_name(package_name) if package is None: return None dependency_node = DependencyNode(package.project_name, package.version) if package_name.lower() not in history: for package_dependency in package.requires(): child_node = recursively_resolve_dependencies(package_dependency.key, history + [package_name.lower()]) if child_node is not None: dependency_node.children = dependency_node.children + [child_node] return dependency_node def get_package_by_name(package_name): """Looks up a package from the pip cache""" if package_name is None: return None package_dict = working_set.by_key try: # TODO: By using pkg_resources.Requirement.parse to get the correct key, we may not need to attempt the other # methods. Robust tests are needed to confirm. 
return package_dict[Requirement.parse(package_name).key] except: pass name_variants = (package_name, package_name.lower(), package_name.replace('-', '_'), package_name.replace('_', '-')) for name_variant in name_variants: if name_variant in package_dict: return package_dict[name_variant] return None class DependencyNode(object): """Represents a python dependency in a tree graph with a name, version, and array of children DependencyNodes""" def __init__(self, name, version): self.name = name self.version = version self.children = [] def render(self, layer=1): """Recursively builds a dependency tree string to be printed to the commandline""" result = self.name + "==" + self.version for child in self.children: result += "\n" + (" " * 4 * layer) result += child.render(layer + 1) return result if __name__ == '__main__': main()
[ "os.path.exists", "getopt.getopt", "re.split", "pip.download.PipSession", "pip.__version__.split", "pkg_resources.Requirement.parse", "sys.exit" ]
[((1524, 1550), 'pip.__version__.split', 'pip.__version__.split', (['"""."""'], {}), "('.')\n", (1545, 1550), False, 'import pip\n'), ((2058, 2121), 'getopt.getopt', 'getopt', (['sys.argv[1:]', '"""p:r:"""', "['projectname=', 'requirements=']"], {}), "(sys.argv[1:], 'p:r:', ['projectname=', 'requirements='])\n", (2064, 2121), False, 'from getopt import getopt, GetoptError\n'), ((2299, 2310), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2307, 2310), False, 'import sys\n'), ((2685, 2715), 'os.path.exists', 'path.exists', (['requirements_path'], {}), '(requirements_path)\n', (2696, 2715), False, 'from os import path\n'), ((3964, 3976), 'pip.download.PipSession', 'PipSession', ([], {}), '()\n', (3974, 3976), False, 'from pip.download import PipSession\n'), ((6169, 6200), 'pkg_resources.Requirement.parse', 'Requirement.parse', (['package_name'], {}), '(package_name)\n', (6186, 6200), False, 'from pkg_resources import working_set, Requirement\n'), ((4665, 4728), 're.split', 'split', (['"""===|<=|!=|==|>=|~=|<|>"""', 'parsed_requirement.requirement'], {}), "('===|<=|!=|==|>=|~=|<|>', parsed_requirement.requirement)\n", (4670, 4728), False, 'from re import split\n')]
import json import logging import hashlib from actingweb import on_aw from armyknife_src import webexrequest from armyknife_src import webexbothandler from armyknife_src import webexmessagehandler from armyknife_src import fargate PROP_HIDE = [ "email", "oauthId" ] PROP_PROTECT = PROP_HIDE + [ "service_status" ] class OnAWWebexTeams(on_aw.OnAWBase): def get_properties(self, path: list, data: dict) -> dict or None: """ Called on GET to properties for transformations to be done :param path: Target path requested :type path: list[str] :param data: Data retrieved from data store to be returned :type data: dict :return: The transformed data to return to requestor or None if 404 should be returned :rtype: dict or None """ if not path: for k, v in data.copy().items(): if k in PROP_HIDE: del data[k] elif len(path) > 0 and path[0] in PROP_HIDE: return None return data def delete_properties(self, path: list, old: dict, new: dict) -> bool: """ Called on DELETE to properties :param path: Target path to be deleted :type path: list[str] :param old: Property value that will be deleted (or changed) :type old: dict :param new: Property value after path has been deleted :type new: dict :return: True if DELETE is allowed, False if 403 should be returned :rtype: bool """ if len(path) > 0 and path[0] in PROP_PROTECT: return False return True def put_properties(self, path: list, old: dict, new: dict) -> dict or None: """ Called on PUT to properties for transformations to be done before save :param path: Target path requested to be updated :type path: list[str] :param old: Old data from database :type old: dict :param new: :type new: New data from PUT request (after merge) :return: The dict that should be stored or None if 400 should be returned and nothing stored :rtype: dict or None """ if not path: return None elif len(path) > 0 and path[0] in PROP_PROTECT: return None return new def post_properties(self, prop: str, data: dict) -> dict or None: """ Called on POST to properties, once 
for each property :param prop: Property to be created :type prop: str :param data: The data to be stored in prop :type data: dict :return: The transformed data to store in prop or None if that property should be skipped and not stored :rtype: dict or None """ if not prop: return None elif prop in PROP_PROTECT: return None return data def delete_actor(self): """ Here we need to do additional cleanup when a user is deleted """ spark = webexrequest.WebexTeamsRequest( body=self.webobj.request.body, auth=self.auth, myself=self.myself, config=self.config) spark.store.clear_messages(email=spark.me.creator) trackers = spark.store.load_trackers() for tracker in trackers: spark.store.delete_tracker(tracker["email"]) firehose_id = spark.me.property.firehoseId if firehose_id: spark.link.unregister_webhook(firehose_id) spark.store.delete_rooms() spark.store.delete_pinned_messages() spark.store.delete_pinned_messages(comment="#/TOPOFMIND") if '@actingweb.net' not in spark.me.creator and spark.me.creator != "creator" and \ spark.me.creator != "trustee": spark.link.post_admin_message(text='User just left: ' + spark.me.creator) return def www_paths(self, path=''): """ This method is called on the user's URL/www requests """ # spark = sparkrequest.WebexTeamsRequest(body=self.webobj.request.body, # auth=self.auth, # myself=self.myself, # config=self.config) if path == '' or not self.myself: logging.info('Got an on_www_paths without proper parameters.') return False if path == 'getattachment': self.webobj.response.template_values = { 'url': str(self.webobj.request.get('url')), 'filename': str(self.webobj.request.get('filename')), } return True return False def check_on_oauth_success(self, token=None): """ Before approving an OAuth request, we need to validate the identity """ spark = webexrequest.WebexTeamsRequest( body=self.webobj.request.body, auth=self.auth, myself=self.myself, config=self.config) me = spark.link.get_me() if not me: logging.debug("Not able to retrieve myself from 
Cisco Webex Teams!") return False logging.debug("My identity:" + me['id']) current_id = spark.me.property.oauthId if not current_id: if 'emails' not in me: spark.me.store.cookie_redirect = None return False if spark.me.creator.lower() != me['emails'][0].lower(): spark.me.store.cookie_redirect = None spark.me.store.oauth_token = None spark.me.store.oauth_refresh_token = None spark.me.store.oauth_token_expiry = None spark.me.store.oauth_refresh_token_expiry = None spark.link.post_bot_message( email=me['emails'][0], text="**WARNING!!**\n\nAn attempt to create a new Army Knife account for " + spark.me.creator + " was done while you were logged into Cisco Webex Teams in your browser. Did you try with" " the wrong email address?\n\n" "You can instead do /init here to (re)authorize your account" " (click the link to grant new access).", markdown=True) spark.link.post_bot_message( email=spark.me.creator, text="**SECURITY WARNING**\n\n" + me['emails'][0] + "'s Cisco Webex Teams credentials were attempted used to create a new Army Knife" " account for you.\n\n" "No action required, but somebody may have attempted to hijack your" " Army Knife account.", markdown=True) if not spark.me.property.oauthId: spark.me.delete() return False spark.me.store.email = me['emails'][0].lower() spark.me.property.oauthId = me['id'] if 'displayName' in me: spark.me.property.displayName = me['displayName'] if 'avatar' in me: spark.me.property.avatarURI = me['avatar'] if '@actingweb.net' not in me['emails'][0]: spark.link.post_admin_message( text='New user just signed up: ' + me['displayName'] + ' (' + me['emails'][0] + ')') else: logging.debug("Actor's identity:" + current_id) if me['id'] != current_id: spark.me.store.cookie_redirect = None spark.me.store.oauth_token = None spark.me.store.oauth_refresh_token = None spark.me.store.oauth_token_expiry = None spark.me.store.oauth_refresh_token_expiry = None spark.link.post_bot_message( email=spark.me.property.email, text="**SECURITY WARNING**\n\n" + 
(me['emails'][0] or "Unknown") + " tried to log into your Army Knife account.\n\n" "For security reasons, your Army Knife account has been suspended.\n\n" "If this happens repeatedly, please contact <EMAIL>", markdown=True) return False return True def actions_on_oauth_success(self): """ When OAuth is successfully done, we need to do several actions """ if not self.myself: return True spark = webexrequest.WebexTeamsRequest(body=self.webobj.request.body, auth=self.auth, myself=self.myself, config=self.config) email = spark.me.creator hook_id = spark.me.property.firehoseId spark.me.property.token_invalid = None spark.me.property.service_status = None if hook_id: if spark.link.unregister_webhook(hook_id) is None and self.auth.oauth.last_response_code != 404 and \ self.auth.oauth.last_response_code != 0: spark.link.post_bot_message( email=email, text="Not able to delete old Cisco Webex Teams webhook link, do /init and authorize again " "or do `/support your_msg` to get help", markdown=True) spark.link.post_admin_message( text="Successfully authorized account, but could not delete old firehose: " + email) spark.link.post_admin_message( text=str(self.auth.oauth.last_response_code) + ':' + self.auth.oauth.last_response_message) return True msghash = hashlib.sha256() msghash.update(spark.me.passphrase.encode('utf-8')) hook = spark.link.register_webhook( name='Firehose', target=self.config.root + spark.me.id + '/callbacks/firehose', resource='all', event='all', secret=msghash.hexdigest() ) if hook and hook['id']: logging.debug('Successfully registered messages firehose webhook') spark.me.property.firehoseId = hook['id'] else: logging.debug('Failed to register messages firehose webhook') spark.link.post_admin_message(text='Failed to register firehose for new user: ' + email) spark.link.post_bot_message( email=email, text="Not able to create Cisco Webex Teams webhook link, do /init and authorize again " "or do `/support your_msg` to get help", markdown=True) return True 
spark.me.property.app_disabled = None spark.link.post_bot_message( email=email, text="Hi there! Welcome to the **Army Knife**! \n\n" "You have successfully authorized access.\n\nSend me commands starting with /. Like /help or /me", markdown=True) return True def bot_post(self, path): """Called on POSTs to /bot.""" # Get a spark request object to do signature check spark = webexrequest.WebexTeamsRequest(body=self.webobj.request.body, auth=self.auth, myself=None, config=self.config) if not fargate.in_fargate() and not fargate.fargate_disabled() and \ not spark.check_bot_signature(self.webobj.request.headers, self.webobj.request.body): return 404 # Try to re-init from person_id in the message spark.re_init() # Ignore messages from the bot itself if spark.is_actor_bot: logging.debug("Dropping message from ArmyKnife bot...") return 204 # If not successful, we don't have this user if not spark.is_actor_user: spark.enrich_data('person') # # The first time a user is in touch with the bot, it can either be the user or the bot that has initiated # the contact, i.e. the actor_id can either be somebody unknown or the bot # The message flow is the following: # 1. rooms, created -> type direct or group # 2. memberships, created -> two messages, one for the bot and one for the user # 3. 
messages, created -> either from the bot or from the user depending on who initiated the request # handler = webexbothandler.WebexTeamsBotHandler(spark, self.webobj) if spark.body['resource'] == 'rooms': if spark.body['event'] == 'created': handler.rooms_created() elif spark.body['resource'] == 'memberships': if spark.body['event'] == 'created': handler.memberships_created() elif spark.body['resource'] == 'messages': if spark.body['event'] == 'created': handler.messages_created() # No more event types we want to handle, just return return 204 def get_callbacks(self, name): """ This method is called for regular web browser requests to the user's URL/something """ spark = webexrequest.WebexTeamsRequest(body=self.webobj.request.body, auth=self.auth, myself=self.myself, config=self.config) if name == 'joinroom': uuid = self.webobj.request.get('id') room = spark.store.load_room_by_uuid(uuid) if not room: self.webobj.response.set_status(404) return True roominfo = spark.link.get_room(room['id']) self.webobj.response.template_values = { 'id': uuid, 'title': roominfo['title'], } if name == 'makefilepublic': pass # This is not secure!!! 
So do not execute # token is exposed directly in javascript in the users browser # self.webobj.response.template_values = { # 'url': str(self.webobj.request.get('url')), # 'token': str(auth.token), # 'filename': str(self.webobj.request.get('filename')), # } return True def post_callbacks(self, name): if not self.myself or not self.myself.id: logging.debug("Got a firehose callback for an unknown user.") self.webobj.response.set_status(410, 'Gone') return True spark = webexrequest.WebexTeamsRequest( body=self.webobj.request.body, auth=self.auth, myself=self.myself, config=self.config) # Ignore messages from the bot itself if spark.is_actor_bot: logging.debug("Dropping message from ArmyKnife bot...") return True # Clean up any actor creations from earlier where we got wrong creator email # Likely not needed anymore, but just in case if spark.me.creator.lower() == self.config.bot['email'].lower() or spark.me.creator == "creator": my_email = spark.me.property.email.lower() if my_email and len(my_email) > 0: spark.me.modify(creator=my_email) # Deprecated support for /callbacks/room if name == 'room': self.webobj.response.set_status(404, 'Not found') return True handler = webexmessagehandler.WebexTeamsMessageHandler(spark, self.webobj) # non-json POSTs to be handled first if name == 'joinroom': return handler.joinroom() if not spark.check_firehose_signature(self.webobj.request.headers, self.webobj.request.body): logging.debug('Returning 403 forbidden...') return False if spark.body['resource'] == 'memberships': if spark.body['event'] == 'created': handler.memberships_created() else: # memberships:deleted return True elif spark.body['resource'] == 'messages': # If message_actions() returns False, the account was disabled or invalid if not handler.message_actions(): return True # Only handle messages:created events below if spark.body['resource'] != 'messages' or spark.body['event'] != 'created': return True # Special case for /delete as we need to call self.delete_actor() 
# Make sure we have pulled down the message and spark.cmd is thus set if not spark.enrich_data('msg'): return True if not spark.enrich_data('account'): return True if spark.cmd == '/delete' and spark.room_id == spark.chat_room_id: if len(spark.msg_list) == 2 and spark.msg_list_wcap[1] == 'DELETENOW': self.delete_actor() spark.me.delete() spark.link.post_bot_message( email=spark.me.creator, text="All your account data and the Cisco Webex Teams webhook was deleted. Sorry to see you" " leaving!\n\nThis 1:1 cannot be deleted (Cisco Webex Teams feature), " "and you can any time type /init" " here to register a new account.", markdown=True) else: spark.link.post_bot_message( email=spark.me.creator, text="Usage: `/delete DELETENOW`", markdown=True) if spark.body['resource'] == 'messages' and spark.body['event'] == 'created': handler.messages_created() # Successfully processed, just not acted upon self.webobj.response.set_status(204, 'No content') return True def post_subscriptions(self, sub, peerid, data): """Customizible function to process incoming callbacks/subscriptions/ callback with json body, return True if processed, False if not.""" spark = webexrequest.WebexTeamsRequest( body=self.webobj.request.body, auth=self.auth, myself=self.myself, config=self.config) logging.debug("Got callback and processed " + sub["subscriptionid"] + " subscription from peer " + peerid + " with json blob: " + json.dumps(data)) app_disabled = spark.me.property.app_disabled if app_disabled and app_disabled.lower() == 'true': logging.debug("Account is disabled: " + spark.me.creator) return True if 'target' in data and data['target'] == 'properties': if 'subtarget' in data: if data['subtarget'] == 'topofmind' and 'data' in data: topofmind = data['data'] toplist = topofmind['list'] if len(toplist) == 0: spark.link.post_bot_message( email=spark.me.creator, text=topofmind['displayName'] + " (" + topofmind['email'] + ") just cleared " + topofmind['title'], markdown=True) return True out = 
topofmind['displayName'] + " (" + topofmind['email'] + ") just updated " + topofmind[ 'title'] + "\n\n----\n\n" for i, el in sorted(toplist.items()): out = out + "**" + i + "**: " + el + "\n\n" spark.link.post_bot_message(email=spark.me.creator, text=out, markdown=True) elif data['subtarget'] == 'new' and 'data' in data: out = '#Incoming email(s): \n' for k, v in data['data'].items(): h = v['headers'] out += '**From: ' + h['From'][0] + '** \n' out += 'Subject: ' + h['Subject'][0] + ' \n' out += v['snippet'] + '\n\n---\n\n' if len(out) > 4000: spark.link.post_bot_message(email=spark.me.creator, text=out, markdown=True) out = '' if out: spark.link.post_bot_message(email=spark.me.creator, text=out, markdown=True) return True if 'resource' in data: folder_id = data['resource'] room = spark.store.load_room_by_boxfolder_id(folder_id=folder_id) if room and 'data' in data and 'suggested_txt' in data['data']: spark.link.post_message(room.id, '**From Box:** ' + data['data']['suggested_txt'], markdown=True) else: logging.warning('Was not able to post callback message to Cisco Webex Teams room.') else: logging.debug('No resource in received subscription data.') return True
[ "hashlib.sha256", "armyknife_src.webexrequest.WebexTeamsRequest", "logging.debug", "json.dumps", "logging.warning", "armyknife_src.fargate.in_fargate", "armyknife_src.fargate.fargate_disabled", "armyknife_src.webexbothandler.WebexTeamsBotHandler", "logging.info", "armyknife_src.webexmessagehandler...
[((2988, 3110), 'armyknife_src.webexrequest.WebexTeamsRequest', 'webexrequest.WebexTeamsRequest', ([], {'body': 'self.webobj.request.body', 'auth': 'self.auth', 'myself': 'self.myself', 'config': 'self.config'}), '(body=self.webobj.request.body, auth=self.\n auth, myself=self.myself, config=self.config)\n', (3018, 3110), False, 'from armyknife_src import webexrequest\n'), ((4773, 4895), 'armyknife_src.webexrequest.WebexTeamsRequest', 'webexrequest.WebexTeamsRequest', ([], {'body': 'self.webobj.request.body', 'auth': 'self.auth', 'myself': 'self.myself', 'config': 'self.config'}), '(body=self.webobj.request.body, auth=self.\n auth, myself=self.myself, config=self.config)\n', (4803, 4895), False, 'from armyknife_src import webexrequest\n'), ((5106, 5146), 'logging.debug', 'logging.debug', (["('My identity:' + me['id'])"], {}), "('My identity:' + me['id'])\n", (5119, 5146), False, 'import logging\n'), ((8510, 8632), 'armyknife_src.webexrequest.WebexTeamsRequest', 'webexrequest.WebexTeamsRequest', ([], {'body': 'self.webobj.request.body', 'auth': 'self.auth', 'myself': 'self.myself', 'config': 'self.config'}), '(body=self.webobj.request.body, auth=self.\n auth, myself=self.myself, config=self.config)\n', (8540, 8632), False, 'from armyknife_src import webexrequest\n'), ((9787, 9803), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (9801, 9803), False, 'import hashlib\n'), ((11244, 11359), 'armyknife_src.webexrequest.WebexTeamsRequest', 'webexrequest.WebexTeamsRequest', ([], {'body': 'self.webobj.request.body', 'auth': 'self.auth', 'myself': 'None', 'config': 'self.config'}), '(body=self.webobj.request.body, auth=self.\n auth, myself=None, config=self.config)\n', (11274, 11359), False, 'from armyknife_src import webexrequest\n'), ((12610, 12666), 'armyknife_src.webexbothandler.WebexTeamsBotHandler', 'webexbothandler.WebexTeamsBotHandler', (['spark', 'self.webobj'], {}), '(spark, self.webobj)\n', (12646, 12666), False, 'from armyknife_src import 
webexbothandler\n'), ((13334, 13456), 'armyknife_src.webexrequest.WebexTeamsRequest', 'webexrequest.WebexTeamsRequest', ([], {'body': 'self.webobj.request.body', 'auth': 'self.auth', 'myself': 'self.myself', 'config': 'self.config'}), '(body=self.webobj.request.body, auth=self.\n auth, myself=self.myself, config=self.config)\n', (13364, 13456), False, 'from armyknife_src import webexrequest\n'), ((14740, 14862), 'armyknife_src.webexrequest.WebexTeamsRequest', 'webexrequest.WebexTeamsRequest', ([], {'body': 'self.webobj.request.body', 'auth': 'self.auth', 'myself': 'self.myself', 'config': 'self.config'}), '(body=self.webobj.request.body, auth=self.\n auth, myself=self.myself, config=self.config)\n', (14770, 14862), False, 'from armyknife_src import webexrequest\n'), ((15653, 15717), 'armyknife_src.webexmessagehandler.WebexTeamsMessageHandler', 'webexmessagehandler.WebexTeamsMessageHandler', (['spark', 'self.webobj'], {}), '(spark, self.webobj)\n', (15697, 15717), False, 'from armyknife_src import webexmessagehandler\n'), ((18253, 18375), 'armyknife_src.webexrequest.WebexTeamsRequest', 'webexrequest.WebexTeamsRequest', ([], {'body': 'self.webobj.request.body', 'auth': 'self.auth', 'myself': 'self.myself', 'config': 'self.config'}), '(body=self.webobj.request.body, auth=self.\n auth, myself=self.myself, config=self.config)\n', (18283, 18375), False, 'from armyknife_src import webexrequest\n'), ((4251, 4313), 'logging.info', 'logging.info', (['"""Got an on_www_paths without proper parameters."""'], {}), "('Got an on_www_paths without proper parameters.')\n", (4263, 4313), False, 'import logging\n'), ((5004, 5072), 'logging.debug', 'logging.debug', (['"""Not able to retrieve myself from Cisco Webex Teams!"""'], {}), "('Not able to retrieve myself from Cisco Webex Teams!')\n", (5017, 5072), False, 'import logging\n'), ((7428, 7475), 'logging.debug', 'logging.debug', (['("Actor\'s identity:" + current_id)'], {}), '("Actor\'s identity:" + current_id)\n', (7441, 7475), 
False, 'import logging\n'), ((10162, 10228), 'logging.debug', 'logging.debug', (['"""Successfully registered messages firehose webhook"""'], {}), "('Successfully registered messages firehose webhook')\n", (10175, 10228), False, 'import logging\n'), ((10309, 10370), 'logging.debug', 'logging.debug', (['"""Failed to register messages firehose webhook"""'], {}), "('Failed to register messages firehose webhook')\n", (10322, 10370), False, 'import logging\n'), ((11866, 11921), 'logging.debug', 'logging.debug', (['"""Dropping message from ArmyKnife bot..."""'], {}), "('Dropping message from ArmyKnife bot...')\n", (11879, 11921), False, 'import logging\n'), ((14581, 14642), 'logging.debug', 'logging.debug', (['"""Got a firehose callback for an unknown user."""'], {}), "('Got a firehose callback for an unknown user.')\n", (14594, 14642), False, 'import logging\n'), ((14996, 15051), 'logging.debug', 'logging.debug', (['"""Dropping message from ArmyKnife bot..."""'], {}), "('Dropping message from ArmyKnife bot...')\n", (15009, 15051), False, 'import logging\n'), ((15946, 15989), 'logging.debug', 'logging.debug', (['"""Returning 403 forbidden..."""'], {}), "('Returning 403 forbidden...')\n", (15959, 15989), False, 'import logging\n'), ((18724, 18781), 'logging.debug', 'logging.debug', (["('Account is disabled: ' + spark.me.creator)"], {}), "('Account is disabled: ' + spark.me.creator)\n", (18737, 18781), False, 'import logging\n'), ((21050, 21109), 'logging.debug', 'logging.debug', (['"""No resource in received subscription data."""'], {}), "('No resource in received subscription data.')\n", (21063, 21109), False, 'import logging\n'), ((11511, 11531), 'armyknife_src.fargate.in_fargate', 'fargate.in_fargate', ([], {}), '()\n', (11529, 11531), False, 'from armyknife_src import fargate\n'), ((11540, 11566), 'armyknife_src.fargate.fargate_disabled', 'fargate.fargate_disabled', ([], {}), '()\n', (11564, 11566), False, 'from armyknife_src import fargate\n'), ((18580, 18596), 
'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (18590, 18596), False, 'import json\n'), ((20940, 21028), 'logging.warning', 'logging.warning', (['"""Was not able to post callback message to Cisco Webex Teams room."""'], {}), "(\n 'Was not able to post callback message to Cisco Webex Teams room.')\n", (20955, 21028), False, 'import logging\n')]
import os, sys import warnings __thisdir__ = os.path.dirname(os.path.realpath(__file__)) # Testing the import of the numpy package def test_import_numpy(): try: import numpy as np except ImportError as e: assert(1 == 0), "Numpy could not be imported:\n %s" %e sys.path.pop(0) # Testing the import of the scipy package def test_import_scipy(): try: import scipy as sp except ImportError as e: assert(1 == 0), "Scipy could not be imported:\n %s" %e sys.path.pop(0) # Testing for broken MPI installation def test_import_mpi4py(): try: from mpi4py import MPI except ImportError: warnings.warn(UserWarning("MPI for python could not be imported")) assert(1 == 1) sys.path.pop(0) # Testing the import of the Qt modules QtGui and QtCore def test_import_qt_modules(): sys.path.insert(0, __thisdir__ + "/../src") try: from interface.Qt import QtGui, QtCore except ImportError as e: assert (1 == 0), "The Qt modules QtGui and QtCore could not be imported:\n %s" %e sys.path.pop(0) # Testing the import of the pyqtgraph module def test_import_pyqtgraph_module(): try: import pyqtgraph except ImportError: assert (1 == 0), "The pyqtgraph module could not be imported" sys.path.pop(0) # Testimg the import of the interface module def test_import_interface_module(): sys.path.insert(0, __thisdir__ + "/../src") try: import interface except ImportError as e: assert (1 == 0), "The interface module could not be imported:\n %s" %e sys.path.pop(0) # Testing the import of the ipc module def test_import_ipc_module(): sys.path.insert(0, __thisdir__ + "/../src") try: import ipc except ImportError as e: assert (1 == 0), "The ipc module could not be imported:\n %s" %e sys.path.pop(0) # Testing the import of the plotting module def test_import_plotting_module(): sys.path.insert(0, __thisdir__ + "/../src") try: import plotting except ImportError as e: assert (1 == 0), "The plotting module could not be imported:\n %s" %e sys.path.pop(0) # Testing the import of the analysis module def test_import_analysis_module(): 
sys.path.insert(0, __thisdir__ + "/../src") try: import analysis except ImportError as e: assert (1 == 0), "The analysis module could not be imported:\n %s" %e sys.path.pop(0) # Testing the import of the simulation module def test_import_simulation_module(): sys.path.insert(0, __thisdir__ + "/../src") try: import simulation except ImportError as e: assert (1 == 0), "The simulation module could not be imported:\n %s" %e sys.path.pop(0) # Testing the import of the utils module def test_import_utils_module(): sys.path.insert(0, __thisdir__ + "/../src") try: import utils except ImportError as e: assert (1 == 0), "The utils module could not be imported:\n %s" %e sys.path.pop(0) # Testing if LCLS backend is imported properly def test_import_backend_lcls(): sys.path.insert(0, __thisdir__ + "/../src") try: import backend.lcls return True except ImportError as e: warnings.warn(UserWarning("The LCLS backend could not be imported:\n %s" %e)) assert(1 == 1) return False sys.path.pop(0)
[ "os.path.realpath", "sys.path.pop", "sys.path.insert" ]
[((61, 87), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (77, 87), False, 'import os, sys\n'), ((289, 304), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (301, 304), False, 'import os, sys\n'), ((506, 521), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (518, 521), False, 'import os, sys\n'), ((757, 772), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (769, 772), False, 'import os, sys\n'), ((864, 907), 'sys.path.insert', 'sys.path.insert', (['(0)', "(__thisdir__ + '/../src')"], {}), "(0, __thisdir__ + '/../src')\n", (879, 907), False, 'import os, sys\n'), ((1087, 1102), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (1099, 1102), False, 'import os, sys\n'), ((1321, 1336), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (1333, 1336), False, 'import os, sys\n'), ((1423, 1466), 'sys.path.insert', 'sys.path.insert', (['(0)', "(__thisdir__ + '/../src')"], {}), "(0, __thisdir__ + '/../src')\n", (1438, 1466), False, 'import os, sys\n'), ((1613, 1628), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (1625, 1628), False, 'import os, sys\n'), ((1707, 1750), 'sys.path.insert', 'sys.path.insert', (['(0)', "(__thisdir__ + '/../src')"], {}), "(0, __thisdir__ + '/../src')\n", (1722, 1750), False, 'import os, sys\n'), ((1885, 1900), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (1897, 1900), False, 'import os, sys\n'), ((1989, 2032), 'sys.path.insert', 'sys.path.insert', (['(0)', "(__thisdir__ + '/../src')"], {}), "(0, __thisdir__ + '/../src')\n", (2004, 2032), False, 'import os, sys\n'), ((2177, 2192), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (2189, 2192), False, 'import os, sys\n'), ((2277, 2320), 'sys.path.insert', 'sys.path.insert', (['(0)', "(__thisdir__ + '/../src')"], {}), "(0, __thisdir__ + '/../src')\n", (2292, 2320), False, 'import os, sys\n'), ((2465, 2480), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (2477, 2480), False, 'import os, sys\n'), ((2569, 
2612), 'sys.path.insert', 'sys.path.insert', (['(0)', "(__thisdir__ + '/../src')"], {}), "(0, __thisdir__ + '/../src')\n", (2584, 2612), False, 'import os, sys\n'), ((2761, 2776), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (2773, 2776), False, 'import os, sys\n'), ((2855, 2898), 'sys.path.insert', 'sys.path.insert', (['(0)', "(__thisdir__ + '/../src')"], {}), "(0, __thisdir__ + '/../src')\n", (2870, 2898), False, 'import os, sys\n'), ((3037, 3052), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (3049, 3052), False, 'import os, sys\n'), ((3141, 3184), 'sys.path.insert', 'sys.path.insert', (['(0)', "(__thisdir__ + '/../src')"], {}), "(0, __thisdir__ + '/../src')\n", (3156, 3184), False, 'import os, sys\n'), ((3405, 3420), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (3417, 3420), False, 'import os, sys\n')]
# coding: utf-8 from __future__ import absolute_import from datetime import date, datetime # noqa: F401 from typing import List, Dict # noqa: F401 from xcube_hub.models.base_model_ import Model from xcube_hub import util class CubegenConfigCodeConfigFileSet(Model): """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). Do not edit the class manually. """ def __init__(self, path=None): # noqa: E501 """CubegenConfigCodeConfigFileSet - a model defined in OpenAPI :param path: The path of this CubegenConfigCodeConfigFileSet. # noqa: E501 :type path: str """ self.openapi_types = { 'path': str } self.attribute_map = { 'path': 'path' } self._path = path @classmethod def from_dict(cls, dikt) -> 'CubegenConfigCodeConfigFileSet': """Returns the dict as a model :param dikt: A dict. :type: dict :return: The CubegenConfigCodeConfig_file_set of this CubegenConfigCodeConfigFileSet. # noqa: E501 :rtype: CubegenConfigCodeConfigFileSet """ return util.deserialize_model(dikt, cls) @property def path(self): """Gets the path of this CubegenConfigCodeConfigFileSet. :return: The path of this CubegenConfigCodeConfigFileSet. :rtype: str """ return self._path @path.setter def path(self, path): """Sets the path of this CubegenConfigCodeConfigFileSet. :param path: The path of this CubegenConfigCodeConfigFileSet. :type path: str """ self._path = path
[ "xcube_hub.util.deserialize_model" ]
[((1173, 1206), 'xcube_hub.util.deserialize_model', 'util.deserialize_model', (['dikt', 'cls'], {}), '(dikt, cls)\n', (1195, 1206), False, 'from xcube_hub import util\n')]
# -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2017-09-29 22:13 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('hostmanager', '0001_initial'), ] operations = [ migrations.AlterField( model_name='server', name='ip', field=models.CharField(blank=True, help_text='Will be automatically populated if the domain is registered.', max_length=255, null=True, verbose_name='IP Address'), ), ]
[ "django.db.models.CharField" ]
[((390, 562), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Will be automatically populated if the domain is registered."""', 'max_length': '(255)', 'null': '(True)', 'verbose_name': '"""IP Address"""'}), "(blank=True, help_text=\n 'Will be automatically populated if the domain is registered.',\n max_length=255, null=True, verbose_name='IP Address')\n", (406, 562), False, 'from django.db import migrations, models\n')]
# Generated by Django 3.2.4 on 2021-06-28 14:12 import django.db.models.deletion # noqa from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("contenttypes", "0002_remove_content_type_name"), ("wagtailsearch", "0004_querydailyhits_verbose_name_plural"), ] operations = [ migrations.CreateModel( name="IndexEntry", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("object_id", models.CharField(max_length=50)), ("title_norm", models.FloatField(default=1.0)), ( "content_type", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name="+", to="contenttypes.contenttype", ), ), ], options={ "verbose_name": "index entry", "verbose_name_plural": "index entries", "abstract": False, }, ), migrations.AlterUniqueTogether( name="indexentry", unique_together={("content_type", "object_id")}, ), ]
[ "django.db.migrations.AlterUniqueTogether", "django.db.models.FloatField", "django.db.models.ForeignKey", "django.db.models.AutoField", "django.db.models.CharField" ]
[((1365, 1468), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""indexentry"""', 'unique_together': "{('content_type', 'object_id')}"}), "(name='indexentry', unique_together={(\n 'content_type', 'object_id')})\n", (1395, 1468), False, 'from django.db import migrations, models\n'), ((498, 591), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (514, 591), False, 'from django.db import migrations, models\n'), ((757, 788), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (773, 788), False, 'from django.db import migrations, models\n'), ((822, 852), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(1.0)'}), '(default=1.0)\n', (839, 852), False, 'from django.db import migrations, models\n'), ((929, 1045), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""+"""', 'to': '"""contenttypes.contenttype"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='+', to='contenttypes.contenttype')\n", (946, 1045), False, 'from django.db import migrations, models\n')]
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import torch import torch.nn.functional as F from torch import nn from maskrcnn_benchmark.modeling import registry from maskrcnn_benchmark.modeling.rbox_coder import RBoxCoder from maskrcnn_benchmark.layers import Mish from .loss import make_rpn_loss_evaluator from .anchor_generator import make_anchor_generator from .inference import make_rpn_postprocessor from .mc_loss import make_rpn_loss_evaluator as make_mc_rpn_loss_evaluator loss_evaluator_dict = { "SingleConvARPNHead": make_rpn_loss_evaluator, "SingleConvARPNMCHead": make_mc_rpn_loss_evaluator, "TowerARPNHead": make_rpn_loss_evaluator } class Conv2dGroup(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, relu=True, same_padding=False, gn=False): super(Conv2dGroup, self).__init__() padding = int((kernel_size - 1) / 2) if same_padding else 0 self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding) # self.bn = nn.BatchNorm2d(out_channels) if bn else None # self.gn = nn.GroupNorm(32, out_channels) if gn else None self.relu = nn.ReLU() if relu else None def forward(self, x): x = self.conv(x) if self.gn is not None: x = self.gn(x) if self.relu is not None: x = self.relu(x) return x @registry.RPN_HEADS.register("SingleConvARPNHead") class RPNHead(nn.Module): """ Adds a simple RPN Head with classification and regression heads """ def __init__(self, cfg, in_channels, num_anchors=1): """ Arguments: cfg : config in_channels (int): number of channels of the input feature num_anchors (int): number of anchors to be predicted # We consider the condition in east """ super(RPNHead, self).__init__() self.conv = nn.Conv2d( in_channels, in_channels, kernel_size=3, stride=1, padding=1 ) self.cls_logits = nn.Conv2d(in_channels, 1 * num_anchors, kernel_size=1, stride=1) # Dis from t, r, b, l self.bbox_pred = nn.Conv2d( in_channels, 4 * num_anchors, kernel_size=1, stride=1 ) # Angle self.angle_pred = 
nn.Conv2d(in_channels, 1 * num_anchors, kernel_size=1, stride=1) for l in [self.conv, self.cls_logits, self.bbox_pred, self.angle_pred]: # torch.nn.init.normal_(l.weight, std=0.01) torch.nn.init.constant_(l.bias, 0) self.activation = Mish() if cfg.MODEL.ARPN.USE_MISH else nn.ReLU() def forward(self, x): logits = [] bbox_reg = [] # angle_pred = [] for feature in x: t = self.activation(self.conv(feature)) logits.append(self.cls_logits(t).sigmoid()) bbox_logit = torch.cat([ self.bbox_pred(t).sigmoid(), self.angle_pred(t).sigmoid() ], dim=1) bbox_reg.append(bbox_logit) return logits, bbox_reg # , angle_pred @registry.RPN_HEADS.register("SingleConvARPNMCHead") class MCRPNHead(nn.Module): """ Adds a simple RPN Head with classification and regression heads """ def __init__(self, cfg, in_channels, num_anchors=1): """ Arguments: cfg : config in_channels (int): number of channels of the input feature num_anchors (int): number of anchors to be predicted # We consider the condition in east """ super(MCRPNHead, self).__init__() self.conv = nn.Conv2d( in_channels, in_channels, kernel_size=3, stride=1, padding=1 ) self.cls_logits = nn.Conv2d(in_channels, 1 * num_anchors, kernel_size=1, stride=1) # Dis from t, r, b, l self.bbox_pred = nn.Conv2d( in_channels, 4 * num_anchors, kernel_size=1, stride=1 ) # Angle self.angle_pred = nn.Conv2d(in_channels, 1 * num_anchors, kernel_size=1, stride=1) # Multi-class map self.mc_pred = nn.Conv2d(in_channels, cfg.MODEL.ARPN.MC_NUM * num_anchors, kernel_size=1, stride=1) for l in [self.conv, self.cls_logits, self.bbox_pred, self.angle_pred, self.mc_pred]: # torch.nn.init.normal_(l.weight, std=0.01) torch.nn.init.constant_(l.bias, 0) def forward(self, x): logits = [] bbox_reg = [] # angle_pred = [] for feature in x: t = F.relu(self.conv(feature)) logits.append(self.cls_logits(t).sigmoid()) bbox_logit = torch.cat([ self.bbox_pred(t).sigmoid(), self.angle_pred(t).sigmoid(), self.mc_pred(t), ], dim=1) bbox_reg.append(bbox_logit) return logits, 
bbox_reg # , angle_pred @registry.RPN_HEADS.register("TowerARPNHead") class TowerARPNHead(nn.Module): """ Adds a simple RPN Head with classification and regression heads """ def __init__(self, cfg, in_channels, num_anchors=1): """ Arguments: cfg : config in_channels (int): number of channels of the input feature num_anchors (int): number of anchors to be predicted # We consider the condition in east """ super(TowerARPNHead, self).__init__() self.conv = nn.Conv2d( in_channels, in_channels, kernel_size=3, stride=1, padding=1 ) cls_tower = [] abox_tower = [] for i in range(cfg.MODEL.ARPN.CONV_STACK): #cls_tower.append( # Conv2dGroup( # in_channels, # in_channels, # 3, # same_padding=True, # gn=cfg.MODEL.ARPN.USE_GN # ) #) abox_tower.append( Conv2dGroup( in_channels, in_channels, 3, same_padding=True, gn=cfg.MODEL.ARPN.USE_GN ) ) self.add_module('cls_tower', nn.Sequential(*cls_tower)) self.add_module('abox_tower', nn.Sequential(*abox_tower)) self.cls_logits = nn.Conv2d(in_channels, 1 * num_anchors, kernel_size=1, stride=1) # Dis from t, r, b, l self.bbox_pred = nn.Conv2d( in_channels, 4 * num_anchors, kernel_size=1, stride=1 ) # for i in range(len(cfg.MODEL.ARPN.SCALE_STACK)): # self.add_module('ff_boxes_' + str(i + 2), self.box_logits[i]) # Angle self.angle_pred = nn.Conv2d(in_channels, 1 * num_anchors, kernel_size=1, stride=1) # initialization for modules in [self.cls_tower, self.abox_tower, self.cls_logits, self.bbox_pred, self.angle_pred]: for l in modules.modules(): if isinstance(l, nn.Conv2d): torch.nn.init.normal_(l.weight, std=0.01) torch.nn.init.constant_(l.bias, 0) ''' for name, param in self.named_parameters(): # print('name:', name) if "weight" in name and 'gn' in name: param.data.fill_(1) elif "bias" in name and 'gn' in name: param.data.fill_(0) else: torch.nn.init.normal_(param, std=0.01) torch.nn.init.constant_(param, 0) for l in [self.conv, self.cls_logits, self.angle_pred] + self.box_logits: # self.bbox_pred, torch.nn.init.normal_(l.weight, std=0.01) 
torch.nn.init.constant_(l.bias, 0) ''' def forward(self, x): logits = [] bbox_reg = [] # angle_pred = [] cnt = 0 for feature in x: # cls = self.cls_tower(feature) t = self.abox_tower(feature) logits.append(self.cls_logits(t).sigmoid()) bbox_logit = torch.cat([ self.bbox_pred(t).sigmoid(), # .sigmoid(), self.angle_pred(t).sigmoid() ], dim=1) bbox_reg.append(bbox_logit) cnt += 1 return logits, bbox_reg loss_name_dict = { "SingleConvARPNHead": ["loss_objectness", "loss_rpn_box_reg"], "SingleConvARPNMCHead": ["loss_objectness", "loss_rpn_box_reg", "loss_mc"], "TowerARPNHead": ["loss_objectness", "loss_rpn_box_reg"], } class RPNModule(torch.nn.Module): """ Module for RPN computation. Takes feature maps from the backbone and RPN proposals and losses. Works for both FPN and non-FPN. """ def __init__(self, cfg): super(RPNModule, self).__init__() self.cfg = cfg.clone() anchor_generator = make_anchor_generator(cfg) in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS rpn_head = registry.RPN_HEADS[cfg.MODEL.RPN.RPN_HEAD] head = rpn_head( cfg, in_channels, 1 ) rpn_box_coder = RBoxCoder(weights=(1.0, 1.0, 1.0, 1.0, 1.0)) box_selector_train = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=True) box_selector_test = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=False) loss_evaluator = loss_evaluator_dict[cfg.MODEL.RPN.RPN_HEAD](cfg, rpn_box_coder) self.anchor_generator = anchor_generator self.head = head self.box_selector_train = box_selector_train self.box_selector_test = box_selector_test self.loss_evaluator = loss_evaluator self.loss_name = loss_name_dict[cfg.MODEL.RPN.RPN_HEAD] def forward(self, images, features, targets=None): """ Arguments: images (ImageList): images for which we want to compute the predictions features (list[Tensor]): features computed from the images that are used for computing the predictions. 
Each tensor in the list correspond to different feature levels targets (list[BoxList): ground-truth boxes present in the image (optional) Returns: boxes (list[BoxList]): the predicted boxes from the RPN, one BoxList per image. losses (dict[Tensor]): the losses for the model during training. During testing, it is an empty dict. """ objectness, rpn_box_regression = self.head(features) anchors = self.anchor_generator(images, features) if self.training: return self._forward_train(anchors, objectness, rpn_box_regression, targets) else: return self._forward_test(anchors, objectness, rpn_box_regression) def _forward_train(self, anchors, objectness, rpn_box_regression, targets): import ipdb;ipdb.set_trace() if self.cfg.MODEL.RPN_ONLY: # When training an RPN-only model, the loss is determined by the # predicted objectness and rpn_box_regression values and there is # no need to transform the anchors into predicted boxes; this is an # optimization that avoids the unnecessary transformation. boxes = anchors else: # For end-to-end models, anchors must be transformed into boxes and # sampled into a training batch. with torch.no_grad(): boxes = self.box_selector_train( anchors, objectness, rpn_box_regression, targets ) loss_item = self.loss_evaluator( anchors, objectness, rpn_box_regression, targets ) losses = {self.loss_name[i]: loss_item[i] for i in range(len(loss_item))} ''' losses = { "loss_objectness": loss_objectness, "loss_rpn_box_reg": loss_rpn_box_reg, } ''' return boxes, losses def _forward_test(self, anchors, objectness, rpn_box_regression): boxes = self.box_selector_test(anchors, objectness, rpn_box_regression) if self.cfg.MODEL.RPN_ONLY: # For end-to-end models, the RPN proposals are an intermediate state # and don't bother to sort them in decreasing score order. For RPN-only # models, the proposals are the final output and we return them in # high-to-low confidence order. 
inds = [ box.get_field("objectness").sort(descending=True)[1] for box in boxes ] boxes = [box[ind] for box, ind in zip(boxes, inds)] return boxes, {} def build_rpn(cfg): """ This gives the gist of it. Not super important because it doesn't change as much """ return RPNModule(cfg)
[ "torch.nn.GroupNorm", "torch.nn.ReLU", "torch.nn.init.constant_", "ipdb.set_trace", "torch.nn.Sequential", "torch.nn.Conv2d", "maskrcnn_benchmark.modeling.registry.RPN_HEADS.register", "maskrcnn_benchmark.layers.Mish", "maskrcnn_benchmark.modeling.rbox_coder.RBoxCoder", "torch.no_grad", "torch.n...
[((1413, 1462), 'maskrcnn_benchmark.modeling.registry.RPN_HEADS.register', 'registry.RPN_HEADS.register', (['"""SingleConvARPNHead"""'], {}), "('SingleConvARPNHead')\n", (1440, 1462), False, 'from maskrcnn_benchmark.modeling import registry\n'), ((3123, 3174), 'maskrcnn_benchmark.modeling.registry.RPN_HEADS.register', 'registry.RPN_HEADS.register', (['"""SingleConvARPNMCHead"""'], {}), "('SingleConvARPNMCHead')\n", (3150, 3174), False, 'from maskrcnn_benchmark.modeling import registry\n'), ((4937, 4981), 'maskrcnn_benchmark.modeling.registry.RPN_HEADS.register', 'registry.RPN_HEADS.register', (['"""TowerARPNHead"""'], {}), "('TowerARPNHead')\n", (4964, 4981), False, 'from maskrcnn_benchmark.modeling import registry\n'), ((964, 1038), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', 'kernel_size', 'stride'], {'padding': 'padding'}), '(in_channels, out_channels, kernel_size, stride, padding=padding)\n', (973, 1038), False, 'from torch import nn\n'), ((1953, 2024), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'in_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, in_channels, kernel_size=3, stride=1, padding=1)\n', (1962, 2024), False, 'from torch import nn\n'), ((2075, 2139), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(1 * num_anchors)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(in_channels, 1 * num_anchors, kernel_size=1, stride=1)\n', (2084, 2139), False, 'from torch import nn\n'), ((2196, 2260), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(4 * num_anchors)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(in_channels, 4 * num_anchors, kernel_size=1, stride=1)\n', (2205, 2260), False, 'from torch import nn\n'), ((2326, 2390), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(1 * num_anchors)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(in_channels, 1 * num_anchors, kernel_size=1, stride=1)\n', (2335, 2390), False, 'from torch import nn\n'), ((3669, 3740), 'torch.nn.Conv2d', 'nn.Conv2d', 
(['in_channels', 'in_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, in_channels, kernel_size=3, stride=1, padding=1)\n', (3678, 3740), False, 'from torch import nn\n'), ((3791, 3855), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(1 * num_anchors)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(in_channels, 1 * num_anchors, kernel_size=1, stride=1)\n', (3800, 3855), False, 'from torch import nn\n'), ((3912, 3976), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(4 * num_anchors)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(in_channels, 4 * num_anchors, kernel_size=1, stride=1)\n', (3921, 3976), False, 'from torch import nn\n'), ((4042, 4106), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(1 * num_anchors)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(in_channels, 1 * num_anchors, kernel_size=1, stride=1)\n', (4051, 4106), False, 'from torch import nn\n'), ((4157, 4245), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(cfg.MODEL.ARPN.MC_NUM * num_anchors)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(in_channels, cfg.MODEL.ARPN.MC_NUM * num_anchors, kernel_size=1,\n stride=1)\n', (4166, 4245), False, 'from torch import nn\n'), ((5484, 5555), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'in_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, in_channels, kernel_size=3, stride=1, padding=1)\n', (5493, 5555), False, 'from torch import nn\n'), ((6376, 6440), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(1 * num_anchors)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(in_channels, 1 * num_anchors, kernel_size=1, stride=1)\n', (6385, 6440), False, 'from torch import nn\n'), ((6497, 6561), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(4 * num_anchors)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(in_channels, 4 * num_anchors, kernel_size=1, stride=1)\n', (6506, 6561), False, 'from torch import nn\n'), ((6762, 6826), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(1 * 
num_anchors)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(in_channels, 1 * num_anchors, kernel_size=1, stride=1)\n', (6771, 6826), False, 'from torch import nn\n'), ((9155, 9199), 'maskrcnn_benchmark.modeling.rbox_coder.RBoxCoder', 'RBoxCoder', ([], {'weights': '(1.0, 1.0, 1.0, 1.0, 1.0)'}), '(weights=(1.0, 1.0, 1.0, 1.0, 1.0))\n', (9164, 9199), False, 'from maskrcnn_benchmark.modeling.rbox_coder import RBoxCoder\n'), ((10928, 10944), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (10942, 10944), False, 'import ipdb\n'), ((1124, 1154), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(32)', 'out_channels'], {}), '(32, out_channels)\n', (1136, 1154), False, 'from torch import nn\n'), ((1191, 1200), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1198, 1200), False, 'from torch import nn\n'), ((2486, 2527), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['l.weight'], {'std': '(0.01)'}), '(l.weight, std=0.01)\n', (2507, 2527), False, 'import torch\n'), ((2540, 2574), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['l.bias', '(0)'], {}), '(l.bias, 0)\n', (2563, 2574), False, 'import torch\n'), ((2602, 2608), 'maskrcnn_benchmark.layers.Mish', 'Mish', ([], {}), '()\n', (2606, 2608), False, 'from maskrcnn_benchmark.layers import Mish\n'), ((2641, 2650), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2648, 2650), False, 'from torch import nn\n'), ((4351, 4392), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['l.weight'], {'std': '(0.01)'}), '(l.weight, std=0.01)\n', (4372, 4392), False, 'import torch\n'), ((4405, 4439), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['l.bias', '(0)'], {}), '(l.bias, 0)\n', (4428, 4439), False, 'import torch\n'), ((6256, 6281), 'torch.nn.Sequential', 'nn.Sequential', (['*cls_tower'], {}), '(*cls_tower)\n', (6269, 6281), False, 'from torch import nn\n'), ((6321, 6347), 'torch.nn.Sequential', 'nn.Sequential', (['*abox_tower'], {}), '(*abox_tower)\n', (6334, 6347), False, 'from torch import nn\n'), ((11471, 11486), 
'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11484, 11486), False, 'import torch\n'), ((7114, 7155), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['l.weight'], {'std': '(0.01)'}), '(l.weight, std=0.01)\n', (7135, 7155), False, 'import torch\n'), ((7176, 7210), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['l.bias', '(0)'], {}), '(l.bias, 0)\n', (7199, 7210), False, 'import torch\n')]
import matplotlib.pyplot as plt import numpy as np plt.style.use('seaborn-darkgrid') x = range(8) y = np.linspace(1.1, 5.0, 8) ylabel = map(lambda num: bin(num)[2:], x) xlabel = map(lambda num: "{0:.2f}".format(num), y) plt.step(x, y) plt.yticks(y, ylabel) plt.xticks(x, xlabel, rotation=45) plt.ylabel("Binary Output") plt.xlabel("Analog Input") plt.savefig("adc.png", transparent=True)
[ "matplotlib.pyplot.savefig", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.style.use", "numpy.linspace", "matplotlib.pyplot.yticks", "matplotlib.pyplot.step" ]
[((51, 84), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-darkgrid"""'], {}), "('seaborn-darkgrid')\n", (64, 84), True, 'import matplotlib.pyplot as plt\n'), ((103, 127), 'numpy.linspace', 'np.linspace', (['(1.1)', '(5.0)', '(8)'], {}), '(1.1, 5.0, 8)\n', (114, 127), True, 'import numpy as np\n'), ((223, 237), 'matplotlib.pyplot.step', 'plt.step', (['x', 'y'], {}), '(x, y)\n', (231, 237), True, 'import matplotlib.pyplot as plt\n'), ((238, 259), 'matplotlib.pyplot.yticks', 'plt.yticks', (['y', 'ylabel'], {}), '(y, ylabel)\n', (248, 259), True, 'import matplotlib.pyplot as plt\n'), ((260, 294), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', 'xlabel'], {'rotation': '(45)'}), '(x, xlabel, rotation=45)\n', (270, 294), True, 'import matplotlib.pyplot as plt\n'), ((295, 322), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Binary Output"""'], {}), "('Binary Output')\n", (305, 322), True, 'import matplotlib.pyplot as plt\n'), ((323, 349), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Analog Input"""'], {}), "('Analog Input')\n", (333, 349), True, 'import matplotlib.pyplot as plt\n'), ((350, 390), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""adc.png"""'], {'transparent': '(True)'}), "('adc.png', transparent=True)\n", (361, 390), True, 'import matplotlib.pyplot as plt\n')]
"""The Tesla Wall Charger Director integration.""" import asyncio from twcdirector.listener import TWCListener from twcdirector.device import TWCPeripheral from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.const import (EVENT_HOMEASSISTANT_STOP) from .const import ( DOMAIN, CONF_RS485_INTERFACE, CONF_SHARED_MAX_CURRENT ) from .event import ( TWCDeviceEvent ) import logging _LOGGER = logging.getLogger(__name__) PLATFORMS = ["sensor", "number"] async def async_setup(hass: HomeAssistant, config: dict): """Set up the Tesla Wall Charger Director component.""" hass.data.setdefault(DOMAIN, {}) return True async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry): """Set up Tesla Wall Charger Director from a config entry.""" listener_config = entry.data listener_options = { "event_loop": hass.loop } if CONF_RS485_INTERFACE in listener_config: listener_options["interface"] = f"/dev/{listener_config[CONF_RS485_INTERFACE]}" if CONF_SHARED_MAX_CURRENT in listener_config: listener_options["shared_max_current"] = listener_config[CONF_SHARED_MAX_CURRENT] twc_listener = TWCListener(**listener_options) hass.loop.create_task(twc_listener.process_transmit_messages()) hass.loop.create_task(twc_listener.listen()) # Shutdown event closure async def async_shutdown_event(call): _LOGGER.info("Shutting down Tesla Wall Charger Director") await twc_listener.shutdown() hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, async_shutdown_event) hass.data[DOMAIN].setdefault(entry.entry_id, {}) hass.data[DOMAIN][entry.entry_id]["twc_listener"] = twc_listener device_queue = asyncio.Queue() twc_listener.register_device_queue(device_queue) hass.loop.create_task(new_device_processor(device_queue, hass, entry)) for platform in PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(entry, platform)) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry): """Unload a config 
entry.""" unload_ok = all(await asyncio.gather(*[ hass.config_entries.async_forward_entry_unload(entry, component) for component in PLATFORMS ])) if unload_ok: hass.data[DOMAIN].pop(entry.entry_id) return unload_ok async def new_device_processor(device_queue, hass, entry): while True: new_device = await device_queue.get() if isinstance(new_device, TWCPeripheral): event_entity = TWCDeviceEvent(hass, new_device) device_registry = await hass.helpers.device_registry.async_get_registry() device_info = event_entity.device_info device_info["config_entry_id"] = entry.entry_id device = device_registry.async_get_or_create(**device_info) event_entity.entity_id = device.id _LOGGER.debug(f"Trigger Device Info: {device_info}") device_queue.task_done()
[ "logging.getLogger", "twcdirector.listener.TWCListener", "asyncio.Queue" ]
[((475, 502), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (492, 502), False, 'import logging\n'), ((1244, 1275), 'twcdirector.listener.TWCListener', 'TWCListener', ([], {}), '(**listener_options)\n', (1255, 1275), False, 'from twcdirector.listener import TWCListener\n'), ((1788, 1803), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (1801, 1803), False, 'import asyncio\n')]
import pygame from pygame.locals import QUIT, KEYDOWN, K_q, K_ESCAPE, K_BACKQUOTE, MOUSEBUTTONDOWN from resources.resourcemgr import ResourceMgr from scenes.game_scene import GameScene def main(): pygame.init() SCREEN_SIZE = (1280, 800) surface = pygame.display.set_mode(SCREEN_SIZE, 0, 32) clock = pygame.time.Clock() active_scene = GameScene(ResourceMgr()) active_scene.generate_game() while True: for event in pygame.event.get(): if event.type == QUIT: return if event.type == KEYDOWN: if event.key == K_q or event.key == K_ESCAPE: return if event.key == K_BACKQUOTE: active_scene.handle_tilde_key_down() if event.type == MOUSEBUTTONDOWN: if event.button == 1: active_scene.handle_mouse_left_down(pygame.mouse.get_pos()) if event.button == 3: active_scene.handle_mouse_right_down(pygame.mouse.get_pos()) time_passed = clock.tick(30) active_scene.tick(time_passed) active_scene.draw(surface) pygame.display.update() main()
[ "pygame.init", "pygame.event.get", "pygame.display.set_mode", "resources.resourcemgr.ResourceMgr", "pygame.mouse.get_pos", "pygame.time.Clock", "pygame.display.update" ]
[((204, 217), 'pygame.init', 'pygame.init', ([], {}), '()\n', (215, 217), False, 'import pygame\n'), ((262, 305), 'pygame.display.set_mode', 'pygame.display.set_mode', (['SCREEN_SIZE', '(0)', '(32)'], {}), '(SCREEN_SIZE, 0, 32)\n', (285, 305), False, 'import pygame\n'), ((318, 337), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (335, 337), False, 'import pygame\n'), ((368, 381), 'resources.resourcemgr.ResourceMgr', 'ResourceMgr', ([], {}), '()\n', (379, 381), False, 'from resources.resourcemgr import ResourceMgr\n'), ((455, 473), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (471, 473), False, 'import pygame\n'), ((1167, 1190), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1188, 1190), False, 'import pygame\n'), ((902, 924), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (922, 924), False, 'import pygame\n'), ((1021, 1043), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (1041, 1043), False, 'import pygame\n')]
from src.modelo.cancion import Cancion from src.modelo.interprete import Interprete from src.modelo.album import Album, Medio from src.modelo.declarative_base import Session, engine, Base from src.logica.coleccion import Coleccion def anadir_album (titulo , anio , descripcion , medio) : # Crea la BD Base.metadata.create_all ( engine ) # Abre la sesion session = Session ( ) coleccion = Coleccion ( ) if coleccion.agregar_album ( titulo , anio , descripcion , medio ) : print ( f"Se añadio el titulo: {titulo}" ) else : print ( f"El título: {titulo}, ya existe" ) session.close() def editar_album (album_id,titulo , anio , descripcion , medio) : # Crea la BD Base.metadata.create_all ( engine ) # Abre la sesion session = Session ( ) coleccion = Coleccion ( ) if coleccion.editar_album ( album_id , titulo , anio , descripcion , medio ) : print ( f"Se modifico el album con id: {album_id}" ) else : print ( f"El nuevo título '{titulo}' para el album con id: {album_id}, ya existe" ) session.close() def mostrar_album (album_id) : # Crea la BD Base.metadata.create_all ( engine ) # Abre la sesion session = Session ( ) coleccion = Coleccion ( ) album=coleccion.dar_album_por_id(album_id) # print(album) print ( f"=======================================" ) print ( f"Id Album : {album[ 'id' ]}" ) print ( f"Título Album: {album[ 'titulo' ]}" ) print ( f"Año : {album[ 'anio' ]}" ) print ( f"Descripción : {album[ 'descripcion' ]}" ) print ( f"Medio : {album[ 'medio' ]}" ) print ( f"=======================================" ) session.close() def Anadir_registros(): # Crea la BD Base.metadata.create_all ( engine ) # Abre la sesion session = Session ( ) # crear interpretes interprete1 = Interprete ( nombre = "<NAME>" , texto_curiosidades = "Es colombiano y vive en NY" ) interprete2 = Interprete ( nombre = "<NAME>" , texto_curiosidades = "Canto a Cuba" ) interprete3 = Interprete ( nombre = "Buena Vista Social club" ) interprete4 = Interprete ( nombre = "<NAME>" , texto_curiosidades = "No sabia 
quien era" ) session.add ( interprete1 ) session.add ( interprete2 ) session.add ( interprete3 ) session.add ( interprete4 ) session.commit ( ) # Crear albumes album1 = Album ( titulo = "Latin Jazz Compilation" , anio = 2021 , descripcion = "Album original" , medio = Medio.DISCO ) album2 = Album ( titulo = "Bandas sonoras famosas" , anio = 2021 , descripcion = "Compilacion" , medio = Medio.DISCO ) session.add ( album1 ) session.add ( album2 ) # Crear canciones cancion1 = Cancion ( titulo = "Ajiaco" , minutos = 3 , segundos = 1 , compositor = "Samuel Torres" ) cancion2 = Cancion ( titulo = "Forced Displacement" , minutos = 3 , segundos = 12 , compositor = "Desconocido" ) cancion3 = Cancion ( titulo = "Alegria" , minutos = 4 , segundos = 27 , compositor = "AU" ) session.add ( cancion1 ) session.add ( cancion2 ) session.add ( cancion3 ) # Relacionar albumes con canciones album1.canciones = [ cancion1 , cancion2 ] album2.canciones = [ cancion1 , cancion3 ] # Relacionar canciones con interpretes cancion1.interpretes = [ interprete1 ] cancion2.interpretes = [ interprete2 ] cancion3.interpretes = [ interprete3 , interprete4 ] session.commit ( ) session.commit ( ) session.close ( ) if __name__ == '__main__': Anadir_registros ( ) anadir_album ( "Arde del cielo" , 2008 , "Album rock" , Medio.CD ) anadir_album ( "Similares" , 2015 , "Romantico" , Medio.CD ) anadir_album ( "Otherside" , 1999 , "Rock alternativo" , Medio.CD ) anadir_album ( "All the Little Lights" , 2012 , "Folk, Pop, Cantautor, Rock, Acoustic" , Medio.CD ) editar_album (2, "Similares",2020, "Romantico",Medio.CD) editar_album ( 1 , "River flows in you ",2010 , "Romantico" , Medio.CD) for i in [ 1 , 2 , 3, 4] : mostrar_album ( i ) i = 1 while i <= 4: mostrar_album(i) i=i+1
[ "src.modelo.cancion.Cancion", "src.modelo.declarative_base.Base.metadata.create_all", "src.modelo.interprete.Interprete", "src.modelo.album.Album", "src.logica.coleccion.Coleccion", "src.modelo.declarative_base.Session" ]
[((309, 341), 'src.modelo.declarative_base.Base.metadata.create_all', 'Base.metadata.create_all', (['engine'], {}), '(engine)\n', (333, 341), False, 'from src.modelo.declarative_base import Session, engine, Base\n'), ((379, 388), 'src.modelo.declarative_base.Session', 'Session', ([], {}), '()\n', (386, 388), False, 'from src.modelo.declarative_base import Session, engine, Base\n'), ((406, 417), 'src.logica.coleccion.Coleccion', 'Coleccion', ([], {}), '()\n', (415, 417), False, 'from src.logica.coleccion import Coleccion\n'), ((707, 739), 'src.modelo.declarative_base.Base.metadata.create_all', 'Base.metadata.create_all', (['engine'], {}), '(engine)\n', (731, 739), False, 'from src.modelo.declarative_base import Session, engine, Base\n'), ((777, 786), 'src.modelo.declarative_base.Session', 'Session', ([], {}), '()\n', (784, 786), False, 'from src.modelo.declarative_base import Session, engine, Base\n'), ((804, 815), 'src.logica.coleccion.Coleccion', 'Coleccion', ([], {}), '()\n', (813, 815), False, 'from src.logica.coleccion import Coleccion\n'), ((1132, 1164), 'src.modelo.declarative_base.Base.metadata.create_all', 'Base.metadata.create_all', (['engine'], {}), '(engine)\n', (1156, 1164), False, 'from src.modelo.declarative_base import Session, engine, Base\n'), ((1202, 1211), 'src.modelo.declarative_base.Session', 'Session', ([], {}), '()\n', (1209, 1211), False, 'from src.modelo.declarative_base import Session, engine, Base\n'), ((1229, 1240), 'src.logica.coleccion.Coleccion', 'Coleccion', ([], {}), '()\n', (1238, 1240), False, 'from src.logica.coleccion import Coleccion\n'), ((1731, 1763), 'src.modelo.declarative_base.Base.metadata.create_all', 'Base.metadata.create_all', (['engine'], {}), '(engine)\n', (1755, 1763), False, 'from src.modelo.declarative_base import Session, engine, Base\n'), ((1801, 1810), 'src.modelo.declarative_base.Session', 'Session', ([], {}), '()\n', (1808, 1810), False, 'from src.modelo.declarative_base import Session, engine, Base\n'), 
((1854, 1930), 'src.modelo.interprete.Interprete', 'Interprete', ([], {'nombre': '"""<NAME>"""', 'texto_curiosidades': '"""Es colombiano y vive en NY"""'}), "(nombre='<NAME>', texto_curiosidades='Es colombiano y vive en NY')\n", (1864, 1930), False, 'from src.modelo.interprete import Interprete\n'), ((1956, 2018), 'src.modelo.interprete.Interprete', 'Interprete', ([], {'nombre': '"""<NAME>"""', 'texto_curiosidades': '"""Canto a Cuba"""'}), "(nombre='<NAME>', texto_curiosidades='Canto a Cuba')\n", (1966, 2018), False, 'from src.modelo.interprete import Interprete\n'), ((2044, 2088), 'src.modelo.interprete.Interprete', 'Interprete', ([], {'nombre': '"""Buena Vista Social club"""'}), "(nombre='Buena Vista Social club')\n", (2054, 2088), False, 'from src.modelo.interprete import Interprete\n'), ((2111, 2179), 'src.modelo.interprete.Interprete', 'Interprete', ([], {'nombre': '"""<NAME>"""', 'texto_curiosidades': '"""No sabia quien era"""'}), "(nombre='<NAME>', texto_curiosidades='No sabia quien era')\n", (2121, 2179), False, 'from src.modelo.interprete import Interprete\n'), ((2366, 2469), 'src.modelo.album.Album', 'Album', ([], {'titulo': '"""Latin Jazz Compilation"""', 'anio': '(2021)', 'descripcion': '"""Album original"""', 'medio': 'Medio.DISCO'}), "(titulo='Latin Jazz Compilation', anio=2021, descripcion=\n 'Album original', medio=Medio.DISCO)\n", (2371, 2469), False, 'from src.modelo.album import Album, Medio\n'), ((2491, 2590), 'src.modelo.album.Album', 'Album', ([], {'titulo': '"""Bandas sonoras famosas"""', 'anio': '(2021)', 'descripcion': '"""Compilacion"""', 'medio': 'Medio.DISCO'}), "(titulo='Bandas sonoras famosas', anio=2021, descripcion='Compilacion',\n medio=Medio.DISCO)\n", (2496, 2590), False, 'from src.modelo.album import Album, Medio\n'), ((2689, 2764), 'src.modelo.cancion.Cancion', 'Cancion', ([], {'titulo': '"""Ajiaco"""', 'minutos': '(3)', 'segundos': '(1)', 'compositor': '"""Samuel Torres"""'}), "(titulo='Ajiaco', minutos=3, segundos=1, 
compositor='Samuel Torres')\n", (2696, 2764), False, 'from src.modelo.cancion import Cancion\n'), ((2793, 2885), 'src.modelo.cancion.Cancion', 'Cancion', ([], {'titulo': '"""Forced Displacement"""', 'minutos': '(3)', 'segundos': '(12)', 'compositor': '"""Desconocido"""'}), "(titulo='Forced Displacement', minutos=3, segundos=12, compositor=\n 'Desconocido')\n", (2800, 2885), False, 'from src.modelo.cancion import Cancion\n'), ((2909, 2975), 'src.modelo.cancion.Cancion', 'Cancion', ([], {'titulo': '"""Alegria"""', 'minutos': '(4)', 'segundos': '(27)', 'compositor': '"""AU"""'}), "(titulo='Alegria', minutos=4, segundos=27, compositor='AU')\n", (2916, 2975), False, 'from src.modelo.cancion import Cancion\n')]
from __future__ import absolute_import, division, print_function, unicode_literals import json from postgres.orm import Model from psycopg2 import IntegrityError from urlparse import urlsplit, urlunsplit import xml.etree.ElementTree as ET import xmltodict from aspen import Response from gratipay.exceptions import ProblemChangingUsername from gratipay.utils.username import safely_reserve_a_username class UnknownAccountElsewhere(Exception): pass class AccountElsewhere(Model): typname = "elsewhere_with_participant" def __init__(self, *args, **kwargs): super(AccountElsewhere, self).__init__(*args, **kwargs) self.platform_data = getattr(self.platforms, self.platform) # Constructors # ============ @classmethod def from_user_id(cls, platform, user_id): """Return an existing AccountElsewhere based on platform and user_id. """ return cls._from_thing('user_id', platform, user_id) @classmethod def from_user_name(cls, platform, user_name): """Return an existing AccountElsewhere based on platform and user_name. """ return cls._from_thing('user_name', platform, user_name) @classmethod def _from_thing(cls, thing, platform, value): assert thing in ('user_id', 'user_name') exception = UnknownAccountElsewhere(thing, platform, value) return cls.db.one(""" SELECT elsewhere.*::elsewhere_with_participant FROM elsewhere WHERE platform = %s AND {} = %s """.format(thing), (platform, value), default=exception) @classmethod def get_many(cls, platform, user_infos): accounts = cls.db.all("""\ SELECT elsewhere.*::elsewhere_with_participant FROM elsewhere WHERE platform = %s AND user_id = any(%s) """, (platform, [i.user_id for i in user_infos])) found_user_ids = set(a.user_id for a in accounts) for i in user_infos: if i.user_id not in found_user_ids: accounts.append(cls.upsert(i)) return accounts @classmethod def upsert(cls, i): """Insert or update a user's info. 
""" # Clean up avatar_url if i.avatar_url: scheme, netloc, path, query, fragment = urlsplit(i.avatar_url) fragment = '' if netloc.endswith('githubusercontent.com') or \ netloc.endswith('gravatar.com'): query = 's=128' i.avatar_url = urlunsplit((scheme, netloc, path, query, fragment)) # Serialize extra_info if isinstance(i.extra_info, ET.Element): i.extra_info = xmltodict.parse(ET.tostring(i.extra_info)) i.extra_info = json.dumps(i.extra_info) cols, vals = zip(*i.__dict__.items()) cols = ', '.join(cols) placeholders = ', '.join(['%s']*len(vals)) try: # Try to insert the account # We do this with a transaction so that if the insert fails, the # participant we reserved for them is rolled back as well. with cls.db.get_cursor() as cursor: username = safely_reserve_a_username(cursor) cursor.execute(""" INSERT INTO elsewhere (participant, {0}) VALUES (%s, {1}) """.format(cols, placeholders), (username,)+vals) # Propagate elsewhere.is_team to participants.number if i.is_team: cursor.execute(""" UPDATE participants SET number = 'plural'::participant_number WHERE username = %s """, (username,)) except IntegrityError: # The account is already in the DB, update it instead username = cls.db.one(""" UPDATE elsewhere SET ({0}) = ({1}) WHERE platform=%s AND user_id=%s RETURNING participant """.format(cols, placeholders), vals+(i.platform, i.user_id)) if not username: raise # Return account after propagating avatar_url to participant account = AccountElsewhere.from_user_id(i.platform, i.user_id) account.participant.update_avatar() return account # Random Stuff # ============ def get_auth_session(self): if not self.token: return params = dict(token=self.token) if 'refresh_token' in self.token: params['token_updater'] = self.save_token return self.platform_data.get_auth_session(**params) @property def html_url(self): return self.platform_data.account_url.format( user_id=self.user_id, user_name=self.user_name, platform_data=self.platform_data ) def opt_in(self, desired_username): 
"""Given a desired username, return a User object. """ from gratipay.security.user import User self.set_is_locked(False) user = User.from_username(self.participant.username) assert not user.ANON, self.participant # sanity check if self.participant.is_claimed: newly_claimed = False else: newly_claimed = True user.participant.set_as_claimed() try: user.participant.change_username(desired_username) except ProblemChangingUsername: pass if user.participant.is_closed: user.participant.update_is_closed(False) return user, newly_claimed def save_token(self, token): """Saves the given access token in the database. """ self.db.run(""" UPDATE elsewhere SET token = %s WHERE id=%s """, (token, self.id)) self.set_attributes(token=token) def set_is_locked(self, is_locked): self.db.run( 'UPDATE elsewhere SET is_locked=%s WHERE id=%s' , (is_locked, self.id) ) self.set_attributes(is_locked=is_locked) def get_account_elsewhere(website, request): path = request.line.uri.path platform = getattr(website.platforms, path['platform'], None) if platform is None: raise Response(404) user_name = path['user_name'] try: account = AccountElsewhere.from_user_name(platform.name, user_name) except UnknownAccountElsewhere: account = AccountElsewhere.upsert(platform.get_user_info(user_name)) return platform, account
[ "gratipay.security.user.User.from_username", "urlparse.urlunsplit", "xml.etree.ElementTree.tostring", "json.dumps", "gratipay.utils.username.safely_reserve_a_username", "aspen.Response", "urlparse.urlsplit" ]
[((2783, 2807), 'json.dumps', 'json.dumps', (['i.extra_info'], {}), '(i.extra_info)\n', (2793, 2807), False, 'import json\n'), ((5186, 5231), 'gratipay.security.user.User.from_username', 'User.from_username', (['self.participant.username'], {}), '(self.participant.username)\n', (5204, 5231), False, 'from gratipay.security.user import User\n'), ((6428, 6441), 'aspen.Response', 'Response', (['(404)'], {}), '(404)\n', (6436, 6441), False, 'from aspen import Response\n'), ((2340, 2362), 'urlparse.urlsplit', 'urlsplit', (['i.avatar_url'], {}), '(i.avatar_url)\n', (2348, 2362), False, 'from urlparse import urlsplit, urlunsplit\n'), ((2557, 2608), 'urlparse.urlunsplit', 'urlunsplit', (['(scheme, netloc, path, query, fragment)'], {}), '((scheme, netloc, path, query, fragment))\n', (2567, 2608), False, 'from urlparse import urlsplit, urlunsplit\n'), ((2733, 2758), 'xml.etree.ElementTree.tostring', 'ET.tostring', (['i.extra_info'], {}), '(i.extra_info)\n', (2744, 2758), True, 'import xml.etree.ElementTree as ET\n'), ((3214, 3247), 'gratipay.utils.username.safely_reserve_a_username', 'safely_reserve_a_username', (['cursor'], {}), '(cursor)\n', (3239, 3247), False, 'from gratipay.utils.username import safely_reserve_a_username\n')]
import torch import torch.nn as nn import torch.nn.functional as F class Conv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, NL='relu', same_padding=False, bn=False, dilation=1): super(Conv2d, self).__init__() padding = int((kernel_size - 1) // 2) if same_padding else 0 self.conv = [] if dilation==1: self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, dilation=dilation) else: self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=dilation, dilation=dilation) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None if NL == 'relu' : self.relu = nn.ReLU(inplace=True) elif NL == 'prelu': self.relu = nn.PReLU() else: self.relu = None def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) if self.relu is not None: x = self.relu(x) return x class MCNN(nn.Module): ''' Multi-column CNN -Implementation of Single Image Crowd Counting via Multi-column CNN (Zhang et al.) 
''' def __init__(self, in_ch=3, bn=False, up_scale=4): super(MCNN, self).__init__() self.up_scale = up_scale self.branch1 = nn.Sequential(Conv2d(in_ch, 16, 9, same_padding=True, bn=bn), nn.MaxPool2d(2), Conv2d(16, 32, 7, same_padding=True, bn=bn), nn.MaxPool2d(2), Conv2d(32, 16, 7, same_padding=True, bn=bn), Conv2d(16, 8, 7, same_padding=True, bn=bn)) self.branch2 = nn.Sequential(Conv2d(in_ch, 20, 7, same_padding=True, bn=bn), nn.MaxPool2d(2), Conv2d(20, 40, 5, same_padding=True, bn=bn), nn.MaxPool2d(2), Conv2d(40, 20, 5, same_padding=True, bn=bn), Conv2d(20, 10, 5, same_padding=True, bn=bn)) self.branch3 = nn.Sequential(Conv2d(in_ch, 24, 5, same_padding=True, bn=bn), nn.MaxPool2d(2), Conv2d(24, 48, 3, same_padding=True, bn=bn), nn.MaxPool2d(2), Conv2d(48, 24, 3, same_padding=True, bn=bn), Conv2d(24, 12, 3, same_padding=True, bn=bn)) self.fuse = nn.Sequential(Conv2d( 30, 1, 1, same_padding=True, bn=bn)) initialize_weights(self.modules()) def forward(self, im_data): x1 = self.branch1(im_data) x2 = self.branch2(im_data) x3 = self.branch3(im_data) x = torch.cat((x1,x2,x3),1) x = self.fuse(x) if self.up_scale != 1: x = F.interpolate(x, scale_factor=self.up_scale, mode='bilinear', align_corners=False) return x def initialize_weights(models): for model in models: real_init_weights(model) def real_init_weights(m): if isinstance(m, list): for mini_m in m: real_init_weights(mini_m) else: if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight, std=0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): m.weight.data.normal_(0.0, std=0.01) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m,nn.Module): for mini_m in m.children(): real_init_weights(mini_m) else: print( m ) def weights_normal_init(*models): for model in models: dev=0.01 if isinstance(model, list): for m in model: weights_normal_init(m, dev) else: for m in model.modules(): if isinstance(m, nn.Conv2d): 
m.weight.data.normal_(0.0, dev) if m.bias is not None: m.bias.data.fill_(0.0) elif isinstance(m, nn.Linear): m.weight.data.normal_(0.0, dev)
[ "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.PReLU", "torch.nn.init.normal_", "torch.nn.MaxPool2d", "torch.nn.functional.interpolate", "torch.cat" ]
[((3008, 3034), 'torch.cat', 'torch.cat', (['(x1, x2, x3)', '(1)'], {}), '((x1, x2, x3), 1)\n', (3017, 3034), False, 'import torch\n'), ((399, 496), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', 'kernel_size', 'stride'], {'padding': 'padding', 'dilation': 'dilation'}), '(in_channels, out_channels, kernel_size, stride, padding=padding,\n dilation=dilation)\n', (408, 496), True, 'import torch.nn as nn\n'), ((531, 629), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', 'kernel_size', 'stride'], {'padding': 'dilation', 'dilation': 'dilation'}), '(in_channels, out_channels, kernel_size, stride, padding=dilation,\n dilation=dilation)\n', (540, 629), True, 'import torch.nn as nn\n'), ((644, 708), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {'eps': '(0.001)', 'momentum': '(0)', 'affine': '(True)'}), '(out_channels, eps=0.001, momentum=0, affine=True)\n', (658, 708), True, 'import torch.nn as nn\n'), ((775, 796), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (782, 796), True, 'import torch.nn as nn\n'), ((1502, 1517), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1514, 1517), True, 'import torch.nn as nn\n'), ((1638, 1653), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1650, 1653), True, 'import torch.nn as nn\n'), ((1950, 1965), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1962, 1965), True, 'import torch.nn as nn\n'), ((2086, 2101), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (2098, 2101), True, 'import torch.nn as nn\n'), ((2398, 2413), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (2410, 2413), True, 'import torch.nn as nn\n'), ((2534, 2549), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (2546, 2549), True, 'import torch.nn as nn\n'), ((3105, 3192), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': 'self.up_scale', 'mode': '"""bilinear"""', 'align_corners': 
'(False)'}), "(x, scale_factor=self.up_scale, mode='bilinear', align_corners\n =False)\n", (3118, 3192), True, 'import torch.nn.functional as F\n'), ((3480, 3515), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight'], {'std': '(0.01)'}), '(m.weight, std=0.01)\n', (3495, 3515), True, 'import torch.nn as nn\n'), ((850, 860), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (858, 860), True, 'import torch.nn as nn\n'), ((3567, 3595), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (3584, 3595), True, 'import torch.nn as nn\n'), ((3740, 3770), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (3757, 3770), True, 'import torch.nn as nn\n'), ((3783, 3811), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (3800, 3811), True, 'import torch.nn as nn\n')]
from .piece import Piece from typing import Optional from colorama import init from termcolor import colored init() class Field: def __init__(self, piece: Optional[Piece] = None) -> None: self._piece = piece def get_piece(self) -> Optional[Piece]: return self._piece def put_piece(self, piece: Piece): self._piece = piece def __str__(self) -> str: if self._piece is not None: return colored(self._piece.get_sign(), self._piece.get_color()) else: return ' ' def is_sign(self, sign: str): if self._piece is None: return False return self._piece.get_sign() == sign
[ "colorama.init" ]
[((111, 117), 'colorama.init', 'init', ([], {}), '()\n', (115, 117), False, 'from colorama import init\n')]
from time import sleep import emoji print('AGUARDE A CONTAGEM PARA OS FOGOS') for c in range(10, 0, -1): print(c) sleep(1) print(emoji.emojize("Detonação dos fogos :sunny: ", use_aliases=True)) print('FIM')
[ "emoji.emojize", "time.sleep" ]
[((123, 131), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (128, 131), False, 'from time import sleep\n'), ((138, 201), 'emoji.emojize', 'emoji.emojize', (['"""Detonação dos fogos :sunny: """'], {'use_aliases': '(True)'}), "('Detonação dos fogos :sunny: ', use_aliases=True)\n", (151, 201), False, 'import emoji\n')]
#funcsboot.py from placerg.funcs import * from placerg.funcsrg import * from placerg.funcsall import * from placerg.objects import * from scipy.optimize import curve_fit def bootfunc(a, env): rate=[] coeff=[] eigspec=[] var=[] psil=[] actmom=[] autocorr=[] tau=[] mu=[] alpha=[] beta=[] z=[] # histogram cell rate rank plott=cellraterank(env) rate.append(plott[1]) # histogram pairwise correlations plott=corrcoefhist(env) coeff.append(plott[0][1]) xplot,plot=eigplotall(a) eigspec.append(plot) fitx=[] fity=[] for m in range(len(xplot)): fitx.append(xplot[m][:int(xplot[m].size/2)]) fity.append(plot[m][:int(plot[m].size/2)]) fitx=np.hstack(fitx) fity=np.hstack(fity) popt, pcov = curve_fit(linfunc, fitx, np.real(fity)) mu.append(popt[1]) plott=varplotall(a) var.append(plott[1]) popt, pcov = curve_fit(linfunc, plott[0][:4], plott[1][:4]) alpha.append(popt[1]) plott=probplotall(a) wh=np.where(np.isfinite(plott[1]) == True) x=plott[0][wh] y=plott[1][wh] psil.append(y) popt, pcov = curve_fit(probfunc,x, y) beta.append(popt[1]) x, plott=activemom(a) actmom.append(plott) x, result= calccorrmulti(a) autocorr.append(result) xnew=(x[int(result.shape[1]/2)-1:int(result.shape[1]/2)+1]) taus=[] for l in range(result.shape[0]): y=np.log(result[l, int(result.shape[1]/2)-1:int(result.shape[1]/2)+1]) y[np.where(np.isfinite(y)==False)]=0. 
popt, pcov = curve_fit(linear, xnew, y, maxfev=20000) taus.append(popt[0]) taus=1/np.array(taus).flatten() tau.append(taus) popt, pcov = curve_fit(linfunc, 2**np.arange(2,8)[:3],\ taus[:3]) z.append(popt[1]) pltall=bootstrap(rate, coeff, \ eigspec,var, psil, actmom, \ autocorr, tau, mu, alpha, beta, z) return pltall def bootprocess(a, env, i): env.pmat=env.boots[i] a=infoset(env.N, env.pmat, a.k) boot=bootfunc(a, env) print(str(i)+' bootstrap array finished') return boot def bootloop(aname, envname): rate=[] coeff=[] eigspec0=[] eigspec1=[] eigspec2=[] var=[] psil=[] actmom=[] autocorr0=[] autocorr1=[] autocorr2=[] autocorr3=[] autocorr4=[] autocorr5=[] tau=[] mu=[] alpha=[] beta=[] z=[] env=load_object(envname) a=load_object(aname) if __name__ == '__main__': p = Pool(2) boots_list = p.map(partial(bootprocess, a=a, env=env), range(len(env.boots))) for i in range(len(env.boots)): rate.append(boots_list[i].rate) coeff.append(boots_list[i].coeff) eigspec0.append(boots_list[i].eigspec[0][0]) eigspec1.append(boots_list[i].eigspec[0][1]) eigspec2.append(boots_list[i].eigspec[0][2]) var.append(boots_list[i].var) psil.append(boots_list[i].psil) actmom.append(boots_list[i].actmom) autocorr0.append(boots_list[i].autocorr[0][0]) autocorr1.append(boots_list[i].autocorr[0][1]) autocorr2.append(boots_list[i].autocorr[0][2]) autocorr3.append(boots_list[i].autocorr[0][3]) autocorr4.append(boots_list[i].autocorr[0][4]) autocorr5.append(boots_list[i].autocorr[0][5]) tau.append(boot.tau) mu.append(boots_list[i].mu) alpha.append(boots_list[i].alpha) beta.append(boots_list[i].beta) z.append(boots_list[i].z) rate=np.std(np.vstack(rate), axis=0) coeff=np.std(np.vstack(coeff), axis=0) eigspec0=np.std(np.vstack(eigspec0), axis=0) eigspec1=np.std(np.vstack(eigspec1), axis=0) eigspec2=np.std(np.vstack(eigspec2), axis=0) var=np.std(np.vstack(var), axis=0) psil=np.std(np.vstack(psil), axis=0) actmom=np.std(np.vstack(actmom), axis=0) autocorr0=np.std(np.vstack(autocorr0), axis=0) 
autocorr1=np.std(np.vstack(autocorr1), axis=0) autocorr2=np.std(np.vstack(autocorr2), axis=0) autocorr3=np.std(np.vstack(autocorr3), axis=0) autocorr4=np.std(np.vstack(autocorr4), axis=0) autocorr5=np.std(np.vstack(autocorr5), axis=0) tau=np.std(np.vstack(tau), axis=0) mu=np.std(np.vstack(mu), axis=0) alpha=np.std(np.vstack(alpha), axis=0) beta=np.std(np.vstack(beta), axis=0) z=np.std(np.vstack(z), axis=0) eigspec=[eigspec0, eigspec1, eigspec2] autocorr=[autocorr0, autocorr1, autocorr2, autocorr3, autocorr4, autocorr5] pltall=bootstrap(rate, coeff, \ eigspec,var, psil, actmom, \ autocorr, tau, mu, alpha, beta, z) print('bootstrap competed') return pltall def loopall(arra, arrenv, keyword): hamx=[] ham=[] probx=[] prob=[] ratex=[] rate=[] rateerr=[] coeffx=[] coeff=[] coefferr=[] shuffcoeffx=[] shuffcoeff=[] eigspecx=[] eigspec=[] eigspecerr=[] varx=[] var=[] varerr=[] psilx=[] psil=[] psilerr=[] actmomx=[] actmom=[] actmomerr=[] autocorrx=[] autocorr=[] autocorrerr=[] tau=[] tauerr=[] mu=[] muerr=[] alpha=[] alphaerr=[] beta=[] betaerr=[] z=[] zerr=[] epsilon=[] eta=[] phi=[] percell=[] stim=[] timeconst=[] labeltype=[] label=[] for i in range(len(arra)): boot=bootloop(arra[i], arrenv[i]) # define object names we want to load in aname= arra[i] envname=arrenv[i] # load in objects env=load_object(envname) a=load_object(aname) # histogram hamiltonians plott=hamhist(env) hamx.append(plott[0]) ham.append(plott[1]) # histogram probability dist plott=probhist(env) probx.append(plott[0]) prob.append(plott[1]) # histogram cell rate rank plott=cellraterank(env) ratex.append(plott[0]) rate.append(plott[1]) rateerr.append(boot.rate) # histogram pairwise correlations plott=corrcoefhist(env) coeffx.append(plott[0][0]) coeff.append(plott[0][1]) coefferr.append(boot.coeff) # histogram pairwise correlations, shuffled shuffcoeffx.append(plott[1][0]) shuffcoeff.append(plott[1][1]) # eigenvalue spectra xplot,plot=eigplotall(a) eigspecx.append(xplot) eigspec.append(plot) 
eigspecerr.append(boot.eigspec) fitx=[] fity=[] for m in range(len(xplot)): fitx.append(xplot[m][:int(xplot[m].size/2)]) fity.append(plot[m][:int(plot[m].size/2)]) fitx=np.hstack(fitx) fity=np.hstack(fity) popt, pcov = curve_fit(linfunc, fitx, np.real(fity)) mu.append(popt) muerr.append(boot.mu) # variance over coarse grained variables plott=varplotall(a) varx.append(plott[0]) var.append(plott[1]) varerr.append(boot.var) popt, pcov = curve_fit(linfunc, plott[0][:4], plott[1][:4]) alpha.append(popt) alphaerr.append(boot.alpha) # log p(silence) plott=probplotall(a) psilx.append(plott[0]) psil.append(plott[1]) psilerr.append(boot.psil) wh=np.where(np.isfinite(plott[1]) == True) x=plott[0][wh] y=plott[1][wh] psil.append(y) popt, pcov = curve_fit(probfunc,x, y) beta.append(popt) betaerr.append(boot.beta) # activity, momentum space x, plott=activemom(a) actmomx.append(x) actmom.append(plott) actmomerr.append(boot.actmom) x,result= calccorrmulti(a) autocorrx.append(x) autocorr.append(result) autocorrerr.append(boot.autocorr) xnew=(x[int(result.shape[1]/2)-1:int(result.shape[1]/2)+1]) taus=[] for l in range(result.shape[0]): y=np.log(result[l, int(result.shape[1]/2)-1:int(result.shape[1]/2)+1]) y[np.where(np.isfinite(y)==False)]=0. 
popt, pcov = curve_fit(linear, xnew, y, maxfev=20000) taus.append(popt[0]) taus=1./np.array(taus).flatten() tau.append(taus) tauerr.append(boot.tau) popt, pcov = curve_fit(linfunc, 2**np.arange(2,8)[:3],\ taus[:3]) z.append(popt) zerr.append(boot.z) # record parameters eta.append(env.eta) phi.append(env.phi) epsilon.append(env.epsilon) percell.append(env.percell) timeconst.append(env.timeconst) stim.append(env.nstim) if keyword == 'time': # labels for plots labeltype.append('time constant') label.append(env.timeconst[0]) if keyword == 'type': # labels for plots labeltype.append('cell type') label=['both', 'place', 'none'] if keyword == 'stim': # labels for plots labeltype.append('# of stimuli') label.append(env.nstim) if keyword == 'eta': # labels for plots labeltype.append('eta') label.append(env.eta) if keyword == 'phi': # labels for plots labeltype.append('phi') label.append(env.phi) if keyword == 'epsilon': # labels for plots labeltype.append('epsilon') label.append(env.epsilon) if keyword == 'percell': # labels for plots labeltype.append('p') label.append(env.percell) print(str(i)+'th loop complete') pltall=recordall(hamx, ham, probx, \ prob, ratex, rate, rateerr, coeffx, coeff, coefferr, shuffcoeffx, shuffcoeff, eigspecx,\ eigspec, eigspecerr, varx, var, varerr, psilx, psil, psilerr, actmomx, actmom, actmomerr, autocorrx,\ autocorr, autocorrerr, tau, tauerr, mu, muerr, alpha, alphaerr, beta, betaerr, z, zerr, phi, eta, epsilon, percell, stim, timeconst, labeltype, label) return pltall
[ "scipy.optimize.curve_fit" ]
[((960, 1006), 'scipy.optimize.curve_fit', 'curve_fit', (['linfunc', 'plott[0][:4]', 'plott[1][:4]'], {}), '(linfunc, plott[0][:4], plott[1][:4])\n', (969, 1006), False, 'from scipy.optimize import curve_fit\n'), ((1180, 1205), 'scipy.optimize.curve_fit', 'curve_fit', (['probfunc', 'x', 'y'], {}), '(probfunc, x, y)\n', (1189, 1205), False, 'from scipy.optimize import curve_fit\n'), ((1603, 1643), 'scipy.optimize.curve_fit', 'curve_fit', (['linear', 'xnew', 'y'], {'maxfev': '(20000)'}), '(linear, xnew, y, maxfev=20000)\n', (1612, 1643), False, 'from scipy.optimize import curve_fit\n'), ((7306, 7352), 'scipy.optimize.curve_fit', 'curve_fit', (['linfunc', 'plott[0][:4]', 'plott[1][:4]'], {}), '(linfunc, plott[0][:4], plott[1][:4])\n', (7315, 7352), False, 'from scipy.optimize import curve_fit\n'), ((7707, 7732), 'scipy.optimize.curve_fit', 'curve_fit', (['probfunc', 'x', 'y'], {}), '(probfunc, x, y)\n', (7716, 7732), False, 'from scipy.optimize import curve_fit\n'), ((8377, 8417), 'scipy.optimize.curve_fit', 'curve_fit', (['linear', 'xnew', 'y'], {'maxfev': '(20000)'}), '(linear, xnew, y, maxfev=20000)\n', (8386, 8417), False, 'from scipy.optimize import curve_fit\n')]
# -*- coding: UTF-8 -*- # 该算法比较慢 import operator # 牌型枚举 class ComeType: PASS, SINGLE, PAIR, TRIPLE, TRIPLE_ONE, TRIPLE_TWO, FOURTH_TWO_ONES, FOURTH_TWO_PAIRS, STRAIGHT, EVEN_PAIR, BOMB = \ range(11) # 3-14 分别代表 3-10, J, Q, K, A # 16, 18, 19 分别代表 2, little_joker, big_joker # 将 2 与其他牌分开是为了方便计算顺子 # 定义 HAND_PASS 为过牌 little_joker, big_joker = 18, 19 HAND_PASS = {'type': ComeType.PASS, 'main': 0, 'component': []} # 符号转数字 def get_val(cards): dicts = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 16, 'w': 18, 'W': 19} for i, card in enumerate(cards): cards[i] = dicts[card] # 数字转符号 def get_card(value): out = value[:] dicts = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: '10', 11: 'J', 12: 'Q', 13: 'K', 14: 'A', 16: '2', 18: 'w', 19: 'W'} for i, card in enumerate(value): out[i] = dicts[card] return out # 根据当前手牌,获取此牌所有可能出的牌型 # 牌型数据结构为 {牌类型,主牌,包含的牌} # 同种牌类型可以通过主牌比较大小 # 为方便比较大小, 将顺子按照不同长度分为不同牌型 def get_all_hands(pokers): if not pokers: return [] # 过牌 combs = [HAND_PASS] # 获取每个点数的数目 dic = counter(pokers) # 王炸 if little_joker in pokers and big_joker in pokers: combs.append({'type': ComeType.BOMB, 'main': big_joker, 'component': [big_joker, little_joker]}) # 非顺子, 非王炸 for poker in dic: if dic[poker] >= 1: # 单张 combs.append({'type': ComeType.SINGLE, 'main': poker, 'component': [poker]}) if dic[poker] >= 2: # 对子 combs.append({'type': ComeType.PAIR, 'main': poker, 'component': [poker, poker]}) if dic[poker] >= 3: # 三带零 combs.append({'type': ComeType.TRIPLE, 'main': poker, 'component': [poker, poker, poker]}) for poker2 in dic: if ALLOW_THREE_ONE and dic[poker2] >= 1 and poker2 != poker: # 三带一 combs.append( {'type': ComeType.TRIPLE_ONE, 'main': poker, 'component': [poker, poker, poker, poker2]}) if ALLOW_THREE_TWO and dic[poker2] >= 2 and poker2 != poker: # 三带二 combs.append({'type': ComeType.TRIPLE_TWO, 'main': poker, 'component': [poker, poker, poker, poker2, poker2]}) if dic[poker] == 4: # 炸弹 
combs.append({'type': ComeType.BOMB, 'main': poker, 'component': [poker, poker, poker, poker]}) if ALLOW_FOUR_TWO: pairs = [] ones = [] for poker2 in dic: if dic[poker2] == 1: ones.append(poker2) elif dic[poker2] == 2: pairs.append(poker2) # 四带二单 for i in range(len(ones)): for j in range(i + 1, len(ones)): combs.append({'type': ComeType.FOURTH_TWO_ONES, 'main': poker, 'component': [poker, poker, poker, poker, ones[i], ones[j]]}) # 四带二对 for i in range(len(pairs)): combs.append({'type': ComeType.FOURTH_TWO_ONES, 'main': poker, 'component': [poker, poker, poker, poker, pairs[i], pairs[i]]}) for j in range(i + 1, len(pairs)): combs.append({'type': ComeType.FOURTH_TWO_PAIRS, 'main': poker, 'component': [poker, poker, poker, poker, pairs[i], pairs[i], pairs[j], pairs[j]]}) # 所有顺子组合 # 以 COMB_TYPE.STRAIGHT * len(straight) 标志顺子牌型, 不同长度的顺子是不同的牌型 for straight in create_straight(list(set(pokers)), 5): combs.append({'type': ComeType.STRAIGHT * len(straight), 'main': straight[0], 'component': straight}) # 所有顺子组合 # 以 COMB_TYPE.STRAIGHT * len(straight) 标志顺子牌型, 不同长度的顺子是不同的牌型 for straight in create_straight(get_multi_poker(pokers, 2), 3): combs.append({'type': ComeType.EVEN_PAIR * len(straight), 'main': straight[0], 'component': straight}) # 返回所有可能的出牌类型 return combs # 根据列表创建顺子 def create_straight(list_of_nums, min_length): a = sorted(list_of_nums) lens = len(a) for begin in range(0, lens): for end in range(begin, lens): if a[end] - a[begin] != end - begin: break elif end - begin >= min_length - 1: yield list(range(a[begin], a[end] + 1)) # 获取多个数的元素集合 def get_multi_poker(pokers, count): poker_list = set() for poker in pokers: if pokers.count(poker) >= count: poker_list.add(poker) return list(poker_list) # 统计列表中每个元素的个数 def counter(pokers): dic = {} for poker in pokers: dic[poker] = pokers.count(poker) return dic # comb1 先出,问后出的 comb2 是否能打过 comb1 # 1. 同种牌型比较 main 值, main 值大的胜 # 2. 炸弹大过其他牌型 # 3. 
牌型不同, 后出为负 def can_beat(comb1, comb2): if not comb2 or comb2['type'] == ComeType.PASS: return False if not comb1 or comb1['type'] == ComeType.PASS: return True if comb1['type'] == comb2['type']: return comb2['main'] > comb1['main'] elif comb2['type'] == ComeType.BOMB: return True else: return False # 给定 pokers,求打出手牌 hand 后的牌 # 用 component 字段标志打出的牌, 可以方便地统一处理 def make_hand(pokers, c_hand): poker_clone = pokers[:] for poker in c_hand['component']: poker_clone.remove(poker) return poker_clone # 模拟每次出牌, my_pokers 为当前我的牌, enemy_pokers 为对手的牌 # last_hand 为上一手对手出的牌, cache 用于缓存牌局与胜负关系, is_farmer 是否为农民出牌 # 该回合由 my_pokers 出牌 # 返回:是否能出完所有手牌 def hand_out(my_pokers, enemy_pokers, last_hand=None, cache=None, is_farmer=True, first_hand=False): # 牌局终止的边界条件 if cache is None: cache = {} # 如果上一手为空, 则将上一手赋值为 HAND_PASS if last_hand is None: last_hand = HAND_PASS key = str((my_pokers, enemy_pokers, last_hand['component'], is_farmer)) if not my_pokers: cache[key] = True return {'hand_out': True, 'cache': cache, 'tip_hand': None} if not enemy_pokers: cache[key] = False return {'hand_out': False, 'cache': cache, 'tip_hand': None} # 从缓存中读取数据 if key in cache: return {'hand_out': cache[key], 'cache': cache, 'tip_hand': None} # 模拟出牌过程, 深度优先搜索, 找到赢的分支则返回 True for current_hand in get_all_hands(my_pokers): # 转换出牌权有两种情况: # 1. 当前手胜出, 则轮到对方选择出牌 # 2. 当前手 PASS, 且对方之前没有 PASS, 则轮到对方出牌 # 3. 
如果对手出不完则,我方胜利 if can_beat(last_hand, current_hand) or \ (last_hand['type'] != ComeType.PASS and current_hand['type'] == ComeType.PASS): if not \ hand_out(enemy_pokers, make_hand(my_pokers, current_hand), current_hand, cache, not is_farmer)[ 'hand_out']: if first_hand: return {'hand_out': True, 'cache': cache, 'tip_hand': current_hand} else: return {'hand_out': True, 'cache': cache, 'tip_hand': None} # 遍历所有情况, 均无法赢, 则返回 False cache[key] = False return {'hand_out': False, 'cache': cache, 'tip_hand': None} if __name__ == '__main__': import time # 残局1 # 是否允许三带一 ALLOW_THREE_ONE = True # 是否允许三带二 ALLOW_THREE_TWO = True # 是否允许四带二 ALLOW_FOUR_TWO = True lord_str = input('请输入地主牌:') farmer_str = input('请输入农民牌:') lord = lord_str.split() farmer = farmer_str.split() # 使用数值代替牌面 get_val(lord) get_val(farmer) print("正在尝试中…") start = time.clock() result = hand_out(farmer, lord, None, {}, True, True) # 输出结果和时间 elapsed = (time.clock() - start) print("Result:", result['hand_out']) print("Elapsed:", elapsed) if result['hand_out']: hand_cache = result['cache'] # 农名自动出牌 farmer = make_hand(farmer, result['tip_hand']) # 提示出牌 print("提示出牌: ", get_card(result['tip_hand']['component'])) flag = 1 while flag: # 输入地主的出牌 # 如果是 q 重新开始 lord_move_str = input('请输入地主的出牌:') if lord_move_str == 'q': lord_str = input('请输入地主牌:') farmer_str = input('请输入农民牌:') lord = lord_str.split() farmer = farmer_str.split() # 使用数值代替牌面 get_val(lord) get_val(farmer) print("正在尝试中…") start = time.clock() result = hand_out(farmer, lord, None, {}, True, True) hand_cache = result['cache'] # 农名自动出牌 farmer = make_hand(farmer, result['tip_hand']) # 提示出牌 print("提示出牌: ", get_card(result['tip_hand']['component'])) # 输出结果和时间 elapsed = (time.clock() - start) print("Result:", result['hand_out']) print("Elapsed:", elapsed) continue lord_temp = lord[:] # 解析地主输入的牌 lord_move = lord_move_str.split() get_val(lord_move) for obj in lord_move: lord.remove(obj) lord_hand = None for hand in get_all_hands(lord_temp): if operator.eq(hand['component'], 
lord_move): lord_hand = hand print("正在尝试中…") # 计算提示出牌,从遍历的缓存取 start = time.clock() success = False # 遍历尝试 for hand in get_all_hands(farmer): farmer_try = farmer[:] farmer_try = make_hand(farmer_try, hand) key_try = str((lord, farmer_try, hand['component'], False)) if key_try in result['cache'] and not result['cache'][key_try]: if can_beat(lord_hand, hand) or not hand['component']: farmer = make_hand(farmer, hand) success = True # 提示出牌 print("提示出牌: ", get_card(hand['component'])) break elapsed = (time.clock() - start) print("Result:", success) print("Elapsed:", elapsed) # 农民出完了 if len(farmer) == 0: print("出完了所有的牌了...\n") lord_str = input('请输入地主牌:') farmer_str = input('请输入农民牌:') lord = lord_str.split() farmer = farmer_str.split() # 使用数值代替牌面 get_val(lord) get_val(farmer) print("正在尝试中…") start = time.clock() result = hand_out(farmer, lord, None, {}, True, True) hand_cache = result['cache'] # 农名自动出牌 farmer = make_hand(farmer, result['tip_hand']) # 提示出牌 print("提示出牌: ", get_card(result['tip_hand']['component'])) # 输出结果和时间 elapsed = (time.clock() - start) print("Result:", result['hand_out']) print("Elapsed:", elapsed)
[ "operator.eq", "time.clock" ]
[((7683, 7695), 'time.clock', 'time.clock', ([], {}), '()\n', (7693, 7695), False, 'import time\n'), ((7783, 7795), 'time.clock', 'time.clock', ([], {}), '()\n', (7793, 7795), False, 'import time\n'), ((9557, 9569), 'time.clock', 'time.clock', ([], {}), '()\n', (9567, 9569), False, 'import time\n'), ((8588, 8600), 'time.clock', 'time.clock', ([], {}), '()\n', (8598, 8600), False, 'import time\n'), ((9398, 9439), 'operator.eq', 'operator.eq', (["hand['component']", 'lord_move'], {}), "(hand['component'], lord_move)\n", (9409, 9439), False, 'import operator\n'), ((10241, 10253), 'time.clock', 'time.clock', ([], {}), '()\n', (10251, 10253), False, 'import time\n'), ((10755, 10767), 'time.clock', 'time.clock', ([], {}), '()\n', (10765, 10767), False, 'import time\n'), ((8955, 8967), 'time.clock', 'time.clock', ([], {}), '()\n', (8965, 8967), False, 'import time\n'), ((11122, 11134), 'time.clock', 'time.clock', ([], {}), '()\n', (11132, 11134), False, 'import time\n')]
# -*- coding: utf-8 -*- def main(): from math import ceil import sys input = sys.stdin.readline a, b = map(int, input().split()) ans = 1 for i in range(1, b): z = ceil(a / i) if i * (z + 1) <= b: ans = max(ans, i) print(ans) if __name__ == "__main__": main()
[ "math.ceil" ]
[((200, 211), 'math.ceil', 'ceil', (['(a / i)'], {}), '(a / i)\n', (204, 211), False, 'from math import ceil\n')]
""" This script generates number list text files Number list contains stem of filenames Number list specifies the files used for a certain data split Prefix: - training: 0 (training/) - validation: 1 (training/) - testing: 2 (testing/) - domain adaptation training labelled: 4 (training/) - domain adaptation training unlabelled: 5 - domain adaptation validation labelled: 6 (training/) - domain adaptation validation unlabelled: 7 - domain adaptation testing: 8 """ import glob path = '/mnt/lustre/share/DSK/datasets/waymo_open_dataset_kitti/training/label_all' save_pathname = 'val.txt' prefixes = [] skip_empty = True # set false for testing set num_files = 100 # set None to take all def main(): filenames = [] for prefix in prefixes: filenames.extend(sorted(glob.glob(path + '/' + prefix + '*.txt'))) if num_files is not None: filenames = filenames[:num_files] num_list = [] for filename in filenames: # skip empty files if skip_empty: with open(filename, 'r') as ff: lines = ff.readlines() if not lines: # empty print('Skipping', filename) continue num = filename.split('/')[-1][:-4] num_list.append(num) with open(save_pathname, 'w') as ot: for num in num_list: ot.write(num + '\n') if __name__ == '__main__': main()
[ "glob.glob" ]
[((866, 906), 'glob.glob', 'glob.glob', (["(path + '/' + prefix + '*.txt')"], {}), "(path + '/' + prefix + '*.txt')\n", (875, 906), False, 'import glob\n')]
# author: Bartlomiej "furas" Burek (https://blog.furas.pl) # date: 2022.03.07 # [javascript - export gpx file with python? - Stack Overflow](https://stackoverflow.com/questions/71375579/export-gpx-file-with-python/71375874#71375874) import requests start_lon = -0.0898 # can be also as text start_lat = 51.514739 # can be also as text end_lon = -0.096656 # can be also as text end_lat = 51.516214 # can be also as text data = f"{start_lon},{start_lat};{end_lon},{end_lat}" transport = 'bike' # 'car', 'foot' url = f'https://routing.openstreetmap.de/routed-{transport}/route/v1/driving/{data}' payload = { 'overview': 'false', # can't be True/False 'alternatives': 'true', # can't be True/False 'steps': 'true', # can't be True/False } response = requests.get(url, params=payload) print(response.url) #print(response.text) print('---') data = response.json() for point in data['waypoints']: print('name:', point['name']) print('distance:', point['distance']) print('location:', point['location']) print('---')
[ "requests.get" ]
[((790, 823), 'requests.get', 'requests.get', (['url'], {'params': 'payload'}), '(url, params=payload)\n', (802, 823), False, 'import requests\n')]
import pulumi from pulumi_aws import ec2, get_availability_zones stack_name = pulumi.get_stack() project_name = pulumi.get_project() config = pulumi.Config('vpc') vpc = ec2.Vpc(resource_name=f"eks-{project_name}-{stack_name}", cidr_block="10.100.0.0/16", enable_dns_support=True, enable_dns_hostnames=True, instance_tenancy='default', tags={"Project": project_name, "Stack": stack_name}) igw = ec2.InternetGateway(resource_name=f'vpc-ig-{project_name}-{stack_name}', vpc_id=vpc.id, tags={"Project": project_name, "Stack": stack_name}) route_table = ec2.RouteTable(resource_name=f'vpc-route-table-{project_name}-{stack_name}', vpc_id=vpc.id, routes=[ec2.RouteTableRouteArgs( cidr_block='0.0.0.0/0', gateway_id=igw.id)], tags={"Project": project_name, "Stack": stack_name}) # Use availability zones defined in the configuration file if available if config.get('azs'): azs = config.get_object('azs') else: azs = get_availability_zones(state="available").names public_subnets = [] private_subnets = [] # If you wanted to double the number of subnets because you have few # availability zones, you can redefine the variable below to something # like: list(itertools.chain(azs, azs)) which would just repeat the # same list of AZs twice. The iteration logic will pick it up for # subnet creation and create unique names. 
azs_for_subnets = list(azs) if len(azs) <= 0: raise ValueError("There are no usable availability zones") if len(azs) == 1: pulumi.log.warn("There is only a single usable availability zone") elif len(azs) == 2: pulumi.log.warn("There are only two usable availability zones") for i, az in enumerate(azs_for_subnets): if not isinstance(az, str): raise f'availability zone specified [{i}] is not a valid string value: [{az}]' if az.strip() == "": raise f'availability zone specified [{i}] is an empty string' public_subnet_addr = i resource_name = f'{az}-k8s-public-{project_name}-{stack_name}-{i}' subnet = ec2.Subnet(resource_name=resource_name, availability_zone=az, vpc_id=vpc.id, cidr_block=f"10.100.{public_subnet_addr}.0/24", map_public_ip_on_launch=True, tags={"Project": project_name, "Stack": stack_name, "kubernetes.io/role/elb": "1"}) ec2.RouteTableAssociation(f"route-table-assoc-public-{az}-{i}", route_table_id=route_table.id, subnet_id=subnet.id) public_subnets.append(subnet) for i, az in enumerate(azs_for_subnets): private_subnet_addr = (i + 1) * 16 resource_name = f"{az}-k8s-private-{project_name}-{stack_name}-{i}" subnet = ec2.Subnet(resource_name=resource_name, availability_zone=az, vpc_id=vpc.id, cidr_block=f"10.100.{private_subnet_addr}.0/20", tags={"Project": project_name, "Stack": stack_name, "kubernetes.io/role/internal-elb": "1"}, map_public_ip_on_launch=False) ec2.RouteTableAssociation(resource_name=f"route-table-assoc-private-{az}-{project_name}-{stack_name}-{i}", route_table_id=route_table.id, subnet_id=subnet.id) private_subnets.append(subnet) eks_security_group = ec2.SecurityGroup(resource_name=f'eks-cluster-sg-{project_name}-{stack_name}', vpc_id=vpc.id, description="Allow all HTTP(s) traffic to EKS Cluster", tags={"Project": project_name, "Stack": stack_name}, ingress=[ ec2.SecurityGroupIngressArgs( cidr_blocks=['0.0.0.0/0'], from_port=443, to_port=443, protocol='tcp', description='Allow pods to communicate with the cluster API Server.'), 
ec2.SecurityGroupIngressArgs( cidr_blocks=['0.0.0.0/0'], from_port=80, to_port=80, protocol='tcp', description='Allow internet access to pods')]) pulumi.export("azs", azs) pulumi.export("vpc", vpc)
[ "pulumi.export", "pulumi_aws.ec2.Subnet", "pulumi.log.warn", "pulumi_aws.ec2.RouteTableRouteArgs", "pulumi_aws.ec2.Vpc", "pulumi_aws.ec2.SecurityGroupIngressArgs", "pulumi_aws.ec2.InternetGateway", "pulumi.Config", "pulumi_aws.ec2.RouteTableAssociation", "pulumi.get_project", "pulumi.get_stack",...
[((80, 98), 'pulumi.get_stack', 'pulumi.get_stack', ([], {}), '()\n', (96, 98), False, 'import pulumi\n'), ((114, 134), 'pulumi.get_project', 'pulumi.get_project', ([], {}), '()\n', (132, 134), False, 'import pulumi\n'), ((145, 165), 'pulumi.Config', 'pulumi.Config', (['"""vpc"""'], {}), "('vpc')\n", (158, 165), False, 'import pulumi\n'), ((173, 404), 'pulumi_aws.ec2.Vpc', 'ec2.Vpc', ([], {'resource_name': 'f"""eks-{project_name}-{stack_name}"""', 'cidr_block': '"""10.100.0.0/16"""', 'enable_dns_support': '(True)', 'enable_dns_hostnames': '(True)', 'instance_tenancy': '"""default"""', 'tags': "{'Project': project_name, 'Stack': stack_name}"}), "(resource_name=f'eks-{project_name}-{stack_name}', cidr_block=\n '10.100.0.0/16', enable_dns_support=True, enable_dns_hostnames=True,\n instance_tenancy='default', tags={'Project': project_name, 'Stack':\n stack_name})\n", (180, 404), False, 'from pulumi_aws import ec2, get_availability_zones\n'), ((489, 633), 'pulumi_aws.ec2.InternetGateway', 'ec2.InternetGateway', ([], {'resource_name': 'f"""vpc-ig-{project_name}-{stack_name}"""', 'vpc_id': 'vpc.id', 'tags': "{'Project': project_name, 'Stack': stack_name}"}), "(resource_name=f'vpc-ig-{project_name}-{stack_name}',\n vpc_id=vpc.id, tags={'Project': project_name, 'Stack': stack_name})\n", (508, 633), False, 'from pulumi_aws import ec2, get_availability_zones\n'), ((5147, 5172), 'pulumi.export', 'pulumi.export', (['"""azs"""', 'azs'], {}), "('azs', azs)\n", (5160, 5172), False, 'import pulumi\n'), ((5173, 5198), 'pulumi.export', 'pulumi.export', (['"""vpc"""', 'vpc'], {}), "('vpc', vpc)\n", (5186, 5198), False, 'import pulumi\n'), ((1826, 1892), 'pulumi.log.warn', 'pulumi.log.warn', (['"""There is only a single usable availability zone"""'], {}), "('There is only a single usable availability zone')\n", (1841, 1892), False, 'import pulumi\n'), ((2349, 2600), 'pulumi_aws.ec2.Subnet', 'ec2.Subnet', ([], {'resource_name': 'resource_name', 'availability_zone': 'az', 'vpc_id': 
'vpc.id', 'cidr_block': 'f"""10.100.{public_subnet_addr}.0/24"""', 'map_public_ip_on_launch': '(True)', 'tags': "{'Project': project_name, 'Stack': stack_name, 'kubernetes.io/role/elb': '1'}"}), "(resource_name=resource_name, availability_zone=az, vpc_id=vpc.id,\n cidr_block=f'10.100.{public_subnet_addr}.0/24', map_public_ip_on_launch\n =True, tags={'Project': project_name, 'Stack': stack_name,\n 'kubernetes.io/role/elb': '1'})\n", (2359, 2600), False, 'from pulumi_aws import ec2, get_availability_zones\n'), ((2772, 2891), 'pulumi_aws.ec2.RouteTableAssociation', 'ec2.RouteTableAssociation', (['f"""route-table-assoc-public-{az}-{i}"""'], {'route_table_id': 'route_table.id', 'subnet_id': 'subnet.id'}), "(f'route-table-assoc-public-{az}-{i}',\n route_table_id=route_table.id, subnet_id=subnet.id)\n", (2797, 2891), False, 'from pulumi_aws import ec2, get_availability_zones\n'), ((3148, 3409), 'pulumi_aws.ec2.Subnet', 'ec2.Subnet', ([], {'resource_name': 'resource_name', 'availability_zone': 'az', 'vpc_id': 'vpc.id', 'cidr_block': 'f"""10.100.{private_subnet_addr}.0/20"""', 'tags': "{'Project': project_name, 'Stack': stack_name,\n 'kubernetes.io/role/internal-elb': '1'}", 'map_public_ip_on_launch': '(False)'}), "(resource_name=resource_name, availability_zone=az, vpc_id=vpc.id,\n cidr_block=f'10.100.{private_subnet_addr}.0/20', tags={'Project':\n project_name, 'Stack': stack_name, 'kubernetes.io/role/internal-elb':\n '1'}, map_public_ip_on_launch=False)\n", (3158, 3409), False, 'from pulumi_aws import ec2, get_availability_zones\n'), ((3582, 3749), 'pulumi_aws.ec2.RouteTableAssociation', 'ec2.RouteTableAssociation', ([], {'resource_name': 'f"""route-table-assoc-private-{az}-{project_name}-{stack_name}-{i}"""', 'route_table_id': 'route_table.id', 'subnet_id': 'subnet.id'}), "(resource_name=\n f'route-table-assoc-private-{az}-{project_name}-{stack_name}-{i}',\n route_table_id=route_table.id, subnet_id=subnet.id)\n", (3607, 3749), False, 'from pulumi_aws import ec2, 
get_availability_zones\n'), ((1286, 1327), 'pulumi_aws.get_availability_zones', 'get_availability_zones', ([], {'state': '"""available"""'}), "(state='available')\n", (1308, 1327), False, 'from pulumi_aws import ec2, get_availability_zones\n'), ((1917, 1980), 'pulumi.log.warn', 'pulumi.log.warn', (['"""There are only two usable availability zones"""'], {}), "('There are only two usable availability zones')\n", (1932, 1980), False, 'import pulumi\n'), ((887, 953), 'pulumi_aws.ec2.RouteTableRouteArgs', 'ec2.RouteTableRouteArgs', ([], {'cidr_block': '"""0.0.0.0/0"""', 'gateway_id': 'igw.id'}), "(cidr_block='0.0.0.0/0', gateway_id=igw.id)\n", (910, 953), False, 'from pulumi_aws import ec2, get_availability_zones\n'), ((4315, 4493), 'pulumi_aws.ec2.SecurityGroupIngressArgs', 'ec2.SecurityGroupIngressArgs', ([], {'cidr_blocks': "['0.0.0.0/0']", 'from_port': '(443)', 'to_port': '(443)', 'protocol': '"""tcp"""', 'description': '"""Allow pods to communicate with the cluster API Server."""'}), "(cidr_blocks=['0.0.0.0/0'], from_port=443,\n to_port=443, protocol='tcp', description=\n 'Allow pods to communicate with the cluster API Server.')\n", (4343, 4493), False, 'from pulumi_aws import ec2, get_availability_zones\n'), ((4765, 4911), 'pulumi_aws.ec2.SecurityGroupIngressArgs', 'ec2.SecurityGroupIngressArgs', ([], {'cidr_blocks': "['0.0.0.0/0']", 'from_port': '(80)', 'to_port': '(80)', 'protocol': '"""tcp"""', 'description': '"""Allow internet access to pods"""'}), "(cidr_blocks=['0.0.0.0/0'], from_port=80,\n to_port=80, protocol='tcp', description='Allow internet access to pods')\n", (4793, 4911), False, 'from pulumi_aws import ec2, get_availability_zones\n')]
# coding=utf-8 from flask import Flask from mailshake import ToConsoleMailer, SMTPMailer from sqlalchemy_wrapper import SQLAlchemy import settings app = Flask(__name__) app.config.from_object(settings) db = SQLAlchemy(settings.SQLALCHEMY_URI, app) if settings.DEBUG: mailer = ToConsoleMailer() else: mailer = SMTPMailer(**settings.MAILER_SETTINGS)
[ "mailshake.ToConsoleMailer", "sqlalchemy_wrapper.SQLAlchemy", "mailshake.SMTPMailer", "flask.Flask" ]
[((156, 171), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (161, 171), False, 'from flask import Flask\n'), ((211, 251), 'sqlalchemy_wrapper.SQLAlchemy', 'SQLAlchemy', (['settings.SQLALCHEMY_URI', 'app'], {}), '(settings.SQLALCHEMY_URI, app)\n', (221, 251), False, 'from sqlalchemy_wrapper import SQLAlchemy\n'), ((285, 302), 'mailshake.ToConsoleMailer', 'ToConsoleMailer', ([], {}), '()\n', (300, 302), False, 'from mailshake import ToConsoleMailer, SMTPMailer\n'), ((322, 360), 'mailshake.SMTPMailer', 'SMTPMailer', ([], {}), '(**settings.MAILER_SETTINGS)\n', (332, 360), False, 'from mailshake import ToConsoleMailer, SMTPMailer\n')]
import numpy as np import os from scipy.interpolate import interp1d import matplotlib.pyplot as plt import starterlite grf = starterlite.simulation.GaussianRandomField() grf_input = np.load(os.getenv('STARTERLITE')+'/output/grf/grf_samples_x180y1z30_N1.npz', allow_pickle=True) f_in = grf_input['grf'] f_in = f_in.squeeze() x_in, z_in, y_in = grf_input['coords'] print('f_in has shape:', f_in.shape) print('x_in has shape:', x_in.shape) print('y_in has shape:', y_in.shape) print('z_in:', z_in) ksph, psph = grf.AverageAutoPS(f=f_in, x=x_in, y=y_in, z=None, bins=11, log=True, avg_type='sph') plt.figure(figsize=(6,3)) plt.loglog(ksph, psph, 'ko-', lw=1) plt.ylim([1e7, 1e9]) plt.xlabel('wavenumber k', fontsize=16) plt.ylabel('measured P(k)', fontsize=16) plt.tight_layout() plt.show()
[ "matplotlib.pyplot.loglog", "os.getenv", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.figure", "starterlite.simulation.GaussianRandomField", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylim", "matplotlib.pyplot.show" ]
[((127, 171), 'starterlite.simulation.GaussianRandomField', 'starterlite.simulation.GaussianRandomField', ([], {}), '()\n', (169, 171), False, 'import starterlite\n'), ((601, 627), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 3)'}), '(figsize=(6, 3))\n', (611, 627), True, 'import matplotlib.pyplot as plt\n'), ((627, 662), 'matplotlib.pyplot.loglog', 'plt.loglog', (['ksph', 'psph', '"""ko-"""'], {'lw': '(1)'}), "(ksph, psph, 'ko-', lw=1)\n", (637, 662), True, 'import matplotlib.pyplot as plt\n'), ((663, 699), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[10000000.0, 1000000000.0]'], {}), '([10000000.0, 1000000000.0])\n', (671, 699), True, 'import matplotlib.pyplot as plt\n'), ((684, 723), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""wavenumber k"""'], {'fontsize': '(16)'}), "('wavenumber k', fontsize=16)\n", (694, 723), True, 'import matplotlib.pyplot as plt\n'), ((724, 764), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""measured P(k)"""'], {'fontsize': '(16)'}), "('measured P(k)', fontsize=16)\n", (734, 764), True, 'import matplotlib.pyplot as plt\n'), ((765, 783), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (781, 783), True, 'import matplotlib.pyplot as plt\n'), ((784, 794), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (792, 794), True, 'import matplotlib.pyplot as plt\n'), ((193, 217), 'os.getenv', 'os.getenv', (['"""STARTERLITE"""'], {}), "('STARTERLITE')\n", (202, 217), False, 'import os\n')]
import RPi.GPIO as GPIO import sys import os from datetime import datetime from time import sleep import socket try: from mfrc522 import SimpleMFRC522 from getmac import get_mac_address import hashlib except: os.system("pip3 install mfrc522") os.system("pip3 install getmac") os.system("pip3 install hashlib") RED = "\033[1;31m" BLUE = "\033[1;34m" CYAN = "\033[1;36m" GREEN = "\033[1;32m" RESET = "\033[0;0m" GPIO.setwarnings(False) reader = SimpleMFRC522() os.system('setterm -cursor off') def cpf(text): sizex, sizey = os.get_terminal_size() center = "{:^"+str(sizex)+"}" print(center.format(text)) def getsalt(): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) ip = s.getsockname()[0] mac = get_mac_address(ip=ip) date = datetime.now() signd = date.strftime('%d-%B-%Y') signt = date.strftime("%I:%M:%S%p") salt = str(mac)+str(signt)+str(signd) return salt # Salt for the hash is "<MAC Address of the device><Date><Time>" try: os.system("clear") cpf("-------------") cpf("HASH RFID TAG") cpf("-------------") print() # Checks if hash.dat file and creates if there isn't if not os.path.isfile('hash.dat'): file = open('hash.dat', 'w') file.close() if os.path.isfile('hash.dat'): sys.stdout.write(BLUE) # Reads RFID tag print("[-] INFO: \033[1;36mPlace RFID Card.") sys.stdout.write(RESET) print() id, txt = reader.read() print("Card ID: ", id) sys.stdout.write(GREEN) # Calls getsalt() to generate salt for hash function salt = getsalt() # Increments the salt to the RFID tag's unique ID number string = str(id)+str(salt) # Generates hash hsh = hashlib.md5(string.encode('utf-8')).hexdigest() date = datetime.now() signd = date.strftime('%d-%B-%Y') signt = date.strftime("%H:%M:%S") # writes hash on to RFID tag while not reader.write(hsh): reader.write(hsh) sys.stdout.write(GREEN) print("[+] SUCCESS: Card written successfully.") sys.stdout.write(BLUE) print("[-] INFO: \033[1;36mSigned at "+signt+" on "+signd+".") sys.stdout.write(RESET) # Stores the hash with 
the timestamp onto a file locally to later read from print() with open("hash.dat", "w") as data: data.write(str(hsh)+" "+str(signd)+" "+str(signt)+"\n") sleep(3) finally: GPIO.cleanup() os.system('setterm -cursor on')
[ "RPi.GPIO.cleanup", "os.get_terminal_size", "socket.socket", "RPi.GPIO.setwarnings", "time.sleep", "mfrc522.SimpleMFRC522", "os.path.isfile", "datetime.datetime.now", "os.system", "getmac.get_mac_address", "sys.stdout.write" ]
[((437, 460), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (453, 460), True, 'import RPi.GPIO as GPIO\n'), ((470, 485), 'mfrc522.SimpleMFRC522', 'SimpleMFRC522', ([], {}), '()\n', (483, 485), False, 'from mfrc522 import SimpleMFRC522\n'), ((486, 518), 'os.system', 'os.system', (['"""setterm -cursor off"""'], {}), "('setterm -cursor off')\n", (495, 518), False, 'import os\n'), ((555, 577), 'os.get_terminal_size', 'os.get_terminal_size', ([], {}), '()\n', (575, 577), False, 'import os\n'), ((668, 716), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (681, 716), False, 'import socket\n'), ((786, 808), 'getmac.get_mac_address', 'get_mac_address', ([], {'ip': 'ip'}), '(ip=ip)\n', (801, 808), False, 'from getmac import get_mac_address\n'), ((820, 834), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (832, 834), False, 'from datetime import datetime\n'), ((1048, 1066), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (1057, 1066), False, 'import os\n'), ((1311, 1337), 'os.path.isfile', 'os.path.isfile', (['"""hash.dat"""'], {}), "('hash.dat')\n", (1325, 1337), False, 'import os\n'), ((2501, 2515), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (2513, 2515), True, 'import RPi.GPIO as GPIO\n'), ((2520, 2551), 'os.system', 'os.system', (['"""setterm -cursor on"""'], {}), "('setterm -cursor on')\n", (2529, 2551), False, 'import os\n'), ((226, 259), 'os.system', 'os.system', (['"""pip3 install mfrc522"""'], {}), "('pip3 install mfrc522')\n", (235, 259), False, 'import os\n'), ((264, 296), 'os.system', 'os.system', (['"""pip3 install getmac"""'], {}), "('pip3 install getmac')\n", (273, 296), False, 'import os\n'), ((301, 334), 'os.system', 'os.system', (['"""pip3 install hashlib"""'], {}), "('pip3 install hashlib')\n", (310, 334), False, 'import os\n'), ((1218, 1244), 'os.path.isfile', 'os.path.isfile', (['"""hash.dat"""'], {}), 
"('hash.dat')\n", (1232, 1244), False, 'import os\n'), ((1347, 1369), 'sys.stdout.write', 'sys.stdout.write', (['BLUE'], {}), '(BLUE)\n', (1363, 1369), False, 'import sys\n'), ((1449, 1472), 'sys.stdout.write', 'sys.stdout.write', (['RESET'], {}), '(RESET)\n', (1465, 1472), False, 'import sys\n'), ((1560, 1583), 'sys.stdout.write', 'sys.stdout.write', (['GREEN'], {}), '(GREEN)\n', (1576, 1583), False, 'import sys\n'), ((1848, 1862), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1860, 1862), False, 'from datetime import datetime\n'), ((2051, 2074), 'sys.stdout.write', 'sys.stdout.write', (['GREEN'], {}), '(GREEN)\n', (2067, 2074), False, 'import sys\n'), ((2140, 2162), 'sys.stdout.write', 'sys.stdout.write', (['BLUE'], {}), '(BLUE)\n', (2156, 2162), False, 'import sys\n'), ((2242, 2265), 'sys.stdout.write', 'sys.stdout.write', (['RESET'], {}), '(RESET)\n', (2258, 2265), False, 'import sys\n'), ((2478, 2486), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (2483, 2486), False, 'from time import sleep\n')]
import json import os import uuid def main(event, context): return_object = { "success": True, "response_id": str(uuid.uuid4()), "querystring": event.get("queryStringParameters"), "environment_variables": os.environ['GREETING'] } return { "statusCode": 200, "headers": { 'Access-Control-Allow-Origin': os.environ['CORS_ORIGIN'], 'Access-Control-Allow-Credentials': True, }, "body": json.dumps(return_object) }
[ "json.dumps", "uuid.uuid4" ]
[((483, 508), 'json.dumps', 'json.dumps', (['return_object'], {}), '(return_object)\n', (493, 508), False, 'import json\n'), ((135, 147), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (145, 147), False, 'import uuid\n')]
# -*- coding: utf-8 -*- import plotly.graph_objects as go import pandas as pd import dash import dash_core_components as dcc import dash_html_components as html def plot_ohlc(co_df: pd.DataFrame, title: str = ""): colors = { 'background': '#11001A', # onyx 'text': '#7FDBFF' } hovertext = [] for i in range(len(co_df['Open'])): hovertext.append( co_df['Date'][i].strftime("%m/%d/%Y") + '<br>O: '+str(round(co_df['AdjOpen'][i], 4)) + '\tH: '+str(round(co_df['AdjHigh'][i], 4)) + '<br>L: '+str(round(co_df['AdjLow'][i], 4)) + '\tC: '+str(co_df['AdjClose'][i])) fig = go.Figure(data=go.Candlestick(x=co_df['Date'], open=co_df['AdjOpen'], high=co_df['AdjHigh'], low=co_df['AdjLow'], close=co_df['AdjClose'], text=hovertext, hoverinfo='text', increasing_line_color='#0048BA', decreasing_line_color='#E60000'), layout=go.Layout( title=title, width=1200, height=600, plot_bgcolor=colors['background'], paper_bgcolor=colors['background'], xaxis_rangeslider_visible=False) ) fig.update_xaxes( rangebreaks=[ dict(bounds=["sat", "mon"]), # hide weekends # hide Christmas and New Year's dict(values=["2015-12-25", "2016-01-01"]) ] ) # Add range slider fig.update_layout( xaxis=dict( rangeselector=dict( buttons=list([ dict(count=1, label="1m", step="month", stepmode="backward"), dict(count=6, label="6m", step="month", stepmode="backward"), dict(count=1, label="YTD", step="year", stepmode="todate"), dict(count=1, label="1y", step="year", stepmode="backward"), dict(step="all") ]) ), rangeslider=dict( visible=False ), type="date" ) ) app = dash.Dash() app.layout = html.Div( style={'backgroundColor': colors['background']}, children=[ html.H1( children='Historical OHLC Chart', style={ 'fontSize': 50, 'text-decoration': 'underline', 'font-family': 'Ubuntu', 'textAlign': 'center', 'color': colors['text'] } ), html.Div(children='Choose the company symbol below', style={ 'textAlign': 'center', 'fontSize': 25, 'color': colors['text'] }), dcc.Graph( id='ohlc_graph', figure=fig), ] ) # html.Label('OHLC 
Graph'), app.run_server(debug=True, use_reloader=True) # fig.update_layout(title="AAPL Stock", xaxis_rangeslider_visible=False) # fig.update_layout(xaxis_rangeslider_visible=False) # fig.show() # fig.write_image('figure.png') if __name__ == "__main__": # import .json_to_df as js # from json_to_df import json_to_df # from .plot_ohlc import json_to_df aapl_df = json_to_df('data_aapl.json', ) plot_ohlc(aapl_df, title="Apple Stock")
[ "plotly.graph_objects.Layout", "dash_html_components.H1", "plotly.graph_objects.Candlestick", "dash.Dash", "dash_html_components.Div", "dash_core_components.Graph" ]
[((2638, 2649), 'dash.Dash', 'dash.Dash', ([], {}), '()\n', (2647, 2649), False, 'import dash\n'), ((691, 927), 'plotly.graph_objects.Candlestick', 'go.Candlestick', ([], {'x': "co_df['Date']", 'open': "co_df['AdjOpen']", 'high': "co_df['AdjHigh']", 'low': "co_df['AdjLow']", 'close': "co_df['AdjClose']", 'text': 'hovertext', 'hoverinfo': '"""text"""', 'increasing_line_color': '"""#0048BA"""', 'decreasing_line_color': '"""#E60000"""'}), "(x=co_df['Date'], open=co_df['AdjOpen'], high=co_df['AdjHigh'\n ], low=co_df['AdjLow'], close=co_df['AdjClose'], text=hovertext,\n hoverinfo='text', increasing_line_color='#0048BA',\n decreasing_line_color='#E60000')\n", (705, 927), True, 'import plotly.graph_objects as go\n'), ((1264, 1423), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'title': 'title', 'width': '(1200)', 'height': '(600)', 'plot_bgcolor': "colors['background']", 'paper_bgcolor': "colors['background']", 'xaxis_rangeslider_visible': '(False)'}), "(title=title, width=1200, height=600, plot_bgcolor=colors[\n 'background'], paper_bgcolor=colors['background'],\n xaxis_rangeslider_visible=False)\n", (1273, 1423), True, 'import plotly.graph_objects as go\n'), ((2765, 2943), 'dash_html_components.H1', 'html.H1', ([], {'children': '"""Historical OHLC Chart"""', 'style': "{'fontSize': 50, 'text-decoration': 'underline', 'font-family': 'Ubuntu',\n 'textAlign': 'center', 'color': colors['text']}"}), "(children='Historical OHLC Chart', style={'fontSize': 50,\n 'text-decoration': 'underline', 'font-family': 'Ubuntu', 'textAlign':\n 'center', 'color': colors['text']})\n", (2772, 2943), True, 'import dash_html_components as html\n'), ((3113, 3241), 'dash_html_components.Div', 'html.Div', ([], {'children': '"""Choose the company symbol below"""', 'style': "{'textAlign': 'center', 'fontSize': 25, 'color': colors['text']}"}), "(children='Choose the company symbol below', style={'textAlign':\n 'center', 'fontSize': 25, 'color': colors['text']})\n", (3121, 3241), True, 'import 
dash_html_components as html\n'), ((3313, 3351), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""ohlc_graph"""', 'figure': 'fig'}), "(id='ohlc_graph', figure=fig)\n", (3322, 3351), True, 'import dash_core_components as dcc\n')]
import unittest import sme class TestMembrane(unittest.TestCase): def test_membrane(self): m = sme.open_example_model() self.assertEqual(len(m.membranes), 2) self.assertRaises(sme.InvalidArgument, lambda: m.membranes["X"]) mem = m.membranes["Outside <-> Cell"] self.assertEqual(mem, m.membranes[0]) self.assertEqual(m.membranes[-1], m.membranes[1]) self.assertEqual(repr(mem), "<sme.Membrane named 'Outside <-> Cell'>") self.assertEqual(str(mem)[0:43], "<sme.Membrane>\n - name: 'Outside <-> Cell'") self.assertEqual(mem.name, "Outside <-> Cell") mem.name = "new name" self.assertEqual(mem.name, "new name") self.assertEqual(len(mem.reactions), 2) self.assertRaises(sme.InvalidArgument, lambda: mem.reactions["X"]) r = mem.reactions["A uptake from outside"] self.assertEqual(r.name, "A uptake from outside") # export model, open again, check membrane is preserved m.export_sbml_file("tmp.xml") m2 = sme.open_sbml_file("tmp.xml") self.assertEqual(len(m2.membranes), 2) mem2 = m2.membranes["new name"] self.assertEqual(mem2.name, mem.name) self.assertEqual(len(mem2.reactions), 2) r2 = mem2.reactions["A uptake from outside"] self.assertEqual(r2.name, r.name)
[ "sme.open_sbml_file", "sme.open_example_model" ]
[((109, 133), 'sme.open_example_model', 'sme.open_example_model', ([], {}), '()\n', (131, 133), False, 'import sme\n'), ((1050, 1079), 'sme.open_sbml_file', 'sme.open_sbml_file', (['"""tmp.xml"""'], {}), "('tmp.xml')\n", (1068, 1079), False, 'import sme\n')]
from flask import Flask, request, Response import json import pymongo from flask_cors import CORS from bson.json_util import dumps, loads import os from azure.storage.blob import BlockBlobService, PublicAccess from celery import Celery import subprocess import uuid app = Flask(__name__) CORS(app) db_client = pymongo.MongoClient(os.environ['SPASS_CONNECTION_STRING']).spassDatabase seismic_blob = BlockBlobService(account_name='seismicdata', account_key=os.environ['SPASS_DATA_BLOB_KEY']) seismic_blob.create_container('seismic-data') seismic_blob.set_container_acl('seismic-data', public_access=PublicAccess.Container) seismic_blob.create_container('seismic-tools') seismic_blob.set_container_acl('seismic-tools', public_access=PublicAccess.Container) seismic_blob.create_container('seismic-results') seismic_blob.set_container_acl('seismic-results', public_access=PublicAccess.Container) celery = Celery(app.name, broker=os.environ['SPASS_CELERY_BROKER'], backend=os.environ['SPASS_CELERY_BROKER']) @celery.task def submit_celery(tool_name, data_name, args): file_id = str(uuid.uuid4()) db_client.statusCollection.insert_one({'status': 'Executing', 'job_id': file_id}) seismic_blob.get_blob_to_path('seismic-tools', tool_name, tool_name) seismic_blob.get_blob_to_path('seismic-data', data_name, data_name) cmd_args = '' for i in range(1, len(args) + 1): cmd_args = cmd_args + ' ' + args[str(i)] total_cmd = './' + tool_name + cmd_args os.system(total_cmd) os.system('rm -rf ' + tool_name + ' ' + data_name) file_name = file_id + '.tar.gz' os.system('tar -czvf ' + file_name+ ' *.su') seismic_blob.create_blob_from_path('seismic-results', file_name, file_name) os.system('rm -rf *.su ' + file_name) data_register = {} data_register['tool'] = tool_name data_register['data'] = data_name data_register['args'] = args data_register['id'] = file_id db_client.resultsCollection.insert_one(data_register) db_client.statusCollection.update({'job_id': file_id}, {'$set': { 'status': 'Executed'}}) return 
@app.route("/healthz") def health(): return Response(status=200) @app.route("/api/users/create/", methods=['POST']) def create_user(): user_data = request.get_json(force=True) result = db_client.usersCollection.find({'email': user_data['email']}) if result.count() != 0: return Response({'message': 'This email is already in use'},status=409) db_id = db_client.usersCollection.insert_one(user_data).inserted_id if db_id: return Response(status=200) else: return Response(status=409) @app.route("/api/users/delete/", methods=['DELETE']) def delete_user(): user_email = request.get_json(force=True)['email'] result = db_client.usersCollection.delete_one({"email": user_email}) if result.deleted_count: return Response(status=200) else: return Response(status=404) @app.route("/api/users/", methods=['GET']) def list_users(): result = db_client.usersCollection.find({}) return Response(dumps(result), status=200) @app.route("/api/users/authenticate/", methods=['POST']) def authenticate(): data = request.get_json(force=True) result = db_client.usersCollection.find_one({'email': data['email']}) if result: if result['password'] == data['pass']: return Response(status=200) else: return Response(status=401) else: return Response(status=401) @app.route("/api/tasks/parameters/<tool_name>/", methods=['GET']) def get_parameters(tool_name): result = db_client.toolsCollection.find_one({"name": tool_name}) return Response(dumps(result["args"]), status=200) @app.route("/api/tasks/submit/", methods=['POST']) def submit_task(): data = request.get_json(force=True) submit_celery.delay(data['tool'], data['data'], data['args']) return "SUCCESS" @app.route("/api/results/") def get_jobs_results(): all_results = db_client.resultsCollection.find({}) return Response(dumps(all_results),status=200) @app.route("/api/status/") def get_jobs_status(): all_status = db_client.statusCollection.find({}) return Response(dumps(all_status),status=200) @app.route("/api/results/<id>") def get_job_results(id): raise 
NotImplementedError() @app.route('/api/data/upload/', methods=['POST']) def upload_data(): data = request.files.items() for d in data: data_name = d[0] data_content = d[1] upload_to_azure(data_name,'seismic-data',data_content) return "Uploaded" @app.route('/api/data/', methods=['GET']) def get_files_blob(): return json.dumps(list_files('seismic-data')) def upload_to_azure(data_name, container_name, data_content): data_content.save(data_name) seismic_blob.create_blob_from_path(container_name, data_name, data_name) os.system('rm -rf '+ data_name) @app.route('/api/tools/', methods=['GET']) def get_tools_blob(): return json.dumps(list_files('seismic-tools')) def list_files(container_name): data = seismic_blob.list_blobs(container_name) all_names = [d.name for d in data] return all_names @app.route('/api/tools/upload/', methods=['POST']) def upload_tool(): data = request.files.items() arguments = request.form for d in data: data_name = d[0] data_content = d[1] upload_to_azure(data_name, 'seismic-tools', data_content) for a in arguments.items(): all_arguments = a[1] splited = all_arguments.split(',') tool_document = {} tool_document['name'] = data_name tool_document['args'] = [] for p in splited: name, description = p.split(':') new_arg = {} new_arg['name'] = name new_arg['description'] = description tool_document['args'].append(new_arg) db_client.toolsCollection.insert_one(tool_document) return 'Uploaded' @app.route('/api/tools/<name>/', methods=['DELETE']) def delete_tool(name): delete_blob(name, 'seismic-tools') return 'Deleted' @app.route('/api/data/<name>/', methods=['DELETE']) def delete_data(name): delete_blob(name, 'seismic-data') return 'Deleted' def delete_blob(blob_name, container_name): seismic_blob.delete_blob(container_name, blob_name) if __name__ == "__main__": app.run('0.0.0.0', 5000)
[ "flask_cors.CORS", "flask.Flask", "celery.Celery", "uuid.uuid4", "flask.request.get_json", "flask.Response", "pymongo.MongoClient", "flask.request.files.items", "os.system", "azure.storage.blob.BlockBlobService", "bson.json_util.dumps" ]
[((284, 299), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (289, 299), False, 'from flask import Flask, request, Response\n'), ((301, 310), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (305, 310), False, 'from flask_cors import CORS\n'), ((415, 511), 'azure.storage.blob.BlockBlobService', 'BlockBlobService', ([], {'account_name': '"""seismicdata"""', 'account_key': "os.environ['SPASS_DATA_BLOB_KEY']"}), "(account_name='seismicdata', account_key=os.environ[\n 'SPASS_DATA_BLOB_KEY'])\n", (431, 511), False, 'from azure.storage.blob import BlockBlobService, PublicAccess\n'), ((926, 1032), 'celery.Celery', 'Celery', (['app.name'], {'broker': "os.environ['SPASS_CELERY_BROKER']", 'backend': "os.environ['SPASS_CELERY_BROKER']"}), "(app.name, broker=os.environ['SPASS_CELERY_BROKER'], backend=os.\n environ['SPASS_CELERY_BROKER'])\n", (932, 1032), False, 'from celery import Celery\n'), ((326, 384), 'pymongo.MongoClient', 'pymongo.MongoClient', (["os.environ['SPASS_CONNECTION_STRING']"], {}), "(os.environ['SPASS_CONNECTION_STRING'])\n", (345, 384), False, 'import pymongo\n'), ((1535, 1555), 'os.system', 'os.system', (['total_cmd'], {}), '(total_cmd)\n', (1544, 1555), False, 'import os\n'), ((1561, 1611), 'os.system', 'os.system', (["('rm -rf ' + tool_name + ' ' + data_name)"], {}), "('rm -rf ' + tool_name + ' ' + data_name)\n", (1570, 1611), False, 'import os\n'), ((1654, 1699), 'os.system', 'os.system', (["('tar -czvf ' + file_name + ' *.su')"], {}), "('tar -czvf ' + file_name + ' *.su')\n", (1663, 1699), False, 'import os\n'), ((1785, 1822), 'os.system', 'os.system', (["('rm -rf *.su ' + file_name)"], {}), "('rm -rf *.su ' + file_name)\n", (1794, 1822), False, 'import os\n'), ((2223, 2243), 'flask.Response', 'Response', ([], {'status': '(200)'}), '(status=200)\n', (2231, 2243), False, 'from flask import Flask, request, Response\n'), ((2335, 2363), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (2351, 2363), 
False, 'from flask import Flask, request, Response\n'), ((3317, 3345), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (3333, 3345), False, 'from flask import Flask, request, Response\n'), ((3943, 3971), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (3959, 3971), False, 'from flask import Flask, request, Response\n'), ((4563, 4584), 'flask.request.files.items', 'request.files.items', ([], {}), '()\n', (4582, 4584), False, 'from flask import Flask, request, Response\n'), ((5050, 5082), 'os.system', 'os.system', (["('rm -rf ' + data_name)"], {}), "('rm -rf ' + data_name)\n", (5059, 5082), False, 'import os\n'), ((5438, 5459), 'flask.request.files.items', 'request.files.items', ([], {}), '()\n', (5457, 5459), False, 'from flask import Flask, request, Response\n'), ((1111, 1123), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1121, 1123), False, 'import uuid\n'), ((2491, 2556), 'flask.Response', 'Response', (["{'message': 'This email is already in use'}"], {'status': '(409)'}), "({'message': 'This email is already in use'}, status=409)\n", (2499, 2556), False, 'from flask import Flask, request, Response\n'), ((2662, 2682), 'flask.Response', 'Response', ([], {'status': '(200)'}), '(status=200)\n', (2670, 2682), False, 'from flask import Flask, request, Response\n'), ((2710, 2730), 'flask.Response', 'Response', ([], {'status': '(409)'}), '(status=409)\n', (2718, 2730), False, 'from flask import Flask, request, Response\n'), ((2829, 2857), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (2845, 2857), False, 'from flask import Flask, request, Response\n'), ((2993, 3013), 'flask.Response', 'Response', ([], {'status': '(200)'}), '(status=200)\n', (3001, 3013), False, 'from flask import Flask, request, Response\n'), ((3041, 3061), 'flask.Response', 'Response', ([], {'status': '(404)'}), '(status=404)\n', (3049, 3061), False, 'from flask import 
Flask, request, Response\n'), ((3197, 3210), 'bson.json_util.dumps', 'dumps', (['result'], {}), '(result)\n', (3202, 3210), False, 'from bson.json_util import dumps, loads\n'), ((3609, 3629), 'flask.Response', 'Response', ([], {'status': '(401)'}), '(status=401)\n', (3617, 3629), False, 'from flask import Flask, request, Response\n'), ((3822, 3843), 'bson.json_util.dumps', 'dumps', (["result['args']"], {}), "(result['args'])\n", (3827, 3843), False, 'from bson.json_util import dumps, loads\n'), ((4194, 4212), 'bson.json_util.dumps', 'dumps', (['all_results'], {}), '(all_results)\n', (4199, 4212), False, 'from bson.json_util import dumps, loads\n'), ((4354, 4371), 'bson.json_util.dumps', 'dumps', (['all_status'], {}), '(all_status)\n', (4359, 4371), False, 'from bson.json_util import dumps, loads\n'), ((3505, 3525), 'flask.Response', 'Response', ([], {'status': '(200)'}), '(status=200)\n', (3513, 3525), False, 'from flask import Flask, request, Response\n'), ((3561, 3581), 'flask.Response', 'Response', ([], {'status': '(401)'}), '(status=401)\n', (3569, 3581), False, 'from flask import Flask, request, Response\n')]
import enum import time from collections import namedtuple from dataclasses import dataclass, field from typing import List, Dict, Any, Union, Tuple, Sequence, Callable, Optional import gym import numpy as np from malib.utils.notations import deprecated """ Rename and definition of basic data types which are correspond to the inputs (args, kwargs) """ PolicyConfig = Dict[str, Any] MetaPolicyConfig = Tuple[gym.spaces.Space, gym.spaces.Space, Sequence[PolicyConfig]] EnvConfig = Dict[str, Any] RolloutConfig = Dict[str, Any] ParameterLibConfig = Dict[str, Any] DatasetConfig = Dict[str, Any] TrainingConfig = Dict[str, Any] ModelConfig = Dict[str, Any] AgentConfig = Dict[str, TrainingConfig] AgentID = str PolicyID = str EnvID = str EpisodeID = str DataBlockID = str DataTransferType = np.ndarray EnvObservationType = Any # next_observation, rewards, done, infos StandardEnvReturns = Tuple[ Dict[str, DataTransferType], Dict[str, float], Dict[str, bool], Dict[str, Any], ] # TODO(ming): mute info temporally to avoid data transferring errors StandardTransition = namedtuple( # "StandardTransition", "obs, new_obs, action, reward, done, info" "StandardTransition", "obs, new_obs, actions, rewards, dones", ) ObservationSpaceType = gym.spaces.Space ActionSpaceType = gym.spaces.Space """ For task categorical and status tagging """ class TaskType(enum.Enum): ASYNC_LEARNING = "async_learning" ADD_WORKER = "add_worker" SAVE_MODEL = "save_model" LOAD_MODEL = "load_model" OPTIMIZE = "optimization" ROLLOUT = "rollout" UPDATE_PARAMETER = "update_PARAMETER" PULL_PARAMETER = "pull_parameter" PUSH_PARAMETER = "push_parameter" SAMPLE_BATCH = "sample_batch" PUSH_SAMPLES = "push_samples" NO = "no" TRAINING_EVALUATE = "evaluate_for_training" ROLLOUT_EVALUATE = "evaluate_for_rollouts" ADD_POLICY = "add_policy" UPDATE_POPULATION = "update_population" EVALUATE = "evaluate" EVALUATE_WRITE_BACK = "evaluate_write_back" INIT = "initialization" CHECK_ADD = "check_add" TERMINATE = "terminate" SIMULATION = 
"simulation" UPDATE_PAYOFFTABLE = "update_payofftable" class Status(enum.Enum): TERMINATE = "terminate" NORMAL = "normal" LOCKED = "locked" WAITING = "waiting" SUCCESS = "success" IDLE = "idle" IN_PROGRESS = "in progress" EXCEED = "exceed" FAILED = "failed" class Paradigm(enum.Enum): MARL = "marl" META_GAME = "meta_game" class BehaviorMode(enum.IntEnum): """Behavior mode, indicates environment agent behavior""" EXPLORATION = 0 """Trigger exploration mode""" EXPLOITATION = 1 """Trigger exploitation mode""" class MetricType: REWARD = "reward" """Reward""" LIVE_STEP = "live_step" """Agent live step""" REACH_MAX_STEP = "reach_max_step" """Whether reach max step or not""" Parameter = Any """ Description: """ @dataclass class ParameterDescription: class Type: PARAMETER = "parameter" GRADIENT = "gradient" time_stamp: float identify: str # meta policy id env_id: str id: PolicyID type: str = Type.PARAMETER lock: bool = False description: Any = None data: Parameter = None parallel_num: int = 1 version: int = -1 @classmethod def gen_template(cls, **kwargs): return cls( time_stamp=time.time(), identify=kwargs.get("identify", None), id=kwargs["id"], lock=kwargs.get("lock", True), env_id=kwargs.get("env_id", "test"), type=kwargs.get("type", cls.Type.PARAMETER), data=kwargs.get("data", None), description=kwargs.get( "description", { "registered_name": "test", "observation_space": None, "action_space": None, "model_config": {}, "custom_config": {}, }, ), ) @dataclass class MetaParameterDescription: meta_pid: PolicyID parameter_desc_dict: Dict[PolicyID, ParameterDescription] timestamp: float = time.time() identify: str = "MetaParameterDescription" # meta policy id def __post_init__(self): self.identify = f"{self.identify}_mpid_{self.meta_pid}_{self.timestamp}" @classmethod def gen_template(cls, **kwargs): return cls( meta_pid=kwargs["meta_pid"], parameter_desc_dict={ k: ParameterDescription.gen_template(id=k) for k in kwargs["pids"] }, ) @dataclass class BufferDescription: env_id: 
str agent_id: Union[AgentID, List[AgentID]] policy_id: Union[PolicyID, List[PolicyID]] batch_size: int = 0 sample_mode: str = "" indices: List[int] = None data: Any = None data_shapes: Dict[str, Tuple] = None sample_start_size: int = 0 capacity: int = 1000 identify: str = None def __post_init__(self): if self.identify is None: self.identify = "_".join(sorted(self.agent_id)) def __str__(self): return "<BufferDescription: agent_id={} policy_id={}".format( self.agent_id, self.policy_id ) @dataclass class AgentInvolveInfo: """`AgentInvolveInfo` describes the trainable pairs, populations, environment id and the meta parameter descriptions. """ training_handler: str trainable_pairs: Dict[AgentID, Tuple[PolicyID, PolicyConfig]] """ describe the environment agent id and their binding policy configuration """ populations: Dict[AgentID, Sequence[Tuple[PolicyID, PolicyConfig]]] """ describe the policy population of agents """ env_id: str = None """ environment id """ meta_parameter_desc_dict: Dict[AgentID, MetaParameterDescription] = None """ meta parameter description """ @classmethod def gen_template( cls, agent_ids: List[AgentID], observation_space: gym.Space, action_space: gym.Space, ): example_ptup = ( "policy_0", { "registered_name": "test", "observation_space": observation_space, "action_space": action_space, "mode_config": None, "custom_config": None, }, ) return cls( training_handler="test", trainable_pairs=dict.fromkeys(agent_ids, example_ptup), populations=dict.fromkeys(agent_ids, [example_ptup]), env_id="test", meta_parameter_desc_dict=dict.fromkeys( agent_ids, MetaParameterDescription.gen_template(meta_pid=None, pids=["policy_0"]), ), ) @dataclass class TrainingDescription: agent_involve_info: AgentInvolveInfo stopper: str = "none" stopper_config: Dict[str, Any] = field(default_factory=dict) policy_distribution: Dict[AgentID, Dict[PolicyID, float]] = None update_interval: int = 1 batch_size: int = 64 mode: str = "step" time_stamp: float = time.time() @classmethod 
def gen_template(cls, **template_attr_kwargs): raise NotImplementedError @dataclass class RolloutDescription: agent_involve_info: AgentInvolveInfo fragment_length: int num_episodes: int episode_seg: int terminate_mode: str mode: str # on_policy or off_policy or imitation learning ? # parameter_desc_seq: Sequence[MetaParameterDescription] = None callback: Union[str, Callable] = "sequential" stopper: str = "none" stopper_config: Dict[str, Any] = field(default_factory=dict) policy_distribution: Dict[AgentID, Dict[PolicyID, float]] = None time_stamp: float = time.time() @classmethod def gen_template(cls, **template_attr_kwargs): agent_involve_info_kwargs = template_attr_kwargs.pop("agent_involve_info") instance = cls( agent_involve_info=AgentInvolveInfo.gen_template( **agent_involve_info_kwargs ), policy_distribution=dict.fromkeys( agent_involve_info_kwargs["agent_ids"], {"policy_0": 1.0} ), **template_attr_kwargs, ) template_attr_kwargs["agent_involve_info"] = agent_involve_info_kwargs return instance @dataclass class SimulationDescription: agent_involve_info: AgentInvolveInfo policy_combinations: List[Dict[AgentID, Tuple[PolicyID, PolicyConfig]]] num_episodes: int callback: Union[str, Callable] = "sequential" max_episode_length: int = None time_stamp: float = time.time() @classmethod def gen_template(cls, **kwargs): agent_involve_template_attrs = kwargs.pop("agent_involve_info") instance = cls( agent_involve_info=AgentInvolveInfo.gen_template( **agent_involve_template_attrs ), **kwargs, ) kwargs["agent_involve_info"] = agent_involve_template_attrs return instance @dataclass class TrainingFeedback: agent_involve_info: AgentInvolveInfo statistics: Dict[AgentID, Any] @dataclass class RolloutFeedback: """RolloutFeedback for rollout tasks""" worker_idx: str """id of rollout worker""" agent_involve_info: AgentInvolveInfo """agent involve info describes the ...""" statistics: Dict[str, Any] policy_combination: Dict[PolicyID, PolicyID] = None def __post_init__(self): pass # 
for res in self.statistics.values(): # for k, v in res.items(): # if isinstance(v, MetricEntry): # res[k] = v.value @deprecated @dataclass class EvaluationFeedback: # env_id: str agent_involve_info: AgentInvolveInfo statistics: Dict[PolicyID, Dict[str, Any]] policy_combination: Dict[PolicyID, Tuple[PolicyID, PolicyConfig]] @dataclass class TaskDescription: """TaskDescription is a general description of Training, Rollout and Simulation tasks. """ task_type: TaskType """task type used to identify which task description will be used""" content: Union[TrainingDescription, RolloutDescription, SimulationDescription] """content is a detailed task description entity""" state_id: Any timestamp: float = None source_task_id: str = None identify: str = None def __post_init__(self): timestamp = time.time() self.timestamp = timestamp if self.task_type == TaskType.OPTIMIZE: prefix = "TrainingDescription" elif self.task_type == TaskType.ROLLOUT: prefix = "RolloutDescription" elif self.task_type == TaskType.SIMULATION: prefix = "SimulationDescription" else: prefix = "UnknowDescription" self.identify = f"{prefix}_{timestamp}" @classmethod def gen_template(cls, **template_attr_kwargs): task_type = template_attr_kwargs["task_type"] if task_type == TaskType.OPTIMIZE: desc_cls = TrainingDescription elif task_type == TaskType.ROLLOUT: desc_cls = RolloutDescription elif task_type == TaskType.SIMULATION: desc_cls = SimulationDescription else: raise ValueError("Unknow task type: {}".format(task_type)) content_template_attr_kwargs = template_attr_kwargs.pop("content") instance = cls( content=desc_cls.gen_template(**content_template_attr_kwargs), **template_attr_kwargs, ) template_attr_kwargs["content"] = content_template_attr_kwargs return instance @dataclass class TaskRequest: """TaskRequest is a description of""" task_type: TaskType """defines the requested task type""" content: Any """content is the feedback of current handler which request for next task""" state_id: str timestamp: float 
= None # time.time() identify: str = None computing_mode: str = "bulk_sync" # bulk_sync, async def __post_init__(self): assert self.state_id, "State id cannot be None" timestamp = time.time() self.timestamp = timestamp self.identify = f"TaskRequest_{timestamp}" @staticmethod def from_task_desc(task_desc: TaskDescription, **kwargs) -> "TaskRequest": return TaskRequest( task_type=kwargs.get("task_type", task_desc.task_type), content=kwargs.get("content", task_desc.content), state_id=kwargs.get("state_id", task_desc.state_id), timestamp=kwargs.get("timestamp", None), identify=kwargs.get("identify", None), ) class BColors: HEADER = "\033[95m" OKBLUE = "\033[94m" OKCYAN = "\033[96m" OKGREEN = "\033[92m" WARNING = "\033[93m" FAIL = "\033[91m" ENDC = "\033[0m" BOLD = "\033[1m" UNDERLINE = "\033[4m" class EvaluateResult: CONVERGED = "converged" AVE_REWARD = "average_reward" REACHED_MAX_ITERATION = "reached_max_iteration" @staticmethod def default_result(): return { EvaluateResult.CONVERGED: False, EvaluateResult.AVE_REWARD: -float("inf"), EvaluateResult.REACHED_MAX_ITERATION: False, } class TrainingMetric: LOSS = "loss" @dataclass class BatchMetaInfo: episode_id: str created_time: float meta_policy_id: str = None policy_id: str = None env_id: Any = None policy_type: Any = None class ExperimentManagerTableName: primary: str = "" secondary: str = "" tag: str = "" key: int = 0 nid: int = 0 class EventReportStatus: START = "start" END = "end" # TODO(jing): add docs for MetricEntry class MetricEntry: def __init__(self, value: Any, agg: str = "mean", tag: str = "", log: bool = True): self.value = value self.agg = agg self.tag = tag self.log = log def cleaned_data(self): """Return values"""
[ "collections.namedtuple", "time.time", "dataclasses.field" ]
[((1090, 1163), 'collections.namedtuple', 'namedtuple', (['"""StandardTransition"""', '"""obs, new_obs, actions, rewards, dones"""'], {}), "('StandardTransition', 'obs, new_obs, actions, rewards, dones')\n", (1100, 1163), False, 'from collections import namedtuple\n'), ((4197, 4208), 'time.time', 'time.time', ([], {}), '()\n', (4206, 4208), False, 'import time\n'), ((6953, 6980), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (6958, 6980), False, 'from dataclasses import dataclass, field\n'), ((7151, 7162), 'time.time', 'time.time', ([], {}), '()\n', (7160, 7162), False, 'import time\n'), ((7684, 7711), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (7689, 7711), False, 'from dataclasses import dataclass, field\n'), ((7805, 7816), 'time.time', 'time.time', ([], {}), '()\n', (7814, 7816), False, 'import time\n'), ((8689, 8700), 'time.time', 'time.time', ([], {}), '()\n', (8698, 8700), False, 'import time\n'), ((10485, 10496), 'time.time', 'time.time', ([], {}), '()\n', (10494, 10496), False, 'import time\n'), ((12197, 12208), 'time.time', 'time.time', ([], {}), '()\n', (12206, 12208), False, 'import time\n'), ((3413, 3424), 'time.time', 'time.time', ([], {}), '()\n', (3422, 3424), False, 'import time\n')]
# pylint: disable=protected-access # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is licensed under the Apache License 2.0. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright 2020, CTERA Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import unittest.mock as mock import munch try: from cterasdk import CTERAException except ImportError: # pragma: no cover pass # caught by ctera_common import ansible_collections.ctera.ctera.plugins.modules.ctera_portal_syslog as ctera_portal_syslog import tests.ut.mocks.ctera_portal_base_mock as ctera_portal_base_mock from tests.ut.base import BaseTest try: from cterasdk import CTERAException except ImportError: # pragma: no cover pass # caught by ctera_common class TestCteraPortalSyslog(BaseTest): def setUp(self): super().setUp() ctera_portal_base_mock.mock_bases(self, ctera_portal_syslog.CteraPortalSyslog) def test_get_current_syslog_config(self): syslog = ctera_portal_syslog.CteraPortalSyslog() server = '192.168.0.1' port = 514 min_severity = 'info' expected_server_dict = dict( server=server, port=port, min_severity=min_severity ) syslog._ctera_portal.syslog.get_configuration = mock.MagicMock(return_value=munch.Munch(server=server, 
port=port, minSeverity=min_severity)) self.assertDictEqual(expected_server_dict, syslog._get_current_syslog_config()) def test_execute(self): syslog = ctera_portal_syslog.CteraPortalSyslog() syslog._ensure_enabled = mock.MagicMock() syslog._ensure_disabled = mock.MagicMock() for state in ['enabled', 'disabled']: syslog.parameters = dict(state=state) syslog._execute() if state == 'enabled': syslog._ensure_enabled.assert_called_once() else: syslog._ensure_disabled.assert_called_once() def test_modify(self): current_server = '192.168.0.1' new_server = '192.168.0.2' syslog = ctera_portal_syslog.CteraPortalSyslog() syslog._get_current_syslog_config = mock.MagicMock(return_value=dict(server=current_server)) syslog.parameters = dict(server=new_server) syslog._ensure_enabled() syslog._ctera_portal.syslog.modify.assert_called_once_with(server=new_server) self.assertTrue(syslog.ansible_return_value.param.changed) self.assertEqual(syslog.ansible_return_value.param.msg, 'Syslog server configuration was modified') def test_ensure_enabled(self): server = '192.168.0.1' syslog = ctera_portal_syslog.CteraPortalSyslog() syslog._get_current_syslog_config = mock.MagicMock(return_value=dict(server=server)) syslog.parameters = dict(server=server) for is_enabled in [True, False]: syslog._ctera_portal.syslog.is_enabled = mock.MagicMock(return_value=is_enabled) syslog._ensure_enabled() if is_enabled: self.assertTrue(syslog.ansible_return_value.param.skipped) self.assertEqual(syslog.ansible_return_value.param.msg, 'Syslog server config did not change') else: syslog._ctera_portal.syslog.enable.assert_called_once_with(server) self.assertTrue(syslog.ansible_return_value.param.changed) self.assertEqual(syslog.ansible_return_value.param.msg, 'Syslog server enabled') def test_ensure_disabled(self): syslog = ctera_portal_syslog.CteraPortalSyslog() for is_enabled in [True, False]: syslog._ctera_portal.syslog.is_enabled = mock.MagicMock(return_value=is_enabled) syslog._ensure_disabled() if 
is_enabled: syslog._ctera_portal.syslog.disable.assert_called_once() self.assertTrue(syslog.ansible_return_value.param.changed) self.assertEqual(syslog.ansible_return_value.param.msg, 'Syslog server disabled') else: self.assertTrue(syslog.ansible_return_value.param.skipped) self.assertEqual(syslog.ansible_return_value.param.msg, 'Syslog server is already disabled')
[ "ansible_collections.ctera.ctera.plugins.modules.ctera_portal_syslog.CteraPortalSyslog", "unittest.mock.MagicMock", "munch.Munch", "tests.ut.mocks.ctera_portal_base_mock.mock_bases" ]
[((1640, 1718), 'tests.ut.mocks.ctera_portal_base_mock.mock_bases', 'ctera_portal_base_mock.mock_bases', (['self', 'ctera_portal_syslog.CteraPortalSyslog'], {}), '(self, ctera_portal_syslog.CteraPortalSyslog)\n', (1673, 1718), True, 'import tests.ut.mocks.ctera_portal_base_mock as ctera_portal_base_mock\n'), ((1783, 1822), 'ansible_collections.ctera.ctera.plugins.modules.ctera_portal_syslog.CteraPortalSyslog', 'ctera_portal_syslog.CteraPortalSyslog', ([], {}), '()\n', (1820, 1822), True, 'import ansible_collections.ctera.ctera.plugins.modules.ctera_portal_syslog as ctera_portal_syslog\n'), ((2321, 2360), 'ansible_collections.ctera.ctera.plugins.modules.ctera_portal_syslog.CteraPortalSyslog', 'ctera_portal_syslog.CteraPortalSyslog', ([], {}), '()\n', (2358, 2360), True, 'import ansible_collections.ctera.ctera.plugins.modules.ctera_portal_syslog as ctera_portal_syslog\n'), ((2394, 2410), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2408, 2410), True, 'import unittest.mock as mock\n'), ((2445, 2461), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2459, 2461), True, 'import unittest.mock as mock\n'), ((2881, 2920), 'ansible_collections.ctera.ctera.plugins.modules.ctera_portal_syslog.CteraPortalSyslog', 'ctera_portal_syslog.CteraPortalSyslog', ([], {}), '()\n', (2918, 2920), True, 'import ansible_collections.ctera.ctera.plugins.modules.ctera_portal_syslog as ctera_portal_syslog\n'), ((3452, 3491), 'ansible_collections.ctera.ctera.plugins.modules.ctera_portal_syslog.CteraPortalSyslog', 'ctera_portal_syslog.CteraPortalSyslog', ([], {}), '()\n', (3489, 3491), True, 'import ansible_collections.ctera.ctera.plugins.modules.ctera_portal_syslog as ctera_portal_syslog\n'), ((4344, 4383), 'ansible_collections.ctera.ctera.plugins.modules.ctera_portal_syslog.CteraPortalSyslog', 'ctera_portal_syslog.CteraPortalSyslog', ([], {}), '()\n', (4381, 4383), True, 'import ansible_collections.ctera.ctera.plugins.modules.ctera_portal_syslog as 
ctera_portal_syslog\n'), ((3727, 3766), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': 'is_enabled'}), '(return_value=is_enabled)\n', (3741, 3766), True, 'import unittest.mock as mock\n'), ((4478, 4517), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': 'is_enabled'}), '(return_value=is_enabled)\n', (4492, 4517), True, 'import unittest.mock as mock\n'), ((2122, 2185), 'munch.Munch', 'munch.Munch', ([], {'server': 'server', 'port': 'port', 'minSeverity': 'min_severity'}), '(server=server, port=port, minSeverity=min_severity)\n', (2133, 2185), False, 'import munch\n')]
import math from typing import Union, Dict import numpy as np import torch import torchvision.transforms as transforms from torchvision.transforms import InterpolationMode import utility as utils import utility.color as color class Resolution: def __init__(self, width, height): self.width = width self.height = height """Class representing the width and height of an image.""" def scale_to_height(self, height: int) -> "Resolution": """Scales this resolution while maintaining the aspect ratio. Args: height (int): The desired new height Returns: a resolution with the specified height but the same aspect ratio """ width = self.width * height // self.height return Resolution(width, height) def square(self) -> "Resolution": """Returns a square version of this resolution.""" size = min(self.width, self.height) return Resolution(size, size) class ImageWrap: def __init__(self, img, space="bgr"): self.img = img self.space = space def reorder(self, input_order='HWC'): """Reorder images to 'HWC' order. If the input_order is (h, w), return (h, w, 1); If the input_order is (c, h, w), return (h, w, c); If the input_order is (h, w, c), return as it is. Args: img (ndarray): Input image. input_order (str): Whether the input order is 'HWC' or 'CHW'. If the input image shape is (h, w), input_order will not have effects. Default: 'HWC'. Returns: ndarray: reordered image. """ if input_order not in ['HWC', 'CHW']: raise ValueError(f'Wrong input_order {input_order}. 
Supported input_orders are ' "'HWC' and 'CHW'") if len(self.img.shape) == 2: self.img = self.img[..., None] if input_order == 'CHW': self.img = self.img.transpose(1, 2, 0) return self.img def reshape(self, target_shape): ih, iw = target_shape s = math.sqrt(self.img.shape[1] / (ih * iw)) shape = [self.img.shape[0], round(ih * s), round(iw * s), 3] self.img = self.img.view(*shape) \ .permute(0, 3, 1, 2).contiguous() return self.img def calc_dataset_stats(train_data): dataset_size = len(train_data.targets) total = {"R": 0, "G": 0, "B": 0} total_pixel = 0 for i, batch in enumerate(tqdm(train_data)): for img in batch["images"]: total_pixel = total_pixel + img.shape[1] * img.shape[2] total["R"] = total["R"] + torch.sum((img[0, :, :])) total["G"] = total["G"] + torch.sum((img[1, :, :])) total["B"] = total["B"] + torch.sum((img[2, :, :])) if i > len(train_data): break data_stats["mean"][0] = total["R"]/total_pixel data_stats["mean"][1] = total["G"]/total_pixel data_stats["mean"][2] = total["B"]/total_pixel for i, batch in enumerate(tqdm(train_data)): imgs = batch["images"] for img in imgs: total["R"] = total["R"] + torch.sum((img[0, :, :] - data_stats["mean"][0]) ** 2) total["G"] = total["G"] + torch.sum((img[1, :, :] - data_stats["mean"][1]) ** 2) total["B"] = total["B"] + torch.sum((img[2, :, :] - data_stats["mean"][2]) ** 2) if i > len(train_data): break data_stats["std"][0] = torch.sqrt(total["R"] / total_pixel) data_stats["std"][1]= torch.sqrt(total["G"] / total_pixel) data_stats["std"][2] = torch.sqrt(total["B"] / total_pixel) print(f'\nmeans:\n{data_stats["mean"]},std:\n{data_stats["std"]}') return data_stats def make_img_coeff(data_norm): if data_norm is None: data_norm = { 'inp': {'sub': 0, 'div': 1}, 'gt': {'sub': 0, 'div': 1} } try: result = data_norm.copy() result = utils.dict_apply(result, lambda x: utils.dict_apply(x, lambda y: torch.FloatTensor(y)) ) result['inp'] = utils.dict_apply(result['inp'], lambda x: x.view(1, -1, 1, 1)) result['gt'] = 
utils.dict_apply(result['gt'], lambda x: x.view(1, 1, -1)) if torch.cuda.is_available(): result = utils.dict_apply(result, lambda x: utils.dict_apply(x, lambda y: y.cuda()) ) return result except Exception as e: print(f"Img coeff fail:\n{e}") return data_norm.copy() def reshape(pred, target_shape): ih, iw = target_shape s = math.sqrt(pred.shape[1] / (ih * iw)) shape = [pred.shape[0], round(ih * s), round(iw * s), 3] pred = pred.view(*shape) \ .permute(0, 3, 1, 2).contiguous() return pred def reorder_image(img, input_order='HWC'): """Reorder images to 'HWC' order. If the input_order is (h, w), return (h, w, 1); If the input_order is (c, h, w), return (h, w, c); If the input_order is (h, w, c), return as it is. Args: img (ndarray): Input image. input_order (str): Whether the input order is 'HWC' or 'CHW'. If the input image shape is (h, w), input_order will not have effects. Default: 'HWC'. Returns: ndarray: reordered image. """ if input_order not in ['HWC', 'CHW']: raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' "'HWC' and 'CHW'") if len(img.shape) == 2: img = img[..., None] if input_order == 'CHW': img = img.transpose(1, 2, 0) return img def resize_fn(img, size): return transforms.ToTensor()( transforms.Resize(size, InterpolationMode.BICUBIC)( transforms.ToPILImage()(img))) def to_frequency_samples(f_img): freq = f_img.view(4, -1).permute(1, 0) return freq def to_y_channel(img): """Change to Y channel of YCbCr. Args: img (ndarray): Images with range [0, 255]. Returns: (ndarray): Images with range [0, 255] (float type) without round. """ img = img.astype(np.float32) / 255. if img.ndim == 3 and img.shape[2] == 3: img = bgr2ycbcr(img, y_only=True) img = img[..., None] return img * 255. # ---- # COLOR SPACES
[ "torchvision.transforms.ToPILImage", "math.sqrt", "torch.sqrt", "torch.cuda.is_available", "torch.sum", "torchvision.transforms.Resize", "torchvision.transforms.ToTensor", "torch.FloatTensor" ]
[((3479, 3515), 'torch.sqrt', 'torch.sqrt', (["(total['R'] / total_pixel)"], {}), "(total['R'] / total_pixel)\n", (3489, 3515), False, 'import torch\n'), ((3542, 3578), 'torch.sqrt', 'torch.sqrt', (["(total['G'] / total_pixel)"], {}), "(total['G'] / total_pixel)\n", (3552, 3578), False, 'import torch\n'), ((3606, 3642), 'torch.sqrt', 'torch.sqrt', (["(total['B'] / total_pixel)"], {}), "(total['B'] / total_pixel)\n", (3616, 3642), False, 'import torch\n'), ((4797, 4833), 'math.sqrt', 'math.sqrt', (['(pred.shape[1] / (ih * iw))'], {}), '(pred.shape[1] / (ih * iw))\n', (4806, 4833), False, 'import math\n'), ((2097, 2137), 'math.sqrt', 'math.sqrt', (['(self.img.shape[1] / (ih * iw))'], {}), '(self.img.shape[1] / (ih * iw))\n', (2106, 2137), False, 'import math\n'), ((4391, 4416), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4414, 4416), False, 'import torch\n'), ((5836, 5857), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5855, 5857), True, 'import torchvision.transforms as transforms\n'), ((5867, 5917), 'torchvision.transforms.Resize', 'transforms.Resize', (['size', 'InterpolationMode.BICUBIC'], {}), '(size, InterpolationMode.BICUBIC)\n', (5884, 5917), True, 'import torchvision.transforms as transforms\n'), ((2661, 2684), 'torch.sum', 'torch.sum', (['img[0, :, :]'], {}), '(img[0, :, :])\n', (2670, 2684), False, 'import torch\n'), ((2725, 2748), 'torch.sum', 'torch.sum', (['img[1, :, :]'], {}), '(img[1, :, :])\n', (2734, 2748), False, 'import torch\n'), ((2789, 2812), 'torch.sum', 'torch.sum', (['img[2, :, :]'], {}), '(img[2, :, :])\n', (2798, 2812), False, 'import torch\n'), ((3161, 3215), 'torch.sum', 'torch.sum', (["((img[0, :, :] - data_stats['mean'][0]) ** 2)"], {}), "((img[0, :, :] - data_stats['mean'][0]) ** 2)\n", (3170, 3215), False, 'import torch\n'), ((3254, 3308), 'torch.sum', 'torch.sum', (["((img[1, :, :] - data_stats['mean'][1]) ** 2)"], {}), "((img[1, :, :] - data_stats['mean'][1]) ** 2)\n", 
(3263, 3308), False, 'import torch\n'), ((3347, 3401), 'torch.sum', 'torch.sum', (["((img[2, :, :] - data_stats['mean'][2]) ** 2)"], {}), "((img[2, :, :] - data_stats['mean'][2]) ** 2)\n", (3356, 3401), False, 'import torch\n'), ((5931, 5954), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (5952, 5954), True, 'import torchvision.transforms as transforms\n'), ((4109, 4129), 'torch.FloatTensor', 'torch.FloatTensor', (['y'], {}), '(y)\n', (4126, 4129), False, 'import torch\n')]
from pandas import read_sql_query from sqlite3 import connect from pickle import load, dump from time import time from gensim.utils import simple_preprocess from gensim.models import Phrases from gensim.models.phrases import Phraser from gensim.parsing.preprocessing import STOPWORDS from gensim.corpora import Dictionary from gensim.models import TfidfModel, AuthorTopicModel from nltk import SnowballStemmer, WordNetLemmatizer import numpy as np np.random.seed(59) DB_NAME = 'all-the-news.db' SOURCES_FILE = 'sources.bin' YEARS_FILE = 'years.bin' NEWS_FILE = 'news.bin' PROCESSED_NEWS_FILE = 'processed-news.bin' DICTIONARY_FILE = 'dictionary.bin' TFIDF_FILE = 'tf-idf.bin' MODEL_FILE = 'model.bin' def printExecutionTime(start_time): print('Completed in {0:.4f} seconds\n'.format(time() - start_time)) def loadData(): print('Loading data...') start_time = time() try: # Loading if possible sources_file = open(SOURCES_FILE, 'rb') sources = load(sources_file) years_file = open(YEARS_FILE, 'rb') years = load(years_file) news_file = open(NEWS_FILE, 'rb') news = load(news_file) except: conn = connect(DB_NAME) df = read_sql_query('SELECT publication, year, content FROM longform WHERE content != "" AND content IS NOT NULL AND publication != "" AND publication IS NOT NULL', conn) conn.commit() conn.close() sources = dict() years = dict() news = list() for index, row in df.iterrows(): # Populating sources if row.publication not in sources.keys(): sources[row.publication] = list() sources[row.publication].append(index) # Populating years if row.year not in years.keys(): years[row.year] = list() years[row.year] = index # Populating news news.append(row.content) del df # Saving sources to file sources_file = open(SOURCES_FILE, 'wb') dump(sources, sources_file) # Saving years to file years_file = open(YEARS_FILE, 'wb') dump(years, years_file) # Saving news to file news_file = open(NEWS_FILE, 'wb') dump(news, news_file) finally: sources_file.close() years_file.close() news_file.close() 
printExecutionTime(start_time) return sources, years, news def preProcess(docs): print('Pre-processing...') start_time = time() try: # Loading if possible f = open(PROCESSED_NEWS_FILE, 'rb') processed_docs = load(f) except: stop_words = STOPWORDS stemmer = SnowballStemmer('english') lemmatizer = WordNetLemmatizer() processed_docs = [] for doc in docs: processed_doc = [] for token in simple_preprocess(doc, deacc=True): if token not in stop_words and len(token) > 2: token = lemmatizer.lemmatize(token, pos='v') #token = stemmer.stem(token) processed_doc.append(token) processed_docs.append(processed_doc) # Saving results to file f = open(PROCESSED_NEWS_FILE, 'wb') dump(processed_docs, f) finally: f.close() printExecutionTime(start_time) return processed_docs def extractDictionary(documents): print('Extracting dictionary...') start_time = time() try: # Loading if possible dictionary = Dictionary.load(DICTIONARY_FILE) except: dictionary = Dictionary(documents) dictionary.filter_extremes(no_below=200, no_above=0.8, keep_n=4000) # Saving to file dictionary.save(DICTIONARY_FILE) printExecutionTime(start_time) return dictionary def extractFeatures(documents, dictionary): print('Extracting features...') start_time = time() try: # Loading if possible f = open(TFIDF_FILE, 'rb') tfidf_corpus = load(f) except: bow_corpus = [ dictionary.doc2bow(doc) for doc in documents ] tfidf = TfidfModel(bow_corpus) tfidf_corpus = tfidf[bow_corpus] # Saving to file f = open(TFIDF_FILE, 'wb') dump(tfidf_corpus, f) finally: f.close() printExecutionTime(start_time) return tfidf_corpus def generateAuthorTopicModel(corpus, dictionary, authors): print('Generating author-topic model...') start_time = time() try: # Loading if possible model = AuthorTopicModel.load(MODEL_FILE) except: model = AuthorTopicModel( corpus, num_topics=20, id2word=dictionary, author2doc=authors ) # Saving to file model.save(MODEL_FILE) printExecutionTime(start_time) return model if __name__ == '__main__': sources, years, news 
= loadData() processed_news = preProcess(news) del news dictionary = extractDictionary(processed_news) tfidf = extractFeatures(processed_news, dictionary) del processed_news model = generateAuthorTopicModel(tfidf.corpus, dictionary, sources) del tfidf print('Topics') for idx, topic in model.print_topics(-1): print('Topic {}: {}'.format(idx, topic)) print('\nAuthors') for author in model.id2author.values(): print('{}: {}'.format(author, model.get_author_topics(author)))
[ "pandas.read_sql_query", "gensim.models.AuthorTopicModel", "gensim.models.AuthorTopicModel.load", "gensim.corpora.Dictionary.load", "pickle.dump", "sqlite3.connect", "gensim.corpora.Dictionary", "nltk.SnowballStemmer", "pickle.load", "nltk.WordNetLemmatizer", "gensim.utils.simple_preprocess", ...
[((451, 469), 'numpy.random.seed', 'np.random.seed', (['(59)'], {}), '(59)\n', (465, 469), True, 'import numpy as np\n'), ((873, 879), 'time.time', 'time', ([], {}), '()\n', (877, 879), False, 'from time import time\n'), ((2337, 2343), 'time.time', 'time', ([], {}), '()\n', (2341, 2343), False, 'from time import time\n'), ((3185, 3191), 'time.time', 'time', ([], {}), '()\n', (3189, 3191), False, 'from time import time\n'), ((3604, 3610), 'time.time', 'time', ([], {}), '()\n', (3608, 3610), False, 'from time import time\n'), ((4132, 4138), 'time.time', 'time', ([], {}), '()\n', (4136, 4138), False, 'from time import time\n'), ((972, 990), 'pickle.load', 'load', (['sources_file'], {}), '(sources_file)\n', (976, 990), False, 'from pickle import load, dump\n'), ((1044, 1060), 'pickle.load', 'load', (['years_file'], {}), '(years_file)\n', (1048, 1060), False, 'from pickle import load, dump\n'), ((1111, 1126), 'pickle.load', 'load', (['news_file'], {}), '(news_file)\n', (1115, 1126), False, 'from pickle import load, dump\n'), ((2439, 2446), 'pickle.load', 'load', (['f'], {}), '(f)\n', (2443, 2446), False, 'from pickle import load, dump\n'), ((3243, 3275), 'gensim.corpora.Dictionary.load', 'Dictionary.load', (['DICTIONARY_FILE'], {}), '(DICTIONARY_FILE)\n', (3258, 3275), False, 'from gensim.corpora import Dictionary\n'), ((3697, 3704), 'pickle.load', 'load', (['f'], {}), '(f)\n', (3701, 3704), False, 'from pickle import load, dump\n'), ((4185, 4218), 'gensim.models.AuthorTopicModel.load', 'AuthorTopicModel.load', (['MODEL_FILE'], {}), '(MODEL_FILE)\n', (4206, 4218), False, 'from gensim.models import TfidfModel, AuthorTopicModel\n'), ((1148, 1164), 'sqlite3.connect', 'connect', (['DB_NAME'], {}), '(DB_NAME)\n', (1155, 1164), False, 'from sqlite3 import connect\n'), ((1174, 1349), 'pandas.read_sql_query', 'read_sql_query', (['"""SELECT publication, year, content FROM longform WHERE content != "" AND content IS NOT NULL AND publication != "" AND publication IS NOT NULL"""', 
'conn'], {}), '(\n \'SELECT publication, year, content FROM longform WHERE content != "" AND content IS NOT NULL AND publication != "" AND publication IS NOT NULL\'\n , conn)\n', (1188, 1349), False, 'from pandas import read_sql_query\n'), ((1908, 1935), 'pickle.dump', 'dump', (['sources', 'sources_file'], {}), '(sources, sources_file)\n', (1912, 1935), False, 'from pickle import load, dump\n'), ((2008, 2031), 'pickle.dump', 'dump', (['years', 'years_file'], {}), '(years, years_file)\n', (2012, 2031), False, 'from pickle import load, dump\n'), ((2101, 2122), 'pickle.dump', 'dump', (['news', 'news_file'], {}), '(news, news_file)\n', (2105, 2122), False, 'from pickle import load, dump\n'), ((2498, 2524), 'nltk.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {}), "('english')\n", (2513, 2524), False, 'from nltk import SnowballStemmer, WordNetLemmatizer\n'), ((2542, 2561), 'nltk.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (2559, 2561), False, 'from nltk import SnowballStemmer, WordNetLemmatizer\n'), ((2992, 3015), 'pickle.dump', 'dump', (['processed_docs', 'f'], {}), '(processed_docs, f)\n', (2996, 3015), False, 'from pickle import load, dump\n'), ((3303, 3324), 'gensim.corpora.Dictionary', 'Dictionary', (['documents'], {}), '(documents)\n', (3313, 3324), False, 'from gensim.corpora import Dictionary\n'), ((3793, 3815), 'gensim.models.TfidfModel', 'TfidfModel', (['bow_corpus'], {}), '(bow_corpus)\n', (3803, 3815), False, 'from gensim.models import TfidfModel, AuthorTopicModel\n'), ((3910, 3931), 'pickle.dump', 'dump', (['tfidf_corpus', 'f'], {}), '(tfidf_corpus, f)\n', (3914, 3931), False, 'from pickle import load, dump\n'), ((4241, 4320), 'gensim.models.AuthorTopicModel', 'AuthorTopicModel', (['corpus'], {'num_topics': '(20)', 'id2word': 'dictionary', 'author2doc': 'authors'}), '(corpus, num_topics=20, id2word=dictionary, author2doc=authors)\n', (4257, 4320), False, 'from gensim.models import TfidfModel, AuthorTopicModel\n'), ((792, 798), 
'time.time', 'time', ([], {}), '()\n', (796, 798), False, 'from time import time\n'), ((2652, 2686), 'gensim.utils.simple_preprocess', 'simple_preprocess', (['doc'], {'deacc': '(True)'}), '(doc, deacc=True)\n', (2669, 2686), False, 'from gensim.utils import simple_preprocess\n')]
from sklearn.neighbors import KNeighborsClassifier from skmultiflow.lazy import KNNClassifier from skmultiflow.meta import LearnPPNSEClassifier from cacp import all_datasets, run_experiment, ClassificationDataset from cacp_examples.classifiers import CLASSIFIERS from cacp_examples.example_custom_classifiers.xgboost import XGBoost if __name__ == '__main__': # you can specify datasets by name, all of them will be automatically downloaded experimental_datasets_example = [ ClassificationDataset('iris'), ClassificationDataset('wisconsin'), ClassificationDataset('pima'), ClassificationDataset('sonar'), ClassificationDataset('wdbc'), ] # or use all datasets experimental_datasets = all_datasets() # same for classifiers, you can specify list of classifiers experimental_classifiers_example = [ ('KNN_3', lambda n_inputs, n_classes: KNeighborsClassifier(3)), # you can define classifiers multiple times with different parameters ('KNN_5', lambda n_inputs, n_classes: KNeighborsClassifier(5)), # you can use classifiers from any lib that # supports fit/predict methods eg. scikit-learn/scikit-multiflow ('KNNI', lambda n_inputs, n_classes: KNNClassifier(n_neighbors=3)), # you can also use wrapped algorithms from other libs or custom implementations ('XGB', lambda n_inputs, n_classes: XGBoost()), ('LPPNSEC', lambda n_inputs, n_classes: LearnPPNSEClassifier()) ] # or you can use predefined ones experimental_classifiers = CLASSIFIERS # this is how you trigger experiment run run_experiment( experimental_datasets, experimental_classifiers, results_directory='./example_result' )
[ "skmultiflow.lazy.KNNClassifier", "cacp.all_datasets", "sklearn.neighbors.KNeighborsClassifier", "skmultiflow.meta.LearnPPNSEClassifier", "cacp.run_experiment", "cacp_examples.example_custom_classifiers.xgboost.XGBoost", "cacp.ClassificationDataset" ]
[((745, 759), 'cacp.all_datasets', 'all_datasets', ([], {}), '()\n', (757, 759), False, 'from cacp import all_datasets, run_experiment, ClassificationDataset\n'), ((1641, 1746), 'cacp.run_experiment', 'run_experiment', (['experimental_datasets', 'experimental_classifiers'], {'results_directory': '"""./example_result"""'}), "(experimental_datasets, experimental_classifiers,\n results_directory='./example_result')\n", (1655, 1746), False, 'from cacp import all_datasets, run_experiment, ClassificationDataset\n'), ((492, 521), 'cacp.ClassificationDataset', 'ClassificationDataset', (['"""iris"""'], {}), "('iris')\n", (513, 521), False, 'from cacp import all_datasets, run_experiment, ClassificationDataset\n'), ((531, 565), 'cacp.ClassificationDataset', 'ClassificationDataset', (['"""wisconsin"""'], {}), "('wisconsin')\n", (552, 565), False, 'from cacp import all_datasets, run_experiment, ClassificationDataset\n'), ((575, 604), 'cacp.ClassificationDataset', 'ClassificationDataset', (['"""pima"""'], {}), "('pima')\n", (596, 604), False, 'from cacp import all_datasets, run_experiment, ClassificationDataset\n'), ((614, 644), 'cacp.ClassificationDataset', 'ClassificationDataset', (['"""sonar"""'], {}), "('sonar')\n", (635, 644), False, 'from cacp import all_datasets, run_experiment, ClassificationDataset\n'), ((654, 683), 'cacp.ClassificationDataset', 'ClassificationDataset', (['"""wdbc"""'], {}), "('wdbc')\n", (675, 683), False, 'from cacp import all_datasets, run_experiment, ClassificationDataset\n'), ((912, 935), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', (['(3)'], {}), '(3)\n', (932, 935), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1062, 1085), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', (['(5)'], {}), '(5)\n', (1082, 1085), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1258, 1286), 'skmultiflow.lazy.KNNClassifier', 'KNNClassifier', ([], {'n_neighbors': '(3)'}), '(n_neighbors=3)\n', 
(1271, 1286), False, 'from skmultiflow.lazy import KNNClassifier\n'), ((1421, 1430), 'cacp_examples.example_custom_classifiers.xgboost.XGBoost', 'XGBoost', ([], {}), '()\n', (1428, 1430), False, 'from cacp_examples.example_custom_classifiers.xgboost import XGBoost\n'), ((1481, 1503), 'skmultiflow.meta.LearnPPNSEClassifier', 'LearnPPNSEClassifier', ([], {}), '()\n', (1501, 1503), False, 'from skmultiflow.meta import LearnPPNSEClassifier\n')]
import os,sys from os import walk path='C:\\Users\\PUJITHA\\AppData\\Local\\Programs\\Python\\Python36\\Timestamps' dirs=os.listdir(path) print("list of all the contetnts in the TimeStamps Library ") print(dirs) # Adding the directory contents in list data structure f = [] for (filenames) in walk(path): f.extend(filenames) break print(f)
[ "os.listdir", "os.walk" ]
[((126, 142), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (136, 142), False, 'import os, sys\n'), ((305, 315), 'os.walk', 'walk', (['path'], {}), '(path)\n', (309, 315), False, 'from os import walk\n')]
import csv import datetime import re import json import unicodedata from django.shortcuts import render_to_response, get_object_or_404 from django.template import RequestContext from django.http import HttpResponseRedirect, HttpResponse from django.core.urlresolvers import reverse from django.contrib.auth.decorators import login_required from django.views.decorators.http import require_POST from django.contrib import messages from core.models import Person from core.notifications.email import EmailInfo from core.notifications.models import Notification from form_builder.models import Form, Field, FormResponse, AnonymousResponse from form_builder.forms import FormForm, FieldForm, FieldFormSet, ResponseForm # Helper functions def create_templates(): templates = [(field_type, FieldForm(field_type=field_type, prefix="field_set-{{ i }}")) for (field_type, _) in Field.FIELD_TYPES] return templates def add_message(*args, **kwargs): if 'fail_silently' not in kwargs: kwargs['fail_silently'] = True return messages.add_message(*args, **kwargs) def response_dict(**kwargs): return dict({ 'active_app': 'Form Builder', 'app_link': reverse('form_builder:index')}, **kwargs) def normalize(string): return unicodedata.normalize('NFKD', string).encode('ascii','ignore').strip() # Views @login_required def index(req): forms = req.user.form_set.all() return render_to_response('form_builder/index.html', response_dict(forms=forms), context_instance=RequestContext(req)) def redirect_to_index_or_form(req, form, fields, action="created"): if fields: add_message(req, messages.SUCCESS, "Your form has been " + action + ".") return HttpResponseRedirect(reverse('form_builder:index')) else: add_message(req, messages.INFO, "Your form has been " + action + ". 
" + "Before others can fill it out, you need to create some " "fields.") return HttpResponseRedirect(reverse('form_builder:edit', args=[form.slug])) @login_required def new(req): form_form = FormForm(req.POST or None) field_form_set = FieldFormSet(req.POST or None) if form_form.is_valid() and field_form_set.is_valid(): custom_form = form_form.save(commit=False) custom_form.save() custom_form.owner.add(req.user) custom_form.save() field_form_set = FieldFormSet(req.POST, instance=custom_form) if field_form_set.is_valid(): fields = field_form_set.save() return redirect_to_index_or_form(req, custom_form, fields) context = RequestContext(req) context["formaction"] = "new" context['compact_header'] = 'compact-header' return render_to_response( 'form_builder/form.html', response_dict(form=form_form, fields=field_form_set, form_action=reverse('form_builder:new'), templates=create_templates()), context_instance=context) @login_required def edit(req, id): # autosave functionality switch, in case something goes wrong # 0=off; 1=on use_form_autosave = 0 custom_form = get_object_or_404(Form, owner=req.user, slug=id) form_form = FormForm(req.POST or None, instance=custom_form) field_form_set = FieldFormSet(req.POST or None, instance=custom_form) context = RequestContext(req) context['compact_header'] = 'compact-header' context["use_form_autosave"] = use_form_autosave context['custom_form'] = custom_form if form_form.is_valid() and field_form_set.is_valid(): custom_form = form_form.save() if req.POST['owner_stub']: person = Person.objects.get(stub=req.POST.get('owner_stub', '').strip()) collab_user = person.user custom_form.owner.add(collab_user) field_form_set.save() try: # In a case where someone created a form with no questions, # field_order would be blank. This would throw up an error to the # user, but the form data saves to the database fine. 
Easiest # solution is just to ignore the error; the expectation is that # most users will only save the form after adding at least a few # questions. custom_form.set_field_order(req.POST['field_order'].split(",")) except: pass # FORM AUTOSAVE # ============= # Assuming that the form saved properly, send response data back to # the client. This includes a dictionary containing a current queryset # of fields on the form. # This is used by the client to make DOM changes to match the current state of # the form. if (use_form_autosave == 1): time_now = datetime.datetime.now().strftime('%I:%M%p') field_list = Field.objects.filter(id=custom_form.id) field_dict = {} for field_instance in field_list: field_dict[field_instance.id] = field_instance.label responsedata = { "message": "Your form was updated at " + re.sub(r'\A0', '', time_now.lower()) + ".", "formfields": field_dict, } return HttpResponse(json.dumps(responsedata), content_type="application/json") else: add_message(req, messages.SUCCESS, "Your form has been updated.") return HttpResponseRedirect(reverse('form_builder:index')) elif form_form.errors or field_form_set.errors: # this code typically executes when someone has added a form field but # has not filled in a required element, such as the label; it ensures # that the code continues to run without bombing out if (use_form_autosave == 1): field_dict = {} responsedata = { "message": "Waiting to update form...", "formfields": field_dict, } return HttpResponse(json.dumps(responsedata), content_type="application/json") return render_to_response( 'form_builder/form.html', response_dict(form=form_form, fields=field_form_set, form_action=reverse('form_builder:edit', args=[custom_form.slug]), templates=create_templates()), context_instance=context) @login_required def respond(req, id): user_form = get_object_or_404(Form, slug=id) already_responded = AnonymousResponse.objects.check_dupe(user_form.id, req.user.username) if not already_responded: if req.GET: for field in 
user_form.field_set.all(): if req.GET.has_key(field.label): field.default_value = req.GET[field.label] field.save() response_form = ResponseForm( req.POST or None, form=user_form, user=req.user) if not user_form.is_closed and response_form.is_valid(): form_response = response_form.save() #set notification title = '%s %s submitted the "%s" form' % \ (req.user.first_name, req.user.last_name, user_form) url = "/forms/results/%s/" % user_form.slug if user_form.owner.exists(): if user_form.collect_users: title = '%s %s submitted the "%s" form' % \ (req.user.first_name, req.user.last_name, user_form) text_template = 'form_respond.txt' html_template = 'form_respond.html' else: title = 'Someone submitted the "%s" form' % user_form text_template = 'form_respond_anonymous.txt' html_template = 'form_respond_anonymous.html' for o in user_form.owner.all(): if o != req.user: email_info = EmailInfo( subject=title, text_template='form_builder/email/%s' % text_template, html_template='form_builder/email/%s' % html_template, to_address=o.email ) Notification.set_notification(req.user, req.user, "submitted", user_form, o, title, url, email_info) return HttpResponseRedirect(reverse('form_builder:form_thanks', args=[form_response.pk])) return render_to_response('form_builder/respond.html', {'user_form': user_form, 'response_form': response_form}, context_instance=RequestContext(req)) else: context = RequestContext(req) context['form_title'] = user_form.title return render_to_response('form_builder/thanks.html', {}, context_instance=context) @login_required def form_thanks(req, id=None): if id: form_response = get_object_or_404(FormResponse, pk=id) return render_to_response('form_builder/thanks.html', {'form_response': form_response, 'form': form_response.form}, context_instance=RequestContext(req)) else: form_response = None return render_to_response('form_builder/thanks.html', {}, context_instance=RequestContext(req)) @login_required def results(req, id): form = 
get_object_or_404(Form, owner=req.user, slug=id) req_new = req.GET.get('new', '') response_count = form.response_set.count() new_responses = False for response in form.response_set.filter(archived=False): new_responses = form.response_set.filter(archived=False) context = RequestContext(req) context['compact_header'] = 'compact-header' if req_new != '': return render_to_response( 'form_builder/results.html', response_dict(form=form, responses=form.response_set.filter(archived=False), fields=form.field_set.all(), new=True, new_responses=new_responses, response_count=response_count), context_instance=context) else: return render_to_response( 'form_builder/results.html', response_dict(form=form, responses=form.response_set.all(), fields=form.field_set.all(), new=False, new_responses=new_responses, response_count=response_count), context_instance=context) @login_required def view_response(req, formid, resid): user_form = get_object_or_404(Form, owner=req.user, slug=formid) response_form = ResponseForm( req.POST or None, form=user_form, user=req.user) result = get_object_or_404(FormResponse, pk=resid) response_set = result.fieldresponse_set.all() for field_response in response_set: field_response.field.label = \ field_response.field.label.replace(user_form.title + ' - ', '') return render_to_response('form_builder/respond.html', response_dict(user_form=user_form, fields=user_form.field_set.all(), response_form=response_form, response=result, response_set=response_set, viewonly=True), context_instance=RequestContext(req)) @login_required def archive_result(req, id): result = get_object_or_404(FormResponse, pk=id) if result.archived is False: result.archived = True result.save() if req.META.get('HTTP_REFERER'): return HttpResponseRedirect(req.META.get('HTTP_REFERER')) else: return HttpResponseRedirect(reverse('form_builder:index')) @login_required def mark_result_as_new(req, id): result = get_object_or_404(FormResponse, pk=id) if result.archived: result.archived = False 
result.save() if req.META.get('HTTP_REFERER'): return HttpResponseRedirect(req.META.get('HTTP_REFERER')) else: return HttpResponseRedirect(reverse('form_builder:index')) @login_required def archive_all(req, id): form = get_object_or_404(Form, owner=req.user, slug=id) for response in form.response_set.filter(archived=False): response.archived = True response.save() if req.META.get('HTTP_REFERER'): return HttpResponseRedirect(req.META.get('HTTP_REFERER')) else: return HttpResponseRedirect(reverse('form_builder:index')) @login_required def results_csv(req, id): form = get_object_or_404(Form, owner=req.user, slug=id) req_new = req.GET.get('new', '') # Create the HttpResponse object with the appropriate CSV header. http_response = HttpResponse(content_type='text/csv') http_response['Content-Disposition'] = 'attachment; filename="results.csv"' writer = csv.writer(http_response) labels = [normalize(field.label) for field in form.field_set.all()] labels.insert(0, "Date/Time") if form.collect_users: labels.insert(0, "User") writer.writerow(labels) if req_new != '': for response in form.response_set.filter(archived=False): data = [normalize(field_response.value) for field_response in response.fieldresponse_set.all()] data.insert(0, response.submission_date) if form.collect_users: if response.user: data.insert(0, normalize(response.user.first_name) + ' ' + normalize(response.user.last_name)) else: data.insert(0, "anonymous") writer.writerow(data) else: for response in form.response_set.all(): # This assumes a paste operation from an MS Office product # May want to be more sophisticated about encoding data = [normalize(field_response.value) for field_response in response.fieldresponse_set.all()] data.insert(0, response.submission_date) if form.collect_users: if response.user: data.insert(0, normalize(response.user.first_name) + ' ' + normalize(response.user.last_name)) else: data.insert(0, "anonymous") writer.writerow(data) return http_response @login_required @require_POST 
def delete(req, id): form = get_object_or_404(Form, owner=req.user, slug=id) form.delete() add_message(req, messages.INFO, "Your form was deleted.") return HttpResponseRedirect(reverse('form_builder:index')) @login_required @require_POST def duplicate(req, id): form = get_object_or_404(Form, owner=req.user, slug=id) form.duplicate() add_message(req, messages.INFO, "Your form was duplicated.") return HttpResponseRedirect(reverse('form_builder:index'))
[ "form_builder.forms.FieldFormSet", "form_builder.models.Field.objects.filter", "django.http.HttpResponse", "form_builder.forms.FieldForm", "django.shortcuts.get_object_or_404", "django.template.RequestContext", "csv.writer", "django.core.urlresolvers.reverse", "json.dumps", "form_builder.forms.For...
[((1078, 1115), 'django.contrib.messages.add_message', 'messages.add_message', (['*args'], {}), '(*args, **kwargs)\n', (1098, 1115), False, 'from django.contrib import messages\n'), ((2269, 2295), 'form_builder.forms.FormForm', 'FormForm', (['(req.POST or None)'], {}), '(req.POST or None)\n', (2277, 2295), False, 'from form_builder.forms import FormForm, FieldForm, FieldFormSet, ResponseForm\n'), ((2317, 2347), 'form_builder.forms.FieldFormSet', 'FieldFormSet', (['(req.POST or None)'], {}), '(req.POST or None)\n', (2329, 2347), False, 'from form_builder.forms import FormForm, FieldForm, FieldFormSet, ResponseForm\n'), ((2786, 2805), 'django.template.RequestContext', 'RequestContext', (['req'], {}), '(req)\n', (2800, 2805), False, 'from django.template import RequestContext\n'), ((3367, 3415), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Form'], {'owner': 'req.user', 'slug': 'id'}), '(Form, owner=req.user, slug=id)\n', (3384, 3415), False, 'from django.shortcuts import render_to_response, get_object_or_404\n'), ((3432, 3480), 'form_builder.forms.FormForm', 'FormForm', (['(req.POST or None)'], {'instance': 'custom_form'}), '(req.POST or None, instance=custom_form)\n', (3440, 3480), False, 'from form_builder.forms import FormForm, FieldForm, FieldFormSet, ResponseForm\n'), ((3502, 3554), 'form_builder.forms.FieldFormSet', 'FieldFormSet', (['(req.POST or None)'], {'instance': 'custom_form'}), '(req.POST or None, instance=custom_form)\n', (3514, 3554), False, 'from form_builder.forms import FormForm, FieldForm, FieldFormSet, ResponseForm\n'), ((3569, 3588), 'django.template.RequestContext', 'RequestContext', (['req'], {}), '(req)\n', (3583, 3588), False, 'from django.template import RequestContext\n'), ((6763, 6795), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Form'], {'slug': 'id'}), '(Form, slug=id)\n', (6780, 6795), False, 'from django.shortcuts import render_to_response, get_object_or_404\n'), ((6820, 6889), 
'form_builder.models.AnonymousResponse.objects.check_dupe', 'AnonymousResponse.objects.check_dupe', (['user_form.id', 'req.user.username'], {}), '(user_form.id, req.user.username)\n', (6856, 6889), False, 'from form_builder.models import Form, Field, FormResponse, AnonymousResponse\n'), ((10051, 10099), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Form'], {'owner': 'req.user', 'slug': 'id'}), '(Form, owner=req.user, slug=id)\n', (10068, 10099), False, 'from django.shortcuts import render_to_response, get_object_or_404\n'), ((10354, 10373), 'django.template.RequestContext', 'RequestContext', (['req'], {}), '(req)\n', (10368, 10373), False, 'from django.template import RequestContext\n'), ((11379, 11431), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Form'], {'owner': 'req.user', 'slug': 'formid'}), '(Form, owner=req.user, slug=formid)\n', (11396, 11431), False, 'from django.shortcuts import render_to_response, get_object_or_404\n'), ((11453, 11514), 'form_builder.forms.ResponseForm', 'ResponseForm', (['(req.POST or None)'], {'form': 'user_form', 'user': 'req.user'}), '(req.POST or None, form=user_form, user=req.user)\n', (11465, 11514), False, 'from form_builder.forms import FormForm, FieldForm, FieldFormSet, ResponseForm\n'), ((11538, 11579), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['FormResponse'], {'pk': 'resid'}), '(FormResponse, pk=resid)\n', (11555, 11579), False, 'from django.shortcuts import render_to_response, get_object_or_404\n'), ((12383, 12421), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['FormResponse'], {'pk': 'id'}), '(FormResponse, pk=id)\n', (12400, 12421), False, 'from django.shortcuts import render_to_response, get_object_or_404\n'), ((12754, 12792), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['FormResponse'], {'pk': 'id'}), '(FormResponse, pk=id)\n', (12771, 12792), False, 'from django.shortcuts import render_to_response, get_object_or_404\n'), ((13108, 
13156), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Form'], {'owner': 'req.user', 'slug': 'id'}), '(Form, owner=req.user, slug=id)\n', (13125, 13156), False, 'from django.shortcuts import render_to_response, get_object_or_404\n'), ((13513, 13561), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Form'], {'owner': 'req.user', 'slug': 'id'}), '(Form, owner=req.user, slug=id)\n', (13530, 13561), False, 'from django.shortcuts import render_to_response, get_object_or_404\n'), ((13690, 13727), 'django.http.HttpResponse', 'HttpResponse', ([], {'content_type': '"""text/csv"""'}), "(content_type='text/csv')\n", (13702, 13727), False, 'from django.http import HttpResponseRedirect, HttpResponse\n'), ((13822, 13847), 'csv.writer', 'csv.writer', (['http_response'], {}), '(http_response)\n', (13832, 13847), False, 'import csv\n'), ((15455, 15503), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Form'], {'owner': 'req.user', 'slug': 'id'}), '(Form, owner=req.user, slug=id)\n', (15472, 15503), False, 'from django.shortcuts import render_to_response, get_object_or_404\n'), ((15714, 15762), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Form'], {'owner': 'req.user', 'slug': 'id'}), '(Form, owner=req.user, slug=id)\n', (15731, 15762), False, 'from django.shortcuts import render_to_response, get_object_or_404\n'), ((2578, 2622), 'form_builder.forms.FieldFormSet', 'FieldFormSet', (['req.POST'], {'instance': 'custom_form'}), '(req.POST, instance=custom_form)\n', (2590, 2622), False, 'from form_builder.forms import FormForm, FieldForm, FieldFormSet, ResponseForm\n'), ((7224, 7285), 'form_builder.forms.ResponseForm', 'ResponseForm', (['(req.POST or None)'], {'form': 'user_form', 'user': 'req.user'}), '(req.POST or None, form=user_form, user=req.user)\n', (7236, 7285), False, 'from form_builder.forms import FormForm, FieldForm, FieldFormSet, ResponseForm\n'), ((9283, 9302), 'django.template.RequestContext', 'RequestContext', 
(['req'], {}), '(req)\n', (9297, 9302), False, 'from django.template import RequestContext\n'), ((9366, 9442), 'django.shortcuts.render_to_response', 'render_to_response', (['"""form_builder/thanks.html"""', '{}'], {'context_instance': 'context'}), "('form_builder/thanks.html', {}, context_instance=context)\n", (9384, 9442), False, 'from django.shortcuts import render_to_response, get_object_or_404\n'), ((9526, 9564), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['FormResponse'], {'pk': 'id'}), '(FormResponse, pk=id)\n', (9543, 9564), False, 'from django.shortcuts import render_to_response, get_object_or_404\n'), ((15616, 15645), 'django.core.urlresolvers.reverse', 'reverse', (['"""form_builder:index"""'], {}), "('form_builder:index')\n", (15623, 15645), False, 'from django.core.urlresolvers import reverse\n'), ((15881, 15910), 'django.core.urlresolvers.reverse', 'reverse', (['"""form_builder:index"""'], {}), "('form_builder:index')\n", (15888, 15910), False, 'from django.core.urlresolvers import reverse\n'), ((794, 854), 'form_builder.forms.FieldForm', 'FieldForm', ([], {'field_type': 'field_type', 'prefix': '"""field_set-{{ i }}"""'}), "(field_type=field_type, prefix='field_set-{{ i }}')\n", (803, 854), False, 'from form_builder.forms import FormForm, FieldForm, FieldFormSet, ResponseForm\n'), ((1223, 1252), 'django.core.urlresolvers.reverse', 'reverse', (['"""form_builder:index"""'], {}), "('form_builder:index')\n", (1230, 1252), False, 'from django.core.urlresolvers import reverse\n'), ((1620, 1639), 'django.template.RequestContext', 'RequestContext', (['req'], {}), '(req)\n', (1634, 1639), False, 'from django.template import RequestContext\n'), ((1863, 1892), 'django.core.urlresolvers.reverse', 'reverse', (['"""form_builder:index"""'], {}), "('form_builder:index')\n", (1870, 1892), False, 'from django.core.urlresolvers import reverse\n'), ((2129, 2175), 'django.core.urlresolvers.reverse', 'reverse', (['"""form_builder:edit"""'], {'args': 
'[form.slug]'}), "('form_builder:edit', args=[form.slug])\n", (2136, 2175), False, 'from django.core.urlresolvers import reverse\n'), ((5064, 5103), 'form_builder.models.Field.objects.filter', 'Field.objects.filter', ([], {'id': 'custom_form.id'}), '(id=custom_form.id)\n', (5084, 5103), False, 'from form_builder.models import Form, Field, FormResponse, AnonymousResponse\n'), ((9979, 9998), 'django.template.RequestContext', 'RequestContext', (['req'], {}), '(req)\n', (9993, 9998), False, 'from django.template import RequestContext\n'), ((12302, 12321), 'django.template.RequestContext', 'RequestContext', (['req'], {}), '(req)\n', (12316, 12321), False, 'from django.template import RequestContext\n'), ((12659, 12688), 'django.core.urlresolvers.reverse', 'reverse', (['"""form_builder:index"""'], {}), "('form_builder:index')\n", (12666, 12688), False, 'from django.core.urlresolvers import reverse\n'), ((13022, 13051), 'django.core.urlresolvers.reverse', 'reverse', (['"""form_builder:index"""'], {}), "('form_builder:index')\n", (13029, 13051), False, 'from django.core.urlresolvers import reverse\n'), ((13427, 13456), 'django.core.urlresolvers.reverse', 'reverse', (['"""form_builder:index"""'], {}), "('form_builder:index')\n", (13434, 13456), False, 'from django.core.urlresolvers import reverse\n'), ((3071, 3098), 'django.core.urlresolvers.reverse', 'reverse', (['"""form_builder:new"""'], {}), "('form_builder:new')\n", (3078, 3098), False, 'from django.core.urlresolvers import reverse\n'), ((5492, 5516), 'json.dumps', 'json.dumps', (['responsedata'], {}), '(responsedata)\n', (5502, 5516), False, 'import json\n'), ((5715, 5744), 'django.core.urlresolvers.reverse', 'reverse', (['"""form_builder:index"""'], {}), "('form_builder:index')\n", (5722, 5744), False, 'from django.core.urlresolvers import reverse\n'), ((6529, 6582), 'django.core.urlresolvers.reverse', 'reverse', (['"""form_builder:edit"""'], {'args': '[custom_form.slug]'}), "('form_builder:edit', 
args=[custom_form.slug])\n", (6536, 6582), False, 'from django.core.urlresolvers import reverse\n'), ((8882, 8942), 'django.core.urlresolvers.reverse', 'reverse', (['"""form_builder:form_thanks"""'], {'args': '[form_response.pk]'}), "('form_builder:form_thanks', args=[form_response.pk])\n", (8889, 8942), False, 'from django.core.urlresolvers import reverse\n'), ((9234, 9253), 'django.template.RequestContext', 'RequestContext', (['req'], {}), '(req)\n', (9248, 9253), False, 'from django.template import RequestContext\n'), ((9809, 9828), 'django.template.RequestContext', 'RequestContext', (['req'], {}), '(req)\n', (9823, 9828), False, 'from django.template import RequestContext\n'), ((1308, 1345), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKD"""', 'string'], {}), "('NFKD', string)\n", (1329, 1345), False, 'import unicodedata\n'), ((4995, 5018), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5016, 5018), False, 'import datetime\n'), ((6255, 6279), 'json.dumps', 'json.dumps', (['responsedata'], {}), '(responsedata)\n', (6265, 6279), False, 'import json\n'), ((8315, 8477), 'core.notifications.email.EmailInfo', 'EmailInfo', ([], {'subject': 'title', 'text_template': "('form_builder/email/%s' % text_template)", 'html_template': "('form_builder/email/%s' % html_template)", 'to_address': 'o.email'}), "(subject=title, text_template='form_builder/email/%s' %\n text_template, html_template='form_builder/email/%s' % html_template,\n to_address=o.email)\n", (8324, 8477), False, 'from core.notifications.email import EmailInfo\n'), ((8632, 8736), 'core.notifications.models.Notification.set_notification', 'Notification.set_notification', (['req.user', 'req.user', '"""submitted"""', 'user_form', 'o', 'title', 'url', 'email_info'], {}), "(req.user, req.user, 'submitted', user_form, o,\n title, url, email_info)\n", (8661, 8736), False, 'from core.notifications.models import Notification\n')]
import asyncio from aiohttp import ClientSession from ssl_context_builder.builder.builder import SslContextBuilder from ssl_context_builder.http_impl.requests_wrapper.secure_session import RequestsSecureSession def create_strong_crypto_ctx(): builder = SslContextBuilder() ssl_ctx = builder.use_tls_1_2_and_above() \ .set_maximum_key_exchange_security_level() \ .set_cipher_type("chacha", "aes") \ .set_key_length(256) \ .set_cipher_mode("gcm") \ .build() def create_macos_tls_ctx(): builder = SslContextBuilder() ssl_ctx = builder.use_tls_1_2_and_above() \ .set_post_handshake_auth(True) \ .disable_session_ticket() \ .disable_renegotiation() \ .chek_hostname_and_common_name() \ .use_mac_os_cert('user') \ .use_mac_os_cert('system') \ .use_mac_os_cert('root') \ .build() return ssl_ctx def create_windows_tls_ctx(): builder = SslContextBuilder() ssl_ctx = builder.use_tls_1_2_and_above() \ .set_post_handshake_auth(True) \ .disable_session_ticket() \ .disable_renegotiation() \ .chek_hostname_and_common_name() \ .use_windows_os_cert('ROOT') \ .use_windows_os_cert('CA') \ .use_windows_os_cert('MY') \ .build() return ssl_ctx def secure_requests_session(): ctx = create_macos_tls_ctx() with RequestsSecureSession(ctx) as secure_session: print(secure_session.session.get("https://google.com")) def secure_requests_session_with_no_managed_context(): ctx = create_macos_tls_ctx() secure_session = RequestsSecureSession(ctx) print(secure_session.session.get("https://google.com")) def aiohttp_get(): url = "https://google.com" ctx = SslContextBuilder().use_tls_1_2_and_above().build() async def invoke_request(): async with ClientSession() as session: async with session.get(url, ssl_context=ctx) as resp: print(await resp.text()) asyncio.run(invoke_request())
[ "aiohttp.ClientSession", "ssl_context_builder.http_impl.requests_wrapper.secure_session.RequestsSecureSession", "ssl_context_builder.builder.builder.SslContextBuilder" ]
[((261, 280), 'ssl_context_builder.builder.builder.SslContextBuilder', 'SslContextBuilder', ([], {}), '()\n', (278, 280), False, 'from ssl_context_builder.builder.builder import SslContextBuilder\n'), ((552, 571), 'ssl_context_builder.builder.builder.SslContextBuilder', 'SslContextBuilder', ([], {}), '()\n', (569, 571), False, 'from ssl_context_builder.builder.builder import SslContextBuilder\n'), ((964, 983), 'ssl_context_builder.builder.builder.SslContextBuilder', 'SslContextBuilder', ([], {}), '()\n', (981, 983), False, 'from ssl_context_builder.builder.builder import SslContextBuilder\n'), ((1632, 1658), 'ssl_context_builder.http_impl.requests_wrapper.secure_session.RequestsSecureSession', 'RequestsSecureSession', (['ctx'], {}), '(ctx)\n', (1653, 1658), False, 'from ssl_context_builder.http_impl.requests_wrapper.secure_session import RequestsSecureSession\n'), ((1411, 1437), 'ssl_context_builder.http_impl.requests_wrapper.secure_session.RequestsSecureSession', 'RequestsSecureSession', (['ctx'], {}), '(ctx)\n', (1432, 1437), False, 'from ssl_context_builder.http_impl.requests_wrapper.secure_session import RequestsSecureSession\n'), ((1885, 1900), 'aiohttp.ClientSession', 'ClientSession', ([], {}), '()\n', (1898, 1900), False, 'from aiohttp import ClientSession\n'), ((1781, 1800), 'ssl_context_builder.builder.builder.SslContextBuilder', 'SslContextBuilder', ([], {}), '()\n', (1798, 1800), False, 'from ssl_context_builder.builder.builder import SslContextBuilder\n')]
import sys import random import pygame # Define Window Size SCREEN_WIDTH, SCREEN_HEIGHT = 800, 600 # Define Frames Per Second FRAMES_PER_SEC = 25 # Function: Draw a text to the screen def blit_text(screen_object, font, msg_text, color_tuple, center_tuple, antialiased=True): # Define a message in Main Menu with Color Tuple in RGB msg_font = font.render(msg_text, antialiased, color_tuple) # Find position of the message font_pos = msg_font.get_rect(center=center_tuple) # Draw (blit) the message at the calculated position screen_object.blit(msg_font , font_pos) # Function: Check which is winner def check_winner(player1_value, player2_value, screen_object, font_object, clock): frame_count = 0 while True: for event in pygame.event.get(): # If Quit Signal is received, quit the game if event.type == pygame.QUIT: pygame.quit() sys.exit() screen_object.fill((0,0,0)) if frame_count < 100: player1_message = 'Player 1: ' + str(random.randrange(1, 5+1)) blit_text(screen_object, font_object, player1_message, (255, 0, 0), (SCREEN_WIDTH//2, SCREEN_HEIGHT//2 + 40)) player2_message = 'Player 2: ' + str(random.randrange(1, 5+1)) blit_text(screen_object, font_object, player2_message, (0, 0, 255), (SCREEN_WIDTH//2, SCREEN_HEIGHT//2 + 80)) elif frame_count < 200: player1_message = 'Player 1: ' + str(player1_value + 1) blit_text(screen_object, font_object, player1_message, (255, 0, 0), (SCREEN_WIDTH//2, SCREEN_HEIGHT//2 + 40)) player2_message = 'Player 2: ' + str(player2_value + 1) blit_text(screen_object, font_object, player2_message, (0, 0, 255), (SCREEN_WIDTH//2, SCREEN_HEIGHT//2 + 80)) elif frame_count < 300: winner_message = 'Player 2 wins!' if player1_value == player2_value: winner_message = 'Draw Game!' elif player1_value > player2_value or (player1_value == 0 and player2_value == 5): winner_message = 'Player 1 wins!' 
blit_text(screen_object, font_object, winner_message, (255, 255, 255), (SCREEN_WIDTH//2, SCREEN_HEIGHT//2)) else: break # leave this while-loop and this function frame_count += 1 pygame.display.update() clock.tick(FRAMES_PER_SEC) # Function: Start the game def start_game(): # Initialize pygame library pygame.init() # Set default Font: comic sans font_object=pygame.font.SysFont('comicsans',40) # Define Window Screen Display object screen_object = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT)) # Initial a CLOCK object to tick time CLOCK = pygame.time.Clock() # initial values is_reset = True # Game's main entry print('DEBUG: about to enter the game') while True: if is_reset: # value range: 0 - 4 player1_value = 0 player1_pressed_count = 0 player2_value = 0 player2_pressed_count = 0 is_reset = False for event in pygame.event.get(): # If Quit Signal is received, quit the game if event.type == pygame.QUIT: pygame.quit() sys.exit() # If any key is pressed, go here keys_pressed = pygame.key.get_pressed() if keys_pressed[pygame.K_w]: player1_value = (player1_value + 1) % 5 player1_pressed_count += 1 if keys_pressed[pygame.K_s]: player1_value = (player1_value - 1 + 5) % 5 player1_pressed_count += 1 if keys_pressed[pygame.K_UP]: player2_value = (player2_value + 1) % 5 player2_pressed_count += 1 if keys_pressed[pygame.K_DOWN]: player2_value = (player2_value - 1 + 5) % 5 player2_pressed_count += 1 if keys_pressed[pygame.K_RETURN] and player1_pressed_count > 0 and player2_pressed_count > 0: check_winner(player1_value, player2_value, screen_object, font_object, CLOCK) is_reset = True screen_object.fill((0,0,0)) blit_text(screen_object, font_object, 'Welcome to Number Punch!', (255, 255, 255), (SCREEN_WIDTH//2, SCREEN_HEIGHT//2)) player1_message = 'Player 1, Press W to increase, S to decrease' if player1_pressed_count > 0: if player1_value % 2 == 0: player1_message = 'Player 1: @@' else: player1_message = 'Player 1: ##' blit_text(screen_object, font_object, player1_message, 
(255, 0, 0), (SCREEN_WIDTH//2, SCREEN_HEIGHT//2 + 40)) player2_message = 'Player 2, Press UP to increase, DOWN to decrease' if player2_pressed_count > 0: if player2_value % 2 == 0: player2_message = 'Player 2: @@' else: player2_message = 'Player 2: ##' blit_text(screen_object, font_object, player2_message, (0, 0, 255), (SCREEN_WIDTH//2, SCREEN_HEIGHT//2 + 80)) blit_text(screen_object, font_object, 'Press ENTER to confirm both', (255, 255, 255), (SCREEN_WIDTH//2, SCREEN_HEIGHT//2 + 120)) pygame.display.update() CLOCK.tick(FRAMES_PER_SEC) if __name__ == '__main__': # Executed when invoked directly start_game()
[ "sys.exit", "pygame.init", "pygame.quit", "pygame.event.get", "random.randrange", "pygame.display.set_mode", "pygame.key.get_pressed", "pygame.time.Clock", "pygame.display.update", "pygame.font.SysFont" ]
[((2517, 2530), 'pygame.init', 'pygame.init', ([], {}), '()\n', (2528, 2530), False, 'import pygame\n'), ((2583, 2619), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""comicsans"""', '(40)'], {}), "('comicsans', 40)\n", (2602, 2619), False, 'import pygame\n'), ((2686, 2740), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(SCREEN_WIDTH, SCREEN_HEIGHT)'], {}), '((SCREEN_WIDTH, SCREEN_HEIGHT))\n', (2709, 2740), False, 'import pygame\n'), ((2795, 2814), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (2812, 2814), False, 'import pygame\n'), ((766, 784), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (782, 784), False, 'import pygame\n'), ((2376, 2399), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2397, 2399), False, 'import pygame\n'), ((3183, 3201), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3199, 3201), False, 'import pygame\n'), ((5363, 5386), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (5384, 5386), False, 'import pygame\n'), ((3430, 3454), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (3452, 3454), False, 'import pygame\n'), ((908, 921), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (919, 921), False, 'import pygame\n'), ((938, 948), 'sys.exit', 'sys.exit', ([], {}), '()\n', (946, 948), False, 'import sys\n'), ((3317, 3330), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (3328, 3330), False, 'import pygame\n'), ((3347, 3357), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3355, 3357), False, 'import sys\n'), ((1073, 1099), 'random.randrange', 'random.randrange', (['(1)', '(5 + 1)'], {}), '(1, 5 + 1)\n', (1089, 1099), False, 'import random\n'), ((1270, 1296), 'random.randrange', 'random.randrange', (['(1)', '(5 + 1)'], {}), '(1, 5 + 1)\n', (1286, 1296), False, 'import random\n')]
# -*- coding: utf8 -*- import os import sys import logging from logging.handlers import RotatingFileHandler __all__ = ( 'get_logger', ) LOG_PATH = "/var/log/imap2gotify.log" __LOG__ = None def get_logger(path=LOG_PATH): global __LOG__ if __LOG__ is not None: return __LOG__ # Setup the log handlers to stdout and file. log = logging.getLogger('imap2gotify') log.setLevel(logging.DEBUG) formatter = logging.Formatter( '%(asctime)s | %(name)s | %(levelname)10s | %(message)s' ) handler_stdout = logging.StreamHandler(sys.stdout) handler_stdout.setLevel(logging.DEBUG) handler_stdout.setFormatter(formatter) log.addHandler(handler_stdout) if not LOG_PATH.strip(): __LOG__ = log return log handler_file = RotatingFileHandler( LOG_PATH, mode='a', maxBytes=1048576, backupCount=9, encoding='UTF-8', delay=True ) handler_file.setLevel(logging.DEBUG) handler_file.setFormatter(formatter) log.addHandler(handler_file) __LOG__ = log return log
[ "logging.getLogger", "logging.Formatter", "logging.StreamHandler", "logging.handlers.RotatingFileHandler" ]
[((394, 426), 'logging.getLogger', 'logging.getLogger', (['"""imap2gotify"""'], {}), "('imap2gotify')\n", (411, 426), False, 'import logging\n'), ((475, 550), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s | %(name)s | %(levelname)10s | %(message)s"""'], {}), "('%(asctime)s | %(name)s | %(levelname)10s | %(message)s')\n", (492, 550), False, 'import logging\n'), ((590, 623), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (611, 623), False, 'import logging\n'), ((862, 968), 'logging.handlers.RotatingFileHandler', 'RotatingFileHandler', (['LOG_PATH'], {'mode': '"""a"""', 'maxBytes': '(1048576)', 'backupCount': '(9)', 'encoding': '"""UTF-8"""', 'delay': '(True)'}), "(LOG_PATH, mode='a', maxBytes=1048576, backupCount=9,\n encoding='UTF-8', delay=True)\n", (881, 968), False, 'from logging.handlers import RotatingFileHandler\n')]
# -*- encoding: utf-8 -*- from django.http import HttpResponse from django.contrib.auth.decorators import login_required from django.shortcuts import render from django.shortcuts import redirect from django.contrib import messages from django.core.urlresolvers import reverse from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from .forms import PhotoForm from .models import Photo def hello(request): photos = Photo.objects.filter(published=True).order_by('-date') paginator = Paginator(photos, 1) page = request.GET.get('page') try: photos = paginator.page(page) except PageNotAnInteger: photos = paginator.page(1) except EmptyPage: photos = paginator.page(paginator.num_pages) return render(request, 'index.html', { 'title': u'ostatnie zdjęcia', 'photos': photos, 'paginator': paginator }) def hello_name(request, name): return HttpResponse('Hello %s!' % name) @login_required def upload(request): if request.method == 'POST': form = PhotoForm(request.POST, request.FILES) if form.is_valid(): form.instance.user = request.user form.instance.published = True form.save() messages.success(request, u'Zdjęcie załadowane') return redirect(reverse('hello')) else: messages.error(request, u'Proszę poprawić formularz') else: form = PhotoForm() return render(request, 'upload.html', { 'title': u'przesyłanie zdjęć', 'form': form })
[ "django.shortcuts.render", "django.http.HttpResponse", "django.contrib.messages.error", "django.core.urlresolvers.reverse", "django.contrib.messages.success", "django.core.paginator.Paginator" ]
[((513, 533), 'django.core.paginator.Paginator', 'Paginator', (['photos', '(1)'], {}), '(photos, 1)\n', (522, 533), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((736, 843), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', "{'title': u'ostatnie zdjęcia', 'photos': photos, 'paginator': paginator}"], {}), "(request, 'index.html', {'title': u'ostatnie zdjęcia', 'photos':\n photos, 'paginator': paginator})\n", (742, 843), False, 'from django.shortcuts import render\n'), ((910, 942), 'django.http.HttpResponse', 'HttpResponse', (["('Hello %s!' % name)"], {}), "('Hello %s!' % name)\n", (922, 942), False, 'from django.http import HttpResponse\n'), ((1365, 1442), 'django.shortcuts.render', 'render', (['request', '"""upload.html"""', "{'title': u'przesyłanie zdjęć', 'form': form}"], {}), "(request, 'upload.html', {'title': u'przesyłanie zdjęć', 'form': form})\n", (1371, 1442), False, 'from django.shortcuts import render\n'), ((1173, 1221), 'django.contrib.messages.success', 'messages.success', (['request', 'u"""Zdjęcie załadowane"""'], {}), "(request, u'Zdjęcie załadowane')\n", (1189, 1221), False, 'from django.contrib import messages\n'), ((1270, 1323), 'django.contrib.messages.error', 'messages.error', (['request', 'u"""Proszę poprawić formularz"""'], {}), "(request, u'Proszę poprawić formularz')\n", (1284, 1323), False, 'from django.contrib import messages\n'), ((1241, 1257), 'django.core.urlresolvers.reverse', 'reverse', (['"""hello"""'], {}), "('hello')\n", (1248, 1257), False, 'from django.core.urlresolvers import reverse\n')]
# -*- coding: utf-8 -*- """ @date: 2020/7/14 下午8:34 @file: anno_processor.py @author: zj @description: """ import os import glob from parseanno.anno import build_anno from parseanno.utils.logger import setup_logger class AnnoProcessor(object): """ 对标注数据进行处理,创建指定格式的训练数据 """ def __init__(self, cfg): self.parser = build_anno(cfg.ANNO.PARSER, cfg) self.creator = build_anno(cfg.ANNO.CREATOR, cfg) self.logger = setup_logger(__name__) def process(self): self.logger.info('处理原始标注数据') anno_data = self.parser.process() self.logger.info('保存指定格式数据') self.creator.save(anno_data) self.logger.info('完成')
[ "parseanno.anno.build_anno", "parseanno.utils.logger.setup_logger" ]
[((344, 376), 'parseanno.anno.build_anno', 'build_anno', (['cfg.ANNO.PARSER', 'cfg'], {}), '(cfg.ANNO.PARSER, cfg)\n', (354, 376), False, 'from parseanno.anno import build_anno\n'), ((400, 433), 'parseanno.anno.build_anno', 'build_anno', (['cfg.ANNO.CREATOR', 'cfg'], {}), '(cfg.ANNO.CREATOR, cfg)\n', (410, 433), False, 'from parseanno.anno import build_anno\n'), ((457, 479), 'parseanno.utils.logger.setup_logger', 'setup_logger', (['__name__'], {}), '(__name__)\n', (469, 479), False, 'from parseanno.utils.logger import setup_logger\n')]
import arrow from datetime import timedelta def identify_missing_sources(oh_member): missing_sources = {"oura": True, "fitbit": True} # Check data already in Open Humans. for i in oh_member.list_files(): if i["source"] == "direct-sharing-184" and i["basename"] == "oura-data.json": missing_sources.pop("oura", None) if i["basename"] == "fitbit-data.json" and i["source"] == "direct-sharing-102": missing_sources.pop("fitbit", None) # Check data imported by this app. if hasattr(oh_member, "fitbit_member"): missing_sources.pop("fitbit", None) if hasattr(oh_member, "oura_user"): missing_sources.pop("oura", None) return list(missing_sources.keys()) def get_fitbit_file(oh_member): for dfile in oh_member.list_files(): if "QF-Fitbit" in dfile["metadata"]["tags"]: return dfile["download_url"] return "" def check_update(fitbit_member): if fitbit_member.last_submitted < (arrow.now() - timedelta(hours=1)): return True return False
[ "datetime.timedelta", "arrow.now" ]
[((995, 1006), 'arrow.now', 'arrow.now', ([], {}), '()\n', (1004, 1006), False, 'import arrow\n'), ((1009, 1027), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (1018, 1027), False, 'from datetime import timedelta\n')]
import asyncio from bleak import BleakScanner def detection_callback(*args): print(args) async def run(): scanner = BleakScanner() scanner.register_detection_callback(detection_callback) await scanner.start() await asyncio.sleep(2.0) await scanner.stop() devices = await scanner.get_discovered_devices() for d in devices: print(d) loop = asyncio.get_event_loop() loop.run_until_complete(run())
[ "bleak.BleakScanner", "asyncio.get_event_loop", "asyncio.sleep" ]
[((382, 406), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (404, 406), False, 'import asyncio\n'), ((126, 140), 'bleak.BleakScanner', 'BleakScanner', ([], {}), '()\n', (138, 140), False, 'from bleak import BleakScanner\n'), ((237, 255), 'asyncio.sleep', 'asyncio.sleep', (['(2.0)'], {}), '(2.0)\n', (250, 255), False, 'import asyncio\n')]
# Created byMartin.cz # Copyright (c) <NAME>. All rights reserved. from pero.properties import * from pero import Label, LabelBox from . graphics import InGraphics class Labels(InGraphics): """ Labels container provides a simple tool to draw all given labels at once in the order defined by their 'z_index' property. By default the container makes sure the labels do not overlap each other using their bounding box. If two labels are overlapping, the one with higher 'z_index' is finally drawn. To ignore label overlaps the 'overlap' property must be set to False. All the labels having the anchor coordinates outside the frame are ignored and not drawn. In addition, labels for which the bounding box falls partially outside the the clipping frame, are automatically shifted to ensure their full visibility. Properties: items: (pero.Label,), None or UNDEF Specifies a collection of labels to draw. overlap: bool Specifies whether the labels can overlap each other (True) or should be skipped automatically if there is not enough space available (False). spacing: int, float Specifies the minimum additional space between adjacent labels. padding: int, float, (int,), (float,) or UNDEF Specifies the inner space as a single value or values for individual sides starting from top. This is used in addition to the 'clip' to shift partially visible labels. """ items = ListProperty(UNDEF, types=(Label,), dynamic=False) overlap = BoolProperty(False, dynamic=False) spacing = NumProperty(4, dynamic=False) padding = QuadProperty(5, dynamic=False) def __init__(self, **overrides): """Initializes a new instance of Grid.""" super().__init__(**overrides) self._glyph = LabelBox() def initialize(self, canvas, plot): """ This method is automatically called by parent plot to set specific properties and perform necessary initialization steps. 
""" # check if visible if not self.visible: return # set items from plot items = [] for series in plot.series: if series.visible and series.show_labels: items += series.get_labels() self.items = items def draw(self, canvas, source=UNDEF, **overrides): """Uses given canvas to draw the legend.""" # check if visible if not self.is_visible(source, overrides): return # get properties frame = self.get_property('frame', source, overrides) # update glyph self._glyph.set_properties_from(self, source=source, overrides=overrides) # draw labels self._glyph.draw(canvas, clip=frame)
[ "pero.LabelBox" ]
[((1973, 1983), 'pero.LabelBox', 'LabelBox', ([], {}), '()\n', (1981, 1983), False, 'from pero import Label, LabelBox\n')]
import pyjion import timeit from statistics import fmean def test_floats(n=10000): for y in range(n): x = 0.1 z = y * y + x - y x *= z def test_ints(n=10000): for y in range(n): x = 2 z = y * y + x - y x *= z if __name__ == "__main__": tests = (test_floats, test_ints) for test in tests: without_result = timeit.repeat(test, repeat=5, number=1000) print("{0} took {1} min, {2} max, {3} mean without Pyjion".format(str(test), min(without_result), max(without_result), fmean(without_result))) pyjion.enable() pyjion.set_optimization_level(1) with_result = timeit.repeat(test, repeat=5, number=1000) pyjion.disable() print("{0} took {1} min, {2} max, {3} mean with Pyjion".format(str(test), min(with_result), max(with_result), fmean(with_result))) delta = (abs(fmean(with_result) - fmean(without_result)) / fmean(without_result)) * 100.0 print(f"Pyjion is {delta:.2f}% faster")
[ "pyjion.set_optimization_level", "timeit.repeat", "statistics.fmean", "pyjion.enable", "pyjion.disable" ]
[((383, 425), 'timeit.repeat', 'timeit.repeat', (['test'], {'repeat': '(5)', 'number': '(1000)'}), '(test, repeat=5, number=1000)\n', (396, 425), False, 'import timeit\n'), ((585, 600), 'pyjion.enable', 'pyjion.enable', ([], {}), '()\n', (598, 600), False, 'import pyjion\n'), ((609, 641), 'pyjion.set_optimization_level', 'pyjion.set_optimization_level', (['(1)'], {}), '(1)\n', (638, 641), False, 'import pyjion\n'), ((664, 706), 'timeit.repeat', 'timeit.repeat', (['test'], {'repeat': '(5)', 'number': '(1000)'}), '(test, repeat=5, number=1000)\n', (677, 706), False, 'import timeit\n'), ((715, 731), 'pyjion.disable', 'pyjion.disable', ([], {}), '()\n', (729, 731), False, 'import pyjion\n'), ((553, 574), 'statistics.fmean', 'fmean', (['without_result'], {}), '(without_result)\n', (558, 574), False, 'from statistics import fmean\n'), ((850, 868), 'statistics.fmean', 'fmean', (['with_result'], {}), '(with_result)\n', (855, 868), False, 'from statistics import fmean\n'), ((938, 959), 'statistics.fmean', 'fmean', (['without_result'], {}), '(without_result)\n', (943, 959), False, 'from statistics import fmean\n'), ((892, 910), 'statistics.fmean', 'fmean', (['with_result'], {}), '(with_result)\n', (897, 910), False, 'from statistics import fmean\n'), ((913, 934), 'statistics.fmean', 'fmean', (['without_result'], {}), '(without_result)\n', (918, 934), False, 'from statistics import fmean\n')]
# import the required packages
import warnings
import sys

from scipy.optimize import OptimizeWarning
from PyQt5 import QtCore, QtWidgets

from ._tools import Fitter, value_to_string
from ._widgets import PlotWidget, ModelWidget, ReportWidget
from ._settings import settings
from ._version import __version__ as CFGversion


class MainWindow(QtWidgets.QMainWindow):
    """Main curvefitgui window: a plot on the left, fit controls on the right."""

    def __init__(self, afitter, xlabel, ylabel):
        super(MainWindow, self).__init__()

        # perform some initial default settings
        self.fitter = afitter                    # Fitter instance that owns data/model and performs the fit
        self.xlabel, self.ylabel = xlabel, ylabel
        self.output = (None, None)               # (fitpars, fitcov) returned to the caller on close
        self.xerrorwarning = settings['XERRORWARNING']  # show the "x-error ignored" warning only once

        self.initGUI()
        self.plotwidget.update_plot()

    def closeEvent(self, event):
        """needed to properly quit when running in IPython console / Spyder IDE"""
        QtWidgets.QApplication.quit()

    def initGUI(self):
        """Build and lay out all widgets of the main window."""
        # main GUI properties
        self.setGeometry(100, 100, 1415, 900)
        self.setWindowTitle('curvefitgui ' + CFGversion)
        self._main = QtWidgets.QWidget()
        self.setCentralWidget(self._main)

        # creating the required widgets
        self.plotwidget = PlotWidget(self.fitter.data, self.xlabel, self.ylabel)  # holds the plot
        self.modelview = ModelWidget(self.fitter.model, self.fitter.get_weightoptions())  # shows the model and allows users to set fitproperties
        self.fitbutton = QtWidgets.QPushButton('FIT', clicked = self.fit)
        self.evalbutton = QtWidgets.QPushButton('EVALUATE', clicked = self.evaluate)
        self.reportview = ReportWidget()  # shows the fitresults
        self.quitbutton = QtWidgets.QPushButton('QUIT', clicked = self.close)

        # create a layout for the buttons
        self.buttons = QtWidgets.QGroupBox()
        buttonslayout = QtWidgets.QHBoxLayout()
        buttonslayout.addWidget(self.evalbutton)
        buttonslayout.addWidget(self.fitbutton)
        self.buttons.setLayout(buttonslayout)

        # create a frame with a vertical layout to organize the modelview, fitbutton and reportview
        self.fitcontrolframe = QtWidgets.QGroupBox()
        fitcontrollayout = QtWidgets.QVBoxLayout()
        for widget in (self.modelview, self.buttons, self.reportview,
                       self.quitbutton):
            fitcontrollayout.addWidget(widget)
        self.fitcontrolframe.setLayout(fitcontrollayout)

        # putting it all together: Setup the main layout
        # a splitter lets the user resize plot vs. controls
        mainlayout = QtWidgets.QHBoxLayout(self._main)
        splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
        splitter.addWidget(self.plotwidget)
        splitter.addWidget(self.fitcontrolframe)
        mainlayout.addWidget(splitter)

    def showdialog(self, message, icon, info='', details=''):
        """ shows an info dialog

        icon is 'critical' or 'warning'; info/details fill the optional
        informative and expandable detail sections of the message box.
        """
        msg = QtWidgets.QMessageBox()
        if icon == 'critical':
            msg.setIcon(QtWidgets.QMessageBox.Critical)
        if icon == 'warning':
            msg.setIcon(QtWidgets.QMessageBox.Warning)
        msg.setText(message)
        msg.setInformativeText(info)
        msg.setWindowTitle("Message")
        msg.setDetailedText(details)
        msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
        msg.exec_()

    def set_output(self, output):
        """output should be a tuple with variables that are returned when closing the app"""
        self.output = output

    def get_output(self):
        """allows to return the currently stored output of the app when closed"""
        return self.output

    def evaluate(self):
        """ updates the model and computes the model curve with the current parameter values """
        # update the modelvalues from userinput
        try:
            self.modelview.read_values()
        except ValueError:
            self.showdialog('Not a valid input initial parameter values', 'critical')
            return None
        # evaluate: clear the report and draw the curve/residuals for the
        # current (unfitted) parameter values
        self.reportview.update_report({})
        self.plotwidget.canvas.set_fitline(self.fitter.get_curve())
        self.plotwidget.canvas.set_residuals(self.fitter.get_residuals(check=False))
        self.plotwidget.canvas.disable_results_box()
        self.plotwidget.update_plot()

    def fit(self):
        """ updates the model performs the fit and updates the widgets with the results """
        # update the modelvalues from userinput
        try:
            self.modelview.read_values()
        except ValueError:
            self.showdialog('Not a valid input initial parameter values', 'critical')
            return None

        # update fitrange (user may have dragged the range selectors)
        self.plotwidget.canvas.get_range()

        # show warning on xerror data (only the first time)
        if (self.fitter.data.xe is not None) and self.xerrorwarning:
            self.showdialog('The error in x is ignored in the fit!', 'warning')
            self.xerrorwarning = False

        # perform the fit
        with warnings.catch_warnings():
            warnings.simplefilter("error", OptimizeWarning)  # make sure the OptimizeWarning is raised as an exception
            try:
                fitpars, fitcov = self.fitter.fit()
            except (ValueError, RuntimeError, OptimizeWarning):
                self.showdialog(str(sys.exc_info()[1]), 'critical')
            else:
                # update output
                self.set_output((fitpars, fitcov))

                # update the widgets
                self.modelview.update_values()
                self.reportview.update_report(self.fitter.get_report())
                self.plotwidget.canvas.set_fitline(self.fitter.get_fitcurve())
                self.plotwidget.canvas.set_residuals(self.fitter.get_residuals())
                self.plotwidget.canvas.set_results_box(self._get_result_box_text(), 2)
                self.plotwidget.update_plot()

    def _get_result_box_text(self):
        """Build the multi-line text shown in the on-plot results box."""
        text = 'Fit results:'
        text = text + '\n' + 'weight:' + self.fitter.model.weight
        for par in self.fitter.model.fitpars:
            n = par.name
            v = par.value
            e = par.sigma
            f = par.fixed
            text = text + '\n' + value_to_string(n, v, e, f)
        return text


def execute_gui(f, xdata, ydata, xerr, yerr, p0, xlabel, ylabel, absolute_sigma, jac, showgui, **kwargs):
    """ helper function that executes the GUI with an instance of the fitter class

    If showgui is False the fit is performed directly and its result
    returned; otherwise the Qt event loop runs until the window closes and
    the (fitpars, fitcov) stored by the window is returned.
    """
    afitter = Fitter(f, xdata, ydata, xerr, yerr, p0, absolute_sigma, jac, **kwargs)
    if not showgui:
        return afitter.fit()
    # reuse an existing QApplication if one is already running (e.g. Spyder)
    if not QtWidgets.QApplication.instance():
        app = QtWidgets.QApplication([])
    else:
        app = QtWidgets.QApplication.instance()
    MyApplication = MainWindow(afitter, xlabel, ylabel)
    MyApplication.show()
    app.exec_()
    return MyApplication.get_output()
[ "PyQt5.QtWidgets.QWidget", "PyQt5.QtWidgets.QApplication.instance", "PyQt5.QtWidgets.QMessageBox", "warnings.catch_warnings", "PyQt5.QtWidgets.QHBoxLayout", "PyQt5.QtWidgets.QApplication.quit", "PyQt5.QtWidgets.QSplitter", "warnings.simplefilter", "sys.exc_info", "PyQt5.QtWidgets.QGroupBox", "Py...
[((915, 944), 'PyQt5.QtWidgets.QApplication.quit', 'QtWidgets.QApplication.quit', ([], {}), '()\n', (942, 944), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1133, 1152), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (1150, 1152), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1506, 1552), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""FIT"""'], {'clicked': 'self.fit'}), "('FIT', clicked=self.fit)\n", (1527, 1552), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1582, 1638), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""EVALUATE"""'], {'clicked': 'self.evaluate'}), "('EVALUATE', clicked=self.evaluate)\n", (1603, 1638), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1733, 1782), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""QUIT"""'], {'clicked': 'self.close'}), "('QUIT', clicked=self.close)\n", (1754, 1782), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1851, 1872), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', ([], {}), '()\n', (1870, 1872), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1897, 1920), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (1918, 1920), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2196, 2217), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', ([], {}), '()\n', (2215, 2217), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2245, 2268), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (2266, 2268), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2548, 2581), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self._main'], {}), '(self._main)\n', (2569, 2581), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2601, 2642), 'PyQt5.QtWidgets.QSplitter', 'QtWidgets.QSplitter', (['QtCore.Qt.Horizontal'], {}), '(QtCore.Qt.Horizontal)\n', (2620, 2642), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2912, 2935), 'PyQt5.QtWidgets.QMessageBox', 'QtWidgets.QMessageBox', ([], {}), 
'()\n', (2933, 2935), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((6616, 6649), 'PyQt5.QtWidgets.QApplication.instance', 'QtWidgets.QApplication.instance', ([], {}), '()\n', (6647, 6649), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((6665, 6691), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['[]'], {}), '([])\n', (6687, 6691), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((6716, 6749), 'PyQt5.QtWidgets.QApplication.instance', 'QtWidgets.QApplication.instance', ([], {}), '()\n', (6747, 6749), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((4970, 4995), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (4993, 4995), False, 'import warnings\n'), ((5009, 5056), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""', 'OptimizeWarning'], {}), "('error', OptimizeWarning)\n", (5030, 5056), False, 'import warnings\n'), ((5285, 5299), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5297, 5299), False, 'import sys\n')]
import http.server import socketserver import socket #set the process name to "chaos_server" so we can easily kill it with "pkill chaos_server" def set_proc_name(newname): from ctypes import cdll, byref, create_string_buffer libc = cdll.LoadLibrary('libc.so.6') buff = create_string_buffer(len(newname)+1) buff.value = newname.encode("ascii") libc.prctl(15, byref(buff), 0, 0, 0) set_proc_name("chaos_server") #start server on port 80 PORT = 80 Handler = http.server.SimpleHTTPRequestHandler class NoTimeWaitTCPServer(socketserver.TCPServer): """ when a socket does is shutdown dance, it ends up in a TIME-WAIT state, which can prevent rebinding on it quickly. here we say "shut up, socket", let me rebind anyways even if you're in TIME-WAIT." that will teach it. """ def server_bind(self): self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.socket.bind(self.server_address) httpd = NoTimeWaitTCPServer(("", PORT), Handler) httpd.serve_forever()
[ "ctypes.byref", "ctypes.cdll.LoadLibrary" ]
[((241, 270), 'ctypes.cdll.LoadLibrary', 'cdll.LoadLibrary', (['"""libc.so.6"""'], {}), "('libc.so.6')\n", (257, 270), False, 'from ctypes import cdll, byref, create_string_buffer\n'), ((379, 390), 'ctypes.byref', 'byref', (['buff'], {}), '(buff)\n', (384, 390), False, 'from ctypes import cdll, byref, create_string_buffer\n')]
import board import adafruit_ahtx0 from datetime import datetime html_dir = "/var/www/html/temp" data_fname = "temperature.csv" html_fname = "index.html" sensor = adafruit_ahtx0.AHTx0(board.I2C()) temp = sensor.temperature hum = sensor.relative_humidity now = datetime.now() # open data file history = [] with open(html_dir + "/" + data_fname, "r") as f: for l in f.readlines(): fields = l.split(";") dt = datetime.strptime(fields[0], "%d/%m/%y %H:%M") data = [dt, float(fields[1]), float(fields[2])] history.append(data) history.append([now, temp, hum]) t_data = [] h_data = [] dt_data= [] for h in history: t_data.append("{:.1f}".format(h[1])) h_data.append("{:.1f}".format(h[2])) dt_data.append(h[0].strftime("'%Y-%m-%d %H:%m'")) with open(html_dir + "/" + data_fname, "a") as f: dt = now.strftime("%d/%m/%y %H:%M") f.write("{};{};{}\n".format(dt, temp, hum)) head="""<html> <head> <title>Temperature</title> </head> <body>\n""" body1 = """ <div id="plotly-chart"></div> <script type=\"text/javascript\" src=\"./plotly-latest.min.js\"></script> <script type=\"text/javascript\" > var temp = { x: [""" body2="],\n y: [" body3="""], name: 'temperature', type: 'scatter' }; var hum = { x: [""" body4="],\n y: [" body5="""], yaxis: 'y2', name: 'humidity', type: 'scatter' }; var data = [temp, hum]; var layout = { title: 'Double Y Axis Example', yaxis: {title: 'Temperature'}, yaxis2: { title: 'Humidity', overlaying: 'y', side: 'right' } }; Plotly.newPlot('plotly-chart', data, layout); </script> """ tail="</body></html>\n" with open(html_dir + "/" + html_fname, "w") as f: f.write(head) f.write(body1) f.write(",".join(dt_data)) f.write(body2) f.write(",".join(t_data)) f.write(body3) f.write(",".join(dt_data)) f.write(body4) f.write(",".join(h_data)) f.write(body5) f.write(tail)
[ "datetime.datetime.strptime", "datetime.datetime.now", "board.I2C" ]
[((264, 278), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (276, 278), False, 'from datetime import datetime\n'), ((186, 197), 'board.I2C', 'board.I2C', ([], {}), '()\n', (195, 197), False, 'import board\n'), ((417, 463), 'datetime.datetime.strptime', 'datetime.strptime', (['fields[0]', '"""%d/%m/%y %H:%M"""'], {}), "(fields[0], '%d/%m/%y %H:%M')\n", (434, 463), False, 'from datetime import datetime\n')]
import os import pickle import json from typing import List def subdirs(folder: str, join: bool = True, prefix: str = None, suffix: str = None, sort: bool = True) -> List[str]: if join: l = os.path.join else: l = lambda x, y: y res = [l(folder, i) for i in os.listdir(folder) if os.path.isdir(os.path.join(folder, i)) and (prefix is None or i.startswith(prefix)) and (suffix is None or i.endswith(suffix))] if sort: res.sort() return res def subfiles(folder: str, join: bool = True, prefix: str = None, suffix: str = None, sort: bool = True) -> List[str]: if join: l = os.path.join else: l = lambda x, y: y res = [l(folder, i) for i in os.listdir(folder) if os.path.isfile(os.path.join(folder, i)) and (prefix is None or i.startswith(prefix)) and (suffix is None or i.endswith(suffix))] if sort: res.sort() return res def nifti_files(folder: str, join: bool = True, sort: bool = True) -> List[str]: return subfiles(folder, join=join, sort=sort, suffix='.nii.gz') def maybe_mkdir_p(directory: str) -> None: os.makedirs(directory, exist_ok=True) def load_pickle(file: str, mode: str = 'rb'): with open(file, mode) as f: a = pickle.load(f) return a def write_pickle(obj, file: str, mode: str = 'wb') -> None: with open(file, mode) as f: pickle.dump(obj, f) def load_json(file: str): with open(file, 'r') as f: a = json.load(f) return a def save_json(obj, file: str, indent: int = 4, sort_keys: bool = True) -> None: with open(file, 'w') as f: json.dump(obj, f, sort_keys=sort_keys, indent=indent) def pardir(path: str): return os.path.join(path, os.pardir) # I'm tired of typing these out join = os.path.join isdir = os.path.isdir isfile = os.path.isfile listdir = os.listdir makedirs = maybe_mkdir_p # I am tired of confusing those subfolders = subdirs save_pickle = write_pickle write_json = save_json
[ "os.listdir", "pickle.dump", "os.makedirs", "pickle.load", "os.path.join", "json.load", "json.dump" ]
[((1154, 1191), 'os.makedirs', 'os.makedirs', (['directory'], {'exist_ok': '(True)'}), '(directory, exist_ok=True)\n', (1165, 1191), False, 'import os\n'), ((1742, 1771), 'os.path.join', 'os.path.join', (['path', 'os.pardir'], {}), '(path, os.pardir)\n', (1754, 1771), False, 'import os\n'), ((1284, 1298), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1295, 1298), False, 'import pickle\n'), ((1414, 1433), 'pickle.dump', 'pickle.dump', (['obj', 'f'], {}), '(obj, f)\n', (1425, 1433), False, 'import pickle\n'), ((1505, 1517), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1514, 1517), False, 'import json\n'), ((1652, 1705), 'json.dump', 'json.dump', (['obj', 'f'], {'sort_keys': 'sort_keys', 'indent': 'indent'}), '(obj, f, sort_keys=sort_keys, indent=indent)\n', (1661, 1705), False, 'import json\n'), ((287, 305), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (297, 305), False, 'import os\n'), ((734, 752), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (744, 752), False, 'import os\n'), ((323, 346), 'os.path.join', 'os.path.join', (['folder', 'i'], {}), '(folder, i)\n', (335, 346), False, 'import os\n'), ((771, 794), 'os.path.join', 'os.path.join', (['folder', 'i'], {}), '(folder, i)\n', (783, 794), False, 'import os\n')]
import torch from torch.autograd import Variable batch_size = 100 row_lenth = 10 col_length = 10 if __name__ == '__main__': a = Variable(torch.randn((batch_size, row_lenth, col_length))) b = Variable(torch.randn((batch_size, row_lenth, col_length))) c = Variable(torch.randn((batch_size, row_lenth, col_length))) sum = Variable(torch.zeros((batch_size, row_lenth, col_length))) #for i in range(batch_size): # sum[i, :, :] = a[i, :, :] + b[i, :, :] # 1. addition #d = torch.add(a, 1, b) #e = torch.add(a, b) # 2. tensor multiplication #d = torch.zeros((batch_size, row_lenth, col_length)) #for i in range(batch_size): # d[i, :, :] = torch.mm(a[i, :, :], b[i, :, :]) #e = torch.matmul(a, b) #if torch.eq(d, e).sum() == 10000: # print('true') # 3. element-wise multiplication #d = torch.FloatTensor([[1, 2], [3, 4]]) #e = torch.FloatTensor([[1, 2], [3, 4]]) #print(torch.addcmul(torch.zeros((2, 2)), value=1, tensor1=d, tensor2=e)) #print(d * e) # 4. outer-product (vector, vector) #d = torch.LongTensor([1, 2, 3, 4, 5]).unsqueeze(0) #e = torch.mm(torch.t(d), d) #print(e) # 5. outer-product (matrix, matrix) #t = (batch_size, row_lenth, row_lenth) #d = torch.randn((batch_size, row_lenth)) #e = torch.randn((batch_size, row_lenth)) #f = d.unsqueeze(-1).expand(*t) * e.unsqueeze(-2).expand(*t) # (100, 10, 10) #print(f.size()) #g = torch.randn((batch_size, row_lenth, row_lenth)) #for i in range(batch_size): # g[i, :, :] = torch.mm(d[i, :].unsqueeze(1), e[i, :].unsqueeze(0)) #print(torch.eq(f, g).sum())
[ "torch.zeros", "torch.randn" ]
[((140, 188), 'torch.randn', 'torch.randn', (['(batch_size, row_lenth, col_length)'], {}), '((batch_size, row_lenth, col_length))\n', (151, 188), False, 'import torch\n'), ((204, 252), 'torch.randn', 'torch.randn', (['(batch_size, row_lenth, col_length)'], {}), '((batch_size, row_lenth, col_length))\n', (215, 252), False, 'import torch\n'), ((268, 316), 'torch.randn', 'torch.randn', (['(batch_size, row_lenth, col_length)'], {}), '((batch_size, row_lenth, col_length))\n', (279, 316), False, 'import torch\n'), ((334, 382), 'torch.zeros', 'torch.zeros', (['(batch_size, row_lenth, col_length)'], {}), '((batch_size, row_lenth, col_length))\n', (345, 382), False, 'import torch\n')]
from django.test import TestCase from django.contrib.auth import get_user_model from unittest.mock import patch from core import models class ModelTests(TestCase): """Test the core models""" def test_create_user_with_email(self): """Create User with email""" email = "<EMAIL>" password = "<PASSWORD>" user = get_user_model().objects.create_user( email=email, password=password ) self.assertEqual(user.email, email) self.assertTrue(user.check_password(password)) def test_normalize_email(self): """Normalize user email""" email = "<EMAIL>" user = get_user_model().objects.create_user(email=email, password='<PASSWORD>') self.assertEqual(user.email, email.lower()) def test_user_invalid_email(self): """Test user invalid email""" password = "<PASSWORD>" with self.assertRaises(ValueError): get_user_model().objects.create_user(None, password) def test_create_new_super_user(self): """create new super user""" email = "<EMAIL>" password = "<PASSWORD>" user = get_user_model().objects.create_superuser(email=email, password=password) self.assertTrue(user.is_superuser) self.assertTrue(user.is_staff)
[ "django.contrib.auth.get_user_model" ]
[((353, 369), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (367, 369), False, 'from django.contrib.auth import get_user_model\n'), ((669, 685), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (683, 685), False, 'from django.contrib.auth import get_user_model\n'), ((1165, 1181), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1179, 1181), False, 'from django.contrib.auth import get_user_model\n'), ((960, 976), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (974, 976), False, 'from django.contrib.auth import get_user_model\n')]