text
string
size
int64
token_count
int64
# Copyright 2016 James Hensman, alexggmatthews
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------
# Modification notice:
# This file was modified by Vincent ADAM
# ------------------------------------------
import tensorflow as tf
import numpy as np
from functools import reduce
from settings import int_type, float_type
from functions import eye


class Kern(object):
    """
    The basic kernel class. Handles input_dim and active dims, and provides a
    generic '_slice' function to implement them.
    """

    def __init__(self, input_dim, active_dims=None):
        """
        input_dim is an integer; active_dims is either an iterable of
        integers, a slice, or None.

        input_dim is the number of input dimensions to the kernel. If the
        kernel is computed on a matrix X which has more columns than
        input_dim, then by default only the first input_dim columns are used.
        If different columns are required, they may be specified by
        active_dims.

        If active_dims is None, it effectively defaults to range(input_dim),
        but we store it as a slice for efficiency.
        """
        self.input_dim = int(input_dim)
        if active_dims is None:
            self.active_dims = slice(input_dim)
        elif isinstance(active_dims, slice):
            self.active_dims = active_dims
            if active_dims.start is not None and active_dims.stop is not None and active_dims.step is not None:
                # BUG FIX: a slice object cannot be unpacked with ``*``
                # (``range(*active_dims)`` raised TypeError); pass its
                # fields to range() explicitly.
                assert len(range(active_dims.start, active_dims.stop, active_dims.step)) == input_dim  # pragma: no cover
        else:
            self.active_dims = np.array(active_dims, dtype=np.int32)
            assert len(active_dims) == input_dim
        # Quadrature resolution used by variational-expectation code.
        self.num_gauss_hermite_points = 20

    def _slice(self, X, X2):
        """
        Slice the correct dimensions for use in the kernel, as indicated by
        `self.active_dims`.

        :param X: Input 1 (NxD[xB]).
        :param X2: Input 2 (MxD), may be None.
        :return: Sliced X, X2, (N x self.input_dim [x B]), (N x self.input_dim)
        """
        if X.get_shape().ndims == 2:  # M x D
            if isinstance(self.active_dims, slice):
                X = X[:, self.active_dims]
                if X2 is not None:
                    X2 = X2[:, self.active_dims]
            else:
                # Select columns via a double transpose since tf.gather
                # works on the leading axis.
                X = tf.transpose(tf.gather(tf.transpose(X), self.active_dims))
                if X2 is not None:
                    X2 = tf.transpose(tf.gather(tf.transpose(X2), self.active_dims))
        elif X.get_shape().ndims == 3:  # M x D x B
            if isinstance(self.active_dims, slice):
                X = X[:, self.active_dims, :]
                if X2 is not None:
                    X2 = X2[:, self.active_dims]
            else:
                X = tf.transpose(tf.gather(tf.transpose(X, (1, 0, 2)), self.active_dims), (1, 0, 2))
                if X2 is not None:
                    # BUG FIX: X2 is M x D here, so the gather must select
                    # columns (dimensions), not rows — mirror the 2-D case.
                    X2 = tf.transpose(tf.gather(tf.transpose(X2), self.active_dims))
        with tf.control_dependencies([
            tf.assert_equal(tf.shape(X)[1], tf.constant(self.input_dim, dtype=int_type))
        ]):
            X = tf.identity(X)
        return X, X2

    def _slice_cov(self, cov):
        """
        Slice the correct dimensions for use in the kernel, as indicated by
        `self.active_dims` for covariance matrices. This requires slicing
        the rows *and* columns. This will also turn flattened diagonal
        matrices into a tensor of full diagonal matrices.

        :param cov: Tensor of covariance matrices (NxDxD or NxD).
        :return: N x self.input_dim x self.input_dim.
        """
        # Promote a flattened diagonal (NxD) to full matrices (NxDxD).
        cov = tf.cond(tf.equal(tf.rank(cov), 2), lambda: tf.matrix_diag(cov), lambda: cov)
        if isinstance(self.active_dims, slice):
            cov = cov[..., self.active_dims, self.active_dims]
        else:
            cov_shape = tf.shape(cov)
            covr = tf.reshape(cov, [-1, cov_shape[-1], cov_shape[-1]])
            gather1 = tf.gather(tf.transpose(covr, [2, 1, 0]), self.active_dims)
            gather2 = tf.gather(tf.transpose(gather1, [1, 0, 2]), self.active_dims)
            # BUG FIX: tf.concat_v2 was removed in TensorFlow 1.0; use
            # tf.concat with the same (values, axis) arguments.
            cov = tf.reshape(tf.transpose(gather2, [2, 0, 1]),
                             tf.concat([cov_shape[:-2],
                                        [len(self.active_dims), len(self.active_dims)]], 0))
        return cov


class Stationary(Kern):
    """
    Base class for kernels that are stationary, that is, they only depend
    on r = || x - x' ||

    This class handles 'ARD' behaviour, which stands for 'Automatic
    Relevance Determination'. This means that the kernel has one
    lengthscale per dimension, otherwise the kernel is isotropic (has a
    single lengthscale).
    """

    def __init__(self, input_dim, variance=1.0, lengthscales=1., active_dims=None):
        """
        - input_dim is the dimension of the input to the kernel
        - variance is the (initial) value for the variance parameter
        - lengthscales is the initial value for the lengthscales parameter
          (defaults to 1.0)
        - active_dims is a list of length input_dim which controls which
          columns of X are used.
        """
        Kern.__init__(self, input_dim, active_dims)
        self.lengthscales = tf.get_variable("lengthscales", [input_dim],
                                            initializer=tf.constant_initializer(lengthscales))
        self.variance = tf.get_variable("variance", [1],
                                        initializer=tf.constant_initializer(variance))

    def square_dist(self, X, X2):
        """
        Squared scaled Euclidean distance between every pair of rows.

        :param X: NxD[xB]
        :param X2: MxD
        :return: NxM[xB]
        """
        if X.get_shape().ndims == 2:  # M x D
            X = X / self.lengthscales
            Xs = tf.reduce_sum(tf.square(X), 1)
            if X2 is None:
                return -2 * tf.matmul(X, tf.transpose(X)) + \
                    tf.reshape(Xs, (-1, 1)) + tf.reshape(Xs, (1, -1))
            else:
                X2 = X2 / self.lengthscales
                X2s = tf.reduce_sum(tf.square(X2), 1)
                return -2 * tf.matmul(X, tf.transpose(X2)) + \
                    tf.reshape(Xs, (-1, 1)) + tf.reshape(X2s, (1, -1))
        elif X.get_shape().ndims == 3:  # M x D x B
            X = X / tf.expand_dims(tf.expand_dims(self.lengthscales, -1), 0)
            Xs = tf.reduce_sum(tf.square(X), 1)  # NxB
            if X2 is None:
                d = -2 * tf.matmul(tf.transpose(X, (2, 0, 1)), tf.transpose(X, (2, 1, 0))) + \
                    tf.expand_dims(tf.transpose(Xs), 1) + \
                    tf.expand_dims(tf.transpose(Xs), -1)
            else:
                # Tile X2 along the batch axis so it matches X's batches.
                shape = tf.stack([1, 1, tf.shape(X)[-1]])
                X2 = tf.tile(tf.expand_dims(X2 / self.lengthscales, -1), shape)
                X2s = tf.reduce_sum(tf.square(X2), 1)  # NxB
                d = -2 * tf.matmul(tf.transpose(X, (2, 0, 1)), tf.transpose(X2, (2, 1, 0))) + \
                    tf.expand_dims(tf.transpose(Xs), -1) + \
                    tf.expand_dims(tf.transpose(X2s), 1)
            # d is BxNxN
            return tf.transpose(d, (1, 2, 0))  # N x N x B

    def euclid_dist(self, X, X2):
        r2 = self.square_dist(X, X2)
        # Small constant keeps the sqrt gradient finite at r2 == 0.
        return tf.sqrt(r2 + 1e-12)

    def Kdiag(self, X, presliced=False):
        if X.get_shape().ndims == 2:  # M x D
            return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))
        elif X.get_shape().ndims == 3:  # M x D x B
            return tf.fill(tf.stack([tf.shape(X)[0], tf.shape(X)[-1]]), tf.squeeze(self.variance))


class RBF(Stationary):
    """
    The radial basis function (RBF) or squared exponential kernel
    """

    def K(self, X, X2=None, presliced=False):
        if not presliced:
            X, X2 = self._slice(X, X2)
        return self.variance * tf.exp(-self.square_dist(X, X2) / 2)


class PeriodicKernel(Kern):
    """
    The periodic kernel. Defined in Equation (47) of

    D.J.C.MacKay. Introduction to Gaussian processes. In C.M.Bishop, editor,
    Neural Networks and Machine Learning, pages 133--165. Springer, 1998.

    Derived using the mapping u=(cos(x), sin(x)) on the inputs.
    """

    def __init__(self, input_dim, period=1.0, variance=1.0,
                 lengthscales=1.0, active_dims=None):
        Kern.__init__(self, input_dim, active_dims)
        self.lengthscales = tf.get_variable("lengthscales", [input_dim],
                                            initializer=tf.constant_initializer(lengthscales))
        self.variance = tf.get_variable("variance", [1],
                                        initializer=tf.constant_initializer(variance))
        self.period = tf.get_variable("period", [1],
                                      initializer=tf.constant_initializer(period))

    def Kdiag(self, X, presliced=False):
        return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))

    def K(self, X, X2=None, presliced=False):
        if not presliced:
            X, X2 = self._slice(X, X2)
        if X2 is None:
            X2 = X
        # Introduce dummy dimension so we can use broadcasting
        f = tf.expand_dims(X, 1)   # now N x 1 x D
        f2 = tf.expand_dims(X2, 0)  # now 1 x M x D
        r = np.pi * (f - f2) / self.period
        r = tf.reduce_sum(tf.square(tf.sin(r) / self.lengthscales), 2)
        return self.variance * tf.exp(-0.5 * r)


class LocallyPeriodicKernel(Kern):
    """
    k(t) = var * exp ( - t^2 / len^2 ) * cos ( 2 * pi * t / per )
    """

    def __init__(self, input_dim, period=1.0, variance=1.0,
                 lengthscales=1.0, active_dims=None):
        Kern.__init__(self, input_dim, active_dims)
        self.lengthscales = tf.get_variable("lengthscales", [input_dim],
                                            initializer=tf.constant_initializer(lengthscales))
        self.variance = tf.get_variable("variance", [1],
                                        initializer=tf.constant_initializer(variance))
        self.period = tf.get_variable("period", [1],
                                      initializer=tf.constant_initializer(period))

    def Kdiag(self, X, presliced=False):
        return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))

    def K(self, X, X2=None, presliced=False):
        if not presliced:
            X, X2 = self._slice(X, X2)
        if X2 is None:
            X2 = X
        # Introduce dummy dimension so we can use broadcasting
        f = tf.expand_dims(X, 1)   # now N x 1 x D
        f2 = tf.expand_dims(X2, 0)  # now 1 x M x D
        r = tf.reduce_sum(f - f2, 2)  # hack for 1d
        return self.variance * tf.exp(-tf.square(r / self.lengthscales)) \
            * tf.cos(2. * np.pi * r / self.period)


class Combination(Kern):
    """
    Combine a list of kernels, e.g. by adding or multiplying (see inheriting
    classes).

    The names of the kernels to be combined are generated from their class
    names.
    """

    def __init__(self, kern_list):
        for k in kern_list:
            assert isinstance(k, Kern), "can only add Kern instances"
        # The combined kernel must accept at least as many columns as the
        # widest member requires.
        input_dim = np.max([k.input_dim
                            if type(k.active_dims) is slice else
                            np.max(k.active_dims) + 1
                            for k in kern_list])
        Kern.__init__(self, input_dim=input_dim)
        # add kernels to a list, flattening out instances of this class therein
        self.kern_list = kern_list


class Add(Combination):
    def K(self, X, X2=None, presliced=False):
        return reduce(tf.add, [k.K(X, X2) for k in self.kern_list])

    def Kdiag(self, X, presliced=False):
        return reduce(tf.add, [k.Kdiag(X) for k in self.kern_list])


class Prod(Combination):
    def K(self, X, X2=None, presliced=False):
        return reduce(tf.multiply, [k.K(X, X2) for k in self.kern_list])

    def Kdiag(self, X, presliced=False):
        return reduce(tf.multiply, [k.Kdiag(X) for k in self.kern_list])


class Linear(Kern):
    """
    The linear kernel
    """

    def __init__(self, input_dim, variance=1.0, active_dims=None):
        """
        - input_dim is the dimension of the input to the kernel
        - variance is the (initial) value for the variance parameter(s)
        - active_dims is a list of length input_dim which controls
          which columns of X are used.
        """
        Kern.__init__(self, input_dim, active_dims)
        self.variance = tf.get_variable("variance", [1],
                                        initializer=tf.constant_initializer(variance))

    def Kdiag(self, X, presliced=False):
        if not presliced:
            X, _ = self._slice(X, None)
        return tf.reduce_sum(tf.square(X) * self.variance, 1)

    def K(self, X, X2=None, presliced=False):
        if not presliced:
            # BUG FIX: ``_slice_batch`` does not exist on Kern; ``_slice``
            # already handles both 2-D and 3-D inputs.
            X, X2 = self._slice(X, X2)
        if X.get_shape().ndims == 2:  # M x D
            if X2 is None:
                return tf.matmul(X * self.variance, X, transpose_b=True)
            else:
                return tf.matmul(X * self.variance, X2, transpose_b=True)
        elif X.get_shape().ndims == 3:  # M x D x B
            # BUG FIX: the batched branch previously omitted the variance,
            # making the 3-D kernel inconsistent with the 2-D one.
            if X2 is None:
                return tf.einsum('ndb,mdb->nmb', X * self.variance, X)
            else:
                return tf.einsum('ndb,md->nmb', X * self.variance, X2)


class Static(Kern):
    """
    Kernels who don't depend on the value of the inputs are 'Static'.
    The only parameter is a variance.
    """

    def __init__(self, input_dim, variance=1.0, active_dims=None):
        Kern.__init__(self, input_dim, active_dims)
        self.variance = tf.get_variable("variance", [1],
                                        initializer=tf.constant_initializer(variance))

    def Kdiag(self, X, presliced=False):
        if not presliced:
            # BUG FIX: ``_slice_batch`` does not exist on Kern; use ``_slice``.
            X, _ = self._slice(X, None)
        if X.get_shape().ndims == 2:  # M x D
            return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))
        elif X.get_shape().ndims == 3:  # M x D x B
            return tf.fill(tf.stack([tf.shape(X)[0], tf.shape(X)[-1]]), tf.squeeze(self.variance))


class White(Static):
    """
    The White kernel
    """

    def K(self, X, X2=None, presliced=False):
        if X.get_shape().ndims == 2:  # M x D
            if X2 is None:
                d = tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))
                return tf.diag(d)
            else:
                shape = tf.stack([tf.shape(X)[0], tf.shape(X2)[0]])
                return tf.zeros(shape, float_type)
        elif X.get_shape().ndims == 3:  # M x D x B
            if X2 is None:
                d = tf.fill(tf.stack([tf.shape(X)[-1], tf.shape(X)[0]]), tf.squeeze(self.variance))
                return tf.transpose(tf.matrix_diag(d), (1, 2, 0))
            else:
                shape = tf.stack([tf.shape(X)[0], tf.shape(X2)[0], tf.shape(X)[-1]])
                return tf.zeros(shape, float_type)


class Constant(Static):
    """
    The constant kernel
    """

    def K(self, X, X2=None, presliced=False):
        if X.get_shape().ndims == 2:  # M x D
            if X2 is None:  # returns the prior
                shape = tf.stack([tf.shape(X)[0], tf.shape(X)[0]])
            else:
                shape = tf.stack([tf.shape(X)[0], tf.shape(X2)[0]])
        elif X.get_shape().ndims == 3:  # M x D x B
            if X2 is None:  # returns the prior
                shape = tf.stack([tf.shape(X)[0], tf.shape(X)[0], tf.shape(X)[-1]])
            else:
                shape = tf.stack([tf.shape(X)[0], tf.shape(X2)[0], tf.shape(X)[-1]])
        return tf.fill(shape, tf.squeeze(self.variance))
15,683
5,413
#!/usr/bin/env python
# Packaging configuration for the py-heat-magic IPython extension.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

from os import path

here = path.abspath(path.dirname(__file__))

# The long description published on PyPI is the project README, verbatim.
with open(path.join(here, "README.rst")) as f:
    long_description = f.read()

# Trove classifiers: audience, license, maturity, OS, supported Pythons, topics.
CLASSIFIERS = [
    "Intended Audience :: Developers",
    "Intended Audience :: Education",
    "License :: OSI Approved :: MIT License",
    "Development Status :: 3 - Alpha",
    "Operating System :: POSIX",
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3.4",
    "Programming Language :: Python :: 3.5",
    "Programming Language :: Python :: 3.6",
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules",
]

# Runtime requirements for the magic plus its plotting/profiling stack.
REQUIREMENTS = [
    "numpy",
    "scipy",
    "matplotlib",
    "ipython",
    "jupyter",
    "pandas",
    "sympy",
    "nose",
    "py-heat",
]

setup(
    name="py-heat-magic",
    version="0.0.2",
    description="py-heat as IPython magic",
    long_description=long_description,
    url="https://github.com/csurfer/pyheatmagic",
    author="Vishwas B Sharma",
    author_email="sharma.vishwas88@gmail.com",
    license="MIT",
    py_modules=["heat"],
    keywords="heatmap matplotlib profiling python IPython",
    classifiers=CLASSIFIERS,
    install_requires=REQUIREMENTS,
)
1,708
528
from datetime import datetime as dt

from .client import MeetupClient
from ..certificator import BaseCertificator
from .models import Event


class MeetupCertificator(BaseCertificator):
    """Certificate generator fed by a Meetup.com event's attendance list."""

    def __init__(self, urlname, event_id, api_key, **kwargs):
        super().__init__(**kwargs)
        self.urlname = urlname
        self.event_id = event_id
        self.client = MeetupClient(api_key=api_key)

    @property
    def certificate_data(self):
        """One ``{'name': ...}`` mapping per attendee (lazy generator)."""
        attendances = self.client.get_attendances(self.urlname, self.event_id)
        return ({'name': att['member']['name']} for att in attendances)

    @property
    def meta(self):
        """Event metadata used to render the certificates."""
        raw = self.client.get_event(self.urlname, self.event_id)
        event = Event(**raw)
        event.clean()
        venue = event.venue
        group = event.group
        return {
            'city': venue['city'],
            'date': dt.strftime(event.date, '%d/%m/%Y'),
            'full_date': event.full_date,
            'organizer': group['name'],
            'place': venue['name'],
            'title': event.name,
            'workload': event.duration,
        }
1,104
332
from os import read
import random
import sys
import pegtree as pg
import argparse
import csv
from pegtree.optimizer import optimize

# Grammar and parser are built once at import time.
peg = pg.grammar('yk.tpeg')
parse = pg.generate(peg)

parser = argparse.ArgumentParser(description='yk for Parameter Handling')
parser.add_argument('--notConv', action='store_true')   # only tokenize the Python code
parser.add_argument('--diff', action='store_true')      # distinct tokens for names (<name>) and literals (<val>)
parser.add_argument('--shuffle', action='store_true')   # assign special tokens in random order
parser.add_argument('--both', action='store_true')      # emit both shuffled and unshuffled variants
parser.add_argument('--files', nargs='*')               # input files
args = parser.parse_args()

# Pool of indices used for special tokens; --shuffle permutes it in place.
token_idx = list(range(1, 7))


def replace_as_special_parameter(s, mapped, token_idx=token_idx, tag=None):
    """Return the special token assigned to *s*, allocating a new one if needed.

    ``mapped`` accumulates assignments, e.g. {'df': '<var1>'}.  The token
    family depends on ``tag``: <nameN> for names, <valN> for literals,
    <varN> otherwise.
    """
    if s in mapped:
        return mapped[s]
    if tag == 'Name':
        x = f'<name{token_idx[len(mapped)]}>'
    elif tag == 'Value':
        x = f'<val{token_idx[len(mapped)]}>'
    else:
        x = f'<var{token_idx[len(mapped)]}>'
    mapped[s] = x
    return x


def convert_nothing(tok, doc, mapped, token_idx, diff):
    """Identity conversion: keep token text, mapping ';' to '<sep>'."""
    s = str(tok)
    if s == ';':  # only ';' is rewritten
        return '<sep>'
    return s


def convert_all(tok, doc, mapped, token_idx, diff):
    """Replace name/value tokens that also appear in *doc* with special tokens."""
    tag = tok.getTag()
    s = str(tok)
    if diff:
        if tag == 'Name':
            if s in doc:
                in_idx = [i for i, x in enumerate(doc) if x == s]
                flag = 0
                for idx in in_idx:
                    # BUG FIX (narrowed): only IndexError is expected when
                    # peeking at the following doc token; the bare except
                    # also hid unrelated failures.
                    try:
                        if '軸' in doc[idx + 1] or '座標' in doc[idx + 1]:
                            flag += 1
                    except IndexError:
                        pass
                # If every occurrence is an axis/coordinate mention, keep it.
                if len(in_idx) == flag:
                    return s
                else:
                    return replace_as_special_parameter(s, mapped, token_idx, tag='Name')
            else:
                if s.startswith('.'):
                    s = '. ' + s[1:]
                return s
        if tag == 'Value':
            if s in doc:
                return replace_as_special_parameter(s, mapped, token_idx, tag='Value')
            # Also try the literal re-quoted either way.
            s_q1 = f"'{s[1:-1]}'"
            if s_q1 in doc:
                return replace_as_special_parameter(s_q1, mapped, token_idx, tag='Value')
            s_q2 = f'"{s[1:-1]}"'
            if s_q2 in doc:
                return replace_as_special_parameter(s_q2, mapped, token_idx, tag='Value')
    else:
        if tag == 'Name':
            if s in doc:
                return replace_as_special_parameter(s, mapped, token_idx)
            else:
                if s.startswith('.'):
                    s = '. ' + s[1:]
                return s
        if tag == 'Value':
            if s in doc:
                return replace_as_special_parameter(s, mapped, token_idx)
            s_q1 = f"'{s[1:-1]}'"
            if s_q1 in doc:
                return replace_as_special_parameter(s_q1, mapped, token_idx)
            s_q2 = f'"{s[1:-1]}"'
            if s_q2 in doc:
                return replace_as_special_parameter(s_q2, mapped, token_idx)
    return convert_nothing(tok, doc, mapped, token_idx, diff)


def make(code, doc0, convert=convert_all, token_idx=token_idx, diff=False):
    """Tokenize *code* and *doc0*, substituting shared identifiers/literals.

    Returns the converted (code, doc) pair as space-joined token strings.
    """
    mapped = {}
    doc = []
    for tok in parse(doc0):
        s = str(tok)
        if tok.getTag() == 'Raw':
            # Restore whichever quoting style the code used, if any.
            q = f"'{s}'"
            q2 = f'"{s}"'
            if q in code:
                doc.append(q)
                continue
            if q2 in code:
                doc.append(q2)
                continue
        doc.append(s)
    ws = [convert(tok, doc, mapped, token_idx, diff) for tok in parse(code)]
    code = ' '.join(ws)
    ws = []
    for idx, tok in enumerate(doc):
        if tok.strip() != '':
            if tok in mapped:
                # Keep the literal token when it is an axis/coordinate
                # mention; otherwise emit its special token.
                try:
                    if '軸' in doc[idx + 1] and '座標' in doc[idx + 1]:
                        ws.append(tok)
                    else:
                        ws.append(mapped[tok])
                except IndexError:  # BUG FIX: narrowed from a bare except
                    ws.append(mapped[tok])
            else:
                ws.append(tok)
    doc = ' '.join(ws)
    return code, doc


def read_tsv(input_filename, output_filename=None):
    """Convert each (code, doc) row of a TSV file.

    :param input_filename: path of the TSV file to read.
    :param output_filename: despite the name, an already-open writable file
        object (e.g. sys.stdout); when None, rows are printed instead.
    """
    with open(input_filename) as f:
        reader = csv.reader(f, delimiter='\t')
        # BUG FIX: identity comparisons with None (PEP 8) instead of !=/==.
        if output_filename is not None:
            writer = csv.writer(output_filename, delimiter='\t')
        for row in reader:
            code0 = None
            if args.both:
                # Unshuffled variant uses a fresh, ordered index pool.
                token_idx0 = list(range(1, 7))
                code0, doc0 = make(row[0], row[1], convert=convert_all,
                                   token_idx=token_idx0, diff=args.diff)
            if args.shuffle or args.both:
                random.shuffle(token_idx)
            if args.notConv:
                code, doc = make(row[0], row[1], convert=convert_nothing,
                                 token_idx=token_idx, diff=args.diff)
            else:
                code, doc = make(row[0], row[1], convert=convert_all,
                                 token_idx=token_idx, diff=args.diff)
            if output_filename is None:
                print(code, doc)
                if code0 is not None and code0 != code:
                    print(code0, doc0)
            else:
                writer.writerow([code, doc])
                if code0 is not None and code0 != code:
                    writer.writerow([code0, doc0])


if __name__ == '__main__':
    if args.files is not None:
        for filename in args.files:
            # BUG FIX: narrowed from a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.
            try:
                read_tsv(filename, sys.stdout)
            except Exception:
                read_tsv(filename)
    else:
        pass
5,583
1,823
# Homework #6. Loops

print("--- Task #1. 10 monkeys")
# Task #1. Output the single string "1 monkey 2 monkeys ... 10 monkeys".
# NOTE(review): assumed to be assembled first and printed once after the loop.
monkey = ""
for count in range(1, 11):
    suffix = "monkey" if count == 1 else "monkeys"
    monkey = monkey + f"{count} {suffix} "
print(monkey.strip())

print("\n--- Task #2. Countdown timer")
# Task #2. Rocket-launch countdown from 10 seconds down to 1.
for remaining in range(10, 0, -1):
    print(f"{remaining} seconds...")
print()

print("\n--- Task #3")
# Task #3. Raise n to the power k via repeated multiplication (no ** allowed).
# Example: 3**4 = 81 is equivalent to 3*3*3*3 = 81.
n = int(input("Please enter any number: "))
k = int(input("Please enter any number for a power: "))
s = n  # remember the base; n accumulates the product
for _ in range(k - 1):
    n = s * n
print("k ** n =", n)
m = (str(s) + " * ") * (k - 1)
print(f"{m}{s} = {n}")

print("\n--- Task #4")
# Task #4. An athlete runs 5 km on day 1 and 5% more each following day.
# How far does he run on day 10?
distance = 5
print("The first day distance = ", distance, "km")
distance2 = distance * (1.05 ** 9)  # closed-form cross-check
print(f"The 10th day distance should be {distance} * (1.05 ** 9) =", round(distance2, 2), "km")
print()
for _ in range(9):  # days 2 through 10
    distance += distance * 5 / 100
    print(distance)
print("On the 10th day, the athlete run ", round(distance, 2), "km")

print("\n--- Task #5. ")
# Task #5. The student learns 5 words on day 1, then 2 more words each day
# than the day before. In how many days will he know at least n words?
n = int(input("Please enter number of words: "))
day = 0
words = 5
print(f"The student knew {day} words before training session.")
print(f"The student learned {words} on the first day.")
total = 5
while words <= n:
    words += 2
    day += 1
print(f"The student will learn {n} words at the the {day} day, but he may learn {words} words by the end of the {day} day of the traning.")
print("Verify with addition: 5" + " + 2" * day + " = " + str(words) + " words")
print(total)  #?

print("\n--- Task #6. ")
# Task #6. Draw a staircase of '#' signs, each step indented one more space.
num = int(input("How many steps in the stairs: "))
stairs = ''  # never filled in; kept so the trailing print still emits a blank line
for step in range(1, num + 1):
    print(" " * step + "#")
print(stairs)

print("--- Task #7. ")
# Task #7. Print a centered star pyramid with the requested number of levels.
levels = int(input("Please enter any number of levels for pyramid: "))

# ver 1
c_point = levels * 2  # -> extra space before pyramid if levels*2+1
for row in range(levels):
    # level `row` (0-based) holds 2*row + 1 stars
    print(("*" * (2 * row + 1)).center(c_point))

# ver 2 - in class
c_point = levels * 2 - 1
for row in range(1, levels + 1):
    print(("*" * (row * 2 - 1)).center(c_point))
3,491
1,253
import django_filters
from .filter_utils import ValueInFilter
from ...models import FN125_Tag
from .FishAttr_Filter import FishAttrFilters


class FN125TagFilter(FishAttrFilters):
    """A filter set class for lamprey data.

    Inherits all of the filters in FishAttrs and adds some that are
    specific to Tag attributes.
    """

    # Membership filters on the tag id.
    tagid = ValueInFilter(field_name="tagid")
    # NOTE(review): the __like / __not_like variants are built from the same
    # ValueInFilter class as the exact filter — confirm a substring-style
    # filter was not intended here.
    tagid__like = ValueInFilter(field_name="tagid")
    tagid__not_like = ValueInFilter(field_name="tagid", exclude=True)

    # Filters on the tag documentation string (colour/placement/type/agency).
    tagdoc = ValueInFilter(field_name="tagdoc")
    tagdoc__like = ValueInFilter(field_name="tagdoc")
    tagdoc__not_like = ValueInFilter(field_name="tagdoc", exclude=True)

    # Filters on tag status; the __not variant excludes matching rows.
    tagstat = ValueInFilter(field_name="tagstat")
    tagstat__not = ValueInFilter(field_name="tagstat", exclude=True)

    # consider splitting up tagdoc into constituent fields to make it
    # easier to filter by colour, placement, tag type and agency.

    class Meta:
        # Bind this filter set to the FN125_Tag model and its default fields.
        model = FN125_Tag
        fields = ["tagstat", "tagid", "tagdoc"]
1,022
325
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Smoke tests for the autopaths package.

Run this file interactively like so:

    ipython -i -- ~/repos/autopaths/test/test_file_path.py
"""

# Built-in modules #
import os, inspect

# Get current directory (works always) #
file_name = os.path.abspath(inspect.stack()[0][1])
this_dir = os.path.dirname(os.path.abspath(file_name)) + '/'

# All our example file system #
dummy_files = this_dir + 'dummy_file_system/'

# Internal modules #
from autopaths.dir_path import DirectoryPath

###############################################################################
def test_symlink():
    """Creating a symlink to a file inside the dummy file system should work."""
    base_dir = DirectoryPath(dummy_files)
    source = base_dir['one.txt']
    source.link_to(base_dir + 'one_link.txt')

###############################################################################
if __name__ == '__main__':
    test_symlink()
858
287
import FWCore.ParameterSet.Config as cms

import TrackingTools.MaterialEffects.OppositeMaterialPropagator_cfi

# PropagatorWithMaterialESProducer
# Clone of the standard opposite-to-momentum material propagator with the
# particle Mass set to 0.000511 (the electron mass in GeV) and a dedicated
# ComponentName so it can be looked up independently.
oppositeToMomElePropagator = TrackingTools.MaterialEffects.OppositeMaterialPropagator_cfi.OppositeMaterialPropagator.clone(
    Mass = 0.000511,
    ComponentName = 'oppositeToMomElePropagator'
)
350
126
from rdflib import plugin
from rdflib import store

# Register the SQLAlchemy-backed Store implementation with rdflib's plugin
# system so that Graph(store="SQLAlchemy") resolves to the
# ``SQLAlchemy`` class in the ``rdflib_sqlalchemy.store`` module.
plugin.register(
    "SQLAlchemy",
    store.Store,
    "rdflib_sqlalchemy.store",
    "SQLAlchemy",
)
155
59
# -*- coding: utf-8 -*-


def _inversions_within(a):
    """Count pairs (i, j) with i < j and a[i] > a[j] inside one block."""
    n = len(a)
    return sum(1 for i in range(n - 1) for j in range(i + 1, n) if a[i] > a[j])


def _inversions_across(a):
    """Count ordered pairs (x, y) over ALL positions with x > y."""
    return sum(1 for x in a for y in a if x > y)


def main(lines=None):
    """Count inversions in ``a`` repeated k times, modulo 10**9 + 7.

    # See:
    # https://www.youtube.com/watch?v=JTH27weC38k
    # https://atcoder.jp/contests/jsc2019-qual/submissions/7107452

    Key insight: split the ordered pairs (i, j) by whether both indices
    fall in the same copy of the block or in two different copies.

    :param lines: optional sequence of the two input lines ("n k" and the
        array); when None (the original behaviour), input is read from stdin.
    """
    if lines is None:
        first_line, second_line = input(), input()
    else:
        first_line, second_line = lines[0], lines[1]
    n, k = map(int, first_line.split())  # n == len(a) per problem constraints
    a = list(map(int, second_line.split()))
    mod = 10 ** 9 + 7
    # Same-block pairs occur once per copy (k times); cross-block pairs
    # occur once per unordered pair of copies (k choose 2).
    ans = _inversions_within(a) * k + _inversions_across(a) * (k * (k - 1) // 2)
    print(ans % mod)


if __name__ == '__main__':
    main()
943
502
import difflib

import discord
from discord.ext import commands
from discord.ext.commands import CommandNotFound

intents = discord.Intents.all()
client = commands.Bot(command_prefix="+", intents=intents, help_command=None)


@client.event
async def on_ready():
    """Log a simple message once the gateway connection is ready."""
    print("Bot Online")


@client.event
async def on_command_error(ctx: commands.Context, exc):
    """Suggest a similar command when an unknown one is invoked.

    All other command errors are deliberately ignored (the original code
    had a dead ``else: pass`` branch here, removed).
    """
    if isinstance(exc, CommandNotFound):
        await send_command_suggestion(ctx, ctx.invoked_with)


async def send_command_suggestion(ctx: commands.Context, command_name: str) -> None:
    """Sends user similar commands if any can be found."""
    # Collect every visible command name and alias.
    raw_commands = []
    for cmd in client.walk_commands():
        if not cmd.hidden:
            raw_commands += (cmd.name, *cmd.aliases)
    if similar_command_data := difflib.get_close_matches(command_name, raw_commands, 1):
        similar_command_name = similar_command_data[0]
        similar_command = client.get_command(similar_command_name)
        if not similar_command:
            return
        # Only suggest commands the invoking user could actually run.
        try:
            if not await similar_command.can_run(ctx):
                return
        except commands.errors.CommandError:
            return
        misspelled_content = ctx.message.content
        e = discord.Embed()
        e.set_author(name="Did you mean:")
        e.description = misspelled_content.replace(
            command_name, similar_command_name, 1
        )
        await ctx.send(embed=e, delete_after=10.0)


# SECURITY: "TOKEN" is a hard-coded placeholder — load the real token from an
# environment variable or a config file before deploying; never commit it.
client.run("TOKEN")
1,494
450
import asyncio
import logging
import time
from typing import Optional, List

from hummingbot.core.data_type.user_stream_tracker_data_source import \
    UserStreamTrackerDataSource
from hummingbot.logger import HummingbotLogger
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book import BitfinexOrderBook
from hummingbot.connector.exchange.bitfinex.bitfinex_websocket import BitfinexWebsocket
from hummingbot.connector.exchange.bitfinex.bitfinex_auth import BitfinexAuth
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book_message import \
    BitfinexOrderBookMessage


class BitfinexAPIUserStreamDataSource(UserStreamTrackerDataSource):
    """Forwards authenticated Bitfinex websocket user-stream messages into an
    asyncio queue, reconnecting after unexpected errors."""

    # Seconds to sleep before reconnecting after an unexpected error.
    MESSAGE_TIMEOUT = 30.0

    _logger: Optional[HummingbotLogger] = None

    @classmethod
    def logger(cls) -> HummingbotLogger:
        # Lazily create one module-named logger shared by all instances.
        if cls._logger is None:
            cls._logger = logging.getLogger(__name__)
        return cls._logger

    def __init__(self,
                 bitfinex_auth: BitfinexAuth,
                 trading_pairs: Optional[List[str]] = None):
        """
        :param bitfinex_auth: credentials used to authenticate the websocket
        :param trading_pairs: pairs of interest; defaults to an empty list
        """
        if trading_pairs is None:
            trading_pairs = []
        self._bitfinex_auth: BitfinexAuth = bitfinex_auth
        self._trading_pairs = trading_pairs
        # NOTE(review): the two attributes below are initialised but never
        # used in this class — presumably kept for interface parity; verify.
        self._current_listen_key = None
        self._listen_for_user_stream_task = None
        # NOTE(review): never updated in the code visible here, so
        # last_recv_time will stay 0 unless a subclass/caller sets it.
        self._last_recv_time: float = 0
        super().__init__()

    @property
    def order_book_class(self):
        # Order book implementation whose parser is used for incoming messages.
        return BitfinexOrderBook

    @property
    def last_recv_time(self) -> float:
        return self._last_recv_time

    async def listen_for_user_stream(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
        """Connect, authenticate, and push transformed messages into *output*.

        Runs forever: cancellation propagates, any other error is logged
        and followed by a MESSAGE_TIMEOUT sleep before reconnecting.
        """
        while True:
            try:
                ws = await BitfinexWebsocket(self._bitfinex_auth).connect()
                await ws.authenticate()
                async for msg in ws.messages():
                    transformed_msg: BitfinexOrderBookMessage = self._transform_message_from_exchange(msg)
                    if transformed_msg is None:
                        # Heartbeat/auth/info events are filtered out.
                        continue
                    else:
                        output.put_nowait(transformed_msg)
            except asyncio.CancelledError:
                raise
            except Exception:
                self.logger().error(
                    "Unexpected error with Bitfinex WebSocket connection. "
                    "Retrying after 30 seconds...",
                    exc_info=True,
                )
                await asyncio.sleep(self.MESSAGE_TIMEOUT)

    def _transform_message_from_exchange(self, msg) -> Optional[BitfinexOrderBookMessage]:
        """Wrap a raw payload; return None for heartbeat/auth/info events."""
        order_book_message: BitfinexOrderBookMessage = BitfinexOrderBook.diff_message_from_exchange(msg, time.time())
        if any([
            order_book_message.type_heartbeat,
            order_book_message.event_auth,
            order_book_message.event_info,
        ]):
            # skip unneeded events and types
            return
        return order_book_message
2,898
857
from mersenne import generatePotentialMP, isPrime


def test_generatePotentialMP():
    """The candidate generator should produce 2**p - 1 for small exponents."""
    assert generatePotentialMP(2) == 3
    assert generatePotentialMP(1) == 1


def test_isPrime():
    """Small primes are accepted; the first composite is rejected."""
    for value in (7, 2, 3):
        assert isPrime(value)
    assert isPrime(4) == False
283
109
import math
import sys
import numpy as np
from numpy import ndarray
from sim21.data import chemsep
from sim21.data.chemsep_consts import GAS_CONSTANT
from numba import njit
from sim21.provider.generic import calc_ig_props
from sim21.provider.flash.basic import basic_flash_temp_press_2phase
from sim21.provider.flash.io import flash_press_prop_2phase, flash_press_vap_frac_2phase, flash_temp_vap_frac_2phase, \
    flash_temp_prop_2phase

# Mole fractions at or below this threshold are treated as absent.
MIN_COMPOSITION = math.sqrt(sys.float_info.epsilon)


@njit(cache=True)
def calc_wilson_k_values(temp, press, tc_list, pc_list, omega_list):
    """Estimate VLE K-values per component via the Wilson correlation."""
    k_values = np.empty(len(tc_list))
    for i in range(len(k_values)):
        pc = pc_list[i]
        tc = tc_list[i]
        omega = omega_list[i]
        k_values[i] = (pc / press) * math.exp(5.37 * (1 + omega) * (1 - tc / temp))
    return k_values


@njit(cache=True)
def estimate_nbp_value(feed_comp, tc_list, valid):
    """Crude normal-boiling-point guess: 0.7 * composition-weighted Tc
    over the `valid` component indices."""
    temp = 0
    for i in valid:
        temp += feed_comp[i] * tc_list[i]
    return 0.7 * temp


class Provider:
    """Thermodynamic property and flash provider over a list of chemsep
    components. Holds per-component constant arrays and dispatches the
    various two-phase flash specifications."""

    def __init__(self, components=None):
        self.observers = set()
        self.flash_basis = 'mole'
        self._components = None
        self.all_comps = None
        self._id_list = None
        self._mw_list = None
        self._tc_list = None
        self._pc_list = None
        self._omega_list = None
        self._ig_temp_ref = None
        self._ig_press_ref = None
        self._ig_cp_coeffs = None
        # Fix: the original `self._ig_h_form = h = None` (and `= g = None`)
        # bound pointless local aliases; plain assignments suffice.
        self._ig_h_form = None
        self._ig_g_form = None
        self._vap_visc = None
        self._liq_visc = None
        self._surf_tens = None
        self._ig_s_form = None
        self._std_liq_vol = None

    def add_observer(self, new_obs):
        self.observers.add(new_obs)

    @property
    def components(self):
        return self._components

    @property
    def all_valid_components(self):
        return self.all_comps

    def setup_components(self, components, **kwargs):
        """(Re)build all per-component constant arrays from `components`."""
        self._components = components
        self.all_comps = np.arange(0, len(components))
        self._id_list = [c.identifier for c in components]
        self._mw_list = np.array([c.mw for c in components])
        self._tc_list = np.array([c.crit_temp for c in components])
        self._pc_list = np.array([c.crit_press for c in components])
        self._omega_list = np.array([c.acen_fact for c in components])
        self._ig_temp_ref = np.array([c.ig_temp_ref for c in components])
        self._ig_press_ref = np.array([c.ig_press_ref for c in components])
        self._ig_cp_coeffs = np.array([c.ig_cp_mole_coeffs for c in components])
        self._ig_h_form = h = np.array([c.ig_enthalpy_form_mole for c in components])
        self._ig_g_form = g = np.array([c.ig_gibbs_form_mole for c in components])
        self._vap_visc = [c.vap_visc for c in components]
        self._liq_visc = [c.liq_visc for c in components]
        self._surf_tens = [c.surf_tens for c in components]
        # Entropy of formation from dG = dH - T*dS at 298.15 K.
        self._ig_s_form = (g - h) / -298.15
        self._std_liq_vol = np.array([c.std_liq_vol_mole for c in components])

    @property
    def mw(self):
        return self._mw_list

    @property
    def std_liq_vol_mole(self):
        return self._std_liq_vol

    def vap_visc(self, temp, comp_mole):
        """Mole-fraction-weighted vapor viscosity at `temp`."""
        return np.dot(comp_mole, [comp_visc(temp) for comp_visc in self._vap_visc])

    def liq_visc(self, temp, comp_mole):
        """Mole-fraction-weighted liquid viscosity at `temp`."""
        return np.dot(comp_mole, [comp_visc(temp) for comp_visc in self._liq_visc])

    def surf_tens(self, temp, comp_mole):
        """Mole-fraction-weighted surface tension at `temp`."""
        return np.dot(comp_mole, [comp_surf_tens(temp) for comp_surf_tens in self._surf_tens])

    def convert_to_mole_basis(self, flow_sum_basis, flow_sum_value, frac_basis, frac_value):
        """Convert a (flow, composition) spec on mole or mass basis to mole
        basis. Returns (flow_sum_value_mole, frac_value_mole)."""
        if frac_basis == 'mole':
            frac_value_mole = frac_value
        elif frac_basis == 'mass':
            frac_value_mole = frac_value / self._mw_list
            frac_value_mole /= np.sum(frac_value_mole)
        else:
            raise NotImplementedError

        avg_mw = np.dot(frac_value_mole, self._mw_list)
        if flow_sum_basis == 'mole':
            flow_sum_value_mole = flow_sum_value
        elif flow_sum_basis == 'mass':
            flow_sum_value_mole = flow_sum_value / avg_mw
        else:
            raise NotImplementedError

        return flow_sum_value_mole, frac_value_mole

    def scaling(self, prop_type):
        """Characteristic magnitude used to scale `prop_type` residuals."""
        if prop_type in ('enthalpy_mole', 'ig_enthalpy_mole', 'res_enthalpy_mole',
                         'gibbs_mole', 'ig_gibbs_mole', 'res_gibbs_mole',
                         'int_energy_mole', 'ig_int_energy_mole', 'res_int_energy_mole',
                         'helmholtz_mole', 'ig_helmholtz_mole', 'res_helmholtz_mole'):
            return GAS_CONSTANT * 298.15
        elif prop_type in ('entropy_mole', 'ig_entropy_mole', 'res_entropy_mole'):
            return GAS_CONSTANT
        else:
            raise NotImplementedError

    def guess_k_value_vle(self, temp, press):
        """Wilson-correlation initial K-values for all components."""
        return calc_wilson_k_values(temp, press, self._tc_list, self._pc_list, self._omega_list)

    def guess_nbp(self, feed_comp, valid):
        """Rough normal-boiling-point estimate for the feed."""
        return estimate_nbp_value(feed_comp, self._tc_list, valid)

    def ig_props(self, temp, press, feed_comp, valid=None):
        """Ideal-gas mixture properties at (temp, press, feed_comp).

        Returns (vol, mw, cp, enthalpy, entropy, int_energy, gibbs, helmholtz),
        all on a mole basis.
        """
        if valid is None:
            valid = np.where(feed_comp > MIN_COMPOSITION)[0]

        calc_mw, \
            calc_ig_cp, \
            calc_ig_enthalpy, \
            calc_ig_entropy, \
            calc_ig_gibbs = calc_ig_props(GAS_CONSTANT, temp, press, feed_comp, 1, valid,
                                          self._tc_list, self._mw_list,
                                          self._ig_cp_coeffs,
                                          self._ig_h_form, self._ig_s_form,
                                          self._ig_temp_ref, self._ig_press_ref)

        calc_ig_helmholtz = calc_ig_gibbs - GAS_CONSTANT * temp
        calc_ig_int_energy = calc_ig_helmholtz + temp * calc_ig_entropy
        vol = GAS_CONSTANT * temp / press  # ideal-gas molar volume
        return vol, calc_mw, calc_ig_cp, calc_ig_enthalpy, calc_ig_entropy, \
            calc_ig_int_energy, calc_ig_gibbs, calc_ig_helmholtz

    def flash(self, flow_sum_basis=None, flow_sum_value=None, frac_basis=None, frac_value=None,
              temp=None, press=None,
              vol_basis=None, vol_value=None,
              vap_frac_value=None, vap_frac_basis=None,
              deg_subcool=None, deg_supheat=None,
              enthalpy_basis=None, enthalpy_value=None,
              entropy_basis=None, entropy_value=None,
              int_energy_basis=None, int_energy_value=None,
              previous=None):
        """Dispatch a two-phase flash based on which pair of specifications
        is supplied (T/P, T/vap-frac, P/enthalpy, ...)."""
        assert None not in (flow_sum_basis, flow_sum_value)
        assert frac_basis in ('mole', 'mass')
        assert frac_value is not None and isinstance(frac_value, ndarray)
        valid = np.where(frac_value > MIN_COMPOSITION)[0]

        if temp is not None:
            if press is not None:
                # flash_temp_press
                return self.flash_temp_press(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                             temp, press, previous, valid)
            elif vap_frac_value is not None:
                # flash_temp_vap_frac
                return self.flash_temp_vap_frac(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                                temp, vap_frac_basis, vap_frac_value, previous, valid)
            elif deg_subcool is not None:
                # flash_temp_deg_subcool
                return self.flash_temp_subcool(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                               temp, deg_subcool, previous, valid)
            elif deg_supheat is not None:
                # flash_temp_deg_supheat
                # NOTE(review): routes through flash_temp_subcool with the
                # superheat value — confirm the target handles the sign.
                return self.flash_temp_subcool(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                               temp, deg_supheat, previous, valid)
            elif enthalpy_value is not None:
                # flash_temp_enthalpy
                return self.flash_temp_prop(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                            temp, 'enthalpy', enthalpy_basis, enthalpy_value, previous, valid)
            elif entropy_value is not None:
                # flash_temp_entropy
                return self.flash_temp_prop(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                            temp, 'entropy', entropy_basis, entropy_value, previous, valid)
            elif int_energy_value is not None:
                # flash_temp_int_energy
                return self.flash_temp_prop(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                            temp, 'int_energy', int_energy_basis, int_energy_value, previous, valid)
            elif vol_value is not None:
                # flash_temp_vol
                return self.flash_temp_vol(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                           temp, vol_basis, vol_value, previous, valid)
            else:
                raise NotImplementedError
        elif press is not None:
            if vap_frac_value is not None:
                # flash_press_vap_frac
                return self.flash_press_vap_frac(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                                 press, vap_frac_basis, vap_frac_value, previous, valid)
            elif deg_subcool is not None:
                # flash_press_deg_subcool
                return self.flash_press_subcool(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                                press, deg_subcool, previous, valid)
            elif deg_supheat is not None:
                # flash_press_deg_supheat (see NOTE above on sign handling)
                return self.flash_press_subcool(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                                press, deg_supheat, previous, valid)
            elif enthalpy_value is not None:
                # flash_press_enthalpy
                return self.flash_press_prop(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                             press, 'enthalpy', enthalpy_basis, enthalpy_value, previous, valid)
            elif entropy_value is not None:
                # flash_press_entropy
                return self.flash_press_prop(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                             press, 'entropy', entropy_basis, entropy_value, previous, valid)
            elif int_energy_value is not None:
                # flash_press_int_energy
                return self.flash_press_prop(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                             press, 'int_energy', int_energy_basis, int_energy_value, previous, valid)
            elif vol_value is not None:
                # flash_press_vol
                return self.flash_press_vol(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                            press, vol_basis, vol_value, previous, valid)
            else:
                raise NotImplementedError
        elif enthalpy_value is not None or entropy_value is not None or int_energy_value is not None:
            # Fix: the original condition `None not in (enthalpy_value,
            # entropy_value, int_energy_value)` only matched when ALL three
            # were supplied, although the branch below expects exactly one.
            prop_basis, prop_value = None, None
            if enthalpy_value is not None:
                prop_name, prop_basis, prop_value = 'enthalpy', enthalpy_basis, enthalpy_value
            elif entropy_value is not None:
                prop_name, prop_basis, prop_value = 'entropy', entropy_basis, entropy_value
            elif int_energy_value is not None:
                prop_name, prop_basis, prop_value = 'int_energy', int_energy_basis, int_energy_value
            else:
                raise NotImplementedError

            if vap_frac_value is not None:
                # flash_prop_vap_frac
                return self.flash_prop_vap_frac(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                                prop_name, prop_basis, prop_value,
                                                vap_frac_basis, vap_frac_value, previous, valid)
            elif vol_value is not None:
                return self.flash_prop_vol(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                           prop_name, prop_basis, prop_value,
                                           vol_basis, vol_value, previous, valid)
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError

    def flash_temp_press(self, flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                         temp, press, previous, valid):
        """Two-phase TP flash; reuses previous K-values when available."""
        flow_sum_value_mole, frac_value_mole = self.convert_to_mole_basis(flow_sum_basis, flow_sum_value,
                                                                          frac_basis, frac_value)
        prev_k = None
        if previous is not None and previous.contains('vap', 'liq'):
            prev_k = previous.k_values_vle

        results = basic_flash_temp_press_2phase(self, temp, press, frac_value_mole, valid,
                                                previous_k_values=prev_k)
        results.scale(flow_sum_mole=flow_sum_value_mole)
        return results

    def flash_press_prop(self, flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                         press, prop_name, prop_basis, prop_value, previous, valid):
        """Flash at fixed pressure and one molar property (H/S/U)."""
        flow_sum_value_mole, frac_value_mole = self.convert_to_mole_basis(flow_sum_basis, flow_sum_value,
                                                                          frac_basis, frac_value)
        prop_flash_name = prop_name + '_' + prop_basis
        start_temp = None
        if previous is not None:
            start_temp = previous.temp
        # Fix: pass the mole-basis composition; the original passed the raw
        # `frac_value` and silently ignored the converted value (its twin
        # flash_temp_prop already used frac_value_mole).
        results = flash_press_prop_2phase(self, press, prop_flash_name, prop_value, 0,
                                          frac_value_mole, valid=valid,
                                          previous=previous, start_temp=start_temp)
        results.scale(flow_sum_mole=flow_sum_value_mole)
        return results

    def flash_temp_prop(self, flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                        temp, prop_name, prop_basis, prop_value, previous, valid):
        """Flash at fixed temperature and one molar property (H/S/U)."""
        flow_sum_value_mole, frac_value_mole = self.convert_to_mole_basis(flow_sum_basis, flow_sum_value,
                                                                          frac_basis, frac_value)
        prop_flash_name = prop_name + '_' + prop_basis
        start_press = None
        if previous is not None:
            start_press = previous.press
        results = flash_temp_prop_2phase(self, temp, prop_flash_name, prop_value, 0,
                                         frac_value_mole, valid=valid,
                                         previous=previous, start_press=start_press)
        results.scale(flow_sum_mole=flow_sum_value_mole)
        return results

    def flash_press_vap_frac(self, flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                             press, vap_frac_basis, vap_frac_value, previous, valid):
        """Flash at fixed pressure and mole vapor fraction."""
        flow_sum_value_mole, frac_value_mole = self.convert_to_mole_basis(flow_sum_basis, flow_sum_value,
                                                                          frac_basis, frac_value)
        if vap_frac_basis != 'mole':
            raise NotImplementedError
        results = flash_press_vap_frac_2phase(self, press, vap_frac_value, frac_value_mole,
                                              valid=valid, previous=previous)
        results.scale(flow_sum_mole=flow_sum_value_mole)
        return results

    def flash_temp_vap_frac(self, flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                            temp, vap_frac_basis, vap_frac_value, previous, valid):
        """Flash at fixed temperature and mole vapor fraction."""
        flow_sum_value_mole, frac_value_mole = self.convert_to_mole_basis(flow_sum_basis, flow_sum_value,
                                                                          frac_basis, frac_value)
        if vap_frac_basis != 'mole':
            raise NotImplementedError
        results = flash_temp_vap_frac_2phase(self, temp, vap_frac_value, frac_value_mole,
                                             valid=valid, previous=previous)
        results.scale(flow_sum_mole=flow_sum_value_mole)
        return results

    def phase(self, temp, press, n, desired_phase, allow_pseudo=True, valid=None,
              press_comp_derivs=False, log_phi_temp_press_derivs=False, log_phi_comp_derivs=False):
        """Abstract: build a single-phase object. Implemented by subclasses."""
        raise NotImplementedError

    def phases_vle(self, temp, press, liq_comp, vap_comp, allow_pseudo=True, valid=None,
                   press_comp_derivs=False, log_phi_temp_press_derivs=False, log_phi_comp_derivs=False):
        """Build the (liquid, vapor) phase pair at the same T and P."""
        liq_ph = self.phase(temp, press, liq_comp, 'liq', allow_pseudo, valid,
                            press_comp_derivs, log_phi_temp_press_derivs, log_phi_comp_derivs)
        vap_ph = self.phase(temp, press, vap_comp, 'vap', allow_pseudo, valid,
                            press_comp_derivs, log_phi_temp_press_derivs, log_phi_comp_derivs)
        return liq_ph, vap_ph

    def AddCompound(self, compound_by_name, compound_obj=None):
        """Append a compound (by name, or a prebuilt object) and rebuild."""
        if compound_obj is None:
            compound_obj = chemsep.pure(compound_by_name)

        if self._components is None:
            new_components = [compound_obj]
        else:
            new_components = self._components[:]
            new_components.append(compound_obj)

        # This is really inefficient, but it's simple
        self.setup_components(new_components)

    def GetAvCompoundNames(self):
        """Names of all compounds available in the chemsep database."""
        return chemsep.available()

    def DeleteCompound(self, compound):
        """Remove the named compound (case-insensitive) and rebuild."""
        compound = compound.upper()
        idx = self._id_list.index(compound)
        new_compounds = self._components[:]
        new_compounds.pop(idx)
        self.setup_components(new_compounds)

    def ExchangeCompound(self, cmp1Name, cmp2Name):
        """Swap the positions of two compounds and rebuild."""
        cmp1Name = cmp1Name.upper()
        cmp2Name = cmp2Name.upper()
        idx_1 = self._id_list.index(cmp1Name)
        idx_2 = self._id_list.index(cmp2Name)
        new_compounds = self._components[:]
        new_compounds[idx_1], new_compounds[idx_2] = new_compounds[idx_2], new_compounds[idx_1]
        self.setup_components(new_compounds)

    def MoveCompound(self, cmp1Name, cmp2Name):
        """Move cmp1 to the position currently held by cmp2 and rebuild."""
        cmp1Name = cmp1Name.upper()
        cmp2Name = cmp2Name.upper()
        new_compounds = self._components[:]
        item_1 = new_compounds.pop(self._id_list.index(cmp1Name))
        new_compounds.insert(self._id_list.index(cmp2Name), item_1)
        self.setup_components(new_compounds)
18,873
6,049
""" Auth route """ import requests from fastapi import APIRouter, HTTPException from fastapi.param_functions import Depends from sqlalchemy.orm import Session from config.database import get_database from config.logger import logger from config.settings import settings from server.controllers.auth import get_department_id, sign_jwt from server.schemas.users import Users router = APIRouter( prefix="/auth", ) @router.get("/callback/") async def fetch_user_details( code: str, session: Session = Depends(get_database) ): """ Handles the callback route and fetches the user details """ params = { "client_id": settings.client_id, "client_secret": settings.client_secret, "grant_type": "authorization_code", "code": code, "redirect_uri": settings.redirect_url, } try: token_response = requests.post( url=settings.token_endpoint, data=params ).json() logger.debug(token_response) headers = { "Authorization": "Bearer " + token_response["access_token"] } userdetails = requests.post( url=settings.resource_endpoint, headers=headers, ).json() if ( not session.query(Users) .filter_by(email=userdetails["email"]) .first() ): new_user = Users( name=userdetails["name"], email=userdetails["email"], mobile_number=userdetails["phoneNumber"], gender=userdetails["gender"], department_id=get_department_id(userdetails["email"]), fcm_token="123", ) session.add(new_user) session.commit() session.close() jwt = sign_jwt(userdetails["email"], userdetails["name"]) logger.info(f'{userdetails["name"]} user logged in') return { "name": userdetails["name"], "email": userdetails["email"], "phoneNumber": userdetails["phoneNumber"], "gender": userdetails["gender"], "jwt": jwt["jwt_token"], } except Exception as exception: logger.error(f"/dauth failed with {exception}") raise HTTPException( status_code=500, detail="An unexpected error occurred while authentication", headers={ "X-Error": "An unexpected error occurred while authentication" }, ) from exception
2,516
653
import datetime
from unittest import TestCase

from isc_dhcp_leases.iscdhcpleases import Lease6, utc
from freezegun import freeze_time

__author__ = 'Martijn Braam <martijn@brixit.nl>'


class TestLease6(TestCase):
    """Unit tests for the IPv6 lease class (Lease6)."""

    def setUp(self):
        # Fixed "last communication" timestamp and a minimal active,
        # never-expiring lease record shared by all tests below.
        self.lease_time = datetime.datetime(2015, 8, 18, 16, 55, 37, tzinfo=utc)
        self.lease_data = {
            'binding': 'state active',
            'ends': 'never',
            'preferred-life': '375',
            'max-life': '600'
        }

    def test_init(self):
        # The host identifier is in dhcpd's octal-escaped string form;
        # Lease6 decodes it to raw bytes, from which IAID and DUID derive.
        lease = Lease6("2001:610:600:891d::60", self.lease_data, self.lease_time,
                       "4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na")
        self.assertEqual(lease.ip, "2001:610:600:891d::60")
        self.assertEqual(lease.host_identifier, b"4dv\xea\x00\x01\x00\x01\x1df\x1f\xe2\n\x00'\x00\x00\x00")
        self.assertEqual(lease.valid, True)
        self.assertEqual(lease.iaid, 3933627444)
        self.assertEqual(lease.duid, b"\x00\x01\x00\x01\x1df\x1f\xe2\n\x00'\x00\x00\x00")
        self.assertEqual(lease.active, True)
        self.assertEqual(lease.binding_state, 'active')
        self.assertEqual(lease.preferred_life, 375)
        self.assertEqual(lease.max_life, 600)
        self.assertEqual(lease.last_communication, self.lease_time)
        self.assertEqual(lease.type, Lease6.NON_TEMPORARY)

    def test_repr(self):
        lease = Lease6("2001:610:600:891d::60", self.lease_data, self.lease_time,
                       "4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na")
        self.assertEqual(repr(lease), '<Lease6 2001:610:600:891d::60>')

    def _test_valid(self, now=None):
        # Shared helper: exercised both under a frozen clock and with an
        # explicit `now` argument (see the two tests below).
        lease = Lease6("2001:610:600:891d::60", self.lease_data, self.lease_time,
                       "4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na",
                       now=now)
        self.assertTrue(lease.valid)  # Lease is forever
        lease.end = datetime.datetime(2015, 7, 6, 13, 57, 4, tzinfo=utc)
        self.assertTrue(lease.valid)  # Lease is before end
        lease.end = lease.end - datetime.timedelta(hours=7)
        self.assertFalse(lease.valid)  # Lease is ended

    @freeze_time("2015-07-6 8:15:0")
    def test_valid_frozen(self):
        self._test_valid()

    def test_valid_historical(self):
        self._test_valid(
            now=datetime.datetime(2015, 7, 6, 8, 15, 0, tzinfo=utc))

    def test_eq(self):
        lease_a = Lease6("2001:610:600:891d::60", self.lease_data, self.lease_time,
                         "4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na")
        lease_b = Lease6("2001:610:600:891d::60", self.lease_data, self.lease_time,
                         "4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na")
        self.assertEqual(lease_a, lease_b)
        # Equality is sensitive to the IP...
        lease_b.ip = "2001:610:600:891d::42"
        self.assertNotEqual(lease_a, lease_b)
        # ...and to the host identifier.
        lease_b.ip = "2001:610:600:891d::60"
        lease_b.host_identifier = "gd4\352\000\001\000\001\035b\037\322\012\000'\000\000\000"
        self.assertNotEqual(lease_a, lease_b)

    def test_naive_time(self):
        # `now` must be timezone-aware; a naive datetime is rejected.
        with self.assertRaises(ValueError):
            Lease6("2001:610:600:891d::60", self.lease_data, self.lease_time,
                   "4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na",
                   now=datetime.datetime.now())
3,480
1,666
import os
import numpy as np
import torch
import gym
import pandas as pd
from stable_baselines3.common.running_mean_std import RunningMeanStd
from collections import defaultdict
from torch.utils.data import DataLoader, TensorDataset

# This file creates the reward function used by dril. Both reinforcement
# algorithms ppo (line: 102) and a2c (line: 92), have dril bc udpates.


class DRIL:
    """Disagreement-based imitation reward model.

    Scores (state, action) pairs by the variance of an ensemble of
    behavioral-cloning policies: actions the ensemble agrees on get a
    positive reward, high-disagreement actions get a penalty, with the
    cutoff set per action at a quantile of the training variance.
    """

    def __init__(self, device=None, envs=None, ensemble_policy=None, env_name=None,
                 expert_dataset=None, ensemble_size=None, ensemble_quantile_threshold=None,
                 dril_bc_model=None, dril_cost_clip=None, num_dril_bc_train_epoch=None,
                 training_data_split=None):
        self.ensemble_quantile_threshold = ensemble_quantile_threshold
        self.dril_cost_clip = dril_cost_clip
        self.device = device
        self.num_dril_bc_train_epoch = num_dril_bc_train_epoch
        self.env_name = env_name

        # Running statistics for reward normalization.
        self.returns = None
        self.ret_rms = RunningMeanStd(shape=())

        self.observation_space = envs.observation_space
        if envs.action_space.__class__.__name__ == "Discrete":
            self.num_actions = envs.action_space.n
        elif envs.action_space.__class__.__name__ == "Box":
            self.num_actions = envs.action_space.shape[0]
        elif envs.action_space.__class__.__name__ == "MultiBinary":
            self.num_actions = envs.action_space.shape[0]

        self.ensemble_size = ensemble_size

        # use full data since we don't use a validation set
        self.trdata = expert_dataset.load_demo_data(1.0, 1, self.ensemble_size)['trdata']
        self.ensemble = ensemble_policy
        # NOTE(review): bc_update() reads self.bc, but its assignment is
        # disabled here; calling bc_update() as-is raises AttributeError —
        # confirm whether these lines should be re-enabled.
        # self.bc = dril_bc_model
        # self.bc.num_batches = num_dril_bc_train_epoch

        self.clip_variance = self.policy_variance(envs=envs)

    def policy_variance(self, q=0.98, envs=None):
        """Build per-action clip functions from ensemble-variance quantiles.

        Returns a dict mapping an action index to a function that converts a
        raw ensemble variance into a clipped reward, per `dril_cost_clip`.
        """
        q = self.ensemble_quantile_threshold  # NOTE: overrides the `q` default
        variance = defaultdict(lambda: [])
        for batch_idx, batch in enumerate(self.trdata):
            (state, action) = batch
            action = action.float().to(self.device)
            # Image observation
            if len(self.observation_space.shape) == 3:
                state = state.repeat(self.ensemble_size, 1, 1, 1).float().to(self.device)
            # Feature observations
            else:
                state = state.repeat(self.ensemble_size, 1).float().to(self.device)

            if isinstance(envs.action_space, gym.spaces.discrete.Discrete):
                # Note: this is just a place holder
                action_idx = int(action.item())
                one_hot_action = torch.FloatTensor(np.eye(self.num_actions)[int(action.item())])
                action = one_hot_action
            elif envs.action_space.__class__.__name__ == "MultiBinary":
                # create unique id for each combination
                action_idx = int("".join(str(int(x)) for x in action[0].tolist()), 2)
            else:
                action_idx = 0

            with torch.no_grad():
                ensemble_action = self.ensemble(state).squeeze()

            if isinstance(envs.action_space, gym.spaces.Box):
                action = torch.clamp(action, envs.action_space.low[0], envs.action_space.high[0])
                ensemble_action = torch.clamp(ensemble_action, envs.action_space.low[0],
                                              envs.action_space.high[0])

            cov = np.cov(ensemble_action.T.cpu().numpy())
            action = action.cpu().numpy()

            # If the env has only one action then we need to reshape cov
            if envs.action_space.__class__.__name__ == "Box":
                if envs.action_space.shape[0] == 1:
                    cov = cov.reshape(-1, 1)

            if isinstance(envs.action_space, gym.spaces.discrete.Discrete):
                # Record the variance of every discrete action under this
                # state's ensemble covariance.
                for action_idx in range(envs.action_space.n):
                    one_hot_action = torch.FloatTensor(np.eye(self.num_actions)[action_idx])
                    variance[action_idx].append(
                        np.matmul(np.matmul(one_hot_action, cov), one_hot_action.T).item())
            else:
                variance[action_idx].append(np.matmul(np.matmul(action, cov), action.T).item())

        quantiles = {key: np.quantile(np.array(variance[key]), q) for key in variance}

        # Fix: bind each key's quantile as a default argument. The original
        # lambdas closed over the comprehension variable `key`, so every
        # entry compared against the LAST key's quantile (late binding).
        if self.dril_cost_clip == '-1_to_1':
            return {key: (lambda x, t=quantiles[key]: -1 if x > t else 1)
                    for key in variance}
        elif self.dril_cost_clip == 'no_clipping':
            # Fix: the original iterated `for i in ...` while keying on the
            # undefined name `key`, which raised NameError at runtime.
            return {key: (lambda x: x) for key in variance}
        elif self.dril_cost_clip == '-1_to_0':
            return {key: (lambda x, t=quantiles[key]: -1 if x > t else 0)
                    for key in variance}

    def predict_reward(self, actions, states, envs):
        """Score a batch of (state, action) pairs with the clip functions.

        Returns a numpy array of per-sample rewards; actions whose index was
        never seen during policy_variance get the penalty -1.
        """
        rewards = []
        for idx in range(actions.shape[0]):
            # Image observation
            if len(self.observation_space.shape) == 3:
                state = states[[idx]].repeat(self.ensemble_size, 1, 1, 1).float().to(self.device)
            # Feature observations
            else:
                state = states[[idx]].repeat(self.ensemble_size, 1).float().to(self.device)

            if isinstance(envs.action_space, gym.spaces.discrete.Discrete):
                one_hot_action = torch.FloatTensor(np.eye(self.num_actions)[int(actions[idx].item())])
                action = one_hot_action
                action_idx = int(actions[idx].item())
            elif isinstance(envs.action_space, gym.spaces.Box):
                action = actions[[idx]]
                action_idx = 0
            elif isinstance(envs.action_space, gym.spaces.MultiBinary):
                raise Exception('Envrionment shouldnt be MultiBinary')
            else:
                raise Exception("Unknown Action Space")

            with torch.no_grad():
                ensemble_action = self.ensemble(state).squeeze().detach()

            if isinstance(envs.action_space, gym.spaces.Box):
                action = torch.clamp(action, envs.action_space.low[0], envs.action_space.high[0])
                ensemble_action = torch.clamp(ensemble_action, envs.action_space.low[0],
                                              envs.action_space.high[0])

            cov = np.cov(ensemble_action.T.cpu().numpy())
            action = action.cpu().numpy()

            # If the env has only one action then we need to reshape cov
            if envs.action_space.__class__.__name__ == "Box":
                if envs.action_space.shape[0] == 1:
                    cov = cov.reshape(-1, 1)

            ensemble_variance = (np.matmul(np.matmul(action, cov), action.T).item())

            if action_idx in self.clip_variance:
                reward = self.clip_variance[action_idx](ensemble_variance)
            else:
                reward = -1
            rewards.append(reward)
        return np.array(rewards)
        # return torch.FloatTensor(np.array(rewards)[np.newaxis].T)

    def normalize_reward(self, state, action, gamma, masks, reward, update_rms=True):
        """Normalize rewards by a running estimate of return variance."""
        if self.returns is None:
            self.returns = reward.clone()

        if update_rms:
            self.returns = self.returns * masks * gamma + reward
            self.ret_rms.update(self.returns.cpu().numpy())

        return reward / np.sqrt(self.ret_rms.var[0] + 1e-8)

    def bc_update(self):
        """Run behavioral-cloning updates for the configured epoch count.

        NOTE(review): depends on `self.bc`, whose assignment is commented
        out in __init__ — calling this as-is raises AttributeError.
        """
        for dril_epoch in range(self.num_dril_bc_train_epoch):
            dril_train_loss = self.bc.update(update=True, data_loader_type='train')
7,599
2,346
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """ Values include Nodes, Edges, Layer, Layers that returned by samplers. Values should be extended with customized samplers. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from graphlearn.python.decoder import Decoder class Values(object): """ Base value class. 
""" def __init__(self, int_attrs=None, float_attrs=None, string_attrs=None, weights=None, labels=None, shape=None, graph=None): self._weights = weights self._labels = labels self._int_attrs = int_attrs self._float_attrs = float_attrs self._string_attrs = string_attrs self._shape = shape self._graph = graph self._attred = True if self._int_attrs is None and self._float_attrs is None and \ self._string_attrs is None: self._attred = False def _get_decoder(self): return Decoder() @property def int_attrs(self): self._set_attrs() return self._int_attrs @property def float_attrs(self): self._set_attrs() return self._float_attrs @property def string_attrs(self): self._set_attrs() return self._string_attrs @property def weights(self): if self._weights is None and self._get_decoder().weighted: self._lookup_and_set_values() return self._weights @property def labels(self): if self._labels is None and self._get_decoder().labeled: self._lookup_and_set_values() return self._labels @property def shape(self): return self._shape @property def graph(self): return self._graph @int_attrs.setter def int_attrs(self, int_attrs): self._int_attrs = self._reshape(int_attrs, expand_shape=True) @float_attrs.setter def float_attrs(self, float_attrs): self._float_attrs = self._reshape(float_attrs, expand_shape=True) @string_attrs.setter def string_attrs(self, string_attrs): self._string_attrs = self._reshape(string_attrs, expand_shape=True) @weights.setter def weights(self, weights): self._weights = self._reshape(weights) @labels.setter def labels(self, labels): self._labels = self._reshape(labels) @shape.setter def shape(self, shape): self._shape = shape if not isinstance(shape, tuple): raise ValueError("shape must be a tuple, got {}.".format(type(shape))) @graph.setter def graph(self, graph): self._graph = graph def _reshape(self, value, expand_shape=False): """ Reshape for value when `shape` is not None. 
""" if value is None or value.size == 0: return value if not isinstance(value, np.ndarray): raise ValueError("{} must be a numpy.ndarray.".format(value)) if self._shape: if not isinstance(self._shape, tuple): raise ValueError("shape must be a tuple, got {}." .format(type(self._shape))) if expand_shape: return np.reshape(value, self._shape + (-1, )) # pylint: disable=bad-whitespace return np.reshape(value, self._shape) return value def _lookup_and_set_values(self): pass def _set_attrs(self): if self._get_decoder().attributed: if not self._attred: self._lookup_and_set_values() self._attred = True def _set_values(self, values): self.int_attrs = values.int_attrs self.float_attrs = values.float_attrs self.string_attrs = values.string_attrs self.weights = values.weights self.labels = values.labels class SparseBase(object): """ Sparse Value, the base class of SparseNodes and SparseEdges. """ def __init__(self, offsets, dense_shape): """ Init a SparseBase object. Args: offsets: list or 1D ndarraay, the number of values on each line. dense_shape: the corresponding 2D dense shape. 
""" self._it = 0 self._offsets = offsets self._dense_shape = dense_shape self._global_offsets = [0] sum_offsets = 0 for offset in self._offsets: sum_offsets += offset self._global_offsets.append(sum_offsets) @property def offsets(self): return self._offsets @property def indices(self): indices = [] for x in range(len(self._offsets)): for y in range(self._offsets[x]): indices.append([x, y]) return indices @property def dense_shape(self): return self._dense_shape @offsets.setter def offsets(self, offsets): """ row offsets """ self._offsets = offsets @dense_shape.setter def dense_shape(self, dense_shape): self._dense_shape = dense_shape def __iter__(self): return self def __next__(self): pass def next(self): return self.__next__() class Nodes(Values): """ As returned object of `get_next` api of `node_sampler` and `negative_sampler`, as returned object of `get_nodes` of `Graph` or as in-memory object for constructing graph. """ def __init__(self, ids, node_type, int_attrs=None, float_attrs=None, string_attrs=None, weights=None, labels=None, shape=None, graph=None): super(Nodes, self).__init__(int_attrs=int_attrs, float_attrs=float_attrs, string_attrs=string_attrs, weights=weights, labels=labels, shape=shape, graph=graph) if not isinstance(ids, np.ndarray): raise ValueError("ids must be an instance of numpy.ndarray, " "got {}.".format(type(ids))) self._shape = shape if shape is not None else ids.shape self._ids = self._reshape(ids) self._type = node_type def _get_decoder(self): return self._graph.get_node_decoder(self._type) @property def ids(self): return self._ids @property def type(self): # pylint: disable=redefined-builtin return self._type @property def shape(self): return self._shape def _lookup_and_set_values(self): values = self._graph.lookup_nodes(self._type, self._ids) self._set_values(values) @ids.setter def ids(self, ids): self._ids = self._reshape(ids) @type.setter def type(self, node_type): # pylint: disable=redefined-builtin self._type = node_type class 
SparseNodes(Nodes, SparseBase): """ SparseNodes is the returned value of full neighbor sampler which is 2D. It can be easily transformed to Tensorflow or PyTorch Sparse Tensors. """ def __init__(self, ids, offsets, dense_shape, node_type, int_attrs=None, float_attrs=None, string_attrs=None, weights=None, labels=None, graph=None): """ Sparse Nodes. Args: ids: A 1D numpy array, the ids of the nodes. offsets: A python list, each elem of list is an int, which indicates the number of nodes. dense_shape: The shape of the the corresponding dense Nodes. For example, ids=[5, 2, 1, 6, 2, 4], offsets=[3, 2, 1], dense_shape=[3, 5]. The corresponding dense Nodes is [[ 5, 2, 1, -1, -1], [ 6, 2, -1, -1, -1], [ 4, -1, -1, -1, -1]] """ Nodes.__init__(self, ids, node_type, int_attrs=None, float_attrs=None, string_attrs=None, weights=weights, labels=labels, shape=None, graph=graph) SparseBase.__init__(self, offsets, dense_shape) num_nodes = sum(offsets) if ids.shape[0] != num_nodes: raise ValueError("Ids must be the same length of indices") def __next__(self): if self._it < len(self._offsets): l = self._global_offsets[self._it] r = self._global_offsets[self._it + 1] self._it += 1 nodes = Nodes(self._ids[l: r], self._type, graph=self._graph, int_attrs=np.array([int_attr[l: r] \ for int_attr in self._int_attrs]) \ if self._int_attrs is not None else None, float_attrs=np.array([float_attr[l: r] \ for float_attr in self._float_attrs]) \ if self._float_attrs is not None else None, \ string_attrs=np.array([string_attr[l: r] \ for string_attr in self._string_attrs]) \ if self._string_attrs is not None else None, weights=self._weights[l:r] \ if self._weights is not None else None, labels=self._labels[l:r] \ if self._labels is not None else None) return nodes else: raise StopIteration class Edges(Values): """ As returned object of `get_next` api of `edge_sampler` , as returned object of `get_edges` of `Graph` or as in-memory object for constructing graph. 
""" def __init__(self, src_ids=None, src_type=None, dst_ids=None, dst_type=None, edge_type=None, edge_ids=None, src_nodes=None, dst_nodes=None, int_attrs=None, float_attrs=None, string_attrs=None, weights=None, labels=None, shape=None, graph=None): super(Edges, self).__init__(int_attrs=None, float_attrs=None, string_attrs=None, weights=weights, labels=labels, shape=shape, graph=graph) self._shape = shape if not self._shape: if src_ids is not None: if not isinstance(src_ids, np.ndarray): raise ValueError("src_ids must be an instance of numpy.ndarray, " "got {}.".format(type(src_ids))) self._shape = src_ids.shape if edge_ids is not None: if not isinstance(dst_ids, np.ndarray): raise ValueError("dst_ids must be an instance of numpy.ndarray, " "got {}.".format(type(dst_ids))) self._shape = edge_ids.shape self._src_ids = self._reshape(src_ids) self._src_type = src_type self._dst_ids = self._reshape(dst_ids) self._dst_type = dst_type self._edge_type = edge_type self._edge_ids = self._reshape(edge_ids) self._src_nodes = src_nodes self._dst_nodes = dst_nodes if self._src_ids is not None and self._src_nodes is None: self._src_nodes = Nodes(src_ids, src_type, shape=shape, graph=graph) if self._dst_ids is not None and self._dst_nodes is None: self._dst_nodes = Nodes(dst_ids, dst_type, shape=shape, graph=graph) if self._src_ids is not None and self._dst_ids is not None: if self._src_ids.shape != self._dst_ids.shape: raise ValueError("src_ids and dst_ids must be same shape.") def _get_decoder(self): return self._graph.get_edge_decoder(self._edge_type) @property def src_nodes(self): return self._src_nodes @property def dst_nodes(self): return self._dst_nodes @property def edge_ids(self): return self._edge_ids @property def src_ids(self): return self._src_ids @property def dst_ids(self): return self._dst_ids @property def src_type(self): return self._src_type @property def dst_type(self): return self._dst_type @property def edge_type(self): return self._edge_type @property def 
type(self): # pylint: disable=redefined-builtin return self._src_type, self._dst_type, self._edge_type @property def shape(self): return self._shape def _lookup_and_set_values(self): values = self._graph.lookup_edges(self._edge_type, self._src_ids, self._edge_ids) self._set_values(values) @edge_ids.setter def edge_ids(self, edge_ids): self._edge_ids = self._reshape(edge_ids) @src_ids.setter def src_ids(self, src_ids): self._src_ids = self._reshape(src_ids) @dst_ids.setter def dst_ids(self, dst_ids): self._dst_ids = self._reshape(dst_ids) @type.setter def type(self, type): # pylint: disable=redefined-builtin if not isinstance(type, tuple) or len(type) != 3: raise ValueError("property type must be a tuple of " "(src_type, dst_type, edge_type).") self._src_type, self._dst_type, self._edge_type = type @src_nodes.setter def src_nodes(self, src_nodes): if not isinstance(src_nodes, Nodes): raise ValueError("property src_nodes must be a Nodes object.") self._src_nodes = src_nodes @dst_nodes.setter def dst_nodes(self, dst_nodes): if not isinstance(dst_nodes, Nodes): raise ValueError("property dst_nodes must be a Nodes object.") self._dst_nodes = dst_nodes class SparseEdges(Edges, SparseBase): """ SparseEdges is the return value of full neighbor sampler. It can be easily transformed to Tensorflow or PyTorch Sparse Tensors. """ def __init__(self, src_ids=None, src_type=None, dst_ids=None, dst_type=None, edge_type=None, offsets=None, dense_shape=None, edge_ids=None, src_nodes=None, dst_nodes=None, int_attrs=None, float_attrs=None, string_attrs=None, weights=None, labels=None, graph=None): """ Sparse Edges. 
""" Edges.__init__(self, src_ids=src_ids, src_type=src_type, dst_ids=dst_ids, dst_type=dst_type, edge_type=edge_type, edge_ids=edge_ids, src_nodes=src_nodes, dst_nodes=dst_nodes, int_attrs=None, float_attrs=None, string_attrs=None, weights=weights, labels=labels, shape=None, graph=graph) SparseBase.__init__(self, offsets, dense_shape) if not src_nodes: num_edges = sum(offsets) if src_ids is not None and src_ids.shape[0] != num_edges: raise ValueError("Ids must be the same length of indices") self._src_nodes = SparseNodes(src_ids, offsets, dense_shape, src_type, graph=graph) self._dst_nodes = SparseNodes(dst_ids, offsets, dense_shape, dst_type, graph=graph) else: self._dense_shape = dst_nodes.dense_shape self._offsets = dst_nodes.offsets def __next__(self): if self._it < len(self._offsets): l = self._global_offsets[self._it] r = self._global_offsets[self._it + 1] self._it += 1 edges = Edges(self._src_ids[l: r] \ if self._src_ids is not None else None, self._src_type, self._dst_ids[l: r] \ if self._dst_ids is not None else None, self._dst_type, self._edge_type, self._edge_ids[l: r] \ if self._edge_ids is not None else None, next(self._src_nodes), next(self._dst_nodes), weights=self._weights[l:r] \ if self._weights is not None else None, labels=self._labels[l:r] \ if self._labels is not None else None, graph=self._graph) edges.int_attrs = np.array( [int_attr[l: r] for int_attr in self._int_attrs]) \ if self._int_attrs is not None else None edges.float_attrs = np.array( [float_attr[l: r] for float_attr in self._float_attrs]) \ if self._float_attrs is not None else None edges.string_attrs = np.array( [string_attr[l: r] for string_attr in self._string_attrs]) \ if self._string_attrs is not None else None return edges else: raise StopIteration class Layers(object): """ As returned object of `get_next` api of `meta_path_sampler`. """ def __init__(self, layers=None): self.layers = layers if layers else [] def layer(self, layer_id): """ Get one `Layer`. 
""" layer_id -= 1 if isinstance(self.layers, list) and layer_id < len(self.layers): return self.layers[layer_id] else: raise ValueError("layer id beyond the layers length.") def layer_size(self, layer_id): """ Get size of the given `Layer`. """ layer_id -= 1 if isinstance(self.layers, list) and layer_id < len(self.layers): return self.layers[layer_id].shape else: raise ValueError("layer id beyond the layers length.") def layer_nodes(self, layer_id): """ Get `Nodes` of the given `Layer`. """ layer_id -= 1 if isinstance(self.layers, list) and layer_id < len(self.layers): return self.layers[layer_id].nodes else: raise ValueError("layer id beyond the layers length.") def layer_edges(self, layer_id): """ Get `Edges` of the given `Layer`. """ layer_id -= 1 if isinstance(self.layers, list) and layer_id < len(self.layers): return self.layers[layer_id].edges else: raise ValueError("layer id beyond the layers length.") def set_layer_nodes(self, layer_id, nodes): """ Set `Nodes` of the given `Layer`. """ layer_id -= 1 if isinstance(self.layers, list) and layer_id < len(self.layers): if isinstance(self.layers[layer_id], Layer): self.layers[layer_id].set_nodes(nodes) else: raise ValueError("layer {} is not a SingleLayer".format(layer_id)) else: raise ValueError("layer id beyond the layers length.") def set_layer_edges(self, layer_id, edges): """ Set `Edges` of the given `Layer`. """ layer_id -= 1 if isinstance(self.layers, list) and layer_id < len(self.layers): if isinstance(self.layers[layer_id], Layer): self.layers[layer_id].set_edges(edges) else: raise ValueError("layer {} is not a SingleLayer".format(layer_id)) else: raise ValueError("layer id beyond the layers length.") def append_layer(self, layer): """ Append a `Layer` to layers """ self.layers.append(layer) class Layer(object): """ Layer is 1 hop neighbor nodes and the between edges. 
""" def __init__(self, nodes, edges=None, shape=None): """ A `Layer` maintain one hop of `Nodes` and `Edges`.""" self._nodes = nodes self._edges = edges self._shape = shape if shape else nodes.shape @property def nodes(self): return self._nodes @property def edges(self): return self._edges @property def shape(self): return self._shape @nodes.setter def nodes(self, nodes): self._nodes = nodes @edges.setter def edges(self, edges): self._edges = edges @shape.setter def shape(self, shape): self._shape = shape
20,226
6,341
import os import re import argparse from collections import defaultdict _AFR_COMPONENTS = [ 'demos', 'freertos_kernel', os.path.join('libraries','abstractions','ble_hal'), os.path.join('libraries','abstractions','common_io'), os.path.join('libraries','abstractions','pkcs11'), os.path.join('libraries','abstractions','platform'), os.path.join('libraries','abstractions','posix'), os.path.join('libraries','abstractions','secure_sockets'), os.path.join('libraries','abstractions','wifi'), os.path.join('libraries','c_sdk','aws','defender'), os.path.join('libraries','c_sdk','aws','shadow'), os.path.join('libraries','c_sdk','standard','ble'), os.path.join('libraries','c_sdk','standard','common'), os.path.join('libraries','c_sdk','standard','https'), os.path.join('libraries','c_sdk','standard','mqtt'), os.path.join('libraries','c_sdk','standard','serializer'), os.path.join('libraries','freertos_plus','aws','greengrass'), os.path.join('libraries','freertos_plus','aws','ota'), os.path.join('libraries','freertos_plus','standard','crypto'), os.path.join('libraries','freertos_plus','standard','freertos_plus_posix'), os.path.join('libraries','freertos_plus','standard','freertos_plus_tcp'), os.path.join('libraries','freertos_plus','standard','pkcs11'), os.path.join('libraries','freertos_plus','standard','tls'), os.path.join('libraries','freertos_plus','standard','utils'), 'tests' ] def ask_question(question): answer = input('{}: '.format(question)) return answer.strip() def ask_multiple_choice_question(question, choices): while True: print('{}?'.format(question)) for i in range(len(choices)): print('{}. {}'.format(i, choices[i])) try: user_choice = int(ask_question('Enter Choice')) except ValueError: print('Incorrect choice. Please choose a number between 0 and {}'.format(len(choices) - 1)) continue if user_choice in range(len(choices)): break else: print('Incorrect choice. 
Please choose a number between 0 and {}'.format(len(choices) - 1)) return user_choice def ask_yes_no_question(question): while True: answer = ask_question('{} (Y/N)'.format(question)) if answer.lower() == 'y': answer = 'yes' break elif answer.lower() == 'n': answer = 'no' break else: print('Incorrect response. Please answer Y/N.') return answer def print_file_list(file_list): version_line_list = [] for file in file_list: version_number = extract_version_number_from_file(file) version_line_list.append(version_number[0] if version_number[0] is not None else 'Could not detect version') max_filepath_length = len(max(file_list, key=len)) max_version_line_length = len(max(version_line_list, key=len)) print('-' * (max_filepath_length + max_version_line_length + 7)) print('| {file:<{max_filepath_length}} | {version:<{max_version_line_length}} |'.format(file='File', max_filepath_length=max_filepath_length, version='Version Line', max_version_line_length=max_version_line_length)) print('-' * (max_filepath_length + max_version_line_length + 7)) for i in range(len(file_list)): print('| {file:<{max_filepath_length}} | {version:<{max_version_line_length}} |'.format(file=file_list[i], max_filepath_length=max_filepath_length, version=version_line_list[i], max_version_line_length=max_version_line_length)) print('-' * (max_filepath_length + max_version_line_length + 7)) print('\n') def list_files_in_a_component(component, afr_path): ''' Returns a list of all the files in a component. ''' list_of_files = [] search_path = os.path.join(afr_path, component) for root, dirs, files in os.walk(search_path, topdown=True): # Do not search 'portable' and 'third_party' folders. dirs[:] = [d for d in dirs if d not in ['portable', 'third_party']] # Do not include hidden files and folders. 
dirs[:] = [d for d in dirs if not d[0] == '.'] files = [f for f in files if not f[0] == '.'] for f in files: if f.endswith('.c') or f.endswith('.h'): list_of_files.append(os.path.join(os.path.relpath(root, afr_path), f)) return list_of_files def extract_version_number_from_file(file_path): ''' Extracts version number from the License header in a file. ''' with open(file_path) as f: content = f.read() match = re.search('\s*\*\s*(FreeRTOS.*V(.*))', content, re.MULTILINE) # Is it a kernel file? if match is None: match = re.search('\s*\*\s*(FreeRTOS Kernel.*V(.*))', content, re.MULTILINE) # Is it s FreeRTOS+TCP file? if match is None: match = re.search('\s*\*\s*(FreeRTOS\+TCP.*V(.*))', content, re.MULTILINE) return (match.group(1), match.group(2)) if match is not None else (None, None) def update_version_number_in_files(file_paths, old_version_line, new_version_line): ''' Replaces old_version_line with new_version_line in all the files specified by file_paths. ''' for file_path in file_paths: with open(file_path) as f: content = f.read() content = content.replace(old_version_line, new_version_line) with open(file_path, 'w') as f: f.write(content) def update_version_number_in_a_component(component, afr_path): ''' Updates version numbers in all the files of an AFR component based on user choices. ''' # Get all the files in the component. files_in_component = list_files_in_a_component(component, afr_path) version_numbers = defaultdict(list) # Extract version numbers from all the files. 
for f in files_in_component: file_path = os.path.join(afr_path, f) version_number = extract_version_number_from_file(file_path) version_numbers[version_number].append(file_path) for key in version_numbers.keys(): old_version_line = key[0] old_version_number = key[1] files_to_update = version_numbers[key] if old_version_line is None: print('\nFailed to detect the version number in the following files:') while True: print_file_list(files_to_update) print('Please update the above files manually!') confirm = ask_yes_no_question('Done updating') if confirm == 'yes': print_file_list(files_to_update) looks_good = ask_yes_no_question('Does it look good') if looks_good == 'yes': break else: print('\n{} files have the following version: {}\n'.format(len(files_to_update), old_version_line)) options = [ 'Update version number [i.e. update "{}"].'.format(old_version_number), 'Update version line [i.e. update "{}"].'.format(old_version_line), 'List files.', 'Do not update.' ] while True: user_selected_option = ask_multiple_choice_question('What do you want to do', options) if user_selected_option == 0: new_version_number = ask_question('Enter new version number') new_version_line = old_version_line.replace(old_version_number, new_version_number) print('Old version line: "{}". New version line: "{}".'.format(old_version_line, new_version_line)) confirm = ask_yes_no_question('Does it look good') if confirm == 'yes': update_version_number_in_files(files_to_update, old_version_line, new_version_line) print('Updated version line to "{}".\n'.format(new_version_line)) break elif user_selected_option == 1: new_version_line = ask_question('Enter new version line') print('Old version line: "{}". 
New version line: "{}".'.format(old_version_line, new_version_line)) confirm = ask_yes_no_question('Does it look good') if confirm == 'yes': update_version_number_in_files(files_to_update, old_version_line, new_version_line) print('Updated version line to "{}".\n'.format(new_version_line)) break elif user_selected_option == 2: print_file_list(files_to_update) else: print('Skipping update of {}.\n'.format(old_version_line)) break def parse_arguments(): ''' Parses the command line arguments. ''' parser = argparse.ArgumentParser(description='FreeRTOS Checksum Generator') parser.add_argument('--afr', required=True, help='Location of the AFR Code.') args = parser.parse_args() return vars(args) def main(): ''' Main entry point. ''' args = parse_arguments() afr_path = args['afr'] print('AFR Code: {}'.format(afr_path)) for component in _AFR_COMPONENTS: print('\n---------------------------------------------') print('Component: {}'.format(component)) print('---------------------------------------------\n') wanna_update_version = ask_yes_no_question('Do you want to update the component "{}"'.format(component)) if wanna_update_version == 'yes': update_version_number_in_a_component(component, afr_path) if __name__ == '__main__': main()
10,360
3,014
""" There are some spherical balloons spread in two-dimensional space. For each balloon, provided input is the start and end coordinates of the horizontal diameter. Since it's horizontal, y-coordinates don't matter, and hence the x-coordinates of start and end of the diameter suffice. The start is always smaller than the end. An arrow can be shot up exactly vertically from different points along the x-axis. A balloon with x(start) and x(end) bursts by an arrow shot at x if x(start) ≤ x ≤ x(end). There is no limit to the number of arrows that can be shot. An arrow once shot keeps traveling up infinitely. Given an array points where points[i] = [x(start), x(end)], return the minimum number of arrows that must be shot to burst all balloons. Example: Input: points = [[10,16],[2,8],[1,6],[7,12]] Output: 2 Explanation: One way is to shoot one arrow for example at x = 6 (bursting the balloons [2,8] and [1,6]) and another arrow at x = 11 (bursting the other two balloons). Example: Input: points = [[1,2],[3,4],[5,6],[7,8]] Output: 4 Example: Input: points = [[1,2],[2,3],[3,4],[4,5]] Output: 2 Example: Input: points = [[1,2]] Output: 1 Example: Input: points = [[2,3],[2,3]] Output: 1 Constraints: - 0 <= points.length <= 10**4 - points.length == 2 - -2**31 <= xstart < xend <= 2**31 - 1 """ #Difficulty: Medium #45 / 45 test cases passed. #Runtime: 424 ms #Memory Usage: 18.4 MB #Runtime: 424 ms, faster than 86.48% of Python3 online submissions for Minimum Number of Arrows to Burst Balloons. #Memory Usage: 18.4 MB, less than 97.86% of Python3 online submissions for Minimum Number of Arrows to Burst Balloons. class Solution: def findMinArrowShots(self, points: List[List[int]]) -> int: i = 0 length = len(points) points.sort(key=lambda points : points[1]) while True: j = i + 1 if j >= length: return length if points[j][0] <= points[i][1]: points.pop(j) i -= 1 length -= 1 i += 1
2,232
748
import logging from pytorch_lightning.callbacks.base import Callback __all__ = ["Speed"] logger = logging.getLogger(__name__) class Speed(Callback): r""" Training speed callback, require 'simple' or 'advanced' profiler. """ def on_train_batch_end( self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx ): legacy_metrics = ( trainer.logger_connector.cached_results.legacy_batch_log_metrics ) legacy_metrics["iter"] = trainer.global_step legacy_metrics["epoch"] = trainer.current_epoch if not self.__has_profiler(trainer): # if not profiler provided, skip speed and batch_time. return # get training one batch time run_training_batch_time = trainer.profiler.recorded_durations[ "run_training_batch" ][-1] if hasattr(trainer.datamodule, "batch_size"): total_batch_size = ( trainer.datamodule.batch_size * trainer.world_size ) legacy_metrics["speed"] = ( 1.0 * total_batch_size / run_training_batch_time ) else: legacy_metrics["batch_time"] = run_training_batch_time def on_train_epoch_end(self, trainer, pl_module, *args, **kwargs): if not self.__has_profiler(trainer): return run_training_epoch_time = trainer.profiler.recorded_durations[ "run_training_epoch" ] if len(run_training_epoch_time) > 0 and hasattr( trainer.logger, "log_metrics" ): epoch_time = {"epoch_time": run_training_epoch_time[-1]} trainer.logger.log_metrics(epoch_time, step=trainer.current_epoch) def __has_profiler(self, trainer): return hasattr(trainer.profiler, "recorded_durations")
1,844
584
from typing import Optional, List from abc import ABC, abstractmethod from spectacles.client import LookerClient from spectacles.lookml import Project, Model, Dimension from spectacles.select import is_selected from spectacles.exceptions import LookMlNotFound class Validator(ABC): # pragma: no cover """Defines abstract base interface for validators. Not intended to be used directly, only inherited. Attributes: client: Looker API client. """ def __init__(self, client: LookerClient, project: str): self.client = client self.project = Project(project, models=[]) @abstractmethod def validate(self): raise NotImplementedError def build_project( self, selectors: Optional[List[str]] = None, exclusions: Optional[List[str]] = None, build_dimensions: bool = False, ) -> None: """Creates an object representation of the project's LookML. Args: selectors: List of selector strings in 'model_name/explore_name' format. The '*' wildcard selects all models or explores. For instance, 'model_name/*' would select all explores in the 'model_name' model. """ # Assign default values for selectors and exclusions if selectors is None: selectors = ["*/*"] if exclusions is None: exclusions = [] all_models = [ Model.from_json(model) for model in self.client.get_lookml_models( fields=["name", "project_name", "explores"] ) ] project_models = [ model for model in all_models if model.project_name == self.project.name ] if not project_models: raise LookMlNotFound( name="project-models-not-found", title="No configured models found for the specified project.", detail=( f"Go to {self.client.base_url}/projects and confirm " "a) at least one model exists for the project and " "b) it has an active configuration." 
), ) for model in project_models: model.explores = [ explore for explore in model.explores if is_selected(model.name, explore.name, selectors, exclusions) ] if build_dimensions: for explore in model.explores: dimensions_json = self.client.get_lookml_dimensions( model.name, explore.name ) for dimension_json in dimensions_json: dimension = Dimension.from_json( dimension_json, model.name, explore.name ) dimension.url = self.client.base_url + dimension.url if not dimension.ignore: explore.add_dimension(dimension) self.project.models = [ model for model in project_models if len(model.explores) > 0 ]
3,158
779
''' Created on Apr 18, 2018 @author: msanchez ''' from scraper.RequestScraper import RequestScraper from scraper.HTMLFilter import HTMLFilter from scraper.NewsFilter import NewsFilter from scraper.utilities.WebUtilities import WebUtilities class Scraper(object): ''' Full scrap operation. Downloads the request with an URL. Checks the HTTP status code. In case it's correct, proceeds with the scrap & filter operation. ''' def __init__(self): ''' Constructor ''' def scrap(self): web = self.__download() result = list() if(200 == web.status_code): scraper = RequestScraper(web) html_news_tags = scraper.scrap_news() cleaned_tags = self.__clean(html_news_tags) result = self.__filter(cleaned_tags) else: print("There was an error on download operation. Status code: ", str(web.status_code)) return result def __download(self): downloader = WebUtilities() return downloader.download("https://www.heraldo.es/") def __clean(self, html_tags): tag_filter = HTMLFilter(html_tags) return tag_filter.filter() def __filter(self, unfiltered_tags): matcher = NewsFilter(unfiltered_tags) return matcher.search()
1,330
388
from django.db import models from django.conf import settings from django.template import Context, Template from cabot.cabotapp.alert import AlertPlugin, AlertPluginUserData from os import environ as env import requests pushover_alert_url = "https://api.pushover.net/1/messages.json" pushover_template = "Service {{ service.name }} {% if service.overall_status == service.PASSING_STATUS %}is back to normal{% else %}reporting {{ service.overall_status }} status{% endif %}: {{ scheme }}://{{ host }}{% url 'service' pk=service.id %}." class PushoverAlert(AlertPlugin): name = "Pushover" author = "Daniel Nelson" def send_alert(self, service, users, duty_officers): # Pushover handles repeat alerts, so we can skip them if service.overall_status == service.old_overall_status: return for u in users: alert = True priority = 1 try: data = AlertPluginUserData.objects.get(user=u, title=PushoverAlertUserData.name) except: pass if service.overall_status == service.WARNING_STATUS: if not data.alert_on_warn: alert = False priority = 0 elif service.overall_status == service.ERROR_STATUS: priority = 1 elif service.overall_status == service.CRITICAL_STATUS: priority = 2 elif service.overall_status == service.PASSING_STATUS: priority = 0 if service.old_overall_status == service.CRITICAL_STATUS: # cancel the recurring crit pass else: # something weird happened alert = False if not alert: return # now let's send c = Context({ 'service': service, 'host': settings.WWW_HTTP_HOST, 'scheme': settings.WWW_SCHEME, 'jenkins_api': settings.JENKINS_API, }) message = Template(pushover_template).render(c) self._send_pushover_alert(message, key=data.key, priority=priority) def _send_pushover_alert(self, message, key, priority=0): payload = { 'token':env['PUSHOVER_TOKEN'], 'user': key, 'priority': priority, 'title': 'Cabot ALERT', 'message': message, } if priority == 2: payload['retry'] = 60 payload['expire'] = 3600 r = requests.post(pushover_alert_url, data=payload) class 
PushoverAlertUserData(AlertPluginUserData): name = "Pushover Plugin" key = models.CharField(max_length=32, blank=False, verbose_name="User/Group Key") alert_on_warn = models.BooleanField(default=False)
2,877
787
# -*- coding: utf-8 -*- from django import template from mezzanine.conf import settings from mezzanine_faq.models import FaqPage register = template.Library() @register.inclusion_tag('includes/faqlist.html') def faq_list(**kwargs): page = FaqPage.objects.get(**kwargs) return { 'page': page, 'faq_questions': page.faqquestion_set.all(), 'MEDIA_URL': settings.MEDIA_URL, } @register.inclusion_tag('includes/faqlist.html') def faq_last(**kwargs): page = FaqPage.objects.get(**kwargs) return { 'page': page, 'faq_questions': page.faqquestion_set.all().order_by('-id')[:1], 'MEDIA_URL': settings.MEDIA_URL, }
683
249
""" https://leetcode.com/problems/diameter-of-binary-tree/ Given a binary tree, you need to compute the length of the diameter of the tree. The diameter of a binary tree is the length of the longest path between any two nodes in a tree. This path may or may not pass through the root. Example: Given a binary tree 1 / \ 2 3 / \ 4 5 Return 3, which is the length of the path [4,2,1,3] or [5,2,1,3]. Note: The length of path between two nodes is represented by the number of edges between them. """ # Thanks to the solution provided by the problem. # time complexity: O(n), space complexity: O(1) # Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right class Solution: def diameterOfBinaryTree(self, root: TreeNode) -> int: self.long = 1 self.dfs(root) return self.long - 1 def dfs(self, root: TreeNode) -> int: if root is None: return 0 left = self.dfs(root.left) right = self.dfs(root.right) self.long = max(self.long, left + right + 1) return max(left, right) + 1
1,234
383
#!/usr/bin/env python3 """ Quick script to read all training schools from data file and write them out again to e.g. update the formatting. """ import argparse from hsf_website_helpers.events.event import EventDatabase from hsf_website_helpers.util.cli import add_website_home_option def get_parser() -> argparse.ArgumentParser: d = ( "Quick script to read all training schools from data file and write " "them out again to e.g. update the formatting." ) parser = argparse.ArgumentParser(description=d) add_website_home_option(parser) return parser if __name__ == "__main__": parser = get_parser() args = parser.parse_args() path = args.home / "_data" / "training-schools.yml" if path.is_file(): edb = EventDatabase.from_file(path) print(f"Loaded {len(edb.events)} events from database.") else: print(f"Did not find database at {path}. Initializing empty one.") edb = EventDatabase() edb.write(path) print( "Reformated database. Please commit and submit a PR to add it to " "the webpage." )
1,114
339
# Routing for the style-tag REST endpoints.
#
# SharedAPIRootRouter aggregates registrations from several apps into one
# API root; registering here exposes these viewsets on the shared API.
from clothstream.lib.rest import SharedAPIRootRouter
from .views import ItemStyleTagCreate, StyleTagList

router = SharedAPIRootRouter()
# NOTE(review): `base_name` is the pre-DRF-3.9 spelling of `basename`;
# presumably this project pins an older DRF release — confirm before upgrading.
router.register(r'styletag-item/create', ItemStyleTagCreate, base_name='itemstyletag-create')
router.register(r'styletags', StyleTagList)
275
82
# a module that wraps some of the S3 commands
import boto3
from botocore.exceptions import ClientError
from boto3.s3.transfer import S3Transfer
import re
import os


# check for existance of bucket
def list_bucket(bucket_name, region):
    # Lists (and prints) every object key in the bucket, returning the keys.
    # Errors are printed rather than raised, so a partial/empty list may be
    # returned on failure.
    s3 = boto3.resource('s3', region)
    bucket = s3.Bucket(bucket_name)
    object_list = []
    try:
        for key in bucket.objects.all():
            print(key.key)
            object_list.append(key.key)
    except ClientError as e:
        #print('code: {}, msg: {}, op name: {}'.format([
        #    e.error_code, e.error_message, e.operation_name]))
        #print(e.msg)
        print(str(e))
        print(e.response)
    except Exception as e:
        # other response Error keys: Code, Message, BucketName
        # NOTE(review): a generic Exception may not carry a `.response`
        # attribute — these prints could themselves raise; confirm intent.
        print(e.response['Error']['Code'])
        print(str(e))
        print(e.response)
        print(e.response['ResponseMetadata']['HTTPStatusCode'])
    return object_list


# get list of bucket contents
def get_bucket_list(bucket_name, region):
    # Like list_bucket, but silent and without error handling: any
    # ClientError propagates to the caller.
    s3 = boto3.resource('s3', region)
    bucket = s3.Bucket(bucket_name)
    object_list = []
    for key in bucket.objects.all():
        object_list.append(key.key)
    return object_list


# check bucket exists (efficient version)
# NOTE: s3 bucket name space is for all AWS users
# therefore need to also check that have rights to read & write (+list)
def bucket_exists(bucket, region):
    # Returns True unless HEAD on the bucket fails with a 404.
    # NOTE(review): a 403 (bucket owned by another account) also yields True.
    s3 = boto3.resource('s3', region)
    exists = True
    try:
        s3.meta.client.head_bucket(Bucket=bucket)
    except ClientError as e:
        # If a client error is thrown, then check that it was a 404 error.
        # If it was a 404 error, then the bucket does not exist.
        error_code = int(e.response['Error']['Code'])
        if error_code == 404:
            exists = False
    return exists


#upload a file
def upload_file(bucket, region, source_file, dest_file):
    # Managed (multipart-capable) upload of a local file to the bucket.
    client = boto3.client('s3', region)
    transfer = S3Transfer(client)
    transfer.upload_file(source_file, bucket, dest_file)


# determine next unique number
def get_next_id(bucket_name, region, prefix):
    """ determines the next sequential numbering for a given folder prefix
    e.g. prefix is "01092015Tue-"; if a file exists in the folder, then it
    will be of the form "01092015Tue-xxx/somefilename.ext" - where xxx is
    some number"; if there is such a file, then next folder will be
    01092015Tue-yyy - where yyy = xxx + 1; otherwise, next folder is
    01092015Tue-1

    Args:
        prefix: a string that represents the absolute folder name

    Returns:
        a string that represents the next folder name in the sequence
    """
    # added () to get group
    pattern = re.compile(prefix + '([0-9]+)/')
    ids = get_bucket_list(bucket_name, region)
    next_num = 1
    for name in ids:
        match = pattern.match(name)
        if match:
            # there is only one bracketed group - the number
            next_num = max(int(match.groups()[0]) + 1, next_num)
    result = prefix + str(next_num)
    # want to strip out any "directories" in path & just return id
    return result.split('/')[-1]


# return a list of bucket objects that match a given prefix
#TODO remove default bucket name
def list_by_prefix(bucket_name, region, prefix=''):
    """ returns a list of names of bucket objects that start with a given
    prefix

    Args:
        bucket_name: string - the name of the s3 bucket
        prefix: string - the prefix of the name (key) of the bucket objects

    Returns:
        a list of objects whose name (key) starts with the given prefix
    """
    s3 = boto3.resource('s3', region)
    bucket = s3.Bucket(bucket_name)
    names = []
    # osi - object summary iterator
    for osi in bucket.objects.filter(
            Prefix=prefix):
        name = osi.key
        names.append(name)
    return names


# determine if a given object key exists in the bucket
def key_exists(bucket_name, region, key):
    """ indicates if a key (object name) is in the bucket

    Args:
        bucket_name: string - the name of the s3 bucket
        key: string - the name of the object key (file-name)

    Returns:
        True if key in bucket; False otherwise
    """
    # Uses the key itself as the prefix; an exact match must appear in the
    # listing for the key to exist.
    if key in list_by_prefix(bucket_name, region, key):
        return True
    return False


def get_timing_info(bucket_name, region, prefix):
    """ gets the timing information for jobs - labelled start & finish

    Returns:
        a 3-tuple of (finish time, elapsed time string, task name string)
    """
    s3 = boto3.resource('s3', region)
    bucket = s3.Bucket(bucket_name)
    start_dict = {}
    finish_dict = {}
    # osi - object summary iterator
    # Collect last-modified timestamps for objects whose key contains
    # 'start' or 'finish'.
    for osi in bucket.objects.filter(
            Prefix=prefix):
        name = osi.key
        last_mod = osi.last_modified
        if 'start' in name:
            start_dict[name] = last_mod
        if 'finish' in name:
            finish_dict[name] = last_mod
    results = []
    # Pair each 'finish' marker with its matching 'start' marker and compute
    # the elapsed wall time; derive the task name from the key's basename.
    for name, finish_time in finish_dict.items():
        start_name = name.replace('finish', 'start')
        if start_name in start_dict:
            elapsed = str(finish_time - start_dict[start_name])
            results.append((finish_time, elapsed,
                            name.replace('finish', 'task').split('/')[-1].
                            split('.')[0]))
    return sorted(results)


# download files matching regex
def download_files(bucket_name, region, prefix='', suffix='', dest_dir=''):
    """ downloads files who's path & name match given prefix & suffix to
    specified dir

    Args:
        bucket_name: the name of the s3 bucket to download from
        prefix: string - start of full path the s3 file
        suffix: string - the end characters of the file (e.g. '.vcf')
        dest_dir: string - the (local) directory to which the files are
            downloaded
    """
    # TODO better to raise ValueError??
    assert (prefix or suffix), 'must have a value for either prefix or suffix'
    # get rid of '/' at end of dir if exists
    if dest_dir.endswith('/'):
        dest_dir = dest_dir[:-1]
    # create directory in case not exist
    if dest_dir:
        os.makedirs(dest_dir, exist_ok=True)
    else:
        # no dir provided - default to current dir
        dest_dir = '.'
    names = []  # NOTE(review): unused local — candidate for removal
    client = boto3.client('s3', region)
    transfer = S3Transfer(client)
    for name in list_by_prefix(bucket_name, region, prefix):
        if name.endswith(suffix):
            # remove any path from the file name
            fname = name.split('/').pop()
            # download the file
            transfer.download_file(bucket_name, name,
                                   dest_dir + '/' + fname)
6,652
2,010
"""This module generates and formats instructional messages about fixing Markdown code blocks.""" import logging from typing import Optional from bot.exts.info.codeblock import _parsing log = logging.getLogger(__name__) _EXAMPLE_PY = "{lang}\nprint('Hello, world!')" # Make sure to escape any Markdown symbols here. _EXAMPLE_CODE_BLOCKS = ( "\\`\\`\\`{content}\n\\`\\`\\`\n\n" "**This will result in the following:**\n" "```{content}```" ) def _get_example(language: str) -> str: """Return an example of a correct code block using `language` for syntax highlighting.""" # Determine the example code to put in the code block based on the language specifier. if language.lower() in _parsing.PY_LANG_CODES: log.trace(f"Code block has a Python language specifier `{language}`.") content = _EXAMPLE_PY.format(lang=language) elif language: log.trace(f"Code block has a foreign language specifier `{language}`.") # It's not feasible to determine what would be a valid example for other languages. content = f"{language}\n..." else: log.trace("Code block has no language specifier.") content = "\nHello, world!" return _EXAMPLE_CODE_BLOCKS.format(content=content) def _get_bad_ticks_message(code_block: _parsing.CodeBlock) -> Optional[str]: """Return instructions on using the correct ticks for `code_block`.""" log.trace("Creating instructions for incorrect code block ticks.") valid_ticks = f"\\{_parsing.BACKTICK}" * 3 instructions = ( "It looks like you are trying to paste code into this channel.\n\n" "You seem to be using the wrong symbols to indicate where the code block should start. " f"The correct symbols would be {valid_ticks}, not `{code_block.tick * 3}`." ) log.trace("Check if the bad ticks code block also has issues with the language specifier.") addition_msg = _get_bad_lang_message(code_block.content) if not addition_msg and not code_block.language: addition_msg = _get_no_lang_message(code_block.content) # Combine the back ticks message with the language specifier message. 
The latter will # already have an example code block. if addition_msg: log.trace("Language specifier issue found; appending additional instructions.") # The first line has double newlines which are not desirable when appending the msg. addition_msg = addition_msg.replace("\n\n", " ", 1) # Make the first character of the addition lower case. instructions += "\n\nFurthermore, " + addition_msg[0].lower() + addition_msg[1:] else: log.trace("No issues with the language specifier found.") example_blocks = _get_example(code_block.language) instructions += f"\n\n**Here is an example of how it should look:**\n{example_blocks}" return instructions def _get_no_ticks_message(content: str) -> Optional[str]: """If `content` is Python/REPL code, return instructions on using code blocks.""" log.trace("Creating instructions for a missing code block.") if _parsing.is_python_code(content): example_blocks = _get_example("python") return ( "It looks like you're trying to paste code into this channel.\n\n" "Discord has support for Markdown, which allows you to post code with full " "syntax highlighting. Please use these whenever you paste code, as this " "helps improve the legibility and makes it easier for us to help you.\n\n" f"**To do this, use the following method:**\n{example_blocks}" ) else: log.trace("Aborting missing code block instructions: content is not Python code.") def _get_bad_lang_message(content: str) -> Optional[str]: """ Return instructions on fixing the Python language specifier for a code block. If `code_block` does not have a Python language specifier, return None. If there's nothing wrong with the language specifier, return None. 
""" log.trace("Creating instructions for a poorly specified language.") info = _parsing.parse_bad_language(content) if not info: log.trace("Aborting bad language instructions: language specified isn't Python.") return lines = [] language = info.language if info.has_leading_spaces: log.trace("Language specifier was preceded by a space.") lines.append(f"Make sure there are no spaces between the back ticks and `{language}`.") if not info.has_terminal_newline: log.trace("Language specifier was not followed by a newline.") lines.append( f"Make sure you put your code on a new line following `{language}`. " f"There must not be any spaces after `{language}`." ) if lines: lines = " ".join(lines) example_blocks = _get_example(language) # Note that _get_bad_ticks_message expects the first line to have two newlines. return ( f"It looks like you incorrectly specified a language for your code block.\n\n{lines}" f"\n\n**Here is an example of how it should look:**\n{example_blocks}" ) else: log.trace("Nothing wrong with the language specifier; no instructions to return.") def _get_no_lang_message(content: str) -> Optional[str]: """ Return instructions on specifying a language for a code block. If `content` is not valid Python or Python REPL code, return None. """ log.trace("Creating instructions for a missing language.") if _parsing.is_python_code(content): example_blocks = _get_example("python") # Note that _get_bad_ticks_message expects the first line to have two newlines. return ( "It looks like you pasted Python code without syntax highlighting.\n\n" "Please use syntax highlighting to improve the legibility of your code and make " "it easier for us to help you.\n\n" f"**To do this, use the following method:**\n{example_blocks}" ) else: log.trace("Aborting missing language instructions: content is not Python code.") def get_instructions(content: str) -> Optional[str]: """ Parse `content` and return code block formatting instructions if something is wrong. 
Return None if `content` lacks code block formatting issues. """ log.trace("Getting formatting instructions.") blocks = _parsing.find_code_blocks(content) if blocks is None: log.trace("At least one valid code block found; no instructions to return.") return if not blocks: log.trace("No code blocks were found in message.") instructions = _get_no_ticks_message(content) else: log.trace("Searching results for a code block with invalid ticks.") block = next((block for block in blocks if block.tick != _parsing.BACKTICK), None) if block: log.trace("A code block exists but has invalid ticks.") instructions = _get_bad_ticks_message(block) else: log.trace("A code block exists but is missing a language.") block = blocks[0] # Check for a bad language first to avoid parsing content into an AST. instructions = _get_bad_lang_message(block.content) if not instructions: instructions = _get_no_lang_message(block.content) if instructions: instructions += "\nYou can **edit your original message** to correct your code block." return instructions
7,597
2,078
from django.urls import path

from . import views

# URL namespace for reversing, e.g. reverse("app:index").
app_name = "app"

# Each route maps a path to its view; `name` enables URL reversing in
# templates and redirects.
urlpatterns = [
    path('', views.index, name="index"),
    path('posts/', views.posts, name="posts"),
    path('categories/', views.categories, name="categories"),
    path('comments/', views.comments, name="comments"),
    path('users/', views.users, name="users"),
    path('test/', views.test, name="test"),
    path('login/', views.login, name="login"),
    path('logout/', views.logout, name="logout"),
    path('details/', views.details, name="details"),
]
533
167
#!/usr/bin/env python
#
# Copyright (c) 2017 10X Genomics, Inc. All rights reserved.
#
# NOTE(review): this module targets Python 2 (`iteritems`, `xrange` below).

import cellranger.analysis.io as analysis_io
import cellranger.analysis.constants as analysis_constants
import cellranger.h5_constants as h5_constants
import cellranger.io as cr_io
import cellranger.analysis.stats as analysis_stats

import collections
from irlb import irlb
import numpy as np
import os
import tables

# The RUNPCA stage attempts to run the PCA at this threshold, and if that
# fails it reruns at zero. In the event thresholding prevents us from
# returning the requested number of components and we are at this threshold
# value, we throw an exception.
DEFAULT_RUNPCA_THRESHOLD = 2

from sklearn.utils import sparsefuncs


class MatrixRankTooSmallException(Exception):
    # Raised so the RUNPCA stage can retry with min_count_threshold=0.
    pass


# Result bundle returned by run_pca and stored in/loaded from the analysis h5.
PCA = collections.namedtuple('PCA', ['transformed_pca_matrix', 'components', 'variance_explained', 'dispersion', 'features_selected'])


def get_original_columns_used(cols_not_removed, cols_used_after_removal):
    """If a matrix is subset down to only have columns indexed by cols_not_removed, and then is further
    subset to only contain cols_used_after removal, in that order, than this method returns the index of
    which columns in the old matrix correspond the the columns in the new matrix."""
    return [cols_not_removed[x] for x in cols_used_after_removal]


def run_pca(matrix, pca_features=None, pca_bcs=None, n_pca_components=None, random_state=None, min_count_threshold=0):
    """ Run a PCA on the matrix using the IRLBA matrix factorization algorithm.  Prior to the PCA analysis, the
    matrix is modified so that all barcodes/columns have the same counts, and then the counts are transformed
    by a log2(1+X) operation.

    If desired, only a subset of features (e.g. sample rows) can be selected for PCA analysis.  Each feature is ranked
    by its dispersion relative to other features that have a similar mean count.  The top `pca_features` as ranked by
    this method will then be used for the PCA.

    One can also select to subset number of barcodes to use (e.g. sample columns), but in this case they are simply
    randomly sampled.

    Args:
        matrix (CountMatrix): The matrix to perform PCA on.
        pca_features (int): Number of features to subset from matrix and use in PCA. The top pca_features ranked by
            dispersion are used
        pca_bcs (int): Number of barcodes to randomly sample for the matrix.
        n_pca_components (int): How many PCA components should be used.
        random_state (int): The seed for the RNG
        min_count_threshold (int): The minimum sum of each row/column for that row/column to be passed to PCA
            (this filter is prior to any subsetting that occurs).

    Returns:
        A PCA object
    """
    if random_state is None:
        random_state=analysis_constants.RANDOM_STATE

    # NOTE(review): the global numpy seed is fixed at 0 regardless of
    # `random_state`; `random_state` is only forwarded to irlb — confirm
    # that barcode subsampling is intentionally seeded with 0.
    np.random.seed(0)

    # Threshold the rows/columns of matrix, will throw error if an empty matrix results.
    thresholded_matrix, _, thresholded_features = matrix.select_axes_above_threshold(min_count_threshold)

    # If requested, we can subsample some of the barcodes to get a smaller matrix for PCA
    pca_bc_indices = np.arange(thresholded_matrix.bcs_dim)
    if pca_bcs is None:
        pca_bcs = thresholded_matrix.bcs_dim
        pca_bc_indices = np.arange(thresholded_matrix.bcs_dim)
    elif pca_bcs < thresholded_matrix.bcs_dim:
        pca_bc_indices = np.sort(np.random.choice(np.arange(thresholded_matrix.bcs_dim), size=pca_bcs, replace=False))
    elif pca_bcs > thresholded_matrix.bcs_dim:
        msg = ("You requested {} barcodes but the matrix after thresholding only "
               "included {}, so the smaller amount is being used.").format(pca_bcs, thresholded_matrix.bcs_dim)
        print(msg)
        pca_bcs = thresholded_matrix.bcs_dim
        pca_bc_indices = np.arange(thresholded_matrix.bcs_dim)

    # If requested, select fewer features to use by selecting the features with highest normalized dispersion
    if pca_features is None:
        pca_features = thresholded_matrix.features_dim
    elif pca_features > thresholded_matrix.features_dim:
        msg = ("You requested {} features but the matrix after thresholding only included {} features,"
               "so the smaller amount is being used.").format(pca_features, thresholded_matrix.features_dim)
        print(msg)
        pca_features = thresholded_matrix.features_dim

    # Calc mean and variance of counts after normalizing
    # But don't transform to log space, in order to preserve the mean-variance relationship
    m = analysis_stats.normalize_by_umi(thresholded_matrix)
    # Get mean and variance of rows
    (mu, var) = analysis_stats.summarize_columns(m.T)
    dispersion = analysis_stats.get_normalized_dispersion(mu.squeeze(), var.squeeze())  # TODO set number of bins?

    # Keep the `pca_features` features with the highest normalized dispersion.
    pca_feature_indices = np.argsort(dispersion)[-pca_features:]

    # Now determine how many components.
    if n_pca_components is None:
        n_pca_components = analysis_constants.PCA_N_COMPONENTS_DEFAULT

    likely_matrix_rank = min(pca_features, pca_bcs)
    if likely_matrix_rank < n_pca_components:
        if min_count_threshold == DEFAULT_RUNPCA_THRESHOLD:
            # Kick back to run_pca stage so it can retry with no threshold, this is for historical reasons
            raise MatrixRankTooSmallException()
        else:
            print(("There are fewer nonzero features or barcodes ({}) than requested "
                   "PCA components ({}); reducing the number of components.").format(likely_matrix_rank, n_pca_components))
            n_pca_components = likely_matrix_rank

    if (likely_matrix_rank * 0.5) <= float(n_pca_components):
        print("Requested number of PCA components is large relative to the matrix size, an exact approach to matrix factorization may be faster.")

    # Note, after subsetting it is possible some rows/cols in pca_mat have counts below the threshold.
    # However, we are not performing a second thresholding as in practice subsetting is not used and we explain
    # that thresholding occurs prior to subsetting in the doc string.
    pca_mat = thresholded_matrix.select_barcodes(pca_bc_indices).select_features(pca_feature_indices)
    (pca_norm_mat, pca_center, pca_scale) = normalize_and_transpose(pca_mat)
    (u, d, v, _, _) = irlb(pca_norm_mat, n_pca_components, center=pca_center.squeeze(), scale=pca_scale.squeeze(), random_state=random_state)

    # make sure to project the matrix before centering, to avoid densification
    (full_norm_mat, full_center, full_scale) = normalize_and_transpose(matrix)
    sparsefuncs.inplace_column_scale(full_norm_mat, 1 / full_scale.squeeze())  # can have some zeros here

    # Get a coordinate map so we know which columns in the old matrix correspond to columns in the new
    org_cols_used = get_original_columns_used(thresholded_features, pca_feature_indices)
    transformed_irlba_matrix = full_norm_mat[:,org_cols_used].dot(v) - (full_center / full_scale)[:,org_cols_used].dot(v)
    irlba_components = np.zeros((n_pca_components, matrix.features_dim))
    irlba_components[:,org_cols_used] = v.T

    # calc proportion of variance explained
    variance_sum = len(pca_feature_indices)  # each feature has variance=1, mean=0 after normalization
    variance_explained = np.square(d)/((len(pca_bc_indices)-1) * variance_sum)
    features_selected = np.array([f.id for f in matrix.feature_ref.feature_defs])[org_cols_used]

    # Now project back up the dispersion to return.
    # Features dropped by thresholding report NaN dispersion.
    full_dispersion = np.empty(matrix.features_dim)
    full_dispersion[:] = np.nan
    full_dispersion[thresholded_features] = dispersion

    # sanity check dimensions
    assert transformed_irlba_matrix.shape == (matrix.bcs_dim, n_pca_components)
    assert irlba_components.shape == (n_pca_components, matrix.features_dim)
    assert variance_explained.shape == (n_pca_components,)

    return PCA(transformed_irlba_matrix, irlba_components, variance_explained, full_dispersion, features_selected)


def normalize_and_transpose(matrix):
    """Normalize counts per barcode, log2(1+X) transform, transpose, and
    return (matrix, center, scale) for downstream centering/scaling."""
    # NOTE(review): tocsc() returns a new matrix and the result is discarded
    # here — presumably intended as a format hint; confirm.
    matrix.tocsc()

    m = analysis_stats.normalize_by_umi(matrix)

    # Use log counts
    m.data = np.log2(1 + m.data)

    # Transpose
    m = m.T

    # compute centering (mean) and scaling (stdev)
    (c,v) = analysis_stats.summarize_columns(m)

    # TODO: Inputs to this function shouldn't have zero variance columns
    v[np.where(v == 0.0)] = 1.0

    s = np.sqrt(v)
    return (m, c, s)


def get_irlb_mem_gb_from_matrix_dim(nonzero_entries):
    """Estimate the memory (GB) needed by IRLBA from the nonzero count."""
    irlba_mem_gb = round(np.ceil(1.0 * nonzero_entries / analysis_constants.NUM_IRLB_MATRIX_ENTRIES_PER_MEM_GB)) + analysis_constants.IRLB_BASE_MEM_GB
    return h5_constants.MATRIX_MEM_GB_MULTIPLIER * max(h5_constants.MIN_MEM_GB, irlba_mem_gb)


def save_pca_csv(pca_map, matrix, base_dir):
    # Convenience wrapper extracting barcodes/features from a CountMatrix.
    save_pca_csv_with_bc_feature(pca_map, matrix.bcs, matrix.feature_ref.feature_defs, base_dir)


def save_pca_csv_with_bc_feature(pca_map, barcodes, features, base_dir):
    """Write projection/components/variance/dispersion/features CSVs for
    each entry of `pca_map` ({n_components: PCA}) under `base_dir`."""
    for n_components, pca in pca_map.iteritems():
        n_components_dir = os.path.join(base_dir, '%d_components' % n_components)
        cr_io.makedirs(n_components_dir, allow_existing=True)

        matrix_fn = os.path.join(n_components_dir, 'projection.csv')
        n_columns = pca.transformed_pca_matrix.shape[1]
        assert n_columns <= n_components
        matrix_header = ['Barcode'] + ['PC-%d' % (i+1) for i in xrange(n_columns)]
        analysis_io.save_matrix_csv(matrix_fn, pca.transformed_pca_matrix, matrix_header, barcodes)

        # FBPCA presently provides 0-sized entries for the following PCA() member variables.
        # This allows us to distinguish FBPCA from IRLBA, and also avoids weird empty files.
        if pca.components.size > 0:
            components_fn = os.path.join(n_components_dir, 'components.csv')
            components_header = ['PC'] + [f.id for f in features]
            analysis_io.save_matrix_csv(components_fn, pca.components, components_header,
                                        range(1, n_components+1))

        if pca.variance_explained.size > 0:
            variance_fn = os.path.join(n_components_dir, 'variance.csv')
            variance_header = ['PC','Proportion.Variance.Explained']
            analysis_io.save_matrix_csv(variance_fn, pca.variance_explained, variance_header,
                                        range(1, n_components+1))

        if pca.dispersion.size > 0:
            dispersion_fn = os.path.join(n_components_dir, 'dispersion.csv')
            dispersion_header = ['Feature','Normalized.Dispersion']
            analysis_io.save_matrix_csv(dispersion_fn, pca.dispersion, dispersion_header,
                                        [f.id for f in features])

        if pca.features_selected.size > 0:
            features_fn = os.path.join(n_components_dir, 'features_selected.csv')
            # TODO: there are two columns here, but only 1 entry in the header...BAD
            features_header = ['Feature']
            analysis_io.save_matrix_csv(features_fn, pca.features_selected, features_header,
                                        range(1, len(pca.features_selected)+1))


def save_pca_h5(pca_map, f):
    """Save every PCA result in `pca_map` into an open analysis h5 file."""
    group = f.create_group(f.root, analysis_constants.ANALYSIS_H5_PCA_GROUP)
    for n_components, pca in pca_map.iteritems():
        analysis_io.save_h5(f, group, str(n_components), pca)


def load_pca_from_h5(filename):
    """ Load just the PCA info from an analysis h5 """
    with tables.open_file(filename, 'r') as f:
        group = f.root._v_groups[analysis_constants.ANALYSIS_H5_PCA_GROUP]
        # Just take the first PCA object, assuming we never have multiple
        for _, pca in analysis_io.load_h5_iter(group, PCA):
            return pca
11,775
3,717
"""Package interface: re-export the ETL pipeline components."""
from .loader import Loader
from .extractor import Extractor
from .transformer import Transformer
from .orchestrator import Orchestrator

# Explicit public API for `from <package> import *`.
__all__ = [
    "Loader",
    "Extractor",
    "Transformer",
    "Orchestrator",
]
221
69
# As part of a quiz game, this module defines a class for multiple-choice
# questions.  A question has a question text, a list of answer alternatives
# (each alternative is a string), and a number saying which alternative is
# correct.  __str__ returns the question text plus numbered alternatives in
# an easily readable format, and sjekk_svar checks whether the (1-based)
# answer a player gives matches the correct alternative.


class Question:
    def __init__(self, question: str, correct: int, alt: list):
        """Create a multiple-choice question.

        Args:
            question: the question text.
            correct: 0-based index of the correct alternative.  Coerced with
                int() because les_fil() reads this field from a text file as
                a string; comparing a string against an int in sjekk_svar()
                would otherwise always be False.
            alt: list of answer alternative strings.
        """
        self.question = question
        self.alt = alt
        self.correct = int(correct)
        # Per-question scores for the two players, updated by ask().
        # (Previously these were never initialized, so a correct answer
        # raised AttributeError.)
        self.poeng_1 = 0
        self.poeng_2 = 0

    def sjekk_svar(self, answer):
        """Return True if the player's 1-based answer is correct."""
        # Players answer 1-based; self.correct is 0-based, hence the -1.
        return answer - 1 == self.correct

    def korrekt_svar_tekst(self):
        """Print the correct alternative."""
        print(f'Rett svar er: {self.alt[self.correct]} \n')

    def ask(self):
        """Print the question, read both players' answers, and score them."""
        print(self)  # uses __str__ to show question and alternatives
        inn_1 = input('Svar frå spiller 1: ')
        inn_2 = input('Svar frå spiller 2: ')
        # Non-numeric input counts as a wrong answer instead of crashing.
        try:
            correct_1 = self.sjekk_svar(int(inn_1))
        except ValueError:
            correct_1 = False
        try:
            correct_2 = self.sjekk_svar(int(inn_2))
        except ValueError:
            correct_2 = False
        print('Spiller 1: ' + ('Riktig svar!' if correct_1 else 'Feil svar'))
        if correct_1:
            self.poeng_1 += 1
        print('Spiller 2: ' + ('Riktig svar!' if correct_2 else 'Feil svar'))
        if correct_2:
            self.poeng_2 += 1
        self.korrekt_svar_tekst()

    def __str__(self):
        # Build "1. alt \n2. alt \n..." below the question text.
        questions_str = ''
        for i in range(0, len(self.alt)):
            questions_str += f'{i + 1}. {self.alt[i]} \n'
        return f'{self.question} \n{questions_str}'


def les_fil():
    """Read questions from 'sporsmaalsfil.txt'.

    Each line has the format  question:correct_index:[alt1, alt2, ...]
    (the correct index is presumably 0-based — see sjekk_svar).

    Returns:
        A list of Question objects.
    """
    with open('sporsmaalsfil.txt') as sporsmaal:
        liste_retur = []
        for linje in sporsmaal:
            sporsmaal_split = linje.split(':')
            # Strip the surrounding brackets from "[a, b, c]" and split into
            # the list of alternatives.
            sporsmaal_split[2] = sporsmaal_split[2].strip()[1:-1].split(', ')
            liste_retur.append(
                Question(sporsmaal_split[0], sporsmaal_split[1], sporsmaal_split[2]))
        return liste_retur


if __name__ == "__main__":
    for a in les_fil():
        # ask() prints everything itself and returns None, so it is not
        # wrapped in print() (which previously printed a stray "None").
        a.ask()
3,182
1,139
"""Start a local webserver to report the status of an arcyd instance.""" # ============================================================================= # CONTENTS # ----------------------------------------------------------------------------- # abdcmd_instaweb # # Public Functions: # getFromfilePrefixChars # setupParser # process # # ----------------------------------------------------------------------------- # (this contents block is generated, edits will be lost) # ============================================================================= from __future__ import absolute_import import BaseHTTPServer import os import abdcmd_arcydstatushtml import abdcmd_repostatushtml def getFromfilePrefixChars(): return None def setupParser(parser): parser.add_argument( '--port', metavar="PORT", type=int, default=8000, help="port to serve pages on") parser.add_argument( '--report-file', metavar="REPORTFILE", type=str, required=True, help="path to the arcyd report file to render") parser.add_argument( '--repo-file-dir', metavar="REPOFILEDIR", type=str, required=True, help="path to the repo files to render") class _NotFoundError(Exception): pass class _RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): def __init__(self, instaweb_args, *args): self._instaweb_args = instaweb_args self.path = None # for pychecker self.wfile = None # for pychecker BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args) def do_GET(self): try: content = self._get_content() except _NotFoundError: self.send_response(404) self.send_header("Content-type", "text/html") self.end_headers() self.wfile.write("<html><body><h1>404</h1></body></html>") self.wfile.close() else: self.send_response(200) self.send_header("Content-type", "text/html") self.end_headers() self.wfile.write(content) self.wfile.close() def _get_content(self): args = self._instaweb_args if self.path == '/': content = abdcmd_arcydstatushtml.render_content( args.report_file, '') elif self.path.lower().endswith('favicon.ico'): raise 
_NotFoundError('could not find favicon') else: relative_path = self.path.lstrip('/') dir_path = os.path.join(args.repo_file_dir, relative_path) # XXX: this is fragile, will go away once arcyd folder # layout is standardized repo_path = dir_path + '.try' branches_path = dir_path + '.ok' content = abdcmd_repostatushtml.render_content( repo_path, branches_path) return content def _request_handler_factory(instaweb_args): def factory(*args): return _RequestHandler(instaweb_args, *args) return factory def process(args): # start a webserver server_address = ('', args.port) factory = _request_handler_factory(args) httpd = BaseHTTPServer.HTTPServer(server_address, factory) httpd.serve_forever() # ----------------------------------------------------------------------------- # Copyright (C) 2013-2014 Bloomberg Finance L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------ END-OF-FILE ----------------------------------
4,047
1,147
# (c) Copyright IBM Corp. 2021 # (c) Copyright Instana Inc. 2017 """ This module contains the classes that represents spans. InstanaSpan - the OpenTracing based span used during tracing When an InstanaSpan is finished, it is converted into either an SDKSpan or RegisteredSpan depending on type. BaseSpan: Base class containing the commonalities for the two descendants - SDKSpan: Class that represents an SDK type span - RegisteredSpan: Class that represents a Registered type span """ import six from basictracer.span import BasicSpan import opentracing.ext.tags as ot_tags from .log import logger from .util import DictionaryOfStan class InstanaSpan(BasicSpan): stack = None synthetic = False def mark_as_errored(self, tags=None): """ Mark this span as errored. @param tags: optional tags to add to the span """ try: ec = self.tags.get('ec', 0) self.set_tag('ec', ec + 1) if tags is not None and isinstance(tags, dict): for key in tags: self.set_tag(key, tags[key]) except Exception: logger.debug('span.mark_as_errored', exc_info=True) def assure_errored(self): """ Make sure that this span is marked as errored. @return: None """ try: ec = self.tags.get('ec', None) if ec is None or ec == 0: self.set_tag('ec', 1) except Exception: logger.debug('span.assure_errored', exc_info=True) def log_exception(self, exc): """ Log an exception onto this span. This will log pertinent info from the exception and assure that this span is marked as errored. 
@param e: the exception to log """ try: message = "" self.mark_as_errored() if hasattr(exc, '__str__') and len(str(exc)) > 0: message = str(exc) elif hasattr(exc, 'message') and exc.message is not None: message = exc.message else: message = repr(exc) if self.operation_name in ['rpc-server', 'rpc-client']: self.set_tag('rpc.error', message) elif self.operation_name == "mysql": self.set_tag('mysql.error', message) elif self.operation_name == "postgres": self.set_tag('pg.error', message) elif self.operation_name in RegisteredSpan.HTTP_SPANS: self.set_tag('http.error', message) elif self.operation_name in ["celery-client", "celery-worker"]: self.set_tag('error', message) elif self.operation_name == "sqlalchemy": self.set_tag('sqlalchemy.err', message) else: self.log_kv({'message': message}) except Exception: logger.debug("span.log_exception", exc_info=True) raise class BaseSpan(object): sy = None def __str__(self): return "BaseSpan(%s)" % self.__dict__.__str__() def __repr__(self): return self.__dict__.__str__() def __init__(self, span, source, service_name, **kwargs): # pylint: disable=invalid-name self.t = span.context.trace_id self.p = span.parent_id self.s = span.context.span_id self.ts = int(round(span.start_time * 1000)) self.d = int(round(span.duration * 1000)) self.f = source self.ec = span.tags.pop('ec', None) self.data = DictionaryOfStan() self.stack = span.stack if span.synthetic is True: self.sy = span.synthetic self.__dict__.update(kwargs) def _validate_tags(self, tags): """ This method will loop through a set of tags to validate each key and value. 
:param tags: dict of tags :return: dict - a filtered set of tags """ filtered_tags = DictionaryOfStan() for key in tags.keys(): validated_key, validated_value = self._validate_tag(key, tags[key]) if validated_key is not None and validated_value is not None: filtered_tags[validated_key] = validated_value return filtered_tags def _validate_tag(self, key, value): """ This method will assure that <key> and <value> are valid to set as a tag. If <value> fails the check, an attempt will be made to convert it into something useful. On check failure, this method will return None values indicating that the tag is not valid and could not be converted into something useful :param key: The tag key :param value: The tag value :return: Tuple (key, value) """ validated_key = None validated_value = None try: # Tag keys must be some type of text or string type if isinstance(key, (six.text_type, six.string_types)): validated_key = key[0:1024] # Max key length of 1024 characters if isinstance(value, (bool, float, int, list, dict, six.text_type, six.string_types)): validated_value = value else: validated_value = self._convert_tag_value(value) else: logger.debug("(non-fatal) tag names must be strings. tag discarded for %s", type(key)) except Exception: logger.debug("instana.span._validate_tag: ", exc_info=True) return (validated_key, validated_value) def _convert_tag_value(self, value): final_value = None try: final_value = repr(value) except Exception: final_value = "(non-fatal) span.set_tag: values must be one of these types: bool, float, int, list, " \ "set, str or alternatively support 'repr'. 
tag discarded" logger.debug(final_value, exc_info=True) return None return final_value class SDKSpan(BaseSpan): ENTRY_KIND = ["entry", "server", "consumer"] EXIT_KIND = ["exit", "client", "producer"] def __init__(self, span, source, service_name, **kwargs): # pylint: disable=invalid-name super(SDKSpan, self).__init__(span, source, service_name, **kwargs) span_kind = self.get_span_kind(span) self.n = "sdk" self.k = span_kind[1] if self.k == 1 and service_name is not None: self.data["service"] = service_name self.data["sdk"]["name"] = span.operation_name self.data["sdk"]["type"] = span_kind[0] self.data["sdk"]["custom"]["tags"] = self._validate_tags(span.tags) if span.logs is not None and len(span.logs) > 0: logs = DictionaryOfStan() for log in span.logs: filtered_key_values = self._validate_tags(log.key_values) if len(filtered_key_values.keys()) > 0: logs[repr(log.timestamp)] = filtered_key_values self.data["sdk"]["custom"]["logs"] = logs if "arguments" in span.tags: self.data['sdk']['arguments'] = span.tags["arguments"] if "return" in span.tags: self.data['sdk']['return'] = span.tags["return"] if len(span.context.baggage) > 0: self.data["baggage"] = span.context.baggage def get_span_kind(self, span): """ Will retrieve the `span.kind` tag and return a tuple containing the appropriate string and integer values for the Instana backend :param span: The span to search for the `span.kind` tag :return: Tuple (String, Int) """ kind = ("intermediate", 3) if "span.kind" in span.tags: if span.tags["span.kind"] in self.ENTRY_KIND: kind = ("entry", 1) elif span.tags["span.kind"] in self.EXIT_KIND: kind = ("exit", 2) return kind class RegisteredSpan(BaseSpan): HTTP_SPANS = ("aiohttp-client", "aiohttp-server", "django", "http", "soap", "tornado-client", "tornado-server", "urllib3", "wsgi") EXIT_SPANS = ("aiohttp-client", "boto3", "cassandra", "celery-client", "couchbase", "log", "memcache", "mongo", "mysql", "postgres", "rabbitmq", "redis", "rpc-client", "sqlalchemy", "soap", 
"tornado-client", "urllib3", "pymongo", "gcs", "gcps-producer") ENTRY_SPANS = ("aiohttp-server", "aws.lambda.entry", "celery-worker", "django", "wsgi", "rabbitmq", "rpc-server", "tornado-server", "gcps-consumer") LOCAL_SPANS = ("render") def __init__(self, span, source, service_name, **kwargs): # pylint: disable=invalid-name super(RegisteredSpan, self).__init__(span, source, service_name, **kwargs) self.n = span.operation_name self.k = 1 if span.operation_name in self.ENTRY_SPANS: # entry self._populate_entry_span_data(span) self.data["service"] = service_name elif span.operation_name in self.EXIT_SPANS: self.k = 2 # exit self._populate_exit_span_data(span) elif span.operation_name in self.LOCAL_SPANS: self.k = 3 # intermediate span self._populate_local_span_data(span) if "rabbitmq" in self.data and self.data["rabbitmq"]["sort"] == "publish": self.k = 2 # exit # unify the span operation_name for gcps-producer and gcps-consumer if "gcps" in span.operation_name: self.n = 'gcps' # Store any leftover tags in the custom section if len(span.tags) > 0: self.data["custom"]["tags"] = self._validate_tags(span.tags) def _populate_entry_span_data(self, span): if span.operation_name in self.HTTP_SPANS: self._collect_http_tags(span) elif span.operation_name == "aws.lambda.entry": self.data["lambda"]["arn"] = span.tags.pop('lambda.arn', "Unknown") self.data["lambda"]["alias"] = None self.data["lambda"]["runtime"] = "python" self.data["lambda"]["functionName"] = span.tags.pop('lambda.name', "Unknown") self.data["lambda"]["functionVersion"] = span.tags.pop('lambda.version', "Unknown") self.data["lambda"]["trigger"] = span.tags.pop('lambda.trigger', None) self.data["lambda"]["error"] = None trigger_type = self.data["lambda"]["trigger"] if trigger_type in ["aws:api.gateway", "aws:application.load.balancer"]: self._collect_http_tags(span) elif trigger_type == 'aws:cloudwatch.events': self.data["lambda"]["cw"]["events"]["id"] = span.tags.pop('data.lambda.cw.events.id', None) 
self.data["lambda"]["cw"]["events"]["more"] = span.tags.pop('lambda.cw.events.more', False) self.data["lambda"]["cw"]["events"]["resources"] = span.tags.pop('lambda.cw.events.resources', None) elif trigger_type == 'aws:cloudwatch.logs': self.data["lambda"]["cw"]["logs"]["group"] = span.tags.pop('lambda.cw.logs.group', None) self.data["lambda"]["cw"]["logs"]["stream"] = span.tags.pop('lambda.cw.logs.stream', None) self.data["lambda"]["cw"]["logs"]["more"] = span.tags.pop('lambda.cw.logs.more', None) self.data["lambda"]["cw"]["logs"]["events"] = span.tags.pop('lambda.cw.logs.events', None) elif trigger_type == 'aws:s3': self.data["lambda"]["s3"]["events"] = span.tags.pop('lambda.s3.events', None) elif trigger_type == 'aws:sqs': self.data["lambda"]["sqs"]["messages"] = span.tags.pop('lambda.sqs.messages', None) elif span.operation_name == "celery-worker": self.data["celery"]["task"] = span.tags.pop('task', None) self.data["celery"]["task_id"] = span.tags.pop('task_id', None) self.data["celery"]["scheme"] = span.tags.pop('scheme', None) self.data["celery"]["host"] = span.tags.pop('host', None) self.data["celery"]["port"] = span.tags.pop('port', None) self.data["celery"]["retry-reason"] = span.tags.pop('retry-reason', None) self.data["celery"]["error"] = span.tags.pop('error', None) elif span.operation_name == "gcps-consumer": self.data["gcps"]["op"] = span.tags.pop('gcps.op', None) self.data["gcps"]["projid"] = span.tags.pop('gcps.projid', None) self.data["gcps"]["sub"] = span.tags.pop('gcps.sub', None) elif span.operation_name == "rabbitmq": self.data["rabbitmq"]["exchange"] = span.tags.pop('exchange', None) self.data["rabbitmq"]["queue"] = span.tags.pop('queue', None) self.data["rabbitmq"]["sort"] = span.tags.pop('sort', None) self.data["rabbitmq"]["address"] = span.tags.pop('address', None) self.data["rabbitmq"]["key"] = span.tags.pop('key', None) elif span.operation_name == "rpc-server": self.data["rpc"]["flavor"] = span.tags.pop('rpc.flavor', None) 
self.data["rpc"]["host"] = span.tags.pop('rpc.host', None) self.data["rpc"]["port"] = span.tags.pop('rpc.port', None) self.data["rpc"]["call"] = span.tags.pop('rpc.call', None) self.data["rpc"]["call_type"] = span.tags.pop('rpc.call_type', None) self.data["rpc"]["params"] = span.tags.pop('rpc.params', None) self.data["rpc"]["baggage"] = span.tags.pop('rpc.baggage', None) self.data["rpc"]["error"] = span.tags.pop('rpc.error', None) else: logger.debug("SpanRecorder: Unknown entry span: %s" % span.operation_name) def _populate_local_span_data(self, span): if span.operation_name == "render": self.data["render"]["name"] = span.tags.pop('name', None) self.data["render"]["type"] = span.tags.pop('type', None) self.data["log"]["message"] = span.tags.pop('message', None) self.data["log"]["parameters"] = span.tags.pop('parameters', None) else: logger.debug("SpanRecorder: Unknown local span: %s" % span.operation_name) def _populate_exit_span_data(self, span): if span.operation_name in self.HTTP_SPANS: self._collect_http_tags(span) elif span.operation_name == "boto3": # boto3 also sends http tags self._collect_http_tags(span) for tag in ['op', 'ep', 'reg', 'payload', 'error']: value = span.tags.pop(tag, None) if value is not None: if tag == 'payload': self.data["boto3"][tag] = self._validate_tags(value) else: self.data["boto3"][tag] = value elif span.operation_name == "cassandra": self.data["cassandra"]["cluster"] = span.tags.pop('cassandra.cluster', None) self.data["cassandra"]["query"] = span.tags.pop('cassandra.query', None) self.data["cassandra"]["keyspace"] = span.tags.pop('cassandra.keyspace', None) self.data["cassandra"]["fetchSize"] = span.tags.pop('cassandra.fetchSize', None) self.data["cassandra"]["achievedConsistency"] = span.tags.pop('cassandra.achievedConsistency', None) self.data["cassandra"]["triedHosts"] = span.tags.pop('cassandra.triedHosts', None) self.data["cassandra"]["fullyFetched"] = span.tags.pop('cassandra.fullyFetched', None) 
self.data["cassandra"]["error"] = span.tags.pop('cassandra.error', None) elif span.operation_name == "celery-client": self.data["celery"]["task"] = span.tags.pop('task', None) self.data["celery"]["task_id"] = span.tags.pop('task_id', None) self.data["celery"]["scheme"] = span.tags.pop('scheme', None) self.data["celery"]["host"] = span.tags.pop('host', None) self.data["celery"]["port"] = span.tags.pop('port', None) self.data["celery"]["error"] = span.tags.pop('error', None) elif span.operation_name == "couchbase": self.data["couchbase"]["hostname"] = span.tags.pop('couchbase.hostname', None) self.data["couchbase"]["bucket"] = span.tags.pop('couchbase.bucket', None) self.data["couchbase"]["type"] = span.tags.pop('couchbase.type', None) self.data["couchbase"]["error"] = span.tags.pop('couchbase.error', None) self.data["couchbase"]["error_type"] = span.tags.pop('couchbase.error_type', None) self.data["couchbase"]["sql"] = span.tags.pop('couchbase.sql', None) elif span.operation_name == "rabbitmq": self.data["rabbitmq"]["exchange"] = span.tags.pop('exchange', None) self.data["rabbitmq"]["queue"] = span.tags.pop('queue', None) self.data["rabbitmq"]["sort"] = span.tags.pop('sort', None) self.data["rabbitmq"]["address"] = span.tags.pop('address', None) self.data["rabbitmq"]["key"] = span.tags.pop('key', None) elif span.operation_name == "redis": self.data["redis"]["connection"] = span.tags.pop('connection', None) self.data["redis"]["driver"] = span.tags.pop('driver', None) self.data["redis"]["command"] = span.tags.pop('command', None) self.data["redis"]["error"] = span.tags.pop('redis.error', None) self.data["redis"]["subCommands"] = span.tags.pop('subCommands', None) elif span.operation_name == "rpc-client": self.data["rpc"]["flavor"] = span.tags.pop('rpc.flavor', None) self.data["rpc"]["host"] = span.tags.pop('rpc.host', None) self.data["rpc"]["port"] = span.tags.pop('rpc.port', None) self.data["rpc"]["call"] = span.tags.pop('rpc.call', None) 
self.data["rpc"]["call_type"] = span.tags.pop('rpc.call_type', None) self.data["rpc"]["params"] = span.tags.pop('rpc.params', None) self.data["rpc"]["baggage"] = span.tags.pop('rpc.baggage', None) self.data["rpc"]["error"] = span.tags.pop('rpc.error', None) elif span.operation_name == "sqlalchemy": self.data["sqlalchemy"]["sql"] = span.tags.pop('sqlalchemy.sql', None) self.data["sqlalchemy"]["eng"] = span.tags.pop('sqlalchemy.eng', None) self.data["sqlalchemy"]["url"] = span.tags.pop('sqlalchemy.url', None) self.data["sqlalchemy"]["err"] = span.tags.pop('sqlalchemy.err', None) elif span.operation_name == "mysql": self.data["mysql"]["host"] = span.tags.pop('host', None) self.data["mysql"]["port"] = span.tags.pop('port', None) self.data["mysql"]["db"] = span.tags.pop(ot_tags.DATABASE_INSTANCE, None) self.data["mysql"]["user"] = span.tags.pop(ot_tags.DATABASE_USER, None) self.data["mysql"]["stmt"] = span.tags.pop(ot_tags.DATABASE_STATEMENT, None) self.data["mysql"]["error"] = span.tags.pop('mysql.error', None) elif span.operation_name == "postgres": self.data["pg"]["host"] = span.tags.pop('host', None) self.data["pg"]["port"] = span.tags.pop('port', None) self.data["pg"]["db"] = span.tags.pop(ot_tags.DATABASE_INSTANCE, None) self.data["pg"]["user"] = span.tags.pop(ot_tags.DATABASE_USER, None) self.data["pg"]["stmt"] = span.tags.pop(ot_tags.DATABASE_STATEMENT, None) self.data["pg"]["error"] = span.tags.pop('pg.error', None) elif span.operation_name == "mongo": service = "%s:%s" % (span.tags.pop('host', None), span.tags.pop('port', None)) namespace = "%s.%s" % (span.tags.pop('db', "?"), span.tags.pop('collection', "?")) self.data["mongo"]["service"] = service self.data["mongo"]["namespace"] = namespace self.data["mongo"]["command"] = span.tags.pop('command', None) self.data["mongo"]["filter"] = span.tags.pop('filter', None) self.data["mongo"]["json"] = span.tags.pop('json', None) self.data["mongo"]["error"] = span.tags.pop('error', None) elif span.operation_name == 
"gcs": self.data["gcs"]["op"] = span.tags.pop('gcs.op') self.data["gcs"]["bucket"] = span.tags.pop('gcs.bucket', None) self.data["gcs"]["object"] = span.tags.pop('gcs.object', None) self.data["gcs"]["entity"] = span.tags.pop('gcs.entity', None) self.data["gcs"]["range"] = span.tags.pop('gcs.range', None) self.data["gcs"]["sourceBucket"] = span.tags.pop('gcs.sourceBucket', None) self.data["gcs"]["sourceObject"] = span.tags.pop('gcs.sourceObject', None) self.data["gcs"]["sourceObjects"] = span.tags.pop('gcs.sourceObjects', None) self.data["gcs"]["destinationBucket"] = span.tags.pop('gcs.destinationBucket', None) self.data["gcs"]["destinationObject"] = span.tags.pop('gcs.destinationObject', None) self.data["gcs"]["numberOfOperations"] = span.tags.pop('gcs.numberOfOperations', None) self.data["gcs"]["projectId"] = span.tags.pop('gcs.projectId', None) self.data["gcs"]["accessId"] = span.tags.pop('gcs.accessId', None) elif span.operation_name == "gcps-producer": self.data["gcps"]["op"] = span.tags.pop('gcps.op', None) self.data["gcps"]["projid"] = span.tags.pop('gcps.projid', None) self.data["gcps"]["top"] = span.tags.pop('gcps.top', None) elif span.operation_name == "log": # use last special key values for l in span.logs: if "message" in l.key_values: self.data["log"]["message"] = l.key_values.pop("message", None) if "parameters" in l.key_values: self.data["log"]["parameters"] = l.key_values.pop("parameters", None) else: logger.debug("SpanRecorder: Unknown exit span: %s" % span.operation_name) def _collect_http_tags(self, span): self.data["http"]["host"] = span.tags.pop("http.host", None) self.data["http"]["url"] = span.tags.pop(ot_tags.HTTP_URL, None) self.data["http"]["path"] = span.tags.pop("http.path", None) self.data["http"]["params"] = span.tags.pop('http.params', None) self.data["http"]["method"] = span.tags.pop(ot_tags.HTTP_METHOD, None) self.data["http"]["status"] = span.tags.pop(ot_tags.HTTP_STATUS_CODE, None) self.data["http"]["path_tpl"] = 
span.tags.pop("http.path_tpl", None) self.data["http"]["error"] = span.tags.pop('http.error', None) if len(span.tags) > 0: if span.operation_name == "soap": self.data["soap"]["action"] = span.tags.pop('soap.action', None) custom_headers = [] for key in span.tags: if key[0:12] == "http.header.": custom_headers.append(key) for key in custom_headers: trimmed_key = key[12:] self.data["http"]["header"][trimmed_key] = span.tags.pop(key)
23,328
7,375
#!/usr/bin/env python """ -------------------------------------------------------- IMPORT_IWORX reads and converts various IWORX datafiles into a FieldTrip-type data structure. Use as data, event = import_iworx(filename) where the filename should point to a .mat or .txt datafile. data has the following nested fields: .trial .time .label event has the following nested fields: .type .sample .value Copyright (C) 2022, Arjen Stolk -------------------------------------------------------- """ import os import scipy.io def import_iworx(filename): # check the input path = os.path.split(filename)[0] # xxx/ name = os.path.split(filename)[-1][:-4] # xxx ext = os.path.splitext(filename)[-1] # .xxx if ext != ".mat" and ext != ".txt": print("file extension should be either .mat or .txt for this function") hasmat = False if ext == ".mat": hasmat = True hastxt = False hasmark = False if ext == ".txt": hastxt = True if name[-10:] == "_MarksData": hasmark = True # organize the input if hasmark: datafile = os.path.join(path, name[:-10] + ".mat") headerfile = os.path.join(path, name[:-10] + ".txt") markerfile = filename elif hastxt or hasmat: datafile = os.path.join(path, name + ".mat") headerfile = os.path.join(path, name + ".txt") markerfile = os.path.join(path, name + "_MarksData.txt") # read the data mat = scipy.io.loadmat(datafile) # initialize data structure class Data(object): def __init__(self): self.trial = [] self.time = [] self.label = [] # organize data structure data = Data() for t in range(mat["n"][0][0]): # n is a variable contained by the mat file data.trial.append(mat["b" + str(t + 1)].T) data.time.append(mat["b" + str(t + 1)][:, 0]) # read the header information try: with open(headerfile) as f: contents = f.readlines() data.label = contents[0].split(" ") except: print("could not read the header information") # initialize event structure class Event(object): def __init__(self): self.type = [] self.sample = [] self.value = [] # read the markers event = Event() try: 
with open(markerfile) as f: contents = f.readlines() for e in range(1, len(contents)): event.type.append(contents[e].split(" ")[0]) event.sample.append(contents[e].split(" ")[1]) event.value.append(contents[e].split(" ")[4]) except: print("could not read the marker information") return data, event
2,787
872
import arcpy import pandas as pd from sklearn.feature_extraction.text import CountVectorizer from sklearn.naive_bayes import MultinomialNB input_csv = arcpy.GetParameterAsText(0) test_string = arcpy.GetParameterAsText(1) df = pd.read_csv(input_csv) target = df['is_there_an_emotion_directed_at_a_brand_or_product'] text = df['tweet_text'] fixed_text = text[pd.notnull(text)] fixed_target = target[pd.notnull(text)] count_vect = CountVectorizer() count_vect.fit(fixed_text) counts = count_vect.transform(fixed_text) # NB has a bunch of parameters -- somewhat scary for those who haven't # used it before. That said, Scikit-Learn mostly has sane defaults, # and usually it's not necessary to modify them. Can also try to # change a new algorithm, but usually it's not the best way to spend # your time. nb = MultinomialNB() nb.fit(counts, fixed_target) arcpy.AddMessage(nb.predict(count_vect.transform([test_string]))) # testing an addition to the script.
961
324
# # PySNMP MIB module TRANGO-APEX-TRAP-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TRANGO-APEX-TRAP-MIB # Produced by pysmi-0.3.4 at Mon Apr 29 21:19:34 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint") NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance") Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, NotificationType, IpAddress, Gauge32, Unsigned32, TimeTicks, iso, ModuleIdentity, Bits, Counter32, Integer32, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "NotificationType", "IpAddress", "Gauge32", "Unsigned32", "TimeTicks", "iso", "ModuleIdentity", "Bits", "Counter32", "Integer32", "ObjectIdentity") DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention") MibScalar, MibTable, MibTableRow, MibTableColumn, apex, NotificationType, Unsigned32, ModuleIdentity, ObjectIdentity = mibBuilder.importSymbols("TRANGO-APEX-MIB", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "apex", "NotificationType", "Unsigned32", "ModuleIdentity", "ObjectIdentity") class DisplayString(OctetString): pass trangotrap = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6)) trapReboot = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 1)) if 
mibBuilder.loadTexts: trapReboot.setStatus('current') trapStartUp = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 2)) if mibBuilder.loadTexts: trapStartUp.setStatus('current') traplock = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 3)) trapModemLock = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 3, 1)) if mibBuilder.loadTexts: trapModemLock.setStatus('current') trapTimingLock = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 3, 2)) if mibBuilder.loadTexts: trapTimingLock.setStatus('current') trapInnerCodeLock = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 3, 3)) if mibBuilder.loadTexts: trapInnerCodeLock.setStatus('current') trapEqualizerLock = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 3, 4)) if mibBuilder.loadTexts: trapEqualizerLock.setStatus('current') trapFrameSyncLock = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 3, 5)) if mibBuilder.loadTexts: trapFrameSyncLock.setStatus('current') trapthreshold = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4)) trapmse = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 1)) trapMSEMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 1, 1)) if mibBuilder.loadTexts: trapMSEMinThreshold.setStatus('current') trapMSEMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 1, 2)) if mibBuilder.loadTexts: trapMSEMaxThreshold.setStatus('current') trapber = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 2)) trapBERMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 2, 1)) if mibBuilder.loadTexts: trapBERMinThreshold.setStatus('current') trapBERMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 2, 2)) if mibBuilder.loadTexts: trapBERMaxThreshold.setStatus('current') trapfer = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 3)) trapFERMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 3, 1)) if mibBuilder.loadTexts: trapFERMinThreshold.setStatus('current') trapFERMaxThreshold = 
NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 3, 2)) if mibBuilder.loadTexts: trapFERMaxThreshold.setStatus('current') traprssi = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 4)) trapRSSIMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 4, 1)) if mibBuilder.loadTexts: trapRSSIMinThreshold.setStatus('current') trapRSSIMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 4, 2)) if mibBuilder.loadTexts: trapRSSIMaxThreshold.setStatus('current') trapidutemp = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 5)) trapIDUTempMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 5, 1)) if mibBuilder.loadTexts: trapIDUTempMinThreshold.setStatus('current') trapIDUTempMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 5, 2)) if mibBuilder.loadTexts: trapIDUTempMaxThreshold.setStatus('current') trapodutemp = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 6)) trapODUTempMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 6, 1)) if mibBuilder.loadTexts: trapODUTempMinThreshold.setStatus('current') trapODUTempMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 6, 2)) if mibBuilder.loadTexts: trapODUTempMaxThreshold.setStatus('current') trapinport = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 7)) trapInPortUtilMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 7, 1)) if mibBuilder.loadTexts: trapInPortUtilMinThreshold.setStatus('current') trapInPortUtilMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 7, 2)) if mibBuilder.loadTexts: trapInPortUtilMaxThreshold.setStatus('current') trapoutport = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 8)) trapOutPortUtilMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 8, 1)) if mibBuilder.loadTexts: trapOutPortUtilMinThreshold.setStatus('current') trapOutPortUtilMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 8, 2)) if 
mibBuilder.loadTexts: trapOutPortUtilMaxThreshold.setStatus('current') trapstandby = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 5)) trapStandbyLinkDown = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 5, 1)) if mibBuilder.loadTexts: trapStandbyLinkDown.setStatus('current') trapStandbyLinkUp = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 5, 2)) if mibBuilder.loadTexts: trapStandbyLinkUp.setStatus('current') trapSwitchover = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 5, 3)) if mibBuilder.loadTexts: trapSwitchover.setStatus('current') trapeth = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 6)) trapethstatus = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 6, 1)) trapEth1StatusUpdate = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 6, 1, 1)) if mibBuilder.loadTexts: trapEth1StatusUpdate.setStatus('current') trapEth2StatusUpdate = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 6, 1, 2)) if mibBuilder.loadTexts: trapEth2StatusUpdate.setStatus('current') trapEth3StatusUpdate = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 6, 1, 3)) if mibBuilder.loadTexts: trapEth3StatusUpdate.setStatus('current') trapEth4StatusUpdate = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 6, 1, 4)) if mibBuilder.loadTexts: trapEth4StatusUpdate.setStatus('current') trapDownShift = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 8)) if mibBuilder.loadTexts: trapDownShift.setStatus('current') trapRapidPortShutdown = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 9)) if mibBuilder.loadTexts: trapRapidPortShutdown.setStatus('current') trapRPSPortUp = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 10)) if mibBuilder.loadTexts: trapRPSPortUp.setStatus('current') mibBuilder.exportSymbols("TRANGO-APEX-TRAP-MIB", trapber=trapber, trapEth3StatusUpdate=trapEth3StatusUpdate, DisplayString=DisplayString, trapFERMinThreshold=trapFERMinThreshold, trapStandbyLinkDown=trapStandbyLinkDown, trapInPortUtilMinThreshold=trapInPortUtilMinThreshold, 
trapMSEMinThreshold=trapMSEMinThreshold, trapRSSIMaxThreshold=trapRSSIMaxThreshold, traprssi=traprssi, trapStandbyLinkUp=trapStandbyLinkUp, trapIDUTempMinThreshold=trapIDUTempMinThreshold, trapRapidPortShutdown=trapRapidPortShutdown, trangotrap=trangotrap, trapStartUp=trapStartUp, trapMSEMaxThreshold=trapMSEMaxThreshold, trapSwitchover=trapSwitchover, traplock=traplock, trapethstatus=trapethstatus, trapEth2StatusUpdate=trapEth2StatusUpdate, trapodutemp=trapodutemp, trapinport=trapinport, trapReboot=trapReboot, trapthreshold=trapthreshold, trapmse=trapmse, trapEth4StatusUpdate=trapEth4StatusUpdate, trapIDUTempMaxThreshold=trapIDUTempMaxThreshold, trapFrameSyncLock=trapFrameSyncLock, trapOutPortUtilMinThreshold=trapOutPortUtilMinThreshold, trapInnerCodeLock=trapInnerCodeLock, trapfer=trapfer, trapTimingLock=trapTimingLock, trapFERMaxThreshold=trapFERMaxThreshold, trapstandby=trapstandby, trapModemLock=trapModemLock, trapInPortUtilMaxThreshold=trapInPortUtilMaxThreshold, trapOutPortUtilMaxThreshold=trapOutPortUtilMaxThreshold, trapoutport=trapoutport, trapODUTempMinThreshold=trapODUTempMinThreshold, trapDownShift=trapDownShift, trapBERMinThreshold=trapBERMinThreshold, trapRPSPortUp=trapRPSPortUp, trapEqualizerLock=trapEqualizerLock, trapeth=trapeth, trapRSSIMinThreshold=trapRSSIMinThreshold, trapEth1StatusUpdate=trapEth1StatusUpdate, trapidutemp=trapidutemp, trapODUTempMaxThreshold=trapODUTempMaxThreshold, trapBERMaxThreshold=trapBERMaxThreshold)
9,453
4,373
#!/usr/bin/env python # -*- coding: utf-8 -*- import gg from ggconfig import config ############################################################################## # CONTENT SNIPPETS ############################################################################## def test_logo_url(): assert gg.logo_url(config) == 'https://oliz.io/ggpy/static/gg.png' assert gg.logo_url() == '' def test_pagetitle(): assert gg.pagetitle('Good Generator.py', config) == 'Good Generator.py' assert gg.pagetitle('Some Page', config) == 'Some Page | Good Generator.py' assert gg.pagetitle('Title with default config') == 'Title with default config' assert gg.pagetitle('') == '' assert gg.pagetitle() == '' assert gg.pagetitle('', config) == 'Good Generator.py' def test_meta(): meta = gg.meta('oz', 'Nice text!', '__draft__, foo, __inline__, bar, tags, __no_header__') assert meta == \ '''<meta name="author" content="oz"> <meta name="description" content="Nice text!"> <meta name="keywords" content="foo, bar, tags">''' def test_meta_single_special_tag(): meta = gg.meta('oz', 'Nice text!', '__draft__') assert meta == \ '''<meta name="author" content="oz"> <meta name="description" content="Nice text!">''' def test_opengraph(): opengraph = gg.opengraph('Title!', 'https://oliz.io/ggpy/', 'Nice text!', '2020-02-20', config) assert opengraph == \ '''<meta property="og:title" content="Title!"> <meta property="og:type" content="article"> <meta property="og:url" content="https://oliz.io/ggpy/"> <meta property="og:description" content="Nice text!"> <meta property="og:image" content="https://oliz.io/ggpy/static/gg.png"> <meta property="og:locale" content="en-US"> <meta property="article:published_time" content="2020-02-20">''' opengraph_default_config = gg.opengraph('Title!', 'https://oliz.io/ggpy/', 'Nice text!', '2020-02-20') assert opengraph_default_config == \ '''<meta property="og:title" content="Title!"> <meta property="og:type" content="article"> <meta property="og:url" content="https://oliz.io/ggpy/"> 
<meta property="og:description" content="Nice text!"> <meta property="og:locale" content="en-US"> <meta property="article:published_time" content="2020-02-20">''' def test_json_ld(): json_ld = gg.json_ld('Title! "BAM!"', 'https://oliz.io/ggpy/', 'It says "BAM!"', config) assert json_ld == \ '''<script type="application/ld+json"> {"@context":"http://schema.org","@type":"WebSite","headline":"Title! \\"BAM!\\"","url":"https://oliz.io/ggpy/","name":"Good Generator.py","description":"It says \\"BAM!\\""}</script>''' json_ld_default_config = gg.json_ld('Title! "BAM!"', 'https://oliz.io/ggpy/', 'It says "BAM!"') assert json_ld_default_config == \ '''<script type="application/ld+json"> {"@context":"http://schema.org","@type":"WebSite","headline":"Title! \\"BAM!\\"","url":"https://oliz.io/ggpy/","description":"It says \\"BAM!\\""}</script>''' def test_header(): header = gg.header('https://example.com/logo.png', '<h1>Title!</h1>', '2021-03-27', config) assert header == \ '''<a href="https://oliz.io/ggpy"><img src="https://example.com/logo.png" class="avatar" /></a> <div style="text-align:right;"> <h1>Title!</h1> <small><a href="https://oliz.io/ggpy">Good Gen</a>, 2021-03-27</small> </div>''' header_default_config = gg.header('', '<h1>Title!</h1>', '2021-03-27') assert header_default_config == \ '''<div style="text-align:right;"> <h1>Title!</h1> <small>2021-03-27</small> </div>''' def test_post_header(): post_header = gg.post_header('<h1 id="title">Title!</h1>', '2020-02-20', config) assert post_header == \ '''<div style="text-align:right;"> <h1 id="title">Title!</h1> <small><a href="https://oliz.io/ggpy">Good Gen</a>, 2020-02-20</small> </div>''' post_header_default_config = gg.post_header('<h1 id="title">Title!</h1>', '2020-02-20') assert post_header_default_config == \ '''<div style="text-align:right;"> <h1 id="title">Title!</h1> <small>2020-02-20</small> </div>''' def test_footer_navigation(): footer_nav = gg.footer_navigation() assert footer_nav == \ '''<a href="#" 
class="nav">top</a> <a href="javascript:toggleTheme()" class="nav">🌓</a> <a href="javascript:toggleFontSize()" class="nav">aA</a>''' def test_about_and_social_icons(): about_and_social = gg.about_and_social_icons(config) assert about_and_social == \ '''<a href="mailto:example@example.com" class="social">email</a> <a href="https://nitter.net/" class="social">twitter</a> <a href="https://github.com/ooz/ggpy" class="social">github</a> <a href="https://oliz.io/about.html" class="social">about</a>''' about_and_social_default_config = gg.about_and_social_icons() assert about_and_social_default_config == '' def test_posts_index(): '''Generate index without inlined posts. ''' posts = gg.scan_posts(['.']) posts = [post for post in posts if gg.TAG_INLINE not in post['tags']] posts_index = gg.posts_index(posts) assert posts_index == \ '''<div> <div class="card"><small class="social">2021-04-04</small><a href="test/features/meta.html"><b>Markdown Meta Data</b></a></div> <div class="card"><small class="social">2018-03-17</small><a href="test/some-post.html"><b>Some Post</b></a></div> <div class="card"><small class="social">1996-06-06</small><a href="test/features/"><b>Markdown Feature Test without &quot;quotes bug&quot;</b></a></div> </div>''' def test_posts_index_inline(): '''Generate index with inlined posts. Four cases: 1. Lots of content but not description -> details block with title as summary 2. Lots of content with description -> details block with description as summary 3. Has description but no content -> only show description 4. 
Else -> show content directly ''' posts = gg.scan_posts(['test/features/index-inline-posts/']) posts_index = gg.posts_index(posts) assert posts_index == \ '''<div> <div class="card"><small class="social">2021-07-17</small> <a href="little-inline-content-no-description.html"><b>Little inline content, no description</b></a> <div> <p>This shows directly on the card, without details+summary blocks.</p> </div> </div> <div class="card"><small class="social">2021-07-17</small> <a href="no-content-with-description.html"><b>No content, but with description</b></a> <div> Just some more minor text from the description </div> </div> <div class="card"><small class="social">2021-07-17</small> <a href="lots-of-content-with-description.html"><b>Lots of content, with description</b></a> <details><summary>Click here to expand...</summary> <ul> <li>One</li> <li>Two</li> <li>Three</li> <li>Four</li> <li>Five</li> <li>Six</li> <li>Seven</li> <li>Eight</li> <li>Nine</li> <li>Ten</li> </ul> <p>... and some more lines.</p> </details> </div> <div class="card"><small class="social">2021-07-17</small> <details><summary><a href="lots-of-content-no-description.html"><b>Lots of content, no description</b></a></summary> <ul> <li>One</li> <li>Two</li> <li>Three</li> <li>Four</li> <li>Five</li> <li>Six</li> <li>Seven</li> <li>Eight</li> <li>Nine</li> <li>Ten</li> </ul> <p>... 
and some more lines.</p> </details> </div> </div>''' ############################################################################## # HTML SNIPPETS ############################################################################## def test_html_opening_boilerplate(): assert gg.html_opening_boilerplate() == \ '''<!DOCTYPE html> <html lang="en-US"> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"> <meta name="viewport" content="width=device-width,initial-scale=1">''' def test_html_head_body_boilerplate(): assert gg.html_head_body_boilerplate() == \ '''</head> <body onload="initTheme()">''' def test_html_tag_line(): assert gg.html_tag_line('title', 'Nice!') == '<title>Nice!</title>' def test_html_tag_block(): assert gg.html_tag_block('footer', '<p>in closing</p>') == \ '''<footer> <p>in closing</p> </footer>''' def test_html_tag_empty(): link_tag = gg.html_tag_empty('link', [('rel', 'canonical'), ('href','https://example.com')]) assert link_tag == '<link rel="canonical" href="https://example.com">' omit_empty_tag = gg.html_tag_empty('link', []) assert omit_empty_tag == '' def test_html_closing_boilerplate(): assert gg.html_closing_boilerplate() == \ '''</body> </html> ''' def test_inline_style(): style = gg.inline_style() assert 'body {' in style assert '.dark-mode' in style assert '.avatar' in style assert '.nav' in style assert '.social' in style def test_inline_javascript(): js = gg.inline_javascript() assert 'function toggleTheme' in js assert 'function initTheme' in js
8,780
3,184
# -*- coding: utf-8 -*- from dp_tornado.engine.controller import Controller class ByValueController(Controller): def get(self): param_key = 'csrf' if not self.helper.security.web.csrf.verify_token(controller=self, value=self.get_argument(param_key)): return self.parent.finish_with_error(400) self.finish('done')
359
121
from typing import TYPE_CHECKING, Any, ClassVar, Optional from urllib.parse import quote as _uriquote if TYPE_CHECKING: from dis_snek.models.discord.snowflake import Snowflake_Type __all__ = ["Route"] class Route: BASE: ClassVar[str] = "https://discord.com/api/v9" path: str params: dict[str, str | int] webhook_id: Optional["Snowflake_Type"] webhook_token: Optional[str] def __init__(self, method: str, path: str, **parameters: Any): self.path: str = path self.method: str = method self.params = parameters self.channel_id = parameters.get("channel_id") self.guild_id = parameters.get("guild_id") self.webhook_id = parameters.get("webhook_id") self.webhook_token = parameters.get("webhook_token") self.known_bucket: Optional[str] = None def __eq__(self, other): if isinstance(other, Route): return self.rl_bucket == other.rl_bucket return NotImplemented def __hash__(self): return hash(self.rl_bucket) def __repr__(self): return f"<Route {self.endpoint}>" def __str__(self): return self.endpoint @property def rl_bucket(self) -> str: """This route's full rate limit bucket""" if self.known_bucket: return self.known_bucket if self.webhook_token: return f"{self.webhook_id}{self.webhook_token}:{self.channel_id}:{self.guild_id}:{self.endpoint}" return f"{self.channel_id}:{self.guild_id}:{self.endpoint}" @property def endpoint(self) -> str: """The endpoint for this route""" return f"{self.method} {self.path}" @property def url(self) -> str: """The full url for this route""" return f"{self.BASE}{self.path}".format_map( {k: _uriquote(v) if isinstance(v, str) else v for k, v in self.params.items()} )
1,913
628
# udi dataset process module # modiflied from nuscenes_dataset.py import json import pickle import time import random from copy import deepcopy from functools import partial from pathlib import Path import subprocess import fire import numpy as np import os from second.core import box_np_ops from second.core import preprocess as prep from second.data import kitti_common as kitti from second.data.dataset import Dataset, register_dataset from second.utils.eval import get_coco_eval_result, get_official_eval_result from second.utils.progress_bar import progress_bar_iter as prog_bar from second.utils.timer import simple_timer @register_dataset class UDIDataset(Dataset): NumPointFeatures = 4 NameMapping = { 'car': 'car', 'pedestrian': 'pedestrian', 'cyclist': 'cyclist', 'truck': 'truck', 'forklift': 'forklift', 'golf car': 'golf car', 'motorcyclist': 'motorcyclist', 'bicycle': 'bicycle', 'motorbike': 'motorbike' } DefaultAttribute = { "car": "object_action_parked", "pedestrain": "object_action_walking", "bicycle": "object_action_driving_straight_forward", "motorcycle": "object_action_parked", "other_vehicle": "object_action_driving_straight_forward", "emergency_vehicle": "object_action_driving_straight_forward", "truck": "object_action_parked", "animal": "", "bus": "object_action_driving_straight_forward", } def __init__(self, root_path, info_path, class_names=None, prep_func=None, num_point_features=None): self._root_path = Path(root_path) self._info_path = Path(info_path) with open(info_path, 'rb') as f: data = pickle.load(f) self._udi_infos = data["infos"] self._metadata = data["metadata"] self._class_names = class_names self._prep_func = prep_func self.version = self._metadata["version"] self._with_velocity = False def __len__(self): return len(self._udi_infos) def __getitem__(self, idx): input_dict = self.get_sensor_data(idx) example = self._prep_func(input_dict=input_dict) example["metadata"] = input_dict["metadata"] if "anchors_mask" in example: 
example["anchors_mask"] = example["anchors_mask"].astype(np.uint8) return example def get_sensor_data(self, query): idx = query if isinstance(query, dict): assert "lidar" in query idx = query["lidar"]["idx"] info = self._udi_infos[idx] res = { "lidar": { "type": "lidar", "points": None, }, "metadata": { "token": info["token"] }, } lidar_path = Path(info['lidar_path']) points = np.fromfile(str(lidar_path), dtype=np.float32).reshape((-1,4)) points[:, 3] /= 255 res["lidar"]["points"] = points if 'gt_boxes' in info: res["lidar"]["annotations"] = { 'boxes': info["gt_boxes"], 'names': info["gt_names"] } return res def evaluation_udi(self, detections, output_dir): version = self.version eval_set_map = { # "v1.0-mini": "mini_train", "v1.0-trainval": "val", } # gt_annos = self.ground_truth_annotations # if gt_annos is None: # return None udi_annos = {} mapped_class_names = self._class_names token2info = {} for info in self._udi_infos: token2info[info["token"]] = info for det in detections: annos = [] boxes = _second_det_to_udi_box(det) for i, box in enumerate(boxes): name = mapped_class_names[box.label] velocity = box.velocity[:2].tolist() box.velocity = np.array([*velocity, 0.0]) for i, box in enumerate(boxes): name = mapped_class_names[box.label] velocity = box.velocity[:2].tolist() nusc_anno = { "sample_token": det["metadata"]["token"], "translation": box.center.tolist(), "size": box.wlh.tolist(), "rotation": box.orientation.elements.tolist(), "velocity": velocity, "detection_name": name, "detection_score": box.score, "attribute_name": "", } annos.append(nusc_anno) udi_annos[det["metadata"]["token"]] = annos nusc_submissions = { "meta": { "use_camera": False, "use_lidar": False, "use_radar": False, "use_map": False, "use_external": False, }, "results": udi_annos, } res_path = Path(output_dir) / "results_udi.json" with open(res_path, "w") as f: json.dump(nusc_submissions, f) eval_main_file = Path(__file__).resolve().parent / "udi_eval.py" # why add \"{}\"? 
to support path with spaces. cmd = f"python3 {str(eval_main_file)} --root_path=\"{str(self._root_path)}\"" cmd += f" --info_path=\"{str(self._info_path)}\"" cmd += f" --version={self.version}" cmd += f" --res_path=\"{str(res_path)}\" --eval_set={eval_set_map[self.version]}" cmd += f" --output_dir=\"{output_dir}\"" # use subprocess can release all nusc memory after evaluation subprocess.check_output(cmd, shell=True) with open(Path(output_dir) / "metrics_summary.json", "r") as f: metrics = json.load(f) detail = {} res_path.unlink() # delete results_nusc.json since it's very large result = f"Nusc {version} Evaluation\n" for name in mapped_class_names: detail[name] = {} for k, v in metrics["label_aps"][name].items(): detail[name][f"dist@{k}"] = v tp_errs = [] tp_names = [] for k, v in metrics["label_tp_errors"][name].items(): detail[name][k] = v tp_errs.append(f"{v:.4f}") tp_names.append(k) threshs = ', '.join(list(metrics["label_aps"][name].keys())) scores = list(metrics["label_aps"][name].values()) scores = ', '.join([f"{s * 100:.2f}" for s in scores]) result += f"{name} Nusc dist AP@{threshs} and TP errors\n" result += scores result += "\n" result += ', '.join(tp_names) + ": " + ', '.join(tp_errs) result += "\n" return { "results": { "nusc": result }, "detail": { "nusc": detail }, } def evaluation(self, detections, output_dir): res_udi = self.evaluation_udi(detections, output_dir) res = { "results": { "nusc": res_udi["result"]["nusc"], }, "detail": { "eval.nusc": res_udi["detail"]["nusc"], }, } return res def _second_det_to_udi_box(detection): from udi_eval import Box import pyquaternion box3d = detection["box3d_lidar"].detach().cpu().numpy() scores = detection["scores"].detach().cpu().numpy() labels = detection["label_preds"].detach().cpu().numpy() box3d[:, 6] = -box3d[:, 6] - np.pi/2 box_list = [] for i in range(box3d.shape[0]): quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box3d[i,6]) velocity = (np.nan, np.nan, np.nan) # if box3d.shape[1] == 9: # 
velocity = (*box3d[i, 7:9], 0.0) box = Box( box3d[i, :3], box3d[i, 3:6], quat, label=labels[i], score=scores[i], velocity=velocity) box_list.append(box) return box_list # def _lidar_nusc_box_to_global(info, boxes, classes, eval_version="ICLR 2019"): # import pyquaternion # box_list = [] # for box in boxes: # box.rotate(pyquaternion.Quaternion(info['lidar2ego_rotation'])) # box.translate(np.array(info['lidar2ego_translation'])) # box.rotate(pyquaternion.Quaternion(info['ego2global_rotation'])) # box.translate(np.array(info['ego2global_translation'])) # box_list.append(box) # return box_list # def _get_available_scenes(lyft): # available_scenes = [] # print("total scene num:", len(lyft.scene)) # for scene in lyft.scene: # scene_token = scene["token"] # scene_rec = lyft.get('scene', scene_token) # sample_rec = lyft.get('sample', scene_rec['first_sample_token']) # sd_rec = lyft.get('sample_data', sample_rec['data']["LIDAR_TOP"]) # has_more_frames = True # scene_not_exist = False # while has_more_frames: # lidar_path, boxes, _ = lyft.get_sample_data(sd_rec['token']) # if not Path(lidar_path).exists(): # scenes_not_exist = True # break # else: # break # if not sd_rec['next'] == "": # sd_rec = lyft.get('sample_data', sd_rec['next']) # else: # has_more_frames = False # if scene_not_exist: # continue # available_scenes.append(scene) # print("exist scene num:", len(available_scenes)) # return available_scenes def _fill_train_infos(root_path): train_udi_infos = [] lidar_root_path = root_path+ "/lidar" label_root_path = root_path + "/label" img_root_path = root_path + "/image" filenames = os.listdir(lidar_root_path) for filename in prog_bar(filenames): index = filename.split(".")[0] lidar_path = lidar_root_path + "/" + index + ".bin" cam_path = img_root_path + "/" + index + ".jpg" label_path = label_root_path + "/" + index + "_bin.json" assert Path(lidar_path).exists() assert Path(cam_path).exists() assert Path(label_path).exists() with open(label_path, encoding='utf-8') as f: 
res = f.read() result = json.loads(res) boxes = result["elem"] info = { "lidar_path": lidar_path, "cam_front_path": cam_path, "filename": filename, "token": int(index), } gt_locs_list = [] gt_dims_list = [] print("label file path:", label_path) for box in boxes: box_loc = box["position"] box_size = box["size"] box_loc_ = np.array([box_loc["x"],box_loc["y"], box_loc["z"]], dtype=np.float) box_size_ = np.array([box_size["width"],box_size["depth"],box_size["height"]], dtype=np.float) box_loc_ = box_loc_.reshape(-1, 3) box_size_ = box_size_.reshape(-1, 3) gt_locs_list.append(box_loc_) gt_dims_list.append(box_size_) locs = np.concatenate(gt_locs_list, axis=0) dims = np.concatenate(gt_dims_list, axis=0) rots = np.array([b["yaw"] for b in boxes]).reshape(-1, 1) names = [b["class"] for b in boxes] for i in range(len(names)): if names[i] in UDIDataset.NameMapping: names[i] = UDIDataset.NameMapping[names[i]] names = np.array(names) # we need to convert rot to SECOND format. # change the rot format will break all checkpoint. 
gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1) info["gt_boxes"] = gt_boxes info["gt_names"] = names train_udi_infos.append(info) return train_udi_infos def create_udi_infos(root_path): # root_path = Path(root_path) root_path = str(root_path) train_udi_infos = _fill_train_infos(root_path) metadata = { "version": "v0.1-train", } print( f"train sample: {len(train_udi_infos)}" ) data = { "infos": train_udi_infos, "metadata": metadata, } with open(root_path + "/infos_udi_train.pkl", 'wb') as f: pickle.dump(data, f) def get_box_mean(info_path, class_name="car"): with open(info_path, 'rb') as f: lyft_infos = pickle.load(f)["infos"] gt_boxes_list = [] for info in lyft_infos: gt_boxes = info["gt_boxes"] gt_names = info["gt_names"] mask = np.array([s == class_name for s in info["gt_names"]], dtype=np.bool_) gt_names = gt_names[mask] gt_boxes = gt_boxes[mask] gt_boxes_list.append(gt_boxes.reshape(-1, 7)) gt_boxes_list = np.concatenate(gt_boxes_list, axis=0) return { "box3d": gt_boxes_list.mean(0).tolist(), "detail": gt_boxes_list } def get_all_box_mean(info_path): det_names = set() for k, v in UDIDataset.NameMapping.items(): if v not in det_names: det_names.add(v) det_names = sorted(list(det_names)) res = {} details = {} for k in det_names: result = get_box_mean(info_path, k) details[k] = result["detail"] res[k] = result["box3d"] print(json.dumps(res, indent=2)) return details if __name__ == "__main__": fire.Fire()
13,539
4,485
#!/usr/bin/env python """ Configure folder for Multicolor testing. Hazen 01/18 """ import argparse import inspect import numpy import os import pickle import subprocess import storm_analysis import storm_analysis.sa_library.parameters as parameters import storm_analysis.sa_library.sa_h5py as saH5Py import storm_analysis.simulator.background as background import storm_analysis.simulator.camera as camera import storm_analysis.simulator.drift as drift import storm_analysis.simulator.photophysics as photophysics import storm_analysis.simulator.psf as psf import storm_analysis.simulator.simulate as simulate import storm_analysis.sCMOS.scmos_analysis as scmos import storm_analysis.diagnostics.multicolor.settings as settings def testingParametersSCMOS(): """ Create a sCMOS parameters object. """ params = parameters.ParametersSCMOS() params.setAttr("max_frame", "int", -1) params.setAttr("start_frame", "int", -1) params.setAttr("background_sigma", "float", 8.0) params.setAttr("camera_calibration", "filename", "calib.npy") params.setAttr("find_max_radius", "int", 5) params.setAttr("foreground_sigma", "float", 1.5) params.setAttr("iterations", "int", settings.iterations) params.setAttr("model", "string", "2dfixed") params.setAttr("pixel_size", "float", settings.pixel_size) params.setAttr("sigma", "float", 150.0/settings.pixel_size) params.setAttr("threshold", "float", 6.0) # Don't do tracking. params.setAttr("descriptor", "string", "1") params.setAttr("radius", "float", "0.0") # Don't do drift-correction. params.setAttr("d_scale", "int", 2) params.setAttr("drift_correction", "int", 0) params.setAttr("frame_step", "int", 500) params.setAttr("z_correction", "int", 0) return params def testingParametersMC(): """ Create a Multiplane parameters object. 
""" params = parameters.ParametersMultiplaneArb() params.setAttr("max_frame", "int", -1) params.setAttr("start_frame", "int", -1) params.setAttr("background_sigma", "float", 8.0) params.setAttr("find_max_radius", "int", 2) params.setAttr("independent_heights", "int", settings.independent_heights) params.setAttr("iterations", "int", settings.iterations) params.setAttr("mapping", "filename", "map.map") params.setAttr("no_fitting", "int", 0) params.setAttr("pixel_size", "float", settings.pixel_size) params.setAttr("sigma", "float", 1.5) params.setAttr("threshold", "float", 6.0) params.setAttr("weights", "filename", "weights.npy") params.setAttr("z_value", "float-array", settings.z_value) params.setAttr("channel0_cal", "filename", "calib.npy") params.setAttr("channel1_cal", "filename", "calib.npy") params.setAttr("channel2_cal", "filename", "calib.npy") params.setAttr("channel3_cal", "filename", "calib.npy") params.setAttr("channel0_ext", "string", "_c1.dax") params.setAttr("channel1_ext", "string", "_c2.dax") params.setAttr("channel2_ext", "string", "_c3.dax") params.setAttr("channel3_ext", "string", "_c4.dax") params.setAttr("channel0_offset", "int", 0) params.setAttr("channel1_offset", "int", 0) params.setAttr("channel2_offset", "int", 0) params.setAttr("channel3_offset", "int", 0) params.setAttr("spline0", "filename", "c1_psf.spline") params.setAttr("spline1", "filename", "c2_psf.spline") params.setAttr("spline2", "filename", "c3_psf.spline") params.setAttr("spline3", "filename", "c4_psf.spline") # Do tracking (localization color analysis depends on the tracks). params.setAttr("descriptor", "string", "1") params.setAttr("radius", "float", "1.0") params.setAttr("max_z", "float", str(0.001 * settings.psf_z_range)) params.setAttr("min_z", "float", str(-0.001 * settings.psf_z_range)) # Don't do drift-correction. 
params.setAttr("d_scale", "int", 2) params.setAttr("drift_correction", "int", 0) params.setAttr("frame_step", "int", 500) params.setAttr("z_correction", "int", 0) return params def configure(): # Get relevant paths. mm_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/micrometry/" mp_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/multi_plane/" sp_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/spliner/" # Create analysis XML files. # print("Creating XML files.") params = testingParametersSCMOS() params.toXMLFile("scmos.xml") params = testingParametersMC() params.toXMLFile("multicolor.xml") # Useful variables aoi_size = int(settings.psf_size/2)+1 # Create sCMOS data and HDF5 files we'll need for the simulation. # if True: # Create sCMOS camera calibration files. # numpy.save("calib.npy", [numpy.zeros((settings.y_size, settings.x_size)) + settings.camera_offset, numpy.ones((settings.y_size, settings.x_size)) * settings.camera_variance, numpy.ones((settings.y_size, settings.x_size)) * settings.camera_gain, 1]) # Create localization on a grid file. # print("Creating gridded localizations.") sim_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/simulator/" subprocess.call(["python", sim_path + "emitters_on_grid.py", "--bin", "grid_list.hdf5", "--nx", str(settings.nx), "--ny", str(settings.ny), "--spacing", "20", "--zrange", str(settings.test_z_range), "--zoffset", str(settings.test_z_offset)]) # Create randomly located localizations file (for STORM movies). # print("Creating random localizations.") subprocess.call(["python", sim_path + "emitters_uniform_random.py", "--bin", "random_storm.hdf5", "--density", "1.0", "--margin", str(settings.margin), "--sx", str(settings.x_size), "--sy", str(settings.y_size), "--zrange", str(settings.test_z_range)]) # Create randomly located localizations file (for mapping measurement). 
# print("Creating random localizations.") subprocess.call(["python", sim_path + "emitters_uniform_random.py", "--bin", "random_map.hdf5", "--density", "0.0003", "--margin", str(settings.margin), "--sx", str(settings.x_size), "--sy", str(settings.y_size)]) # Create sparser grid for PSF measurement. # print("Creating data for PSF measurement.") sim_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/simulator/" subprocess.call(["python", sim_path + "emitters_on_grid.py", "--bin", "psf_list.hdf5", "--nx", "6", "--ny", "3", "--spacing", "40"]) ## This part makes / tests measuring the mapping. ## if True: print("Measuring mapping.") # Make localization files for simulations. # locs = saH5Py.loadLocalizations("random_map.hdf5") locs["z"][:] = 1.0e-3 * settings.z_planes[0] saH5Py.saveLocalizations("c1_random_map.hdf5", locs) for i in range(1,4): locs["x"] += settings.dx locs["y"] += settings.dy locs["z"][:] = settings.z_planes[i] saH5Py.saveLocalizations("c" + str(i+1) + "_random_map.hdf5", locs) # Make localization files for simulations. # locs = saH5Py.loadLocalizations("random_map.hdf5") locs["z"][:] = 1.0e-3 * settings.z_planes[0] saH5Py.saveLocalizations("c1_random_map.hdf5", locs) for i in range(1,4): locs["x"] += settings.dx locs["y"] += settings.dy locs["z"][:] = settings.z_planes[i] saH5Py.saveLocalizations("c" + str(i+1) + "_random_map.hdf5", locs) # Make simulated mapping data. 
# bg_f = lambda s, x, y, h5 : background.UniformBackground(s, x, y, h5, photons = 10) cam_f = lambda s, x, y, h5 : camera.SCMOS(s, x, y, h5, "calib.npy") pp_f = lambda s, x, y, h5 : photophysics.AlwaysOn(s, x, y, h5, 20000.0) psf_f = lambda s, x, y, i3 : psf.GaussianPSF(s, x, y, i3, settings.pixel_size) sim = simulate.Simulate(background_factory = bg_f, camera_factory = cam_f, photophysics_factory = pp_f, psf_factory = psf_f, x_size = settings.x_size, y_size = settings.y_size) for i in range(4): sim.simulate("c" + str(i+1) + "_map.dax", "c" + str(i+1) + "_random_map.hdf5", 1) # Analyze simulated mapping data # for i in range(4): scmos.analyze("c" + str(i+1) + "_map.dax", "c" + str(i+1) + "_map.hdf5", "scmos.xml") # Measure mapping. # for i in range(3): subprocess.call(["python", mm_path + "micrometry.py", "--locs1", "c1_map.hdf5", "--locs2", "c" + str(i+2) + "_map.hdf5", "--results", "c1_c" + str(i+2) + "_map.map", "--no_plots"]) # Merge mapping. # subprocess.call(["python", mm_path + "merge_maps.py", "--results", "map.map", "--maps", "c1_c2_map.map", "c1_c3_map.map", "c1_c4_map.map"]) # Print mapping. # if True: print("Mapping is:") subprocess.call(["python", mp_path + "print_mapping.py", "--mapping", "map.map"]) print("") # Check that mapping is close to what we expect (within 5%). # with open("map.map", 'rb') as fp: mappings = pickle.load(fp) for i in range(3): if not numpy.allclose(mappings["0_" + str(i+1) + "_x"], numpy.array([settings.dx*(i+1), 1.0, 0.0]), rtol = 0.05, atol = 0.05): print("X mapping difference for channel", i+1) if not numpy.allclose(mappings["0_" + str(i+1) + "_y"], numpy.array([settings.dy*(i+1), 0.0, 1.0]), rtol = 0.05, atol = 0.05): print("Y mapping difference for channel", i+1) ## This part measures / test the PSF measurement. ## if True: # Create drift file, this is used to displace the localizations in the # PSF measurement movie. 
# dz = numpy.arange(-settings.psf_z_range, settings.psf_z_range + 0.05, 0.01) drift_data = numpy.zeros((dz.size, 3)) drift_data[:,2] = dz numpy.savetxt("drift.txt", drift_data) # Also create the z-offset file. # z_offset = numpy.ones((dz.size, 2)) z_offset[:,1] = dz numpy.savetxt("z_offset.txt", z_offset) # Create simulated data for PSF measurements. # bg_f = lambda s, x, y, h5 : background.UniformBackground(s, x, y, h5, photons = 10) cam_f = lambda s, x, y, h5 : camera.SCMOS(s, x, y, h5, "calib.npy") drift_f = lambda s, x, y, h5 : drift.DriftFromFile(s, x, y, h5, "drift.txt") pp_f = lambda s, x, y, h5 : photophysics.AlwaysOn(s, x, y, h5, 20000.0) psf_f = lambda s, x, y, h5 : psf.PupilFunction(s, x, y, h5, settings.pixel_size, []) sim = simulate.Simulate(background_factory = bg_f, camera_factory = cam_f, drift_factory = drift_f, photophysics_factory = pp_f, psf_factory = psf_f, x_size = settings.x_size, y_size = settings.y_size) if True: for i in range(4): sim.simulate("c" + str(i+1) + "_zcal.dax", "c" + str(i+1) + "_random_map.hdf5", dz.size) # Get localizations to use for PSF measurement. # subprocess.call(["python", mp_path + "psf_localizations.py", "--bin", "c1_map_ref.hdf5", "--map", "map.map", "--aoi_size", str(aoi_size)]) # Create PSF z stacks. # for i in range(4): subprocess.call(["python", mp_path + "psf_zstack.py", "--movie", "c" + str(i+1) + "_zcal.dax", "--bin", "c1_map_ref_c" + str(i+1) + "_psf.hdf5", "--zstack", "c" + str(i+1) + "_zstack", "--scmos_cal", "calib.npy", "--aoi_size", str(aoi_size)]) # Measure PSF. # for i in range(4): subprocess.call(["python", mp_path + "measure_psf.py", "--zstack", "c" + str(i+1) + "_zstack.npy", "--zoffsets", "z_offset.txt", "--psf_name", "c" + str(i+1) + "_psf_normed.psf", "--z_range", str(settings.psf_z_range), "--normalize"]) ## This part creates the splines. 
## if True: print("Measuring Splines.") for i in range(4): subprocess.call(["python", sp_path + "psf_to_spline.py", "--psf", "c" + str(i+1) + "_psf_normed.psf", "--spline", "c" + str(i+1) + "_psf.spline", "--spline_size", str(settings.psf_size)]) ## This part measures the Cramer-Rao weights. ## if True: print("Calculating weights.") subprocess.call(["python", mp_path + "plane_weighting.py", "--background", str(settings.photons[0][0]), "--photons", str(settings.photons[0][1]), "--output", "weights.npy", "--xml", "multicolor.xml", "--no_plots"]) if (__name__ == "__main__"): configure()
14,657
4,900
"""Application View Tests.""" # Standard Python Libraries import json # cisagov Libraries from tests.data.application_data import get_applications def test_applications_get(client, mocker): """Test getting list of applications.""" mocker.patch("api.manager.ApplicationManager.all", return_value=get_applications(5)) resp = client.get("/api/applications/") data = json.loads(resp.data) assert len(data) == 5
430
132
from __future__ import absolute_import from infratabtask.celery import app from celery import Task from infratabapp.utils import send_email_notf, send_phone_notf class SendNotf(Task): def __init__(self, *args, **kwargs): self.pk = kwargs.get('pk', None) def run(self): self.get_object() self.email_notf() self.phone_notf() def get_object(self): from infratabapp.models import ReminderDetails self.obj = ReminderDetails.objects.get(pk=self.pk) self.message = self.obj.message def email_notf(self): self.email_list = [] for x in self.obj.emailnotification_set.all(): self.email_list.append(x.email) send_email_notf(self.email_list, self.message) def phone_notf(self): self.phone_list = [] for y in self.obj.smsnotification_set.all(): self.phone_list.append(y.phone) send_phone_notf(self.phone_notf, self.message)
967
322
""" Provide accessors to these models via the Django Admin pages """ from django import forms from django.contrib import admin from lms.djangoapps.survey.models import SurveyForm class SurveyFormAdminForm(forms.ModelForm): """Form providing validation of SurveyForm content.""" class Meta: model = SurveyForm fields = ('name', 'form') def clean_form(self): """Validate the HTML template.""" form = self.cleaned_data["form"] SurveyForm.validate_form_html(form) return form class SurveyFormAdmin(admin.ModelAdmin): """Admin for SurveyForm""" form = SurveyFormAdminForm admin.site.register(SurveyForm, SurveyFormAdmin)
695
195
import json import xbmc from . import utils as utils class GuiSettingsManager: filename = 'kodi_settings.json' systemSettings = None def __init__(self): # get all of the current Kodi settings json_response = json.loads(xbmc.executeJSONRPC('{"jsonrpc":"2.0", "id":1, "method":"Settings.GetSettings","params":{"level":"expert"}}')) self.systemSettings = json_response['result']['settings'] def backup(self): utils.log('Backing up Kodi settings') # return all current settings return self.systemSettings def restore(self, restoreSettings): utils.log('Restoring Kodi settings') updateJson = {"jsonrpc": "2.0", "id": 1, "method": "Settings.SetSettingValue", "params": {"setting": "", "value": ""}} # create a setting=value dict of the current settings settingsDict = {} for aSetting in self.systemSettings: # ignore action types, no value if(aSetting['type'] != 'action'): settingsDict[aSetting['id']] = aSetting['value'] restoreCount = 0 for aSetting in restoreSettings: # only update a setting if its different than the current (action types have no value) if(aSetting['type'] != 'action' and settingsDict[aSetting['id']] != aSetting['value']): if(utils.getSettingBool('verbose_logging')): utils.log('%s different than current: %s' % (aSetting['id'], str(aSetting['value']))) updateJson['params']['setting'] = aSetting['id'] updateJson['params']['value'] = aSetting['value'] xbmc.executeJSONRPC(json.dumps(updateJson)) restoreCount = restoreCount + 1 utils.log('Update %d settings' % restoreCount)
1,804
499
""" District Mapping ================ Defines the algorithms that perform the mapping from precincts to districts. """
120
30
from .DataType import BaseType class Column(object): COLUMN_COMMON = 0 COLUMN_BLOOM = 1 COLUMN_INDEXED = 2 COLUMN_PRIMARY = 3 DEFAULT_FAIL_SHARE = 0.2 __NAME_TO_MOD = dict( bloom=1, indexed=2, pk=3, ) def __init__(self, name: str, kind: BaseType, mod: int, table=None, fail_share=None): self.name = name self.kind = kind self.mod = mod self.table = table self.fail_share = self.DEFAULT_FAIL_SHARE if fail_share is None else fail_share @classmethod def auto(cls, name: str, kind: BaseType, mod: str, table=None, fail_share=None): return Column(name, kind, cls.__NAME_TO_MOD[mod.lower()], table, fail_share) @classmethod def common(cls, name: str, kind: BaseType, table=None, fail_share=None): return Column(name, kind, cls.COLUMN_COMMON, table, fail_share) @property def is_common(self): return self.mod == self.COLUMN_COMMON @classmethod def bloom(cls, name: str, kind: BaseType, table=None, fail_share=None): return Column(name, kind, cls.COLUMN_BLOOM, table, fail_share) @property def is_bloom(self): return self.mod == self.COLUMN_BLOOM @classmethod def indexed(cls, name: str, kind: BaseType, table=None, fail_share=None): return Column(name, kind, cls.COLUMN_INDEXED, table, fail_share) @property def is_indexed(self): return self.mod == self.COLUMN_INDEXED @classmethod def primary(cls, name: str, kind: BaseType, table=None, fail_share=None): return Column(name, kind, cls.COLUMN_PRIMARY, table, fail_share) @property def is_primary(self): return self.mod == self.COLUMN_PRIMARY
1,741
607
# -*- coding: utf-8 -*- # Copyright (c) 2015-2016 Tigera, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ test_hwm ~~~~~~~~ Tests for high water mark tracking function. """ import logging from unittest import TestCase from mock import Mock, call, patch from calico.etcddriver import hwm from calico.etcddriver.hwm import HighWaterTracker _log = logging.getLogger(__name__) class TestHighWaterTracker(TestCase): def setUp(self): self.hwm = HighWaterTracker() def test_mainline(self): # Test merging of updates between a snapshot with etcd_index 10 and # updates coming in afterwards with indexes 11, 12, ... # We use prefix "/a/$" because $ is not allowed in the trie so it # implicitly tests encoding/decoding is being properly applied. old_hwm = self.hwm.update_hwm("/a/$/c", 9) # Pre-snapshot self.assertEqual(old_hwm, None) old_hwm = self.hwm.update_hwm("/b/c/d", 9) # Pre-snapshot self.assertEqual(old_hwm, None) old_hwm = self.hwm.update_hwm("/j/c/d", 9) # Pre-snapshot self.assertEqual(old_hwm, None) self.assertEqual(len(self.hwm), 3) # While merging a snapshot we track deletions. self.hwm.start_tracking_deletions() # Send in some keys from the snapshot. 
old_hwm = self.hwm.update_hwm("/a/$/c", 10) # From snapshot self.assertEqual(old_hwm, 9) old_hwm = self.hwm.update_hwm("/a/$/d", 10) # From snapshot self.assertEqual(old_hwm, None) old_hwm = self.hwm.update_hwm("/d/e/f", 10) # From snapshot self.assertEqual(old_hwm, None) self.assertEqual(len(self.hwm), 5) # This key is first seen in the event stream, so the snapshot version # should be ignored. old_hwm = self.hwm.update_hwm("/a/h/i", 11) # From events self.assertEqual(old_hwm, None) old_hwm = self.hwm.update_hwm("/a/h/i", 10) # From snapshot self.assertEqual(old_hwm, 11) old_hwm = self.hwm.update_hwm("/a/h/i", 12) # From events self.assertEqual(old_hwm, 11) # Still 11, snapshot ignored. self.assertEqual(len(self.hwm), 6) # Then a whole subtree gets deleted by the events. deleted_keys = self.hwm.store_deletion("/a/$", 13) self.assertEqual(set(deleted_keys), set(["/a/$/c", "/a/$/d"])) self.assertEqual(len(self.hwm), 4) # But afterwards, we see a snapshot key within the subtree, it should # be ignored. old_hwm = self.hwm.update_hwm("/a/$/e", 10) self.assertEqual(old_hwm, 13) # Returns the etcd_index of the delete. # Then a new update from the event stream, recreates the directory. old_hwm = self.hwm.update_hwm("/a/$/f", 14) self.assertEqual(old_hwm, None) self.assertEqual(len(self.hwm), 5) # And subsequent updates are processed ignoring the delete. old_hwm = self.hwm.update_hwm("/a/$/f", 15) self.assertEqual(old_hwm, 14) # However, snapshot updates from within the deleted subtree are still # ignored. old_hwm = self.hwm.update_hwm("/a/$/e", 10) self.assertEqual(old_hwm, 13) # Returns the etcd_index of the delete. old_hwm = self.hwm.update_hwm("/a/$/f", 10) self.assertEqual(old_hwm, 13) # Returns the etcd_index of the delete. old_hwm = self.hwm.update_hwm("/a/$/g", 10) self.assertEqual(old_hwm, 13) # Returns the etcd_index of the delete. self.assertEqual(len(self.hwm), 5) # But ones outside the subtree ar not. 
old_hwm = self.hwm.update_hwm("/f/g", 10) self.assertEqual(old_hwm, None) # And subsequent updates are processed ignoring the delete. old_hwm = self.hwm.update_hwm("/a/$/f", 16) self.assertEqual(old_hwm, 15) # End of snapshot: we stop tracking deletions, which should free up the # resources. self.hwm.stop_tracking_deletions() self.assertEqual(self.hwm._deletion_hwms, None) # Then, subseqent updates should be handled normally. old_hwm = self.hwm.update_hwm("/a/$/f", 17) self.assertEqual(old_hwm, 16) # From previous event old_hwm = self.hwm.update_hwm("/g/b/f", 18) self.assertEqual(old_hwm, None) # Seen for the first time. old_hwm = self.hwm.update_hwm("/d/e/f", 19) self.assertEqual(old_hwm, 10) # From the snapshot. self.assertEqual(len(self.hwm), 7) # We should be able to find all the keys that weren't seen during # the snapshot. old_keys = self.hwm.remove_old_keys(10) self.assertEqual(set(old_keys), set(["/b/c/d", "/j/c/d"])) self.assertEqual(len(self.hwm), 5) # They should now be gone from the index. old_hwm = self.hwm.update_hwm("/b/c/d", 20) self.assertEqual(old_hwm, None) self.assertEqual(len(self.hwm), 6) class TestKeyEncoding(TestCase): def test_encode_key(self): self.assert_enc_dec("/calico/v1/foo/bar", "/calico/v1/foo/bar/") self.assert_enc_dec("/:_-./foo", "/:_-./foo/") self.assert_enc_dec("/:_-.~/foo", "/:_-.%7E/foo/") self.assert_enc_dec("/%/foo", "/%25/foo/") self.assert_enc_dec(u"/\u01b1/foo", "/%C6%B1/foo/") self.assertEqual(hwm.encode_key("/foo/"), "/foo/") def assert_enc_dec(self, key, expected_encoding): encoded = hwm.encode_key(key) self.assertEqual( encoded, expected_encoding, msg="Expected %r to encode as %r but got %r" % (key, expected_encoding, encoded)) decoded = hwm.decode_key(encoded) self.assertEqual( decoded, key, msg="Expected %r to decode as %r but got %r" % (encoded, key, decoded))
6,369
2,263
# Copyright (c) AIRBUS and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .gym import GymDomain, DeterministicInitializedGymDomain, GymWidthDomain, \ GymDiscreteActionDomain, DeterministicGymDomain, CostDeterministicGymDomain, \ GymPlanningDomain, GymDomainStateProxy, GymDomainHashable, AsGymEnv
404
127
from django.test import TestCase from django.urls import reverse from django_netjsongraph.tests import CreateGraphObjectsMixin from django_netjsongraph.tests.base.test_admin import TestAdminMixin from openwisp_users.tests.utils import TestOrganizationMixin from openwisp_utils.tests.utils import TestMultitenantAdminMixin from . import CreateOrgMixin from ..apps import OpenwispNetworkTopologyConfig as appconfig from ..models import Link, Node, Topology class TestAdmin(CreateGraphObjectsMixin, CreateOrgMixin, TestAdminMixin, TestCase): topology_model = Topology link_model = Link node_model = Node @property def prefix(self): return 'admin:{0}'.format(appconfig.label) def setUp(self): org = self._create_org() t = self._create_topology(organization=org) self._create_node(label="node1", addresses="192.168.0.1;", topology=t, organization=org) self._create_node(label="node2", addresses="192.168.0.2;", topology=t, organization=org) super(TestAdmin, self).setUp() class TestMultitenantAdmin(CreateGraphObjectsMixin, TestMultitenantAdminMixin, TestOrganizationMixin, TestCase): topology_model = Topology node_model = Node link_model = Link operator_permission_filters = [ {'codename__endswith': 'topology'}, {'codename__endswith': 'node'}, {'codename__endswith': 'link'}, ] def _create_multitenancy_test_env(self): org1 = self._create_org(name='test1org') org2 = self._create_org(name='test2org') inactive = self._create_org(name='inactive-org', is_active=False) operator = self._create_operator(organizations=[org1, inactive]) t1 = self._create_topology(label='topology1org', organization=org1) t2 = self._create_topology(label='topology2org', organization=org2) t3 = self._create_topology(label='topology3org', organization=inactive) n11 = self._create_node(label='node1org1', topology=t1, organization=org1) n12 = self._create_node(label='node2org1', topology=t1, organization=org1) n21 = self._create_node(label='node1org2', topology=t2, organization=org2) n22 = 
self._create_node(label='node2org2', topology=t2, organization=org2) n31 = self._create_node(label='node1inactive', topology=t3, organization=inactive) n32 = self._create_node(label='node2inactive', topology=t3, organization=inactive) l1 = self._create_link(topology=t1, organization=org1, source=n11, target=n12) l2 = self._create_link(topology=t2, organization=org2, source=n21, target=n22) l3 = self._create_link(topology=t3, organization=inactive, source=n31, target=n32) data = dict(t1=t1, t2=t2, t3_inactive=t3, n11=n11, n12=n12, l1=l1, n21=n21, n22=n22, l2=l2, n31=n31, n32=n32, l3_inactive=l3, org1=org1, org2=org2, inactive=inactive, operator=operator) return data def test_topology_queryset(self): data = self._create_multitenancy_test_env() self._test_multitenant_admin( url=reverse('admin:topology_topology_changelist'), visible=[data['t1'].label, data['org1'].name], hidden=[data['t2'].label, data['org2'].name, data['t3_inactive'].label] ) def test_topology_organization_fk_queryset(self): data = self._create_multitenancy_test_env() self._test_multitenant_admin( url=reverse('admin:topology_topology_add'), visible=[data['org1'].name], hidden=[data['org2'].name, data['inactive']], select_widget=True ) def test_node_queryset(self): data = self._create_multitenancy_test_env() self._test_multitenant_admin( url=reverse('admin:topology_node_changelist'), visible=[data['n11'].label, data['n12'].label, data['org1'].name], hidden=[data['n21'].label, data['n22'].label, data['org2'].name, data['n31'].label, data['n32'].label, data['inactive']] ) def test_node_organization_fk_queryset(self): data = self._create_multitenancy_test_env() self._test_multitenant_admin( url=reverse('admin:topology_node_add'), visible=[data['org1'].name], hidden=[data['org2'].name, data['inactive']], select_widget=True ) def test_link_queryset(self): data = self._create_multitenancy_test_env() self._test_multitenant_admin( url=reverse('admin:topology_link_changelist'), visible=[str(data['l1']), 
data['org1'].name], hidden=[str(data['l2']), data['org2'].name, str(data['l3_inactive'])] ) def test_link_organization_fk_queryset(self): data = self._create_multitenancy_test_env() self._test_multitenant_admin( url=reverse('admin:topology_link_add'), visible=[data['org1'].name], hidden=[data['org2'].name, data['inactive']], select_widget=True ) def test_node_topology_fk_queryset(self): data = self._create_multitenancy_test_env() self._test_multitenant_admin( url=reverse('admin:topology_node_add'), visible=[data['t1'].label], hidden=[data['t2'].label, data['t3_inactive'].label] ) def test_link_topology_fk_queryset(self): data = self._create_multitenancy_test_env() self._test_multitenant_admin( url=reverse('admin:topology_link_add'), visible=[data['t1'].label], hidden=[data['t2'].label, data['t3_inactive'].label] ) def test_node_topology_filter(self): data = self._create_multitenancy_test_env() t_special = self._create_topology(label='special', organization=data['org1']) self._test_multitenant_admin( url=reverse('admin:topology_node_changelist'), visible=[data['t1'].label, t_special.label], hidden=[data['t2'].label, data['t3_inactive'].label] ) def test_link_topology_filter(self): data = self._create_multitenancy_test_env() t_special = self._create_topology(label='special', organization=data['org1']) self._test_multitenant_admin( url=reverse('admin:topology_link_changelist'), visible=[data['t1'].label, t_special.label], hidden=[data['t2'].label, data['t3_inactive'].label] )
7,092
2,180
import csv def read_regressor_examples(num_of_features, num_of_decisions, file_path): xs = [] ys = [] with open(file_path, mode='r', encoding='utf-8') as file: reader = csv.reader(file, delimiter=' ') for row in reader: x = [float(value) for value in row[0 : num_of_features]] y = [float(value) for value in row[num_of_features : num_of_features + num_of_decisions]] xs.append(x) ys.append(y) return { 'x': xs, 'y': ys }
527
185
from pathlib import Path import shutil from django.conf import settings from django.core import cache, mail from django.test import TestCase from django_mail_viewer.backends.database.models import EmailMessage class DatabaseBackendEmailMessageTest(TestCase): connection_backend = 'django_mail_viewer.backends.database.backend.EmailBackend' @classmethod def setUpTestData(cls): m = mail.EmailMultiAlternatives( 'Email subject', 'Email text', 'test@example.com', ['to1@example.com', 'to2.example.com'] ) m.attach_alternative( '<html><body><p style="background-color: #AABBFF; color: white">Email html</p></body></html>', 'text/html', ) current_dir = Path(__file__).resolve().parent m.attach_file(current_dir / 'test_files' / 'icon.gif', 'image/gif') with mail.get_connection(cls.connection_backend) as connection: connection.send_messages([m]) cls.multipart_message = EmailMessage.objects.filter(parent=None).first() @classmethod def tearDownClass(cls) -> None: try: shutil.rmtree(settings.MEDIA_ROOT) finally: super().tearDownClass() def test_get(self): test_matrix = [ {'header_name': 'Content-Type', 'value': 'multipart/mixed'}, {'header_name': 'Subject', 'value': 'Email subject'}, ] for t in test_matrix: with self.subTest(header=t['header_name']): self.assertEqual(self.multipart_message.get(t['header_name']), t['value']) # test that looking up by headeris not case sensitive self.assertEqual( self.multipart_message.get(t['header_name']), self.multipart_message.get(t['header_name'].lower()) ) def test_is_multipart(self): self.assertTrue(self.multipart_message.is_multipart()) with mail.get_connection(self.connection_backend) as connection: mail.EmailMultiAlternatives( f'Not multipart', f'Not multipart', 'test@example.com', ['to1@example.com', 'to2.example.com'], connection=connection, ).send() m = EmailMessage.objects.filter(parent=None).latest('id') self.assertFalse(m.is_multipart()) def test_walk(self): self.assertEqual( 
list(EmailMessage.objects.filter(parent=self.multipart_message).order_by('-created_at', 'id')), list(self.multipart_message.walk()), ) def test_get_content_type(self): # The main message followed by each of its parts expected_content_types = ['multipart/mixed', 'multipart/alternative', 'text/plain', 'text/html', 'image/gif'] self.assertEqual( expected_content_types, [m.get_content_type() for m in EmailMessage.objects.all().order_by('created_at', 'id')], ) def test_get_payload(self): m = self.multipart_message.parts.exclude(file_attachment='').get() # May need to seek back to 0 after this self.assertEqual(m.file_attachment.read(), m.get_payload()) def test_get_filename(self): m = self.multipart_message.parts.exclude(file_attachment='').get() self.assertEqual('icon.gif', m.get_filename())
3,355
983
''' utils.py General utility functions: unit conversions, great-circle distances, CSV queries, platform-independent web browsing. ''' import csv import math import webbrowser # UNIT CONVERSIONS MPS_TO_KTS = 1.944 class units: def mps_to_kts(mps): return mps*MPS_TO_KTS def enforceTwoDigits(numStr): if len(numStr) == 1: return "0"+numStr return numStr def enforceDigitsLeading(numStr, maxDig): digits = len(numStr) if digits < maxDig: for i in range(maxDig-digits): numStr = "0" + numStr return numStr def enforceDigitsTrailing(numStr, maxDig): digits = len(numStr) if digits < maxDig: for i in range(maxDig-digits): numStr = numStr + "0" return numStr class geo: def nearestSea(lat, lon): # true if a is inside the range [b, c] def within(a, b, c): if b > c: c,b=b,c return a >= b and a <= c def inBbox(e): lat0,lon0 = float(e['lat0']),float(e['lon0']) lat1,lon1 = float(e['lat1']),float(e['lon1']) clat,clon = float(e['clat']),float(e['clon']) dist = geo.dist_coord(lat, lon, clat, clon) return (within(lat, lat0, lat1) and within(lon, lon0, lon1),dist) def saveDist(e, args): e['dist'] = args[0] def sortDist(e): return e['dist'] seas = db.query("./data/worldseas.csv", inBbox, saveDist) seas.sort(key=sortDist) if len(seas) > 0: return seas[0]['name'] return "" def latlon_to_nmea(lat, lon): latDeg = lat latMin = (latDeg - math.floor(latDeg))*60 lonDeg = lon lonMin = (lonDeg - math.floor(lonDeg))*60 if latDeg > 0: latDir = "N" else: latDir = "S" if lonDeg > 0: lonDir = "E" else: lonDir = "W" latMinStr = str(round(latMin,4)) latMinMajorStr = latMinStr[:latMinStr.find(".")] latMinMinorStr = latMinStr[latMinStr.find(".")+1:] latMinMajorStr = units.enforceDigitsLeading(latMinMajorStr, 2) latMinMinorStr = units.enforceDigitsTrailing(latMinMinorStr, 4) latMinStr = latMinMajorStr + "." 
+ latMinMinorStr lonMinStr = str(round(lonMin,4)) lonMinMajorStr = lonMinStr[:lonMinStr.find(".")] lonMinMinorStr = lonMinStr[lonMinStr.find(".")+1:] lonMinMajorStr = units.enforceDigitsLeading(lonMinMajorStr, 2) lonMinMinorStr = units.enforceDigitsTrailing(lonMinMinorStr, 4) lonMinStr = lonMinMajorStr + "." + lonMinMinorStr return str(int(abs(latDeg)))+latMinStr + "," + latDir + "," + str(int(abs(lonDeg)))+lonMinStr + "," + lonDir def deg_to_dms(deg, type='lat'): # source: https://stackoverflow.com/questions/2579535/convert-dd-decimal-degrees-to-dms-degrees-minutes-seconds-in-python decimals, number = math.modf(deg) d = int(number) m = int(decimals * 60) s = (deg - d - m / 60) * 3600.00 compass = { 'lat': ('N','S'), 'lon': ('E','W') } compass_str = compass[type][0 if d >= 0 else 1] return '{}{}º{}\'{:.2f}"'.format(compass_str, abs(d), abs(m), abs(s)) def latlon_to_str(lat, lon): return geo.deg_to_dms(lat,'lat'),geo.deg_to_dms(lon,'lon') # distance between two global points in nautical miles def dist_coord(lat1,lon1,lat2,lon2): # source: https://stackoverflow.com/questions/19412462/getting-distance-between-two-points-based-on-latitude-longitude R = 6373.0 # approximate radius of earth in km lat1 = math.radians(lat1) lon1 = math.radians(lon1) lat2 = math.radians(lat2) lon2 = math.radians(lon2) dlon = lon2 - lon1 dlat = lat2 - lat1 a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2 c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) return 0.539957*R * c # wraps angle to range [0, 360) def wrap_angle(b): deg = b while deg < 0: deg = 360+deg while deg >= 360: deg = deg-360 return deg class webviz: def loadURL(url): webbrowser.open(url) def openseamap(lat, lon): return "https://map.openseamap.org/?zoom=8&lat=" + lat + "&lon=" + lon + "&mlat=" + lat + "&mlon=" + lon + "&layers=BFTFFFTFFTF0FFFFFFFFFF" def pirosail(boatid): return "http://piro.biz/tracker/?2d&marineid=" + boatid def earthwindmap(lat, lon): return 
"https://earth.nullschool.net/#current/wind/surface/level/orthographic=" + lon + "," + lat + ",3000/loc=" + lon + "," + lat class db: # execute a function on each element of a CSV def execute(csvFile, executeFunc): with open(csvFile, newline='') as csvfile: elements = csv.DictReader(csvfile) for element in elements: executeFunc(element) # return results filtered by a query function, and optionally post-process results def query(csvFile, queryFunc, processFunc=None): results = [] with open(csvFile, newline='') as csvfile: elements = csv.DictReader(csvfile) for element in elements: res = queryFunc(element) if res[0]: if processFunc != None: processFunc(element, res[1:]) results.append(element) return results # return first element matching query function def findFirst(csvFile, queryFunc): with open(csvFile, newline='') as csvfile: elements = csv.DictReader(csvfile) for element in elements: if queryFunc(element): return element return None
5,977
2,065
from flask import Flask from flask import request, render_template, redirect from datetime import datetime from pymongo import MongoClient import html import random import json import ast from flask.ext.pymongo import PyMongo from flask import make_response, request, current_app from functools import update_wrapper app = Flask(__name__) mongo = PyMongo(app) app.config['MONGO_HOST'] = 'localhost' app.config['MONGO_PORT'] = 27017 app.config['MONGO_DBNAME'] = 'chirrup' mClient = MongoClient('localhost',27017) collection = mClient['chirrup']['tweets'] @app.route('/', methods=['GET','POST']) def home(): if request.method=='POST': var = request.form['query'] return redirect('/'+var, code=302) else: distincthashtags = collection.distinct("hashtags") return render_template("home.html",distincthashtags=distincthashtags) @app.route('/<input>', methods=['GET','POST']) def analyze(input): hashtag = input country_sentiment_query = list(collection.aggregate([{"$match":{"hashtags":hashtag}},{"$group":{'_id':'$country',"avgsentiment": {"$avg":"$sentiment"}}}])) average_sentiment_query = list(collection.aggregate([{"$match":{"hashtags":hashtag}},{"$group":{'_id':'sentiment',"avgsentiment": {"$avg":"$sentiment"}}}])) if len(average_sentiment_query)==0: return render_template('fourohfour.html') country_wise_sentiment = json.dumps(country_sentiment_query) average_sentiment = json.dumps(average_sentiment_query[0]) sorter = [('timestamp', 1)] last_ten_tweets = list(collection.find({"hashtags":hashtag},{'timestamp':0, '_id': 0}).sort(sorter))[:10] return render_template("analysis.html",country_wise_sentiment=country_wise_sentiment, average_sentiment=average_sentiment, hashtag=hashtag, last_ten_tweets=last_ten_tweets) if __name__=="__main__": app.run(debug=True)
1,862
629
from resticweb.dictionary.resticweb_variables import Config import resticweb.engine as local_engine from resticweb.dictionary.resticweb_exceptions import NoEngineAvailable import subprocess import os.path as path def configure_engine(): return_value = False command = [Config.ENGINE_COMMAND, 'version'] try: finished_process = subprocess.run( command, shell=False, capture_output=True) if finished_process: line = finished_process.stdout.decode('utf-8') errors = finished_process.stderr.decode('utf-8') print(errors) if len(line) > 0: if "compiled with go" in line: return_value = True if return_value: return return_value except FileNotFoundError: pass location, throwaway = path.split(local_engine.__file__) Config.ENGINE_COMMAND = f'{location}{path.sep}restic' command = [Config.ENGINE_COMMAND, 'version'] try: finished_process = subprocess.run( command, shell=False, capture_output=True) line = finished_process.stdout.decode('utf-8') if len(line) > 0: if "compiled with go" in line: return_value = True except FileNotFoundError: raise NoEngineAvailable("Unable to find a backup engine.") if return_value: return return_value else: raise NoEngineAvailable("Unable to find a backup engine.")
1,578
423
import pandas as pd import json import time from bentoml import env, artifacts, api, BentoService from bentoml.adapters import DataframeInput, JsonInput, StringInput from bentoml.frameworks.sklearn import SklearnModelArtifact @env(infer_pip_packages=True) @artifacts([SklearnModelArtifact('model')]) class AnomalyDetection(BentoService): """ A minimum prediction service exposing a Scikit-learn model """ @api(input=JsonInput()) def analyse(self, param: json): """ An inference API named `analyse` with Dataframe input adapter, which codifies how HTTP requests or CSV files are converted to a pandas Dataframe object as the inference API function iwnput """ dic = {} if param['taskType']=='async': time.sleep(30) try: if len(param['seriesList'])<2: raise Exception() else: series = [] series.append([1635216096000, 23.541]) dic['predictSeriesList'] = series except Exception as ex: dic['code'] = 'detectorError' dic['message'] = 'some error in detector internal!' return dic @api(input=DataframeInput(), batch=True) def predict(self, df: pd.DataFrame): """ An inference API named `predict` with Dataframe input adapter, which codifies how HTTP requests or CSV files are converted to a pandas Dataframe object as the inference API function input """ return self.artifacts.model.predict(df) @api(input=JsonInput()) def analyze(self, param: json): """ An inference API named `predict` with Dataframe input adapter, which codifies how HTTP requests or CSV files are converted to a pandas Dataframe object as the inference API function input """ return "good" @api(input=StringInput()) def doc(self, message: str): """ get README.md """ f = open("README.md") doc = f.read() f.close() return doc
2,104
573
#!/usr/bin/env python3 import sys with open(sys.argv[1], 'w') as f: print('EXPORTS', file=f) print(' somedllfunc', file=f)
139
56
# -*- coding: utf-8 -*- """ Created on Wed Jul 28 13:31:06 2021 @author: user24 """ ''' Suchi wo nyuryoku only accepts integer end shuryou creates a graph as image ''' import matplotlib.pyplot as plt cnt = 0 Y = [] while True: ans = input("数値を入力してください \n-->") if ans == "end": break try: ans_int = int(ans) Y.append(ans_int) cnt += 1 except: print("文字列を読めない!数値を入れてください。") except Exception as error: print(error) X = range(0, cnt) plt.plot(X, Y, marker="o", color="r", linestyle="--") plt.savefig("test.png") # plt.xlabel("入力順番") # Japanese char return erro plt.show() # ========================================================================== ''' '''
730
301
from flanexport import FlanExport, timeout_after import os import ast try: from boto.sqs import connection from boto.sqs.message import Message except: pass class AWSSQS(FlanExport): def __init__(self, meta, config): name = self.__class__.__name__ super().__init__(name, meta, config) @timeout_after(10) def prepare(self): aws_access_key_id = self._getsetting('aws_access_key_id', checkenv=True) aws_secret_access_key = self._getsetting('aws_secret_access_key', checkenv=True) is_secure = self._getsetting('is_secure', erroronnone=False, defaultvalue=True) port = self._getsetting('port', erroronnone=False) proxy = self._getsetting('proxy', erroronnone=False) proxy_port = self._getsetting('proxy_port', erroronnone=False) proxy_user = self._getsetting('proxy_user', erroronnone=False) proxy_pass = self._getsetting('proxy_pass', erroronnone=False) region = self._getsetting('region', erroronnone=False) path = self._getsetting('region', defaultvalue="/") security_token = self._getsetting('security_token', erroronnone=False) validate_certs = self._getsetting('region', defaultvalue=True) profile_name = self._getsetting('profile_name', erroronnone=False) queue_name = self._getsetting('queue_name', erroronnone=True, defaultvalue="flan") sqs_message_attributes = self._getsetting('sqs_message_attributes', erroronnone=False) if sqs_message_attributes: self.sqs_message_attributes = ast.literal_eval(sqs_message_attributes) else: self.sqs_message_attributes = {} try: self.conn = connection.SQSConnection( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, is_secure=is_secure, port=port, proxy=proxy, proxy_port=proxy_port, proxy_user=proxy_user, proxy_pass=proxy_pass, region=region, path=path, security_token=security_token, validate_certs=validate_certs, profile_name=profile_name ) self.sender = self.conn.create_queue(queue_name, self._getsetting('timeout')) except Exception as e: self.logerr('Flan->%s connection to %s:%s failed: %s' % (self.name, 
self.config["host"], self.config["port"], str(e))) os._exit(1) @timeout_after(10) def send(self, data): try: m = Message() m.message_attributes = self.sqs_message_attributes m.set_body(data) self.sender.write(m) except Exception as e: self.logerr('Flan->%s delivery failed: %s' % (self.name, str(e))) pass return @property def closed(self): return False @timeout_after(10) def close(self): try: self.conn.close() except: pass return
3,167
914
import os import sys from setuptools import setup, find_packages if sys.version_info < (3, 6): sys.exit("Sorry, Python >= 3.6 is required for ko_lm_dataformat") with open("requirements.txt") as f: require_packages = [line.strip() for line in f] with open(os.path.join("ko_lm_dataformat", "version.txt")) as f: version = f.read().strip() setup( name="ko_lm_dataformat", version=version, author="Jangwon Park", author_email="adieujw@gmail.com", description="A utility for storing and reading files for Korean LM training.", long_description=open("./README.md", "r", encoding="utf-8").read(), long_description_content_type="text/markdown", url="https://github.com/monologg/ko_lm_dataformat", packages=find_packages(exclude=["tests"]), python_requires=">=3.6", zip_safe=False, include_package_data=True, install_requires=require_packages, )
905
306
#!/usr/bin/env python

import getopt, sys, io
import pandas as pd

#
# Usage
#
def usage (script_name):
    """
    Display the usage.
    """
    print ("")
    print ("Usage: {} [options]".format(script_name))
    print ("")
    print ("That script transforms and filter a fix width data file into a hat symbol separated CSV one")
    print ("")
    print ("Options:")
    print ("  -h, --help      : outputs this help and exits")
    print ("  -v, --verbose   : verbose output (debugging)")
    print ("  -i, --input <input data file-path>")
    print ("  -o, --output <output data file-path>")
    print ("")

#
# Command-line arguments
#
def handle_opt():
    """
    Handle the command-line options.

    Returns (verbose_flag, input_file_path, output_file) — the output is an
    already-open text stream (stdout by default).
    """
    try:
        # FIX: '-v' is a flag, so it must not take an argument ("hv:i:o:"
        # made getopt swallow the next token); long options taking a value
        # need a trailing '='.
        opts, args = getopt.getopt (sys.argv[1:], "hvi:o:",
                                    ["help", "verbose", "input=", "output="])
    except getopt.GetoptError as err:
        # FIX: "except (getopt.GetoptError, err)" was Python 2 syntax and a
        # NameError in Python 3; usage() also takes a single argument.
        print (str (err))
        usage (sys.argv[0])
        sys.exit(2)

    # Options
    verboseFlag = False
    airline_input_filepath = ''
    airline_output_filepath = ''
    airline_output_file = sys.stdout  # '/dev/stdout'

    # Input stream/file given as a positional argument
    if len (args) != 0:
        airline_input_filepath = args[0]

    # Handling
    for o, a in opts:
        if o in ("-h", "--help"):
            usage (sys.argv[0])
            sys.exit()
        elif o in ("-v", "--verbose"):
            verboseFlag = True
        elif o in ("-i", "--input"):
            airline_input_filepath = a
        elif o in ("-o", "--output"):
            airline_output_filepath = a
        else:
            raise ValueError ("That option ({}) is unknown. Rerun that script with the -h option to see the accepted options".format(o))

    # Input file. That file may be compressed with GNU Zip (gzip); pandas
    # infers the compression from the file extension, so the path is passed
    # through as-is.
    # FIX: the input file was previously opened here and the handle leaked
    # (the opened object was never returned nor used).
    # TODO(review): when no input path is given, extract_df() is handed an
    # empty string instead of standard input — confirm intended behaviour.

    # Output file-path
    if (airline_output_filepath != ''):
        airline_output_file = open (airline_output_filepath, 'w')

    # Report the configuration
    airline_input_filepath_str = airline_input_filepath \
        if airline_input_filepath != '' \
        else 'Standard input'
    airline_output_filepath_str = airline_output_filepath \
        if airline_output_filepath != '' \
        else 'Standard output'
    if (airline_output_filepath_str != 'Standard output'):
        print ("Input data file: '{}'".format(airline_input_filepath_str))
        print ("Output data file: '{}'".format(airline_output_filepath_str))

    #
    return (verboseFlag, airline_input_filepath, airline_output_file)

def extract_df (airline_input_filepath):
    """
    Parse a fix width data file containing details about IATA referenced
    airlines, and fill in a Pandas data-frame.
    """
    # Using Pandas with column specification
    col_names = ['name', 'num_code', '3char_code', '2char_code',
                 'address_street_1', 'address_street_2', 'address_city_name',
                 'address_state_name', 'address_country_name',
                 'address_postal_code', 'flag_1', 'flag_2', 'flag_3', 'flag_4',
                 'type', 'num_code_2']
    col_specs = [(0, 80), (80, 84), (84, 87), (87, 90), (90, 130), (130, 170),
                 (170, 195), (195, 215), (215, 259), (259, 373), (373, 374),
                 (374, 375), (375, 376), (376, 377), (377, 379), (379, 385)]
    # Strip leading zeros from the numeric codes, keeping them as strings.
    col_converters = {
        'num_code': lambda x: str(int(x)),
        'num_code_2': lambda x: str(int(x))}
    airline_df = pd.read_fwf(airline_input_filepath,
                             colspecs = col_specs,
                             header = None,
                             names = col_names,
                             converters = col_converters)

    # Leave empty fields empty (otherwise, Pandas specifies NaN).
    # FIX: dropped the deprecated 'method=None' keyword of fillna().
    airline_df.fillna (value = '', inplace = True)

    # Merge num_code and num_code_2: prefer the primary code when present.
    airline_df['num_code'] = airline_df \
        .apply(lambda r: r['num_code'] if r['num_code'] != ''
               else r['num_code_2'], axis = 1)

    # DEBUG
    #print (str(airline_df.head()))
    #print (str(airline_df.dtypes))

    #
    return (airline_df)

def dump_to_csv (airline_df, airline_output_file):
    """
    Dump a sub-set of the Pandas data-frame into a CSV file.
    The field delimiter is the hat symbol ('^').
    """
    subcol_names = ['2char_code', '3char_code', 'num_code', 'name', 'type']

    # DEBUG
    #airline_spec_df = airline_df[airline_df['2char_code'] == 'LH'][subcol_names]
    #print (str(airline_spec_df))

    # Sort by IATA and ICAO codes
    airline_df.sort_values(['2char_code', '3char_code', 'num_code', 'name'],
                           ascending = True, inplace = True)

    # Dump the data-frame into a CSV file
    airline_df.to_csv (airline_output_file, sep = '^',
                       columns = subcol_names,
                       header = True, index = False,
                       doublequote = False, quotechar = '|')

#
# Main
#
def main():
    """
    Main
    """
    # Parse command options
    (verboseFlag, airline_input_filepath, airline_output_file) = handle_opt()

    # DEBUG
    #print ("Type of file: '{}'".format(type(airline_input_filepath)))

    # Parse the fixed width data file of airline details
    airline_df = extract_df (airline_input_filepath)

    # Dump the Pandas data-frame into a CSV file
    dump_to_csv (airline_df, airline_output_file)

#
# Main, when launched from a library
#
if __name__ == "__main__":
    main()
5,852
1,942
from collections import Counter
from typing import (
    Any,
    cast,
)

from django.contrib import messages
from django.contrib.auth.mixins import (
    LoginRequiredMixin,
    UserPassesTestMixin,
)
from django.db import transaction
from django.db.models import (
    Count,
    Q,
)
from django.forms.models import ModelForm
from django.http import (
    HttpRequest,
    HttpResponse,
)
from django.http.response import (
    HttpResponseBase,
    HttpResponseRedirectBase,
)
from django.shortcuts import redirect
from django.utils import timezone
from django.views.generic import (
    CreateView,
    DetailView,
    UpdateView,
)

from meta.views import MetadataMixin

from sanalberto.forms import PollVoteForm
from users.models import User

from ..models import (
    Poll,
    PollDesign,
    PollVote,
)
from .common import EventMixin


class PollMixin(EventMixin):
    """Poll mixin.

    Resolves the poll from the URL ``slug`` kwarg, exposes it as
    ``self.poll`` and lets subclasses veto access through ``check_poll``.
    """

    poll: Poll

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['poll'] = self.poll
        return context

    def get_subtitle(self, context: dict[str, Any]) -> str:
        return self.poll.title

    def check_poll(self, poll: Poll) -> bool:
        # Default method: allow access
        return True

    def check_poll_redirect(self, poll: Poll) -> HttpResponseRedirectBase:
        # Default method: back to the event index
        return redirect('sanalberto:index')

    def dispatch(self, request, *args, **kwargs):
        slug = kwargs.get('slug')
        try:
            self.poll = Poll.objects.get(slug=slug)
        except Poll.DoesNotExist:
            # FIX: previously this case fell through to
            # check_poll_redirect(self.poll) with ``self.poll`` never
            # assigned, raising AttributeError instead of redirecting.
            return redirect('sanalberto:index')

        # FIX: replaces an ``assert`` (stripped under ``python -O``) with an
        # explicit check; ``is not True`` keeps the original strictness.
        if self.check_poll(self.poll) is not True:
            return self.check_poll_redirect(self.poll)

        return super().dispatch(request, *args, **kwargs)


class PollDetailView(EventMixin, MetadataMixin, DetailView):
    """Poll detail view."""

    model = Poll

    def get_queryset(self):
        return super().get_queryset().prefetch_related('designs', 'winner__user')

    def get_subtitle(self, context: dict[str, Any]) -> str:
        return cast(Poll, context['object']).title

    def get_context_data(self, **kwargs):
        # FIX: use the object already fetched by DetailView.get() instead of
        # issuing a second query via self.get_object().
        poll = cast(Poll, self.object)
        user = self.request.user

        # Approved designs are public; an authenticated user also sees theirs.
        query = Q(is_approved=True)
        if user.is_authenticated:
            query |= Q(user=user)

        designs = poll.designs.filter(query)

        my_designs: list[PollDesign] = []
        approved_designs: list[PollDesign] = []

        for design in designs:
            if user.is_authenticated and design.user == user:
                my_designs.append(design)
            if design.is_approved:
                approved_designs.append(design)

        my_vote: 'PollVote | None' = None
        if user.is_authenticated and poll.voting_enabled:
            my_vote = (
                poll
                .votes
                .filter(user=user)
                .prefetch_related('first', 'second', 'third')
                .first()
            )

        context = super().get_context_data(**kwargs)
        context['now'] = timezone.now()
        context['approved_designs'] = approved_designs
        context['my_designs'] = my_designs
        context['my_vote'] = my_vote
        return context


class DesignCreateView(PollMixin, MetadataMixin, LoginRequiredMixin, CreateView):
    """Design create view."""

    model = PollDesign
    fields = ['title', 'image', 'source_file', 'vector_file']

    def check_poll(self, poll: Poll) -> bool:
        # Only while design registration is open
        return poll.register_enabled

    def check_poll_redirect(self, poll: Poll) -> HttpResponseRedirectBase:
        return redirect('sanalberto:poll_detail', slug=poll.slug)

    def get_subtitle(self, context: dict[str, Any]) -> str:
        title = cast(Poll, context['object']).title
        return f'Presentar diseño para {title}'

    def form_valid(self, form: 'ModelForm[PollDesign]') -> HttpResponse:
        # Attach the poll and the submitting user before saving.
        obj = form.save(commit=False)
        obj.poll = self.poll
        obj.user = cast(User, self.request.user)
        obj.save()

        return redirect('sanalberto:poll_detail', slug=self.poll.slug)


class PollVoteCreateView(
        PollMixin, MetadataMixin, LoginRequiredMixin, UpdateView):
    """Poll vote create view."""

    model = PollVote
    form_class = PollVoteForm

    def get_object(self, *args) -> 'PollVote | None':
        return PollVote.objects.filter(user=self.request.user).first()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['designs'] = self.poll.designs.all()
        return context

    def get_subtitle(self, context: dict[str, Any]) -> str:
        title = cast(Poll, context['object']).title
        return f'Votar diseño para {title}'

    def get_initial(self) -> dict[str, Any]:
        # Pre-select a design passed as ?selected=<id>, ignoring bad values.
        initial = super().get_initial()

        try:
            initial['first'] = int(self.request.GET['selected'])
        except (ValueError, KeyError):
            pass

        return initial

    def get_form_kwargs(self) -> dict[str, Any]:
        kwargs = super().get_form_kwargs()
        kwargs['designs'] = self.poll.designs.all()
        return kwargs

    def check_poll(self, poll: Poll) -> bool:
        return poll.voting_enabled

    def check_poll_redirect(self, poll: Poll) -> HttpResponseRedirectBase:
        return redirect('sanalberto:poll_detail', slug=poll.slug)

    def form_valid(self, form: 'ModelForm[PollVote]') -> HttpResponse:
        obj = form.save(commit=False)

        # Row-lock any existing vote so concurrent submissions cannot create
        # duplicates.
        with transaction.atomic():
            existing = (
                PollVote
                .objects
                .filter(user=self.request.user, poll=self.poll)
                .select_for_update()
                .first()
            )

            if existing is None:
                obj.poll = self.poll
                obj.user = cast(User, self.request.user)
                obj.save()
            else:
                existing.first = obj.first
                existing.second = obj.second
                existing.third = obj.third
                existing.save()

        # FIX: on the update path ``obj.poll`` was never assigned, so
        # ``obj.poll.slug`` raised; redirect using the mixin's poll instead.
        return redirect('sanalberto:poll_detail', slug=self.poll.slug)

    def dispatch(self, request: HttpRequest, *args: str, **kwargs: Any) -> HttpResponseBase:
        # Voting requires a verified e-mail address.
        if isinstance(request.user, User) and not request.user.is_verified:
            messages.error(
                request,
                'Debes verificar tu e-mail para poder votar',
                extra_tags='show_profile_btn'
            )
            return redirect('sanalberto:poll_detail', kwargs['slug'])

        return super().dispatch(request, *args, **kwargs)


class PollAdminView(
        PollMixin, MetadataMixin, UserPassesTestMixin, DetailView):
    """Poll administration view: vote counts and the provisional winner."""

    model = Poll
    template_name_suffix = '_admin'

    def test_func(self) -> 'bool | None':
        user = self.request.user

        return isinstance(user, User) and user.has_perms((
            'sanalberto.view_poll',
            'sanalberto.view_pollvote',
        ))

    def get_subtitle(self, context: dict[str, Any]) -> str:
        title = cast(Poll, context['object']).title
        return f'Administrar encuesta para {title}'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)

        poll: Poll = context['object']

        # Weighted Borda-style count: 3 points for a first choice, 2 for a
        # second, 1 for a third.
        points: Counter[int] = Counter()
        votes = 0

        if poll.voting_start < timezone.now():
            votes = poll.votes.count()

            for field, multiplier in (('first', 3), ('second', 2), ('third', 1)):
                all_votes = poll.votes.values(field).annotate(count=Count(field))

                for item in all_votes:
                    points[item[field]] += item['count'] * multiplier

        designs = [
            (obj, points.get(obj.id, 0))
            for obj in poll.designs.all()
        ]
        designs.sort(key=lambda pair: pair[1], reverse=True)

        context['designs'] = designs
        context['votes'] = votes

        # FIX: guard against polls with no designs (previously IndexError).
        if designs and designs[0][1] > 0:
            context['winner'] = designs[0][0]

        return context
8,117
2,487
import numpy as np import scipy.spatial as spatial def bilinear_interpolate(img, coords): """ Interpolates over every image channel http://en.wikipedia.org/wiki/Bilinear_interpolation :param img: max 3 channel image :param coords: 2 x _m_ array. 1st row = xcoords, 2nd row = ycoords :returns: array of interpolated pixels with same shape as coords """ int_coords = np.int32(coords) x0, y0 = int_coords dx, dy = coords - int_coords # 4 Neighour pixels q11 = img[y0, x0] q21 = img[y0, x0+1] q12 = img[y0+1, x0] q22 = img[y0+1, x0+1] btm = q21.T * dx + q11.T * (1 - dx) top = q22.T * dx + q12.T * (1 - dx) inter_pixel = top * dy + btm * (1 - dy) return inter_pixel.T def grid_coordinates(points): """ x,y grid coordinates within the ROI of supplied points :param points: points to generate grid coordinates :returns: array of (x, y) coordinates """ xmin = np.min(points[:, 0]) xmax = np.max(points[:, 0]) + 1 ymin = np.min(points[:, 1]) ymax = np.max(points[:, 1]) + 1 return np.asarray([(x, y) for y in range(ymin, ymax) for x in range(xmin, xmax)], np.uint32) def process_warp(src_img, result_img, tri_affines, dst_points, delaunay): """ Warp each triangle from the src_image only within the ROI of the destination image (points in dst_points). """ roi_coords = grid_coordinates(dst_points) # indices to vertices. 
-1 if pixel is not in any triangle roi_tri_indices = delaunay.find_simplex(roi_coords) for simplex_index in range(len(delaunay.simplices)): coords = roi_coords[roi_tri_indices == simplex_index] num_coords = len(coords) out_coords = np.dot(tri_affines[simplex_index], np.vstack((coords.T, np.ones(num_coords)))) x, y = coords.T result_img[y, x] = bilinear_interpolate(src_img, out_coords) return None def triangular_affine_matrices(vertices, src_points, dest_points): """ Calculate the affine transformation matrix for each triangle (x,y) vertex from dest_points to src_points :param vertices: array of triplet indices to corners of triangle :param src_points: array of [x, y] points to landmarks for source image :param dest_points: array of [x, y] points to landmarks for destination image :returns: 2 x 3 affine matrix transformation for a triangle """ ones = [1, 1, 1] for tri_indices in vertices: src_tri = np.vstack((src_points[tri_indices, :].T, ones)) dst_tri = np.vstack((dest_points[tri_indices, :].T, ones)) mat = np.dot(src_tri, np.linalg.inv(dst_tri))[:2, :] yield mat def warp_image(src_img, src_points, dest_points, dest_shape, dtype=np.uint8): # Resultant image will not have an alpha channel num_chans = 3 src_img = src_img[:, :, :3] rows, cols = dest_shape[:2] result_img = np.zeros((rows, cols, num_chans), dtype) delaunay = spatial.Delaunay(dest_points) tri_affines = np.asarray(list(triangular_affine_matrices( delaunay.simplices, src_points, dest_points))) process_warp(src_img, result_img, tri_affines, dest_points, delaunay) return result_img def test_local(): from functools import partial import cv2 import scipy.misc import locator import aligner from matplotlib import pyplot as plt # Load source image face_points_func = partial(locator.face_points, '../data') base_path = '../females/Screenshot 2015-03-04 17.11.12.png' src_path = '../females/BlDmB5QCYAAY8iw.jpg' src_img = cv2.imread(src_path) # Define control points for warps src_points = face_points_func(src_path) 
base_img = cv2.imread(base_path) base_points = face_points_func(base_path) size = (600, 500) src_img, src_points = aligner.resize_align(src_img, src_points, size) base_img, base_points = aligner.resize_align(base_img, base_points, size) result_points = locator.weighted_average_points(src_points, base_points, 0.2) # Perform transform dst_img1 = warp_image(src_img, src_points, result_points, size) dst_img2 = warp_image(base_img, base_points, result_points, size) import blender ave = blender.weighted_average(dst_img1, dst_img2, 0.6) mask = blender.mask_from_points(size, result_points) blended_img = blender.poisson_blend(dst_img1, dst_img2, mask) plt.subplot(2, 2, 1) plt.imshow(ave) plt.subplot(2, 2, 2) plt.imshow(dst_img1) plt.subplot(2, 2, 3) plt.imshow(dst_img2) plt.subplot(2, 2, 4) plt.imshow(blended_img) plt.show() if __name__ == "__main__": test_local()
4,473
1,770
#!/usr/bin/env python # coding=utf-8 # coding: utf8 """ configuration for gunicorn """ import multiprocessing bind = '0.0.0.0:8888' backlog = 2048 workers = multiprocessing.cpu_count() * 2 + 1 threads = 1 worker_class = 'sync' worker_connections = 1000 timeout = 500 keepalive = 40 daemon = False loglevel = 'info' errorlog = '-' accesslog = '-'
346
148
print('Digite um número inteiro positivo de três dígitos (100 a 999), para gerar o número invertido') num = int(input('Número: ')) num = str(num) reverso = num[::-1] print(f'O número ao contrário de: {num} é: {reverso}')
220
87
import os import sys import transaction import csv from pyramid.paster import ( get_appsettings, setup_logging, ) from pyramid.scripts.common import parse_vars from ..models.meta import Base from ..models import ( get_engine, get_session_factory, get_tm_session, ) from ..models import Instrument def usage(argv): cmd = os.path.basename(argv[0]) print('usage: %s <config_uri> [var=value]\n' '(example: "%s development.ini")' % (cmd, cmd)) sys.exit(1) def main(argv=sys.argv): if len(argv) < 3: usage(argv) config_uri = argv[1] fname = argv[2] options = parse_vars(argv[3:]) setup_logging(config_uri) settings = get_appsettings(config_uri, options=options) engine = get_engine(settings) Base.metadata.create_all(engine) session_factory = get_session_factory(engine) with transaction.manager: dbsession = get_tm_session(session_factory, transaction.manager) with open(fname) as csvfile: reader = csv.DictReader(csvfile) for row in reader: p=Instrument() p.name=row['name'] p.description=row['description'] p.importTag=row['importTag'] dbsession.add(p)
1,277
404
#!python3 """ A utility for performing simulation experiments on auction mechanisms. The experiment is similar to the one described by McAfee (1992), Table I (page 448). In each experiment, we measure the actual vs. the optimal gain-from-trade. This experiment using the real prices from Stock market. the prices are in csv files in stocks folder. The results are printed to a CSV file. The columns are: * stock_name - The stock name which the prices came from. * recipe - (1,1) for McAfee; can be any vector of ones, e.g. (1,1,1), for our trade-reduction mechanism, or any vector of positive integers for our ascending-auction mechanism. * num_possible_trades = n = total number of potential procurement-sets (e.g. if n=100 and recipe=[1,2] then there are 100 buyers and 200 sellers). * optimal_count = k = number of deals in the optimal trade, averaged over all iterations. Note that k <= n. E.g., there may be 100 buyers and 100 sellers, but only 50 procurement-sets with positive GFT, so k=50. * optimal_gft - OPT = gain-from-trade in the optimal trade, * optimal_trade_with_gft_zero - OPT = gain-from-trade in the optimal trade, including sets with GFT=0 * count = k' = number of deals done by our auction, averaged over all iterations. Theoretically, since at most one deal is removed, it should be either k or k-1. * count_ratio = %k' = count / optimal_count * 100%. * total_gft = GFT = gain-from-trade in the auction, including auctioneer * total_gft_ratio = %GFT = total_gft / optimal_gft * 100%. Theoretically it should be at least 1 - 1/k. In the results, it is usually higher. * market_gft = Market GFT = gain-from-trade in the auction, not including auctioneer. * market_gft_ratio = Market %GFT = market_gft / optimal_gft * 100%. Theoretically it should be at least 1 - 1/k. In the results, it is usually higher. 
Recommended: add manually at the beginning of the file the header line: ,recipe,n,k,k+0,OPT_GFT,OPT_GFT+0,McAfee_k',%k',total_gft,%total_gft,market_gft,market_%gft,McAfee_Without_Heuristic_k',%k',total_gft,%total_gft,market_gft,market_%gft,SBB_External_Competition_k',%k',gft,%gft,market_gft,market_%gft,SBB_Ascending_Prices_k,%k',gft,%gft,market_gft,market_%gft Author: Dvir Gilor Since: 2020-08 """ from markets import Market from agents import AgentCategory from tee_table.tee_table import TeeTable from collections import OrderedDict from get_stocks_data import getStocksPricesShuffled import random from os import path def experiment(results_csv_file:str, auction_functions:list, auction_names:str, recipe:tuple, nums_of_agents=None, stocks_prices:list=None, stock_names:list=None, num_of_iterations=1000, run_with_stock_prices=True, report_diff=False): """ Run an experiment similar to McAfee (1992) experiment on the given auction. :param results_csv_file: the experiment result file. :param auction_functions: list of functions for executing the auction under consideration. :param auction_names: titles of the experiment, for printouts. :param recipe: can be any vector of ones, e.g. (1,1,1), for our trade-reduction mechanism, or any vector of positive integers for our ascending-auction mechanism. :param stocks_prices: list of prices for each stock and each agent. :param stock_names: list of stocks names which prices are belongs, for naming only. 
""" TABLE_COLUMNS = ["iterations", "stockname", "recipe", "numpossibletrades", "optimalcount", "gftratioformula", "optimalcountwithgftzero", "optimalgft", "optimalgftwithgftzero"] AUCTION_COLUMNS = ["count", "countratio", "totalgft", "totalgftratio", "withoutgftzerocountratio", "withoutgftzerototalgft", "withoutgftzerototalgftratio", "marketgft", "marketgftratio"] if path.exists(results_csv_file): print('The file', results_csv_file, 'already exists, skipping') return else: print('Running for the file', results_csv_file) if stocks_prices is None: (stocks_prices, stock_names) = getStocksPricesShuffled() column_names = TABLE_COLUMNS column_names += [auction_name + column for auction_name in auction_names for column in AUCTION_COLUMNS] results_table = TeeTable(column_names, results_csv_file) recipe_str = ":".join(map(str,recipe)) recipe_sum = sum(recipe) recipe_sum_for_buyer = (recipe_sum-recipe[0])/recipe[0] if nums_of_agents is None: nums_of_agents = [10000000] #print(nums_of_agents) total_results = {} for num_of_agents_per_category in nums_of_agents: total_results[str(num_of_agents_per_category)] = [] #print(total_results) for i in range(len(stocks_prices)): stock_prices = stocks_prices[i] for num_of_possible_ps in nums_of_agents: for iteration in range(num_of_iterations): categories = [] if run_with_stock_prices: while len(stock_prices) < num_of_possible_ps * recipe_sum: stock_prices = stock_prices + stock_prices random.shuffle(stock_prices) index = 0 for category in recipe: next_index = index + num_of_possible_ps * category price_sign = recipe_sum_for_buyer if index == 0 else -1 #price_value_multiple = -1 * buyer_agent_count if index > 0 else recipe_sum - buyer_agent_count categories.append(AgentCategory("agent", [int(price*price_sign) for price in stock_prices[index:next_index]])) index = next_index else: #prices from random. 
for index in range(len(recipe)): #for category in recipe: min_value = -100000 if index > 0 else recipe_sum_for_buyer max_value = -1 if index > 0 else 100000 * recipe_sum_for_buyer categories.append(AgentCategory.uniformly_random("agent", num_of_possible_ps*recipe[index], min_value, max_value)) market = Market(categories) (optimal_trade, _) = market.optimal_trade(ps_recipe=list(recipe), max_iterations=10000000, include_zero_gft_ps=False) optimal_count = optimal_trade.num_of_deals() optimal_gft = optimal_trade.gain_from_trade() (optimal_trade_with_gft_zero, _) = market.optimal_trade(ps_recipe=list(recipe), max_iterations=10000000) optimal_count_with_gft_zero = optimal_trade_with_gft_zero.num_of_deals() optimal_gft_with_gft_zero = optimal_trade_with_gft_zero.gain_from_trade() results = [("iterations", num_of_iterations), ("stockname", stock_names[i]), ("recipe", recipe_str), ("numpossibletrades", int(num_of_possible_ps)), ("optimalcount", optimal_count), ("gftratioformula", (optimal_count - 1) * 100 / (optimal_count if min(recipe) == max(recipe) and recipe[0] == 1 else optimal_count + 1) if optimal_count > 1 else 0), ("optimalcountwithgftzero", optimal_count_with_gft_zero), ("optimalgft", optimal_gft), ("optimalgftwithgftzero", optimal_gft_with_gft_zero)] for auction_index in range(len(auction_functions)): auction_trade = auction_functions[auction_index](market, recipe) count = auction_trade.num_of_deals() total_gft = auction_trade.gain_from_trade(including_auctioneer=True) market_gft = auction_trade.gain_from_trade(including_auctioneer=False) auction_name = auction_names[auction_index] results.append((auction_name + "count", auction_trade.num_of_deals())) results.append((auction_name + "countratio", 0 if optimal_count==0 else (count / optimal_count_with_gft_zero) * 100)) results.append((auction_name + "totalgft", total_gft)) results.append((auction_name + "totalgftratio", 0 if optimal_gft==0 else total_gft / optimal_gft_with_gft_zero*100)) 
results.append((auction_name + "marketgft", market_gft)) results.append((auction_name + "marketgftratio", 0 if optimal_gft == 0 else market_gft / optimal_gft_with_gft_zero * 100)) results.append((auction_name + "withoutgftzerocountratio", 0 if optimal_count==0 else (count / optimal_count) * 100)) results.append((auction_name + "withoutgftzerototalgft", total_gft)) results.append((auction_name + "withoutgftzerototalgftratio", 0 if optimal_gft==0 else total_gft / optimal_gft*100)) #We check which auction did better and print the market and their results. if report_diff: gft_to_compare = -1 k_to_compare = -1 gft_found = False k_found = False for (label, value) in results: if 'SBB' in label: if gft_found is False and label.endswith('totalgft'): if gft_to_compare < 0: gft_to_compare = value elif gft_to_compare != value: with open('diff_in_sbbs_gft.txt', 'a') as f: f.write('There is diff in gft between two auctions: ' + str(gft_to_compare) + ' ' + str(value) + '\n') f.write(str(results) + '\n') if num_of_possible_ps < 10: f.write(str(market) + '\n') gft_found = True elif k_found is False and label.endswith('count'): if k_to_compare < 0: k_to_compare = value elif k_to_compare != value: with open('diff_in_sbbs_k.txt', 'a') as f: f.write('There is diff in gft between two auctions: ' + str(k_to_compare) + ' ' + str(value) + '\n') f.write(str(results) + '\n') if num_of_possible_ps < 10: f.write(str(market) + '\n') k_found = True compare_sbbs = True if compare_sbbs: gft_to_compare = -1 k_to_compare = -1 gft_found = False k_found = False for (label, value) in results: if 'SBB' in label: if gft_found is False and label.endswith('totalgft'): if gft_to_compare < 0: gft_to_compare = value elif gft_to_compare > value: with open('diff_in_sbbs_gft.txt', 'a') as f: f.write('There is diff in gft between two auctions: ' + str(gft_to_compare) + ' ' + str(value) + '\n') f.write(str(results) + '\n') if num_of_possible_ps < 10: f.write(str(market) + '\n') gft_found = True elif k_found is 
False and label.endswith('count'): if k_to_compare < 0: k_to_compare = value elif k_to_compare > value: with open('diff_in_sbbs_k.txt', 'a') as f: f.write('There is diff in gft between two auctions: ' + str(k_to_compare) + ' ' + str(value) + '\n') f.write(str(results) + '\n') if num_of_possible_ps < 10: f.write(str(market) + '\n') k_found = True #results_table.add(OrderedDict(results)) #print(results) if len(total_results[str(num_of_possible_ps)]) == 0: total_results[str(num_of_possible_ps)] = results[0:len(results)] else: sum_result = total_results[str(num_of_possible_ps)] for index in range(len(results)): if index > 3: sum_result[index] = (results[index][0], sum_result[index][1] + results[index][1]) #print(total_results) print(stock_names[i], end=',') #break print() division_number = num_of_iterations * len(stocks_prices) #division_number = num_of_iterations for num_of_possible_ps in nums_of_agents: results = total_results[str(num_of_possible_ps)] for index in range(len(results)): if 'gftratio' in results[index][0]: results[index] = (results[index][0], padding_zeroes(results[index][1] / division_number, 3)) elif index > 3: results[index] = (results[index][0], padding_zeroes(results[index][1] / division_number, 2)) elif index == 1: results[index] = (results[index][0], 'Average') #print(results) results_table.add(OrderedDict(results)) results_table.done() def padding_zeroes(result, num_digits:int): str_result = str(result) str_result += ("0" * num_digits) if '.' in str_result else '.' + ("0" * num_digits) return str_result[0 : str_result.index('.') + num_digits + 1]
14,440
4,324
# -*- coding: utf-8 -*- i = 1 for x in range(60, -1, -5): print('I={} J={}'.format(i, x)) i += 3
106
58
from copy import deepcopy
from math import inf
import platform
from os import system

# Pick the right console-clear command for the host OS once, at import time.
if platform.system() == 'Windows':
    def clear():
        system('cls')
else:
    def clear():
        system('clear')

# Board cells: 0 = empty, 1 = player1 ('X'), -1 = player2 ('O').
board = [[0, 0, 0],
         [0, 0, 0],
         [0, 0, 0]]
player1 = 1
player2 = -1
null_move = [None, None]
draw = 0


def _allequal(*args) -> bool:
    """Return True if every argument equals the first one."""
    value = args[0]
    for arg in args[1:]:
        if arg != value:
            return False
    return True


def wins(state: list, player: int) -> bool:
    """Returns if the player won in the state

    For the given board position 'state' returns if the given 'player'
    has won the game or not.

    Parameters
    ----------
    state : list
        The board position to be evaluated
    player : int
        The player to be checked if they won or not
    """
    # Diagonals first, then each row and each column.
    if _allequal(player, state[0][0], state[1][1], state[2][2]) \
            or _allequal(player, state[0][2], state[1][1], state[2][0]):
        return True
    for i in range(len(state)):
        if _allequal(player, state[i][0], state[i][1], state[i][2]) \
                or _allequal(player, state[0][i], state[1][i], state[2][i]):
            return True
    return False


def game_end(state: list) -> bool:
    """Returns if the game has ended

    The game is over when either player has won, or no empty cell remains.

    Parameters
    ----------
    state : list
        The board position to be evaluated
    """
    if wins(state, player1) or wins(state, player2):
        return True
    for row in state:
        for cell in row:
            if cell == 0:
                return False
    return True


def evaluate(state: list) -> int:
    """Returns which player has won the game (0 if neither).

    Parameters
    ----------
    state : list
        The board position to be evaluated
    """
    if wins(state, player1):
        return player1
    elif wins(state, player2):
        return player2
    else:
        return 0


def possible_moves(state: list) -> list:
    """Returns list of all possible moves ([row, col] of every empty cell).

    Parameters
    ----------
    state : list
        The board position to be evaluated
    """
    moves = []
    for x, row in enumerate(state):
        for y, cell in enumerate(row):
            if cell == 0:
                moves.append([x, y])
    return moves


def play_move(state: list, move: list, player: int) -> list:
    """Returns a NEW board position with the specified move played.

    Bug fix: the original did ``new_state = state`` (an alias, not a copy)
    and mutated the caller's board even though it documented returning the
    state after the move. We deep-copy first, so the input is untouched;
    existing callers already rebind the result (or pre-copied), so behavior
    at the call sites is unchanged.

    Parameters
    ----------
    state : list
        The board position for the move to be played
    move : list
        The move [row, col] to be played on the given board position
    player : int
        The player playing the move
    """
    new_state = deepcopy(state)
    new_state[move[0]][move[1]] = player
    return new_state


def minimax(state: list, depth: int, player: int) -> list:
    """Returns the [Move, Best Evaluation] for given state, depth, player

    Runs plain minimax (player1 maximizes, player2 minimizes) to the given
    depth and returns the best move with its evaluation.

    Parameters
    ----------
    state : list
        The board position to be evaluated
    depth : int
        The maximum depth to which the minimax algorithm should evaluate
    player : int
        The player whose move is to be optimised
    """
    if depth == 0 or game_end(state):
        return [null_move, evaluate(state)]
    if player == player1:
        best = [null_move, -inf]
        for move in possible_moves(state):
            next_state = play_move(deepcopy(state), move, player)
            tree_eval = minimax(next_state, depth - 1, -player)
            if tree_eval[1] > best[1]:
                best = [move, tree_eval[1]]
        return best
    else:
        best = [null_move, inf]
        for move in possible_moves(state):
            next_state = play_move(deepcopy(state), move, player)
            tree_eval = minimax(next_state, depth - 1, -player)
            if tree_eval[1] < best[1]:
                best = [move, tree_eval[1]]
        return best


def display(state: list) -> None:
    """Pretty-print the board using X/O characters.

    Parameters
    ----------
    state : list
        The board position to be displayed
    """
    char = {-1: 'O', 0: ' ', 1: 'X'}
    for row in state:
        print(char[row[0]], "|", char[row[1]], "|", char[row[2]])
        print("---------")


def get_comp_move(state: list, depth: int, player: int) -> list:
    """Returns the best move for the computer via minimax.

    Parameters
    ----------
    state : list
        The state to be evaluated for best move
    depth : int
        The depth to which the minimax algorithm searches
    player : int
        The player whose move it is in the given state
    """
    evaluation = minimax(state, depth, player)
    return evaluation[0]


def get_player_move() -> list:
    """Read a "row,col" move from stdin.

    NOTE(review): input is not validated — a malformed entry raises
    ValueError/IndexError; confirm whether that is acceptable here.
    """
    inp_string = input("Enter your move: ")
    coords = inp_string.split(',')
    return [int(coord) for coord in coords]


def main() -> None:
    """Game loop: alternate human and computer turns until the game ends."""
    global board
    difficulty = 10
    comp_player = None
    current_player = player1
    while not game_end(board):
        if current_player == comp_player:
            move = get_comp_move(board, difficulty, current_player)
        else:
            clear()
            display(board)
            move = get_player_move()
        board = play_move(board, move, current_player)
        current_player *= -1
    result = evaluate(board)
    if result == player1:
        clear()
        display(board)
        print("Yay! you won")
    elif result == 0:
        clear()
        display(board)
        print("You got a draw")
    elif result == player2:
        clear()
        display(board)
        print("You lost, good luck next time")


if __name__ == "__main__":
    main()
6,572
1,903
# Build script for the mcmc Cython extension.
# Bug fix: the original imported `setup` from setuptools and then immediately
# shadowed it with `from distutils.core import setup` (distutils is deprecated
# and removed in Python 3.12); `find_packages` was imported but never used.
from setuptools import setup
from Cython.Build import cythonize

setup(
    name="mcmc",
    ext_modules=cythonize("./src/koleksyon/mcmc.pyx"),
)
183
65
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function used in Worker tests of legacy GCF Python 3.7 logging."""
import logging

X_GOOGLE_FUNCTION_NAME = "gcf-function"
X_GOOGLE_ENTRY_POINT = "function"
HOME = "/tmp"


def function(request):
    """Test function which logs exceptions.

    Args:
        request: The HTTP request which triggered this function.

    Returns:
        None, after logging the deliberately raised exception (with
        traceback) via ``logging.exception``.
    """
    try:
        raise Exception
    except Exception:  # narrowed from a bare `except:`; only Exception is raised above
        logging.exception("log")
    return None
995
299
from tivol.base_classes.mappers import CsvMapper
from tivol.base_classes.migration_handler_base import MigrationHandlerBase
import os


class AnimalMigration(MigrationHandlerBase):
    """Migration that loads animal records from the bundled CSV fixture."""

    def init_metadata(self):
        """Describe this migration and register its CSV source mapper."""
        fixture_path = os.path.join(
            os.getcwd(), 'tivol', 'tests', 'assets', 'animals.csv'
        )
        mapper = CsvMapper()
        mapper.set_destination_file(path=fixture_path)
        self.id = 'animal'
        self.name = 'Animal migration'
        self.description = 'Migrating animals into the system'
        self.add_source_mapper(mapper)
574
183
from typing import Dict, Set, Tuple

day_num = "25"
day_title = "The Halting Problem"

# Turing-machine transition table, replacing the original 60-line if/elif
# chain: (state, current-cell-is-one) -> (write_one, head_delta, next_state).
_RULES: Dict[Tuple[str, bool], Tuple[bool, int, str]] = {
    ('a', False): (True, 1, 'b'),
    ('a', True): (True, -1, 'e'),
    ('b', False): (True, 1, 'c'),
    ('b', True): (True, 1, 'f'),
    ('c', False): (True, -1, 'd'),
    ('c', True): (False, 1, 'b'),
    ('d', False): (True, 1, 'e'),
    ('d', True): (False, -1, 'c'),
    ('e', False): (True, -1, 'a'),
    ('e', True): (False, 1, 'd'),
    ('f', False): (True, 1, 'a'),
    ('f', True): (True, 1, 'c'),
}


def part1(steps: int = 12523873) -> int:
    """Run the Turing machine and print/return the diagnostic checksum.

    Generalized: ``steps`` defaults to the puzzle's step count, so calling
    ``part1()`` behaves exactly as before; the checksum (number of ones on
    the tape) is now also returned — the original returned None, which no
    caller used.

    Args:
        steps: how many machine steps to execute.

    Returns:
        The number of cells holding a one after ``steps`` steps.
    """
    tape: Set[int] = set()  # positions currently holding a one
    curr = 0
    state = 'a'
    for _ in range(steps):
        write_one, delta, next_state = _RULES[(state, curr in tape)]
        if write_one:
            tape.add(curr)
        else:
            tape.discard(curr)  # only reached when the cell is one, so == remove
        curr += delta
        state = next_state
    checksum = len(tape)
    print("Part 1:", checksum)
    return checksum


def main():
    print(f"Day {day_num}: {day_title}")
    part1()


if __name__ == '__main__':
    main()
1,718
576
def process(N):
    """Split N into two addends, the first of which contains no digit 4.

    Each '4' digit of N is rewritten to '2' (res1); the remainder
    res2 = N - res1 then consists only of 0s and 2s (4 = 2 + 2 per digit,
    no borrows occur), so neither part contains a 4.

    Args:
        N (int): the number to split.

    Returns:
        tuple: (res1, res2) with res1 + res2 == N.
    """
    res1 = int(str(N).replace('4', '2'))
    return res1, N - res1


if __name__ == '__main__':
    # Bug fix: stdin was read at module import time, which made the module
    # impossible to import as a library (or under a test runner); the
    # interactive driver is now guarded.
    T = int(input())
    for t in range(T):
        N = int(input())
        res1, res2 = process(N)
        print('Case #{}: {} {}'.format(t + 1, res1, res2))
263
111
import argparse

import cv2
import numpy as np

import face_embedding


def _build_parser():
    """Build the CLI option parser for this face-model smoke test."""
    parser = argparse.ArgumentParser(description='face model test')
    # general
    parser.add_argument('--image-size', default='112,112', help='')
    parser.add_argument('--model', default='../models/model-r34-amf/model,0',
                        help='path to load model.')
    parser.add_argument('--gpu', default=None, type=int, help='gpu id')
    parser.add_argument('--det', default=2, type=int,
                        help='mtcnn option, 2 means using R+O, else using O')
    parser.add_argument('--flip', default=0, type=int,
                        help='whether do lr flip aug')
    parser.add_argument('--threshold', default=1.24, type=float,
                        help='ver dist threshold')
    return parser


if __name__ == '__main__':
    # Bug fix: parse_args() used to run at import time, so importing this
    # module with a foreign sys.argv (e.g. under a test runner) made argparse
    # exit the process. Parsing is now confined to script execution.
    args = _build_parser().parse_args()
    model = face_embedding.FaceModel(args)
    # Compute embeddings for three fixed sample images and compare distances.
    f1 = model.get_feature(cv2.imread('/Users/aub3/1.jpg'))
    f2 = model.get_feature(cv2.imread('/Users/aub3/2.jpg'))
    f3 = model.get_feature(cv2.imread('/Users/aub3/3.jpg'))
    dist1 = np.sum(np.square(f1 - f2))
    dist2 = np.sum(np.square(f1 - f3))
    print(dist1, dist2)
1,067
401
# Licensed under the terms of the BSD-3-Clause license.
# Copyright (C) 2019 Michael Blaß
# michael.blass@uni-hamburg.de

"""apollon/io.py -- General I/O functionallity.

Classes:
    ArrayEncoder            Serialize numpy array to JSON.
    FileAccessControl       Descriptor for file name attributes.

Functions:
    array_print_opt         Set format for printing numpy arrays.
    decode_array            Decode numpy array from JSON.
    files_in_folder         Iterate over all files in given folder.
    load                    Load pickled data.
    repath                  Change path but keep file name.
    save                    Pickle some data.
"""
from contextlib import contextmanager as _contextmanager
import json as _json
import pathlib as _pathlib
import pickle
import typing

import numpy as _np

from . import types as _types


class ArrayEncoder(_json.JSONEncoder):
    # pylint: disable=E0202
    # Issue: False positive for E0202 (method-hidden) #414
    # https://github.com/PyCQA/pylint/issues/414
    """Encode np.ndarrays to JSON.

    Simply set the `cls` parameter of the dump method to this class.
    """
    def default(self, o):
        """Custom default JSON encoder. Properly handles numpy arrays
        and falls back to JSONEncoder.default for all other types.

        Params:
            o (any)    Object to encode.

        Returns:
            (dict)
        """
        if isinstance(o, _np.ndarray):
            out = {'__ndarray__': True,
                   '__dtype__': o.dtype.str,
                   'data': o.astype('float64').tolist()}
            return out
        return _json.JSONEncoder.default(self, o)


def decode_array(json_data: dict) -> typing.Any:
    """Properly decodes numpy arrays from a JSON data stream.

    This method need to be called on the return value of ``json.load`` or
    ``json.loads``.

    Args:
        json_data (dict)    JSON formatted dict to encode.

    Returns:
        (any)   The decoded ndarray, or ``json_data`` unchanged when it does
                not carry the ``__ndarray__`` marker.
    """
    if '__ndarray__' in json_data and '__dtype__' in json_data:
        return _np.array(json_data['data'], dtype=json_data['__dtype__'])
    return json_data


class PoissonHmmEncoder(ArrayEncoder):
    """JSON encoder for PoissonHmm."""
    def default(self, o):
        """Custom default JSON encoder. Properly handles <class 'PoissonHMM'>.

        Note: Falls back to ``ArrayEncoder`` for all types that do not
        implement a ``to_dict()`` method.

        Params:
            o (any)    Object to encode.

        Returns:
            (dict)
        """
        # NOTE(review): `HMM` is not imported in this module — this branch
        # raises NameError if ever reached; confirm the intended import.
        if isinstance(o, HMM):
            items = {}
            for attr in o.__slots__:
                try:
                    items[attr] = getattr(o, attr).to_dict()
                except AttributeError:
                    items[attr] = getattr(o, attr)
            return items
        return ArrayEncoder.default(self, o)


def dump_json(obj, path: _types.PathType = None) -> None:
    """Write ``obj`` to JSON. This function can handel numpy arrays.

    If ``path`` is None, this fucntion writes to stdout.  Otherwise, encoded
    object is written to ``path``.

    Args:
        obj  (any)         Object to be encoded.
        path (PathType)    Output file path.
    """
    if path is None:
        print(_json.dumps(obj, cls=ArrayEncoder))
    else:
        path = _pathlib.Path(path)
        with path.open('w') as json_file:
            _json.dump(obj, json_file, cls=ArrayEncoder)


class WavFileAccessControl:
    """Control initialization and access to the ``file`` attribute of
    class:``AudioData``.

    This assures that the path indeed points to a file, which has to be a
    .wav file. Otherwise an error is raised. The path to the file is saved
    as absolute path and the attribute is read-only.
    """

    def __init__(self):
        """Hi there!"""
        self.__attribute = {}

    def __get__(self, obj, objtype):
        return self.__attribute[obj]

    def __set__(self, obj, file_name):
        if obj not in self.__attribute.keys():
            _path = _pathlib.Path(file_name).resolve()
            if _path.exists():
                if _path.is_file():
                    if _path.suffix == '.wav':
                        self.__attribute[obj] = _path
                    else:
                        raise IOError('`{}` is not a .wav file.'
                                      .format(file_name))
                else:
                    raise IOError('`{}` is not a file.'.format(file_name))
            else:
                raise FileNotFoundError('`{}` does not exists.'
                                        .format(file_name))
        else:
            raise AttributeError('File name cannot be changed.')

    def __delete__(self, obj):
        del self.__attribute[obj]


@_contextmanager
def array_print_opt(*args, **kwargs):
    """Set print format for numpy arrays.

    Thanks to unutbu:
    https://stackoverflow.com/questions/2891790/how-to-pretty-print-a-
    numpy-array-without-scientific-notation-and-with-given-pre
    """
    std_options = _np.get_printoptions()
    _np.set_printoptions(*args, **kwargs)
    try:
        yield
    finally:
        _np.set_printoptions(**std_options)


def load(path: _types.PathType) -> typing.Any:
    """Load a pickled file.

    Args:
        path    (str) Path to file.

    Returns:
        (object) unpickled object
    """
    path = _pathlib.Path(path)
    with path.open('rb') as file:
        data = pickle.load(file)
    return data


def repath(current_path: _types.PathType, new_path: _types.PathType,
           ext: str = None) -> _types.PathType:
    """Change the path and keep the file name. Optinally change the
    extension, too.

    Bug fix: the original assigned ``current_path.stem`` (a plain str) and
    called ``file_path.join(ext)``, whose result was discarded — so the
    returned path always lost its extension, whatever ``ext`` was.  Now the
    original extension is kept when ``ext`` is None and replaced otherwise,
    matching the documented behavior.

    Args:
        current_path (str or Path)    The path to change.
        new_path     (str or Path)    The new path.
        ext          (str or None)    Change file extension if ``ext`` is
                                      not None.

    Returns:
        (pathlib.Path)
    """
    current_path = _pathlib.Path(current_path)
    new_path = _pathlib.Path(new_path)
    if ext is None:
        file_name = current_path.name
    else:
        if not ext.startswith('.'):
            ext = '.' + ext
        file_name = current_path.stem + ext
    return new_path.joinpath(file_name)


def save(data: typing.Any, path: _types.PathType):
    """Pickles data to path.

    Args:
        data    (Any)            Pickleable object.
        path    (str or Path)    Path to safe the file.
    """
    path = _pathlib.Path(path)
    with path.open('wb') as file:
        pickle.dump(data, file)
6,509
1,975
import logging
import requests
import urllib.parse
from datetime import datetime

from stockbot.provider.base import BaseQuoteService, BaseQuote

LOGGER = logging.getLogger(__name__)


class YahooFallbackQuote(object):
    """Null-object quote returned when a ticker search finds nothing."""

    def __init__(self, *args, **kwargs):
        pass

    def __str__(self):
        return "Didn't find anything"

    def is_empty(self):
        return False

    def is_fresh(self):
        return False


class YahooQuote(BaseQuote):
    """Quote built from Yahoo's option-chain payload.

    Every key of the embedded quote object is promoted to an attribute;
    a display-friendly ``fields`` table is assembled from the most useful
    ones.
    """

    def __init__(self, o):
        for k, v in o["optionChain"]["result"][0]["quote"].items():
            setattr(self, k, v)
        if self.regularMarketTime == "N/A":
            self.timestamp = None
            self.timestamp_str = "unknown"
        else:
            self.timestamp = datetime.fromtimestamp(int(self.regularMarketTime))
            self.timestamp_str = self.timestamp.strftime("%Y-%m-%d %H:%M:%S")
        self.is_pre_market = self.marketState == "PRE"
        self.fields = [
            ["Name", self.shortName],
            ["Price", self.regularMarketPrice],
            ["Low Price", self.regularMarketDayLow],
            ["High Price", self.regularMarketDayHigh],
            ["Percent Change 1 Day", self.regularMarketChangePercent]
        ]
        if self.is_pre_market:
            self.fields.extend([
                ["Price Pre Market", self.preMarketPrice],
                ["Percent Change Pre Market", self.preMarketChangePercent]
            ])
        self.fields.extend([
            ["Market", self.market],
            ["Update Time", self.timestamp_str]
        ])

    def is_fresh(self):
        """A quote is fresh when its update time is under 16 minutes old."""
        if self.timestamp is None:
            return False
        return (datetime.now() - self.timestamp).total_seconds() < 16 * 60


class YahooSearchResult(object):
    """Thin wrapper over Yahoo's search response JSON."""

    def __init__(self, o):
        self.o = o

    def get_tickers(self):
        """Return the symbols of all quote hits."""
        return [x["symbol"] for x in self.o["quotes"] if "symbol" in x]

    def is_empty(self):
        """True when the response carries no usable quote symbol."""
        return not (
            "quotes" in self.o
            and len(self.o["quotes"]) > 0
            and any([True for x in self.o["quotes"] if "symbol" in x])
        )


class YahooQueryService(BaseQuoteService):
    # search results probably don't change that much so cache them
    search_cache = {}

    def __init__(self, *args, **kwargs):
        pass

    def get_quote(self, ticker):
        """Resolve ``ticker`` via search, then fetch its quote.

        Returns a YahooFallbackQuote when the search finds nothing.
        """
        search_result = self.search(ticker)
        if not search_result.is_empty():
            t = search_result.get_tickers()[0]
            response = requests.get(
                "https://query1.finance.yahoo.com/v7/finance/options/{t}".format(t=t))
            response.raise_for_status()
            return YahooQuote(response.json())
        else:
            return YahooFallbackQuote()

    def search(self, query):
        """Search Yahoo for ``query`` and return a YahooSearchResult.

        Bug fix: ``search_cache`` was declared (with a comment explaining
        its purpose) but never consulted — every call hit the network.
        Successful results are now cached per query string.
        """
        if query in self.search_cache:
            return self.search_cache[query]
        query_encoded = urllib.parse.quote(query)
        response = requests.get(
            'https://query2.finance.yahoo.com/v1/finance/search?q='
            '{query}&lang=en-US&region=US&quotesCount=1&newsCount=0&enableFuzzyQuery=false&quotesQueryId'
            '=tss_match_phrase_query&multiQuoteQueryId=multi_quote_single_token_query&newsQueryId=news_cie_vespa'
            '&enableCb=true&enableNavLinks=true&enableEnhancedTrivialQuery=true'.format(query=query_encoded))
        response.raise_for_status()
        result = YahooSearchResult(response.json())
        self.search_cache[query] = result
        return result
3,327
1,014
import sqlite3
from contextlib import closing


def main(db_path='NeDB.db'):
    """Create the EMPLOYEE table, seed sample rows, and show two of them.

    Prints (and returns) the rows selected with LIMIT 2 OFFSET 4, matching
    the original script's output. Fixes: the connection was never closed
    (only the cursor was), and all work ran at import time; the database
    path is now a parameter so the logic is reusable/testable.

    Args:
        db_path: sqlite database file (default preserves original behavior).

    Returns:
        list of the selected row tuples.
    """
    with closing(sqlite3.connect(db_path)) as conn:
        cur = conn.cursor()
        cur.execute('''CREATE TABLE IF NOT EXISTS EMPLOYEE (
                        ID integer,
                        Name text NOT NULL,
                        Date_Join text,
                        Place text,
                        Age integer,
                        Salary real);''')
        # NOTE(review): the INSERT is unguarded, so rerunning the script
        # duplicates the seed rows — only table creation is idempotent.
        cur.execute('''INSERT INTO Employee VALUES
            (1,'John','2020-03-01','Kerala',32,25000),
            (2,'Adam','2020-01-01','TN',22,30000),
            (3,'Mary','2022-01-01','Karnataka',24,120000),
            (4,'Jacob','2022-01-01','Mharashtra',24,430000),
            (5,'Johny','2022-01-01','Karnataka',24,34000),
            (6,'Lynda','2022-01-01','Delhi',24,56700),
            (7,'Smith','2022-01-01','Kerala',24,234000),
            (8,'Gem','2022-01-01','Karnataka',24,120000)''')
        rows = list(cur.execute("SELECT * from Employee LIMIT 2 OFFSET 4"))
        for row in rows:
            print(row)
        conn.commit()
        cur.close()
        return rows


if __name__ == '__main__':
    main()
850
420
import json


def lambda_handler(event, context):
    """Return the caller's source IP as a JSON-encoded HTTP 200 response.

    Args:
        event: API Gateway proxy event; the IP is read from
            requestContext.identity.sourceIp.
        context: Lambda context object (unused).
    """
    source_ip = event["requestContext"]["identity"]["sourceIp"]
    response = {
        'statusCode': 200,
        'body': json.dumps(source_ip),
    }
    return response
196
63
def tens_digit(n: int) -> int:
    """Return the tens digit of n (the second-to-last decimal digit)."""
    return n % 100 // 10


if __name__ == '__main__':
    # Bug fix: stdin was read at module import time; extracting the logic
    # into a function makes the module importable and testable.
    print(tens_digit(int(input())))
47
28
import functools

from flask import (
    Blueprint, flash, g, redirect, render_template, request, session,
    url_for, jsonify, abort
)
from werkzeug.security import check_password_hash, generate_password_hash

from qtpp import db
from qtpp.libs.framework.operate_db import OperationDB
from qtpp.libs.framework.constant import Const
from qtpp.models.user import User

# Blueprint named 'auth'. Like the application object, a blueprint needs to
# know where it is defined, hence __name__ as the second argument;
# url_prefix is prepended to every URL registered on it.
bp = Blueprint('auth', __name__, url_prefix='/auth')

odb = OperationDB()

# This blueprint provides registration, login and logout views.


@bp.route('/register', methods=('GET', 'POST'))
def register():
    """Register a new user from a JSON body {username, password}.

    Validates the payload, rejects duplicate user names (code 1004) and
    missing fields (code 1003), and stores only the password hash —
    never the clear text. Non-POST requests get a 404.
    """
    if request.method == 'POST':
        username = request.json['username']
        password = request.json['password']
        if not (username and password):
            return jsonify(Const.errcode('1003'))
        elif odb.query_per(User, 'username', username) is not None:
            return jsonify(Const.errcode('1004', res={"username": username}))
        odb.add(User(username, generate_password_hash(password)))
        return jsonify(Const.errcode('0'))
    return abort(404)


@bp.route('/login', methods=('GET', 'POST'))
def login():
    """Authenticate a user and store their id/name in the session.

    check_password_hash() re-hashes the submitted password and compares it
    safely against the stored hash. On success the session is cleared and
    repopulated; Flask signs the session cookie against tampering.

    Bug fix: the original kept a dead ``error`` variable that was always
    None, making its ``if error is None`` guard a no-op — removed.
    """
    if request.method == 'POST':
        username = request.json['username']
        password = request.json['password']
        user = odb.query_per(User, 'username', username)
        if (user is None) or (not check_password_hash(user.password, password)):
            return jsonify(Const.errcode('1003'))
        session.clear()
        session['user_id'] = user.uid
        session['user_name'] = user.username
        res = {
            "user_id": user.uid,
            "name": user.username
        }
        return jsonify(Const.errcode('0', res=res))
    return abort(404)


@bp.before_app_request
def load_logged_in_user():
    """Run before every request: load the current user (if any) into g.user.

    If no user id is stored in the session, g.user stays None.
    """
    user_id = session.get('user_id')
    if user_id is None:
        g.user = None
    else:
        g.user = odb.query_per(User, 'uid', user_id)


@bp.route('/logout', methods=('GET', 'POST'))
def logout():
    """Clear the session so load_logged_in_user stops loading this user."""
    session.clear()
    return jsonify(Const.errcode('0'))


def login_required(view):
    """Decorator that rejects the request with code 1001 unless a user is
    loaded in g.user; otherwise the wrapped view runs normally."""
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        if g.user is None:
            return jsonify(Const.errcode('1001'))
        return view(**kwargs)
    return wrapped_view
3,581
1,786
import pygame
import engine.file


class Mixer():
    """Lazily loads sound effects and plays them, caching by file name."""

    def __init__(self):
        # name -> pygame.mixer.Sound cache, filled on first use.
        self._sounds = {}

    def _getSound(self, name):
        """Return the cached Sound for `name`, loading it on first use.

        Returns None when the sound cannot be loaded (the failure is
        swallowed deliberately so missing assets never crash playback).
        """
        if name not in self._sounds:
            try:
                sound = pygame.mixer.Sound(engine.file.getPath(name))
            except Exception:  # narrowed from a bare `except:` clause
                return None
            self._sounds[name] = sound
        return self._sounds[name]

    def playSound(self, name):
        """Play the named sound; silently do nothing if it cannot load."""
        sound = self._getSound(name)
        if sound is None:
            return
        sound.play()
403
166
import time


def readcsv(filename, linenumber):
    """Read `filename`.csv into a list of rows.

    `linenumber` is accepted for interface compatibility but unused
    (it was unused in the original as well). The file handle is now
    closed properly (the original leaked it).
    """
    import csv
    with open(filename + ".csv") as f:
        return list(csv.reader(f))


def schoolpo8finder(data, name):
    """Print the progress 8 score for every school whose name contains `name`.

    Column 4 is the school name, column 60 the progress 8 score.
    Bug fix: the original wrote ``print(print(...))`` in one branch, which
    printed the line and then a spurious ``None``.
    """
    info = []
    for row in data:
        try:
            if str(name).lower() in str(row[4]).lower():
                info.append(row[4])
                info.append(row[60])
        except Exception:
            print("")
    try:
        for i in range(0, len(info)):
            if i % 2 == 0:
                try:
                    float(info[i])
                    # info[i] parsed as a number => name/score are swapped.
                    print(info[i + 1] + ": progress 8 score is " + info[i])
                except Exception:
                    print(info[i] + ": progress 8 score is " + info[i + 1])
    except Exception:
        print("")


def listfl(data):
    """Collect the score column (60) as floats for rows 1..len-2.

    Unparsable or >=100 'scores' become 0.0 / are skipped, matching the
    original quirky behavior (values >= 100 append nothing).
    """
    array = []
    for i in range(1, len(data) - 1):
        try:
            if float(data[i][60]) < float(100):
                array.append(float(data[i][60]))
        except Exception:
            array.append(float(0))
    return array


def listnames(data):
    """Collect the school-name column (4) as strings; 'null' on failure."""
    array = []
    for i in range(0, len(data)):
        try:
            array.append(str(data[i][4]))
        except Exception:
            array.append("null")
    return array


def combind(array, names):
    """Pair names with scores; index errors are skipped (prints '')."""
    newarray = []
    for i in range(0, len(names)):
        try:
            newarray.append([names[i], array[i]])
        except Exception:
            print("")
    return newarray


def schoolranker(data):
    """Bubble-sort the (name, score) pairs by score, descending.

    NOTE(review): the comparison range is bounded by len(array) - 1, as in
    the original — confirm the intended bound before 'fixing'.
    """
    array = listfl(data)
    names = listnames(data)
    newranking = combind(array, names)
    swaps = True
    while swaps:
        swaps = False
        for i in range(0, len(array) - 1):
            if newranking[i][1] < newranking[i + 1][1]:
                newranking[i], newranking[i + 1] = newranking[i + 1], newranking[i]
                swaps = True
    return newranking


def output(data, slient):
    """Write the ranking to rank.txt; when slient == 1 also print it."""
    print("\n\n\n\n")
    listrank = schoolranker(data)
    with open("rank.txt", "w") as rank:
        for i in range(0, len(listrank)):
            line = (str(i + 1) + ". " + str(listrank[i][0])
                    + " with average progress 8 score of " + str(listrank[i][1]))
            if slient == 1:
                print(line)
            rank.write(line)
            rank.write("\n")


def readingrank(find):
    """Print every rank.txt line containing `find` (case-insensitive)."""
    file = open("rank.txt", "r")
    try:
        for _ in range(0, 10000):
            hold = file.readline()
            if find.lower() in str(hold).lower():
                print(hold)
        file.close()
    except Exception:
        file.close()


def main():
    """Interactive driver.

    Bug fix: this used to run (and block on input()) at module import time;
    it is now guarded so the functions above can be imported and tested.
    """
    print("starting up")
    filename = input("filename without csv? ")
    data = readcsv(filename, 5680)
    print("data loaded")
    print("\n")
    while True:
        print("\n")
        choice = input("would you like to do?\n1.generate rank text file\n"
                       "2.search for progress 8 average\n"
                       "3.search for ranking number by school\n > ")
        if choice == "1":
            print("making rank table")
            output(data, 1)
            print("done")
        elif choice == "2":
            schoolname = input("what is the name of the school? ")
            print("\n\n")
            print("here is what is found in csv:")
            schoolpo8finder(data, schoolname)
        elif choice == "3":
            find = input("name of school? ")
            try:
                print("here is what's been found? ")
                readingrank(find)
            except Exception:
                print("please wait..")
                output(data, 0)
                print("here is what's been found? ")
                readingrank(find)


if __name__ == '__main__':
    main()
3,984
1,365
class T:
    """Message tags used in worker/coordinator communication."""
    WORK_REQUEST = 1
    WORK_REPLY = 2
    REDUCE = 3
    BARRIER = 4
    TOKEN = 7


class Tally:
    """Mutable counters accumulated during a filesystem scan/copy."""
    total_dirs = 0
    total_files = 0
    total_filesize = 0
    total_stat_filesize = 0
    total_symlinks = 0
    total_skipped = 0
    total_sparse = 0
    max_files = 0
    total_nlinks = 0
    total_nlinked_files = 0
    total_0byte_files = 0
    devfile_cnt = 0
    devfile_sz = 0
    spcnt = 0  # stripe cnt account per process
    # ZFS
    total_blocks = 0


class G:
    """Global constants and run-wide mutable state."""
    ZERO = 0
    ABORT = -1
    WHITE = 50
    BLACK = 51
    NONE = -99
    TERMINATE = -100
    MSG = 99
    MSG_VALID = True
    MSG_INVALID = False

    # Logging format strings.
    fmt1 = '%(asctime)s - %(levelname)s - %(rank)s:%(filename)s:%(lineno)d - %(message)s'
    fmt2 = '%(asctime)s - %(rank)s:%(filename)s:%(lineno)d - %(message)s'
    bare_fmt = '%(name)s - %(levelname)s - %(message)s'
    mpi_fmt = '%(name)s - %(levelname)s - %(rank)s - %(message)s'
    bare_fmt2 = '%(message)s'

    # Human-readable names for the token/status codes above.
    str = {WHITE: "white", BLACK: "black", NONE: "not set",
           TERMINATE: "terminate", ABORT: "abort", MSG: "message"}

    KEY = "key"
    VAL = "val"

    logger = None
    logfile = None
    loglevel = "warn"
    use_store = False
    fix_opt = False
    preserve = False
    DB_BUFSIZE = 10000
    memitem_threshold = 100000
    tempdir = None
    total_chunks = 0
    rid = None
    chk_file = None
    chk_file_db = None
    totalsize = 0
    src = None
    dest = None
    args_src = None
    args_dest = None
    resume = None
    reduce_interval = 30
    reduce_enabled = False
    verbosity = 0
    am_root = False
    copytype = 'dir2dir'

    # Lustre file system
    fs_lustre = None
    lfs_bin = None
    stripe_threshold = None

    # Size constants (bytes).
    b0 = 0
    b4k = 4 * 1024
    b8k = 8 * 1024
    b16k = 16 * 1024
    b32k = 32 * 1024
    b64k = 64 * 1024
    b128k = 128 * 1024
    b256k = 256 * 1024
    b512k = 512 * 1024
    b1m = 1024 * 1024
    b2m = 2 * b1m
    b4m = 4 * b1m
    b8m = 8 * b1m
    b16m = 16 * b1m
    b32m = 32 * b1m
    b64m = 64 * b1m
    b128m = 128 * b1m
    b256m = 256 * b1m
    b512m = 512 * b1m
    b1g = 1024 * b1m
    b4g = 4 * b1g
    b16g = 16 * b1g
    b64g = 64 * b1g
    b128g = 128 * b1g
    b256g = 256 * b1g
    b512g = 512 * b1g
    b1tb = 1024 * b1g
    b4tb = 4 * b1tb

    FSZ_BOUND = 64 * b1tb

    # Histogram bin edges (NOTE(review): the comment said "25 bins" but the
    # list holds 26 edges — confirm which is intended).
    bins = [b0, b4k, b8k, b16k, b32k, b64k, b128k, b256k, b512k, b1m, b2m,
            b4m, b16m, b32m, b64m, b128m, b256m, b512m, b1g, b4g, b64g,
            b128g, b256g, b512g, b1tb, b4tb]

    # 17 bins, the last bin is special
    # This is error-prone, to be refactored.
    # bins_fmt = ["B1_000k_004k", "B1_004k_008k", "B1_008k_016k", "B1_016k_032k", "B1_032k_064k", "B1_064k_256k",
    #             "B1_256k_512k", "B1_512k_001m",
    #             "B2_001m_004m", "B2_m004_016m", "B2_016m_512m", "B2_512m_001g",
    #             "B3_001g_100g", "B3_100g_256g", "B3_256g_512g",
    #             "B4_512g_001t",
    #             "B5_001t_up"]

    # GPFS
    gpfs_block_size = ("256k", "512k", "b1m", "b4m", "b8m", "b16m", "b32m")
    gpfs_block_cnt = [0, 0, 0, 0, 0, 0, 0]
    # Bug fix: these are byte counts — use floor division so the values are
    # ints rather than floats (numerically identical: all are multiples of 32).
    gpfs_subs = (b256k // 32, b512k // 32, b1m // 32, b4m // 32, b8m // 32,
                 b16m // 32, b32m // 32)

    dev_suffixes = [".C", ".CC", ".CU", ".H", ".CPP", ".HPP", ".CXX", ".F",
                    ".I", ".II", ".F90", ".F95", ".F03", ".FOR", ".O", ".A",
                    ".SO", ".S", ".IN", ".M4", ".CACHE", ".PY", ".PYC"]
3,430
1,851
import os
import sys

from gensim.scripts.glove2word2vec import glove2word2vec

from kge.misc import kge_base_dir


def _convert_to_word2vec(
        filename: str
):
    """Convert a file of pretrained embeddings in GloVe format to word2vec
    format.

    The input is looked up in the 'pretrained' folder and the output is
    written next to it as ``<stem>_word2vec<ext>``. Robustness fix:
    ``os.path.splitext`` replaces ``str.rindex('.')``, which raised
    ValueError for extensionless file names.
    """
    folder = os.path.join(kge_base_dir(), "pretrained")
    input_file = os.path.join(folder, filename)
    stem, ext = os.path.splitext(filename)
    output_file = os.path.join(folder, stem + "_word2vec" + ext)
    glove2word2vec(input_file, output_file)


# give file name in folder 'pretrained' as first command line argument
if __name__ == '__main__':
    _convert_to_word2vec(sys.argv[1])
714
249
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'command_list.ui' # # Created by: PyQt5 UI code generator 5.15.4 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_Dialog(object): def setupUi(self, Dialog): Dialog.setObjectName("Dialog") Dialog.resize(531, 473) Dialog.setStyleSheet("QDialog {\n" " background-color:#ddedff;\n" "}\n" "QTextEdit {\n" " border-width: 1px;\n" " border-style: solid;\n" " border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" "}\n" "QPlainTextEdit {\n" " border-width: 1px;\n" " border-style: solid;\n" " border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" "}\n" "QToolButton {\n" " border-style: solid;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n" " border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n" " border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n" " border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n" " border-width: 1px;\n" " border-radius: 5px;\n" " color: rgb(0,0,0);\n" " padding: 2px;\n" " background-color: rgb(255,255,255);\n" "}\n" "QToolButton:hover{\n" " border-style: solid;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(195, 195, 195), stop:1 rgb(222, 222, 222));\n" " border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(197, 197, 197), stop:1 rgb(227, 227, 227));\n" " 
border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(197, 197, 197));\n" " border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(195, 195, 195), stop:1 rgb(222, 222, 222));\n" " border-width: 1px;\n" " border-radius: 5px;\n" " color: rgb(0,0,0);\n" " padding: 2px;\n" " background-color: rgb(255,255,255);\n" "}\n" "QToolButton:pressed{\n" " border-style: solid;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n" " border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n" " border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n" " border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n" " border-width: 1px;\n" " border-radius: 5px;\n" " color: rgb(0,0,0);\n" " padding: 2px;\n" " background-color: rgb(142,142,142);\n" "}\n" "QPushButton{\n" " border-style: solid;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n" " border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n" " border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n" " border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n" " border-width: 1px;\n" " border-radius: 5px;\n" " color: rgb(0,0,0);\n" " padding: 2px;\n" " background-color: rgb(255,255,255);\n" "}\n" "QPushButton::default{\n" " border-style: solid;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 
rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n" " border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n" " border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n" " border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n" " border-width: 1px;\n" " border-radius: 5px;\n" " color: rgb(0,0,0);\n" " padding: 2px;\n" " background-color: rgb(255,255,255);\n" "}\n" "QPushButton:hover{\n" " border-style: solid;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(195, 195, 195), stop:1 rgb(222, 222, 222));\n" " border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(197, 197, 197), stop:1 rgb(227, 227, 227));\n" " border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(197, 197, 197));\n" " border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(195, 195, 195), stop:1 rgb(222, 222, 222));\n" " border-width: 1px;\n" " border-radius: 5px;\n" " color: rgb(0,0,0);\n" " padding: 2px;\n" " background-color: rgb(255,255,255);\n" "}\n" "QPushButton:pressed{\n" " border-style: solid;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n" " border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n" " border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n" " border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n" " border-width: 1px;\n" " border-radius: 5px;\n" " color: rgb(0,0,0);\n" " 
padding: 2px;\n" " background-color: rgb(142,142,142);\n" "}\n" "QPushButton:disabled{\n" " border-style: solid;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n" " border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n" " border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n" " border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n" " border-width: 1px;\n" " border-radius: 5px;\n" " color: #808086;\n" " padding: 2px;\n" " background-color: rgb(142,142,142);\n" "}\n" "QLineEdit {\n" " border-width: 1px; border-radius: 4px;\n" " border-style: solid;\n" " border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" "}\n" "QLabel {\n" " color: #000000;\n" "}\n" "QLCDNumber {\n" " color: rgb(0, 113, 255, 255);\n" "}\n" "QProgressBar {\n" " text-align: center;\n" " color: rgb(240, 240, 240);\n" " border-width: 1px; \n" " border-radius: 10px;\n" " border-color: rgb(230, 230, 230);\n" " border-style: solid;\n" " background-color:rgb(207,207,207);\n" "}\n" "QProgressBar::chunk {\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n" " border-radius: 10px;\n" "}\n" "QMenuBar {\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(207, 209, 207, 255), stop:1 rgba(230, 229, 230, 255));\n" "}\n" "QMenuBar::item {\n" " color: #000000;\n" " spacing: 3px;\n" " padding: 1px 4px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(207, 209, 207, 255), stop:1 rgba(230, 229, 230, 255));\n" "}\n" "\n" "QMenuBar::item:selected 
{\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" " color: #FFFFFF;\n" "}\n" "QMenu::item:selected {\n" " border-style: solid;\n" " border-top-color: transparent;\n" " border-right-color: transparent;\n" " border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" " border-bottom-color: transparent;\n" " border-left-width: 2px;\n" " color: #000000;\n" " padding-left:15px;\n" " padding-top:4px;\n" " padding-bottom:4px;\n" " padding-right:7px;\n" "}\n" "QMenu::item {\n" " border-style: solid;\n" " border-top-color: transparent;\n" " border-right-color: transparent;\n" " border-left-color: transparent;\n" " border-bottom-color: transparent;\n" " border-bottom-width: 1px;\n" " color: #000000;\n" " padding-left:17px;\n" " padding-top:4px;\n" " padding-bottom:4px;\n" " padding-right:7px;\n" "}\n" "QTabWidget {\n" " color:rgb(0,0,0);\n" " background-color:#000000;\n" "}\n" "QTabWidget::pane {\n" " border-color: rgb(223,223,223);\n" " background-color:rgb(226,226,226);\n" " border-style: solid;\n" " border-width: 2px;\n" " border-radius: 6px;\n" "}\n" "QTabBar::tab:first {\n" " border-style: solid;\n" " border-left-width:1px;\n" " border-right-width:0px;\n" " border-top-width:1px;\n" " border-bottom-width:1px;\n" " border-top-color: rgb(209,209,209);\n" " border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n" " border-bottom-color: rgb(229,229,229);\n" " border-top-left-radius: 4px;\n" " border-bottom-left-radius: 4px;\n" " color: #000000;\n" " padding: 3px;\n" " margin-left:0px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(247, 247, 247, 255), stop:1 rgba(255, 255, 255, 255));\n" "}\n" "QTabBar::tab:last {\n" " border-style: solid;\n" " border-width:1px;\n" " 
border-top-color: rgb(209,209,209);\n" " border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n" " border-right-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n" " border-bottom-color: rgb(229,229,229);\n" " border-top-right-radius: 4px;\n" " border-bottom-right-radius: 4px;\n" " color: #000000;\n" " padding: 3px;\n" " margin-left:0px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(247, 247, 247, 255), stop:1 rgba(255, 255, 255, 255));\n" "}\n" "QTabBar::tab {\n" " border-style: solid;\n" " border-top-width:1px;\n" " border-bottom-width:1px;\n" " border-left-width:1px;\n" " border-top-color: rgb(209,209,209);\n" " border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n" " border-bottom-color: rgb(229,229,229);\n" " color: #000000;\n" " padding: 3px;\n" " margin-left:0px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(247, 247, 247, 255), stop:1 rgba(255, 255, 255, 255));\n" "}\n" "QTabBar::tab:selected, QTabBar::tab:last:selected, QTabBar::tab:hover {\n" " border-style: solid;\n" " border-left-width:1px;\n" " border-right-color: transparent;\n" " border-top-color: rgb(209,209,209);\n" " border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n" " border-bottom-color: rgb(229,229,229);\n" " color: #FFFFFF;\n" " padding: 3px;\n" " margin-left:0px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" "}\n" "\n" "QTabBar::tab:selected, QTabBar::tab:first:selected, QTabBar::tab:hover {\n" " border-style: solid;\n" " border-left-width:1px;\n" " 
border-bottom-width:1px;\n" " border-top-width:1px;\n" " border-right-color: transparent;\n" " border-top-color: rgb(209,209,209);\n" " border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n" " border-bottom-color: rgb(229,229,229);\n" " color: #FFFFFF;\n" " padding: 3px;\n" " margin-left:0px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" "}\n" "\n" "QCheckBox {\n" " color: #000000;\n" " padding: 2px;\n" "}\n" "QCheckBox:disabled {\n" " color: #808086;\n" " padding: 2px;\n" "}\n" "\n" "QCheckBox:hover {\n" " border-radius:4px;\n" " border-style:solid;\n" " padding-left: 1px;\n" " padding-right: 1px;\n" " padding-bottom: 1px;\n" " padding-top: 1px;\n" " border-width:1px;\n" " border-color: transparent;\n" "}\n" "QCheckBox::indicator:checked {\n" "\n" " height: 10px;\n" " width: 10px;\n" " border-style:solid;\n" " border-width: 1px;\n" " border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" " color: #000000;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" "}\n" "QCheckBox::indicator:unchecked {\n" "\n" " height: 10px;\n" " width: 10px;\n" " border-style:solid;\n" " border-width: 1px;\n" " border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" " color: #000000;\n" "}\n" "QRadioButton {\n" " color: 000000;\n" " padding: 1px;\n" "}\n" "QRadioButton::indicator:checked {\n" " height: 10px;\n" " width: 10px;\n" " border-style:solid;\n" " border-radius:5px;\n" " border-width: 1px;\n" " border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" " color: 
#a9b7c6;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" "}\n" "QRadioButton::indicator:!checked {\n" " height: 10px;\n" " width: 10px;\n" " border-style:solid;\n" " border-radius:5px;\n" " border-width: 1px;\n" " border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" " color: #a9b7c6;\n" " background-color: transparent;\n" "}\n" "QStatusBar {\n" " color:#027f7f;\n" "}\n" "QSpinBox {\n" " border-style: solid;\n" " border-width: 1px;\n" " border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" "}\n" "QDoubleSpinBox {\n" " border-style: solid;\n" " border-width: 1px;\n" " border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" "}\n" "QTimeEdit {\n" " border-style: solid;\n" " border-width: 1px;\n" " border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" "}\n" "QDateTimeEdit {\n" " border-style: solid;\n" " border-width: 1px;\n" " border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" "}\n" "QDateEdit {\n" " border-style: solid;\n" " border-width: 1px;\n" " border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n" "}\n" "\n" "QToolBox {\n" " color: #a9b7c6;\n" " background-color:#000000;\n" "}\n" "QToolBox::tab {\n" " color: #a9b7c6;\n" " background-color:#000000;\n" "}\n" "QToolBox::tab:selected {\n" " color: #FFFFFF;\n" " background-color:#000000;\n" "}\n" "QScrollArea {\n" " color: #FFFFFF;\n" " background-color:#000000;\n" "}\n" "QSlider::groove:horizontal {\n" " height: 5px;\n" " 
background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n" "}\n" "QSlider::groove:vertical {\n" " width: 5px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n" "}\n" "QSlider::handle:horizontal {\n" " background: rgb(253,253,253);\n" " border-style: solid;\n" " border-width: 1px;\n" " border-color: rgb(207,207,207);\n" " width: 12px;\n" " margin: -5px 0;\n" " border-radius: 7px;\n" "}\n" "QSlider::handle:vertical {\n" " background: rgb(253,253,253);\n" " border-style: solid;\n" " border-width: 1px;\n" " border-color: rgb(207,207,207);\n" " height: 12px;\n" " margin: 0 -5px;\n" " border-radius: 7px;\n" "}\n" "QSlider::add-page:horizontal {\n" " background: rgb(181,181,181);\n" "}\n" "QSlider::add-page:vertical {\n" " background: rgb(181,181,181);\n" "}\n" "QSlider::sub-page:horizontal {\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n" "}\n" "QSlider::sub-page:vertical {\n" " background-color: qlineargradient(spread:pad, y1:0.5, x1:1, y2:0.5, x2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n" "}\n" "QScrollBar:horizontal {\n" " max-height: 20px;\n" " border: 1px transparent grey;\n" " margin: 0px 20px 0px 20px;\n" "}\n" "QScrollBar:vertical {\n" " max-width: 20px;\n" " border: 1px transparent grey;\n" " margin: 20px 0px 20px 0px;\n" "}\n" "QScrollBar::handle:horizontal {\n" " background: rgb(253,253,253);\n" " border-style: solid;\n" " border-width: 1px;\n" " border-color: rgb(207,207,207);\n" " border-radius: 7px;\n" " min-width: 25px;\n" "}\n" "QScrollBar::handle:horizontal:hover {\n" " background: rgb(253,253,253);\n" " border-style: solid;\n" " border-width: 1px;\n" " border-color: rgb(147, 200, 200);\n" " border-radius: 7px;\n" " min-width: 25px;\n" "}\n" 
"QScrollBar::handle:vertical {\n" " background: rgb(253,253,253);\n" " border-style: solid;\n" " border-width: 1px;\n" " border-color: rgb(207,207,207);\n" " border-radius: 7px;\n" " min-height: 25px;\n" "}\n" "QScrollBar::handle:vertical:hover {\n" " background: rgb(253,253,253);\n" " border-style: solid;\n" " border-width: 1px;\n" " border-color: rgb(147, 200, 200);\n" " border-radius: 7px;\n" " min-height: 25px;\n" "}\n" "QScrollBar::add-line:horizontal {\n" " border: 2px transparent grey;\n" " border-top-right-radius: 7px;\n" " border-bottom-right-radius: 7px;\n" " background: rgba(34, 142, 255, 255);\n" " width: 20px;\n" " subcontrol-position: right;\n" " subcontrol-origin: margin;\n" "}\n" "QScrollBar::add-line:horizontal:pressed {\n" " border: 2px transparent grey;\n" " border-top-right-radius: 7px;\n" " border-bottom-right-radius: 7px;\n" " background: rgb(181,181,181);\n" " width: 20px;\n" " subcontrol-position: right;\n" " subcontrol-origin: margin;\n" "}\n" "QScrollBar::add-line:vertical {\n" " border: 2px transparent grey;\n" " border-bottom-left-radius: 7px;\n" " border-bottom-right-radius: 7px;\n" " background: rgba(34, 142, 255, 255);\n" " height: 20px;\n" " subcontrol-position: bottom;\n" " subcontrol-origin: margin;\n" "}\n" "QScrollBar::add-line:vertical:pressed {\n" " border: 2px transparent grey;\n" " border-bottom-left-radius: 7px;\n" " border-bottom-right-radius: 7px;\n" " background: rgb(181,181,181);\n" " height: 20px;\n" " subcontrol-position: bottom;\n" " subcontrol-origin: margin;\n" "}\n" "QScrollBar::sub-line:horizontal {\n" " border: 2px transparent grey;\n" " border-top-left-radius: 7px;\n" " border-bottom-left-radius: 7px;\n" " background: rgba(34, 142, 255, 255);\n" " width: 20px;\n" " subcontrol-position: left;\n" " subcontrol-origin: margin;\n" "}\n" "QScrollBar::sub-line:horizontal:pressed {\n" " border: 2px transparent grey;\n" " border-top-left-radius: 7px;\n" " border-bottom-left-radius: 7px;\n" " background: 
rgb(181,181,181);\n" " width: 20px;\n" " subcontrol-position: left;\n" " subcontrol-origin: margin;\n" "}\n" "QScrollBar::sub-line:vertical {\n" " border: 2px transparent grey;\n" " border-top-left-radius: 7px;\n" " border-top-right-radius: 7px;\n" " background: rgba(34, 142, 255, 255);\n" " height: 20px;\n" " subcontrol-position: top;\n" " subcontrol-origin: margin;\n" "}\n" "QScrollBar::sub-line:vertical:pressed {\n" " border: 2px transparent grey;\n" " border-top-left-radius: 7px;\n" " border-top-right-radius: 7px;\n" " background: rgb(181,181,181);\n" " height: 20px;\n" " subcontrol-position: top;\n" " subcontrol-origin: margin;\n" "}\n" "QScrollBar::left-arrow:horizontal {\n" " border: 1px transparent grey;\n" " border-top-left-radius: 3px;\n" " border-bottom-left-radius: 3px;\n" " width: 6px;\n" " height: 6px;\n" " background: white;\n" "}\n" "QScrollBar::right-arrow:horizontal {\n" " border: 1px transparent grey;\n" " border-top-right-radius: 3px;\n" " border-bottom-right-radius: 3px;\n" " width: 6px;\n" " height: 6px;\n" " background: white;\n" "}\n" "QScrollBar::up-arrow:vertical {\n" " border: 1px transparent grey;\n" " border-top-left-radius: 3px;\n" " border-top-right-radius: 3px;\n" " width: 6px;\n" " height: 6px;\n" " background: white;\n" "}\n" "QScrollBar::down-arrow:vertical {\n" " border: 1px transparent grey;\n" " border-bottom-left-radius: 3px;\n" " border-bottom-right-radius: 3px;\n" " width: 6px;\n" " height: 6px;\n" " background: white;\n" "}\n" "QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal {\n" " background: none;\n" "}\n" "QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\n" " background: none;\n" "}") self.verticalLayout = QtWidgets.QVBoxLayout(Dialog) self.verticalLayout.setObjectName("verticalLayout") self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.label = QtWidgets.QLabel(Dialog) self.label.setObjectName("label") 
self.horizontalLayout_2.addWidget(self.label) self.comboBox = QtWidgets.QComboBox(Dialog) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.comboBox.sizePolicy().hasHeightForWidth()) self.comboBox.setSizePolicy(sizePolicy) self.comboBox.setObjectName("comboBox") self.horizontalLayout_2.addWidget(self.comboBox) self.label_2 = QtWidgets.QLabel(Dialog) self.label_2.setObjectName("label_2") self.horizontalLayout_2.addWidget(self.label_2) self.lineEdit = QtWidgets.QLineEdit(Dialog) self.lineEdit.setObjectName("lineEdit") self.horizontalLayout_2.addWidget(self.lineEdit) self.pushButton_4 = QtWidgets.QPushButton(Dialog) self.pushButton_4.setObjectName("pushButton_4") self.horizontalLayout_2.addWidget(self.pushButton_4) self.verticalLayout.addLayout(self.horizontalLayout_2) self.listWidget = QtWidgets.QListWidget(Dialog) self.listWidget.setObjectName("listWidget") self.verticalLayout.addWidget(self.listWidget) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.pushButton_2 = QtWidgets.QPushButton(Dialog) self.pushButton_2.setObjectName("pushButton_2") self.horizontalLayout.addWidget(self.pushButton_2) self.pushButton_3 = QtWidgets.QPushButton(Dialog) self.pushButton_3.setObjectName("pushButton_3") self.horizontalLayout.addWidget(self.pushButton_3) spacerItem = QtWidgets.QSpacerItem(268, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem) self.pushButton = QtWidgets.QPushButton(Dialog) self.pushButton.setObjectName("pushButton") self.horizontalLayout.addWidget(self.pushButton) self.verticalLayout.addLayout(self.horizontalLayout) self.retranslateUi(Dialog) QtCore.QMetaObject.connectSlotsByName(Dialog) def retranslateUi(self, Dialog): _translate = QtCore.QCoreApplication.translate 
Dialog.setWindowTitle(_translate("Dialog", "Dialog")) self.label.setText(_translate("Dialog", "分类")) self.label_2.setText(_translate("Dialog", "搜索")) self.pushButton_4.setText(_translate("Dialog", "查找")) self.pushButton_2.setText(_translate("Dialog", "导入")) self.pushButton_3.setText(_translate("Dialog", "导出")) self.pushButton.setText(_translate("Dialog", "添加"))
25,829
13,021
# Generated by Django 2.1.7 on 2019-02-14 13:07 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Pedido', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, verbose_name='criado em')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modificado em')), ('active', models.BooleanField(default=True, verbose_name='ativo')), ('status', models.IntegerField(blank=True, choices=[(0, 'Aberto'), (1, 'Enviado'), (2, 'Finalizado'), (3, 'Cancelado')], default=0, verbose_name='Situação')), ('parceiro', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='parceiro')), ], options={ 'verbose_name': 'pedido', 'verbose_name_plural': 'pedidos', }, ), migrations.CreateModel( name='PedidoItem', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ], ), ]
1,514
457
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)

"""Debug utilities"""

import inspect
import traceback
import time


def log_time(fd):
    """Write a timestamped banner to the already-open file object *fd*."""
    timestr = "Logging time: %s" % time.ctime(time.time())
    # fd.write replaces the Python-2-only ``print >>fd`` statement so this
    # module runs unchanged on both Python 2 and Python 3.
    fd.write("=" * len(timestr) + "\n")
    fd.write(timestr + "\n")
    fd.write("=" * len(timestr) + "\n")
    fd.write("\n")


def log_last_error(fname, context=None):
    """Log last error in filename *fname* -- *context*: string (optional)"""
    # ``with`` guarantees the log file is closed even if formatting fails
    # (the original opened the file and never closed it).
    with open(fname, 'a') as fd:
        log_time(fd)
        if context:
            fd.write("Context\n")
            fd.write("-------\n")
            fd.write("\n")
            fd.write(str(context) + "\n")
            fd.write("\n")
        fd.write("Traceback\n")
        fd.write("---------\n")
        fd.write("\n")
        traceback.print_exc(file=fd)
        fd.write("\n")
        fd.write("\n")


def log_dt(fname, context, t0):
    """Append '<context>: <elapsed> ms' to *fname*.

    Elapsed time is measured from timestamp *t0* (seconds, as returned by
    ``time.time()``) to now.
    """
    with open(fname, 'a') as fd:
        log_time(fd)
        # 10 * round(1e2 * dt) keeps the original 10 ms resolution.
        fd.write("%s: %d ms\n" % (context, 10 * round(1e2 * (time.time() - t0))))
        fd.write("\n")
        fd.write("\n")


def caller_name(skip=2):
    """Get a name of a caller in the format module.class.method

    `skip` specifies how many levels of stack to skip while getting caller
    name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.

    An empty string is returned if skipped levels exceed stack height
    """
    stack = inspect.stack()
    start = skip
    if len(stack) < start + 1:
        return ''
    parentframe = stack[start][0]

    name = []
    module = inspect.getmodule(parentframe)
    # module can be None when the frame is executed directly in a console
    # TODO(techtonik): consider using __main__
    if module:
        name.append(module.__name__)
    # detect classname
    if 'self' in parentframe.f_locals:
        # XXX: there seems to be no way to detect a static method call - it
        # will be reported as a plain function call
        name.append(parentframe.f_locals['self'].__class__.__name__)
    codename = parentframe.f_code.co_name
    if codename != '<module>':  # top level usually
        name.append(codename)  # function or a method
    # Drop the frame reference explicitly to break the reference cycle that
    # frame objects create (recommended by the inspect module docs).
    del parentframe
    return ".".join(name)
2,358
809
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Implementation of the `swift_c_module` rule."""

load(":swift_common.bzl", "swift_common")
load(":utils.bzl", "merge_runfiles")

def _swift_c_module_impl(ctx):
    """Wraps the deps' C compilation context in a Swift-importable module."""
    dep_targets = ctx.attr.deps

    # Merge every dependency's CcInfo up front; an empty deps list falls back
    # to an empty compilation context.
    merged_cc_info = None
    all_cc_infos = [dep[CcInfo] for dep in dep_targets]
    if all_cc_infos:
        merged_cc_info = cc_common.merge_cc_infos(cc_infos = all_cc_infos)

    if merged_cc_info != None:
        compilation_context = merged_cc_info.compilation_context
    else:
        compilation_context = cc_common.create_compilation_context()

    # We must repropagate the dependencies' DefaultInfos, otherwise we will
    # lose runtime dependencies that the library expects to be there during a
    # test (or a regular `bazel run`).
    default_info = DefaultInfo(
        data_runfiles = merge_runfiles(
            [dep[DefaultInfo].data_runfiles for dep in dep_targets],
        ),
        default_runfiles = merge_runfiles(
            [dep[DefaultInfo].default_runfiles for dep in dep_targets],
        ),
        files = depset([ctx.file.module_map]),
    )

    swift_info = swift_common.create_swift_info(
        modules = [
            swift_common.create_module(
                name = ctx.attr.module_name,
                clang = swift_common.create_clang_module(
                    compilation_context = compilation_context,
                    module_map = ctx.file.module_map,
                    # TODO(b/142867898): Precompile the module and place it
                    # here.
                    precompiled_module = None,
                ),
            ),
        ],
    )

    providers = [default_info, swift_info]
    if merged_cc_info:
        providers.append(merged_cc_info)
    return providers

swift_c_module = rule(
    attrs = {
        "module_map": attr.label(
            allow_single_file = True,
            doc = """\
The module map file that should be loaded to import the C library dependency
into Swift.
""",
            mandatory = True,
        ),
        "module_name": attr.string(
            doc = """\
The name of the top-level module in the module map that this target represents.

A single `module.modulemap` file can define multiple top-level modules. When
building with implicit modules, the presence of that module map allows any of
the modules defined in it to be imported. When building explicit modules,
however, there is a one-to-one correspondence between top-level modules and
BUILD targets and the module name must be known without reading the module map
file, so it must be provided directly. Therefore, one may have multiple
`swift_c_module` targets that reference the same `module.modulemap` file but
with different module names and headers.
""",
            mandatory = True,
        ),
        "deps": attr.label_list(
            allow_empty = False,
            doc = """\
A list of C targets (or anything propagating `CcInfo`) that are dependencies of
this target and whose headers may be referenced by the module map.
""",
            mandatory = True,
            providers = [[CcInfo]],
        ),
    },
    doc = """\
Wraps one or more C targets in a new module map that allows it to be imported
into Swift to access its C interfaces.

The `cc_library` rule in Bazel does not produce module maps that are compatible
with Swift. In order to make interop between Swift and C possible, users have
one of two options:

1.  **Use an auto-generated module map.** In this case, the `swift_c_module`
    rule is not needed. If a `cc_library` is a direct dependency of a
    `swift_{binary,library,test}` target, a module map will be automatically
    generated for it and the module's name will be derived from the Bazel
    target label (in the same fashion that module names for Swift targets are
    derived). The module name can be overridden by setting the `swift_module`
    tag on the `cc_library`; e.g., `tags = ["swift_module=MyModule"]`.

2.  **Use a custom module map.** For finer control over the headers that are
    exported by the module, use the `swift_c_module` rule to provide a custom
    module map that specifies the name of the module, its headers, and any
    other module information. The `cc_library` targets that contain the
    headers that you wish to expose to Swift should be listed in the `deps`
    of your `swift_c_module` (and by listing multiple targets, you can export
    multiple libraries under a single module if desired). Then, your
    `swift_{binary,library,test}` targets should depend on the
    `swift_c_module` target, not on the underlying `cc_library` target(s).

NOTE: Swift at this time does not support interop directly with C++. Any
headers referenced by a module map that is imported into Swift must have only C
features visible, often by using preprocessor conditions like `#if __cplusplus`
to hide any C++ declarations.
""",
    implementation = _swift_c_module_impl,
)
5,495
1,500
"""A small broadcast-mailer script: logs in over SMTP/TLS and sends one
message to every configured receiver."""

import smtplib
import sys
import time
from datetime import datetime

# --- configuration -----------------------------------------------------------
# NOTE(review): credentials are kept in plain text in the source; prefer
# reading them from environment variables or a secrets store.
trigger = 0
myName = "your name"
myEmail = "your@email.com"
myPass = "y0urP4s5w0rd"
myEmailSMTP = "smtp.yourEmailProvider.com"  # for gmail: smtp.gmail.com for outlook: smtp.office365.com
mySMTPPort = 587
receivers = {"receiver name": "receiver@email.com"}
emailSubject = "I'm Pytomation Mail"
emailBody = """
Hello there,
Feel free to use this Pytomation Mail and modify it \
base on your needs
Thanks and Regards,
Riens Winoto
"""


# --- functions ---------------------------------------------------------------
def initial_setup():
    """Connect to the SMTP server, upgrade to TLS, and log in.

    Returns the ready-to-use ``smtplib.SMTP`` client. On any connection or
    authentication failure, prints the error and exits the script.
    """
    try:
        broad_caster = smtplib.SMTP(myEmailSMTP, mySMTPPort)
        broad_caster.ehlo()
        broad_caster.starttls()
        broad_caster.login(myEmail, myPass)
    # Bug fix: smtplib raises SMTPException subclasses (e.g.
    # SMTPAuthenticationError) which are NOT IOError/OSError, so the original
    # ``except IOError`` let login failures crash with a traceback instead of
    # exiting gracefully.
    except (OSError, smtplib.SMTPException) as err:
        print(str(err))
        time.sleep(1.0)
        sys.exit()
    return broad_caster


def get_date_time():
    """Return the current local time, e.g. 'Feb 4,2022, 9:05AM'.

    NOTE(review): the '%-d' / '%-I' no-padding directives are a glibc
    extension and raise on Windows strftime ('%#d' / '%#I' there) — confirm
    the target platform.
    """
    date_and_time = datetime.now()
    str_date_time = date_and_time.strftime('%b %-d,%Y, %-I:%M%p')
    return str_date_time


def get_sender(sender_name, sender_email):
    """Build the 'from: Name <addr>' header line."""
    return "from: " + sender_name + " <" + sender_email + ">"


def get_receiver(receiver_name, receiver_email):
    """Build the 'to: Name <addr>' header line."""
    return "to: " + receiver_name + " <" + receiver_email + ">"


def get_email_message(email_subject, email_body):
    """Build the 'subject: ...' line followed by the message body."""
    return "subject: " + email_subject + "\n" + email_body


if __name__ == "__main__":
    broadCaster = initial_setup()
    # ``trigger`` is always 0, so this guard effectively means
    # "no receivers configured".
    if trigger >= len(receivers):
        print("Enter receiver name and email next time")
    else:
        for receiverName, receiverEmail in receivers.items():
            fromSender = get_sender(myName, myEmail)
            toReceiver = get_receiver(receiverName, receiverEmail)
            emailMessage = get_email_message(emailSubject, emailBody)
            messenger = fromSender + "\n" + toReceiver + "\n" + emailMessage
            broadCaster.sendmail(myEmail, receiverEmail, messenger)
            sendDateTime = get_date_time()
            print("e-mail sent successfully to {} at {} \n".format(receiverName, sendDateTime))
        broadCaster.quit()
2,170
718
import Tkinter parent_widget = Tkinter.Tk() scale_widget = Tkinter.Scale(parent_widget, from_=0, to=100, orient=Tkinter.HORIZONTAL) scale_widget.set(25) scale_widget.pack() Tkinter.mainloop()
220
84
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Feb 24 14:12:38 2022 @author: j64280 """ import alluvial import pandas as pd import matplotlib.pyplot as plt import numpy as np import matplotlib.cm list_2017 = pd.read_csv('parrainagestotal_2017.csv',sep=';') list_2022 = pd.read_csv('parrainagestotal_2022.csv',sep=';') list_2017['Candidat 2022']='Pas de parrainage en 2022' n_2017,_=list_2017.shape n_2022,_=list_2022.shape n=0 for i in range(n_2017): ind = ((list_2022.Nom == list_2017.Nom.iloc[i]) & (list_2022.Prénom == list_2017.Prénom.iloc[i]) & (list_2022.Département == list_2017.Département.iloc[i]) & (list_2022.Circonscription == list_2017.Circonscription.iloc[i])) if ind.any(): list_2017['Candidat 2022'].iloc[i] = list_2022[ind].Candidat.values[0] list_2017['Candidat-e parrainé-e'].iloc[i] = list_2017['Candidat-e parrainé-e'].iloc[i] + ' ' conserve_2017 = ['FILLON François ','MACRON Emmanuel ', 'HAMON Benoît ','ARTHAUD Nathalie ', 'DUPONT-AIGNAN Nicolas ','MELENCHON Jean-Luc ', 'LASSALLE Jean ','POUTOU Philippe ','CHEMINADE Jacques ','ASSELINEAU François ', 'LE PEN Marine ','YADE Rama ','JUPPE Alain ','JARDIN Alexandre ','MARCHANDISE Charlotte ', 'ALLIOT-MARIE Michèle ','TAUZIN Didier ','GORGES Jean-Pierre ','TROADEC Christian ', 'LARROUTUROU Pierre ','GUAINO Henri ','BAROIN François '] conserve_2022 = ['PÉCRESSE Valérie','MACRON Emmanuel','HIDALGO Anne','ARTHAUD Nathalie', 'DUPONT-AIGNAN Nicolas','ROUSSEL Fabien','MÉLENCHON Jean-Luc','LASSALLE Jean','POUTOU Philippe', 'ZEMMOUR Éric','ASSELINEAU François','LE PEN Marine','JADOT Yannick','KAZIB Anasse', 'KUZMANOVIC Georges','THOUY Hélène','TAUBIRA Christiane','KOENIG Gaspard','MIGUET Nicolas'] list_2017_filtered = list_2017[list_2017['Candidat 2022']!='Pas de parrainage en 2022'] n_2017_filtered,_=list_2017_filtered.shape couple_par = [] for i in range(n_2017_filtered): if list_2017_filtered['Candidat-e parrainé-e'].iloc[i] in conserve_2017: if list_2017_filtered['Candidat 2022'].iloc[i] in 
conserve_2022: couple_par.append([list_2017_filtered['Candidat-e parrainé-e'].iloc[i], list_2017_filtered['Candidat 2022'].iloc[i]]) #%% cmap = matplotlib.cm.get_cmap('jet') ax = alluvial.plot( couple_par, alpha=0.8, color_side=0, rand_seed=4, figsize=(10,15), disp_width=True, wdisp_sep=' '*2, fontname='Monospace', colors = cmap(np.linspace(0,8,len(conserve_2017)) % 1), a_sort=conserve_2017[::-1],b_sort=conserve_2022[::-1]) ax.set_title('Transferts de parrainage entre 2017 et 2022', fontsize=14, fontname='Monospace') plt.text(1.1,-150,'@Alexandre_Goupy') plt.savefig('report_signatures.png',bbox_inches='tight',dpi=200)#,transparent=True)
2,864
1,308
"""Build OpenSees 3D model files.""" import os from collections import OrderedDict, defaultdict from itertools import chain from typing import List, Optional, Tuple import numpy as np from bridge_sim.model import PierSettlement, PointLoad, Config, Material from bridge_sim.sim.model import ( BuildContext, DeckNodes, DeckShells, Node, PierNodes, PierShells, SimParams, ) from bridge_sim.sim.build import ( det_nodes_id_str, det_shells_id_str, det_shells, get_bridge_shells_and_nodes, to_deck_nodes, ) from bridge_sim.sim.run.opensees.build.d3.self_weight import opensees_self_weight_loads from bridge_sim.sim.run.opensees.build.d3.thermal import ( opensees_thermal_axial_deck_loads, opensees_thermal_moment_deck_loads, ) from bridge_sim.sim.run.opensees.build.d3.util import comment from bridge_sim.util import flatten, print_d, print_i, print_w, round_m # Print debug information for this file. # D: str = "fem.run.opensees.build.d3" D: bool = False ##### Begin nodes ##### def opensees_support_nodes( c: Config, deck_nodes: DeckNodes, all_support_nodes: PierNodes, ) -> str: """Opensees node commands for the supports (ignoring deck). By 'ignoring deck' we mean that nodes that belong to both supports and the deck will not be returned by this function but instead by 'opensees_deck_nodes'. Args: c: Config, global configuration object. deck_nodes: DeckNodes, to check for already added support nodes. all_support_nodes: AllSupportNodes, all support nodes to generate commands for. """ # We want to avoid generating commands for support nodes that also belong to # the deck, thus we create a set for fast indexing to allow this check. deck_nodes = set(chain.from_iterable(deck_nodes)) nodes = OrderedDict() # For each support. for s_nodes in all_support_nodes: # For each wall of the support (there are two). for w_nodes in s_nodes: # For each ~vertical line of nodes for a z position at top of wall. for y_nodes in w_nodes: # For each node in the ~vertical line. 
for y, node in enumerate(y_nodes): # Insert the node, if not part of the deck nodes. if node not in deck_nodes: # A dictionary is used incase the node is already added, # incase it is a bottom node shared by both walls. nodes[node] = None return comment( "support nodes", "\n".join(map(lambda n: n.command_3d(), nodes.keys())), units="node nodeTag x y z", ) def opensees_deck_nodes(c: Config, deck_nodes: DeckNodes) -> str: """OpenSees node commands for a bridge deck. The nodes are created based on given positions of deck nodes. Args: c: Config, global configuratin object. """ node_strings = [] node_strings += list( map(lambda node: node.command_3d(), list(chain.from_iterable(deck_nodes)),) ) return comment("deck nodes", "\n".join(node_strings), units="node nodeTag x y z") ##### End nodes ##### ##### Begin fixed nodes ##### class FixNode: """A command to fix a node in some degrees of freedom (dof). Args: node: Node, the node with dof to fix specified. comment_: Optional[str], an optional comment for the command. """ def __init__( self, node: Node, fix_x_translation: bool, fix_y_translation: bool, fix_z_translation: bool, fix_x_rotation: bool, fix_y_rotation: bool, fix_z_rotation: bool, comment: Optional[str] = None, ): self.node = node self.fix_x_translation = fix_x_translation self.fix_y_translation = fix_y_translation self.fix_z_translation = fix_z_translation self.fix_x_rotation = fix_x_rotation self.fix_y_rotation = fix_y_rotation self.fix_z_rotation = fix_z_rotation self.comment = comment def command_3d(self): """The command in string format for a TCL file.""" # TODO: Update comment to include support ID. 
comment_ = "" if self.comment is None else f"; # {self.comment}" return ( f"fix {self.node.n_id}" + f" {int(self.fix_x_translation)}" + f" {int(self.fix_y_translation)}" + f" {int(self.fix_z_translation)}" + f" {int(self.fix_x_rotation)}" + f" {int(self.fix_y_rotation)}" + f" {int(self.fix_z_rotation)}" + f"{comment_}" ) def opensees_fixed_abutment_nodes( c: Config, sim_params: SimParams, deck_nodes: DeckNodes ) -> str: """OpenSees fix commands for fixed nodes on the abument. Fixed for translation but not for rotation. """ thermal = (sim_params.axial_delta_temp is not None) or ( sim_params.moment_delta_temp is not None ) fixed_nodes: List[FixNode] = [] for i_x, x_nodes in enumerate(deck_nodes): assert len(x_nodes) >= 2 for node in [x_nodes[0], x_nodes[-1]]: fixed_nodes.append( FixNode( node=node, fix_x_translation=False, fix_y_translation=True, fix_z_translation=True, # fix_z_translation=(not thermal) or (i_x == (len(deck_nodes) // 2)), fix_x_rotation=False, fix_y_rotation=False, fix_z_rotation=False, ) ) return comment( "fixed deck nodes", "\n".join(map(lambda f: f.command_3d(), fixed_nodes)), units="fix nodeTag x y z rx ry rz", ) def opensees_fixed_pier_nodes( c: Config, sim_params: SimParams, all_support_nodes: PierNodes, pier_disp: List[PierSettlement], ) -> str: """OpenSees fix commands for fixed support nodes.""" # First, for thermal loading, we determine the piers at each longitudinal # (x) position, so for each x position we can then determine which piers # will be fixed in transverse (z) translation. pier_positions = defaultdict(set) for p_i, _ in enumerate(all_support_nodes): pier = c.bridge.supports[p_i] pier_positions[round_m(pier.x)].add(round_m(pier.z)) pier_positions = { pier_x: sorted(pier_zs) for pier_x, pier_zs in pier_positions.items() } fixed_nodes: List[FixNode] = [] # Iterate through each pier. Note that p_nodes is a tuple of nodes for each # pier wall. And each wall is a 2-d array of nodes. 
for p_i, p_nodes in enumerate(all_support_nodes): pier = c.bridge.supports[p_i] # If pier displacement for this pier then select the bottom central node # for the integrator command, and attach it to the pier. free_y_trans = False for ps in pier_disp: if p_i == ps.pier: free_y_trans = True pier = c.bridge.supports[ps.pier] pier.disp_node = p_nodes[0][len(p_nodes[0]) // 2][-1] if len(p_nodes[0]) % 2 == 0: print_w("Pier settlement:") print_w(" no central node (even number of nodes)") # For each ~vertical line of nodes for a z position at top of wall. for y_i, y_nodes in enumerate(p_nodes[0]): # We will fix the bottom node. node = y_nodes[-1] fixed_nodes.append( FixNode( node=node, fix_x_translation=pier.fix_x_translation, fix_y_translation=False if free_y_trans else pier.fix_y_translation, # fix_z_translation=fix_pier_z_translation(pier), fix_z_translation=True, fix_x_rotation=pier.fix_x_rotation, fix_y_rotation=pier.fix_y_rotation, fix_z_rotation=pier.fix_z_rotation, comment=f"pier {p_i} y {y_i}", ) ) return comment( "fixed support nodes", "\n".join(map(lambda f: f.command_3d(), fixed_nodes)), units="fix nodeTag x y z rx ry rz", ) ##### End fixed nodes ##### ##### Begin sections ##### def opensees_section(section: Material): """OpenSees ElasticMembranePlateSection command for a Material.""" # TODO: Implicit information, assumption that if young's modulus in x # direction is modified that cracking is desired (poisson's set to 0). CRACK_Z = not np.isclose(section.youngs_x(), section.youngs) # New orthotropic method. 
return ( f"nDMaterial ElasticOrthotropic {section.id}" f" {section.youngs_x() * 1E6} {section.youngs * 1E6} {section.youngs * 1E6}" f" {0 if CRACK_Z else section.poissons} {section.poissons} {section.poissons}" f" {(section.youngs * 1E6) / (2 * (1 + section.poissons))}" f" {(section.youngs * 1E6) / (2 * (1 + section.poissons))}" f" {(section.youngs * 1E6) / (2 * (1 + section.poissons))}" f" {section.density * 1E-3}" f"\nsection PlateFiber {section.id} {section.id} {section.thickness}" ) # Old isotropic method. raise ValueError("Not using orthotropic method") return ( f"section ElasticMembranePlateSection {section.id}" + f" {section.youngs * 1E6} {section.poissons} {section.thickness}" + f" {section.density * 1E-3}" ) def opensees_deck_sections(c: Config): """Sections used in the bridge deck.""" return comment( "deck sections", "\n".join([opensees_section(section) for section in c.bridge.sections]), units=( "section ElasticMembranePlateSection secTag youngs_modulus" + " poisson_ratio depth mass_density" ), ) def opensees_pier_sections(c: Config, all_pier_elements: PierShells): """Sections used in the bridge's piers.""" pier_shells = det_shells(all_pier_elements) # Some pier's may refer to the same section so we create a set to avoid # rendering duplicate section definitions into the .tcl file. 
pier_sections = set([pier_shell.section for pier_shell in pier_shells]) return comment( "pier sections", "\n".join([opensees_section(section) for section in pier_sections]), units=( "section ElasticMembranePlateSection secTag youngs_modulus" + " poisson_ratio depth mass_density" ), ) ##### End sections ##### ##### Begin shell elements ##### def opensees_deck_elements(c: Config, deck_elements: DeckShells) -> str: """OpenSees element commands for a bridge deck.""" deck_shells = det_shells(deck_elements) return comment( "deck shell elements", "\n".join(map(lambda e: e.command_3d(), deck_shells)), units="element ShellMITC4 eleTag iNode jNode kNode lNode secTag", ) def opensees_pier_elements(c: Config, all_pier_elements: PierShells) -> str: """OpenSees element commands for a bridge's piers.""" pier_shells = det_shells(all_pier_elements) return comment( "pier shell elements", "\n".join(map(lambda e: e.command_3d(), pier_shells)), units="element ShellMITC4 eleTag iNode jNode kNode lNode secTag", ) # End shell elements # # Begin loads # def opensees_load( c: Config, pload: PointLoad, deck_nodes: DeckNodes, ): """An OpenSees load command.""" assert deck_nodes[0][0].y == 0 assert deck_nodes[-1][-1].y == 0 best_node = sorted( chain.from_iterable(deck_nodes), key=lambda node: node.distance(x=pload.x, y=0, z=pload.z), )[0] assert np.isclose(best_node.y, 0) print(f"before assert load.x = {pload.x}") print(f"best_node_x = {best_node.x}") assert np.isclose(best_node.x, pload.x) assert np.isclose(best_node.z, pload.z) return f"load {best_node.n_id} 0 {pload.load} 0 0 0 0" def opensees_loads( c: Config, ploads: List[PointLoad], deck_nodes: DeckNodes, pier_disp: List[PierSettlement], ): """OpenSees load commands for a .tcl file.""" # In case of pier displacement apply load at the pier's central bottom node, # the load intensity doesn't matter though, only the position matters. 
if len(pier_disp) > 0: load_str = "" for ps in pier_disp: node = c.bridge.supports[ps.pier].disp_node load_str += f"\nload {node.n_id} 0 {ps.settlement * 1000} 0 0 0 0" # Otherwise find the deck nodes which best suit given point loads. else: load_str = "\n".join( opensees_load(c=c, pload=pload, deck_nodes=deck_nodes) for pload in ploads ) return comment("loads", load_str, units="load nodeTag N_x N_y N_z N_rx N_ry N_rz") ##### End loads ##### ##### Begin recorders ##### def opensees_translation_recorders( c: Config, fem_params: SimParams, os_runner: "OSRunner", ctx: BuildContext ) -> str: """OpenSees recorder commands for translation.""" # A list of tuples of ResponseType and OpenSees direction index, for # translation response types, if requested in fem_params.response_types. translation_response_types = [] # X translation. x_path = os_runner.x_translation_path(c, fem_params) translation_response_types.append((x_path, 1)) print_i(f"OpenSees: saving x translation at {x_path}") # Y translation. y_path = os_runner.y_translation_path(c, fem_params) translation_response_types.append((y_path, 2)) print_i(f"OpenSees: saving y translation at {y_path}") # Z translation. z_path = os_runner.z_translation_path(c, fem_params) translation_response_types.append((z_path, 3)) print_i(f"OpenSees: saving z translation at {z_path}") # Append a recorder string for each response type (recording nodes). 
recorder_strs = [] node_str = det_nodes_id_str(ctx) for response_path, direction in translation_response_types: print_d(D, f"Adding response path to build: {response_path}") recorder_strs.append( f"recorder Node -file {response_path} -node {node_str} -dof" + f" {direction} disp" ) return comment( "translation recorders", "\n".join(recorder_strs), units="recorder Node -file path -node nodeTags -dof direction disp", ) def opensees_strain_recorders( c: Config, sim_params: SimParams, os_runner: "OSRunner", ctx: BuildContext ): """OpenSees recorder commands for translation.""" return "\n".join( f"recorder Element" f" -file {os_runner.strain_path(config=c, sim_params=sim_params, point=point)}" f" -ele {det_shells_id_str(ctx)} material {str(point)} deformation" for point in [1, 2, 3, 4] ) def opensees_forces( config: Config, sim_params: SimParams, os_runner: "OSRunner", ctx: BuildContext ): return ( f"recorder Element" f" -file {os_runner.forces_path(config=config, sim_params=sim_params)}" f" -ele {det_shells_id_str(ctx)} forces" ) def opensees_stress_variables( c: Config, sim_params: SimParams, os_runner: "OSRunner", ctx: BuildContext ) -> Tuple[str, str]: """OpenSees stress recorder variables. These replace <<ELEM_IDS>> and <<FORCES_OUT_FILE>> in the TCL file. 
""" return ( det_shells_id_str(ctx), os_runner.stress_path(config=c, sim_params=sim_params), ) def opensees_integrator(c: Config, pier_disp: List[PierSettlement]): """The integrator command to use based on FEMParams.""" if len(pier_disp) > 0: node = c.bridge.supports[pier_disp[0].pier].disp_node if len(pier_disp) > 1: print_w(f"Using pier {pier_disp[0].pier} for DisplacementControl") return ( f"integrator DisplacementControl {node.n_id} 2" + f" {pier_disp[0].settlement}" ) return "integrator LoadControl 1" def opensees_algorithm(pier_disp: List[PierSettlement]): """The algorithm command to use based on FEMParams.""" if len(pier_disp) > 0: return "algorithm Linear" return "algorithm Newton" def opensees_test(pier_disp: List[PierSettlement]): """The test command to use based on FEMParams.""" if len(pier_disp) > 0: return "" return "test NormDispIncr 1.0e-12 1000" ##### End recorders ##### def build_model_3d(c: Config, expt_params: List[SimParams], os_runner: "OSRunner"): """Build OpenSees 3D model files. TODO: ExptParams -> SimParams. """ # Read in the template model file. dir_path = os.path.dirname(os.path.realpath(__file__)) template_path = os.path.normpath( os.path.join(dir_path, "../../../../../../", c.os_3d_model_template_path) ) with open(template_path) as f: in_tcl = f.read() # Build a model file for each simulation. for sim_params in expt_params: # Setup the 'BuildContext' for this simulation. sim_ctx = sim_params.build_ctx() # Determine nodes and shells. bridge_shells, bridge_nodes = get_bridge_shells_and_nodes( bridge=c.bridge, ctx=sim_ctx ) deck_shells, pier_shells = bridge_shells deck_shell_nodes, pier_nodes = bridge_nodes deck_nodes = to_deck_nodes(deck_shell_nodes) # Attaching nodes and shells to the 'SimParams'. This allows the convert # process to build a deterministic list of nodes and shells. They should # be deleted again at that point. 
sim_params.bridge_shells = bridge_shells sim_params.bridge_nodes = bridge_nodes # Build the 3D model file by replacements in the template model file. out_tcl = ( in_tcl.replace( "<<DECK_NODES>>", opensees_deck_nodes(c=c, deck_nodes=deck_nodes), ) .replace( "<<SUPPORT_NODES>>", opensees_support_nodes( c=c, deck_nodes=deck_nodes, all_support_nodes=pier_nodes, ), ) .replace( "<<FIX_DECK>>", opensees_fixed_abutment_nodes( c=c, sim_params=sim_params, deck_nodes=deck_nodes ), ) .replace( "<<FIX_SUPPORTS>>", opensees_fixed_pier_nodes( c=c, sim_params=sim_params, all_support_nodes=pier_nodes, pier_disp=sim_params.pier_settlement, ), ) .replace( "<<LOAD>>", opensees_loads( c=c, ploads=sim_params.ploads, deck_nodes=deck_nodes, pier_disp=sim_params.pier_settlement, ), ) .replace( "<<THERMAL_AXIAL_LOAD_DECK>>", opensees_thermal_axial_deck_loads( c=c, sim_params=sim_params, deck_elements=deck_shells, ctx=sim_ctx, ), ) .replace( "<<THERMAL_MOMENT_LOAD_DECK>>", opensees_thermal_moment_deck_loads( c=c, sim_params=sim_params, deck_elements=deck_shells, ctx=sim_ctx, ), ) .replace( "<<SELF_WEIGHT>>", opensees_self_weight_loads(c, sim_params, deck_shells), ) .replace("<<SUPPORTS>>", "") .replace("<<DECK_SECTIONS>>", opensees_deck_sections(c=c)) .replace( "<<TRANS_RECORDERS>>", opensees_translation_recorders( c=c, fem_params=sim_params, os_runner=os_runner, ctx=sim_ctx ), ) .replace( "<<FORCES>>", opensees_forces( config=c, sim_params=sim_params, os_runner=os_runner, ctx=sim_ctx ), ) .replace( "<<DECK_ELEMENTS>>", opensees_deck_elements(c=c, deck_elements=deck_shells), ) .replace( "<<PIER_ELEMENTS>>", opensees_pier_elements(c=c, all_pier_elements=pier_shells), ) .replace( "<<PIER_SECTIONS>>", opensees_pier_sections(c=c, all_pier_elements=pier_shells), ) .replace( "<<INTEGRATOR>>", opensees_integrator(c=c, pier_disp=sim_params.pier_settlement), ) .replace("<<ALGORITHM>>", opensees_algorithm(sim_params.pier_settlement)) .replace("<<TEST>>", opensees_test(sim_params.pier_settlement)) ) 
elem_ids, forces_out_file = opensees_stress_variables( c=c, sim_params=sim_params, os_runner=os_runner, ctx=sim_ctx ) out_tcl = out_tcl.replace("<<ELEM_IDS>>", elem_ids).replace( "<<FORCES_OUT_FILE>>", forces_out_file ) out_tcl = out_tcl.replace( "<<STRAIN_RECORDERS>>", opensees_strain_recorders( c=c, sim_params=sim_params, os_runner=os_runner, ctx=sim_ctx ), ) # Write the generated model file. model_path = os_runner.sim_model_path( config=c, sim_params=sim_params, ext="tcl" ) with open(model_path, "w") as f: f.write(out_tcl) num_nodes = len(set(flatten(bridge_nodes, Node))) print_i(f"OpenSees: saved 3D model ({num_nodes} nodes) file to {model_path}") return expt_params
21,767
7,062
from setuptools import setup, find_packages from codecs import open import os import klpysci VERSION = klpysci.__version__ cwd = os.path.abspath(os.path.dirname(__file__)) # Get the long_description from the Description.rst file with open(os.path.join(cwd, 'DESCRIPTION.rst'), encoding='utf-8') as f: long_description = f.read() MODULENAME = 'klpysci' DATA_FILES = [] DOC_FILES = [(os.path.join('share',MODULENAME,root), [os.path.join(root,f) for f in files]) \ for root, dirs, files in os.walk('docs')] DATA_FILES.extend(DOC_FILES) setup( name = MODULENAME, version = VERSION, description = 'Scientific Python utilities and tools', long_description = long_description, url = 'https://github.com/KathleenLabrie/KLpysci', author = 'Kathleen Labrie', author_email = 'kathleen.labrie.phd@gmail.com', license = 'LICENSE', # See https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers = [ 'Development Status :: 2 - Pre-Alpha', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: ISC License (ISCL)', 'Operating System :: Mac OS :: MacOS X', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python :: 2.7', 'Topic :: Scientific/Engineering :: Astronomy' ], keywords = 'mathematics physics data processing', packages = find_packages(exclude=['docs']), #install_requires = [''] #extras_require = { # 'dev': [''], #}, #package_data = { # 'klpysci': [''], # }, data_files = DATA_FILES, #scripts = [ # 'klpysci/...' # ], zip_safe = False, )
1,944
619
class ProgramError(Exception): """Generic exception class for errors in this program.""" pass class PluginError(ProgramError): pass class NoOptionError(ProgramError): pass class ExtCommandError(ProgramError): pass
240
66
"""Solresol, the musical constructed language, as a small Python toolkit.

Provides an enum for the seven solfege notes, a sine-tone synthesiser,
and word/sentence classes that convert between several textual encodings
(full solfege spelling, the abbreviated 'ses' spelling, digit strings,
and packed octal integers), render audio melodies, draw glyphs with
matplotlib, and look up definitions in a JSON dictionary.
"""
import enum
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from scipy.io import wavfile  # NOTE(review): imported but never used in this chunk
import json

# When True, playback goes through IPython's notebook Audio widget;
# when False (default) we fall back to the sounddevice library.
jupyter_nb_mode = False
try:
    # The assert deliberately forces the except branch whenever notebook
    # mode is off, so IPython is only imported when actually wanted.
    assert jupyter_nb_mode
    from IPython.display import Audio
except:  # NOTE(review): bare except also silently hides a broken IPython install
    import sounddevice as sd

    def Audio(array,rate=44100):
        # Minimal stand-in for IPython.display.Audio: start playback
        # immediately (non-blocking) and return None.
        sd.play(array,rate)


class SolfegeSymbol(enum.Enum):
    """One of the seven solfege notes (Do..Si), value 1..7.

    Each note is declared under many alias names (full/abbreviated
    spellings, single letters, 'ses' consonants/vowels) so that
    ``SolfegeSymbol[name]`` resolves every spelling the parsers below use.
    """
    DO,Do,do,D,d,p,o = 1,1,1,1,1,1,1
    RE,Re,re,R,r,k,e = 2,2,2,2,2,2,2
    MI,Mi,mi,M,m,i = 3,3,3,3,3,3
    FA,Fa,fa,F,f,a = 4,4,4,4,4,4
    SOL,Sol,sol,So,so,S,s,u = 5,5,5,5,5,5,5,5
    LA,La,la,L,l,au = 6,6,6,6,6,6
    # NOTE(review): the aliases au=6 and ai=7 contradict the sesvowel
    # property below (which maps LA->'ai' and SI->'au'); the 'ses' parser
    # in SolresolWord.__init__ bypasses these aliases via str.replace, so
    # they appear unused and possibly swapped -- confirm before relying on them.
    SI,Si,si,TI,Ti,ti,T,t,ai = 7,7,7,7,7,7,7,7,7

    @property
    def freq(self,octave=4):
        # Fundamental frequencies (Hz) of C4..B4, indexed by note value.
        # NOTE(review): because this is a property, the octave argument can
        # never be passed by callers -- it is always the default 4.
        notes = [261.63,293.66,329.63,349.23,392.00,440.00,493.88]
        return notes[self.value-1]*(2**(octave-4))

    @property
    def shortname(self):
        """Single-letter solfege abbreviation (d, r, m, f, s, l, t)."""
        names = 'drmfslt'
        return names[self.value-1]

    @property
    def sescons(self):
        """Consonant used for this note in the 'ses' spelling."""
        names = 'pkmfslt'
        return names[self.value-1]

    @property
    def sesvowel(self):
        """Vowel (or digraph) used for this note in the 'ses' spelling."""
        names = list('oeiau')+['ai','au']
        return names[self.value-1]

    def makeglyph(self,xy,scale=1,color='black',weight=2,doubler=False):
        """Build the matplotlib patch for this note's glyph.

        Returns ``(shape, attachment)`` where ``shape`` is a matplotlib
        patch anchored at ``xy`` and ``attachment`` is the coordinate the
        next glyph should start from.  With ``doubler=True`` a smaller
        mark is produced (used when a note repeats) and the attachment
        point stays at ``xy``.
        """
        x,y=xy
        if doubler:
            # One small stroke per note, selected by note value (1..7).
            shape = [
                patches.FancyArrowPatch((x-scale/2,y+2*scale/6),(x-scale/2,y+4*scale/6),arrowstyle='-',color=color,linewidth=weight),
                patches.FancyArrowPatch((x-scale/6,y+scale/2),(x+scale/6,y+scale/2),arrowstyle='-',color=color,linewidth=weight),
                patches.FancyArrowPatch((x-scale/2,y+2*scale/6),(x-scale/2,y+4*scale/6),arrowstyle='-',color=color,linewidth=weight),
                patches.FancyArrowPatch((x-4*scale/6,y+scale/2),(x-scale/3,y+scale/2),arrowstyle='-',color=color,linewidth=weight),
                patches.FancyArrowPatch((x-scale/2,y-scale/6),(x-scale/2,y+scale/6),arrowstyle='-',color=color,linewidth=weight),
                patches.FancyArrowPatch((x-2*scale/6,y+scale/2),(x-4*scale/6,y+scale/2),arrowstyle='-',color=color,linewidth=weight),
                patches.FancyArrowPatch((x-4*scale/6,y-scale/2),(x-scale/3,y-scale/2),arrowstyle='-',color=color,linewidth=weight),
            ][self.value-1]
            attachment = xy
        else:
            # Full glyph plus its attachment point, one pair per note.
            shape, attachment = [
                (patches.Circle((x+scale/2,y),scale/2,fill=False,color=color,linewidth=weight),(x+scale,y)),
                (patches.FancyArrowPatch((x,y),(x,y-scale),arrowstyle='-',color=color,linewidth=weight),(x,y-scale)),
                (patches.Arc((x+scale/2,y),scale,scale,theta1=0.0,theta2=180.0,color=color,linewidth=weight),(x+scale,y)),
                (patches.FancyArrowPatch((x,y),(x+scale,y-scale),arrowstyle='-',color=color,linewidth=weight),(x+scale,y-scale)),
                (patches.FancyArrowPatch((x,y),(x+scale,y),arrowstyle='-',color=color,linewidth=weight),(x+scale,y)),
                (patches.Arc((x,y-scale/2),scale,scale,theta1=90.0,theta2=-90.0,color=color,linewidth=weight),(x,y-scale)),
                (patches.FancyArrowPatch((x,y),(x+scale,y+scale),arrowstyle='-',color=color,linewidth=weight),(x+scale,y+scale))
            ][self.value-1]
        return shape, attachment


def generate_note(frequency, duration, sample_rate=44100, amplitude=1, envelope_ratio=1/3):
    """Synthesise one sine tone with a linear fade-in/fade-out envelope.

    frequency is in Hz, duration in seconds.  envelope_ratio is the
    fraction of the note spent in each of the fade-in and fade-out ramps.
    Returns a 1-D numpy array of float samples.
    """
    fmul = 2*frequency*np.pi/sample_rate
    note = np.sin(fmul*np.arange(sample_rate*duration))
    env_time = int(envelope_ratio*sample_rate*duration)
    # Ramp up, hold at amplitude, ramp down.
    envelope = np.concatenate((np.linspace(0,amplitude,env_time),amplitude*np.ones(int(sample_rate*duration-2*env_time)),np.linspace(amplitude,0,env_time)))
    return note*envelope


class SolresolWord():
    """A single Solresol word: an ordered sequence of SolfegeSymbol notes.

    Accepts several input encodings (see __init__) and can render itself
    as solfege text, 'ses' spelling, an octal-packed integer, an audio
    melody, or a drawn glyph sequence.
    """

    def __init__(self, word, syntax='default'):
        """Parse *word* into a list of SolfegeSymbol.

        word may be: a list of SolfegeSymbol / alias strings / ints; a
        string in 'ses' spelling (syntax 'ses'/'s'), digit-string form
        (syntax 'num'/'#'/0), or full solfege text (syntax
        'full'/'default'); or an int whose octal digits are note values.
        """
        if isinstance(word, list):
            # List input: element type decides the conversion.
            if isinstance(word[0], SolfegeSymbol):
                self.word = word
            elif isinstance(word[0], str):
                self.word = [SolfegeSymbol[s] for s in word]
            elif isinstance(word[0], int):
                self.word = [SolfegeSymbol(i) for i in word]
        elif isinstance(word, str):
            if syntax in ['ses','s']:
                # Collapse the two-character vowels to single-letter
                # aliases first, then look up character by character.
                self.word = [SolfegeSymbol[s] for s in word.replace('ai','l').replace('au','t')]
            elif syntax in ['num','#',0]:
                # Digit string; zeros are padding from the packed encoding.
                self.word = [SolfegeSymbol(int(s)) for s in word.strip('0')]
            elif syntax in ['full','default']:
                self.word = []
                # Greedy scan: 'sol' is the only three-letter syllable,
                # but 'sola...' must parse as 'so'+'la', hence the guard.
                while len(word) > 0:
                    if word.lower().startswith('sol') and not word.lower().startswith('sola'):
                        self.word.append(SolfegeSymbol.SOL)
                        word = word[3:]
                    else:
                        self.word.append(SolfegeSymbol[word[:2]])
                        word = word[2:]
        elif isinstance(word, int):
            # Octal digits of the integer are note values (zero-padded).
            self.word = [SolfegeSymbol(int(s)) for s in oct(word)[2:].strip('0')]

    def __repr__(self):
        return f"{type(self).__name__}(['"+"','".join(smb.name for smb in self.word)+"'])"

    def __getitem__(self,ix):
        return self.word.__getitem__(ix)

    def __len__(self):
        return len(self.word)

    def __iter__(self):
        return iter(self.word)

    @property
    def ses(self):
        """Abbreviated 'ses' spelling: alternating consonant/vowel letters."""
        if len(self) == 1:
            # One-note words are written as their vowel alone.
            return self.word[0].sesvowel
        else:
            return ''.join(ltr.sescons if ix%2==0 else ltr.sesvowel for ix,ltr in enumerate(self.word))

    @property
    def fulltext(self):
        """Full lower-case solfege spelling, e.g. 'solresol'."""
        return ''.join(smb.name for smb in self.word).lower()

    def __str__(self):
        return self.fulltext

    @property
    def value(self):
        # Note values 1..7 read as octal digits.
        return int(''.join(str(ltr.value) for ltr in self.word),8)

    @property
    def definition(self):
        """Dictionary entry for this word (comma-separated senses)."""
        return solresol_dict[self.fulltext]

    def __int__(self):
        return self.value

    def melody(self, note_len=0.2, amplitude=1, envelope_ratio=0.2, sample_rate=44100):
        """Concatenated audio samples for the word's notes."""
        return np.concatenate([generate_note(ltr.freq,note_len,sample_rate,amplitude,envelope_ratio) for ltr in self.word])

    def draw(self,ax,color='black',weight=2,startpos=(0,0)):
        """Draw the word's glyph chain onto matplotlib axes *ax*.

        Returns the start position for the next word (two units right of
        the last attachment point, back at the starting baseline).
        """
        pos=startpos
        for ix,ltr in enumerate(self.word):
            # Nudge LA up-right after SI or DO so the glyphs don't collide;
            # the trailing ix>0 keeps the first letter from matching word[-1].
            if ltr==SolfegeSymbol.LA and (self.word[ix-1]==SolfegeSymbol.SI or self.word[ix-1]==SolfegeSymbol.DO) and ix>0:
                pos = (pos[0]+0.5,pos[1]+0.5)
            # A repeated note gets the compact 'doubler' mark.
            g,pos = ltr.makeglyph(pos,color=color,weight=weight,doubler=(ltr==self.word[ix-1] and ix>0))
            ax.add_patch(g)
        ax.axis('scaled')
        ax.axis('off')
        return pos[0]+2,startpos[1]


class Solresol():
    """A Solresol sentence: a sequence of SolresolWord.

    Construct from text (punctuation stripped, whitespace-separated), a
    list of word encodings, or one integer packing each word into five
    octal digits.
    """

    def __init__(self, text, syntax='default'):
        if isinstance(text,str):
            # Strip ASCII punctuation before splitting into words.
            text = text.translate(str.maketrans('','','!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'))
            self.words = [SolresolWord(word,syntax) for word in text.split()]
        elif isinstance(text,list):
            self.words = [SolresolWord(word,syntax) for word in text]
        elif isinstance(text,int):
            # Decode: the octal expansion is read in 5-digit groups,
            # one group per word (the inverse of the value property).
            sw = oct(text)[2:]
            self.words = [SolresolWord(int(sw[i:i+5],8)) for i in range(0,len(sw),5)]

    @property
    def fulltext(self):
        """Space-separated full solfege spelling of the sentence."""
        return ' '.join(word.fulltext for word in self.words)

    @property
    def ses(self):
        """Space-separated 'ses' spelling of the sentence."""
        return ' '.join(word.ses for word in self.words)

    @property
    def numlist(self):
        """Per-word integer values."""
        return [int(word) for word in self.words]

    @property
    def value(self):
        # Pack each word into exactly 5 octal digits, right-padded with
        # zeros.  NOTE(review): words longer than 5 notes would overflow
        # this fixed-width packing -- confirm that cannot occur.
        return int(''.join(oct(num)[2:].ljust(5,'0') for num in self.numlist),8)

    def __int__(self):
        return self.value

    def __str__(self):
        return self.fulltext

    def __getitem__(self,ix):
        return self.words.__getitem__(ix)

    def __len__(self):
        return len(self.words)

    def __iter__(self):
        return iter(self.words)

    def __repr__(self):
        return f"Solresol('{str(self)}')"

    def melody(self, note_len=0.2, amplitude=1, envelope_ratio=0.2, gap_ratio=1, sample_rate=44100):
        """Audio samples for the whole sentence, with silent gaps between words."""
        notes = []
        for word in self.words:
            notes.append(word.melody(note_len, amplitude, envelope_ratio, sample_rate))
            notes.append(np.zeros(int(note_len*sample_rate*gap_ratio)))
        return np.concatenate(notes)

    def play(self, note_len=0.2, amplitude=1, envelope_ratio=0.2, gap_ratio=1):
        """Render and play the sentence via the module-level Audio function."""
        return Audio(self.melody(note_len, amplitude, envelope_ratio, gap_ratio, 44100),rate=44100)

    def draw(self,color='black',weight=2,subplot_mode=False,rowmax=5):
        """Draw the sentence; one axes per word when subplot_mode, else inline."""
        if len(self) > 1 and subplot_mode:
            # NOTE(review): with more than one row plt.subplots returns a
            # 2-D axes array, so zip() would pair words with rows, not
            # individual axes -- confirm intended usage.
            fig,axs = plt.subplots(len(self)//rowmax+1,(len(self)-1)%rowmax+1)
            for word,ax in zip(self.words,axs):
                word.draw(ax,color=color,weight=weight)
        else:
            fig,ax = plt.subplots()
            pos = (0,0)
            # Each word's draw() returns the start position for the next.
            for word in self.words:
                pos = word.draw(ax,color=color,weight=weight,startpos=pos)
        return fig

    def translate(self,alldefs=False,random=False,ix=0):
        """Word-by-word dictionary translation.

        alldefs=True lists every sense; otherwise one sense per word is
        picked -- randomly when random=True, else by index *ix*.
        """
        translation = []
        for word in self.words:
            if alldefs:
                translation.append(f'{word.fulltext}: ({word.definition})')
            else:
                dfn = word.definition.split(',')
                # NOTE: reuses the ix parameter as the per-word choice when
                # random is set, so it changes on every iteration.
                ix = np.random.randint(len(dfn)) if random else ix
                translation.append(dfn[ix].strip())
        return ' '.join(translation)


# Word -> comma-separated-definitions mapping, loaded at import time from
# the current working directory.
with open('solresol_dict.json') as f:
    solresol_dict = json.load(f)

# Reference spreadsheet for the Solresol dictionary (source of the JSON,
# presumably -- confirm).
dictionary_url = "https://docs.google.com/spreadsheets/d/1-3lBxMURGN4AtGG846kuVGVNuEiHewCT88PiBahnODA/edit#gid=0"
9,439
3,435
""" Icegrams: A trigrams library for Icelandic CFFI builder for _trie module Copyright (C) 2020 Miðeind ehf. Original author: Vilhjálmur Þorsteinsson This software is licensed under the MIT License: Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. This module only runs at setup/installation time. It is invoked from setup.py as requested by the cffi_modules=[] parameter of the setup() function. It causes the _trie.*.so CFFI wrapper library to be built from its source in trie.cpp. """ import os import platform import cffi # Don't change the name of this variable unless you # change it in setup.py as well ffibuilder = cffi.FFI() _PATH = os.path.dirname(__file__) or "." 
# Platform flags: the compiler arguments chosen below differ per OS.
WINDOWS = platform.system() == "Windows"
MACOS = platform.system() == "Darwin"

# What follows is the actual Python-wrapped C interface to trie.*.so
# It must be kept in sync with trie.h
declarations = """

typedef unsigned int UINT;
typedef uint8_t BYTE;
typedef uint32_t UINT32;
typedef uint64_t UINT64;
typedef void VOID;

UINT mapping(const BYTE* pbMap, const BYTE* pbWord);
UINT bitselect(const BYTE* pb, UINT n);
UINT retrieve(const BYTE* pb, UINT nStart, UINT n);
UINT lookupFrequency(const BYTE* pb, UINT nQuantumSize, UINT nIndex);
UINT64 lookupMonotonic(const BYTE* pb, UINT nQuantumSize, UINT nIndex);
VOID lookupPairMonotonic(const BYTE* pb, UINT nQuantumSize, UINT nIndex,
    UINT64* pn1, UINT64* pn2);
UINT64 lookupPartition(const BYTE* pb, UINT nOuterQuantum, UINT nInnerQuantum,
    UINT nIndex);
VOID lookupPairPartition(const BYTE* pb, UINT nQuantumSize, UINT nInnerQuantum,
    UINT nIndex, UINT64* pn1, UINT64* pn2);
UINT searchMonotonic(const BYTE* pb, UINT nQuantumSize, UINT nP1, UINT nP2,
    UINT64 n);
UINT searchMonotonicPrefix(const BYTE* pb, UINT nQuantumSize, UINT nP1, UINT nP2,
    UINT64 n);
UINT searchPartition(const BYTE* pb, UINT nOuterQuantum, UINT nInnerQuantum,
    UINT nP1, UINT nP2, UINT64 n);
UINT searchPartitionPrefix(const BYTE* pb, UINT nOuterQuantum, UINT nInnerQuantum,
    UINT nP1, UINT nP2, UINT64 n);

"""

# Do the magic CFFI incantations necessary to get CFFI and setuptools
# to compile trie.cpp at setup time, generate a .so library and
# wrap it so that it is callable from Python and PyPy as _trie

if WINDOWS:
    # MSVC: relax conforming offsetof handling (presumably required by
    # constructs in trie.cpp -- see the /Zc:offsetof- docs to confirm).
    extra_compile_args = ["/Zc:offsetof-"]
elif MACOS:
    os.environ["CFLAGS"] = "-stdlib=libc++"  # Fixes PyPy build on macOS 10.15.6+
    extra_compile_args = ["-mmacosx-version-min=10.7", "-stdlib=libc++"]
else:
    # Adding -O3 to the compiler arguments doesn't seem to make
    # any discernible difference in lookup speed
    extra_compile_args = ["-std=c++11"]

# Register the C++ source and the compiled extension's import name.
ffibuilder.set_source(
    "icegrams._trie",
    # trie.cpp is written in C++ but must export a pure C interface.
    # This is the reason for the "extern 'C' { ... }" wrapper.
    'extern "C" {\n' + declarations + "\n}\n",
    source_extension=".cpp",
    sources=["src/icegrams/trie.cpp"],
    extra_compile_args=extra_compile_args,
)

# Declare the same interface to CFFI so the wrapper functions are generated.
ffibuilder.cdef(declarations)

if __name__ == "__main__":
    # Allow building the extension directly, outside of setup.py.
    ffibuilder.compile(verbose=False)
4,351
1,525