text
string
size
int64
token_count
int64
#!/usr/bin/env python3
# -*-coding:utf-8-*-
"""This module is used to extract audio features from the data.

Provides short-term features (zero-crossing rate, energy, energy entropy,
spectral centroid/spread) plus MFCCs, and a couple of whole-file extractors.
"""
import numpy as np
# dct was previously imported from scipy.fftpack.realtransforms, a private
# namespace removed in modern SciPy; both names are public in scipy.fftpack.
from scipy.fftpack import fft, dct

# Small constant to avoid division by zero / log of zero.
eps = 0.00000001


def file_length(soundParams):
    """Return the file length in seconds.

    ``soundParams`` follows ``wave.getparams()`` ordering:
    (nchannels, sampwidth, framerate, nframes, ...).
    """
    return soundParams[3] / soundParams[2]


def zcr(frame):
    """Compute the zero crossing rate of ``frame`` (fraction in [0, 1])."""
    count = len(frame)
    # Each sign change makes |diff(sign)| contribute 2, hence the /2.
    countZ = np.sum(np.abs(np.diff(np.sign(frame)))) / 2
    return countZ / (count - 1)


def energy(frame):
    """Compute the mean signal energy of ``frame``."""
    return np.sum(frame ** 2) / len(frame)


def energy_entropy(frame, numOfShortBlocks=10):
    """Compute the entropy of the sub-frame energy distribution.

    :param frame: 1-D signal window.
    :param numOfShortBlocks: number of equal sub-windows the frame is cut into.
    :return: Shannon entropy (bits) of the normalized sub-frame energies.
    """
    tfe = np.sum(frame ** 2)  # total frame energy
    L = len(frame)
    subWinLength = int(np.floor(L / numOfShortBlocks))
    if L != subWinLength * numOfShortBlocks:
        # Drop the remainder so the frame divides evenly.
        frame = frame[0:subWinLength * numOfShortBlocks]
    # subWindows has shape (subWinLength, numOfShortBlocks): one sub-window
    # per column (the old comment had the dimensions reversed).
    subWindows = frame.reshape(subWinLength, numOfShortBlocks, order='F').copy()
    # Normalized sub-frame energies (sum to ~1):
    s = np.sum(subWindows ** 2, axis=0) / (tfe + eps)
    # Entropy of the normalized sub-frame energies:
    entropy = -1 * np.sum(s * np.log2(s + eps))
    return entropy


def spectral_centroid_and_spread(X, fs):
    """Compute the spectral centroid and spread of a frame.

    :param X: magnitude spectrum, i.e. abs(FFT) of the frame.
    :param fs: sample rate in Hz.
    :return: (centroid, spread), both normalized by fs/2 into [0, 1].
    """
    ind = (np.arange(1, len(X) + 1)) * (fs / (2.0 * len(X)))
    Xt = X.copy()
    Xt = Xt / Xt.max()
    NUM = np.sum(ind * Xt)
    DEN = np.sum(Xt) + eps
    C = (NUM / DEN)                                   # Centroid
    S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN)  # Spread
    # Normalize to the Nyquist frequency:
    C = C / (fs / 2.0)
    S = S / (fs / 2.0)
    return (C, S)


def avg_mfcc(sound_obj, avg=True):
    """Extract MFCCs from the sound object.

    :param sound_obj: dict with "sound" (raw samples) and "params"
        (wave-style params, index 2 = samplerate).
    :param avg: when True, average the per-window MFCCs into one vector.
    """
    # Lazy import: python_speech_features is an optional third-party
    # dependency; importing here keeps the rest of the module usable
    # without it.
    import python_speech_features
    soundD = sound_obj["sound"]         # raw data
    sr = sound_obj["params"][2]         # samplerate
    all_mfcc = python_speech_features.mfcc(soundD, samplerate=sr, winlen=0.025, winstep=1)
    if avg:
        return np.mean(all_mfcc, axis=0)
    return all_mfcc


def mfcc_init_filter_banks(fs, nfft):
    """Compute the triangular filterbank for MFCC computation.

    :param fs: sample rate in Hz.
    :param nfft: number of FFT bins the filterbank is built over.
    :return: (fbank, freqs) with fbank shaped (40, nfft).
    """
    # Filter bank params: 13 linearly spaced filters, then 27 log-spaced.
    lowfreq = 133.33
    linsc = 200 / 3.
    logsc = 1.0711703
    numLinFiltTotal = 13
    numLogFilt = 27

    # Total number of filters
    nFiltTotal = numLinFiltTotal + numLogFilt

    # Compute frequency points of the triangles (two extra edge points):
    freqs = np.zeros(nFiltTotal + 2)
    freqs[:numLinFiltTotal] = lowfreq + np.arange(numLinFiltTotal) * linsc
    freqs[numLinFiltTotal:] = freqs[numLinFiltTotal - 1] * logsc ** np.arange(1, numLogFilt + 3)
    heights = 2. / (freqs[2:] - freqs[0:-2])

    # Compute filterbank coefficients (in FFT domain, in bins).
    fbank = np.zeros((nFiltTotal, nfft))
    nfreqs = np.arange(nfft) / (1. * nfft) * fs

    for i in range(nFiltTotal):
        lowTrFreq = freqs[i]
        cenTrFreq = freqs[i + 1]
        highTrFreq = freqs[i + 2]
        # dtype=int: np.int was deprecated in NumPy 1.20 and removed in 1.24.
        lid = np.arange(np.floor(lowTrFreq * nfft / fs) + 1,
                        np.floor(cenTrFreq * nfft / fs) + 1, dtype=int)
        lslope = heights[i] / (cenTrFreq - lowTrFreq)
        rid = np.arange(np.floor(cenTrFreq * nfft / fs) + 1,
                        np.floor(highTrFreq * nfft / fs) + 1, dtype=int)
        rslope = heights[i] / (highTrFreq - cenTrFreq)
        fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq)
        fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid])

    return fbank, freqs


def mfcc(X, fbank, nceps=13):
    """Compute the MFCCs of a frame, given its FFT magnitude ``X``."""
    mspec = np.log10(np.dot(X, fbank.T) + eps)
    ceps = dct(mspec, type=2, norm='ortho', axis=-1)[:nceps]
    return ceps


def extract_all_features0(sound_obj):
    """Legacy extractor: the averaged MFCC vector only."""
    return avg_mfcc(sound_obj)


def features_labels0():
    """Feature names matching extract_all_features0."""
    return ["mfcc{}".format(i) for i in range(13)]


def extract_all_features(sound_obj, wins=None, steps=None):
    """Extract short-term features from the sound object.

    For each analysis window: [zcr, energy, energy entropy, spectral
    centroid, spectral spread, 13 MFCCs] (18 values), flattened over all
    windows into one 1-D array.

    :param sound_obj: dict with "sound" (raw samples) and "params".
    :param wins: window length in samples (default: 50 ms).
    :param steps: hop size in samples (default: ~15 windows per file).
    """
    import python_speech_features  # lazy: optional third-party dependency
    sr = sound_obj["params"][2]    # samplerate
    nbs = sound_obj["params"][3]   # number of samples
    if wins is None:
        wins = int(0.050 * sr)
    if steps is None:
        steps = int(nbs / 15 - wins)

    # Signal normalization: scale from int16 range, remove DC, peak-normalize.
    signal = sound_obj["sound"]
    signal = signal / (2.0 ** 15)
    DC = signal.mean()
    MAX = (np.abs(signal)).max()
    signal = (signal - DC) / (MAX + 0.0000000001)

    N = len(signal)          # total number of samples
    curPos = steps // 2      # skip the very beginning
    nFFT = wins // 2

    totalNumOfFeatures = 5 + 13
    stFeatures = []
    while curPos + wins - 1 < N:  # for each short-term window until the end
        x = signal[curPos:curPos + wins]   # current window
        curPos = curPos + steps            # advance window position
        X = abs(fft(x))                    # FFT magnitude
        X = X[0:nFFT]                      # keep the positive-frequency half
        X = X / len(X)                     # normalize

        curFV = np.zeros(totalNumOfFeatures)
        curFV[0] = zcr(x)                  # zero crossing rate
        curFV[1] = energy(x)               # short-term energy
        curFV[2] = energy_entropy(x)       # short-term entropy of energy
        # Spectral centroid and spread:
        [curFV[3], curFV[4]] = spectral_centroid_and_spread(X, sr)
        # Single-frame MFCCs over this window:
        curFV[5:18] = python_speech_features.mfcc(x, samplerate=sr,
                                                  winlen=wins / sr,
                                                  winstep=steps / sr)
        stFeatures.append(curFV)

    if not stFeatures:
        # Signal shorter than one window: no features.
        return np.zeros(0)
    return np.concatenate(stFeatures, 0).flatten()


def features_labels():
    """Give a name to each per-window feature."""
    return ["zrc", "energy", "en_ent", "centr", "spread"] + ["mfcc{}".format(i) for i in range(13)]
7,458
2,943
from .combine_sector_results import * from .DiagnosticsLog import * from .EstimationModel import * from .format_regression_table import * from .save_and_load import * from .SlimResults import * from .Specification import * from .visualize_results import *
255
73
# Part of the Engi-WebGL suite.
# Blender add-on: exports the active CURVE object's first spline to a JSON
# document containing columnar "x"/"y"/"z" coordinate streams.
from bpy.props import *
from bpy_extras.io_utils import ExportHelper
from mathutils import *
from functools import reduce
import os, sys, os.path, bpy, bmesh, math, struct, base64, itertools

bl_info = {
    'name': 'Curve Export (.json)',
    'author': 'Lasse Nielsen',
    'version': (0, 2),
    'blender': (2, 72, 0),
    'location': 'File > Export > Curve (.json)',
    'description': 'Curve Export (.json)',
    'category': 'Import-Export'
}


# Compress number representation to save as much space as possible.
def cnr(n):
    """Format n with 4 decimals, then drop trailing zeros and a dangling dot."""
    s = '%.4f' % n
    while s[-1] == '0':
        s = s[:-1]
    if s[-1] == '.':
        s = s[:-1]
    return s


def format_stream(ident, id, s):
    """Render one JSON array line: `<ident><id>: [v1,v2,...]`."""
    return '%s%s: [%s]' % (ident, id, ','.join(map(cnr, s)))


class EngiCurveExporter(bpy.types.Operator, ExportHelper):
    """File > Export operator writing the selected curve to a .json file."""
    bl_idname = 'curve.json'
    bl_label = 'Export Curve (.json)'
    bl_options = {'PRESET'}
    filename_ext = ".json"
    filter_glob = StringProperty(default="*.json", options={'HIDDEN'})
    #filepath = StringProperty()
    filename = StringProperty()
    directory = StringProperty()
    # Black Magic...
    check_extension = True

    def execute(self, context):
        """Write the active curve's first spline as JSON into self.directory."""
        # Force the .json extension regardless of what the user typed.
        filename = os.path.splitext(self.filename)[0]
        filename = filename + '.json'
        # Check for a valid selection. We expect a single object of type 'CURVE'.
        if bpy.context.active_object.type != 'CURVE':
            print('The current selection is invalid. Please select a single curve to export.')
            return {'FINISHED'}
        # Only the first spline of the curve is exported.
        spline = bpy.context.active_object.data.splines[0]
        points = spline.points
        # Build the JSON document by hand (count + x/y/z streams).
        json = '{\n'
        json += '\t"count": ' + str(len(points)) + ',\n'
        x_stream = []
        y_stream = []
        z_stream = []
        for point in points:
            x_stream.append(point.co[0])
            y_stream.append(point.co[1])
            z_stream.append(point.co[2])
        json += format_stream('\t', '"x"', x_stream) + ',\n'
        json += format_stream('\t', '"y"', y_stream) + ',\n'
        json += format_stream('\t', '"z"', z_stream) + '\n'
        json += '}'
        with open(self.directory + filename, 'w') as out:
            out.write(json)
        return {'FINISHED'}


def menu_func(self, context):
    # Adds the operator to the File > Export menu.
    self.layout.operator(EngiCurveExporter.bl_idname, text="Curve (.json)")


def register():
    bpy.utils.register_class(EngiCurveExporter)
    bpy.types.INFO_MT_file_export.append(menu_func)


def unregister():
    bpy.utils.unregister_class(EngiCurveExporter)
    bpy.types.INFO_MT_file_export.remove(menu_func)


if __name__ == '__main__':
    register()
2,435
983
import mimetypes
from pathlib import Path

from organize.utils import DotDict, flatten

from .filter import Filter


class MimeType(Filter):
    """
    Filter by MIME type associated with the file extension.

    Supports a single string or list of MIME type strings as argument.
    The types don't need to be fully specified, for example "audio" matches
    everything from "audio/midi" to "audio/quicktime".

    You can see a list of known MIME types on your system by running this
    oneliner:

    .. code-block:: yaml

        python3 -c "import mimetypes as m; print('\\n'.join(sorted(set(m.common_types.values()) | set(m.types_map.values()))))"

    Examples:
        - Show MIME types:

          .. code-block:: yaml
            :caption: config.yaml

            rules:
              - folders: '~/Downloads'
                filters:
                  - mimetype
                actions:
                  - echo: '{mimetype}'

        - Filter by "image" mimetype:

          .. code-block:: yaml
            :caption: config.yaml

            rules:
              - folders: '~/Downloads'
                filters:
                  - mimetype: image
                actions:
                  - echo: This file is an image: {mimetype}

        - Filter by specific MIME type:

          .. code-block:: yaml
            :caption: config.yaml

            rules:
              - folders: '~/Desktop'
                filters:
                  - mimetype: application/pdf
                actions:
                  - echo: 'Found a PDF file'

        - Filter by multiple specific MIME types:

          .. code-block:: yaml
            :caption: config.yaml

            rules:
              - folders: '~/Music'
                filters:
                  - mimetype:
                    - application/pdf
                    - audio/midi
                actions:
                  - echo: 'Found Midi or PDF.'
    """

    def __init__(self, *mimetypes):
        # Normalize to lowercase; `flatten` lets callers pass nested lists.
        self.mimetypes = list(map(str.lower, flatten(list(mimetypes))))

    @staticmethod
    def mimetype(path):
        """Guess the MIME type from the file extension (None when unknown)."""
        type_, _ = mimetypes.guess_type(path, strict=False)
        return type_

    def matches(self, path: Path) -> bool:
        """Return True when the file's MIME type starts with any configured type."""
        mimetype = self.mimetype(path)
        if mimetype is None:
            return False
        if not self.mimetypes:
            # Configured without arguments: match any file with a known type.
            return True
        return any(mimetype.startswith(x) for x in self.mimetypes)

    def pipeline(self, args: DotDict):
        """Expose the detected type as `{mimetype}` for subsequent actions."""
        if self.matches(args.path):
            result = self.mimetype(args.path)
            return {"mimetype": result}
        return None

    def __str__(self):
        return "MimeType(%s)" % ", ".join(self.mimetypes)
2,678
752
from conans import ConanFile, CMake, tools


class LibprotobufMutatorConan(ConanFile):
    """Conan recipe for libprotobuf-mutator (structured-input fuzzing support library)."""
    name = "libprotobuf-mutator"
    version = "20200506"
    license = "Apache-2.0"
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake"
    exports_sources = "patches/*",
    build_requires = "protoc_installer/3.9.1@bincrafters/stable",
    options = {"fPIC": [True, False]}
    default_options = {"fPIC": True}
    # Short paths avoid Windows MAX_PATH issues in the build tree.
    short_paths = True

    def configure(self):
        # fPIC is meaningless on Windows; drop the option there.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def source(self):
        # Fetch sources and apply patches as declared in conandata.yml.
        tools.get(**self.conan_data["sources"][self.version])
        for patch in self.conan_data["patches"][self.version]:
            tools.patch(**patch)

    def requirements(self):
        self.requires("lzma_sdk/19.00@orbitdeps/stable")
        self.requires("zlib/1.2.11")
        self.requires("protobuf/3.9.1@bincrafters/stable")

    def build(self):
        # NOTE(review): self._source_subfolder is assigned here but read again
        # in package(); if packaging ever runs without build() in the same
        # process the attribute is missing — TODO confirm intended workflow.
        self._source_subfolder = self.conan_data["source_subfolder"][self.version]
        cmake = CMake(self)
        cmake.definitions["LIB_PROTO_MUTATOR_TESTING"] = False
        # -fPIE so the static libs can link into position-independent fuzzers.
        cmake.definitions["CMAKE_CXX_FLAGS"] = "-fPIE"
        cmake.definitions["CMAKE_C_FLAGS"] = "-fPIE"
        cmake.configure(source_folder=self._source_subfolder)
        cmake.build()

    def package(self):
        self.copy("*.h", dst="include", src="{}/src".format(self._source_subfolder))
        self.copy("*.h", dst="include/port", src="{}/port".format(self._source_subfolder))
        self.copy("*.lib", dst="lib", keep_path=False)
        self.copy("*.pdb", dst="lib", keep_path=False)
        self.copy("*.a", dst="lib", keep_path=False)

    def package_info(self):
        self.cpp_info.libdirs = ["lib"]
        self.cpp_info.libs = ["protobuf-mutator-libfuzzer", "protobuf-mutator"]
1,857
663
import numpy as np

# Unit basis vectors — presumably a y-up, z-forward convention; verify
# against the consumers of these constants.
VEC_FORWARD = np.array([0, 0, 1])
VEC_UP = np.array([0, 1, 0])
VEC_RIGHT = np.array([1, 0, 0])

# One-hot encodings of the six motion styles; the index order defines the
# style dimension used elsewhere.
STYLE_NOMOVE = np.array([1, 0, 0, 0, 0, 0])
STYLE_TROT = np.array([0, 1, 0, 0, 0, 0])
STYLE_JUMP = np.array([0, 0, 1, 0, 0, 0])
STYLE_SIT = np.array([0, 0, 0, 1, 0, 0])
STYLE_STAND = np.array([0, 0, 0, 0, 1, 0])
STYLE_LAY = np.array([0, 0, 0, 0, 0, 1])
NUM_STYLES = 6

SYS_FREQ = 60                     # system/sampling frequency — presumably Hz
DURATION = 9                      # presumably seconds — TODO confirm units
NUM_QUERIES = SYS_FREQ * DURATION # total number of query frames

# Path to the bundled mocap sample, relative to the project root.
MOCAP_SAMPLE_PATH = "animation/data/mocap-sample.txt"
524
334
#!/usr/bin/python
# CLI wrapper around codes3d.build_gene_index: builds a BED file of gene
# locations and a companion gene-information database from GTEx-style .gtf
# files (or arbitrary tabular gene files with user-specified columns).
import argparse, codes3d, configparser, os

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create a BED file detailing the locations of genes in the genome, and a database containing additional gene information. Note: If a file in .gtf format is supplied, no other arguments are required.")
    parser.add_argument("-i", "--gene_files", required=True, nargs='+', help="The gene file/s to be indexed; either in tabular format, or, by default, the .gtf file format, as supplied by the GTEx project.")
    # NOTE(review): the "default: )." endings below look truncated — the
    # actual default column indices were never filled in; confirm upstream.
    parser.add_argument("-g", "--symbol_col", type=int, help="The index of the column containing the gene symbol (non-zero based; default: ).")
    parser.add_argument("-c", "--chr_col", type=int, help="The index of the column containing the chromosome name (non-zero based; default: ).")
    parser.add_argument("-s", "--start_col", type=int, help="The index of the column containing the gene start site (non-zero based; default: ).")
    parser.add_argument("-e", "--end_col", type=int, help="The index of the column containing the gene end site (non-zero based; default: ).")
    parser.add_argument("-p", "--p_threshold_col", type=int, help="The index of the column containing the GTEx p-threshold for this gene (optional; non-zero based; default: ).")
    parser.add_argument("-H", "--no_header", action="store_true", help="Use this option if the table has no header.")
    parser.add_argument("-b", "--output_bed_fp", help="The path to which to output the resultant BED file of gene locations (default: the input file name with the extension \".bed\").")
    parser.add_argument("-o", "--output_db", help="The path to which to output the resultant gene index database (default: the input file name with the extension \".db\").")
    parser.add_argument("-C", "--config_file", default=os.path.join(os.path.dirname(__file__), "../docs/codes3d.conf"), help="The configuration file specifying the location of the CoDeS3D library (default: docs/codes3d.conf).")
    args = parser.parse_args()
    config = configparser.ConfigParser()
    config.read(args.config_file)
    # All heavy lifting is delegated to the codes3d library.
    codes3d.build_gene_index(args.gene_files, args.output_bed_fp, args.output_db, config, args.symbol_col, args.chr_col, args.start_col, args.end_col, args.p_threshold_col, args.no_header)
2,222
685
import numpy as np

from Optimizer.path import get_x_substeps
from Kinematic import frames, chain as kc


def initialize_frames(shape, robot, mode='hm'):
    """Allocate a frame array of shape + (n_frames,) for the given robot."""
    return frames.initialize_frames(shape=shape + (robot.n_frames,), n_dim=robot.n_dim, mode=mode)


def initialize_frames_jac(shape, robot, mode='hm'):
    """Allocate frames plus a zero-initialized Jacobian array (extra n_dof axis)."""
    f = initialize_frames(shape=shape, robot=robot, mode=mode)
    j = frames.initialize_frames(shape=shape + (robot.n_dof, robot.n_frames), n_dim=robot.n_dim, mode='zero')
    return f, j


# General
def get_frames(q, robot):
    """Forward kinematics: homogeneous frames for configuration(s) q."""
    return robot.get_frames(q)


def get_frames_jac(*, q, robot):
    """Forward kinematics plus Jacobians of the frames w.r.t. q."""
    return robot.get_frames_jacs(q=q)


def get_x_frames(*, q, robot):
    """Positions only: the translation column of each homogeneous frame."""
    return robot.get_frames(q=q)[..., :-1, -1]


def frames2pos(f, frame_idx, rel_pos):
    """Transform homogeneous points rel_pos by the selected frames; drop the w row."""
    return (f[:, :, frame_idx, :, :] @ rel_pos[:, :, np.newaxis])[..., :-1, 0]


def frames2spheres(f, robot):
    """x_spheres (n_samples, n_wp, n_links, n_dim)"""
    return frames2pos(f, frame_idx=robot.spheres_frame_idx, rel_pos=robot.spheres_position)


def frames2spheres_jac(f, j, robot):
    """
    x_spheres (n_samples, n_wp, n_spheres, n_dim)
    dx_dq (n_samples, n_wp, n_dof, n_spheres, n_dim)
    """
    x_spheres = frames2spheres(f=f, robot=robot)
    dx_dq = (j[:, :, :, robot.spheres_frame_idx, :, :] @ robot.spheres_position[:, :, np.newaxis])[..., :-1, 0]
    return x_spheres, dx_dq


def get_x_spheres(q, robot, return_frames2=False):
    """Sphere positions for q; optionally also return the frames."""
    f = robot.get_frames(q=q)
    x_spheres = frames2spheres(f=f, robot=robot)
    if return_frames2:
        return f, x_spheres
    else:
        return x_spheres


def get_x_spheres_jac(*, q, robot, return_frames2=False):
    """Sphere positions and their Jacobians; optionally also the frames/jacs."""
    f, j = robot.get_frames_jac(q=q)
    x_spheres, dx_dq = frames2spheres_jac(f=f, j=j, robot=robot)
    if return_frames2:
        return (f, j), (x_spheres, dx_dq)
    else:
        return x_spheres, dx_dq


def get_x_spheres_substeps(*, q, robot, n_substeps, return_frames2=False):
    """Sphere positions on a path densified with n_substeps per segment."""
    q_ss = get_x_substeps(x=q, n_substeps=n_substeps, infinity_joints=robot.infinity_joints, include_end_point=True)
    return get_x_spheres(q=q_ss, robot=robot, return_frames2=return_frames2)


def get_x_spheres_substeps_jac(*, q, robot, n_substeps, return_frames2=False):
    """Sphere positions + Jacobians on a densified path."""
    q_ss = get_x_substeps(x=q, n_substeps=n_substeps, infinity_joints=robot.infinity_joints, include_end_point=True)
    return get_x_spheres_jac(q=q_ss, robot=robot, return_frames2=return_frames2)


def get_frames_substeps(*, q, robot, n_substeps):
    """Frames on a densified path."""
    q_ss = get_x_substeps(x=q, n_substeps=n_substeps, infinity_joints=robot.infinity_joints, include_end_point=True)
    return get_frames(q=q_ss, robot=robot)


def get_frames_substeps_jac(*, q, robot, n_substeps):
    """Frames + Jacobians on a densified path."""
    q_ss = get_x_substeps(x=q, n_substeps=n_substeps, infinity_joints=robot.infinity_joints, include_end_point=True)
    return get_frames_jac(q=q_ss, robot=robot)


# nfi - next frame index
# iff - influence frame frame
# Helper
# Combine fun
def create_frames_dict(f, nfi):
    """
    Create a dict to minimize the calculation of unnecessary transformations
    between the frames.
    The value to the key 0 holds all transformations form the origin to the
    whole chain. Each next field holds the transformation from the current
    frame to all frames to come.
    The calculation happens from back to front, to save some steps

    #     0    1    2    3    4
    #    F01
    #    F02  F12
    #    F03  F13  F23
    #    F04  F14  F24  F34
    #    F05  F15  F25  F35  F45
    """
    n_frames = f.shape[-3]
    d = {}
    # Walk the chain backwards so each entry can reuse its successors.
    for i in range(n_frames - 1, -1, -1):
        nfi_i = nfi[i]
        if nfi_i == -1:
            # Leaf frame: only itself.
            d[i] = f[..., i:i + 1, :, :]
        elif isinstance(nfi_i, (list, tuple)):
            # Branching chain: concatenate all successor branches.
            d[i] = np.concatenate([
                f[..., i:i + 1, :, :],
                f[..., i:i + 1, :, :] @ np.concatenate([d[j] for j in nfi_i], axis=-3)], axis=-3)
        else:
            d[i] = np.concatenate([f[..., i:i + 1, :, :],
                                   f[..., i:i + 1, :, :] @ d[nfi_i]], axis=-3)
    return d


def combine_frames(f, prev_frame_idx):
    """Accumulate relative frames in place: each frame premultiplied by its parent."""
    for i, pfi in enumerate(prev_frame_idx[1:], start=1):
        f[..., i, :, :] = f[..., pfi, :, :] @ f[..., i, :, :]


def combine_frames_jac(j, d, robot):
    """Propagate per-joint frame Jacobians through the kinematic chain, in place.

    j: Jacobian array (..., n_dof, n_frames, n_dim+1, n_dim+1), modified in place.
    d: frames dict from create_frames_dict().
    """
    jf_all, jf_first, jf_last = kc.__get_joint_frame_indices_first_last(jfi=robot.joint_frame_idx)
    pfi_ = robot.prev_frame_idx[jf_first]
    joints_ = np.arange(robot.n_dof)[pfi_ != -1]
    jf_first_ = jf_first[pfi_ != -1]
    pfi_ = pfi_[pfi_ != -1]

    # Previous to joint frame
    # j(b)__a_b = f__a_b * j__b
    j[..., joints_, jf_first_, :, :] = (d[0][..., pfi_, :, :] @ j[..., joints_, jf_first_, :, :])

    # After
    for i in range(robot.n_dof):
        jf_inf_i = robot.joint_frame_influence[i, :]
        jf_inf_i[:jf_last[i] + 1] = False
        nfi_i = robot.next_frame_idx[jf_last[i]]

        # Handle joints which act on multiple frames
        if jf_first[i] != jf_last[i]:
            for kk, fj_cur in enumerate(jf_all[i][:-1]):
                jf_next = jf_all[i][kk + 1]
                jf_next1 = jf_next - 1
                if jf_next - fj_cur > 1:
                    j[..., i, fj_cur + 1:jf_next, :, :] = (j[..., i, fj_cur:fj_cur + 1, :, :] @
                                                          d[robot.next_frame_idx[fj_cur]][..., :jf_next - fj_cur - 1, :, :])
                j[..., i, jf_next, :, :] = ((j[..., i, jf_next1, :, :] @ d[robot.next_frame_idx[jf_next1]][..., 0, :, :]) +
                                            (d[0][..., jf_next1, :, :] @ j[..., i, jf_next, :, :]))

        # j(b)__a_c = j__a_b * f__b_c
        if isinstance(nfi_i, (list, tuple)):
            # (`j` in this comprehension is comprehension-scoped and does not
            # clobber the Jacobian array; trailing comma in the index keeps
            # the original (..., i, slice, :) indexing.)
            j[..., i, jf_last[i]:jf_last[i] + 1, :, ] @ np.concatenate([d[j] for j in nfi_i], axis=-3)
        elif nfi_i != -1:
            j[..., i, jf_inf_i, :, :] = (j[..., i, jf_last[i]:jf_last[i] + 1, :, :] @ d[nfi_i])
5,900
2,506
# Resolve a file of hostnames to their A records and optionally save the
# unique IPs to an output file. Best-effort: unresolvable names are skipped.
import dns.resolver
import dns.ipv4
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-l', "--list", help="List of dns names you want IP's for")
parser.add_argument('-o', "--output", help="Output file to save list")
args = parser.parse_args()

# Unique answers, in discovery order.
# (Was `ip_list = [...]`, which seeded the list with the Ellipsis object.)
ip_list = []


def _strip_scheme(host):
    """Remove a leading http:// or https:// scheme.

    str.strip('https://') strips *characters* from both ends, mangling
    hostnames that start/end with any of h, t, p, s, ':' or '/'; prefix
    removal must be explicit.
    """
    for prefix in ("https://", "http://"):
        if host.startswith(prefix):
            return host[len(prefix):]
    return host


if args.list:
    # Open only after confirming -l was supplied (previously open() ran
    # unconditionally and crashed on open(None)); with-block closes the file.
    with open(args.list, 'r', newline='') as subs:
        for host in subs:
            host = _strip_scheme(host.strip())
            if not host:
                continue
            try:
                answer = dns.resolver.query(host, 'A')
            except Exception:
                # Best-effort resolution: skip names that fail to resolve.
                continue
            for item in answer:
                if item not in ip_list:
                    ip_list.append(item)
                    print(item)

if args.output:
    with open(args.output, "w") as out_file:
        for p in ip_list:
            out_file.write(str(p))
            out_file.write("\n")
977
302
# Aggregates benchmark trial results (latency CSVs under ./trials) into
# TPS/latency-percentile reports, plus helpers for network/op-count stats.
import pandas
import numpy as np
import math
import os
import sys
import re
from utils import *

DIR_PATH = os.path.dirname(os.path.realpath(__file__))
# Latency percentiles reported per thread count.
percentiles = [10, 25, 50, 75, 90, 95, 99, 99.9]
DATA_FOLDER = DIR_PATH + '/data'


def getResult(trial_string, thread, client_num=2):
    """Aggregate one trial (given thread count) over all clients.

    Returns [thread, tps, p10, p25, ...] matching `percentiles`.
    Times in the lats files are nanoseconds (columns: start, end).
    """
    print("thread: {}".format(thread))
    datas = []
    end_times = []
    for i in range(1, client_num + 1):
        first_start_time = math.inf
        last_start_time = 0
        first_end_time = math.inf
        last_end_time = 0
        trial_name = DIR_PATH + '/./trials/{}-{}-{}'.format(i, trial_string, thread)
        lats_folder = trial_name + '/cobra/lats'
        if not os.path.exists(lats_folder):
            continue
        files = os.listdir(lats_folder)
        for fname in files:
            fpath = lats_folder + '/' + fname
            data = pandas.read_csv(fpath, sep=' ').values
            start_time = np.min(data[:, 0])
            end_time = np.max(data[:, 1])
            first_start_time = min(first_start_time, start_time)
            last_start_time = max(last_start_time, start_time)
            first_end_time = min(first_end_time, end_time)
            last_end_time = max(last_end_time, end_time)
        end_times.append(first_end_time - first_start_time)
        # NOTE(review): only the last lats file's `data` survives the loop
        # above, so a client with several files contributes just one —
        # confirm each client writes a single lats file.
        data -= first_start_time  # re-base this client's times to its start
        datas.append(data)
        print("{}: start time gap: {}, end time gap: {}".format(i, (last_start_time - first_start_time) / 1e9, (last_end_time - first_end_time) / 1e9))
    print("total end time gap of all clients: {}s".format((max(end_times) - min(end_times)) / 1e9))
    # Count the window [0, earliest client end) so all clients overlap.
    count_start = 0
    count_end = min(end_times) - 0
    count_time = count_end - count_start
    print("total time: {}s".format((last_end_time - first_start_time) / 1e9))
    print("counted time: {}s".format(count_time / 1e9))
    res = []
    res.append(thread)
    lats = []
    before_trimming = 0
    for data in datas:
        before_trimming += data.shape[0]
        # Keep only operations that finished inside the counting window.
        data = data[np.where(data[:, 1] > count_start)]
        data = data[np.where(data[:, 1] < count_end)]
        lats += (data[:, 1] - data[:, 0]).tolist()
    print("Data size before trimming: {}, after trimming: {}".format(before_trimming, len(lats)))
    tps = len(lats) / count_time * 1e9  # ops per second (times are ns)
    res.append(tps)
    print('TPS: {}'.format(tps))
    lats = np.array(lats)
    lats.sort()
    print('Latencies:')
    for per in percentiles:
        latency_value = np.percentile(lats, per) / 1e6  # ns -> ms
        print('{}%(ms) : {}'.format(per, latency_value))
        res.append(latency_value)
    # plt.hist(lats[:-int(0.001*len(lats))], bins="auto")
    # plt.show()
    return res


def get_report(trial_string, client_num):
    """Collect results for every thread count found for this trial and write a .data table."""
    thread_tps_lats = []
    threads = {}
    dir_names = os.listdir('trials')
    # Thread counts are encoded as the trailing '-N' of the trial folder name.
    for s in dir_names:
        if '-' + trial_string + '-' in s:
            threads[int(s.split('-')[-1])] = True
    if len(threads.keys()) == 0:
        return
    for thread in sorted(threads.keys()):
        res = getResult(trial_string, thread, client_num)
        thread_tps_lats.append(res)
    df = pandas.DataFrame(thread_tps_lats)
    if not os.path.exists(DATA_FOLDER):
        os.makedirs(DATA_FOLDER)
    fname = DATA_FOLDER + '/{}.data'.format(trial_string)
    df.to_csv(fname, sep=' ', header=['#thread', 'tps'] + percentiles, index=False, float_format="%.5f")
    printG("FINISHED: " + trial_string)


def get_network_old(fname):
    """Average the top portion of per-interval rx/tx throughput samples."""
    net_thpt_rx = []
    net_thpt_tx = []
    with open(fname) as f:
        for sline in f:
            line = sline.split()
            net_thpt_tx.append(float(line[1]))
            net_thpt_rx.append(float(line[2]))
    net_thpt_rx = np.array(net_thpt_rx)
    net_thpt_tx = np.array(net_thpt_tx)
    net_thpt_rx.sort()
    net_thpt_tx.sort()
    # print('receive peak: {}, send peak: {}'.format(net_thpt_rx[-1], net_thpt_tx[-1]))
    # NOTE(review): len*100/30 exceeds len, so the negative slice start is
    # clipped and this averages (almost) ALL samples, not the "top 10%"
    # the old comments claim — confirm intended fraction.
    top10p = int(len(net_thpt_rx) * 100 / 30)
    avg_rx = net_thpt_rx[-top10p: -1].mean()
    avg_tx = net_thpt_tx[-top10p: -1].mean()
    # print('avg of top 10% rx: {}'.format(avg_rx))
    # print('avg of top 10% tx: {}'.format(avg_tx))
    return avg_rx, avg_tx


def get_num_op(trial_string):
    """Grep client 1's log (24 threads) for the reported 'NumOp:' count."""
    threads = {}
    dir_names = os.listdir('trials')
    for s in dir_names:
        if trial_string in s:
            threads[int(s.split('-')[-1])] = True
    if len(threads.keys()) == 0:
        printB('not found: ' + trial_string)
        return
    thread = 24
    trial_name = DIR_PATH + '/./trials/{}-{}-{}/client.txt'.format(1, trial_string, thread)
    result = ''
    with open(trial_name) as f:
        for line in f:
            if re.search(r'NumOp: [0-9]+', line):
                result = line
                break
    result = result.split()[1]
    return result


def get_network(fname):
    """Read a 4-line netstats snapshot file; return (rx, tx) byte deltas."""
    lines = []
    with open(fname) as f:
        for sline in f:
            line = sline.split()
            lines.append(line)
    rx = int(lines[2][4]) - int(lines[0][4])
    tx = int(lines[3][4]) - int(lines[1][4])
    return (rx, tx)


def get_trace_size(trial_string):
    """Grep client 1's log (24 threads) for the reported 'SizeOfTrace:' value."""
    threads = {}
    dir_names = os.listdir('trials')
    for s in dir_names:
        if trial_string in s:
            threads[int(s.split('-')[-1])] = True
    if len(threads.keys()) == 0:
        printB('not found: ' + trial_string)
        return
    thread = 24
    trial_name = DIR_PATH + '/./trials/{}-{}-{}/client.txt'.format(1, trial_string, thread)
    result = ''
    with open(trial_name) as f:
        for line in f:
            if re.search(r'SizeOfTrace: [0-9]+', line):
                result = line
                break
    result = result.split()[1]
    return result


def main():
    """Dispatch on argv[1]: default = full report sweep; 'net', 'numop', 'tracesize' = summaries."""
    if len(sys.argv) == 1:
        databases = ['rocksdb', 'postgres', 'google']
        workload = 'cheng'
        inst_level = 'cloud'
        for database in databases:
            for contention in ['low', 'high']:
                for workload in ['cheng', 'tpcc', 'twitter', 'ycsb', 'rubis']:
                    for inst_level in ['no', 'ww', 'cloud', 'cloudnovnofz', 'cloudnofz', 'local']:
                        trial_string = '{}-{}-{}-{}'.format(database, workload, contention, inst_level)
                        get_report(trial_string, 10 if database == 'postgres' else 1)
    elif sys.argv[1] == 'net':
        database = 'postgres'
        workloads = ['cheng', 'ycsb', 'twitter', 'rubis', 'tpcc']
        inst_levels = ['no', 'local']
        result_str = 'workload ' + ' '.join(inst_levels) + '\n'
        for contention in ['low']:
            for workload in workloads:
                result_row = workload
                for inst_level in inst_levels:
                    trial_string = '{}-{}-{}-{}'.format(database, workload, contention, inst_level)
                    thread = 24
                    rx, tx = get_network('netstats/netstats-' + trial_string + '-{}.log'.format(thread))
                    print('{}-{}: {}, {}'.format(workload, inst_level, rx, tx))
                    result_row += ' {}'.format(tx)
                result_str += result_row + '\n'
        print(result_str)
        return
    elif sys.argv[1] == 'numop':
        inst_levels = ['no', 'cloud', 'ww']
        result_str = 'workload ' + ' '.join(inst_levels) + '\n'
        for contention in ['low']:
            for workload in ['cheng', 'tpcc', 'twitter', 'ycsb', 'rubis']:
                result_str += workload
                for inst_level in inst_levels:
                    # NOTE(review): `database` is never assigned in this
                    # branch, so this line raises NameError — the other
                    # branches set it explicitly; intended value unclear.
                    trial_string = '{}-{}-{}-{}'.format(database, workload, contention, inst_level)
                    numop = get_num_op(trial_string)
                    result_str += ' {}'.format(numop)
                result_str += '\n'
        print(result_str)
    elif sys.argv[1] == 'tracesize':
        database = 'rocksdb'
        inst_levels = ['cloud', 'ww']
        result_str = 'workload ' + ' '.join(inst_levels) + '\n'
        for contention in ['low']:
            for workload in ['cheng', 'ycsb', 'twitter', 'rubis', 'tpcc']:
                result_str += workload
                for inst_level in inst_levels:
                    trial_string = '{}-{}-{}-{}'.format(database, workload, contention, inst_level)
                    numop = get_trace_size(trial_string)
                    result_str += ' {}'.format(numop)
                result_str += '\n'
        print(result_str)


if __name__ == "__main__":
    main()
8,416
2,974
from typing import Optional

from platypush.backend import Backend
from platypush.context import get_plugin
from platypush.message.event.foursquare import FoursquareCheckinEvent


class FoursquareBackend(Backend):
    """
    This backend polls for new check-ins on the user's Foursquare account and
    triggers an event when a new check-in occurs.

    Requires:

        * The :class:`platypush.plugins.foursquare.FoursquarePlugin` plugin configured and enabled.

    Triggers:

        - :class:`platypush.message.event.foursquare.FoursquareCheckinEvent` when a new check-in occurs.

    """

    # Name of the variable-plugin key used to persist the newest check-in's
    # createdAt timestamp across restarts.
    _last_created_at_varname = '_foursquare_checkin_last_created_at'

    def __init__(self, poll_seconds: Optional[float] = 60.0, *args, **kwargs):
        """
        :param poll_seconds: How often the backend should check for new check-ins (default: one minute).
        """
        super().__init__(*args, poll_seconds=poll_seconds, **kwargs)
        self._last_created_at = None

    def __enter__(self):
        # Restore the persisted timestamp (0 when it was never stored).
        # NOTE(review): neither calls super().__enter__() nor returns self —
        # presumably the Backend base class drives the lifecycle; confirm.
        self._last_created_at = int(get_plugin('variable').get(self._last_created_at_varname).
                                    output.get(self._last_created_at_varname) or 0)
        self.logger.info('Started Foursquare backend')

    def loop(self):
        """Poll once: post a FoursquareCheckinEvent iff the newest check-in is unseen."""
        checkins = get_plugin('foursquare').get_checkins().output
        if not checkins:
            return
        # First element is treated as the most recent check-in — presumably
        # the plugin returns newest-first; verify against the plugin contract.
        last_checkin = checkins[0]
        last_checkin_created_at = last_checkin.get('createdAt', 0)
        if self._last_created_at and last_checkin_created_at <= self._last_created_at:
            return  # already reported
        self.bus.post(FoursquareCheckinEvent(checkin=last_checkin))
        # Persist the high-water mark so restarts don't re-fire old events.
        self._last_created_at = last_checkin_created_at
        get_plugin('variable').set(**{self._last_created_at_varname: self._last_created_at})


# vim:sw=4:ts=4:et:
1,821
574
import os
import time

import pandas as pd

from src.utils import get_project_root
# NOTE(review): this import was duplicated ("REPLACE_DICT1, REPLACE_DICT1")
# and applied twice below — the second application is a no-op; the author
# likely intended a second dictionary (REPLACE_DICT2?). TODO confirm.
from src.data.item_names_replacement import REPLACE_DICT1

# Years of eKasa receipt exports to load.
YEARS = [str(x) for x in list(range(2013, 2021))]
ROOT_DIR = get_project_root()


def string_to_float(number):
    """Convert a 'sales_value' cell to float.

    The raw data is faulty: 28 rows contain values such as
    '400.200.000.000.000.000'. Unparseable cells map to the historical
    placeholder 0.5 (kept for backward compatibility).
    """
    try:
        return float(number)
    except (TypeError, ValueError):  # was a bare except; keep the fallback
        return 0.5


def load_data(data_abs_path: str) -> pd.DataFrame:
    """Load raw data

    Parameters:
    -----------
    data_abs_path: absolute path of csv data

    Returns:
    --------
    data_df: raw data dataframe indexed by sales_datetime (UTC)
    """
    data_df = pd.read_csv(data_abs_path)
    data_df.sales_datetime = pd.to_datetime(data_df.sales_datetime, format='%Y-%m-%d', utc=True)
    data_df.set_index('sales_datetime', inplace=True)
    return data_df


def arrange_data(data_df):
    """Rename columns, index by datetime and derive the per-item price.

    :param data_df: raw positional-column dataframe from pd.read_csv(header=None).
    :return: the same dataframe, cleaned, indexed by sales_datetime.
    """
    # Drop unnecessary columns -> no known meaning
    data_df.drop(labels=[4, 10, 11], axis=1, inplace=True)
    data_df.columns = ['bar_name', 'number2', 'feature1', 'sales_datetime', 'feature2',
                       'item_name', 'item_class', 'sales_qty', 'feature3', 'sales_value']
    data_df.sales_datetime = pd.to_datetime(data_df.sales_datetime, utc=True)
    data_df.set_index('sales_datetime', inplace=True)
    # Unit price; abs() guards against negative (refund) rows.
    data_df['item_price'] = abs(data_df['sales_value'] / data_df['sales_qty'])
    return data_df


def load_dataset():
    """Load every year's receipts and aggregate to daily per-item sales.

    :return: dataframe with columns item_name, sales_qty (sum),
        item_price (mean), sales_value (sum), one row per item per day.
    """
    columns_to_keep = ['item_name', 'sales_qty', 'sales_value', 'item_price']
    # Collect per-year frames and concatenate once at the end:
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0,
    # and repeated appends are quadratic anyway.
    yearly_frames = []
    for year in YEARS:
        start_time = time.time()
        filename = os.path.join(ROOT_DIR, f'data/raw/{year}_eKasa_RECEIPT_ENTRIES.csv')
        df = pd.read_csv(filename, delimiter=';', header=None,
                         converters={12: string_to_float})
        data_df = arrange_data(df)
        yearly_frames.append(data_df[columns_to_keep])
        print("Dataframe shape: ", df.shape)
        end_time = time.time()
        print("Time (s): ", end_time - start_time)
        print(f"{year} done.")

    all_data_df = pd.concat(yearly_frames)
    all_data_df.sales_qty = all_data_df.sales_qty.astype('int64')
    # Canonicalize item names (the old second, identical replace call was a no-op).
    all_data_df.item_name.replace(to_replace=REPLACE_DICT1, inplace=True)
    all_data_df.index.name = 'sales_date'
    all_data_daily_sales = all_data_df.groupby(['item_name', pd.Grouper(freq='D')]).agg(
        {'sales_qty': 'sum', 'item_price': 'mean', 'sales_value': 'sum'}).reset_index()
    print(all_data_daily_sales)
    return all_data_daily_sales
2,980
1,060
""" This file contains assorted general utility functions used by other modules in the aiml_bot package. """ # TODO: Correctly handle abbreviations. def split_sentences(text: str) -> list: """Split the string s into a list of sentences.""" if not isinstance(text, str): raise TypeError(text) position = 0 results = [] length = len(text) while position < length: try: period = text.index('.', position) except ValueError: period = length + 1 try: question = text.index('?', position) except ValueError: question = length + 1 try: exclamation = text.index('!', position) except ValueError: exclamation = length + 1 end = min(period, question, exclamation) sentence = text[position:end].strip() if sentence: results.append(sentence) position = end + 1 # If no sentences were found, return a one-item list containing # the entire input string. if not results: results.append(text.strip()) # print(results) return results
1,143
305
# IronPython console Snake game.  Relies on the .NET `System` namespace for
# console I/O, so this runs under IronPython (Python 2 semantics) on Windows.
import os, time, random
from collections import defaultdict
from System import Console, ConsoleColor, ConsoleKey
from System.Threading import Thread, ThreadStart


class Screen(object):
    """Owns the console: keyboard polling thread plus all drawing."""
    # Console colours used throughout the game.
    red = ConsoleColor.Red; green = ConsoleColor.Green; blue = ConsoleColor.Blue; black = ConsoleColor.Black
    # Playing-field size as (rows, cols); each cell is two console columns wide.
    dimension = (21, 39)

    def __update_input(self):
        # Background thread: map WASD / IJKL / arrow keys to Snake direction
        # codes; any other key yields None via the defaultdict factory.
        mapping = defaultdict(lambda: None, {
            ConsoleKey.A: Snake.left, ConsoleKey.J: Snake.left, ConsoleKey.LeftArrow: Snake.left,
            ConsoleKey.D: Snake.right, ConsoleKey.L: Snake.right, ConsoleKey.RightArrow: Snake.right,
            ConsoleKey.W: Snake.up, ConsoleKey.I: Snake.up, ConsoleKey.UpArrow: Snake.up,
            ConsoleKey.S: Snake.down, ConsoleKey.K: Snake.down, ConsoleKey.DownArrow: Snake.down})
        while True:
            # ReadKey(True) suppresses the echo; the game loop polls last_input.
            self.last_input = mapping[Console.ReadKey(True).Key]

    def __init__(self):
        self.last_input = None; self.__input_update_thread = Thread(ThreadStart(self.__update_input)); self.__input_update_thread.Start()
        os.system("cls")  # os.system("clear")
        Console.Title = "Snake by LuYU426"
        # The next line needed to be commented out on Unix-like systems.
        # However before running, the console needs to be adjusted accordingly
        Console.CursorVisible = False; Console.WindowWidth = 80; Console.WindowHeight = 25; Console.BufferHeight = Console.WindowHeight; Console.BufferWidth = Console.WindowWidth
        # Draw the '#' border and blank out the interior play area.
        for i in range(0, 24):
            for j in range(0, 80):
                if i == 0 or j == 0:
                    self.__show(j, i, Screen.black, "#")
                elif i == 22 or j == 79:
                    self.__show(j, i, Screen.black, "#")
                else:
                    self.__show(j, i, Screen.black, " ")

    def __show(self, left, top, color, content):
        # Low-level write: position cursor, set background colour, emit text.
        Console.CursorLeft = left; Console.CursorTop = top; Console.BackgroundColor = color; Console.Write(content)

    def show_score(self, score):
        # Score line lives below the border, at row 23.
        self.__show(3, 23, Screen.black, "Score: {0}".format(score))

    def color(self, position, width, height, color):
        # Paint a rectangle of game cells; cells are 2 console columns wide
        # and offset by 1 to stay inside the border.
        for row in range(position[0], position[0] + height):
            for col in range(position[1], position[1] + width):
                self.__show(col * 2 + 1, row + 1, color, " ")


class GameLogic(object):
    def update(self, screen, snake, fruit, stats):
        """Advance one tick; return False when the game is over."""
        stats.increase_score()
        screen.show_score(stats.current_score)
        # update_result layout: [needs_redraw, hit_self, new_head, removed_tail]
        update_result = snake.update(screen.last_input, fruit.current_position)
        if update_result[0] == False:
            return True  # speed throttling: nothing moved this tick
        if update_result[1] == True:
            return False  # snake ran into itself
        if update_result[2][0] < 0 or update_result[2][1] < 0:
            return False  # out of bounds (top/left)
        if update_result[2][0] >= Screen.dimension[0] or update_result[2][1] >= Screen.dimension[1]:
            return False  # out of bounds (bottom/right)
        screen.color(update_result[2], 1, 1, screen.green)
        if update_result[3] is None:
            # Fruit was eaten: respawn somewhere not occupied by the snake.
            fruit.reset_position()
            while snake.position_in_buffer(fruit.current_position):
                fruit.reset_position()
            screen.color(fruit.current_position, 1, 1, screen.red)
            stats.increase_level()
        else:
            # Normal move: erase the vacated tail cell.
            screen.color(update_result[3], 1, 1, screen.black)
        return True

    def end(self):
        # NOTE(review): uses the module-level `screen` global defined in the
        # __main__ block rather than a parameter -- only safe when run as a script.
        screen.color((0, 0), 39, 21, Screen.blue)


class Snake(object):
    # Direction codes chosen so opposite directions differ only in the low
    # bit (up/down = 0x00/0x01, left/right = 0x10/0x11).
    up = 0x00; down = 0x01; left = 0x10; right = 0x11

    def __init__(self):
        self.__buffer = list(); self.__current_time_slice = 0
        # Start in the middle of the field.  NOTE(review): `/` here assumes
        # integer division (IronPython / Python 2); under Python 3 this would
        # produce float coordinates -- confirm target runtime.
        self.__buffer = [[Screen.dimension[0]/2 + 1, Screen.dimension[1]/2 + 1]]
        self.__current_direction = Snake.up

    def __current_speed(self):
        # Longer snake -> smaller slice count -> faster updates; floored at 1.
        _s = 8 - len(self.__buffer)/2
        return 1 if _s < 1 else _s

    def position_in_buffer(self, fruit_pos):
        # True when the given cell is occupied by any snake segment.
        for item in self.__buffer:
            if item == fruit_pos:
                return True
        return False

    # returns [whether_need_update_screen(bool), whether_fail(bool), head_pos_to_draw(x,y), tail_pos_to_remove(x,y)]
    def update(self, direction, fruit_pos):
        self.__current_time_slice += 1
        self.__current_time_slice %= self.__current_speed()
        if self.__current_time_slice != 0:
            return [False, False]
        if direction is None:
            direction = self.__current_direction
        # XOR == 0x01 means the request is the exact opposite direction;
        # ignore it so the snake cannot reverse into itself instantly.
        if direction ^ self.__current_direction == 0x01:
            direction = self.__current_direction
        self.__current_direction = direction; candidate = [0, 0]; head = self.__buffer[len(self.__buffer) - 1]
        candidate[0] = head[0] + 1 if self.__current_direction == Snake.down else head[0] - 1 if self.__current_direction == Snake.up else head[0]
        candidate[1] = head[1] + 1 if self.__current_direction == Snake.right else head[1] - 1 if self.__current_direction == Snake.left else head[1]
        if self.position_in_buffer(candidate):
            return [True, True]
        if candidate == fruit_pos:
            # Grow: keep the tail segment.
            self.__buffer.append(candidate); return [True, False, candidate, None]
        else:
            # Plain move: append new head, drop the oldest segment.
            self.__buffer.append(candidate); tail = self.__buffer[0]; self.__buffer.remove(tail)
            return [True, False, candidate, tail]


class Fruit(object):
    def __init__(self):
        self.reset_position()

    @property
    def current_position(self):
        return self.__position

    def reset_position(self):
        # Uniformly random cell; caller re-rolls if it lands on the snake.
        self.__position = [random.randint(0, Screen.dimension[0]-1), random.randint(0, Screen.dimension[1]-1)]


class Stastics(object):
    # NOTE(review): class name is a typo for "Statistics"; kept as-is since
    # renaming would break the __main__ block below.
    def __init__(self):
        self.current_score = 0; self.__level = 0

    def increase_score(self):
        self.current_score += 1

    def increase_level(self):
        # Each level awards exponentially more bonus points (2^(level-1)).
        self.__level += 1; self.current_score += pow(2, self.__level-1)


if __name__ == "__main__":
    screen = Screen(); logic = GameLogic(); stats = Stastics(); fruit = Fruit(); snake = Snake()
    # Ensure the first fruit does not spawn on the snake.
    while snake.position_in_buffer(fruit.current_position):
        fruit.reset_position()
    screen.color(fruit.current_position, 1, 1, screen.red)
    # ~20 FPS game loop; update() returns False on game over.
    while logic.update(screen, snake, fruit, stats):
        time.sleep(0.05)
    logic.end()
5,858
1,839
# #! coding:utf-8
"""Reader for diaggui (DTT) XML result files.

Parses LIGO_LW sections of type Spectrum / TransferFunction / TestParameter
and decodes their base64 binary streams into numpy arrays.
"""
import xml.etree.ElementTree as ET
from xml.etree import ElementTree
import base64
import binascii
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict

# Map of the numeric 'Subtype' parameter to measurement kind.
SubType = {'1': 'ASD', '2': 'CSD', '3': 'TF', '4': '???', '5': 'COH'}
average_type = {'0': 'Fixed', '1': 'Exponential', '2': 'Accumulative'}  # not comfirmed
window_type = {'0': 'Uniform', '1': 'Hanning', '2': 'Flat-top',
               '3': 'Welch', '4': 'Bartlet', '5': 'BMH'}  # not comfirmed


class DttXMLSpectrum():
    """One LIGO_LW[@Type='Spectrum'] section: metadata plus decoded data."""

    def __init__(self, child):
        self.Name = child.attrib["Name"]
        self._getAttribute(child)
        self._getStream(child)

    def _getAttribute(self, child):
        # Pull the measurement parameters from <Param>/<Time> elements.
        self.dt = child.find("./Param[@Name='dt']").text
        self.t0 = child.find("./Time[@Type='GPS']").text
        self.BW = child.find("./Param[@Name='BW']").text
        self.f0 = child.find("./Param[@Name='f0']").text
        self.df = child.find("./Param[@Name='df']").text
        self.N = int(child.find("./Param[@Name='N']").text)
        self.Window = child.find("./Param[@Name='Window']").text
        self.AveType = child.find("./Param[@Name='AverageType']").text
        self.Averages = child.find("./Param[@Name='Averages']").text
        self.Flag = child.find("./Param[@Name='Flag']").text
        self.Subtype = SubType[child.find("./Param[@Name='Subtype']").text]
        self.M = int(child.find("./Param[@Name='M']").text)
        self.dim = child.find('./Array/Dim').text
        # Merge the per-channel <Param Unit='channel'> entries into a single
        # OrderedDict mapping e.g. 'ChannelA' -> channel name.
        channel = child.findall("./Param[@Unit='channel']")
        self.Channel = list(map(lambda x: {x.attrib['Name']: x.text}, channel))
        Channel = OrderedDict(self.Channel[0])
        for c in self.Channel:
            Channel.update(OrderedDict(c))
        self.Channel = Channel

    def showInfo(self):
        """Print a human-readable summary of the measurement parameters."""
        fmt = 'dt [s]\t:{dt:2.10f}\n'+\
              't0(GPS)\t:{t0:10.1f}\n'+\
              'BW [Hz]\t:{bw:2.10f} \n'+\
              'f0 [Hz]\t:{f0:2.10f} \n'+\
              'df [Hz]\t:{df:2.10f} \n'+\
              'average\t:{average:12d} \n'+\
              'Points\t:{n:12d} \n'+\
              'window\t:{window:12s} \n'+\
              'type\t:{aveType:12s}\n'+\
              'flag\t:{flag:12s}'
        text = fmt.format(dt=float(self.dt),
                          t0=float(self.t0),
                          bw=float(self.BW),
                          f0=float(self.f0),
                          df=float(self.df),
                          n=int(self.N),
                          window=self.Window,
                          aveType=self.AveType,
                          average=int(self.Averages),
                          flag=self.Flag
                          )
        print(text)

    def _getStream(self, child):
        # Decode the base64 <Stream> payload according to the Subtype.
        stream_str = child.find('./Array/Stream').text
        stream_bin = binascii.a2b_base64(stream_str)
        if self.Subtype == 'ASD':  # float : asd
            self.spectrum = np.frombuffer(stream_bin, dtype=np.float32)
            self.f = np.arange(len(self.spectrum))*float(self.df)
        elif self.Subtype == 'CSD':  # floatcomplex : cross spectrum
            # Interleaved real/imag float32 pairs, M rows of N bins.
            self.spectrum = np.frombuffer(stream_bin, dtype=np.float32)
            real = self.spectrum[0::2]
            real = real.reshape(self.M, self.N)
            imag = self.spectrum[1::2]
            imag = imag.reshape(self.M, self.N)
            imag = 1j*imag
            c = real+imag
            # Cxy : x is ChannelA, y iterates over ChannelB[0..]
            self.csd = np.absolute(c)
            self.deg = np.rad2deg(np.angle(c))
            self.f = np.arange(len(self.csd[0]))*float(self.df)
        elif self.Subtype == '???':  # float : coherence?
            self.spectrum = np.frombuffer(stream_bin, dtype=np.float32)
            self.f = np.arange(len(self.spectrum))*float(self.df)


class DttXMLTransferFunction():
    """One LIGO_LW[@Type='TransferFunction'] section."""

    def __init__(self, child):
        self.Name = child.attrib["Name"]
        self._getAttribute(child)
        self._getStream(child)

    def _getAttribute(self, child):
        # Unlike DttXMLSpectrum, 'dt' and 't0' are intentionally not read here.
        #self.dt = child.find("./Param[@Name='dt']").text
        #self.t0 = child.find("./Time[@Type='GPS']").text
        self.BW = child.find("./Param[@Name='BW']").text
        self.f0 = child.find("./Param[@Name='f0']").text
        self.df = child.find("./Param[@Name='df']").text
        self.N = int(child.find("./Param[@Name='N']").text)
        self.Window = child.find("./Param[@Name='Window']").text
        self.AveType = child.find("./Param[@Name='AverageType']").text
        self.Averages = child.find("./Param[@Name='Averages']").text
        self.Flag = child.find("./Param[@Name='Flag']").text
        self.Subtype = SubType[child.find("./Param[@Name='Subtype']").text]
        self.M = int(child.find("./Param[@Name='M']").text)
        self.dim = child.find('./Array/Dim').text
        channel = child.findall("./Param[@Unit='channel']")
        self.Channel = list(map(lambda x: {x.attrib['Name']: x.text}, channel))
        Channel = OrderedDict(self.Channel[0])
        for c in self.Channel:
            Channel.update(OrderedDict(c))
        self.Channel = Channel

    def showInfo(self):
        """Print a summary of the measurement parameters.

        NOTE(review): this references self.dt and self.t0, which
        _getAttribute above deliberately does not set -- calling this will
        raise AttributeError.  Likely copy-pasted from DttXMLSpectrum.
        """
        fmt = 'dt [s]\t:{dt:2.10f}\n'+\
              't0(GPS)\t:{t0:10.1f}\n'+\
              'BW [Hz]\t:{bw:2.10f} \n'+\
              'f0 [Hz]\t:{f0:2.10f} \n'+\
              'df [Hz]\t:{df:2.10f} \n'+\
              'average\t:{average:12d} \n'+\
              'Points\t:{n:12d} \n'+\
              'window\t:{window:12s} \n'+\
              'type\t:{aveType:12s}\n'+\
              'flag\t:{flag:12s}'
        text = fmt.format(dt=float(self.dt),
                          t0=float(self.t0),
                          bw=float(self.BW),
                          f0=float(self.f0),
                          df=float(self.df),
                          n=int(self.N),
                          window=self.Window,
                          aveType=self.AveType,
                          average=int(self.Averages),
                          flag=self.Flag
                          )
        print(text)

    def _getStream(self, child):
        # Decode the base64 <Stream> payload according to the Subtype.
        stream_str = child.find('./Array/Stream').text
        stream_bin = binascii.a2b_base64(stream_str)
        if self.Subtype == 'ASD':  # float : asd
            self.spectrum = np.frombuffer(stream_bin, dtype=np.float32)
        elif self.Subtype == 'CSD':  # floatcomplex : cross spectrum
            self.spectrum = np.frombuffer(stream_bin, dtype=np.float32)
            real = self.spectrum[0::2]
            real = real.reshape(self.M, self.N)
            imag = self.spectrum[1::2]
            imag = imag.reshape(self.M, self.N)
            imag = 1j*imag
            c = real+imag
            # Cxy : x is ChannelA, y iterates over ChannelB[0..]
            self.csd = np.absolute(c)
            self.deg = np.rad2deg(np.angle(c))
        elif self.Subtype == 'TF':
            # Transfer functions carry M+1 rows (one extra row vs CSD --
            # presumably the frequency/reference row; confirm against diaggui).
            self.spectrum = np.frombuffer(stream_bin, dtype=np.float32)
            real = self.spectrum[0::2]
            real = real.reshape(self.M+1, self.N)
            imag = self.spectrum[1::2]
            imag = imag.reshape(self.M+1, self.N)
            imag = 1j*imag
            c = real+imag
            self.mag = np.absolute(c)
            self.deg = np.rad2deg(np.angle(c))
        elif self.Subtype == 'COH':
            # Coherence: plain float32, M+1 rows of N bins.
            self.spectrum = np.frombuffer(stream_bin, dtype=np.float32)
            self.spectrum = self.spectrum.reshape(self.M+1, self.N)
            self.mag = self.spectrum
        else:
            raise ValueError('!')


class DttXMLTestParameter():
    """One LIGO_LW[@Type='TestParameter'] section (swept-sine frequencies)."""

    def __init__(self, child):
        self.Name = child.attrib["Name"]
        self._getAttribute(child)

    def _getAttribute(self, child):
        #self.dt = child.find("./Param[@Name='dt']").text
        #self.t0 = child.find("./Time[@Type='GPS']").text
        self.sp = child.find("./Param[@Name='SweepPoints']").text
        # SweepPoints alternates frequency/amplitude; keep frequencies only.
        self.sp = list(map(float, self.sp.split()))[0::2]


class DttData():
    """High-level accessor over a parsed DTT XML file."""

    def __init__(self, xmlname):
        '''Parse *xmlname* and collect its Spectrum or TransferFunction sections.'''
        tree = ElementTree.parse(xmlname)
        root = tree.getroot()
        self.spect = [DttXMLSpectrum(child) for child in \
                      root.findall("./LIGO_LW[@Type='Spectrum']")]
        if not self.spect:
            # Swept-sine file: fall back to TransferFunction sections and take
            # the frequency axis from the TestParameter sweep points.
            # NOTE(review): self.tfmode is only ever set here, so on files
            # that DO contain Spectrum sections, getCoherence/getTF will hit
            # an AttributeError on self.tfmode -- presumably tfmode = False
            # should be set in that branch.
            self.tfmode = True
            self.spect = [DttXMLTransferFunction(child) for child in \
                          root.findall("./LIGO_LW[@Type='TransferFunction']")]
            huge = root.findall("./LIGO_LW[@Type='TestParameter']")
            hoge = DttXMLTestParameter(huge[0])
            self.f = hoge.sp

    def getAllSpectrumName(self):
        '''Print name, subtype and ChannelA for every parsed section.'''
        for s in self.spect:
            print(s.Name, s.Subtype, s.Channel['ChannelA'])

    def getASDInfo(self, chname, ref=False):
        '''Print the measurement parameters of the single ASD for *chname*.'''
        asd = filter(lambda x: x.Subtype == "ASD", self.spect)
        asd = filter(lambda x: x.Channel['ChannelA'] == chname, asd)
        asd = list(asd)
        if len(asd) == 1:
            asd = asd[0]
        else:
            raise ValueError('Error!')
        asd.showInfo()

    def getASD(self, chname, ref=False):
        '''Return (f, spectrum) of the ASD for *chname*.

        ref=False selects the "Result" section, ref=True the "Reference" one.
        NOTE(review): only the FIRST matching section is examined; if it is
        not the requested kind, a ValueError is raised rather than continuing
        the search.
        '''
        asdlist = filter(lambda x: x.Subtype == "ASD", self.spect)
        asdlist = filter(lambda x: x.Channel['ChannelA'] == chname, asdlist)
        asdlist = list(asdlist)
        if len(asdlist) == 0:
            raise ValueError('No ASD with : {0}'.format(chname))
        for asd in asdlist:
            print(asd.Name, asd.Subtype)
            if ref == False:
                if 'Result' in asd.Name:
                    return asd.f, asd.spectrum
                else:
                    raise ValueError('No name')
            elif ref == True:
                if 'Reference' in asd.Name:
                    return asd.f, asd.spectrum
                else:
                    raise ValueError('No reference')
            else:
                print('!')
                return None
        print('!')

    def getResultNum(self, chname, ref=False):
        '''Return the result index parsed from "...[<n>]..." in the ASD name.'''
        asd = list(filter(lambda x: x.Subtype == "ASD", self.spect))
        asd = list(filter(lambda x: x.Channel['ChannelA'] == chname, asd))
        num = asd[0].Name
        # NOTE(review): takes only the first digit after '[' -- indices >= 10
        # would be truncated.
        return int(num.split('[')[1][0])

    def getCSD(self, chnameA, chnameB, ref=False, **kwargs):
        '''Return (f, |CSD|, phase_deg) between chnameA and chnameB.'''
        import re
        csd = list(filter(lambda x: x.Subtype == "CSD", self.spect))
        csd = list(filter(lambda x: x.Channel['ChannelA'] == chnameA, csd))
        if not ref:
            csd = list(filter(lambda x: 'Reference' not in x.Name, csd))
        # Row index must skip ChannelA's own slot in the CSD matrix.
        numA = self.getResultNum(chnameA, **kwargs)
        for c in csd[0].Channel.keys():
            if csd[0].Channel[c] == chnameB:
                num = int(c[:-1].split('[')[1])
                if num >= numA:
                    num = num - 1
                elif num < numA:
                    num = num
        return csd[0].f, csd[0].csd[num], csd[0].deg[num]

    def getCoherence(self, chnameA, chnameB, ref=False):
        '''Return (f, coherence) between chnameA and chnameB.'''
        if not self.tfmode:
            # Spectrum files: coherence estimated from CSD and the two ASDs.
            freq = None
            freq, CSD_AB, deg = self.getCSD(chnameA, chnameB)
            freq, ASD_A = self.getASD(chnameA)
            freq, ASD_B = self.getASD(chnameB)
            mag = (CSD_AB/(ASD_A*ASD_B))**2
        else:
            # Swept-sine files carry coherence directly in COH sections.
            import re
            csd = list(filter(lambda x: x.Subtype == "COH", self.spect))
            csd = list(filter(lambda x: x.Channel['ChannelA'] == chnameA, csd))
            if not ref:
                csd = list(filter(lambda x: 'Reference' not in x.Name, csd))
            else:
                csd = list(filter(lambda x: 'Reference' in x.Name, csd))
            if len(csd) == 1:
                csd = csd[0]
            else:
                raise ValueError('!')
            chnames = list(csd.Channel.values())
            label = list(csd.Channel.keys())
            print(chnameA, chnames)
            num = chnames.index(chnameB)
            # NOTE(review): for references the first data row is used as the
            # frequency axis -- confirm this matches the file layout.
            if ref:
                freq = csd.mag[0]
            else:
                freq = self.f
            mag = csd.mag[num]
        return freq, mag

    def getTF(self, chnameA, chnameB, ref=False, db=True):
        '''Return the transfer function (f, mag, phase_deg) from A to B.'''
        if not self.tfmode:
            f = None
            f, CSD_AB, deg = self.getCSD(chnameA, chnameB)
            f, ASD_A = self.getASD(chnameA)
            f, ASD_B = self.getASD(chnameB)
            # NOTE(review): the usual estimator is CSD_AB / PSD_A
            # (= ASD_A*ASD_A); dividing by ASD_B*ASD_B looks suspicious --
            # confirm against the intended TF definition.
            mag = CSD_AB/(ASD_B*ASD_B)
            return f, mag, deg
        else:
            import re
            csd = list(filter(lambda x: x.Subtype == "TF", self.spect))
            csd = list(filter(lambda x: x.Channel['ChannelA'] == chnameA, csd))
            if not ref:
                csd = list(filter(lambda x: 'Reference' not in x.Name, csd))
            else:
                csd = list(filter(lambda x: 'Reference' in x.Name, csd))
            if len(csd) == 1:
                csd = csd[0]
            else:
                raise ValueError('!')
            chnames = list(csd.Channel.values())
            label = list(csd.Channel.keys())
            print(chnameA, chnames)
            num = chnames.index(chnameB)
            # NOTE(review): same freq-from-first-row convention as getCoherence.
            if ref:
                freq = csd.mag[0]
            else:
                freq = self.f
            mag = csd.mag[num]
            deg = csd.deg[num]
            if db:
                mag = 20*np.log10(mag)
            return freq, mag, deg
14,376
4,750
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Program to build a graph based on dense input features (embeddings).

This is a wrapper around the `nsl.tools.build_graph` API. See its
documentation for more details.

USAGE:

`python graph_builder.py` [*flags*] *input_features.tfr... output_graph.tsv*

For details about this program's flags, run `python graph_builder.py --help`.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import app
from absl import flags

from neural_structured_learning.tools import graph_builder_lib

import tensorflow as tf


def _main(argv):
  """Main function for running the graph_builder program."""
  flag = flags.FLAGS
  flag.showprefixforinfo = False
  # argv layout: argv[1:-1] are input Example files, argv[-1] is the output
  # TSV path -- so at least one input plus the output are required.
  if len(argv) < 3:
    raise app.UsageError(
        'Invalid number of arguments; expected 2 or more, got %d' %
        (len(argv) - 1))
  graph_builder_lib.build_graph(argv[1:-1], argv[-1],
                                flag.similarity_threshold,
                                flag.id_feature_name,
                                flag.embedding_feature_name)


if __name__ == '__main__':
  # Flags are defined here (not at module scope) so importing this module
  # does not register them.
  flags.DEFINE_string(
      'id_feature_name', 'id',
      """Name of the singleton bytes_list feature in each input Example
      whose value is the Example's ID.""")
  flags.DEFINE_string(
      'embedding_feature_name', 'embedding',
      """Name of the float_list feature in each input Example
      whose value is the Example's (dense) embedding.""")
  flags.DEFINE_float(
      'similarity_threshold', 0.8,
      """Lower bound on the cosine similarity required for an edge
      to be created between two nodes.""")

  # Ensure TF 2.0 behavior even if TF 1.X is installed.
  tf.compat.v1.enable_v2_behavior()
  app.run(_main)
2,291
696
import torch
from torch.utils import data
import sys
from sklearn.utils import shuffle
import numpy as np
import argparse
import matplotlib.pyplot as plt


class UserSet(data.Dataset):
    """User play-count dataset: yields (listened prefix, target one-hot)."""

    def __init__(self, path, tsplit, idim=100, seed=0, Nsongs=180198,
                 pc_split=0.1, tag2vector_path=""):
        """
        path : str
            path + fname of the user-playcounts list; the file has the index
            of the songs listened by each user
        idim : int
            maximum number of songs per user in items
            >95% of users have listened less than 100 songs
        tsplit : str
            type of dataset: 'train', 'val', 'test'
        seed : int
            Seed used for the pcounts splitting
        Nsongs : int
            Number of different songs
        pc_split : float
            Percentage of the val and test set (pc_split=1 corresponds to 100%)
        """
        # LOAD DATA
        self.path = path
        self.pcounts = torch.load(self.path)  # list
        self.tsplit = tsplit
        self.pc_split = pc_split
        self.idim = idim
        self.len = len(self.pcounts)
        # Split boundaries: [0, index1) train, [index1, index2) val, rest test.
        self.index1 = int(self.len*(1 - 2*pc_split))
        self.index2 = int(self.len*(1 - pc_split))
        self.seed = seed
        self.Nsongs = Nsongs
        # SPLIT DATASET -- same seed guarantees consistent splits across runs.
        if self.tsplit == "train":
            self.pcounts = shuffle(self.pcounts, random_state=self.seed)[:self.index1]
            self.len = len(self.pcounts)
        elif self.tsplit == "val":
            self.pcounts = shuffle(self.pcounts, random_state=self.seed)[self.index1:self.index2]
            self.len = len(self.pcounts)
        elif self.tsplit == "test":
            self.pcounts = shuffle(self.pcounts, random_state=self.seed)[self.index2:]
            self.len = len(self.pcounts)
        else:
            # NOTE(review): sys.exit(0) signals success to the shell despite
            # being an error path -- a nonzero code (or raising ValueError)
            # would be more conventional.
            print("ERROR: split options = 'train', 'val', 'test'. \n", self.tsplit)
            self.len = None
            self.pcounts = None
            sys.exit(0)
        return

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        # Given a user index, returns two vectors of the listened songs:
        # a prefix (input) and the following slice (target).
        user = shuffle(self.pcounts[idx])
        # NOTE(review): np.random.randint(1, high) requires high > 1, so this
        # assumes every user has at least 3 listened songs -- confirm the
        # data guarantees that.
        idx_inp = np.random.randint(1, min(len(user)-1, self.idim))
        idx_out = np.random.randint(idx_inp + 1, min(len(user) + 1, idx_inp + self.idim))
        # INP for the embedding layer (song IDs, padded with -1).
        inp = -torch.ones(self.idim, dtype=torch.long)
        inp[range(idx_inp)] = torch.LongTensor(user[:idx_inp])
        # OUT (multi-hot vector over all songs).
        out = torch.zeros(self.Nsongs, dtype=torch.long)
        out[user[idx_inp:idx_out]] = torch.ones(len(user[idx_inp:idx_out]), dtype=torch.long)
        return inp, out

    def get_tags(self, Nusers=0, Ntags=1):
        # Random tag per user; placeholder (uniform over Ntags).
        return torch.randint(Ntags, (Nusers, 1)).squeeze(1)


class EmbSet(data.Dataset):
    """Variant of UserSet for flow models: also returns multi-hot inputs."""

    def __init__(self, path, tsplit, idim=100, seed=0, Nsongs=180198, pc_split=0.1):
        """
        See UserSet.  This dataset is for flows.
        """
        self.path = path
        self.pcounts = torch.load(self.path)  # list
        self.tsplit = tsplit
        self.pc_split = pc_split
        self.idim = idim
        self.len = len(self.pcounts)
        self.index1 = int(self.len*(1 - 2*pc_split))
        self.index2 = int(self.len*(1 - pc_split))
        self.seed = seed
        self.Nsongs = Nsongs
        # SPLIT DATASET (identical scheme to UserSet).
        if self.tsplit == "train":
            self.pcounts = shuffle(self.pcounts, random_state=self.seed)[:self.index1]
            self.len = len(self.pcounts)
        elif self.tsplit == "val":
            self.pcounts = shuffle(self.pcounts, random_state=self.seed)[self.index1:self.index2]
            self.len = len(self.pcounts)
        elif self.tsplit == "test":
            self.pcounts = shuffle(self.pcounts, random_state=self.seed)[self.index2:]
            self.len = len(self.pcounts)
        else:
            print("ERROR: split options = 'train', 'val', 'test'. \n", self.tsplit)
            self.len = None
            self.pcounts = None
            sys.exit(0)
        return

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        # Given a user index, returns two (ID-list, multi-hot) pairs for the
        # listened-songs prefix and the following target slice.
        user = shuffle(self.pcounts[idx])
        idx_inp = np.random.randint(1, min(len(user)-1, self.idim))
        idx_out = np.random.randint(idx_inp + 1, min(len(user) + 1, idx_inp + self.idim))
        # INP: padded ID list + multi-hot indicator.
        inp_idim = -torch.ones(self.idim, dtype=torch.long)
        inp_idim[range(idx_inp)] = torch.LongTensor(user[:idx_inp])
        inp_idx = torch.zeros(self.Nsongs, dtype=torch.long)
        inp_idx[user[:idx_inp]] = torch.ones(len(user[:idx_inp]), dtype=torch.long)
        # OUT: same two representations for the target slice.
        out_idim = -torch.ones(self.idim, dtype=torch.long)
        out_idim[range(idx_out - idx_inp)] = torch.LongTensor(user[idx_inp:idx_out])
        out_idx = torch.zeros(self.Nsongs, dtype=torch.long)
        out_idx[user[idx_inp:idx_out]] = torch.ones(len(user[idx_inp:idx_out]), dtype=torch.long)
        return inp_idim, inp_idx, out_idim, out_idx


class PostSet(data.Dataset):
    """
    Loads dataset for predict created by get_PostSet().
    """
    def __init__(self, calculate=False, metadata_path="results/metadata",
                 metadata_name="opt_tags", bias_top=1, bias_normal=1):
        if calculate:
            # NOTE(review): get_TestSetPredict is not defined in this module;
            # presumably get_PostSet() was meant -- confirm.
            get_TestSetPredict()
        self.data = torch.load(metadata_path + "/postset_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
        self.len = len(self.data)
        self.path = metadata_path + "/postset_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal)
        return

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        return self.data[idx]


def get_PostSet(pcounts_name="opt_pcounts", pcounts_path="results/metadata",
                pc_split=0.1, seed=0, metadata_name="opt_tags",
                metadata_path="results/metadata", bias_top=1, bias_normal=1):
    """Build and save the post-hoc prediction dataset from the test split.

    ONLY VALID FOR METADATA THAT IS A LIST FOR EACH SONG.
    Saves a list of [normal_songs, top_class_songs, top_class] per user.
    """
    # LOAD PCOUNTS AND METADATA (same seed/split as UserSet -> test partition).
    pcounts = torch.load(pcounts_path + "/" + pcounts_name)  # list
    index2 = int(len(pcounts)*(1 - pc_split))
    pcounts = shuffle(pcounts, random_state=seed)[index2:]  # Test partition
    metadata, meta = torch.load(metadata_path + "/" + metadata_name)
    Nclasses = len(meta)
    meta2idx = {meta[i]: i for i in range(Nclasses)}
    idx2meta = {i: meta[i] for i in range(Nclasses)}
    # CHANGE METADATA: translate per-song class labels to indices
    # (-1 marks songs without metadata).
    print("Metadata2num and opt_pcounts to dict...")
    idx_metadata = {}  # same as metadata but using the index of meta2idx
    for i in range(len(metadata)):
        if metadata[i] == -1:
            idx_metadata[i] = -1
        else:
            idx_metadata[i] = [meta2idx[m] for m in metadata[i]]
    dict_pcounts = {}
    for i in range(len(pcounts)):
        dict_pcounts[i] = pcounts[i]
    # USER META COUNT: per-user class histogram; drop users with no metadata.
    print("Before filtering users without metadata,", len(pcounts))
    user2class_counts = {}
    total = len(dict_pcounts)
    for b, user in enumerate(list(dict_pcounts.keys())):
        print(" {0:0.3f}% \r".format((b+1.)*100./total), end="")
        class_counts = torch.zeros(Nclasses)
        for song in dict_pcounts[user]:
            if idx_metadata[song] != -1:
                class_counts[idx_metadata[song]] += 1
        if (class_counts != 0).any():
            user2class_counts[user] = class_counts.data.tolist()
        else:
            del dict_pcounts[user]
    # GET TOP CLASS: the most-listened class per user.
    print("After filtering users without metadata,", len(user2class_counts), len(dict_pcounts))
    user2topclass = {}
    for user in user2class_counts.keys():
        user2topclass[user] = idx2meta[torch.argmax(torch.tensor(user2class_counts[user])).data.tolist()]
    # SPLIT INTO [SONGS, TOP CLASS SONGS, TOP TAG]; top capped at 100 songs.
    user2topsongs = {}
    user2normalsongs = {}
    total = len(dict_pcounts)
    for b, user in enumerate(dict_pcounts.keys()):
        print(" {0:0.3f}%\r".format((b+1.)/total*100), end="")
        top = []
        normal = []
        Ntop = 0
        for song in dict_pcounts[user]:
            if metadata[song] != -1:
                if (user2topclass[user] in metadata[song]) and Ntop < 100:
                    top += [song]
                    Ntop += 1
                else:
                    normal += [song]
            else:
                normal += [song]
        user2topsongs[user] = top
        user2normalsongs[user] = normal
    # DELETE USERS below the (BIAS_TOP, BIAS_NORMAL) thresholds and save.
    predict_dataset = []
    for b, user in enumerate(dict_pcounts.keys()):
        print(" {0:0.3f}%\r".format((b+1.)/total*100), end="")
        if len(user2topsongs[user]) >= bias_top and len(user2normalsongs[user]) >= bias_normal:
            predict_dataset += [[user2normalsongs[user], user2topsongs[user], user2topclass[user]]]
    print("# Users (after deleting top<{}, inp<{}): ".format(bias_top, bias_normal), len(predict_dataset))
    torch.save(predict_dataset, metadata_path + "/postset_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
    return


def get_topclass2Ntopclass(bias_top=1, bias_normal=1,
                           metadata_path="results/metadata", metadata_name="opt_tags"):
    """Count how many PostSet users have each top class, and save the map."""
    print("Calculating topclass2Ntopclass...")
    PostSet = torch.load(metadata_path + "/postset_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
    topclass2Ntopclass = {}
    for b, (inp, out, c) in enumerate(PostSet):
        if c not in list(topclass2Ntopclass.keys()):
            topclass2Ntopclass[c] = 0
        topclass2Ntopclass[c] += 1
    torch.save(topclass2Ntopclass, metadata_path + "/topclass2Ntopclass_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
    return


def get_class2song(metadata_path="results/metadata", metadata_name="opt_tags"):
    """Invert the song->classes metadata into a class->songs map, and save it."""
    print("Calculating class2song...")
    metadata, meta = torch.load(metadata_path + "/" + metadata_name)
    class2song = {c: [] for c in meta}
    total = len(metadata)
    for i in range(total):
        print(" {0:0.3f}%\r".format((i+1.)/total*100), end="")
        if metadata[i] == -1:
            continue  # song without metadata
        for c in metadata[i]:
            class2song[c] += [i]
    torch.save(class2song, metadata_path + "/{}2song".format(metadata_name))
    return


def get_class2vector(metadata_path="results/metadata", metadata_name="opt_tags",
                     Nsongs=180198):
    """Build a (Nclasses x Nsongs) multi-hot membership matrix, and save it."""
    print("Calculating get_class2vector...")
    class2song = torch.load(metadata_path + "/{}2song".format(metadata_name))
    _, meta = torch.load(metadata_path + "/" + metadata_name)  # for idx2meta
    Nclasses = len(meta)
    meta2idx = {meta[i]: i for i in range(Nclasses)}
    idx2meta = {i: meta[i] for i in range(Nclasses)}
    total = len(class2song)
    class2vector = torch.zeros(total, Nsongs).long()
    for i in range(total):
        print(" {0:0.3f}%\r".format((i+1.)/total*100), end="")
        class2vector[i][class2song[idx2meta[i]]] = 1
    torch.save(class2vector, metadata_path + "/{}2vector".format(metadata_name))
    return


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--bias_top', type=int, default=1,
                        help="Minimum number of songs in user_topsongs to be taken in care")
    parser.add_argument('--bias_normal', type=int, default=1,
                        help="Minimum number of songs in user_normalsongs to be taken in care")
    parser.add_argument('--Nsongs', type=int, default=180198,
                        help="Number of different songs")
    parser.add_argument('--metadata_name', type=str, default="opt_tags",
                        help="Name of the metadata to use")
    parser.add_argument('--metadata_path', type=str, default="results/metadata",
                        help="Path of the metadata to use")
    parser.add_argument('--pcounts_name', type=str, default="opt_pcounts",
                        help="Name of the pcounts to use")
    parser.add_argument('--pcounts_path', type=str, default="results/metadata",
                        help="Path of the pcounts to use")
    parser.add_argument('--TODO', nargs='+', type=str, default=["all"],
                        help="Things to calculate")
    args = parser.parse_args()
    # "all" expands to every build step, in dependency order.
    if args.TODO == ["all"]:
        args.TODO = ["postset", "topclass2Ntopclass", "class2song", "class2vector"]
    print("METADATA: {}\nBIAS TOP: {}\nBIAS NORMAL: {}\n".format(args.metadata_name, args.bias_top, args.bias_normal))
    if "postset" in args.TODO:
        get_PostSet(bias_normal=args.bias_normal, bias_top=args.bias_top,
                    metadata_name=args.metadata_name, metadata_path=args.metadata_path,
                    pcounts_name=args.pcounts_name, pcounts_path=args.pcounts_path)
    if "topclass2Ntopclass" in args.TODO:
        get_topclass2Ntopclass(bias_normal=args.bias_normal, bias_top=args.bias_top,
                               metadata_name=args.metadata_name, metadata_path=args.metadata_path)
    if "class2song" in args.TODO:
        get_class2song(metadata_name=args.metadata_name, metadata_path=args.metadata_path)
    if "class2vector" in args.TODO:
        get_class2vector(metadata_name=args.metadata_name, metadata_path=args.metadata_path,
                         Nsongs=args.Nsongs)
11,672
4,842
import json
from typing import List

from LocationObject import LocationObject


def parse(file_path: str) -> List[LocationObject]:
    """Parse a JSON file of store locations into unique LocationObject records.

    Parameters
    ----------
    file_path:
        Path to a JSON file containing a list of objects with the keys
        City, PostalCode, Street, StreetNumber, OpenTime, CloseTime.

    Returns
    -------
    Unique locations in first-seen order; duplicates (per
    ``LocationObject.__eq__``) are dropped.
    """
    with open(file_path, "r") as file:
        # json.load handles whitespace/newlines itself; the previous manual
        # read().replace("\n", "") pass was unnecessary.
        data = json.load(file)

    locations: List[LocationObject] = []
    for entry in data:  # 'entry' avoids shadowing the builtin 'object'
        location = LocationObject(
            entry["City"],
            entry["PostalCode"],
            entry["Street"],
            str(entry["StreetNumber"]),  # numbers may be ints in the JSON
            entry["OpenTime"],
            entry["CloseTime"],
        )
        locations.append(location)

    # De-duplicate preserving order.  A set is not used because
    # LocationObject is not known to be hashable; only __eq__ is relied on.
    unique_locations: List[LocationObject] = []
    for location in locations:
        if location not in unique_locations:
            unique_locations.append(location)
    return unique_locations
    # TODO: add geocoding for each location
892
232
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Django command-line entry point for the django + celery + redis demo.

(The article this demo follows is very brief -- read the code directly.)

Common commands:
    python manage.py migrate
        -- looks at the INSTALLED_APPS setting and creates any necessary
           database tables according to the database settings in your
           mysite/settings.py file and the database migrations shipped
           with the app
    python manage.py runserver              -- start the dev server
    python manage.py startapp app_name      -- create a new app
    python manage.py makemigrations app_name -- preview migrations
    python manage.py sqlmigrate app_name 0001 -- apply for real
"""
import os
import sys


def _dispatch(argv):
    """Point Django at the project settings and run the requested command."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "picha.settings")
    # Imported lazily so the module can be inspected without Django installed.
    from django.core.management import execute_from_command_line
    execute_from_command_line(argv)


if __name__ == "__main__":
    _dispatch(sys.argv)
736
289
import os from datetime import datetime from sqlalchemy.orm import sessionmaker import nfp.servicos.model as tables from nfp import CONEXAO class ControleExecucao(object): uri = '' tarefa = None tarefa_nova = False engine = CONEXAO def configurar_base_de_dados(self): self.DBSession = sessionmaker(bind=self.engine) if os.path.isfile(self.uri): if not self.engine.dialect.has_table(self.engine, self.table_name): print('Tabela {} ainda não existe. Criando tabela...'.format(self.table_name)) base = tables.Base base.metadata.create_all(self.engine) return print('Base de dados ainda não existe. Criando...') base = tables.Base base.metadata.create_all(self.engine) print('usando base de dados: ' + self.uri) def get_tarefa(self, tarefa_id): session = self.DBSession() tarefa = tables.Tarefa query = session.query(tarefa).filter( tarefa.id == tarefa_id, ) registro = query.first() return registro def atualizar_colunas_tabela(self): colunas = self.localizar_colunas_faltantes() if not colunas: return session = self.DBSession() for coluna, tipo in colunas.items(): session.execute('ALTER TABLE %s ADD COLUMN %s %s' % (self.table_name, coluna, tipo)) session.commit() def localizar_colunas_faltantes(self): tabela = self.table_name session = self.DBSession() result = session.execute("SELECT name FROM PRAGMA_TABLE_INFO('%s')" % (tabela)) colunas_bd = set() for coluna in result.fetchall(): colunas_bd.add(coluna[0]) mapper = self.model.__mapper__.columns colunas = set() colunas_dic = {} for column in mapper: colunas.add(column.name) colunas_dic[column.name] = str(column.type) diferencas = list(colunas - colunas_bd) if diferencas: retorno = {} for diferenca in diferencas: retorno[diferenca] = colunas_dic[diferenca] return retorno return None def extrair_dados_tarefa(self, tarefa_id): session = self.DBSession() execucao = self.model # busca uma tarefa iniciada filtro = [execucao.tarefa_id == tarefa_id] query = session.query(execucao).filter( *filtro, ) registros = query.all() if not 
registros: return None colunas = [column.name for column in self.model.__mapper__.columns] remover = ['id', 'tarefa_id', 'inicio', 'fim'] for item in remover: try: colunas.remove(item) except Exception: pass linhas = [ [getattr(valor, column.name) for column in self.model.__mapper__.columns if not(column.name in remover)] for valor in registros] return [colunas] + linhas def contador_processos_tarefa(self, tarefa_id): session = self.DBSession() execucao = self.model query = session.query(execucao).filter( execucao.tarefa_id == tarefa_id ) registros = query.all() executadas = [reg.fim for reg in registros if reg.fim is not None] ex = len(executadas) # ex += 1 tot = len(registros) return ex, tot def finalizar_tarefa(self): session = self.DBSession() tarefa = tables.Tarefa robo = self.table_name # busca a tarefa iniciada query = session.query(tarefa).filter( tarefa.inicio.isnot(None), tarefa.fim.is_(None), tarefa.robo == robo, ) registro = query.first() if not registro: return None # registra a finalização da tarefa # registro = tarefa() registro.robo = robo registro.fim = datetime.now() session.add(registro) session.commit() return registro def limpar_tabela(self, tabela): session = self.DBSession() session.execute('''DELETE FROM {}'''.format(tabela)) session.commit() def reativar_tarefa(self, tarefa_id): session = self.DBSession() tarefa = tables.Tarefa query = session.query(tarefa).filter( tarefa.id == tarefa_id, ) registro = query.first() registro.fim = None session.commit() return True def selecionar_execucao(self, tarefa_id): session = self.DBSession() execucao = self.model tarefa = tables.Tarefa # busca uma tarefa iniciada query = session.query(execucao).filter( execucao.inicio.isnot(None), execucao.fim.is_(None), execucao.tarefa_id == tarefa_id ).join(tarefa).filter(tarefa.fim.is_(None)) registro = query.first() if registro: return registro # busca a primeira tarefa livre query = session.query(execucao).filter( execucao.inicio.is_(None), execucao.tarefa_id == 
tarefa_id ).join(tarefa).filter(tarefa.fim.is_(None)) registro = query.first() # se não houver nenhuma livre, retorna vazio if not registro: return None # registra a tarefa livre encontrada como iniciada registro.inicio = datetime.now() session.add(registro) session.commit() return registro def selecionar_tarefa_ativa(self, criar_nova=False): session = self.DBSession() tarefa = tables.Tarefa robo = self.table_name # busca uma tarefa iniciada query = session.query(tarefa).filter( tarefa.inicio.isnot(None), tarefa.fim.is_(None), tarefa.robo == robo, ) registro = query.first() if registro: self.tarefa_nova = False return registro # registra a entrada da tarefa marcando como iniciada if criar_nova: registro = tarefa() registro.robo = robo registro.inicio = datetime.now() session.add(registro) session.commit() self.tarefa_nova = True return registro return None def selecionar_ultima_tarefa_finalizada(self): session = self.DBSession() tarefa = tables.Tarefa robo = self.table_name # busca a ultima tarefa finalizada query = session.query(tarefa).filter( tarefa.inicio.isnot(None), tarefa.fim.isnot(None), tarefa.robo == robo, ).order_by(tarefa.fim.desc()) return query.first() def __del__(self): del self.DBSession # ---------------- Funções de módulo ------ def selecionar_ultima_tarefa_remota_finalizada(tarefa_remota_id): ctrl = ControleExecucao() ctrl.table_name = 'tarefas' ctrl.configurar_base_de_dados() return ctrl.selecionar_ultima_tarefa_remota_finalizada(tarefa_remota_id) def get_id_tarefa_remota(tarefa_id): ctrl = ControleExecucao() ctrl.table_name = 'tarefas' ctrl.configurar_base_de_dados() return ctrl.get_id_tarefa_remota(tarefa_id) def get_tarefa(tarefa_id): ctrl = ControleExecucao() ctrl.table_name = 'tarefas' ctrl.configurar_base_de_dados() return ctrl.get_tarefa(tarefa_id) def reativar_tarefa(tarefa_id): ctrl = ControleExecucao() ctrl.table_name = 'tarefas' ctrl.configurar_base_de_dados() return ctrl.reativar_tarefa(tarefa_id) # 
---------------------------------------- if __name__ == "__main__": pass
7,786
2,527
# -*- coding: utf-8 -*- """ Artifactory repository endpoint """ __copyright__ = "Copyright (C) 2016 Veritas Technologies LLC. All rights reserved." # project imports from ..http import HTTP from .repotype import RepositoryType from .virtual import Virtual from .local import Local from .remote import Remote # define all repo types REPO_TYPE = { "local": Local, "remote": Remote, "virtual": Virtual, } class Repository(HTTP): endpoint = "repositories" _required = [ ("key", "key", ""), ("type", "type", ""), ("url", "url", ""), ] _optional = [ ("description", "description", ""), ] def __init__(self, api): self.api = api super(Repository, self).__init__(self.api) def virtual(self): return Virtual(self.api) def local(self): return Local(self.api) def remote(self): return Remote(self.api) def list(self, type=None): """ Repository types - (local|remote|virtual) """ if type: endpoint = "{0}/?type={1}".format(self.endpoint, type) return self.get(endpoint=endpoint, instance_class=Repository) else: return self.get(instance_class=Repository) def fetch(self, name=""): if not name: name = getattr(self, "key", "") if not name: message = "Repository name is required" self.log.error(message) raise Exception(message) endpoint = "{0}/{1}".format(self.endpoint, name) return self.get(endpoint=endpoint, instance_class=RepositoryType) def _get_instance(self, data, instance): # TODO: this is disgusting hack, need to improve this in future if not instance in [RepositoryType]: return super(Repository, self)._get_instance(data, instance) else: self.log.debug("Instance RepositoryType found with type {0}".format( data.get("rclass"))) repo_instance = REPO_TYPE.get(data.get("rclass")) if not repo_instance: message = "Repository type {0} not supported".format( data.get("rclass")) self.log.error(message) raise Exception(message) self.log.debug("Returning RepositoryType instance {0}".format( repo_instance)) return repo_instance(self.api)
2,458
697
"""Adoption application.""" from flask import Flask, request, redirect, render_template from models import db, connect_db, Pets from wtforms import StringField, IntegerField, TextAreaField, BooleanField from wtforms.validators import DataRequired,InputRequired,AnyOf,URL, NumberRange from flask_wtf import FlaskForm from petfunctions import get_random_pet app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///adopt' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['SQLALCHEMY_ECHO'] = True connect_db(app) db.create_all() from flask_debugtoolbar import DebugToolbarExtension app.config['SECRET_KEY'] ='SOSECRET' debug=DebugToolbarExtension(app) class AddPetForm(FlaskForm): """Form class for adding a pet""" name = StringField('Pet Name') #make this a dropdown (species) species = StringField('Pet Species',validators=[InputRequired(),AnyOf(['dog','cat','porcupine','pickle'])]) photo_url = StringField('Pet Photo Url',validators=[InputRequired(),URL()]) age = IntegerField('Pet Age',validators=[InputRequired(), NumberRange(0, 30, "Age must be between 0 and 30")]) notes = TextAreaField('Notes') class EditPetForm(FlaskForm): """"Form class for editing pets""" photo_url = StringField('Pet Photo Url',validators=[InputRequired(),URL()]) notes = TextAreaField('Notes') available = BooleanField('Available') @app.route('/') def pet_list(): """Display a homepage of pets we can adopt""" pets = Pets.query.all() pet_name,pet_age,pet_url = get_random_pet() return render_template('index.html',pets=pets,pet_name=pet_name,pet_age=pet_age,pet_url=pet_url) @app.route('/add', methods=['GET','POST']) def add_pet_form(): """Add pet to adoption database form""" form = AddPetForm() if form.validate_on_submit(): name = form.data['name'] species = form.data['species'] photo_url = form.data['photo_url'] age = form.data['age'] notes = form.data['notes'] pet = Pets(name=name, species=species, photo_url=photo_url, age=age, notes=notes, ) db.session.add(pet) 
db.session.commit() return redirect('/') else: return render_template('add_pet_form.html',form=form) @app.route('/<int:pet_id>', methods=['GET','POST']) def pet_page(pet_id): """Display pet details and a form to edit pet""" pet = Pets.query.get_or_404(pet_id) form = EditPetForm(obj=pet) if form.validate_on_submit(): pet.photo_url = form.data['photo_url'] pet.notes = form.data['notes'] pet.available = form.data['available'] db.session.commit() return redirect(f'/{pet_id}') else: return render_template('pet_details.html',pet=pet, form=form)
2,887
949
""" Utilities for (weighted) bootstrap resampling applied to geoscientific point-data. """ import numpy as np import pandas as pd from .meta import subkwargs from .spatial import great_circle_distance, _get_sqare_grid_segment_indicies from .log import Handle logger = Handle(__name__) try: import sklearn HAVE_SKLEARN = True except ImportError: msg = "scikit-learn not installed" logger.warning(msg) HAVE_SKLEARN = False def _segmented_univariate_distance_matrix( A, B, distance_metric, dtype="float32", segs=10 ): """ A method to generate a point-to-point distance matrix in segments to be softer on memory requirements yet retain precision (e.g. beyond a few thousand points). Parameters ----------- A, B : :class:`numpy.ndarray` Numpy arrays with positions of points. distance_metric Callable function f(a, b) from which to derive a distance metric. dtype : :class:`str` | :class:`numpy.dtype` Data type to use for the matrix. segs : :class:`int` Number of segments to split the matrix into (note that this will effectively squared - i.e. 10 -> 100 individual segments). Returns ------- dist : :class:`numpy.ndarray` 2D point-to-point distance matrix. """ max_size = np.max([a.shape[0] for a in [A, B]]) dist = np.zeros((max_size, max_size), dtype=dtype) # full matrix # note that this could be parallelized; the calcuations are independent for ix_s, ix_e, iy_s, iy_e in _get_sqare_grid_segment_indicies(max_size, segs): dist[ix_s:ix_e, iy_s:iy_e] = distance_metric( A[ix_s:ix_e][:, np.newaxis], B[iy_s:iy_e][np.newaxis, :], ) return dist def univariate_distance_matrix(a, b=None, distance_metric=None): """ Get a distance matrix for a single column or array of values (here used for ages). Parameters ----------- a, b : :class:`numpy.ndarray` Points or arrays to calculate distance between. If only one array is specified, a full distance matrix (i.e. calculate a point-to-point distance for every combination of points) will be returned. 
distance_metric Callable function f(a, b) from which to derive a distance metric. Returns ------- :class:`numpy.ndarray` 2D distance matrix. """ if distance_metric is None: distance_metric = lambda a, b: np.abs(a - b) a = np.atleast_1d(np.array(a).astype(np.float)) full_matrix = False if b is not None: # a second set of points is specified; the return result will be 1D b = np.atleast_1d(np.array(b).astype(np.float)) else: # generate a full point-to-point matrix for a single set of points full_matrix = True b = a.copy() return _segmented_univariate_distance_matrix(a, b, distance_metric) def get_spatiotemporal_resampling_weights( df, spatial_norm=1.8, temporal_norm=38, latlong_names=["Latitude", "Longitude"], age_name="Age", max_memory_fraction=0.25, normalized_weights=True, **kwargs ): """ Takes a dataframe with lat, long and age and returns a sampling weight for each sample which is essentailly the inverse of the mean distance to other samples. Parameters ----------- df : :class:`pandas.DataFrame` Dataframe to calculate weights for. spatial_norm : :class:`float` Normalising constant for spatial measures (1.8 arc degrees). temporal_norm : :class:`float` Normalising constant for temporal measures (38 Mya). latlong_names : :class:`list` List of column names referring to latitude and longitude. age_name : :class:`str` Column name corresponding to geological age or time. max_memory_fraction : :class:`float` Constraint to switch to calculating mean distances where :code:`matrix=True` and the distance matrix requires greater than a specified fraction of total avaialbe physical memory. This is passed on to :func:`~pyrolite.util.spatial.great_circle_distance`. normalized_weights : :class:`bool` Whether to renormalise weights to unity. Returns -------- weights : :class:`numpy.ndarray` Sampling weights. Notes ------ This function is equivalent to Eq(1) from Keller and Schone: .. 
math:: W_i \\propto 1 \\Big / \\sum_{j=1}^{n} \\Big ( \\frac{1}{((z_i - z_j)/a)^2 + 1} + \\frac{1}{((t_i - t_j)/b)^2 + 1} \\Big ) """ weights = pd.Series(index=df.index, dtype="float") z = great_circle_distance( df[[*latlong_names]], absolute=False, max_memory_fraction=max_memory_fraction, **subkwargs(kwargs, great_circle_distance) ) # angular distances _invnormdistances = np.zeros_like(z) # where the distances are zero, these weights will go to inf # instead we replace with the smallest non-zero distance/largest non-inf # inverse weight norm_inverse_distances = 1.0 / ((z / spatial_norm) ** 2 + 1) norm_inverse_distances[~np.isfinite(norm_inverse_distances)] = 1 _invnormdistances += norm_inverse_distances # ages - might want to split this out as optional for spatial resampling only? t = univariate_distance_matrix(df[age_name]) norm_inverse_time = 1.0 / ((t / temporal_norm) ** 2 + 1) norm_inverse_time[~np.isfinite(norm_inverse_time)] = 1 _invnormdistances += norm_inverse_time weights = 1.0 / np.sum(_invnormdistances, axis=0) if normalized_weights: weights = weights / weights.sum() return weights def add_age_noise( df, min_sigma=50, noise_level=1.0, age_name="Age", age_uncertainty_name="AgeUncertainty", min_age_name="MinAge", max_age_name="MaxAge", ): """ Add gaussian noise to a series of geological ages based on specified uncertainties or age ranges. Parameters ----------- df : :class:`pandas.DataFrame` Dataframe with age data within which to look up the age name and add noise. min_sigma : :class:`float` Minimum uncertainty to be considered for adding age noise. noise_level : :class:`float` Scaling of the noise added to the ages. By default the uncertaines are unscaled, but where age uncertaines are specified and are the one standard deviation level this can be used to expand the range of noise added (e.g. to 2SD). age_name : :class:`str` Column name for absolute ages. age_uncertainty_name : :class:`str` Name of the column specifiying absolute age uncertainties. 
min_age_name : :class:`str` Name of the column specifying minimum absolute ages (used where uncertainties are otherwise unspecified). max_age_name : :class:`str` Name of the column specifying maximum absolute ages (used where uncertainties are otherwise unspecified). Returns -------- df : :class:`pandas.DataFrame` Dataframe with noise-modified ages. Notes ------ This modifies the dataframe which is input - be aware of this if using outside of the bootstrap resampling for which this was designed. """ # try and get age uncertainty try: age_uncertainty = df[age_uncertainty_name] except KeyError: # otherwise get age min age max # get age uncertainties age_uncertainty = ( np.abs(df[max_age_name] - df[min_age_name]) / 2 ) # half bin width age_uncertainty[ ~np.isfinite(age_uncertainty) | age_uncertainty < min_sigma ] = min_sigma # generate gaussian age noise age_noise = np.random.randn(df.index.size) * age_uncertainty.values age_noise *= noise_level # scale the noise # add noise to ages df[age_name] += age_noise return df def spatiotemporal_bootstrap_resample( df, columns=None, uncert=None, weights=None, niter=100, categories=None, transform=None, boostrap_method="smooth", add_gaussian_age_noise=True, metrics=["mean", "var"], default_uncertainty=0.02, relative_uncertainties=True, noise_level=1, age_name="Age", latlong_names=["Latitude", "Longitude"], **kwargs ): """ Resample and aggregate metrics from a dataframe, optionally aggregating by a given set of categories. Formulated specifically for dealing with resampling to address uneven sampling density in space and particularly geological time. Parameters ----------- df : :class:`pandas.DataFrame` Dataframe to resample. columns : :class:`list` Columns to provide bootstrap resampled estimates for. uncert : :class:`float` | :class:`numpy.ndarray` | :class:`pandas.Series` | :class:`pandas.DataFrame` Fractional uncertainties for the dataset. 
weights : :class:`numpy.ndarray` | :class:`pandas.Series` Array of weights for resampling, if precomputed. niter : :class:`int` Number of resampling iterations. This will be the minimum index size of the output metric dataframes. categories : :class:`list` | :class:`numpy.ndarray` | :class:`pandas.Series` List of sample categories to group the ouputs by, which has the same size as the dataframe index. transform Callable function to transform input data prior to aggregation functions. Note that the outputs will need to be inverse-transformed. boostrap_method : :class:`str` Which method to use to add gaussian noise to the input dataset parameters. add_gaussian_age_noise : :class:`bool` Whether to add gassian noise to the input dataset ages, where present. metrics : :class:`list` List of metrics to use for dataframe aggregation. default_uncertainty : :class:`float` Default (fractional) uncertainty where uncertainties are not given. relative_uncertainties : :class:`bool` Whether uncertainties are relative (:code:`True`, i.e. fractional proportions of parameter values), or absolute (:code:`False`) noise_level : :class:`float` Multiplier for the random gaussian noise added to the dataset and ages. age_name : :class:`str` Column name for geological age. latlong_names : :class:`list` Column names for latitude and longitude, or equvalent orthogonal spherical spatial measures. Returns -------- :class:`dict` Dictionary of aggregated Dataframe(s) indexed by statistical metrics. If categories are specified, the dataframe(s) will have a hierarchical index of :code:`categories, iteration`. """ # uncertainty managment ############################################################ uncertainty_type = None if uncert is not None: if isinstance(uncert, float): uncertainty_type = "0D" # e.g. 2% elif isinstance(uncert, (list, pd.Series)) or ( isinstance(uncert, np.ndarray) and np.array(uncert).ndim < 2 ): uncertainty_type = "1D" # e.g. 
[0.5%, 1%, 2%] # shape should be equal to parameter column number elif isinstance(uncert, (pd.DataFrame)) or ( isinstance(uncert, np.ndarray) and np.array(uncert).ndim >= 2 ): uncertainty_type = "2D" # e.g. [[0.5%, 1%, 2%], [1.5%, 0.6%, 1.7%]] # shape should be equal to parameter column number by rows else: raise NotImplementedError("Unknown format for uncertainties.") # weighting ######################################################################## # generate some weights for resampling - here addressing specifically spatial # and temporal resampling if weights is None: weights = get_spatiotemporal_resampling_weights( df, age_name=age_name, latlong_names=latlong_names, **subkwargs(kwargs, get_spatiotemporal_resampling_weights) ) # to efficiently manage categories we can make sure we have an iterable here if categories is not None: if isinstance(categories, (list, tuple, pd.Series, np.ndarray)): pass elif isinstance(categories, str) and categories in df.columns: categories = df[categories] else: msg = "Categories unrecognized" raise NotImplementedError(msg) # column selection ################################################################# # get the subset of parameters to be resampled, removing spatial and age names # and only taking numeric data subset = columns or [ c for c in df.columns if c not in [[i for i in df.columns if age_name in i], *latlong_names] and np.issubdtype(df.dtypes[c], np.number) ] # resampling ####################################################################### def _metric_name(metric): return repr(metric).replace("'", "") metric_data = {_metric_name(metric): [] for metric in metrics} # samples are independent, so this could be processed in parallel for repeat in range(niter): # take a new sample with replacement equal in size to the original dataframe smpl = df.sample(weights=weights, frac=1, replace=True) # whether to specfically add noise to the geological ages # note that the metadata around age names are passed through to this 
function # TODO: Update to have external disambiguation of ages/min-max ages, # and just pass an age series to this function. if add_gaussian_age_noise: smpl = add_age_noise( smpl, min_sigma=50, age_name=age_name, noise_level=noise_level, **subkwargs(kwargs, add_age_noise) ) # transform the parameters to be estimated before adding parameter noise? if transform is not None: smpl[subset] = smpl[subset].apply(transform, axis="index") # whether to add parameter noise, and if so which method to use? # TODO: Update the naming of this? this is only one part of the bootstrap process if boostrap_method is not None: # try to get uncertainties for the data, otherwise use standard deviations? if boostrap_method.lower() == "smooth": # add random noise within uncertainty bounds # this is essentially smoothing # consider modulating the noise model using the covariance structure? # this could be done by individual group to preserve varying covariances # between groups? if uncert is None: noise = ( smpl[subset].values * default_uncertainty * np.random.randn(*smpl[subset].shape) ) * noise_level else: noise = np.random.randn(*smpl[subset].shape) * noise_level if uncertainty_type in ["0D", "1D"]: # this should work if a float or series is passed noise *= uncert else: # need to get indexes of the sample to look up uncertainties # need to extract indexes for the uncertainties, which might be arrays arr_idxs = df.index.take(smpl.index).values noise *= uncert[arr_idxs, :] if relative_uncertainties: noise *= smpl[subset].values smpl[subset] += noise elif (boostrap_method.upper() == "GP") or ( "process" in bootstrap_method.lower() ): # gaussian process regression to adapt to covariance matrix msg = "Gaussian Process boostrapping not yet implemented." raise NotImplementedError(msg) else: msg = "Bootstrap method {} not recognised.".format(boostrap_method) raise NotImplementedError(msg) # whether to independently estimate metric values for individual categories? 
# TODO: Should the categories argument be used to generate indiviudal # bootstrap resampling processes? if categories is not None: for metric in metrics: metric_data[_metric_name(metric)].append( smpl[subset].groupby(categories).agg(metric) ) else: # generate the metric summaries for the overall dataset for metric in metrics: metric_data[_metric_name(metric)].append(smpl[subset].agg(metric)) # where the whole dataset is presented if categories is not None: # the dataframe will be indexed by iteration of the bootstrap return { metric: pd.concat(data, keys=range(niter), names=["Iteration"]) .swaplevel(0, 1) .sort_index() for metric, data in metric_data.items() } else: # the dataframe will be indexed by categories and iteration # TODO: add iteration level to this index? return {metric: pd.DataFrame(data) for metric, data in metric_data.items()}
17,810
5,218
# TODO: add unit tests for test_nhdplus_utils.py
49
18
import numpy as np import seaborn as sns import matplotlib.pyplot as plt from task import SequenceLearning sns.set(style='white', palette='colorblind', context='poster') np.random.seed(0) '''how to use''' # init n_param, n_branch = 16, 4 pad_len = 0 n_parts = 2 n_samples = 256 p_rm_ob_enc = 0 p_rm_ob_rcl = 0 n_rm_fixed = False task = SequenceLearning( n_param, n_branch, pad_len=pad_len, p_rm_ob_enc=p_rm_ob_enc, p_rm_ob_rcl=p_rm_ob_rcl, n_rm_fixed=n_rm_fixed, ) # take sample X, Y = task.sample(n_samples, to_torch=False) print(f'X shape = {np.shape(X)}, n_example x time x x-dim') print(f'Y shape = {np.shape(Y)}, n_example x time x y-dim') '''visualize the sample''' # pick a sample i = 0 x, y = X[i], Y[i] cmap = 'bone' x_split = np.split(x, (n_param, n_param + n_branch), axis=1) mat_list = x_split + [y] f, axes = plt.subplots( 2, 4, figsize=(14, 11), sharey=True, gridspec_kw={ 'width_ratios': [n_param, n_branch, n_param, n_branch], 'height_ratios': [n_param, n_param] }, ) title_list = ['Observed feature', 'Observed value', 'Queried feature', 'Queried value'] ylabel_list = ['Part one', 'Part two'] for i, mat in enumerate(mat_list): [mat_p1, mat_p2] = np.split(mat, [n_param], axis=0) axes[0, i].imshow(mat[:n_param, :], cmap=cmap) axes[1, i].imshow(mat[n_param:, :], cmap=cmap) axes[0, i].set_title(title_list[i], fontname='Helvetica') axes[0, i].set_xticks([]) for i in [1, 3]: axes[1, i].set_xticks(range(n_branch)) axes[1, i].set_xticklabels(i for i in np.arange(4) + 1) for i in range(2): axes[i, 0].set_yticks(np.arange(0, n_param, 5)) axes[i, 0].set_ylabel(ylabel_list[i], fontname='Helvetica') f.tight_layout() f.savefig(f'examples/figs/stimulus-rep.png', dpi=100, bbox_inches='tight')
1,809
812
def lateRide(n): hours = n // 60 minutes = n % 60 return (hours // 10) + (hours % 10) + (minutes // 10) + (minutes % 10)
133
65
""" Custom exceptions """ from __future__ import annotations __all__ = [ "AlreadyRegistered", "ConsumerError", "EventBusError", "UnknownEvent", ] class EventBusError(Exception): """ Base of exceptions raised by the bus. """ class UnknownEvent(EventBusError): """ Raised when an receiver is created for an event the bus does not recognize. """ class AlreadyRegistered(EventBusError): """ Raised when an event is registered more than once to the bus. """ class ProducerError(EventBusError): """ Raised during production of an event. """ class ConsumerError(EventBusError): """ Raised during consumption of an event """
704
210
from sparse_ct.tool import plot_grid from sparse_ct.data import image_to_sparse_sinogram from sparse_ct.reconstructor_2d import ( IRadonReconstructor, SartReconstructor, SartTVReconstructor, DgrReconstructor, SartBM3DReconstructor) import logging logging.basicConfig( filename='dgr_example_32_35.log', filemode='a', format='%(asctime)s - %(levelname)s - %(message)s', level=logging.DEBUG ) def test(fname, label, n_proj=32, noise_pow=25.0): dgr_iter = 4000 lr = 0.01 net = 'skip' noise_std = 1./100 gt, sinogram, theta, FOCUS = image_to_sparse_sinogram(fname, channel=1, n_proj=n_proj, size=512, angle1=0.0, angle2=180.0, noise_pow=noise_pow) logging.warning('Starting') logging.warning('fname: %s %s',label, fname) logging.warning('n_proj: %s', n_proj) logging.warning('noise_pow: %s', noise_pow) logging.warning('dgr_n_iter: %s', dgr_iter) logging.warning('dgr_lr: %s', lr) logging.warning('dgr_net: %s', net) logging.warning('dgr_noise_std: %s', noise_std) recons = [ IRadonReconstructor('FBP'), SartReconstructor('SART', sart_n_iter=40, sart_relaxation=0.15), SartTVReconstructor('SART+TV', sart_n_iter=40, sart_relaxation=0.15, tv_weight=0.5, tv_n_iter=100), SartBM3DReconstructor('SART+BM3D', sart_n_iter=40, sart_relaxation=0.15, bm3d_sigma=0.5), DgrReconstructor('DIP_1.00_0.00_0.00_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=1.0, w_perceptual_loss=0.0, w_tv_loss=0.0 ), DgrReconstructor('DIP_0.99_0.01_0.00_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.99, w_perceptual_loss=0.01, w_tv_loss=0.0 ), DgrReconstructor('DIP_0.90_0.10_0.00_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.90, w_perceptual_loss=0.10, w_tv_loss=0.0 ), DgrReconstructor('DIP_0.50_0.50_0.00_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.5, w_perceptual_loss=0.5, w_tv_loss=0.0 ), DgrReconstructor('DIP_0.10_0.90_0.00_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, 
w_proj_loss=0.10, w_perceptual_loss=0.90, w_tv_loss=0.0 ), DgrReconstructor('DIP_0.01_0.99_0.00_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.01, w_perceptual_loss=0.99, w_tv_loss=0.0 ), DgrReconstructor('DIP_0.00_1.00_0.00_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.0, w_perceptual_loss=1.0, w_tv_loss=0.0 ), DgrReconstructor('DIP_0.99_0.00_0.01_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.99, w_perceptual_loss=0.0, w_tv_loss=0.01 ), DgrReconstructor('DIP_0.90_0.00_0.10_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.9, w_perceptual_loss=0.0, w_tv_loss=0.1 ), DgrReconstructor('DIP_0.50_0.00_0.50_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.5, w_perceptual_loss=0.0, w_tv_loss=0.5 ), DgrReconstructor('DIP_0.10_0.00_0.90_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.1, w_perceptual_loss=0.0, w_tv_loss=0.9 ), DgrReconstructor('DIP_0.01_0.00_0.99_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.01, w_perceptual_loss=0.0, w_tv_loss=0.99 ), DgrReconstructor('DIP_0.00_0.00_1.0_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.00, w_perceptual_loss=0.0, w_tv_loss=1.0 ), DgrReconstructor('DIP_0.33_0.33_0.33_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.33, w_perceptual_loss=0.33, w_tv_loss=0.33 ), DgrReconstructor('DIP_0.8_0.10_0.10_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.8, w_perceptual_loss=0.1, w_tv_loss=0.1 ), DgrReconstructor('DIP_0.98_0.01_0.01_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.98, w_perceptual_loss=0.01, w_tv_loss=0.01 ), DgrReconstructor('DIP_0.10_0.80_0.10_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.10, w_perceptual_loss=0.80, w_tv_loss=0.10 ), 
DgrReconstructor('DIP_0.01_0.98_0.01_0.00', dip_n_iter=dgr_iter, net=net, lr=lr, reg_std=noise_std, w_proj_loss=0.01, w_perceptual_loss=0.98, w_tv_loss=0.01 ), ] img_sart_bm3d = recons[3].calc(sinogram, theta) imgs = [] for recon in recons: if type(recon) == DgrReconstructor: recon.set_for_metric(gt, img_sart_bm3d, FOCUS=FOCUS, log_dir='../log/dip') imgs.append(recon.calc(sinogram)) mse, psnr, ssim = recon.eval(gt) recon.save_result() logstr = "{}: MSE:{:.5f} PSNR:{:.5f} SSIM:{:.5f}".format( recon.name, mse, psnr, ssim ) logging.info(logstr) plot_grid([gt] + imgs, FOCUS=FOCUS, save_name=label+'.png', dpi=500) logging.warning('Done. Results saved as %s', label+'.png') if __name__ == "__main__": # test("../data/shepp_logan.jpg", "shepp_logan_32_35", n_proj=32, noise_pow=35.0) test("../data/ct2.jpg", "ct2_32_35", n_proj=32, noise_pow=35.0) test("../data/ct1.jpg", "ct1_32_35", n_proj=32, noise_pow=35.0) test("../data/LoDoPaB/004013_02_01_119.png", "LoDoPaB1_32_35", n_proj=32, noise_pow=35.0) test("../data/LoDoPaB/004017_01_01_151.png", "LoDoPaB2_32_35", n_proj=32, noise_pow=35.0) test("../data/LoDoPaB/004028_01_04_109.png", "LoDoPaB3_32_35", n_proj=32, noise_pow=35.0) test("../data/LoDoPaB/004043_01_01_169.png", "LoDoPaB4_32_35", n_proj=32, noise_pow=35.0) test("../data/LoDoPaB/004049_04_01_062.png", "LoDoPaB5_32_35", n_proj=32, noise_pow=35.0)
9,580
3,410
from pathlib import Path
from typing import Optional

from starlette.config import Config
from starlette.datastructures import CommaSeparatedStrings

from ..models.pydantic.database import DatabaseURL

# Path to the project-root .env file (two levels above this module).
p: Path = Path(__file__).parents[2] / ".env"
# Read settings from the .env file when present, else from the environment.
config: Config = Config(p if p.exists() else None)

# Required: startup fails without POSTGRES_DB.
DATABASE: str = config("POSTGRES_DB", cast=str)
DB_USER: Optional[str] = config("POSTGRES_USER", cast=str, default=None)
DB_PASSWORD: Optional[str] = config(
    "POSTGRES_PASSWORD", cast=str, default=None
)
DB_HOST: str = config("DB_HOST", cast=str, default="postgres_db")
DB_PORT: int = config("DB_PORT", cast=int, default=5432)

# Async driver URL used by the application at runtime.
DATABASE_CONFIG: DatabaseURL = DatabaseURL(
    drivername="asyncpg",
    username=DB_USER,
    password=DB_PASSWORD,
    host=DB_HOST,
    port=DB_PORT,
    database=DATABASE,
)
# Sync driver URL used by Alembic migrations.
ALEMBIC_CONFIG: DatabaseURL = DatabaseURL(
    drivername="postgresql+psycopg2",
    username=DB_USER,
    password=DB_PASSWORD,
    host=DB_HOST,
    port=DB_PORT,
    database=DATABASE,
)

REDIS_IP: str = config("REDIS_IP", cast=str, default="redis")
REDIS_PORT: int = config("REDIS_PORT", cast=int, default=6379)
# Annotation corrected: the default is None, so the value is Optional[str].
REDIS_PASSWORD: Optional[str] = config("REDIS_PASSWORD", cast=str, default=None)

# Comma-separated list of background task functions for arq, if configured.
ARQ_BACKGROUND_FUNCTIONS: Optional[CommaSeparatedStrings] = config(
    "ARQ_BACKGROUND_FUNCTIONS", cast=CommaSeparatedStrings, default=None
)
1,340
476
"""Fruit shop: price a purchase by fruit name, day of week, and quantity."""

fruit = str(input())
day_of_the_week = str(input())
quantity = float(input())

# Unit prices differ between weekdays and weekends.
weekday_prices = {
    'banana': 2.50, 'apple': 1.20, 'orange': 0.85, 'grapefruit': 1.45,
    'kiwi': 2.70, 'pineapple': 5.50, 'grapes': 3.85,
}
weekend_prices = {
    'banana': 2.70, 'apple': 1.25, 'orange': 0.90, 'grapefruit': 1.60,
    'kiwi': 3, 'pineapple': 5.60, 'grapes': 4.20,
}
weekdays = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday')
weekend_days = ('Saturday', 'Sunday')

if fruit in weekday_prices:
    if day_of_the_week in weekdays:
        total_price = quantity * weekday_prices[fruit]
        print(f'{total_price:.2f}')
    elif day_of_the_week in weekend_days:
        total_price = quantity * weekend_prices[fruit]
        print(f'{total_price:.2f}')
    else:
        # Unknown day name.
        print('error')
else:
    # Unknown fruit name.
    print('error')
1,607
557
import unittest
from trajectories.dynamic_time_warper import *
from trajectories.trajectory import Trajectory
from trajectories.point import Point


class TestDTW(unittest.TestCase):
    """Unit tests for the dynamic-time-warping (DTW) distance functions."""

    def test_1D_DTW(self):
        """1-D DTW gives the same cost for raw lists (metricI) and
        single-dimension Trajectories (metricD)."""
        t1 = [1,2,2,10,2,1]
        t2 = [3,3,5,5,2]
        # -1 presumably disables the warping-window constraint — TODO confirm.
        self.assertEqual(45, dtw(t1, t2, -1, metricI))
        # Distance of a sequence to itself must be zero.
        self.assertEqual(0, dtw(t1, t1, -1, metricI))
        t1 = Trajectory([Point([1]),Point([2]),Point([2]),Point([10]),Point([2]),Point([1])])
        t2 = Trajectory([Point([3]),Point([3]),Point([5]),Point([5]),Point([2])])
        self.assertEqual(45, dtw(t1, t2, -1, metricD))
        self.assertEqual(0, dtw(t1, t1, -1, metricD))

    def test_DTWI(self):
        """Independent DTW (dtwI) on 2-D points."""
        p1 = Point([-7, -4])
        p2 = Point([5, 6])
        p3 = Point([3, 4])
        p4 = Point([-3, 5])
        t1 = Trajectory([p1, p2])
        t2 = Trajectory([p3, p4])
        self.assertEqual(45, dtwI(t1, t2))
        # Self-distance is zero.
        t1 = Trajectory([p1, p2, p3, p4])
        self.assertEqual(0, dtwI(t1, t1))

    def test_ITWD(self):
        """Dependent DTW (dtwD) on 2-D points."""
        p1 = Point([-7, -4])
        p2 = Point([5, 6])
        p3 = Point([3, 4])
        p4 = Point([-3, 5])
        t1 = Trajectory([p1, p2])
        t2 = Trajectory([p3, p4])
        self.assertEqual(45, dtwD(t1, t2))
        # Self-distance is zero.
        t1 = Trajectory([p1, p2, p3, p4])
        self.assertEqual(0, dtwD(t1, t1))


if __name__ == '__main__':
    unittest.main()
1,368
583
# Read pairs of clock times (start h1:m1, end h2:m2) until the all-zero
# sentinel, and print the elapsed minutes between them.
# Hour 0 is treated as hour 24 (midnight at the end of the day).
MINUTES_PER_DAY = 24 * 60

while True:
    h1, m1, h2, m2 = map(int, input().split())
    if h1 + m1 + h2 + m2 == 0:
        break  # "0 0 0 0" terminates the input
    start = MINUTES_PER_DAY + m1 if h1 == 0 else h1 * 60 + m1
    end = MINUTES_PER_DAY + m2 if h2 == 0 else h2 * 60 + m2
    if end > start:
        print(end - start)
    else:
        # End is on the following day (or the same instant): wrap around.
        print(MINUTES_PER_DAY - (start - end))
235
142
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Count
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponseRedirect
from .models import *
from .forms import *
import os
from django.http import HttpResponse
from portfolio_pj import settings

# Create your views here.


def index(req):
    """Render the home page with the owner's skills and hobbies."""
    context = HomeContext("Home", Facade.getSkills(), Facade.getHobbies())
    return render(req, 'index.html', {"context": context})


def profile(req):
    """Render the profile page."""
    profile_context = ProfileContext("Profile", Facade.getProfiles())
    return render(req, 'profile.html', {"context": profile_context})


def portfolio(req):
    """Render the portfolio grid with all projects."""
    portfolio_context = PortfolioContext("Portfolio", Facade.getProjects())
    return render(req, 'portfolio-gird-3.html', {"context": portfolio_context})


def service(req):
    """Render the services page."""
    service_context = ServiceContext("Services", Facade.getServices())
    return render(req, 'services.html', {"context": service_context,})


def contact(req):
    """Render the contact page."""
    context = BaseContext("Contact")
    return render(req, 'contact-3.html', {"context": context,})


def blog(req):
    """Render the paginated blog list, newest posts first."""
    blogs = getBlogsWithPaging(req, Blog.objects.all().order_by('-pub_date'))
    blog_context = BlogsContext("Blog", blogs, getRecentBlogs(), getCategories(), getTags(), getArchives())
    return render(req, 'blog-list.html', {"context": blog_context})


def blogWithSlug(req, blog_year, blog_month, blog_day, blog_slug):
    """Render one blog post and handle comment submission.

    GET shows the post with a blank comment form; a valid POST saves the
    comment (optionally threaded under a parent) and redirects back to
    the post URL (Post/Redirect/Get).
    """
    blog = Blog.objects.get(slug=blog_slug)
    blog_context = BlogContext("Blog", blog, getRecentBlogs(), getCategories(), getTags(), getArchives())
    # if this is a POST request we need to process the form data
    if req.method == 'POST':
        # create a form instance and populate it with data from the request:
        form = CommentForm(req.POST)
        # check whether it's valid:
        if form.is_valid():
            # Save without committing so blog/parent ids can be attached first.
            comment = form.save(commit=False)
            if (form.cleaned_data['parent'] is not None):
                comment.parent_id = form.cleaned_data['parent'].id
            comment.blog_id = blog.id
            comment.save()
            form = CommentForm()
            return HttpResponseRedirect("/blog/%s/%s/%s/%s" % (blog_year,blog_month,blog_day,blog_slug))
    # if a GET (or any other method) we'll create a blank form
    else:
        form = CommentForm()
    # Reached on GET and on invalid POST (form keeps its error state).
    return render(req, 'blog-details.html', {"context": blog_context, "form" : form})


def blogArchive(req, blog_year, blog_month):
    """Render the blog list filtered to one year/month archive."""
    blogs = getBlogsWithPaging(req, Blog.objects.filter(pub_date__year=blog_year, pub_date__month=blog_month))
    blog_context = BlogsContext("Blog", blogs, getRecentBlogs(), getCategories(), getTags(), getArchives())
    return render(req, 'blog-list.html', {"context": blog_context})


def blogWithTag(req, tag_slug):
    """Render the blog list filtered to one tag."""
    blogs = getBlogsWithPaging(req,Tag.objects.get(slug=tag_slug).blog_set.all())
    blog_context = BlogsContext("Blog", blogs, getRecentBlogs(), getCategories(), getTags(), getArchives())
    return render(req, 'blog-list.html', {"context":blog_context})


def blogWithCategory(req, category_slug):
    """Render the blog list filtered to one category."""
    blogs = getBlogsWithPaging(req,Category.objects.get(slug=category_slug).blog_set.all())
    blog_context = BlogsContext("Blog", blogs, getRecentBlogs(), getCategories(), getTags(), getArchives())
    return render(req, 'blog-list.html', {"context":blog_context})


def cv(req):
    """Serve the CV PDF inline from STATIC_ROOT, or a 404 page if missing."""
    file_path = os.path.join(settings.STATIC_ROOT, 'CV_NBD.pdf')
    if os.path.exists(file_path):
        with open(file_path, 'rb') as fh:
            response = HttpResponse(fh.read(), content_type="application/pdf")
            response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path)
            return response
    else:
        return render(req, 'eror-404.html')


# Private Methods
def getCategories():
    """Categories that have at least one post, busiest first."""
    return Category.objects.annotate(blog_count=Count('blog')).filter(blog_count__gt=0).order_by('-blog_count')


def getTags():
    """Tags that have at least one post, busiest first."""
    return Tag.objects.annotate(blog_count=Count('blog')).filter(blog_count__gt=0).order_by('-blog_count')


def getRecentBlogs():
    """The seven most recent posts."""
    return Blog.objects.all().order_by('-pub_date')[:7]


def getArchives():
    """Year/month archive buckets, delegated to the Blog model."""
    return Blog.getArchives()


def getBlogsWithPaging(req, blog_list):
    """Paginate blog_list (5 per page) based on the ?page= query param.

    Non-integer pages fall back to page 1; out-of-range pages clamp to
    the last page.
    """
    max_paging = 5
    page_no = req.GET.get('page')
    blogs_paginator = Paginator(blog_list, max_paging)
    try:
        blogs = blogs_paginator.page(page_no)
    except PageNotAnInteger:
        blogs = blogs_paginator.page(1)
    except EmptyPage:
        blogs = blogs_paginator.page(blogs_paginator.num_pages)
    return blogs
4,820
1,521
import datetime

from ..databases.postgresql import session
from ..models.bookmark_model import Bookmark


# Select one
async def select_one(user_id: int, post_id: int):
    """Return the bookmark row for (user_id, post_id), or None if absent.

    NOTE(review): declared async but uses a blocking SQLAlchemy session,
    so the query runs synchronously on the event loop.
    """
    bookmark = session.query(Bookmark).filter(Bookmark.user_id == user_id, Bookmark.post_id == post_id).first()
    return bookmark


# Insert
async def insert(user_id: int, post_id: int):
    """Create a bookmark linking a user to a post and commit it."""
    bookmark = Bookmark(
        user_id=user_id,
        post_id=post_id,
    )
    session.add(bookmark)
    session.commit()
    session.close()


# Update
async def update(user_id: int, post_id: int, deleted_at: str):
    """Stamp updated_at and set deleted_at (soft delete / restore).

    NOTE(review): assumes the row exists; raises AttributeError when the
    (user_id, post_id) pair is not found — confirm callers guarantee it.
    """
    bookmark = session.query(Bookmark).filter(Bookmark.user_id == user_id, Bookmark.post_id == post_id).first()
    bookmark.updated_at = datetime.datetime.now()
    bookmark.deleted_at = deleted_at
    session.commit()
    session.close()


# Count by post id
async def countByPostId(post_id: int):
    """Count active (not soft-deleted) bookmarks for a post."""
    num_bookmarks = session.query(Bookmark).filter(Bookmark.post_id == post_id, Bookmark.deleted_at == None).count()
    return num_bookmarks
1,022
338
#!/usr/bin/python3
#
# User management application
#
"""CGI program for managing system users.

Operations:
1. Query a user   (GET, ?name=...)
2. Create a user  (POST, kind=add)
3. Modify a user  (POST, kind=mod)
4. Delete a user  (POST, kind=delete)

Implementation notes:
- REQUEST_METHOD distinguishes GET from POST.
- QUERY_STRING / the POST body carry the operation parameters.
- System commands (useradd/usermod/userdel) run via subprocess.
"""

import os
import sys
import subprocess as sub


def response(headers, body):
    """Write a CGI response: header lines, a blank line, then the body."""
    for h in headers:
        print(h)
    print()
    for b in body:
        sys.stdout.write(b)


def get_user_info(params_str, headers):
    """Handle GET: look up the user named in the query string."""
    if params_str:
        params = dict(p.split('=') for p in params_str.split('&'))
    else:
        params = {}
    name = params.get('name')
    if not name:
        headers.append('Status: 400 BAD_REQUEST')
        return response(headers, ['name is required'])
    info = read_user_info(name)
    if not info:
        headers.append('Status: 200 OK')
        return response(headers, ['name %s not exists' % name])
    body = []
    body.append('name: %s\n' % info['name'])
    body.append('uid: %s\n' % info['uid'])
    body.append('gid: %s\n' % info['gid'])
    body.append('comment: %s\n' % info['comment'])
    body.append('home: %s\n' % info['home'])
    body.append('shell: %s\n' % info['shell'])
    return response(headers, body)


def read_user_info(name):
    """Read one user's record from /etc/passwd; return a dict or None."""
    db = '/etc/passwd'
    info = [line.split(':') for line in open(db).read().splitlines()]
    user_info = [i for i in info if i[0] == name]
    if not user_info:
        # User not found.
        return
    user_info = user_info[0]
    colnames = ('name', 'password', 'uid', 'gid', 'comment', 'home', 'shell')
    return dict(zip(colnames, user_info))


def alter_user(headers):
    """Handle POST: add, delete, or modify a user per the request body."""
    data = sys.stdin.read().strip()
    if data:
        params = dict(p.split('=') for p in data.split('&'))
    else:
        headers.append('Status: 400 BAD_REQUEST')
        return response(headers, ['invalid parameters'])

    kind = params.get('kind')  # add? delete? mod?
    name = params.get('name')
    if not name:
        # Missing name used to raise KeyError (HTTP 500); report 400 instead.
        headers.append('Status: 400 BAD_REQUEST')
        return response(headers, ['name is required'])

    if kind == 'add':
        cmd = ['useradd', name]
    elif kind == 'delete':
        cmd = ['userdel', '-r', name]
    elif kind == 'mod':
        # Only the comment field can be changed for now; extend as needed.
        comment = params.get('comment', '')
        cmd = ['usermod', '-c', comment, name]
    else:
        headers.append('Status: 400 BAD_REQUEST')
        return response(headers, ['operation %s not supported' % kind])

    # Temporary: run the management command through sudo.
    cmd.insert(0, 'sudo')
    # SECURITY FIX: execute the argument list directly (no shell) so that
    # user-supplied names cannot inject shell metacharacters; the old code
    # joined the list into a string and ran it through a shell.
    proc = sub.run(cmd, stdout=sub.PIPE, stderr=sub.STDOUT)
    code = proc.returncode
    out = proc.stdout.decode(errors='replace').strip()
    if code == 0:
        headers.append('Status: 200 OK')
        return response(headers, ['operation success'])
    else:
        headers.append('Status: 200 OK')
        return response(headers, ['failed: %s' % out])


if __name__ == '__main__':
    headers = []
    headers.append('Content-Type: text/plain')
    if os.getenv('REQUEST_METHOD') == 'GET':
        params = os.getenv('QUERY_STRING', '')
        get_user_info(params, headers)
    elif os.getenv('REQUEST_METHOD') == 'POST':
        alter_user(headers)
    else:
        headers.append('Status: 405 METHOD_NOT_ALLOWED')
        response(headers, [])
3,326
1,274
bl_info = {
    "name": "Universal Exporter",
    "category": "Import & Export",
}

import bpy


class Export(bpy.types.Operator):
    """Export every object in the blend file to its own .obj file."""
    bl_idname = "object.export_scene"
    bl_label = "Export Blender Scene"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        """Operator entry point: export each object, then report success."""
        scene = context.scene
        cursor = scene.cursor_location
        obj = scene.objects.active

        # Export each object to "<object name>.obj" next to the .blend file.
        # BUG FIX: the original referenced an undefined `file_path`
        # (NameError at runtime) and was missing the comma before
        # `global_scale` (a syntax error).
        for obj in bpy.data.objects:
            file_path = bpy.path.abspath("//" + obj.name + ".obj")
            bpy.ops.object.select_name(name=obj.name)
            bpy.ops.export_scene.obj(filepath=file_path,  # the filepath
                                     check_existing=True,
                                     filter_glob="*.obj;*.mtl",
                                     use_selection=True,
                                     use_all_scenes=False,
                                     use_animation=False,
                                     use_modifiers=True,
                                     use_rotate_x90=True,
                                     use_edges=True,
                                     use_normals=False,
                                     use_hq_normals=True,
                                     use_uvs=True,
                                     use_materials=True,
                                     copy_images=False,
                                     use_triangles=False,
                                     use_vertex_groups=False,
                                     use_nurbs=False,
                                     use_blen_objects=True,
                                     group_by_object=False,
                                     group_by_material=False,
                                     keep_vertex_order=False,
                                     global_scale=1)
        return {'FINISHED'}


def register():
    """Register the operator with Blender."""
    bpy.utils.register_class(Export)


def unregister():
    """Remove the operator from Blender."""
    bpy.utils.unregister_class(Export)


if __name__ == "__main__":
    register()
1,979
525
"""Generate a random .chmm model file (start, emission, transition probs)."""

import random

# Parameters
states_num: int = 900
trans_per_state: int = 3
transitions_num: int = trans_per_state * states_num
num_non_zero_start_probs: int = 2
emit_range: int = 20
file_name: str = "random_{}_{}_{}_{}.chmm".format(
    states_num, transitions_num, emit_range, num_non_zero_start_probs)

# Implicit parameter for probability generation.
rng_range: int = 100


def generate_probability_list(length: int) -> list:
    """Return `length` probabilities (as strings) that sum to ~1.

    Raw integer weights are drawn uniformly, normalised by their sum, and
    truncated (not rounded) to at most 6 characters each.
    """
    raw: list = [random.randrange(rng_range) for _ in range(length)]
    total: int = sum(raw)
    return [str(float(x) / total)[:6] for x in raw]


# Generation
with open(file_name, 'w') as out:
    out.write(str(out.name and states_num) + '\n') if False else out.write(str(states_num) + '\n')

    # Start-probability pairs (state index, probability).
    start_probs: list = generate_probability_list(num_non_zero_start_probs)
    out.write(str(num_non_zero_start_probs) + '\n')
    for idx, prob in enumerate(start_probs):
        out.write("{} {}\n".format(idx, prob))

    # Emission probabilities, one line per state.
    out.write(str(emit_range) + '\n')
    for _ in range(states_num):
        out.write(' '.join(generate_probability_list(emit_range)) + '\n')

    # Transitions: each state gets `trans_per_state` distinct destinations.
    out.write(str(transitions_num) + '\n')
    for src in range(states_num):
        chosen: list = []
        for _ in range(trans_per_state):
            dst: int = random.randrange(states_num)
            while dst in chosen:
                dst = random.randrange(states_num)
            chosen.append(dst)
        weights: list = generate_probability_list(trans_per_state)
        for dst, weight in zip(chosen, weights):
            out.write("{} {} {}\n".format(src, dst, weight))
2,024
727
#
# @lc app=leetcode id=409 lang=python3
#
# [409] Longest Palindrome
#
# https://leetcode.com/problems/longest-palindrome/description/
#
# Given a string which consists of lowercase or uppercase letters, find the
# length of the longest palindromes that can be built with those letters.
#
# This is case sensitive, for example "Aa" is not considered a palindrome
# here.
#
# Note:
# Assume the length of given string will not exceed 1,010.
#
# Example:
#
# Input:  "abccccdd"
# Output: 7
# Explanation:
# One longest palindrome that can be built is "dccaccd", whose length is 7.
#


class Solution:
    def longestPalindrome(self, s: str) -> int:
        """Return the length of the longest palindrome buildable from s.

        Every character contributes in pairs; at most one leftover
        character may additionally occupy the centre.

        BUG FIX: the original compared `len(s) is 1`, an identity check
        against an int (fragile, SyntaxWarning on modern CPython).
        """
        if not s:
            return 0
        counts = {}
        for char in s:
            counts[char] = counts.get(char, 0) + 1
        # Sum the even part of every character count (pairs only).
        length = sum(c - (c % 2) for c in counts.values())
        # If any character was left over, it can sit in the middle.
        return length + 1 if length < len(s) else length
1,764
622
from django.apps import AppConfig


class GetpaidRestConfig(AppConfig):
    """Django application configuration for the getpaid REST framework app."""

    # Dotted module path Django uses to register this application.
    name = "getpaid_rest_framework"
108
35
"""Minimal python-socketio client: connect, echo responses, log lifecycle."""
# FIX: `import socketio` was duplicated.
import socketio

sio = socketio.Client()


@sio.event
def connect():
    """Log that the connection was established."""
    print('connection established')


@sio.event
def my_message(data):
    """Log an incoming `my_message` event and emit a response back."""
    print('message received with ', data)
    sio.emit('my response', {'response': 'my response'})


@sio.event
def disconnect():
    """Log disconnection from the server."""
    print('disconnected from server')


sio.connect('http://localhost:5000')
sio.wait()
392
142
import requests
import requests_cache
requests_cache.install_cache()

from ricecooker.config import LOGGER

STUDIO_URL = 'https://studio.learningequality.org'
NODES_ENDPOINT = STUDIO_URL + '/api/get_nodes_by_ids_complete/'
LICENSES_LIST_ENDPOINT = STUDIO_URL + '/api/license'

# TODO https://studio.learningequality.org/api/get_node_path/ca8f380/18932/41b2549
# TODO http://develop.studio.learningequality.org/api/channel/094097ce6f395ec0b50aabd04943c6b3


class StudioApi(object):
    """Thin token-authenticated client for the Kolibri Studio HTTP API."""

    def __init__(self, token):
        self.token = token
        # The license table is small and rarely changes, so cache it up front.
        self.licenses_by_id = self.get_licenses()

    def get_licenses(self):
        """Return {license_id: license_dict} from the licenses endpoint."""
        headers = {"Authorization": "Token {0}".format(self.token)}
        response = requests.get(LICENSES_LIST_ENDPOINT, headers=headers)
        licenses_list = response.json()
        licenses_dict = {}
        for license in licenses_list:
            licenses_dict[license['id']] = license
        return licenses_dict

    def get_nodes_by_ids_complete(self, studio_id):
        """Fetch the full node document for one Studio node id.

        NOTE(review): the endpoint returns a list; only the first element
        is used — confirm a single id always yields a single node.
        """
        headers = {"Authorization": "Token {0}".format(self.token)}
        url = NODES_ENDPOINT + studio_id
        LOGGER.info(' GET ' + url)
        response = requests.get(url, headers=headers)
        studio_node = response.json()[0]
        return studio_node

    def get_tree_for_studio_id(self, studio_id):
        """Return the full JSON tree rooted at studio_id.

        Built by recursive calls to /api/get_nodes_by_ids_complete — one
        HTTP request per node (softened by requests_cache).
        """
        channel_parent = {'children': []}  # sentinel root to collect the channel

        def _build_subtree(parent, studio_id):
            # Fetch this node, then replace its child id references with
            # fully-fetched child subtrees.
            subtree = self.get_nodes_by_ids_complete(studio_id)
            if 'children' in subtree:
                children_refs = subtree['children']
                subtree['children'] = []
                for child_studio_id in children_refs:
                    _build_subtree(subtree, child_studio_id)
            parent['children'].append(subtree)

        _build_subtree(channel_parent, studio_id)
        channel = channel_parent['children'][0]
        return channel
2,042
658
# Kruskal minimum spanning tree.
# BUG FIX: the original (marked "supposedly doesn't work") capped vertex
# degrees at 2 instead of detecting cycles, which both admits cycles and
# rejects valid trees.  This version uses a union-find structure.
from aresta import Aresta
from insert import insert_sort
from collections import defaultdict


def kruskal(arestas):
    """Return a minimum spanning tree of the given edges (Kruskal).

    Edges are objects with `.peso` (weight), `.first` and `.second`
    (vertex labels).  Returns the list of chosen edges.
    """
    # Union-find parent map, seeded with every vertex seen in the edges.
    parent = {}
    for aresta in arestas:
        parent.setdefault(aresta.first, aresta.first)
        parent.setdefault(aresta.second, aresta.second)

    def _find(v):
        # Find the set representative, halving paths as we go.
        while parent[v] != v:
            parent[v] = parent[parent[v]]
            v = parent[v]
        return v

    arvore = list()
    num_vertices = len(parent)
    # Greedily take the lightest edge that connects two different components.
    for aresta in sorted(arestas, key=lambda a: a.peso):
        if len(arvore) == num_vertices - 1:
            break  # spanning tree complete
        raiz_a = _find(aresta.first)
        raiz_b = _find(aresta.second)
        if raiz_a != raiz_b:
            parent[raiz_a] = raiz_b
            arvore.append(aresta)
    return arvore


if __name__ == "__main__":
    arestas = list()
    arestas.append(Aresta(2, "0", "1"))
    arestas.append(Aresta(-10, "0", "3"))
    arestas.append(Aresta(3, "0", "2"))
    arestas.append(Aresta(5, "1", "2"))
    arestas.append(Aresta(0, "1", "3"))
    arestas.append(Aresta(4, "2", "3"))

    grafo = kruskal(arestas)
    print("Imprimindo árvore geradora mínima:")
    for aresta in grafo:
        print(f"Peso {aresta.peso:2}: {aresta.first:1} para {aresta.second:2}")
2,163
885
# coding: utf-8 import sys __author__ = "Paulo Sérgio dos Santos Araujo" __license__ = "MIT" __version__ = "1.0.0" __email__ = "paulo.araujo [at] splab.ufcg.edu.br" class Msn: """ Essa classe feita para disciplina de Métodos de Software Númericos - UFCG 2018.2 se propõe a encontrar as raízes de uma certa equação definida por um usuário. Parameters ---------- eq : str Equação a ser avaliada deve ser expressa em termos de 'x', pode usar funções de python. tol : float Tolerância da precisão do MSN. alg: string String contendo qual algoritmo deve ser executado 'bisection' ou 'false_position' """ def __init__(self, eq, tol, alg="false_position"): self.eq = eq self.tol = tol self.alg = alg def f(self, x): return eval(self.eq) def findRoots(self, a, b): """ Encontra as raízes da função no intervalo A, B """ if abs(b-a) >= self.tol and (self.f(a) * self.f(b) > 0): mid = (a + b) * .5 self.findRoots(a, mid) self.findRoots(mid, b) else: iterNum = 1 while abs(b - a) > self.tol : if self.alg == "bisection": estimate = (a + b) * .5 elif self.alg == "false_position": estimate = (a*(self.f(b)) - b * (self.f(a))) / (self.f(b) - self.f(a)) else: print('Algoritmo não definido') exit(0) if (self.f(a) * self.f(estimate) > 0): a = estimate else: b = estimate iterNum += 1 print(estimate) print(iterNum) if __name__ == "__main__": msn = Msn(eq="-x**2 + 3", tol=0.01, alg="false_position") msn.findRoots(-2, 3) # -1.7320508075688774 e 1.7320508075688776 msn2 = Msn(eq="-x**2 + 3", tol=0.01, alg="bisection") msn2.findRoots(-2, 3) # -1.736328125 e 1.740234375
1,983
757
""" Project: Data Types Notes Author: Mr. Buckley Last update: 8/25/2018 Description: Goes over comments, int, float, str, and type casting """ # *** COMMENTS *** # This is a comment (with a "#") # Comments are only for the user's eyes, the program doesn't read them. # Describe what sections of code do with a comment. """ This is a multiline comment """ # *** DATA TYPE: INTEGER *** # TODO: An integer number (no decimal) integer = 5 print (integer) print (type(integer)) # *** DATA TYPE: FLOAT *** # TODO: A decimal number decimal = 4.85 print (decimal) print (type(decimal)) # *** DATA TYPE: STRING *** # TODO: A string of characters enclosed in quotes word = "these are my words" print (word) print (type(word)) # *** TYPE CASTING *** # This converts one type to another # TODO: Cast float to int decimal = 55.55 dec_to_int = int(decimal) print(dec_to_int) # TODO: Cast int to string number = "8" print (int(number)+2) # TODO: Cast number string to int print ("give me add I'll add 1 to it") number = float (input()) print (number + 1) # TODO: Input demo (str to float)
1,096
388
import os
import zarr
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from tqdm import tqdm, trange


class FaceDataset(Dataset):
    """Dataset of face images and labels backed by a zarr LMDB store."""

    def __init__(self, path, transforms=None):
        self.path = path
        self.keys = ('images', 'labels')
        assert os.path.exists(path), 'file `{}` not exists!'.format(path)
        # Open the store briefly just to read the example count; persistent
        # handles are opened lazily in __getitem__ so each DataLoader worker
        # gets its own LMDB connection.
        with zarr.LMDBStore(path) as store:
            zarr_db = zarr.group(store=store)
            self.num_examples = zarr_db['labels'].shape[0]
        self.datasets = None
        if transforms is None:
            transforms = {
                'labels': lambda v: torch.tensor(v, dtype=torch.long),
                # Map uint8 pixel values into [-1, 1].
                'images': lambda v: torch.tensor((v - 127.5)/127.5, dtype=torch.float32)
            }
        self.transforms = transforms

    def __len__(self):
        return self.num_examples

    def __getitem__(self, idx):
        # Lazily open the store on first access (per process/worker).
        # NOTE(review): the store opened here is never explicitly closed.
        if self.datasets is None:
            store = zarr.LMDBStore(self.path)
            zarr_db = zarr.group(store=store)
            self.datasets = {key: zarr_db[key] for key in self.keys}
        items = []
        for key in self.keys:
            item = self.datasets[key][idx]
            if key in self.transforms:
                item = self.transforms[key](item)
            items.append(item)
        # Returned in self.keys order: [image, label].
        return items


class Model(nn.Module):
    """Small two-stage CNN classifier.

    The two conv/pool stages reduce the spatial size to 6x6 with 32
    channels, matching the fc layer's 6*6*32 input (consistent with a
    96x96x3 input — presumably; confirm against the dataset).
    """

    def __init__(self, input_size=96 * 96 * 3, output_size=126, hidden_size=25):
        super().__init__()
        # NOTE(review): input_size and hidden_size are currently unused.
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=6, stride=2, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(
                kernel_size=2, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=6, stride=2, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(
                kernel_size=2, stride=2))
        self.fc = nn.Linear(6 * 6 * 32, output_size)
        # Loss lives on the module so the training loop can reuse it.
        self.criteria = nn.CrossEntropyLoss()

    def forward(self, inputs):
        outputs = self.layer1(inputs)
        outputs = self.layer2(outputs)
        # Flatten to (batch, 6*6*32) for the classifier head.
        outputs = outputs.reshape(outputs.size(0), -1)
        outputs = self.fc(outputs)
        return outputs


def main(batch_size=64, epochs=50):
    """Train the CNN on the train split, then report validation accuracy."""
    data_train = FaceDataset('data/anime_faces/train.lmdb')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    loader = DataLoader(data_train, batch_size=batch_size, num_workers=10)
    model = Model()
    model.to(device)
    model.train()
    optim = torch.optim.Adam(model.parameters(), lr=0.001)
    for epoch in trange(epochs):
        t = tqdm(loader)
        for i, (images, labels) in enumerate(t):
            images = images.to(device)
            labels = labels.to(device)
            optim.zero_grad()
            logits = model(images)
            loss = model.criteria(logits, labels)
            loss.backward()
            optim.step()
            # Batch accuracy for the progress bar (softmax is monotonic,
            # so argmax over probabilities equals argmax over logits).
            predicts = torch.argmax(F.softmax(logits, dim=1), dim=1)
            accuracy = (predicts == labels).to(torch.float32).mean()
            t.set_postfix(
                epoch=epoch, i=i, loss=loss.item(), accuracy=accuracy.item())
    # Evaluate once on the validation split.
    data_val = FaceDataset('data/anime_faces/val.lmdb')
    val_loader = DataLoader(data_val, batch_size=batch_size, num_workers=0)
    total = len(data_val)
    total_correct = 0
    model.eval()
    for images, labels in val_loader:
        images = images.to(device)
        labels = labels.to(device)
        logits = model(images)
        predicts = torch.argmax(F.softmax(logits, dim=1), dim=1)
        correct = (predicts == labels).sum()
        total_correct += correct.item()
    print('Val accuracy = {}'.format(total_correct / total))


if __name__ == '__main__':
    main()
3,812
1,304
import os
import scrapy
from pepper.items import PepperItem


class PepperSpider(scrapy.Spider):
    """Scrapy spider that collects comic images from the Dr. Pepper blog."""

    name = 'pepper'
    start_urls = ['https://blog.drpepper.com.br']

    def parse(self, response):
        """Yield one PepperItem per image on the page, then follow the
        next pagination link."""
        # Union of three selectors that together cover the comic images;
        # set() removes nodes matched by more than one selector.
        images = response.xpath(
            './/img[contains(@class,"size-full")]'
        )
        images += response.xpath(
            './/img[contains(@class,"alignnone")]'
        )
        images += response.xpath(
            './/img[contains(@src,"/tirinhas/")]'
        )
        images = set(images)

        for img in images:
            link = img.xpath('./@src').get()
            yield PepperItem(
                name=os.path.basename(link),
                # Caption text from the enclosing paragraph, if any.
                description=img.xpath('./parent::p/text()').get(),
                link=link,
                image_urls=[link],
            )

        # Pagination: the <li> after the currently highlighted page number.
        current_page = response.xpath('//span[@class="page-numbers current"]')
        next_page = current_page.xpath('./parent::li/following-sibling::li[1]/a/@href').get()
        if next_page:
            yield scrapy.Request(next_page, callback=self.parse)
1,066
320
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0

from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
import simplejson as json
from spotseeker_server.models import Spot, SpotExtendedInfo
from spotseeker_server.org_filters import SearchFilterChain


def spot_with_noise_level(name, noise_level):
    """Create a spot with the given noise level"""
    spot = Spot.objects.create(name=name)
    spot.spotextendedinfo_set.create(key='noise_level', value=noise_level)
    return spot


@override_settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok',
                   SPOTSEEKER_SEARCH_FILTERS=(
                       'spotseeker_server.org_filters.uw_search.Filter',))
class UWNoiseLevelTestCase(TestCase):
    """Search tests for the UW noise-level filter.

    The expectations encode the UW rule that 'variable' spots match both
    'quiet' and 'moderate' queries, but not 'silent'.
    """

    @classmethod
    def setUpClass(cls):
        # NOTE(review): super().setUpClass() is not called, which skips
        # django.test.TestCase class-level fixtures — confirm intentional.
        cls.silent_spot = spot_with_noise_level('Silent Spot', 'silent')
        cls.quiet_spot = spot_with_noise_level('Quiet Spot', 'quiet')
        cls.moderate_spot = spot_with_noise_level('Moderate', 'moderate')
        cls.variable_spot = spot_with_noise_level('Var Spot', 'variable')

    @classmethod
    def tearDownClass(cls):
        Spot.objects.all().delete()

    def get_spots_for_noise_levels(self, levels):
        """Do a search for spots with particular noise levels"""
        c = self.client
        response = c.get('/api/v1/spot',
                         {'extended_info:noise_level': levels},
                         content_type='application/json')
        return json.loads(response.content)

    def assertResponseSpaces(self, res_json, spaces):
        """
        Assert that a particular decoded response contains exactly
        the same spaces as 'spaces'.
        """
        # Compare order-insensitively by sorting both sides on spot id.
        def sortfunc(spot_dict):
            return spot_dict['id']

        expected_json = [spot.json_data_structure() for spot in spaces]
        expected_json.sort(key=sortfunc)
        res_json.sort(key=sortfunc)
        self.assertEqual(expected_json, res_json)

    def test_only_silent(self):
        """Searching for silent should return only silent"""
        SearchFilterChain._load_filters()  # make sure the uw filters is loaded
        res_json = self.get_spots_for_noise_levels(['silent'])
        self.assertResponseSpaces(res_json, [self.silent_spot])

    def test_uw_only_quiet(self):
        """Quiet should return both a quiet spot and variable"""
        SearchFilterChain._load_filters()  # make sure the uw filters is loaded
        res_json = self.get_spots_for_noise_levels(['quiet'])
        expected = [self.quiet_spot, self.variable_spot]
        self.assertResponseSpaces(res_json, expected)

    def test_uw_only_moderate(self):
        """Moderate should return moderate and variable"""
        SearchFilterChain._load_filters()  # make sure the uw filters is loaded
        res_json = self.get_spots_for_noise_levels(['moderate'])
        expected = [self.moderate_spot, self.variable_spot]
        self.assertResponseSpaces(res_json, expected)

    def test_uw_silent_and_quiet(self):
        """Silent+quiet should give everything but moderate"""
        SearchFilterChain._load_filters()  # make sure the uw filters is loaded
        res_json = self.get_spots_for_noise_levels(['silent', 'quiet'])
        expected = [self.quiet_spot, self.silent_spot, self.variable_spot]
        self.assertResponseSpaces(res_json, expected)

    def test_uw_silent_and_moderate(self):
        """Silent+moderate should give everything but quiet"""
        SearchFilterChain._load_filters()  # make sure the uw filters is loaded
        res_json = self.get_spots_for_noise_levels(['silent', 'moderate'])
        expected = [self.silent_spot, self.moderate_spot, self.variable_spot]
        self.assertResponseSpaces(res_json, expected)

    def test_uw_all_three(self):
        """All 3 should give everything"""
        SearchFilterChain._load_filters()  # make sure the uw filters is loaded
        query = ['silent', 'quiet', 'moderate']
        res_json = self.get_spots_for_noise_levels(query)
        expected = [self.silent_spot, self.quiet_spot,
                    self.moderate_spot, self.variable_spot]
        self.assertResponseSpaces(res_json, expected)
4,334
1,338
__all__ = ["des"]
17
8
from spotdl.metadata.providers.spotify import ProviderSpotify from spotdl.metadata.providers.youtube import ProviderYouTube from spotdl.metadata.providers.youtube import YouTubeSearch
185
50
from math import floor


class Integer:
    """Wrapper around an integer value with several alternate constructors.

    Note: the ``from_*`` constructors return error *strings* (or ``None``)
    instead of raising on bad input -- existing callers depend on that
    behaviour, so it is preserved here.
    """

    def __init__(self, value):
        self.value = value

    @classmethod
    def from_float(cls, float_value):
        """Build an Integer by flooring a float.

        Returns the error string 'value is not a float' for non-floats.
        """
        if isinstance(float_value, float):
            return cls(floor(float_value))
        else:
            return 'value is not a float'

    @classmethod
    def from_roman(cls, value):
        """Build an Integer from a Roman-numeral string.

        Returns None (silently) when the input cannot be parsed -- this
        mirrors the original's blanket except/pass.
        """
        try:
            translate = {'I': 1, 'V': 5, 'X': 10,
                         'L': 50, 'C': 100, 'D': 500, 'M': 1000}
            arabic_nums = [translate[r] for r in value]
            # Subtractive notation: a digit smaller than its successor is
            # subtracted (e.g. IV -> -1 + 5 = 4).
            arabic_sum = sum(
                val if val >= next_val else -val
                for val, next_val in zip(arabic_nums[:-1], arabic_nums[1:])
            ) + arabic_nums[-1]
            return cls(int(arabic_sum))
        except Exception:
            pass

    @classmethod
    def from_string(cls, value):
        """Build an Integer from a decimal string; 'wrong type' otherwise."""
        if isinstance(value, str):
            try:
                return cls(int(value))
            except Exception:
                return 'wrong type'
        else:
            return 'wrong type'

    def add(self, num):
        """Return self.value + num.value, or an error string for non-Integers."""
        if isinstance(num, Integer):
            return self.value + num.value
        else:
            return 'number should be an Integer instance'

    def __repr__(self):
        # BUG FIX: __repr__ must return a str; returning the raw int made
        # repr(instance) raise TypeError.
        return str(self.value)


first_num = Integer(10)
second_num = Integer.from_roman("IV")
print(Integer.from_float("2.6"))
print(Integer.from_string(2.6))
print(first_num.add(second_num))
1,476
479
import datetime
from typing import Optional

from pydantic import BaseModel


class TokenBase(BaseModel):
    """Base token schema carrying only the access token."""

    access_token: str


class Token(TokenBase):
    """Full token pair returned at authentication: access + refresh token."""

    refresh_token: str


class TokenData(BaseModel):
    """Payload decoded from an access token; username may be absent."""

    username: Optional[str] = None


class VerificationToken(BaseModel):
    """Payload of an account-verification token: the user id plus expiry."""

    user_id: int
    exp: datetime.datetime
324
96
import json
from urllib.request import urlopen

import requests
from bs4 import BeautifulSoup


def get_sti():
    """Return an HTML snippet linking to a Straits Times Index chart image."""
    # https://github.com/hongtaocai/googlefinance
    return '<a href="https://chart.finance.yahoo.com/t?s=%5eSTI&lang=en-SG&region=SG&width=300&height=180" >'


def get_fx():
    """Fetch MAS end-of-day FX rates and format them as 'XXX rate = 1 SGD' lines.

    The MAS API quotes several currencies per 100 units (fields ending in
    `_100`); every rate is inverted so the output reads as foreign currency
    per 1 SGD.  The duplicated per-currency expressions of the original are
    folded into one table-driven pass; the arithmetic (1/x then *units) is
    kept in the original order so results are bit-identical.
    """
    url = 'https://eservices.mas.gov.sg/api/action/datastore/search.json?resource_id=95932927-c8bc-4e7a-b484-68a66a24edfe&limit=1&sort=end_of_day%20desc'
    request = requests.get(url)
    data = json.loads(request.text)
    result_today = data['result']['records'][0]
    # (currency code, API field name, units the field is quoted per)
    fields = [
        ('AUD', 'aud_sgd', 1),
        ('CNY', 'cny_sgd_100', 100),
        ('HKD', 'hkd_sgd_100', 100),
        ('EUR', 'eur_sgd', 1),
        ('JPY', 'jpy_sgd_100', 100),
        ('MYR', 'myr_sgd_100', 100),
        ('THB', 'thb_sgd_100', 100),
        ('TWD', 'twd_sgd_100', 100),
        ('USD', 'usd_sgd', 1),
        ('VND', 'vnd_sgd_100', 100),
    ]
    list_curr = {code: 1 / float(result_today[field]) * units
                 for code, field, units in fields}
    text_final = '<b>Latest SGD End of Day Rates ' + result_today['end_of_day'] + '</b>\n\n'
    for key in sorted(list_curr.keys()):
        text_final += key + " " + str(round(list_curr[key], 3)) + " = 1 SGD \n"
    return text_final


def get_sibor():
    """Scrape moneysmart.sg for the latest SIBOR rates and format them."""
    # Connect to Source
    url = 'http://www.moneysmart.sg/home-loan/sibor-trend'
    data = urlopen(url)
    soup = BeautifulSoup(data, 'html.parser')
    # Find latest Result
    result = soup.findAll("div", {"class": "sibor-sor-table"})
    result = result[0].findAll("td")
    result = result[1:]
    text_final = '<b>Latest SIBOR Rates</b>\n\n'
    # <td> cells alternate name, rate, name, rate, ...
    name = result[0:][::2]
    rate = result[1:][::2]
    for i in range(0, 4):
        text_final += name[i].get_text() + " - " + rate[i].get_text() + "\n"
    return text_final
1,988
899
# -*- coding: utf-8 -*- # # Copyright (c) 2017~2999 - cologler <skyoflw@gmail.com> # ---------- # # ---------- from .common import LifeTime, IServiceProvider, ICallSiteResolver from .descriptors import CallableDescriptor class CallSiteResolver(ICallSiteResolver): def __init__(self, service_provider: IServiceProvider): self._service_provider = service_provider def resolve(self, service_type: type, depend_chain): descriptor = CallableDescriptor.try_create(service_type, service_type, LifeTime.transient) if descriptor: return self._service_provider.get_callsite(descriptor, depend_chain)
637
197
from joblib import Memory

cachedir = "cache"
memory = Memory(cachedir, verbose=10)


# @memory.cache
def filter_ff_stories(books, max_rating, min_words, max_words,
                      min_chapters, max_chapters, max_books):
    """Filter fan-fiction entries by language, rating, word and chapter count.

    Mutates `books` in place to the surviving entries (as the original's
    deletion pass did) and returns at most `max_books` of them.
    """
    print("filtering ff stories")
    ratings = {"K": 1, "K+": 2, "T": 3, "M": 4, "MA": 5}
    rating_number = ratings[max_rating]
    kept = []
    for bookid, book in enumerate(books):
        if bookid % 1000 == 0:
            print(f"filtering book {bookid} now")
        words = int(book["Words"].replace(",", ""))
        chapters = int(book["Chapters"].replace(",", ""))
        if (book["Language"] == "English"
                and ratings[book["Rating"]] <= rating_number
                and min_words <= words <= max_words
                and min_chapters <= chapters <= max_chapters):
            kept.append(book)
    # Mutate the caller's list in place, mirroring the original's reversed
    # `del` pass over collected indices.
    books[:] = kept
    return books[:max_books]
1,124
362
import sys

import helpers.printer
import helpers.parser
import helpers.config
import program.obfuscation
import program.bypass

modes = helpers.config.Modes
bypass_methods = helpers.config.BypassMethods
obfuscation_methods = helpers.config.ObfuscationMethods

printer = helpers.printer.Printer()
parser = helpers.parser.Parser(
    printer, modes, bypass_methods, obfuscation_methods)
bypass = program.bypass.Bypass()
obfuscation = program.obfuscation.Obfuscation()


def execute_program(options):
    """Run bypass and/or obfuscation as described by `options`.

    `options` is the parsed sequence [mode, bypass_type, obfuscation_type,
    input_file, output].  Raises SystemError when the sequence is malformed
    and SystemExit for unsupported modes.

    BUG FIX: the original wrapped the whole body in a bare `except:`, which
    catches BaseException -- so even the deliberate
    SystemExit("Not supported mode") and any real failure inside the bypass/
    obfuscation calls were masked as "Options are not valid".  The try now
    covers only the option unpacking.
    """
    print(options)
    try:
        mode, bypass_type, obfuscation_type, input_file, output = options[:5]
    except (TypeError, ValueError) as err:
        raise SystemError("Options are not valid") from err
    if mode == modes.bypass:
        code = bypass.execute_bypass(bypass_type)
        obfuscation.execute_ofuscation_code(
            obfuscation_type, code, output)
    elif mode == modes.obfuscate:
        obfuscation.execute_obfuscation_file(
            obfuscation_type, input_file, output)
    else:
        raise SystemExit("Not supported mode")
    print("The file " + output + " has been created succesfully")


def main(args=None):
    """Command-line entry point; dispatches on -h/-v or parses full options."""
    # Get command line arguments (sys.argv[1:] never raises IndexError, so
    # the original try/except around it was dead code).
    if args is None:
        args = sys.argv[1:]
    if "-h" in args or "--help" in args:
        if len(args) == 1:
            printer.print_help()
        else:
            raise SystemExit(
                "Help option (-h or --help) must be run without arguments")
    elif "-v" in args or "--version" in args:
        if len(args) == 1:
            printer.print_version()
        else:
            raise SystemExit(
                "Version option (-v or --version) must be run without arguments")
    else:
        if len(args) == 0:
            printer.print_help()
        else:
            # Mode - Bypass type - Obfuscation type - Input file to obfuscate - Output file to generate
            options = parser.parse_command_line(args)
            execute_program(options)


if __name__ == "__main__":
    main()
2,167
649
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import DataLoader
import os

path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)

resnet_18_default = 224


def _get_dataset(resize=resnet_18_default):
    """Build the CIFAR-10 train and test datasets (resize, flip, normalize)."""
    transform = transforms.Compose([
        transforms.Resize(resize),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    trainset = torchvision.datasets.CIFAR10(
        root=dir_path, train=True, download=True, transform=transform)
    testset = torchvision.datasets.CIFAR10(
        root=dir_path, train=False, download=True, transform=transform)
    return trainset, testset


def _get_classes():
    """CIFAR-10 class labels, index-aligned with the dataset targets."""
    return ('plane', 'car', 'bird', 'cat', 'deer',
            'dog', 'frog', 'horse', 'ship', 'truck')


def get_dataloader(train=True, batch_size=16):
    """Return a DataLoader over the CIFAR-10 train or test split."""
    animal_indices = [2, 3, 4, 5, 6, 7]
    # animal_sampler = SubsetRandomSampler(animal_indices)
    trainset, testset = _get_dataset()
    if train:
        return DataLoader(trainset, batch_size)
    return DataLoader(testset, batch_size)
1,169
429
from scipy import io
import numpy as np
import random
import tensorflow as tf

class_num = 10
image_size = 32
img_channels = 3


def OneHot(label, n_classes):
    """One-hot encode `label` into shape (N, n_classes)."""
    label = np.array(label).reshape(-1)
    label = np.eye(n_classes)[label]
    return label


def prepare_data():
    """Load SVHN .mat files, reorder to NHWC, remap digit '10' to 0, one-hot labels.

    Returns (train_data, train_labels, test_data, test_labels), truncated to
    50k train / 10k test samples.  The extra set is loaded and processed but
    currently unused (kept for the commented-out concatenation below).
    """
    classes = 10
    data1 = io.loadmat('./data/train_32x32.mat')
    data2 = io.loadmat('./data/test_32x32.mat')
    data3 = io.loadmat('./data/extra_32x32.mat')
    train_data = data1['X']
    train_labels = data1['y']
    test_data = data2['X']
    test_labels = data2['y']
    extra_data = data3['X']
    extra_labels = data3['y']
    train_data = train_data.astype('float32')
    test_data = test_data.astype('float32')
    extra_data = extra_data.astype('float32')
    # .mat arrays come as HWCN; move the sample axis first -> NHWC
    train_data = np.transpose(train_data, (3, 0, 1, 2))
    test_data = np.transpose(test_data, (3, 0, 1, 2))
    extra_data = np.transpose(extra_data, (3, 0, 1, 2))
    # SVHN labels the digit 0 as class 10; remap to 0
    train_labels[train_labels == 10] = 0
    test_labels[test_labels == 10] = 0
    extra_labels[extra_labels == 10] = 0
    train_labels = train_labels[:, 0]
    test_labels = test_labels[:, 0]
    extra_labels = extra_labels[:, 0]
    train_labels = OneHot(train_labels, classes)
    test_labels = OneHot(test_labels, classes)
    extra_labels = OneHot(extra_labels, classes)
    # truncate the train data and test data
    train_data = train_data[0:50000, :, :, :]
    train_labels = train_labels[0:50000, :]
    test_data = test_data[0:10000, :, :, :]
    test_labels = test_labels[0:10000, :]
    # train_data = np.concatenate((train_data,extra_data),axis=0)
    # train_labels = np.concatenate((train_labels,extra_labels),axis=0)
    print('Train data:', train_data.shape, ', Train labels:', train_labels.shape)
    print('Test data:', test_data.shape, ', Test labels:', test_labels.shape)
    return train_data, train_labels, test_data, test_labels


def _random_crop(batch, crop_shape, padding=None):
    """Randomly crop each HWC image in `batch` to `crop_shape`, optionally
    zero-padding `padding` pixels on each spatial side first."""
    oshape = np.shape(batch[0])
    if padding:
        oshape = (oshape[0] + 2 * padding, oshape[1] + 2 * padding)
    new_batch = []
    npad = ((padding, padding), (padding, padding), (0, 0))
    for i in range(len(batch)):
        new_batch.append(batch[i])
        if padding:
            # np.lib.pad is a deprecated alias -- use the public np.pad
            new_batch[i] = np.pad(batch[i], pad_width=npad,
                                  mode='constant', constant_values=0)
        nh = random.randint(0, oshape[0] - crop_shape[0])
        nw = random.randint(0, oshape[1] - crop_shape[1])
        new_batch[i] = new_batch[i][nh:nh + crop_shape[0],
                                    nw:nw + crop_shape[1]]
    return new_batch


def _random_flip_leftright(batch):
    """Flip each image horizontally with probability 1/2 (in place)."""
    for i in range(len(batch)):
        if bool(random.getrandbits(1)):
            batch[i] = np.fliplr(batch[i])
    return batch


def color_preprocessing(x_train, x_test):
    """Standardize each color channel using each dataset's own mean/std.

    The six duplicated per-channel lines of the original are folded into a
    single loop over the channel axis; the computation is unchanged.
    """
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    for c in range(img_channels):
        x_train[:, :, :, c] = (x_train[:, :, :, c] - np.mean(x_train[:, :, :, c])) / np.std(x_train[:, :, :, c])
        x_test[:, :, :, c] = (x_test[:, :, :, c] - np.mean(x_test[:, :, :, c])) / np.std(x_test[:, :, :, c])
    return x_train, x_test


def data_augmentation(batch):
    """Random horizontal flip followed by a 4-pixel-padded random 32x32 crop."""
    batch = _random_flip_leftright(batch)
    batch = _random_crop(batch, [32, 32], 4)
    return batch
3,680
1,556
"""
User models module
"""
from sqlalchemy import Column, Integer, String

from app.models import Base


class User(Base):
    """User class"""

    # NOTE(review): no __tablename__ here -- presumably supplied by the
    # project's Base (e.g. via declared_attr); confirm before reuse.
    id: int = Column(Integer, primary_key=True, index=True)
    firstname: str = Column(String(50), nullable=False, index=True)
    lastname: str = Column(String(50), nullable=False, index=True)
    email: str = Column(String(100), nullable=False)  # not unique -- intentional? verify
    username: str = Column(String(50), nullable=False, unique=True, index=True)
    password: str = Column(String(100), nullable=False)  # presumably a stored hash -- confirm
530
177
from pathlib import Path

from fhir.resources.valueset import ValueSet as _ValueSet

from oops_fhir.utils import ValueSet

from oops_fhir.r4.code_system.v3_role_code import v3RoleCode


__all__ = ["ParentRelationshipCodes"]

# The resource definition is loaded from the sibling JSON file with the
# same stem as this module.
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))


class ParentRelationshipCodes(ValueSet):
    """
    Parent Relationship Codes

    The value set includes the v3 RoleCode PRN (parent), TWIN (twin) and
    all of their specializations.  It covers the relationships needed to
    establish genetic pedigree relationships between family members.

    Status: draft - Version: 4.0.1

    http://hl7.org/fhir/ValueSet/parent-relationship-codes
    """

    # TODO: fix this template issue1
    pass

    class Meta:
        # Binds the parsed FHIR resource to this ValueSet class.
        resource = _resource
782
254
counter = 0
interpolations = None
padding = None


def AddCounter(tag):
    """Increment the module-level counter (the tag argument is unused)."""
    global counter
    # print("\nSetting:\n" + tag + ": " + str(counter) + "\n")
    counter = counter + 1


def SetPad(pad):
    """Store `pad` as the module-level padding value."""
    global padding
    padding = pad


def GetPad():
    """Return the module-level padding value (None until SetPad is called)."""
    return padding
253
88
"""Code related to ``django-plugins``.

First, it creates a ``ProjectAppPluginPoint`` for the ``bgjobs`` app.
Second, it creates a new plugin point for the registering ``BackgroundJob``
specializations.
"""

from djangoplugins.point import PluginPoint
from projectroles.plugins import ProjectAppPluginPoint

from .urls import urlpatterns


class ProjectAppPlugin(ProjectAppPluginPoint):
    """Plugin for registering app with the ``ProjectAppPluginPoint`` from the
    ``projectroles`` app."""

    name = 'bgjobs'
    title = 'Background Jobs'
    urls = urlpatterns
    # Font-awesome icon name shown in the project UI.
    icon = 'tasks'
    entry_point_url_id = 'bgjobs:list'
    description = 'Jobs executed in the background'
    #: Required permission for accessing the app
    app_permission = 'bgjobs.view_data'
    #: Enable or disable general search from project title bar
    search_enable = False
    #: List of search object types for the app
    search_types = []
    #: Search results template
    search_template = None
    #: App card template for the project details page
    details_template = 'bgjobs/_details_card.html'
    #: App card title for the project details page
    details_title = 'Background Jobs App Overview'
    #: Position in plugin ordering
    plugin_ordering = 100


class BackgroundJobsPluginPoint(PluginPoint):
    """Definition of a plugin point for registering background job types with
    the ``bgjobs`` app."""

    #: Mapping from job specialization name to specialization class
    #  (OneToOneField "inheritance").
    job_specs = {}
1,540
438
import discord as nextcord
import asyncio
from discord.ext import commands
import json
import time
import typing


def log(*, text):
    # Placeholder logger -- currently a no-op.
    ...


class AutoMod(commands.Cog):
    """Auto-moderation cog: anti-caps filtering, spam muting and channel
    lockdowns, with per-guild settings persisted in config.json."""

    def __init__(self, bot):
        self.bot = bot
        # 5 messages per 5 seconds per member before the spam mute triggers.
        self._cd = commands.CooldownMapping.from_cooldown(5, 5, commands.BucketType.member)  # Change accordingly

    def get_ratelimit(self, message: nextcord.Message) -> typing.Optional[int]:
        """Returns the ratelimit left"""
        bucket = self._cd.get_bucket(message)
        # None while under the limit; retry-after seconds once exceeded.
        return bucket.update_rate_limit()

    @commands.Cog.listener()
    async def on_message(self, message):
        # Ignore other bots entirely.
        if message.author.bot:
            return
        # NOTE(review): config.json is re-read on every message -- consider caching.
        with open("config.json") as f:
            config = json.load(f)
        # NOTE(review): messages with no letters (e.g. "123!") also satisfy
        # content == content.upper() and are treated as all-caps.
        if message.content == message.content.upper():
            print("ALL CAPS")
            if config[str(message.guild.id)]["ancap"] == True and not str(message.channel.id) in config[str(message.guild.id)]["whitelists"]:
                await message.delete()
                await message.author.send("Please don't spam Capital letters")
        ratelimit = self.get_ratelimit(message)
        if ratelimit is None:
            ...
        else:
            # Cooldown exceeded: create/reuse the mute role, apply it for two
            # minutes, announce it, then remove it again.
            role = nextcord.utils.get(message.guild.roles, name="MUTED (By Midna)")
            if not role:
                role = await message.guild.create_role(name="MUTED (By Midna)", permissions=nextcord.Permissions(send_messages=False, read_messages=True))
                await role.edit(position=2)
                for c in message.guild.categories:
                    await c.set_permissions(role, send_messages=False)
            await message.author.add_roles(role)
            embed = nextcord.Embed(title="🔇 Member silenced | 2m")
            embed.add_field(name="Reason", value="Message Spam")
            embed.set_footer(text=f'{message.author} | {message.author.id}')
            await message.channel.send(embed=embed)
            # NOTE(review): sleeping inside on_message holds this handler for
            # 2 minutes per offender -- confirm this is acceptable.
            await asyncio.sleep(120)
            await message.author.remove_roles(role)

    @commands.command()
    async def anticaps(self, ctx, enabled: bool = False):
        """Toggle the all-caps filter for this guild."""
        with open("config.json") as f:
            config = json.load(f)
        config[str(ctx.guild.id)]["ancap"] = enabled
        with open("config.json", "w+") as f:
            json.dump(config, f)
        embed = nextcord.Embed(color=nextcord.Color.green())
        embed.description = f':white_check_mark: Anti Caps is now set to {enabled}!'
        await ctx.send(embed=embed)

    @commands.command(help="Open the Lockdown")
    @commands.has_permissions(manage_channels=True)
    async def openlockdown(self, ctx):
        # Lift the slowmode set by `lockdown`.
        with open("config.json") as f:
            config = json.load(f)
        await ctx.channel.edit(slowmode_delay=0)
        await ctx.send("This channel is no longer under lockdown")

    @commands.command(help="Starts a Lockdown in the current channel")
    @commands.has_permissions(manage_channels=True)
    async def lockdown(self, ctx):
        # Apply the guild's configured emergency slowmode to this channel.
        with open("config.json") as f:
            config = json.load(f)
        await ctx.channel.edit(slowmode_delay=config[str(ctx.guild.id)]["emergencyLock"])
        await ctx.send("This channel is now under lockdown")

    @commands.command(help="Set the Rate Limit, a channel will be put into upon being spammed")
    @commands.has_permissions(manage_channels=True)
    async def emratelimit(self, ctx, rate=60):
        with open("config.json") as f:
            config = json.load(f)
        config[str(ctx.guild.id)]["emergencyLock"] = rate
        with open("config.json", "w+") as f:
            json.dump(config, f)
        embed = nextcord.Embed(color=nextcord.Color.green())
        embed.description = f':white_check_mark: Emergency Member Rate limit is now set to {rate}!'
        await ctx.send(embed=embed)

    @commands.command(help="The Threshold of how many messages a user can send before its detected as spam")
    @commands.has_permissions(manage_channels=True)
    async def empanicrate(self, ctx, rate=5):
        with open("config.json") as f:
            config = json.load(f)
        config[str(ctx.guild.id)]["panicRate"] = rate
        with open("config.json", "w+") as f:
            json.dump(config, f)
        embed = nextcord.Embed(color=nextcord.Color.green())
        # NOTE(review): this confirmation says "Rate limit" but the command
        # sets panicRate -- looks like a copy-paste; confirm wording.
        embed.description = f':white_check_mark: Emergency Member Rate limit is now set to {rate}!'
        await ctx.send(embed=embed)


def setup(bot):
    # Standard discord.py extension entry point.
    bot.add_cog(AutoMod(bot))
4,562
1,463
# Adam
# A program that reads in a text
# file and outputs the number of e's it contains
# The program takes the filename from
# an argument on the command line.
# NOTE(review): the header says "argument on the command line" but the
# filename is actually read interactively via input() -- confirm intent.

# I found information on this website:
# https://www.sanfoundry.com/python-program-read-file-counts-number/

# Requirement for this assignment is to only print
# the occurrences of the letter "e".

fname = input("Enter file name: ")

# str.count performs the per-character comparison in C; splitting each line
# into words first (as the original did) was unnecessary, since whitespace
# contains no "e" -- the total is identical.
e = 0
with open(fname, "r") as f:
    for line in f:
        e += line.count("e")

print(e)
960
315
""" Tests for tinypages build using sphinx extensions """

from os.path import (join as pjoin, dirname, isdir)

import sphinx
# The node layout for unresolved math references changed in Sphinx 1.5.
SPHINX_ge_1p5 = sphinx.version_info[:2] >= (1, 5)

from sphinxtesters import PageBuilder

HERE = dirname(__file__)
PAGES = pjoin(HERE, 'tinypages')

from texext.tests.test_plotdirective import format_math_block


def _pdiff(str1, str2):
    # For debugging
    from difflib import ndiff
    print(''.join(ndiff(str1.splitlines(True), str2.splitlines(True))))


class TestTinyPages(PageBuilder):
    # Test build and output of tinypages project
    page_source_template = PAGES

    def test_some_math(self):
        # Sanity check: the project built and produced output/doctree dirs.
        assert isdir(self.out_dir)
        assert isdir(self.doctree_dir)
        doctree = self.get_doctree('some_math')
        assert len(doctree.document) == 1
        tree_str = self.doctree2str(doctree)
        # The expected markup for an equation back-reference depends on the
        # Sphinx version (pending_xref vs eqref nodes).
        if SPHINX_ge_1p5:
            back_ref = (
                '<paragraph>Refers to equation at '
                '<pending_xref refdoc="some_math" refdomain="math" '
                'refexplicit="False" reftarget="some-label" '
                'reftype="eq" refwarn="True">'
                '<literal classes="xref eq">some-label</literal>'
                '</pending_xref>')
        else:
            back_ref = (
                '<paragraph>Refers to equation at '
                '<eqref docname="some_math" '
                'target="some-label">(?)</eqref>')
        expected = (
            '<title>Some math</title>\n'
            '<paragraph>Here <math latex="a = 1"/>, except '
            '<title_reference>$b = 2$</title_reference>.</paragraph>\n'
            '<paragraph>Here <math latex="c = 3"/>, except '
            '<literal>$d = 4$</literal>.</paragraph>\n'
            '<literal_block xml:space="preserve">'
            'Here $e = 5$</literal_block>\n'
            '<bullet_list bullet="*">'
            '<list_item>'
            '<paragraph>'
            'A list item containing\n'
            '<math latex="f = 6"/> some mathematics.'
            '</paragraph>'
            '</list_item>'
            '<list_item>'
            '<paragraph>'
            'A list item containing '
            '<literal>a literal across\nlines</literal> '
            'and also <math latex="g = 7"/> some mathematics.'
            '</paragraph>'
            '</list_item>'
            '</bullet_list>\n'
            + format_math_block('some_math', "10 a + 2 b + q") +
            '\n<paragraph>More text</paragraph>\n'
            '<target refid="equation-some-label"/>\n'
            + format_math_block(
                'some_math',
                "5 a + 3 b",
                label='some-label',
                number='1',
                ids='equation-some-label') +
            '\n<paragraph>Yet more text</paragraph>\n'
            + format_math_block(
                "some_math",
                latex="5 w + 3 x") +
            '\n' +
            r'<paragraph>Math with <math latex="\beta"/> a backslash.'
            '</paragraph>\n'
            '<paragraph>'
            # What happens to backslashes?
            'A protected whitespace with <math latex="dollars"/>.'
            '</paragraph>\n'
            '<paragraph>'
            'Some * asterisks *. <math latex="dollars"/>. '
            r'A line break. Protected \ backslash. '
            'Protected n in <math latex="a"/> line.</paragraph>\n'
            # Do labels get set as targets?
            + back_ref + '.</paragraph>')
        assert tree_str == expected


class TestTopLevel(TestTinyPages):
    # Test we can import math_dollar with just `texext`

    @classmethod
    def modify_source(cls):
        # Strip the mathcode extension and replace the explicit math_dollar
        # entry with the bare top-level package name before the build runs.
        conf_fname = pjoin(cls.page_source, 'conf.py')
        with open(conf_fname, 'rt') as fobj:
            contents = fobj.read()
        contents = contents.replace("'texext.mathcode',\n", "")
        contents = contents.replace("'texext.math_dollar'", "'texext'")
        with open(conf_fname, 'wt') as fobj:
            fobj.write(contents)
3,936
1,214
import logging

from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.http import HttpRequest
from rest_framework.exceptions import NotFound
from rest_framework.test import APIRequestFactory
from rest_framework.views import exception_handler, APIView
from typing import List, TypeVar

logger = logging.getLogger(__name__)

T = TypeVar('T')

# Models whose instances must be shared by reference rather than copied.
NON_CLONEABLE_MODELS: List[str] = [
    'User',
]


@transaction.atomic
def clone_instance(instance: T) -> T:
    """
    Clone any django model instance and its related instances recursively
    Ignore many-to-many or one-to-many relationship (reverse foreign key)
    Also ignore user model

    ref:
    https://docs.djangoproject.com/en/2.2/ref/models/fields/#attributes-for-fields-with-relations
    https://github.com/jackton1/django-clone/blob/master/model_clone/mixins/clone.py
    """
    # initialize a new instance
    cloned_instance = instance.__class__()
    fields = instance._meta.get_fields()
    for field in fields:
        # only clone one-to-one or forward foreign key relationship
        # ignore many-to-many or reverse foreign key relationship
        if field.one_to_one or field.many_to_one:
            _related = getattr(instance, field.name)
            # skip if related instance is None
            if _related is None:
                continue
            # use the same reference for non-cloneable related models
            if field.related_model.__name__ in NON_CLONEABLE_MODELS:
                setattr(cloned_instance, field.name, _related)
            else:
                # NOTE(review): recursion has no cycle guard -- a FK cycle
                # between models would recurse forever; confirm the schema
                # is acyclic for all cloned models.
                _cloned_related = clone_instance(_related)
                setattr(cloned_instance, field.name, _cloned_related)
        # simply copy the value for those non-relation fields
        if not field.is_relation:
            _value = getattr(instance, field.name)
            setattr(cloned_instance, field.name, _value)
    # set primary key as None to save a new record in DB
    cloned_instance.pk = None
    cloned_instance.save()
    return cloned_instance


def exception_logging_handler(exc: Exception, context: dict):
    """
    Intercept DRF error handler to log the error message

    Update the REST_FRAMEWORK setting in settings.py to use this handler
    REST_FRAMEWORK = {
        'EXCEPTION_HANDLER': 'core.exception_logging_handler',
    }
    """
    logger.warning(exc)
    # translate uncaught Django ObjectDoesNotExist exception to NotFound
    if isinstance(exc, ObjectDoesNotExist):
        logger.error(f'uncaught ObjectDoesNotExist error: {exc} - {context}')
        exc = NotFound(str(exc))
    # follow DRF default exception handler
    response = exception_handler(exc, context)
    return response


def make_drf_request(request: HttpRequest = None, headers: dict = None):
    """
    The request object made by APIRequestFactory is `WSGIRequest` which doesn't
    have `.query_params` or `.data` method as recommended by DRF. It only gets
    "upgraded" to DRF `Request` class after passing through the `APIView`,
    which invokes `.initialize_request` internally. This helper method uses a
    dummy API view to return a DRF `Request` object for testing purpose.

    Ref:
    https://stackoverflow.com/questions/28421797/django-rest-framework-apirequestfactory-request-object-has-no-attribute-query-p
    https://github.com/encode/django-rest-framework/issues/3608
    """
    class DummyView(APIView):
        pass

    if request is None:
        # use a default request
        request = APIRequestFactory().get('/')
    drf_request = DummyView().initialize_request(request)
    if headers:
        drf_request.headers = headers
    return drf_request
3,708
1,058
# Splits the input into words beginning at uppercase letters, sorts them,
# then prints each word with its first and last letter uppercased.
S = input()
arr = []
now = []
counter = 0
for s in S:
    now.append(s.lower())
    if s.isupper():
        if counter == 0:
            # First uppercase seen: treat it as the start of the first word
            # rather than as a boundary.
            counter += 1
        else:
            # NOTE(review): the uppercase char was already appended above, so
            # it ends up at the END of the previous word -- looks like an
            # off-by-one; confirm against the intended problem statement.
            arr.append(''.join(now))
            now = []
            counter = 0
# NOTE(review): whatever remains in `now` after the loop is never flushed
# into `arr` -- the final word appears to be dropped; verify intent.
arr.sort()
for word in arr:
    for i, s in enumerate(word):
        if i == 0 or i == len(word) - 1:
            print(s.upper(), end='')
        else:
            print(s, end='')
    print()
436
150
from lesson12_projects.house3.data.const import E_TURNED_KNOB, MSG_TURN_KNOB, E_FAILED


class OutState:
    """Game state for standing outside the house.

    The only correct move from here is sending the 'Turn knob' message.
    """

    def update(self, req):
        self.on_entry(req)
        # Read the player's next input.
        msg = self.on_trigger(req)
        # We are outside; anything other than 'Turn knob' is a failure.
        if msg != MSG_TURN_KNOB:
            self.on_failed(req)
            return E_FAILED
        self.on_turned_knob(req)
        return E_TURNED_KNOB

    def on_entry(self, req):
        # Describe the scene to the connected client.
        req.c_sock.send(
            """You can see the house.
You can see the close knob.""".encode()
        )

    def on_trigger(self, req):
        return req.pull_trigger()

    def on_turned_knob(self, req):
        pass

    def on_failed(self, req):
        pass
738
289
# -*- coding: utf-8 -*- """setup.py""" import os import sys from setuptools import setup from setuptools.command.test import test as TestCommand class Tox(TestCommand): user_options = [('tox-args=', 'a', 'Arguments to pass to tox')] def initialize_options(self): TestCommand.initialize_options(self) self.tox_args = None def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): import tox import shlex if self.tox_args: errno = tox.cmdline(args=shlex.split(self.tox_args)) else: errno = tox.cmdline(self.test_args) sys.exit(errno) def read_content(filepath): with open(filepath) as fobj: return fobj.read() classifiers = [ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", ] long_description = ( read_content("README.rst") + read_content(os.path.join("docs/source", "CHANGELOG.rst"))) requires = ['setuptools', 'typeguard==2.5.0', 'pyspark==3.0.1', 'findspark'] extras_require = { 'reST': ['Sphinx'], } if os.environ.get('READTHEDOCS', None): extras_require['reST'].append('recommonmark') setup(name='pysequila', version=os.getenv('VERSION', '0.1.0'), description='An SQL-based solution for large-scale genomic analysis', long_description=long_description, long_description_content_type='text/x-rst', author='biodatageeks', author_email='team@biodatageeks.org', url='https://pysequila.biodatageeks.org', classifiers=classifiers, packages=['pysequila'], data_files=[], install_requires=requires, include_package_data=True, extras_require=extras_require, tests_require=['tox'], cmdclass={'test': Tox},)
2,171
722
from sys import stdin, stdout
from struct import pack, unpack


def float2half(float_val):
    """Convert a Python float to IEEE 754 binary16 bits (as an int).

    Only +/-0 are special-cased; NaN/Inf and subnormals are not handled,
    matching the original implementation.
    """
    f = unpack('I', pack('f', float_val))[0]
    if f == 0:
        return 0
    if f == 0x80000000:
        return 0x8000
    # sign | rebased exponent (bias 127 -> 15) | top 10 mantissa bits
    return ((f >> 16) & 0x8000) | ((((f & 0x7f800000) - 0x38000000) >> 13) & 0x7c00) | ((f >> 13) & 0x03ff)


def half2float(h):
    """Inverse of float2half: binary16 bits -> Python float.

    NOTE: returns the *bit pattern* 0x80000000 (an int) for negative zero,
    mirroring the original's asymmetric special case.
    """
    if h == 0:
        return 0
    if h == 0x8000:
        return 0x80000000
    f = ((h & 0x8000) << 16) | (((h & 0x7c00) + 0x1C000) << 13) | ((h & 0x03FF) << 13)
    return unpack('f', pack('I', f))[0]


def blob_pack_vertex_attr(attr, v, little_endian=True):
    """Pack one attribute value `v` according to the attribute spec `attr`.

    `attr` is a (count, type, signed, bits, padding) tuple.  BUG FIX: the
    original declared this with Python 2 tuple-unpacking parameters
    (``def f((count, typ, ...), v)``), which is a SyntaxError on Python 3;
    callers pass the same tuple as before.
    """
    count, typ, signed, bits, padding = attr
    assert typ is int or typ is float
    assert len(padding) == count
    assert len(v) <= count
    # Fill missing trailing components from the padding template.
    pad = v + padding[len(v):]
    fmt = None
    if typ is float:
        float_fmts = {16: 'H', 32: 'f', 64: 'd'}
        if bits == 16:
            # Half floats are packed as raw uint16 bit patterns.
            pad = [float2half(x) for x in pad]
        fmt = float_fmts[bits]
    else:
        int_fmts = {8: 'b', 16: 'h', 32: 'i', 64: 'q'}
        uint_fmts = {8: 'B', 16: 'H', 32: 'I', 64: 'Q'}
        fmt = int_fmts[bits] if signed else uint_fmts[bits]
    return pack(('<' if little_endian else '>') + fmt * count, *pad)


def blob_vertices(attrs, verts, little_endian=True):
    """Yield packed bytes for each attribute of each vertex."""
    for v in verts:
        for (attr, data) in zip(attrs, v):
            yield blob_pack_vertex_attr(attr, data, little_endian)


def blob_indices(indices, restart=None, little_endian=True):
    """Yield uint16-packed indices, inserting `restart` after each primitive."""
    fmt = ('<' if little_endian else '>') + 'H'
    for primitive in indices:
        for index in primitive:
            yield pack(fmt, index)
        if restart is not None:
            yield pack(fmt, restart)


def blob_vertex_write(attrs, verts, out=stdout, little_endian=True):
    """Write packed vertices to `out` (must be a binary stream on Python 3)."""
    for blob in blob_vertices(attrs, verts, little_endian):
        out.write(blob)


def blob_vertex_save(filename, attrs, verts, little_endian=True):
    """Write packed vertices to `filename`, unbuffered."""
    # BUG FIX: 'wb0' was a Python 2 mode string; Python 3 takes the
    # buffering flag as a separate argument.
    with open(filename, 'wb', buffering=0) as f:
        blob_vertex_write(attrs, verts, f, little_endian)


def blob_index_write(indices, out=stdout, restart=None, little_endian=True):
    """Write packed indices to `out` (must be a binary stream on Python 3)."""
    for blob in blob_indices(indices, restart, little_endian):
        out.write(blob)


def blob_index_save(filename, indices, restart=None, little_endian=True):
    """Write packed indices to `filename`, unbuffered."""
    with open(filename, 'wb', buffering=0) as f:
        blob_index_write(indices, f, restart, little_endian)
2,266
913
from config import *
from template import *
from dictasobject import DictAsObject


class RemoteFileHelper:
    """Factory for the various remote file/config loaders of a service.

    `self.config` exposes the config-loader factories as attributes
    (ini/parser/shellvars/whitespace) via DictAsObject.
    """

    def __init__(self, service):
        self.service = service
        self.config = DictAsObject({
            'ini': self.config_ini,
            'parser': self.config_parser,
            'shellvars': self.config_shellvars,
            'whitespace': self.config_whitespace
        })

    def build_local_lpath(self, path):
        """Resolve `path` against the service's local directory.

        Absolute paths and paths not found under local_path are returned
        unchanged.
        """
        if not path:
            return path
        if path and path.startswith('/'):
            return path
        # NOTE(review): `os` is not imported here -- presumably provided by
        # one of the wildcard imports above; confirm.
        if os.path.isfile(self.service.local_path + '/' + path):
            return self.service.local_path + '/' + path
        return path

    def abstract(self, remote_file=None):
        # NOTE(review): build_remote_path is not defined in this class --
        # presumably inherited or mixed in elsewhere; verify.
        return AbstractRemoteLoader(self.service, self.build_remote_path(remote_file))

    def template(self, local_path, remote_path=None, *args, **kwargs):
        return RemoteConfigFileWithTemplate(self.service, self.build_local_lpath(local_path), remote_path, *args, **kwargs)

    def partial(self, local_path, remote_path=None, *args, **kwargs):
        return RemoteConfigFileWithPartial(
            self.service, self.build_local_lpath(local_path), remote_path, *args, **kwargs)

    def config_ini(self, remote_file=None, *args, **kwargs):
        if remote_file:
            remote_file = self.service.normalize_path(remote_file)
        return RemoteConfigIniLoader(self.service, remote_file, *args, **kwargs)

    def config_parser(self, remote_file=None, *args, **kwargs):
        if remote_file:
            remote_file = self.service.normalize_path(remote_file)
        return RemoteConfigParser(self.service, remote_file, *args, **kwargs)

    def config_shellvars(self, remote_file=None, *args, **kwargs):
        if remote_file:
            remote_file = self.service.normalize_path(remote_file)
        return RemoteShellVarsLoader(self.service, remote_file, *args, **kwargs)

    def config_whitespace(self, remote_file=None, *args, **kwargs):
        if remote_file:
            remote_file = self.service.normalize_path(remote_file)
        return RemoteWhitespaceConfigLoader(self.service, remote_file, *args, **kwargs)
2,234
664
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import matplotlib.pyplot as plt

import architecture.default
from architecture.default import Defender

# Training switches / hyper-parameters (module-level constants).
DEBUG = False
BATCH_SIZE = 32
FIXED_POLICY = False    # True: sample one policy before the rollout; False: re-sample each step
NORMALIZE = False
K = 10                  # warm-up steps before the loss starts accumulating
PENALTY = 10            # flat loss penalty added when |x_target| exceeds MAX_TARGET_POS
MAX_TARGET_POS = 10

torch.set_default_tensor_type(torch.DoubleTensor)


class Attacker(architecture.default.Attacker):
    """ NN architecture for the attacker """

    def __init__(self, model, n_hidden_layers, layer_size, n_coeff, noise_size):
        super(Attacker, self).__init__(model, n_hidden_layers, layer_size, n_coeff, noise_size)

    def forward(self, x):
        # Split the network output into (eps, mu).
        # NOTE(review): both eps and mu are taken from output[1]; eps was
        # presumably meant to be output[0] — confirm before changing.
        output = self.nn(x)
        eps = output[1]
        mu = torch.sigmoid(output[1])
        return eps, mu


class Trainer(architecture.default.Trainer):
    """ The class contains the training logic """

    def __init__(self, world_model, robustness_computer,
                 attacker_nn, defender_nn, lr, logging_dir=None):
        super(Trainer, self).__init__(world_model, robustness_computer,
                                      attacker_nn, defender_nn, lr, logging_dir)

    def train_attacker_step(self, timesteps, dt, atk_static):
        """Run one attacker optimization step over a rollout of *timesteps*.

        Returns the detached per-step average loss.
        """
        self.attacker_optimizer.zero_grad()
        if FIXED_POLICY is True:
            # Sample a single (attacker, defender) policy for the whole rollout.
            z = torch.rand(self.attacker.noise_size)
            oe = torch.tensor(self.model.environment.status)
            oa = torch.tensor(self.model.agent.status)
            x_target = oe[3]
            atk_policy = self.attacker(torch.cat((z, oe)))
            with torch.no_grad():
                # Defender is frozen while training the attacker.
                def_policy = self.defender(oa)
        cumloss = 0.
        if PENALTY:
            # NOTE(review): previous_def_policy is assigned but never used.
            previous_def_policy = torch.zeros_like(self.defender(torch.tensor(self.model.agent.status)))
        for t in range(timesteps):
            if FIXED_POLICY is False:
                # Re-sample the policies from the current world state each step.
                z = torch.rand(self.attacker.noise_size)
                oe = torch.tensor(self.model.environment.status)
                oa = torch.tensor(self.model.agent.status)
                x_target = oe[3]
                atk_policy = self.attacker(torch.cat((z, oe)))
                with torch.no_grad():
                    def_policy = self.defender(oa)
            self.model.step(atk_policy, def_policy, dt)
            if t > K:
                # Accumulate the robustness-based loss only after warm-up.
                rho = self.robustness_computer.compute(self.model)
                cumloss += self.attacker_loss_fn(rho)
            # Flat penalty whenever the target position leaves the allowed band.
            if torch.abs(x_target) >= MAX_TARGET_POS:
                cumloss += PENALTY
        cumloss.backward()
        self.attacker_optimizer.step()
        if DEBUG:
            print(self.attacker.state_dict()["nn.0.bias"])
        return cumloss.detach() / timesteps

    def train_defender_step(self, timesteps, dt, atk_static):
        """Run one defender optimization step over a rollout of *timesteps*.

        Mirrors train_attacker_step with the roles of the networks swapped:
        here the attacker is frozen (no_grad) and the defender keeps grads.
        Returns the detached per-step average loss.
        """
        self.defender_optimizer.zero_grad()
        if FIXED_POLICY is True:
            z = torch.rand(self.attacker.noise_size)
            oe = torch.tensor(self.model.environment.status)
            oa = torch.tensor(self.model.agent.status)
            x_target = oa[3]
            with torch.no_grad():
                atk_policy = self.attacker(torch.cat((z, oe)))
            def_policy = self.defender(oa)
        cumloss = 0.
        if PENALTY:
            # NOTE(review): previous_def_policy is assigned but never used.
            previous_def_policy = torch.zeros_like(self.defender(torch.tensor(self.model.agent.status)))
        for t in range(timesteps):
            if FIXED_POLICY is False:
                z = torch.rand(self.attacker.noise_size)
                oe = torch.tensor(self.model.environment.status)
                oa = torch.tensor(self.model.agent.status)
                x_target = oa[3]
                with torch.no_grad():
                    atk_policy = self.attacker(torch.cat((z, oe)))
                def_policy = self.defender(oa)
            self.model.step(atk_policy, def_policy, dt)
            if t > K:
                rho = self.robustness_computer.compute(self.model)
                cumloss += self.defender_loss_fn(rho)
            if torch.abs(x_target) >= MAX_TARGET_POS:
                cumloss += PENALTY
        cumloss.backward()
        self.defender_optimizer.step()
        if DEBUG:
            print(self.defender.state_dict()["nn.0.bias"])
            # make_dot(def_input, self.defender.named_parameters(), path=self.logging_dir)
        return cumloss.detach() / timesteps
4,400
1,458
# Simple pygame Tic-Tac-Toe: the player draws crosses (1), the computer
# answers with random circles (2).  Board cells are indexed 0..8, row-major.
import pygame
import random
from time import sleep

# Color constants (RGB).
white = (255, 255, 255)
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)

pygame.init()
largura = 320   # window width
altura = 320    # window height
fundo = pygame.display.set_mode((largura, altura))
pygame.display.set_caption("TicTacToe")


def texto(msg, cor, tam, x, y):
    """Render *msg* at (x, y) with color *cor* and font size *tam*."""
    fonte = pygame.font.SysFont(None, tam)
    texto1 = fonte.render(msg, True, cor)
    fundo.blit(texto1, [x, y])


def circulo(centro):
    """Draw the computer's circle in board cell *centro* (0..8).

    The cell index is mapped to a pixel x-coordinate (multiples of 53);
    the row fixes the y-coordinate (53 / 160 / 266).
    """
    if centro == 0 or centro == 1 or centro == 2:
        if centro == 0:
            centro = 53 * (centro + 1)
        if centro == 1:
            centro = 53 * (centro + 2)
        if centro == 2:
            centro = 53 * (centro + 3)
        pos_circulo = (centro, 53)
    if centro == 3 or centro == 4 or centro == 5:
        if centro == 3:
            centro = 53 * (centro - 2)
        if centro == 4:
            centro = 53 * (centro - 1)
        if centro == 5:
            centro = 53 * centro
        pos_circulo = (centro, 160)
    if centro == 6 or centro == 7 or centro == 8:
        if centro == 6:
            centro = 53 * (centro - 5)
        if centro == 7:
            centro = 53 * (centro - 4)
        if centro == 8:
            centro = 53 * (centro - 3)
        pos_circulo = (centro, 266)
    pygame.draw.circle(fundo, black, pos_circulo, 30)


def cruz(cruzx, cruzy):
    """Draw the player's X with top-left corner at (cruzx, cruzy)."""
    pygame.draw.line(fundo, black, (cruzx, cruzy), (cruzx + 35, cruzy + 35))
    pygame.draw.line(fundo, black, (cruzx + 35, cruzy), (cruzx, cruzy + 35))


def cerca():
    """Draw the 3x3 board grid lines."""
    pygame.draw.line(fundo, black, (106, 0), (106, altura))
    pygame.draw.line(fundo, black, (212, 0), (212, altura))
    pygame.draw.line(fundo, black, (0, 106), (largura, 106))
    pygame.draw.line(fundo, black, (0, 212), (largura, 212))


def endgame():
    """Check all 8 winning lines and the draw condition.

    Sets the module-level flags: fimdejogo (game over), trava (block the
    computer's move) and resultado (1 = player win, 2 = computer win,
    3 = draw).
    """
    global fimdejogo
    global resultado
    global trava
    # Rows
    if matriz[0] == 1 and matriz[1] == 1 and matriz[2] == 1 or matriz[0] == 2 and matriz[1] == 2 and matriz[2] == 2:
        fimdejogo = True
        trava = False
        if matriz[0] == 1:
            resultado = 1
        else:
            resultado = 2
    if matriz[3] == 1 and matriz[4] == 1 and matriz[5] == 1 or matriz[3] == 2 and matriz[4] == 2 and matriz[5] == 2:
        fimdejogo = True
        trava = False
        if matriz[3] == 1:
            resultado = 1
        else:
            resultado = 2
    if matriz[6] == 1 and matriz[7] == 1 and matriz[8] == 1 or matriz[6] == 2 and matriz[7] == 2 and matriz[8] == 2:
        fimdejogo = True
        trava = False
        if matriz[6] == 1:
            resultado = 1
        else:
            resultado = 2
    # Columns
    if matriz[0] == 1 and matriz[3] == 1 and matriz[6] == 1 or matriz[0] == 2 and matriz[3] == 2 and matriz[6] == 2:
        fimdejogo = True
        trava = False
        if matriz[6] == 1:
            resultado = 1
        else:
            resultado = 2
    if matriz[1] == 1 and matriz[4] == 1 and matriz[7] == 1 or matriz[1] == 2 and matriz[4] == 2 and matriz[7] == 2:
        fimdejogo = True
        trava = False
        if matriz[1] == 1:
            resultado = 1
        else:
            resultado = 2
    if matriz[2] == 1 and matriz[5] == 1 and matriz[8] == 1 or matriz[2] == 2 and matriz[5] == 2 and matriz[8] == 2:
        fimdejogo = True
        trava = False
        if matriz[2] == 1:
            resultado = 1
        else:
            resultado = 2
    # Diagonals
    if matriz[0] == 1 and matriz[4] == 1 and matriz[8] == 1 or matriz[0] == 2 and matriz[4] == 2 and matriz[8] == 2:
        fimdejogo = True
        trava = False
        if matriz[0] == 1:
            resultado = 1
        else:
            resultado = 2
    if matriz[2] == 1 and matriz[4] == 1 and matriz[6] == 1 or matriz[2] == 2 and matriz[4] == 2 and matriz[6] == 2:
        fimdejogo = True
        trava = False
        if matriz[2] == 1:
            resultado = 1
        else:
            resultado = 2
    # Draw: no empty cells and no winner yet.
    vaziu = 0
    for c in range(0, len(matriz)):
        if matriz[c] == 0:
            vaziu += 1
    if vaziu == 0:
        if resultado != 1 and resultado != 2:
            fimdejogo = True
            resultado = 3
    vaziu = 0


# --- Game state initialization ---
game = True         # main-loop flag
fimdejogo = False   # game-over screen flag
evento = True       # waiting-for-input flag
trava = True        # computer allowed to move this turn
resultado = 0       # 0 none, 1 player, 2 computer, 3 draw
mousex = -1         # -1 marks "no click yet"
mousey = 0
fundo.fill(white)
cerca()
matriz = [0, 0, 0, 0, 0, 0, 0, 0, 0]  # board: 0 empty, 1 player, 2 computer
pygame.display.update()

while game:
    # Game-over screen: show the result, wait for C (restart) or S (quit).
    while fimdejogo:
        sleep(0.5)
        fundo.fill(white)
        texto('Fim de Jogo', red, 50, 65, 30)
        if resultado == 1:
            texto('Vitoria!!!', black, 30, 70, 80)
        if resultado == 3:
            texto('Velha', black, 30, 70, 80)
        if resultado == 2:
            texto('Derrota!!', black, 30, 70, 80)
        pygame.draw.rect(fundo, black, [45, 120, 135, 27])
        texto('Continuar(C)', white, 30, 50, 125)
        pygame.draw.rect(fundo, black, [190, 120, 75, 27])
        texto('Sair(S)', white, 30, 195, 125)
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                game = False
                fimdejogo = False
                trava = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_c:
                    # Full state reset for a new game.
                    game = True
                    fimdejogo = False
                    evento = True
                    trava = True
                    resultado = 0
                    mousex = -1
                    mousey = 0
                    fundo.fill(white)
                    cerca()
                    matriz = [0, 0, 0, 0, 0, 0, 0, 0, 0]
                    pygame.display.update()
                if event.key == pygame.K_s:
                    game = False
                    fimdejogo = False
                    evento = False
                    trava = False
    # Block until the player clicks (or quits).
    while evento:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                game = False
                evento = False
                trava = False
            if event.type == pygame.MOUSEBUTTONDOWN:
                mousex = pygame.mouse.get_pos()[0]
                mousey = pygame.mouse.get_pos()[1]
                evento = False
    evento = True
    # Map the click coordinates to a board cell and place the player's X.
    if mousex < 106 and mousey < 106 and mousex != -1 and matriz[0] == 0:
        cruz(35, 35)
        matriz[0] = 1
    if mousex < 212 and mousex > 106 and mousey < 106 and matriz[1] == 0:
        cruz(141, 35)
        matriz[1] = 1
    if mousex < 320 and mousex > 212 and mousey < 106 and matriz[2] == 0:
        cruz(247, 35)
        matriz[2] = 1
    if mousex < 106 and mousey > 106 and mousey < 212 and matriz[3] == 0:
        cruz(35, 141)
        matriz[3] = 1
    if mousex < 212 and mousex > 106 and mousey < 212 and mousey > 106 and matriz[4] == 0:
        cruz(141, 141)
        matriz[4] = 1
    if mousex < 320 and mousex > 212 and mousey < 212 and mousey > 106 and matriz[5] == 0:
        cruz(247, 141)
        matriz[5] = 1
    if mousex < 106 and mousey < 320 and mousey > 212 and matriz[6] == 0:
        cruz(35, 247)
        matriz[6] = 1
    if mousex < 212 and mousex > 106 and mousey < 320 and mousey > 212 and matriz[7] == 0:
        cruz(141, 247)
        matriz[7] = 1
    if mousex < 320 and mousex > 212 and mousey < 320 and mousey > 212 and matriz[8] == 0:
        cruz(247, 247)
        matriz[8] = 1
    endgame()
    pygame.display.update()
    sleep(0.5)
    # Computer turn: pick random empty cells until one is found.
    if trava:
        while True:
            jogada = random.randint(0, 8)
            if matriz[jogada] == 0:
                circulo(jogada)
                matriz[jogada] = 2
                break
            else:
                if 0 in matriz:
                    jogada = random.randint(0, 8)
                else:
                    break
        endgame()
        pygame.display.update()
    pygame.display.update()
7,785
3,049
#/usr/bin/env python # -*- coding: Utf8 -*- import event class Plugin: def __init__(self, client): self.client = client self.notices = {} #:nisay!~nisay@53.ip-192-99-70.net PRIVMSG #testbobot :!notice user message @event.privmsg() def get_notice(self, e): target = e.values['target'] msg = e.values['msg'][1:] nick = e.values['nick'] if nick == self.client.nick_name: return if msg == '!notice': self.help(target) if target == self.client.nick_name: message = "You can only send a notice on a channel !" self.client.priv_msg(nick, message) return elif msg[0:7] == '!notice': try: (cmd, user, message) = msg.split(' ', 2) except ValueError, e: self.help(target) return if user in self.client.channels[target.lower()].users: message = nick + ": Can\'t you really do that by yourself ? ._." self.client.priv_msg(target, message) else: message = message.strip() notice = 'From ' + nick + ': ' + message if not self.notices.has_key(target): self.notices[target] = {} if not self.notices[target].has_key(user): self.notices[target][user] = [] self.notices[target][user].append(notice) @event.join() def send_notice(self, e): nick = e.values['nick'] if nick[0] in ('&', '~', '+', '@'): nick = nick[1:] chan = e.values['chan'] if nick == self.client.nick_name: return if self.notices.has_key(chan): if self.notices[chan].has_key(nick): message = nick + ": While you were away" self.client.priv_msg(chan, message) for notice in self.notices[chan][nick]: self.client.priv_msg(chan, notice) self.notices[chan].pop(nick) def help(self, target): message = "Notify a user with a message when (s)he reconnects." self.client.priv_msg(target, message) message = "!notice user message" self.client.priv_msg(target, message)
2,332
702
""" Copyright (C) 2012 Alan J Lockett Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" """ Parse .net format for Bayes nets and return a bayes net """ from pyec.config import Config from pyec.distribution.bayes.net import * from pyec.distribution.bayes.structure.proposal import StructureProposal class BayesParser(object): def __init__(self): self.variables = {} self.indexMap = {} self.revIndexMap = {} self.index = 0 def processLine(self, line): line = line.strip() if line == "": return None line = line[1:-1] parts = line.split(" ") if parts[0] == "var": name = parts[1].strip("' ") vals = " ".join(parts[2:]) vals = vals.strip("'()").split(" ") vals = [v.strip("() \t\r\n") for v in vals] vals = [v for v in vals if v != ""] self.variables[name] = {'vals':vals, 'parents':None, 'cpt':None} self.indexMap[name] = self.index self.revIndexMap[self.index] = name self.index += 1 elif parts[0] == "parents": name = parts[1].strip("'").strip() parts2 = line.split("'(") if len(parts2) == 1: parts2 = line.split("(") parstr = parts2[1] cptstr = "(".join(parts2[2:]) else: parstr = parts2[1] cptstr = parts2[2] parents = parstr.strip(") \n").split(" ") parents = [parent for parent in parents if parent != ""] sortedParents = sorted(parents, key=lambda x: self.indexMap[x]) self.variables[name]['parents'] = sortedParents cpt = {} if len(parents) == 0: vals = cptstr[:-2].strip("( )\r\n\t").split(" ") vals = array([float(v) for v in vals][:-1]) cpt[""] = vals else: rows = cptstr[:-2].split("((") for row in rows: row = row.strip(") \r\n\t") if row == "": continue cfg, vals = row.split(")") keys = [c for c in cfg.split(" ") if c != ""] keyStr = [[]] * len(parents) for j, key in enumerate(keys): options = self.variables[parents[j]]['vals'] idx = options.index(key) + 1 keyStr[sortedParents.index(parents[j])] = idx keyStr = ",".join([str(i) for i in array(keyStr)]) vals = vals.strip().split(" ") vals = array([float(v) for v in vals][:-1]) cpt[keyStr] = vals self.variables[name]['cpt'] = cpt else: return False def parse(self, fname): f = open(fname) totalLine = "" done = False 
for line in f: totalLine += line lefts = len(totalLine.split("(")) rights = len(totalLine.split(")")) if lefts == rights: self.processLine(totalLine) totalLine = "" categories = [[]] * self.index for name, idx in self.indexMap.iteritems(): categories[idx] = self.variables[name]['vals'] cfg = Config() cfg.numVariables = len(self.variables) cfg.variableGenerator = MultinomialVariableGenerator(categories) cfg.randomizer = MultinomialRandomizer() cfg.sampler = DAGSampler() cfg.structureGenerator = StructureProposal(cfg) net = BayesNet(cfg) for variable in net.variables: variable.tables = self.variables[self.revIndexMap[variable.index]]['cpt'] #print names[variable.index], self.variables[self.revIndexMap[variable.index]]['parents'] variable.known = [self.indexMap[parent] for parent in self.variables[self.revIndexMap[variable.index]]['parents']] variable.known = sorted(variable.known) variable.parents = dict([(i, net.variables[i]) for i in variable.known]) net.dirty = True net.computeEdgeStatistics() """ for variable in net.variables: print "(var ", self.revIndexMap[variable.index], " (", " ".join(variable.categories[variable.index]), "))" for variable in net.variables: print "(parents ", self.revIndexMap[variable.index], " (", " ".join([self.revIndexMap[i] for i in variable.known]), ") " for key, val in variable.tables.iteritems(): if key == "": expanded = "" else: cfg = array([int(num) for num in key.split(",")]) expanded = " ".join(self.variables[self.revIndexMap[variable.known[k]]]['vals'][c-1] for k,c in enumerate(cfg)) total = val.sum() vals = " ".join([str(i) for i in val]) print "((", expanded, ") ", vals, (1. - total), ")" print ")" """ return net
5,794
1,793
# Ask for the user's full name and report whether it contains "Silva"
# (case-insensitive).  Fix: input() already returns a str in Python 3,
# so the redundant str() wrapper was removed.
nome = input('Digite seu nome completo: ').strip()
if 'silva' in nome.lower():
    print('Sim, seu nome tem Silva.')
else:
    print('Não , seu nome não tem Silva')
169
57
"""empty message Revision ID: 8b664608a7c7 Revises: ec21e19825ff Create Date: 2021-06-01 14:37:20.327189 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '8b664608a7c7' down_revision = 'ec21e19825ff' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('admin_dept', sa.Column('id', sa.Integer(), nullable=False, comment='部门ID'), sa.Column('parent_id', sa.Integer(), nullable=True, comment='父级编号'), sa.Column('dept_name', sa.String(length=50), nullable=True, comment='部门名称'), sa.Column('sort', sa.Integer(), nullable=True, comment='排序'), sa.Column('leader', sa.String(length=50), nullable=True, comment='负责人'), sa.Column('phone', sa.String(length=20), nullable=True, comment='联系方式'), sa.Column('email', sa.String(length=50), nullable=True, comment='邮箱'), sa.Column('status', sa.Integer(), nullable=True, comment='状态(1开启,0关闭)'), sa.Column('remark', sa.Text(), nullable=True, comment='备注'), sa.Column('address', sa.String(length=255), nullable=True, comment='详细地址'), sa.Column('create_at', sa.DateTime(), nullable=True, comment='创建时间'), sa.Column('update_at', sa.DateTime(), nullable=True, comment='创建时间'), sa.PrimaryKeyConstraint('id') ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table('admin_dept') # ### end Alembic commands ###
1,527
608
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader

from .models import Location, Category, Image


def index(request):
    '''Main view function for the start page: show all images.'''
    images = Image.get_images()
    template = loader.get_template('index.html')
    context = {
        'images': images,
    }
    return HttpResponse(template.render(context, request))


def search(request):
    '''View function to search images by category.

    Fixes: the redundant f-string wrapper around search_category and the
    dead `context` dict built (then ignored) in the no-query branch were
    removed; behavior of both responses is unchanged.
    '''
    template = loader.get_template('search.html')
    if 'image' in request.GET and request.GET['image']:
        search_category = request.GET['image']
        searched_images = Image.search_images(search_category)
        context = {
            'message': search_category,
            'images': searched_images,
        }
        return HttpResponse(template.render(context, request))
    # Missing or empty query string: show an explanatory message only.
    message = 'The category does not exist!!'
    return render(request, 'search.html', {'message': message})


def locations(request, region):
    '''View function to list images filtered by a region/location.'''
    template = loader.get_template('location.html')
    region_images = Image.filter_by_location(region)
    context = {
        'images': region_images,
    }
    return HttpResponse(template.render(context, request))
1,393
365
def compute_pay(hours, rate):
    """Return gross pay: straight time up to 40 hours, then 1.5x overtime.

    *hours* and *rate* may be numbers or numeric strings; returns the pay
    as a float, or the string "INVALID ENTRY" if either value cannot be
    converted to a number.

    Fix: the original ignored its parameters and re-read the module-level
    globals x and y, so the function could not be reused with other inputs.
    """
    try:
        hours = float(hours)
        rate = float(rate)
    except ValueError:
        return "INVALID ENTRY"
    if hours <= 40:
        pay = hours * rate
    else:
        # 40 regular hours plus time-and-a-half for the excess.
        pay = 40 * rate + (hours - 40) * 1.5 * rate
    return float(pay)


if __name__ == "__main__":
    # Interactive entry point; kept out of module import so the function
    # can be imported and tested without prompting.
    x = input("enters hours")
    y = input("enters rate")
    print(compute_pay(x, y))
509
174
# import argparse
#
#
# def main(audio_path, textgrid_path, output_path):
#     data = list()
#     for
#     print(1)
#
# if __name__ == "__main__":
#     # -------------MENU--------------
#
#     # command line arguments
#     parser = argparse.ArgumentParser()
#     parser.add_argument("audio_path", help="The path to the audio directory")
#     parser.add_argument("textgrid_path", help="The path to the relevant textgrids")
#     parser.add_argument("output_path", help="The path to output directory")
#     args = parser.parse_args()
#
#     # main function
#     main(args.audio_path, args.textgrid_path, args.output_path)
628
199
import phonenumbers
from phonenumbers import geocoder, carrier


def get_information_about_number(phone_numbers):
    """Print the geographic description and carrier name for a phone number.

    *phone_numbers* is the raw number string as typed by the user.
    """
    # NOTE(review): the second argument of parse() is a default *region*
    # code (e.g. "US"), not a language; "en" only works when the number is
    # entered in international +CC format — confirm intended usage.
    number = phonenumbers.parse(phone_numbers, "en")
    # "en" here IS a language code: English descriptions.
    phone_location = geocoder.description_for_number(number, "en")
    phone_carrier = carrier.name_for_number(number, "en")
    print("The Location Of This Phone Number is " + str(phone_location) + " " + "And The Phone Carrier is " + phone_carrier)


if __name__ == '__main__':
    numbers = input("Please Enter The Target Number : ")
    get_information_about_number(numbers)
557
182
# TODO: 1. Add indicator that node should be run by python
# line above indicates that python is responsible for running this node
import os
import csv
import rospy
import numpy as np
import pygame
from utilities import pipline
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image

# set image resolution of the display window
RESOLUTION_X = 640
RESOLUTION_Y = 480


# ROS node skeleton that displays camera images in a pygame window.
# The image subscription itself is left as an exercise (TODOs 3 and 4).
class CameraTester(object):

    def __init__(self):
        """Set up state, wait for the ROS master and first image, then loop.

        NOTE: the constructor blocks forever in self.loop(); the object is
        never meant to be used after construction.
        """
        self.start_time = None    # ROS time (ns) when the master was seen
        self.image = None         # latest OpenCV image from the camera topic
        self.got_image = False    # becomes True once the first image arrives
        self.init_pygame()
        self.bridge = CvBridge()  # converts ROS Image messages to cv2 images
        # TODO: 2. Init node - give node an unique name - overwritten from launch file
        # wait master node is initialized and record start time
        self.wait_master_initialization()
        # TODO: 3. Subscribe to the ROS bridge camera topic and provide callback
        # wait till we got first image
        self.wait_initialization()
        # run node infinite loop
        self.loop()

    # TODO: 4. Write callback method for the subscriber

    def init_pygame(self):
        """Init the pygame window used to display camera frames."""
        pygame.init()
        pygame.display.set_caption("Camera images")
        self.screen = pygame.display.set_mode([RESOLUTION_X, RESOLUTION_Y])

    def wait_master_initialization(self):
        """Block until the ROS master answers, recording the start time."""
        while not self.start_time and not rospy.is_shutdown():
            self.start_time = rospy.Time.now().to_nsec()
        if not rospy.is_shutdown():
            rospy.loginfo('CameraTester: Ros master initialized.')

    def wait_initialization(self):
        """Block (at 10 Hz) until the first camera image has been received."""
        # define sleep rate for the loop
        rate = rospy.Rate(10)
        # wait till we get image initialized
        while not rospy.is_shutdown() and not self.got_image:
            rate.sleep()
        if not rospy.is_shutdown():
            rospy.loginfo('CameraTester: Connected to vehicle - got camera images')

    def loop(self):
        """Main node loop: redraw the latest frame at 20 Hz until shutdown."""
        rate = rospy.Rate(20)
        while not rospy.is_shutdown():
            if self.image is not None:
                # process stored image and display it in pygame window
                self.process_frame()
                # update pygame window
                pygame.display.flip()
            # wait 1/20 sec
            rate.sleep()

    def process_frame(self):
        """Convert the stored OpenCV image to a pygame surface and blit it."""
        # we need to convert image as it uses BGR color scheme and is flipped
        frame = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
        frame = np.rot90(frame)
        frame = np.flip(frame, 0)
        # TODO: 5. Add sample image processing - for example filters useful to lanes detection - uncomment line below
        #frame = pipline(frame)
        frame = pygame.surfarray.make_surface(frame)
        self.screen.blit(frame, (0, 0))
        return


# python way to indicate what to do if this file is run as executable rather then imported as library
if __name__ == '__main__':
    try:
        # create CameraTester instance and initiate loop sequence
        CameraTester()
    except rospy.ROSInterruptException:
        # catch and log ROS errors
        rospy.logerr('Could not start camera tester node.')
        pass
    finally:
        pygame.quit()
3,494
1,005
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-02-01 23:04 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('rest', '0011_auto_20180201_2256'), ] operations = [ migrations.RenameModel( old_name='nextFix', new_name='Fixtures', ), ]
393
153
import logging
import os
import json

import requests

try:
    from alerta.plugins import app  # alerta >= 5.0
except ImportError:
    from alerta.app import app  # alerta < 5.0
from alerta.plugins import PluginBase

LOG = logging.getLogger('alerta.plugins.beacon')

BEACON_HEADERS = {
    'Content-Type': 'application/json'
}

# Env var takes precedence over app config.
BEACON_SEND_ON_ACK = os.environ.get('BEACON_SEND_ON_ACK') or app.config.get('BEACON_SEND_ON_ACK', False)
BEACON_SEVERITY_MAP = app.config.get('BEACON_SEVERITY_MAP', {})
BEACON_DEFAULT_SEVERITY_MAP = {'security': '#000000',       # black
                               'critical': '#FF0000',       # red
                               'major': '#FFA500',          # orange
                               'minor': '#FFFF00',          # yellow
                               'warning': '#1E90FF',        # blue
                               'informational': '#808080',  # gray
                               'debug': '#808080',          # gray
                               'trace': '#808080',          # gray
                               'ok': '#00CC00'}             # green


class ServiceIntegration(PluginBase):
    """Alerta plugin that forwards status changes to a Beacon webhook."""

    def __init__(self, name=None):
        # Defaults overridden by user-defined severities.
        # Fix: copy the default map instead of aliasing it, so instances
        # do not mutate the shared module-level dict.
        self._severities = dict(BEACON_DEFAULT_SEVERITY_MAP)
        self._severities.update(BEACON_SEVERITY_MAP)
        super(ServiceIntegration, self).__init__(name)

    def pre_receive(self, alert):
        """No-op: pass the alert through unchanged."""
        return alert

    def post_receive(self, alert):
        """No-op: nothing to do after an alert is stored."""
        return

    def status_change(self, alert, status, text, **kwargs):
        """POST a JSON summary of the status change to the Beacon webhook.

        Raises RuntimeError when the webhook cannot be reached.
        """
        BEACON_WEBHOOK_URL = self.get_config('BEACON_WEBHOOK_URL', type=str, **kwargs)

        # NOTE(review): the ack filter is deliberately disabled; re-enable
        # once the intended BEACON_SEND_ON_ACK semantics are confirmed.
        #if BEACON_SEND_ON_ACK == False or status not in ['ack', 'assign']:
            #return

        LOG.debug('Beacon alert: %s', alert)
        LOG.debug('Beacon status: %s', status)
        LOG.debug('Beacon text: %s', text)
        LOG.debug('Beacon kwargs: %s', kwargs)

        payload = dict()
        try:
            payload['severity'] = alert.severity
            payload['status'] = status
            payload['environment'] = alert.environment
            payload['event'] = alert.event
            payload['id'] = alert.id
            payload['tags'] = alert.tags
            LOG.debug('Beacon payload: %s', payload)
        except Exception:
            # Fix: the original referenced traceback.format_exc() without
            # importing traceback, raising NameError inside this handler;
            # logger.exception records the stack trace instead.
            LOG.exception('Exception formatting payload')
            return

        try:
            r = requests.post(BEACON_WEBHOOK_URL, data=json.dumps(payload),
                              headers=BEACON_HEADERS, timeout=2)
        except Exception as e:
            # Fix: RuntimeError does not %-format its arguments the way a
            # logger call does; interpolate the message explicitly.
            raise RuntimeError("Beacon connection error: %s" % e)

        LOG.debug('Beacon response: %s\n%s' % (r.status_code, r.text))
2,669
839
"""The abstract class module for all hosts.""" # Standard Library Imports from abc import ABC, abstractmethod, abstractclassmethod # Third Party Imports # Local Application Imports from util.helpers import get_typeof_repo_names from util.message import Message class Host(ABC): """The abstract class for all hosts. Host subclasses are created under the assumption that their abstract methods are defined and `HELP_DESC` is also defined in class scope (see __init_subclass__). Attributes ---------- HELP_DESC : NotImplemented Parser description provided of a host when using -h/--help (See Also). HOST_KEY : str Chosen host is stored under this name. Methods ---------- add_parser : argparse._SubParsersAction How hosts are added to the command line to be used. Used to enforce consistent structure. See Also ---------- pyscriptman.LocalHost.HELP_DESC pyscriptman.RemoteHost.HELP_DESC pyscriptman.GitHub.HELP_DESC Notes ---------- _modify_parser : argparse.ArgumentParser To be implemented, allows the host parser to take custom arguments. """ HELP_DESC = NotImplemented HOST_KEY = "host" @property def repo_names_and_locations(self): """Getter for returning repo names and locations Returns ------- dict repo names and locations are returned, where repo names are the keys with locations as the values. """ return self._repo_names_and_locations @property def repo_names(self): """Getter for repo names""" return self._repo_names_and_locations.keys() def __init__(self): self._repo_names_and_locations = dict() def __init_subclass__(cls, *args, **kwargs): """Specifications required by future host subclasses.""" super().__init_subclass__(*args, **kwargs) if cls.HELP_DESC is NotImplemented and cls.__name__ != "WebHost": raise NotImplementedError( Message.construct_helpdesc_notimplemented_msg({cls.__name__}) ) @staticmethod def _get_bare_repo_names_from_path(dir_path): """Retrieve's bare Git repos from a given directory path. Parameters ---------- dir_path : str A directory path. 
""" return get_typeof_repo_names(dir_path, barerepo=True) @classmethod def add_parser(cls, subparser_container): """How hosts are added to the command line. Parameters ---------- subparser_container : argparse._SubParsersAction The 'container' that the host subparser is added to (see notes). Notes ---------- It should be noted that subparser_container is technically not actually an container, but a 'special action object' (see argparser documentation). """ subcommand = cls._get_host_name() parser = subparser_container.add_parser( subcommand, help=cls.HELP_DESC, allow_abbrev=False ) parser = cls._modify_parser(parser) parser.set_defaults(**{cls.HOST_KEY: subcommand}) return parser @classmethod def _get_host_name(cls): """How the host name is returned.""" return cls.__name__.lower() def add_repo_name_and_location(self, repo_name, location): """How to add repo name and location to host's repos names and locations. Parameters ---------- repo_name : str The name of the Git repo to store. location : str A url to the Git repo. """ self.repo_names_and_locations[repo_name] = location def get_location_from_repo_name(self, repo_name): """How to get the host's repo location from the repo name. Parameters ---------- repo_name : str The name of the Git repo to store. """ return self.repo_names_and_locations[repo_name] @abstractclassmethod def is_host_type(cls, chosen_host, configholder): """How the host type is determined. Parameters ---------- chosen_host : str Input received from the command line. configholder : util.configholder.ConfigHolder An instantiation of ConfigHolder, used to hold program configurations (see notes). Notes ----- The signature implemented in each host subclass does not have to be exact according to the base method and may not contain `configholder`. """ NotImplemented @abstractclassmethod def _modify_parser(cls, parser): """To be implemented, allows the host parser to take custom arguments. 
Parameters ---------- parser : argparse.ArgumentParser A normal argparse.ArgumentParser parser that can additional positional/optional arguments. """ NotImplemented @abstractmethod def get_user_repo_names_and_locations(self): """To be implemented. Depending on the type of host, this method is the 'how' in getting the repo names and locations. See Also -------- pyscriptman.hosts.host.add_repo_name_and_location : For location definition """ NotImplemented
5,542
1,505
from sqlalchemy import (
    Column,
    Integer,
    String,
    DateTime,
)

from .models import Base


class RewardManagerTransaction(Base):
    """A processed Reward Manager program transaction.

    Keyed by transaction signature; records the slot it landed in and
    when the row was created.
    """

    __tablename__ = "reward_manager_txs"

    signature = Column(String, nullable=False, primary_key=True)
    slot = Column(Integer, nullable=False)
    created_at = Column(DateTime, nullable=False)

    def __repr__(self):
        # Fix: the original used backslash line continuations *inside* the
        # f-string, which embedded the source indentation into the repr and
        # omitted the comma after slot; build a clean single-line repr via
        # implicit string concatenation instead.
        return (
            f"<RewardManagerTransaction "
            f"signature={self.signature}, "
            f"slot={self.slot}, "
            f"created_at={self.created_at}>"
        )
488
159
from nonebot import on_command, CommandSession,permission as perm import asyncio import traceback from helper import getlogger,msgSendToBot,CQsessionToStr,data_read,data_save from module.roll import match_roll logger = getlogger(__name__) __plugin_name__ = 'ROLL骰' __plugin_usage__ = r""" roll命令 """ #预处理 def headdeal(session: CommandSession): if session.event['message_type'] == "group" and session.event.sub_type != 'normal': return False return True # on_command 装饰器将函数声明为一个命令处理器 @on_command('roll',aliases=['掷骰','掷骰子','骰子'],only_to_me = False) async def roll(session: CommandSession): if not headdeal(session): return stripped_arg = session.current_arg_text.strip() logger.info(CQsessionToStr(session)) event = session.event nick = event['user_id'] if hasattr(event,'sender'): if 'card' in event.sender and event['sender']['card'] != '': nick = event['sender']['card'] elif 'nickname' in event.sender and event['sender']['nickname'] != '': nick = event['sender']['nickname'] #公式 res = stripped_arg.split('#',1) #注释合成 addmsg = '' if len(res) == 2: stripped_arg = res[1] if len(res[0]) > 25: addmsg = "---{0}---\n".format(res[0]) else: addmsg = res[0] + '#' #Default if stripped_arg == '': stripped_arg = '1d100<50' elif stripped_arg[:1] in ('<','>','!'): stripped_arg = '1d100' + stripped_arg elif stripped_arg.isdecimal(): stripped_arg = '1d100<' + stripped_arg try: msg = match_roll(nick,stripped_arg) if msg == '': await session.send('参数不正确') return except: s = traceback.format_exc(limit=10) logger.error(s) await session.send("内部错误!") return await session.send(addmsg + msg) @on_command('rollhelp',aliases=['掷骰帮助','掷骰子帮助','骰子帮助','骰娘帮助'],only_to_me = False) async def rollhelp(session: CommandSession): if not headdeal(session): return msg = '--掷骰帮助--' + "\n" msg = msg + '!roll 参数' + "\n" msg = msg + '无参默认为1d100>50' + "\n" msg = msg + '1d100固定1-5大成功,96-100大失败' + "\n" msg = msg + '支持符号>,<,>=,<=,!=,=,+,-,*,/' + "\n" msg = msg + "代码主体来自:https://github.com/akrisrn/dice" await 
session.send(msg)
2,326
930
import time import requests # Http endpoint cur = time.time() print(requests.post("http://127.0.0.1:8000/my-dag", json=["5", [1, 2], "sum"]).text) print(f"Time spent: {round(time.time() - cur, 2)} secs.") # Http endpoint cur = time.time() print(requests.post("http://127.0.0.1:8000/my-dag", json=["1", [0, 2], "max"]).text) print(f"Time spent: {round(time.time() - cur, 2)} secs.")
384
169
# Generated by Django 3.0.5 on 2020-04-26 09:09 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ("companyprofile", "0007_company_created_date"), ("careeropportunity", "0014_auto_20191031_1239"), ] operations = [ migrations.AlterField( model_name="careeropportunity", name="company", field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name="career_opportunities", to="companyprofile.Company", verbose_name="Bedrift", ), ) ]
697
227
import os import os.path import logging import sys import IValidator import re sys.path.append(os.path.abspath(os.path.join('0', '../extensions'))) from extensions_logging import logmyerror class Email_Validate(IValidator.IValidator): def formValidate_BSL(self,email): try: regex_emailCheck = re.compile("^\S+@\S+$") if (regex_emailCheck.match(email)): return email else: msg = "Email address not valid" return msg except Exception as e: excep_msg = "Error occured in method formValidate_BSL method" level = logging.getLogger().getEffectiveLevel() logmyerror.loadMyExceptionInDb(level,excep_msg,e) logging.info(excep_msg, exc_info=True)
790
225
from django.contrib import admin from psmate.models import News class BlogAdmin(admin.ModelAdmin): prepopulated_fields = {'slug': ('title',)} admin.site.register(News, BlogAdmin)
188
61
# coding:utf-8 import re from Helper.common import * class APIUsagePatternSearcher: def __init__(self, OPTIONS, custom_args, numOfRecs): self.OPTIONS = OPTIONS self.custom_args = custom_args self.numOfRecs = numOfRecs def searchAPIUsagePatterns(self): # Collect in allProjects the method invocations for every training project allProjects = {} # Map<String, Map<String, Set<String>>> # ???only the most similar projects are considered trainingProjects = getFileList_from_txt(self.custom_args['Training_Set']) testingProjects = self.getProjectNames(self.custom_args['Training_Set_filtered']) for trainingProject in trainingProjects: # projectMIs - Map<String, Set<String>> projectMIs projectMIs = self.getProjectDetails(self.OPTIONS.presolve, trainingProject) allProjects[trainingProject] = projectMIs # For every testingPro, collect the Jaccard distance # between the recommendations and the actual invocations for testingPro in testingProjects: results = {} # Map<String, Float> # ordered lists recommendations = [] testingInvocations = self.getTestingInvocations(self.custom_args['Test_Set'], testingPro) # Searching API usage pattern for testingPro # add also the testing invocation(s) for invocation in testingInvocations: recommendations.append(invocation) recommendations.extend(self.readRecommendationFile(self.custom_args['RECOMMENDATION_PATH'], testingPro)) for project in allProjects: methodInvocations = allProjects[project] for declaration in methodInvocations: invocations = methodInvocations[declaration] allMIs = set() # Md in training projects s_train = len(invocations) # Recoomendations in test project s_test = len(recommendations) short_len = min(s_train, s_test) for i in range(short_len): allMIs.add(recommendations[i]) size1 = len(invocations.intersection(allMIs)) size2 = len(invocations.union(allMIs)) if size1: jaccard = (1.0 * size1) / size2 results[project + "#" + declaration] = jaccard jaccard_sim_list = dict2sortedlist(results) numOfRecs = self.numOfRecs if 
len(jaccard_sim_list) > numOfRecs: jaccard_sim_list = jaccard_sim_list[:numOfRecs] headings = ["Project#Declaration", "Jaccard Similarity"] writeScores(self.custom_args['OUTPUT_PATH'], testingPro, jaccard_sim_list, headings) def readRecommendationFile(self, path, project): ret = [] filename = os.path.join(path, project + ".csv") with open(filename, "r") as fr: reader = csv.reader(fr) headings = next(reader) for line in reader: mi = line[0] ret.append(mi) return ret def getTestingInvocations(self, path, project): ret = [] filename = os.path.join(path, project + ".csv") with open(filename, "r") as fr: reader = csv.reader(fr) headings = next(reader) for line in reader: md = line[0].strip('\"[] ') string = line[1].strip('\"[] ') pattern = r'(<.*?>)' mi = re.findall(pattern, string) ret = mi return ret def getProjectDetails(self, path, project): # return a Map<String, Set<String>> methodInvocations = {} filename = os.path.join(path, project + ".csv") with open(filename, "r") as fr: reader = csv.reader(fr) headings = next(reader) for line in reader: md = line[0].strip('\"[] ') string = line[1].strip('\"[] ') pattern = r'(<.*?>)' mi = re.findall(pattern, string) mi = set(mi) if md in methodInvocations: methodInvocations[md] = methodInvocations[md].union(mi) else: methodInvocations[md] = mi return methodInvocations def getProjectNames(self, path): names = [] files = getFileList(path, ".csv") for file in files: names.append(os.path.split(file)[-1][:-4]) return names
4,706
1,296
class Solution: # @param {string} s # @return {string[]} def restoreIpAddresses(self, s): if not s or len(s) < 4: return [] res = [] cur = [] self.helper(s, res, cur, 0) return res def helper(self, s, res, cur, level): if level == 4: if not s: res.append('.'.join(cur)) return if len(s) == 0: return if len(s) >= 1: cur.append(s[0]) self.helper(s[1:],res,cur,level+1) cur.pop(-1) if len(s) >= 2 and 10 <= int(s[:2]): cur.append(s[:2]) self.helper(s[2:],res,cur,level+1) cur.pop(-1) if len(s) >= 3 and 100 <= int(s[:3]) < 256: cur.append(s[:3]) self.helper(s[3:],res,cur,level+1) cur.pop(-1)
873
307
while True : n = int(input()) if n == 0 : break else : arr = input().split() check = True for i in range(n) : if int(arr[int(arr[i]) - 1]) != i + 1 : check = False if check : print('ambiguous') else : print('not ambiguous')
335
102
"""Add-On functions for speech interface.""" from __future__ import annotations from typing import TYPE_CHECKING, List from voiceassistant.addons.create import Addon, CoreAttribute, addon_begin, addon_end from voiceassistant.exceptions import IntegrationError from .base import Integration if TYPE_CHECKING: from voiceassistant.core import VoiceAssistant try: from pixel_ring import pixel_ring from pixel_ring import apa102_pixel_ring if isinstance(pixel_ring, apa102_pixel_ring.PixelRing): print("Found ReSpeaker 4 Mic Array") from gpiozero import LED power = LED(5) power.on() pixel_ring.change_pattern("echo") class PixelRingState: """Host pixel ring states.""" off = 0 speak = 1 think = 2 pixel_ring.off() ring_state = PixelRingState.off except Exception as e: raise IntegrationError(f"No ReSpeaker Microphone detected or not able to connect: {e}") from e class RespeakerMicrophoneArray(Integration): """Respeaker Microphone Array integration.""" name = "respeaker" def __init__(self, vass: VoiceAssistant) -> None: """Init.""" pass @property def addons(self) -> List[Addon]: """Get addons.""" return [processing_starts, processing_ends, tts_starts, tts_ends] @addon_begin(CoreAttribute.SPEECH_PROCESSING) def processing_starts(vass: VoiceAssistant) -> None: """Do before NLP starts.""" pixel_ring.speak() global ring_state ring_state = PixelRingState.speak @addon_end(CoreAttribute.SPEECH_PROCESSING) def processing_ends(vass: VoiceAssistant) -> None: """Do when NLP ends.""" pixel_ring.off() global ring_state ring_state = PixelRingState.off @addon_begin(CoreAttribute.SPEECH_OUTPUT) def tts_starts(vass: VoiceAssistant) -> None: """Do before voice output starts.""" pixel_ring.think() @addon_end(CoreAttribute.SPEECH_OUTPUT) def tts_ends(vass: VoiceAssistant) -> None: """Do when voice output ends.""" if ring_state == PixelRingState.speak: pixel_ring.speak() else: pixel_ring.off()
2,140
714
from math import cos, sin import numpy as np import tensorflow as tf from .....simulator import Agent # from simulator import Agent tf.set_random_seed(1234) class TensorflowNoveltyDetector(Agent): def execute(self, action): raise NotImplementedError() def __init__(self, world, learning=True, x=0.0, y=0.0, theta=0.0, v=0.0, checkpoint_file=None): Agent.__init__(self, world, x, y, theta, v) self.state_tensor = None self.action_tensor = None self.encoder_model = None self.optimization_algorithm = None self.loss_function = None self.last_loss = None self.tf_session = tf.InteractiveSession() self.tf_checkpoint = checkpoint_file self.tf_saver = None self.summary_merge = None self.summary_writer = None self.global_step = tf.Variable(0, trainable=False, name='global_step') self.learning_tensor = tf.placeholder(dtype=tf.bool, name='learning') self.learning = learning def is_learning(self): return self.learning def exploit(self, state, action, horizon=1): feed_dict = dict() feed_dict[self.state_tensor] = [state] if action is not None: feed_dict[self.action_tensor] = [action] model, loss = self.tf_session.run( fetches=[ self.encoder_model, self.loss_function ], feed_dict=feed_dict ) return model, loss def explore(self, state, horizon=1): pass def learn(self, state, action): feed_dict = dict() feed_dict[self.state_tensor] = [state] feed_dict[self.learning_tensor] = self.learning if action is not None: feed_dict[self.action_tensor] = [action] summary, step, _, learning_loss, _ = self.tf_session.run( fetches=[ self.summary_merge, self.global_step, self.optimization_algorithm, self.loss_function, self.encoder_model ], feed_dict=feed_dict ) self.summary_writer.add_summary(summary, step) self.last_loss = learning_loss return learning_loss def commit(self): self.tf_saver.save(self.tf_session, self.tf_checkpoint, global_step=self.global_step) def architecture(self): raise NotImplementedError() def train(self, state_dims, action_dims, storage_location): if not self.encoder_model: 
self._state_action_tensors(state_dims, action_dims) self.encoder_model, self.loss_function = self.architecture() self.optimization_algorithm = self.get_optimizer(self.loss_function) self.tf_session.run(tf.global_variables_initializer()) tf.train.global_step(self.tf_session, self.global_step) self.summary_merge = tf.summary.merge_all() self.last_loss = float('inf') self.tf_checkpoint = tf.train.latest_checkpoint(storage_location) self.tf_saver = tf.train.Saver(filename='model') if self.tf_checkpoint: self.tf_saver.restore(self.tf_session, self.tf_checkpoint) else: self.tf_checkpoint = storage_location + 'model' self.summary_writer = tf.summary.FileWriter(storage_location, self.tf_session.graph) def test(self, state_dims, action_dims, storage_location): if not self.encoder_model: self._state_action_tensors(state_dims, action_dims) self.encoder_model, self.loss_function = self.architecture() self.tf_session.run(tf.global_variables_initializer()) self.tf_checkpoint = tf.train.latest_checkpoint(storage_location) self.tf_saver = tf.train.Saver() if self.tf_checkpoint: self.tf_saver.restore(self.tf_session, self.tf_checkpoint) else: print("NO TRAINING!") def _state_action_tensors(self, input_shape=(None, 1), output_shape=(1, 1)): if len(input_shape) == 3: input_shape = (1, input_shape[0], input_shape[1], input_shape[2]) with tf.name_scope('data'): self.state_tensor = tf.placeholder(dtype=tf.float32, shape=input_shape, name='state') if output_shape: self.action_tensor = tf.placeholder(dtype=tf.float32, shape=output_shape, name='action') tf.summary.image('state', self.state_tensor, 1) def get_optimizer(self, loss): raise NotImplementedError()
4,602
1,388
from __future__ import print_function import os import sys from errno import ENOENT from os.path import dirname, abspath, join, isdir from setuptools import setup, find_packages from distutils.command.upload import upload from pywincffi import __version__ try: WindowsError except NameError: WindowsError = OSError try: with open("README.rst") as readme: long_description = readme.read() except (OSError, IOError, WindowsError) as error: if error.errno == ENOENT: long_description = "" else: raise requirements = [ "cffi>=1.6.0", "six" ] ROOT = dirname(abspath(__file__)) DISTS = join(ROOT, "dist") class AppVeyorArtifactUpload(upload): """ A subclass of the normal upload command which """ def run(self): if not isdir(DISTS): print("%s does not exist" % DISTS, file=sys.stderr) sys.exit(1) # Clean out everything in dist/* first. This ensures that # if we have local files they'll be replaced by the artifacts # that we're downloading. for root, dirs, files in os.walk(DISTS): for name in files: os.remove(join(root, name)) from pywincffi.dev.release import AppVeyor appveyor = AppVeyor() for artifact in appveyor.artifacts(directory=DISTS): extension = artifact.path.split(".")[-1] if extension not in ("whl", "zip", "msi", "exe"): continue for root, dirs, files in os.walk(DISTS): for filename in files: if filename.endswith(".zip"): command = "sdist" pyversion = "source" elif filename.endswith(".whl"): command = "bdist_wheel" _, _, pyversion, _, _ = filename.rstrip(".whl").split("-") pyversion = ".".join(list(pyversion.lstrip("cp"))) elif filename.endswith(".msi"): command = "bdist_msi" pyversion = \ filename.rstrip(".msi").split("-")[-1].lstrip("py") elif filename.endswith(".exe"): command = "bdist_wininst" raise NotImplementedError( "Don't have `pyversion` implemented for %r" % filename) else: print( "Unknown file type: %r" % filename.split(".")[-1], file=sys.stderr) sys.exit(1) filename = join(root, filename) self.upload_file(command, pyversion, filename) setup_keywords = dict( 
name="pywincffi", version=".".join(map(str, __version__)), cmdclass={ "upload_from_appveyor": AppVeyorArtifactUpload }, packages=find_packages( include=("pywincffi*", ) ), include_package_data=True, author="Oliver Palmer", author_email="oliverpalmer@opalmer.com", url="http://github.com/opalmer/pywincffi", description="A Python library which wraps Windows functions using CFFI", long_description=long_description, setup_requires=requirements, install_requires=requirements, classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Environment :: Win32 (MS Windows)", "Operating System :: Microsoft :: Windows", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries" ] ) # Only add cffi_modules if we're running on Windows. Otherwise # things like the documentation build, which can run on Linux, may # not work. if os.name == "nt": setup_keywords.update( cffi_modules=["pywincffi/core/dist.py:_ffi"] ) setup(**setup_keywords)
4,093
1,207
import shutil with open('test.csv', 'r') as f: lines = f.readlines() for line in lines: image, gender = line.split(",") print(image) if 'female' in gender: shutil.move("test/" + image, "test/female/" + image) else: shutil.move("test/" + image, "test/male/" + image)
331
110
import random from helpers import Leaf, Rect, RoomList from renderer import MapRenderer from typing import List, Any class BSPTree: def __init__(self): self.level: List = [] self.room: object = None self._leafs: List = [] self.MAX_LEAF_SIZE: int = 32 self.ROOM_MAX_SIZE: int = 20 self.ROOM_MIN_SIZE: int = 6 def generateLevel(self, map_width: int, map_height: int, room_list: RoomList): # Creates an empty 2D array or clears existing array self.level = [["#" for y in range(map_height)] for x in range(map_width)] rootLeaf = Leaf(0, 0, map_width, map_height) self._leafs.append(rootLeaf) split_successfully = True # loop through all leaves until they can no longer split successfully while split_successfully: split_successfully = False for l in self._leafs: if (l.child_1 is None) and (l.child_2 is None): if (l.width > self.MAX_LEAF_SIZE or (l.height > self.MAX_LEAF_SIZE) or (random.random() > 0.7)): if l.split_leaf(): # try to split the leaf self._leafs.append(l.child_1) self._leafs.append(l.child_2) split_successfully = True rootLeaf.createRooms(self, room_list) return self.level def createRoom(self, room: Rect): # set all tiles within a rectangle to 0 for x in range(room.x1 + 1, room.x2): for y in range(room.y1 + 1, room.y2): self.level[x][y] = " " def createHall(self, room1: Rect, room2: Rect): # connect two rooms by hallways x1, y1 = room1.get_wall() x2, y2 = room2.get_wall() # 50% chance that a tunnel will start horizontally if random.randint(0, 1) == 1: self.createHorTunnel(x1, x2, y1) self.createVirTunnel(y1, y2, x2) else: # else it starts virtically self.createVirTunnel(y1, y2, x1) self.createHorTunnel(x1, x2, y2) def createHorTunnel(self, x1: int, x2: int, y: int): _x1, _x2, _y = int(x1), int(x2), int(y) for x in range(min(_x1, _x2), max(_x1, _x2) + 1): if self.level[x][_y] is not " ": self.level[x][_y] = "c" # self.level[x][_y] = "c" def createVirTunnel(self, y1: int, y2: int, x: int): _y1, _y2, _x = int(y1), int(y2), int(x) for y in range(min(_y1, _y2), max(_y1, 
_y2) + 1): if self.level[_x][y] is not " ": self.level[_x][y] = "c" # self.level[_x][y] = "c" room_list = RoomList() tree = BSPTree().generateLevel(64, 128, room_list) MapRenderer(tree).render_map() print(room_list.get_rooms()[5].get_random_point_in_room())
2,941
1,019
class Exporter: # Extensions for output files FILE_EXT = ".txt" PLOTS_FILE_EXT = ".pdf" """Exporter: Exports statistical data captured from YCSB output to a file.""" def __init__(self, stats_set): """__init__ :param stats_set: StatisticsSet object containing data to be exported """ self.stats_set = stats_set def export(self, filename, key, *fields): """export Exports the given fields to the given CSV file. :param filename: Filename and path for the export output :param key: Key to use as index column :param *fields: Fields to be exported """ raise NotImplementedError def export_averages(self, filename, key, *fields): """export_averages Exports the averages of the given fields, grouped by the given key, to the given CSV file. :param filename: Filename and path for export output :param key: Key to group by :param *fields: Fields to average """ raise NotImplementedError def export_averages_plot(self, filename, key, *fields): """export_plot Automatically generates and saves a plot of the given fields :param filename: Filename and path for the plot output :param *fields: Fields to be plotted """ raise NotImplementedError
1,382
367
# This an autogenerated file # # Generated with CRSAxialFrictionModel from typing import Dict,Sequence,List from dmt.entity import Entity from dmt.blueprint import Blueprint from .blueprints.crsaxialfrictionmodel import CRSAxialFrictionModelBlueprint from typing import Dict from sima.sima.moao import MOAO from sima.sima.scriptablevalue import ScriptableValue class CRSAxialFrictionModel(MOAO): """ Keyword arguments ----------------- name : str (default "") description : str (default "") _id : str (default "") scriptableValues : List[ScriptableValue] staticFriction : float Static friction force corresponding to elongation(default 0.0) staticElongation : float Relative elongation(default 0.0) dynamicFriction : float Dynamic friction force corresponding to elongation(default 0.0) dynamicElongation : float Relative elongation(default 0.0) axialFriction : bool Local axial friction model(default False) """ def __init__(self , name="", description="", _id="", staticFriction=0.0, staticElongation=0.0, dynamicFriction=0.0, dynamicElongation=0.0, axialFriction=False, **kwargs): super().__init__(**kwargs) self.name = name self.description = description self._id = _id self.scriptableValues = list() self.staticFriction = staticFriction self.staticElongation = staticElongation self.dynamicFriction = dynamicFriction self.dynamicElongation = dynamicElongation self.axialFriction = axialFriction for key, value in kwargs.items(): if not isinstance(value, Dict): setattr(self, key, value) @property def blueprint(self) -> Blueprint: """Return blueprint that this entity represents""" return CRSAxialFrictionModelBlueprint() @property def name(self) -> str: """""" return self.__name @name.setter def name(self, value: str): """Set name""" self.__name = str(value) @property def description(self) -> str: """""" return self.__description @description.setter def description(self, value: str): """Set description""" self.__description = str(value) @property def _id(self) -> str: """""" return 
self.___id @_id.setter def _id(self, value: str): """Set _id""" self.___id = str(value) @property def scriptableValues(self) -> List[ScriptableValue]: """""" return self.__scriptableValues @scriptableValues.setter def scriptableValues(self, value: List[ScriptableValue]): """Set scriptableValues""" if not isinstance(value, Sequence): raise Exception("Expected sequense, but was " , type(value)) self.__scriptableValues = value @property def staticFriction(self) -> float: """Static friction force corresponding to elongation""" return self.__staticFriction @staticFriction.setter def staticFriction(self, value: float): """Set staticFriction""" self.__staticFriction = float(value) @property def staticElongation(self) -> float: """Relative elongation""" return self.__staticElongation @staticElongation.setter def staticElongation(self, value: float): """Set staticElongation""" self.__staticElongation = float(value) @property def dynamicFriction(self) -> float: """Dynamic friction force corresponding to elongation""" return self.__dynamicFriction @dynamicFriction.setter def dynamicFriction(self, value: float): """Set dynamicFriction""" self.__dynamicFriction = float(value) @property def dynamicElongation(self) -> float: """Relative elongation""" return self.__dynamicElongation @dynamicElongation.setter def dynamicElongation(self, value: float): """Set dynamicElongation""" self.__dynamicElongation = float(value) @property def axialFriction(self) -> bool: """Local axial friction model""" return self.__axialFriction @axialFriction.setter def axialFriction(self, value: bool): """Set axialFriction""" self.__axialFriction = bool(value)
4,387
1,267