text
string
size
int64
token_count
int64
import sqlite3
import sys
import os
import io


def cleanName(m_name):
    """Extract cleaned first names from raw ``cursor.fetchall()`` rows.

    Each row is a 1-tuple such as ``('smt. asha ',)``.  The row is
    stringified, the tuple punctuation is sliced off, honorific noise
    ("@", "smt.", "smt", stray dots) is removed, and the first token is
    kept when it is longer than 2 characters (skips initials / junk).

    Args:
        m_name: list of 1-tuples as returned by ``cursor.fetchall()``.

    Returns:
        dict mapping each cleaned first name to 0 (used as an ordered set).
    """
    result_lis = {}
    for data in m_name:
        text = str(data)
        # str(('abc ',)) == "('abc ',)" -> drop the leading "('" and the
        # trailing "',)" before cleaning.  Same operation order as before:
        # rstrip, strip "@", strip "smt.", strip, strip ".", strip "smt".
        text = text[2:len(text) - 3].rstrip()
        text = text.replace("@", "").replace("smt.", "").strip()
        text = text.replace(".", "").replace("smt", "")
        first = text.split(" ")[0]
        if len(first) > 2:
            result_lis[str(first)] = 0
    return result_lis


def writeIt(name_dic):
    """Append every name in `name_dic` (one per line) to the output file."""
    filename = '/home/neel/indian-name.txt'
    with io.open(filename, 'a', encoding='utf-8') as f:
        # Iterating the dict yields its keys (the cleaned names).
        for name in name_dic:
            f.write(name + '\n')
    return None


def main():
    """Read female names from the sqlite DB, clean them and dump to a file."""
    sqlite_database = '/home/neel/indian_name.sqlite'
    conn = sqlite3.connect(sqlite_database)
    try:
        c = conn.cursor()
        c.execute("SELECT `name` FROM `Indian-Female-Names`;")
        m_name = c.fetchall()
    finally:
        # Previously the connection was leaked; always release it.
        conn.close()
    name_dic = cleanName(m_name)
    writeIt(name_dic)
    print('done!')


if __name__ == '__main__':
    main()
1,075
396
from defcon.objects.base import BaseDictObject


class Lib(BaseDictObject):

    """
    This object contains arbitrary data.

    **This object posts the following notifications:**

    =========== ====
    Name        Note
    =========== ====
    Lib.Changed Posted when the *dirty* attribute is set.
    =========== ====

    This object behaves like a dict. For example, to get a particular
    item from the lib::

        data = lib["com.typesupply.someApplication.blah"]

    To set a value for a particular key::

        lib["com.typesupply.someApplication.blah"] = 123

    And so on.

    **Note 1:** It is best to keep the data below the top level as shallow
    as possible. Changes below the top level will go unnoticed by the defcon
    change notification system. These changes will be saved the next time you
    save the font, however.

    **Note 2:** The keys used for storing data in the lib should follow the
    reverse domain naming convention detailed in the
    `UFO specification <http://unifiedfontobject.org/filestructure/lib.html>`_.
    """

    # Notification names posted through defcon's notification center.
    changeNotificationName = "Lib.Changed"
    beginUndoNotificationName = "Lib.BeginUndo"
    endUndoNotificationName = "Lib.EndUndo"
    beginRedoNotificationName = "Lib.BeginRedo"
    endRedoNotificationName = "Lib.EndRedo"


if __name__ == "__main__":
    import doctest
    doctest.testmod()
1,389
408
from django.apps import AppConfig


class PresenterConfig(AppConfig):
    """Django application configuration for the ``Presenter`` app."""

    # Dotted-path label Django uses to identify this application.
    name = 'Presenter'
93
28
# This program is an ATM that withdraws any money amount.
# Allowed papers: 100, 50, 10, 5 — the remainder is paid out in 2s and 1s.


def withdraw(balance, request):
    """Dispense `request` from `balance`, largest notes first.

    Prints one "give N" line per dispensed note.  If `request` exceeds
    `balance` the withdrawal is refused (previously it warned but still
    dispensed, overdrawing the account) and the balance is returned
    unchanged.

    Args:
        balance: funds currently available.
        request: amount to withdraw.

    Returns:
        The remaining balance after the withdrawal.
    """
    if request > balance:
        print("can`t give you all this money")
        return balance
    remaining = request
    # Original code handled remainders < 5 by subtracting 3 while printing
    # "give 2", which both dispensed the wrong amount and could drive the
    # remainder negative; 2s and 1s cover every remainder exactly.
    for note in (100, 50, 10, 5, 2, 1):
        while remaining >= note:
            remaining -= note
            print("give %d" % note)
    return balance - request


balance = 500
balance = withdraw(balance, 277)
balance = withdraw(balance, 30)
balance = withdraw(balance, 5)
balance = withdraw(balance, 500)
685
321
#!/usr/bin/env python3
# Advent of Code 2021, day 3: binary diagnostic report.
from statistics import mode


def execute():
    """Read the puzzle input and return the (part 1, part 2) answers."""
    with open('./input/day.3.txt') as inp:
        lines = inp.readlines()
    data = [l.strip() for l in lines if len(l.strip()) > 0]
    return power_consumption(data), life_support_rating(data)


# Tiny hand-rolled test-harness counters.
tests_failed = 0
tests_executed = 0


def verify(a, b):
    """Record one check: print a check mark on success, locals on failure."""
    global tests_executed
    global tests_failed
    tests_executed += 1
    if (a == b):
        print("✓")
        return
    tests_failed += 1
    print (locals())


# Example diagnostic report from the puzzle statement (12 rows of 5 bits).
example1 = """00100
11110
10110
10111
10101
01111
00111
11100
10000
11001
00010
01010""".split('\n')


def sum_parts(diagnostics):
    """Return per-column sums of the '0'/'1' digits across all rows."""
    exploded = [s[:] for s in diagnostics]
    accumulator = [0] * len(exploded[0])
    for next in exploded:
        for i, v in enumerate(next):
            accumulator[i] += int(v)
    return accumulator


# Powers of two for up to 14-bit diagnostics, most significant first.
powers = [8192, 4096, 2048, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]


def convert_to_int(exploded):
    """Convert a list of truthy/falsy bits (MSB first) to the integer value."""
    return sum([a for (a, b) in zip(powers[-len(exploded):], exploded) if b])


def epsilon_rate(parts, length):
    """Epsilon: bits set where the column sum is below half (minority bits)."""
    exploded = [s < length / 2 for s in parts]
    return convert_to_int(exploded)


def gamma_rate(parts, length):
    """Gamma: bits set where the column sum is above half (majority bits)."""
    exploded = [s > length // 2 for s in parts]
    return convert_to_int(exploded)


def power_consumption(diagnostics):
    """Part 1: product of the gamma and epsilon rates."""
    sp = sum_parts(diagnostics)
    return epsilon_rate(sp, len(diagnostics)) * gamma_rate(sp, len(diagnostics))


def match_bit_criteria(bitcount, diagnostics, default):
    """Filter `diagnostics` bit by bit (MSB first) until one value remains.

    default == 1 keeps values matching the most common bit at each position
    (oxygen); default == 0 keeps values that do NOT match it (CO2).  On a
    tie the set bit is treated as the mode, per the puzzle rules.
    """
    bits = powers[-bitcount:]
    while len(diagnostics) > 1:
        f = bits.pop(0)
        bit_matches = [d&f for d in diagnostics]
        if (bit_matches.count(0) == len(bit_matches)/2):
            # Tie between 0s and 1s: prefer the set bit.
            criteria = f
        else:
            # Mode of {0, f}: the most common bit value at this position.
            criteria = mode(bit_matches)
        if (default == 1) :
            diagnostics = [d for d in diagnostics if d&f == criteria&f]
        else:
            diagnostics = [d for d in diagnostics if d&f != criteria&f]
    return diagnostics[0]


def oxygen_generator_rating(parts, diagnostics):
    """Oxygen rating: keep the most common bit at each position."""
    return match_bit_criteria(len(parts), diagnostics, 1)


def co2_scrubber_rating(parts, diagnostics):
    """CO2 rating: keep the least common bit at each position."""
    return match_bit_criteria(len(parts), diagnostics, 0)


def life_support_rating(diagnostics):
    """Part 2: product of the oxygen generator and CO2 scrubber ratings."""
    # sum_parts is only used here for its length (the bit width).
    sp = sum_parts(diagnostics)
    values = [int(d, 2) for d in diagnostics]
    return oxygen_generator_rating(sp, values) * co2_scrubber_rating(sp, values)


def test_cases():
    """Run the example-based checks from the puzzle statement."""
    verify(sum_parts(example1), [7,5,8,7,5])
    verify(gamma_rate(sum_parts(example1), len(example1)), 0b10110)
    verify(epsilon_rate(sum_parts(example1), len(example1)), 0b01001)
    verify(power_consumption(example1), 198)
    verify(oxygen_generator_rating(sum_parts(example1), [int(d,2) for d in example1]), 0b10111)
    verify(co2_scrubber_rating(sum_parts(example1), [int(d,2) for d in example1]), 0b01010)
    verify(life_support_rating(example1), 230)
    print("Failed {} out of {} tests. ".format(tests_failed, tests_executed))


if __name__ == "__main__":
    test_cases()
    print(execute())
2,973
1,136
import cv2
import numpy as np
from scipy.signal import medfilt
from utils import init_dict, l2_dst


def keypoint_transform(H, keypoint):
    """Apply a 3x3 homography to a 2-D point.

    Input:
        H: homography matrix of dimension (3*3)
        keypoint: the (x, y) point to be transformed
    Output:
        keypoint_trans: Transformed point keypoint_trans = H * (keypoint, 1)
    """
    # Homogeneous coordinates: append 1, transform, then de-homogenize.
    keypoint = np.append(keypoint, 1)
    a, b, c = np.dot(H, keypoint)
    keypoint_trans = np.array([[a/c, b/c]]).flatten()
    return keypoint_trans


def propagate(input_points, output_points, input_frame, PATCH_SIZE=16, PROP_R=300):
    """Compute per-patch motion for `input_frame` from matched feature points.

    Input:
        input_points: points in input_frame matched with output_frame
        output_points: corresponding matched points
        input_frame: the frame whose motion patch is computed
        PATCH_SIZE: side length (pixels) of each mesh patch
        PROP_R: radius within which a feature's motion is propagated to a vertex
    Output:
        x_motion_patch, y_motion_patch: motion patches in x and y directions
    """
    cols, rows = input_frame.shape[1] // PATCH_SIZE, input_frame.shape[0] // PATCH_SIZE
    # assumes init_dict returns a dict keyed by (row, col) — TODO confirm
    x_motion = init_dict(cols, rows)
    y_motion = init_dict(cols, rows)
    temp_x_motion = init_dict(cols, rows)
    temp_y_motion = init_dict(cols, rows)
    # pre-warping with global homography
    H, _ = np.array(cv2.findHomography(input_points, output_points, cv2.RANSAC))
    for i in range(rows):
        for j in range(cols):
            point = np.array([[PATCH_SIZE * j, PATCH_SIZE * i]])
            point_trans = keypoint_transform(H, point)
            x_motion[i, j] = point.flatten()[0] - point_trans[0]
            y_motion[i, j] = point.flatten()[1] - point_trans[1]
    # distribute feature motion vectors
    for i in range(rows):
        for j in range(cols):
            vertex = np.array([[PATCH_SIZE * j, PATCH_SIZE * i]])
            for in_point, out_point in zip(input_points, output_points):
                # velocity = point - feature point in current frame
                distance = l2_dst(in_point, vertex)
                if distance < PROP_R:
                    point_trans = keypoint_transform(H, in_point)
                    # NOTE(review): this ASSIGNS a one-element list each time,
                    # overwriting earlier features, so the "median" below is
                    # over the last in-range feature only. Looks like it was
                    # meant to append — TODO confirm.
                    temp_x_motion[i, j] = [out_point[0] - point_trans[0]]
                    temp_y_motion[i, j] = [out_point[1] - point_trans[1]]
    # Apply one Median Filter on obtained motion for each vertex
    x_motion_patch = np.zeros((rows, cols), dtype=float)
    y_motion_patch = np.zeros((rows, cols), dtype=float)
    for key in x_motion.keys():
        # NOTE(review): if no feature fell within PROP_R of a vertex this
        # relies on init_dict having pre-populated a list — TODO confirm.
        temp_x_motion[key].sort()
        temp_y_motion[key].sort()
        x_motion_patch[key] = x_motion[key] + temp_x_motion[key][len(temp_x_motion[key]) // 2]
        y_motion_patch[key] = y_motion[key] + temp_y_motion[key][len(temp_y_motion[key]) // 2]
    # Apply the other Median Filter over the motion patch for outliers
    x_motion_patch = medfilt(x_motion_patch, kernel_size=[3, 3])
    y_motion_patch = medfilt(y_motion_patch, kernel_size=[3, 3])
    return x_motion_patch, y_motion_patch


def vertex_motion_path(x_path, y_path, x_motion_patch, y_motion_patch):
    """Append the newest motion patch to the accumulated vertex motion paths.

    Input:
        x_path, y_path: motion paths along x / y, shape (rows, cols, t)
        x_motion_patch, y_motion_patch: newest per-patch motion
    Output:
        x_paths, y_paths: the paths extended by one time step, where the new
        step is the previous position plus the new motion.
    """
    x_path_new = x_path[:, :, -1] + x_motion_patch
    y_path_new = y_path[:, :, -1] + y_motion_patch
    x_paths = np.concatenate((x_path, np.expand_dims(x_path_new, axis=2)), axis=2)
    y_paths = np.concatenate((y_path, np.expand_dims(y_path_new, axis=2)), axis=2)
    return x_paths, y_paths


def warp_frame(frame, x_motion_patch, y_motion_patch, PATCH_SIZE=16):
    """Warp `frame` patch-by-patch according to the given motion patches.

    Input:
        frame: the current frame
        x_motion_patch, y_motion_patch: per-patch motion along x / y
    Output:
        new_frame: the frame remapped through a per-patch homography built
        from the four displaced patch corners.
    """
    map_x = np.zeros((frame.shape[0], frame.shape[1]), np.float32)
    map_y = np.zeros((frame.shape[0], frame.shape[1]), np.float32)
    for i in range(x_motion_patch.shape[0] - 1):
        for j in range(x_motion_patch.shape[1] - 1):
            x, y = int(j * PATCH_SIZE), int(i * PATCH_SIZE)
            x_next, y_next = int((j+1) * PATCH_SIZE), int((i+1) * PATCH_SIZE)
            # Four corners of this patch, and their displaced destinations.
            src = np.array(
                [[x, y],
                 [x, y_next],
                 [x_next, y],
                 [x_next, y_next]]
            )
            dst = np.array(
                [[x + x_motion_patch[i, j], y + y_motion_patch[i, j]],
                 [x + x_motion_patch[i+1, j], y_next + y_motion_patch[i+1, j]],
                 [x_next + x_motion_patch[i, j+1], y + y_motion_patch[i, j+1]],
                 [x_next + x_motion_patch[i+1, j+1], y_next + y_motion_patch[i+1, j+1]]]
            )
            H, _ = cv2.findHomography(src, dst, cv2.RANSAC)
            # Fill the remap tables pixel-by-pixel inside this patch.
            for k in range(y, y_next):
                for l in range(x, x_next):
                    x_res, y_res, w_res = np.dot(H, np.append(np.array([[l, k]]), 1))
                    if w_res != 0:
                        x_res, y_res = x_res / (w_res*1.0), y_res / (w_res*1.0)
                    else:
                        # Degenerate homography row: fall back to identity.
                        x_res, y_res = l, k
                    map_x[k, l] = x_res
                    map_y[k, l] = y_res
    # repeat motion vectors for remaining frame in x-direction
    # NOTE(review): the source column index uses shape[0] (rows) here even
    # though this fills along the x (column) axis; looks like it should be
    # shape[1] when the patch grid is not square — TODO confirm.
    for j in range(PATCH_SIZE*x_motion_patch.shape[1], map_x.shape[1]):
        map_x[:, j] = map_x[:, PATCH_SIZE * x_motion_patch.shape[0] - 1]
        map_y[:, j] = map_y[:, PATCH_SIZE * x_motion_patch.shape[0] - 1]
    # repeat motion vectors for remaining frame in y-direction
    for i in range(PATCH_SIZE*x_motion_patch.shape[0], map_x.shape[0]):
        map_x[i, :] = map_x[PATCH_SIZE * x_motion_patch.shape[0] - 1, :]
        map_y[i, :] = map_y[PATCH_SIZE * x_motion_patch.shape[0] - 1, :]
    # deforms patch
    new_frame = cv2.remap(frame, map_x, map_y,
                          interpolation=cv2.INTER_LINEAR,
                          borderMode=cv2.BORDER_CONSTANT)
    return new_frame
6,194
2,304
# GENERATED BY KOMAND SDK - DO NOT EDIT import komand import json class Component: DESCRIPTION = "Returns VPN status" class Input: pass class Output: VPNS = "vpns" class VpnStatusInput(komand.Input): schema = json.loads(""" {} """) def __init__(self): super(self.__class__, self).__init__(self.schema) class VpnStatusOutput(komand.Output): schema = json.loads(""" { "type": "object", "title": "Variables", "properties": { "vpns": { "type": "array", "title": "VPN Statuses", "description": "VPN status array", "items": { "$ref": "#/definitions/vpn" }, "order": 1 } }, "required": [ "vpns" ], "definitions": { "vpn": { "type": "object", "title": "vpn", "properties": { "name": { "type": "string", "title": "Name", "order": 1 }, "status": { "type": "string", "title": "Status", "order": 2 } } } } } """) def __init__(self): super(self.__class__, self).__init__(self.schema)
1,140
408
import os
import struct
from .compilation.scout_flags import *
from .compilation.scout_files import *
from .compilation.arc_intel import arcIntel
from .compilation.arc_arm import arcArm, arcArmThumb
from .compilation.arc_mips import arcMips
from .context_creator import *

###################################
##  Architecture Configurations  ##
###################################

# Using an enum to support feature extensions
ARC_INTEL = arcIntel.name()
ARC_ARM = arcArm.name()
ARC_ARM_THUMB = arcArmThumb.name()
ARC_MIPS = arcMips.name()

# Maps an architecture name to its targetArc class.
arc_factory = {
    ARC_INTEL: arcIntel,
    ARC_ARM: arcArm,
    ARC_ARM_THUMB: arcArmThumb,
    ARC_MIPS: arcMips,
}

# Maps an architecture name to its Scout configuration flags.
arc_flags = {
    ARC_INTEL: (flag_arc_intel,),
    ARC_ARM: (flag_arc_arm,),
    ARC_ARM_THUMB: (flag_arc_arm, flag_arc_thumb),
    ARC_MIPS: (flag_arc_mips,),
}

#################
##  Utilities  ##
#################


def systemLine(line, logger):
    """Issue (and debug trace) a system cmd line.

    Args:
        line (string): cmd line to be executed
        logger (logger, elementals): logger to be used by the function (elementals)
    """
    logger.debug(line)
    os.system(line)

###############################
##  The full Scout Compiler  ##
###############################


class scoutCompiler:
    """A class representing the Scout Compiler object, which manages the entire compilation logic.

    Attributes
    ----------
        logger (logger): (elementals) logger
        target_arc (targetArc): target architecture instance to hold CPU-specific configurations
        project_folder (str): path to the user's working folder
        scout_folder (str): path to Scout's base folder
        config_flags (list): list of Scout configuration flags, accumulated along the process
        is_32_bits (bool): True iff we are going to compile a 32-bits binary
        is_little_endian (bool): True iff we are going to compile a Little Endian binary
        is_pic (bool): True iff we are going to compile a PIC binary blob
        full_got (bytes): blob containing the GOT function address table for a PIC compilation
        global_vars (bytes): blob containing the global variables content for a PIC compilation

    Notes
    -----
        This class serves as the main object to be used by the user when compiling an
        executable or a Position-Independent-Code (PIC) Scout binary.
    """

    def __init__(self, logger):
        """Construct the basic Scout compiler object.

        Args:
            logger (logger): (elementals) logger
        """
        self.logger = logger
        self.target_arc = None
        self.project_folder = None
        self.scout_folder = None
        self.config_flags = []
        self.is_32_bits = True
        self.is_little_endian = True
        self.is_pic = False
        self.full_got = b''
        self.global_vars = b''

    def setArc(self, arc, is_pic, is_32_bits=True, is_little_endian=True, is_native=False):
        """Set the target's architecture specifications.

        Args:
            arc (string, enum): name of the target architecture (should be a key of arc_factory)
            is_pic (bool): True iff compiling a position independent blob
            is_32_bits (bool, optional): True iff the architecture is 32 bit, otherwise it will
                                         be 64 bits (True by default)
            is_little_endian (bool, optional): True iff the architecture is little endian,
                                               otherwise it will be big endian (True by default)
            is_native (bool, optional): True iff should use the native compilation programs,
                                        regardless of the arc (False by default)
        """
        # Sanity check
        if arc not in arc_factory.keys():
            self.logger.error("Unknown architecture: \"%s\". Supported options are: \"%s\"", arc, ', '.join(arc_factory.keys()))
            # Bail out instead of raising a KeyError on the lookup below.
            return

        # Apply the chosen settings
        self.is_pic = is_pic
        self.target_arc = arc_factory[arc](is_pic)
        if is_native:
            self.config_flags.append(flag_native_compiler)
        else:
            self.target_arc.setNotNative()

        # Configure the architecture
        self.target_arc.setEndianness(is_little_endian)
        self.target_arc.setBitness(is_32_bits)
        self.is_32_bits = is_32_bits
        self.is_little_endian = is_little_endian

        # Store the values for the configuration flags
        self.config_flags.append(flag_32_bit if is_32_bits else flag_64_bit)
        self.config_flags.append(flag_little_endian if is_little_endian else flag_big_endian)
        self.config_flags += list(arc_flags[arc])
        if self.is_pic:
            self.config_flags.append(flag_pic_code)

    def setScoutMode(self, is_user):
        """Set the target's permission level.

        Args:
            is_user (bool): True iff the scout will run in user mode, otherwise it will
                            assume kernel mode permissions
        """
        self.config_flags.append(flag_mode_user if is_user else flag_mode_kernel)

    def setWorkingDirs(self, project_dir, scout_dir, include_dirs=None):
        """Set the paths for the used directories.

        Args:
            project_dir (string): path to the project's directory
            scout_dir (string): path to the directory of the basic Scout
                                (Example: ".../src/scout")
            include_dirs (list, optional): list of additional include directories
        """
        # Avoid a shared mutable default argument.
        include_dirs = [] if include_dirs is None else include_dirs
        self.project_folder = project_dir
        self.scout_folder = scout_dir

        # Ends with "/scout" (and not "/scout/")
        if scout_dir.endswith(os.path.sep + "scout"):
            main_folder = os.path.sep.join(scout_dir.split(os.path.sep)[:-1])
        else:
            main_folder = scout_dir + os.path.sep + ".."

        self.target_arc.compile_flags += ['I' + x for x in [self.project_folder, main_folder] + include_dirs]

    def addScoutFlags(self, flags):
        """Add the flags regarding the target's specifications.

        Args:
            flags (list): list of configuration flags (strings)
        """
        self.config_flags += flags

    def addCompilationFlags(self, user_compile_flags=None, user_link_flags=None):
        """Add custom compilation / linking flags.

        Args:
            user_compile_flags (list, optional): list of compiler flags (without the '-' prefix)
            user_link_flags (list, optional): list of linker flags (without the '-' prefix)
        """
        self.target_arc.compile_flags += user_compile_flags if user_compile_flags is not None else []
        self.target_arc.link_flags += user_link_flags if user_link_flags is not None else []

    def verifyScoutFlags(self):
        """Check that all of the configuration flags are set correctly."""
        # Bug fix: this previously checked "self.config_files", which does not
        # exist, raising an AttributeError whenever the user-mode flag was missing.
        if flag_mode_user not in self.config_flags and flag_mode_kernel not in self.config_flags:
            self.logger.warning("Missing Scout flag - unknown permission mode. Defaulting to USER-MODE (low privileges)")

    def generateFlagsFile(self):
        """Generate the architecture's "flags.h" file."""
        # Verify the flags
        self.verifyScoutFlags()

        # Verify we know where to store this file
        if self.project_folder is None:
            self.logger.error("Working directories are NOT defined...")
            return

        flag_path = os.path.join(self.project_folder, FLAGS_FILE_NAME)
        self.logger.info(f"Generating the {flag_path} file")
        # Context manager so the file is closed even on failure mid-write.
        with open(flag_path, "w") as fd:
            # file prefix
            fd.write("#ifndef __SCOUT__FLAGS__H__\n")
            fd.write("#define __SCOUT__FLAGS__H__\n")
            fd.write('\n')
            # auto-generation comment
            fd.write("/* This file is AUTO-GENERATED, please do NOT edit it manually */\n")
            # The actual flags
            for flag in self.config_flags:
                fd.write(f"#define {flag}\n")
            # file suffix
            fd.write("\n")
            fd.write("#endif /* _SCOUT__FLAGS__H__ */")

    def populateGOT(self, scout_got, project_got, project_vars_size=0, is_host_thumb=False):
        """Populate the PIC context with the GOT entries, and capacity for global variables.

        Args:
            scout_got (list): list of (virtual) addresses according to Scout's GOT order
            project_got (list): list of additional memory addresses for symbols used in the
                                project's GOT
            project_vars_size (int, optional): size (in bytes) of the project's global
                                               variables (0 by default)
            is_host_thumb (bool, optional): True iff the host process is a Thumb binary
                                            (False by default)
        """
        # Sanity Check #1 - PIC Compilation
        if not self.is_pic:
            self.logger.error("Can't populate a PIC context (GOT and globals) for a non-PIC compilation!")
            return

        # Sanity Check #2 - GOT Size
        expected_size = scout_got_base_size_mmap if flag_mmap in self.config_flags else scout_got_base_size
        if len(scout_got) != expected_size:
            self.logger.error(f"Wrong size for Scout's GOT: Expected {expected_size} entries, and got {len(scout_got)}!")
            return

        # "format" renamed so it no longer shadows the builtin.
        pack_format = ("<" if self.is_little_endian else ">") + ("L" if self.is_32_bits else "Q")
        # Thumb call targets need bit #0 set.
        self.full_got = b''.join([struct.pack(pack_format, func + (1 if is_host_thumb else 0)) for func in scout_got + project_got])

        # Calculate the size for the global variables
        size_globals = project_vars_size
        # The base loaders don't use global variables, only the full scout
        if flag_loader not in self.config_flags:
            if flag_instructions in self.config_flags:
                if self.is_32_bits:
                    size_globals += scout_instructions_globals_32_size
                    if flag_dynamic_buffers not in self.config_flags:
                        size_globals += scout_static_buffers_32_size
                else:
                    size_globals += scout_instructions_globals_64_size
                    if flag_dynamic_buffers not in self.config_flags:
                        size_globals += scout_static_buffers_64_size

        # Now generate the blob
        self.global_vars = b'\x00' * size_globals

    def compile(self, scout_files, project_files, elf_file):
        """Compile the "Scout" project, according to the PIC setup that was defined earlier.

        Args:
            scout_files (list): list of file paths for scout's code (*.c) files
            project_files (list): list of file paths for the project's code (*.c) files
            elf_file (string): path to the (created) compiled ELF file

        Note:
            If this is a PIC compilation, the final binary file will be named to match the
            ELF file. For example: "project.elf" => "project.bin".

        Return Value:
            Name of the PIC binary file (in PIC compilations), None otherwise.
        """
        self.logger.addIndent()

        # 1. Auto-Generate the flags.h file
        self.generateFlagsFile()

        # 2. Prepare the list of compilation files
        compilation_files = [os.path.join(self.scout_folder, f) for f in scout_files] + project_files

        # 3. Prepare the compilation & linking flags
        compile_flags, link_flags = self.target_arc.prepareFlags()

        #############################
        ## Compiling an Executable ##
        #############################

        if not self.is_pic:
            # 4. Re-organize the linker flags
            fixed_link_flags = "".join("-Wl,-" + x for x in link_flags.split("-")[1:])
            # 5. Compile together all of the file (and that's it)
            self.logger.info(f"Compiling the *.c files, linking them together and creating: {elf_file}")
            systemLine(f"{self.target_arc.compiler_path} {compile_flags} {' '.join(compilation_files)} {fixed_link_flags} -o {elf_file}", self.logger)
            self.logger.removeIndent()
            return None

        ###########################
        ## Compiling a PIC Scout ##
        ###########################

        # 4. Generate all of the *.S files
        self.logger.info("Compiling the *.c files")
        compile_flags, link_flags = self.target_arc.prepareFlags()
        s_files = []
        for c_file in compilation_files:
            local_out_file = ".".join(c_file.split(".")[:-1]) + ".S"
            systemLine(f"{self.target_arc.compiler_path} -S -c {compile_flags} {c_file} -o {local_out_file}", self.logger)
            s_files.append(local_out_file)

        # 5. Work-around GCC's bugs
        # We can afford these changes due to the following:
        # a) We only perform them on PIC compilations
        # b) PIC compilations don't contain string literals, so we won't conflict with them
        # c) Our strings are very specific, so they (probably) won't conflict with something else
        self.logger.info("Fixing the *.S files to work around GCC's bugs")
        for s_file in s_files:
            with open(s_file, "r") as fd:
                content_lines = fd.readlines()
            new_content_lines = []
            for content in content_lines:
                # Makes sure that only our special "_start" will be at the beginning of the compiled blob
                # This is needed because gcc tends to place "Main" in .text.startup section, instead of our _start.
                if ".section .text.startup" in content and "Scout" not in content:
                    continue
                content = content.replace(".space #", ".space ").replace(".space $", ".space ")
                # Mips: convert the calls to relative (PIC)
                if self.target_arc.name() == ARC_MIPS:
                    content = content.replace("\tjal\t", "\tbal\t").replace("\tj\t", "\tb\t")
                # save the modified line
                new_content_lines.append(content)
            with open(s_file, "w") as fd:
                fd.writelines(new_content_lines)

        # 6. Generate all of the *.o files
        self.logger.info("Compiling the *.S files")
        o_files = []
        for s_file in s_files:
            local_out_file = ".".join(s_file.split(".")[:-1]) + ".o"
            systemLine(f"{self.target_arc.compiler_path} -c {compile_flags} {s_file} -o {local_out_file}", self.logger)
            o_files.append(local_out_file)

        # 7. Link together all of the *.o files
        self.logger.info(f"Linking together all of the files, creating: {elf_file}")
        systemLine(f"{self.target_arc.linker_path} {link_flags} {' '.join(o_files)} -o {elf_file}", self.logger)

        # 8. Objcopy the content to the actual wanted file
        if elf_file.split('.')[-1].lower() == "elf":
            binary_file = '.'.join(elf_file.split('.')[:-1] + ['bin'])
        else:
            binary_file = elf_file + ".bin"
        self.logger.info(f"Extracting the final binary to: {binary_file}")
        systemLine(f"{self.target_arc.objcopy_path} -O binary -j .text -j .rodata {' '.join(self.target_arc.objcopy_flags)} {elf_file} {binary_file}", self.logger)

        # 9. Place the PIC context inside the file
        placeContext(self.full_got, self.global_vars, binary_file, self.logger)

        self.logger.removeIndent()
        return binary_file
15,306
4,540
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import six
import sys
import copy
import glob
import warnings
import subprocess

import paddle

IS_WINDOWS = os.name == 'nt'
# TODO(Aurelius84): Need check version of gcc and g++ is same.
# After CI path is fixed, we will modify into cc.
NVCC_COMPILE_FLAGS = [
    '-ccbin', 'gcc', '-DPADDLE_WITH_CUDA', '-DEIGEN_USE_GPU', '-DPADDLE_USE_DSO',
    '-Xcompiler', '-fPIC', '-w', '--expt-relaxed-constexpr', '-O3', '-DNVCC'
]


def prepare_unix_cflags(cflags):
    """
    Prepare all necessary compiled flags for nvcc compiling CUDA files.
    """
    cflags = NVCC_COMPILE_FLAGS + cflags + get_cuda_arch_flags(cflags)
    return cflags


def add_std_without_repeat(cflags, compiler_type, use_std14=False):
    """
    Append -std=c++11/14 in cflags if without specific it before.
    """
    cpp_flag_prefix = '/std:' if compiler_type == 'msvc' else '-std='
    if not any(cpp_flag_prefix in flag for flag in cflags):
        suffix = 'c++14' if use_std14 else 'c++11'
        cpp_flag = cpp_flag_prefix + suffix
        cflags.append(cpp_flag)


def get_cuda_arch_flags(cflags):
    """
    For an arch, say "6.1", the added compile flag will be
    ``-gencode=arch=compute_61,code=sm_61``.
    For an added "+PTX", an additional
    ``-gencode=arch=compute_xx,code=compute_xx`` is added.
    """
    # TODO(Aurelius84): real arch detection not implemented yet.
    return []


def normalize_extension_kwargs(kwargs, use_cuda=False):
    """
    Normalize include_dirs, library_dir and other attributes in kwargs.
    """
    assert isinstance(kwargs, dict)
    # append necessary include dir path of paddle
    include_dirs = kwargs.get('include_dirs', [])
    include_dirs.extend(find_paddle_includes(use_cuda))
    kwargs['include_dirs'] = include_dirs

    # append necessary lib path of paddle
    library_dirs = kwargs.get('library_dirs', [])
    library_dirs.extend(find_paddle_libraries(use_cuda))
    kwargs['library_dirs'] = library_dirs

    # add runtime library dirs
    runtime_library_dirs = kwargs.get('runtime_library_dirs', [])
    runtime_library_dirs.extend(find_paddle_libraries(use_cuda))
    kwargs['runtime_library_dirs'] = runtime_library_dirs

    # append compile flags
    extra_compile_args = kwargs.get('extra_compile_args', [])
    extra_compile_args.extend(['-g'])
    kwargs['extra_compile_args'] = extra_compile_args

    # append link flags
    extra_link_args = kwargs.get('extra_link_args', [])
    extra_link_args.extend(['-lpaddle_framework', '-lcudart'])
    kwargs['extra_link_args'] = extra_link_args

    kwargs['language'] = 'c++'
    return kwargs


def find_paddle_includes(use_cuda=False):
    """
    Return Paddle necessary include dir path.
    """
    # pythonXX/site-packages/paddle/include
    paddle_include_dir = paddle.sysconfig.get_include()
    third_party_dir = os.path.join(paddle_include_dir, 'third_party')
    include_dirs = [paddle_include_dir, third_party_dir]
    return include_dirs


def find_cuda_includes():
    # NOTE(review): despite the name, this returns the CUDA *library*
    # directory (lib64); it is consumed by find_paddle_libraries below.
    cuda_home = find_cuda_home()
    if cuda_home is None:
        raise ValueError(
            "Not found CUDA runtime, please use `export CUDA_HOME=XXX` to specific it."
        )
    return [os.path.join(cuda_home, 'lib64')]


def find_cuda_home():
    """
    Use heuristic method to find cuda path
    """
    # step 1. find in $CUDA_HOME or $CUDA_PATH
    cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')

    # step 2. find path by `which nvcc`
    if cuda_home is None:
        which_cmd = 'where' if IS_WINDOWS else 'which'
        try:
            with open(os.devnull, 'w') as devnull:
                nvcc_path = subprocess.check_output(
                    [which_cmd, 'nvcc'], stderr=devnull)
                if six.PY3:
                    nvcc_path = nvcc_path.decode()
                nvcc_path = nvcc_path.rstrip('\r\n')
                # for example: /usr/local/cuda/bin/nvcc
                cuda_home = os.path.dirname(os.path.dirname(nvcc_path))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; the fallback path logic is unchanged.
            if IS_WINDOWS:
                # search from default NVIDIA GPU path
                candidate_paths = glob.glob(
                    'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*')
                if len(candidate_paths) > 0:
                    cuda_home = candidate_paths[0]
            else:
                cuda_home = "/usr/local/cuda"
    # step 3. check whether path is valid
    if not os.path.exists(cuda_home) and paddle.is_compiled_with_cuda():
        cuda_home = None
        warnings.warn(
            "Not found CUDA runtime, please use `export CUDA_HOME= XXX` to specific it."
        )

    return cuda_home


def find_paddle_libraries(use_cuda=False):
    """
    Return Paddle necessary library dir path.
    """
    # pythonXX/site-packages/paddle/libs
    paddle_lib_dirs = [paddle.sysconfig.get_lib()]
    if use_cuda:
        cuda_dirs = find_cuda_includes()
        paddle_lib_dirs.extend(cuda_dirs)
    return paddle_lib_dirs


def append_necessary_flags(extra_compile_args, use_cuda=False):
    """
    Add necessary compile flags for gcc/nvcc compiler.
    """
    # NOTE(review): `necessary_flags` is built but never applied or returned,
    # so this function currently has no effect — presumably it was meant to
    # extend `extra_compile_args`; left as-is to avoid changing callers'
    # observed behavior. TODO confirm intent.
    necessary_flags = ['-std=c++11']

    if use_cuda:
        necessary_flags.extend(NVCC_COMPILE_FLAGS)


def add_compile_flag(extension, flag):
    # Copy first so the extension's original list is not mutated in place.
    extra_compile_args = copy.deepcopy(extension.extra_compile_args)
    if isinstance(extra_compile_args, dict):
        for args in extra_compile_args.values():
            args.append(flag)
    else:
        extra_compile_args.append(flag)

    extension.extra_compile_args = extra_compile_args


def is_cuda_file(path):
    """Return True iff `path` has a CUDA source extension (.cu)."""
    cuda_suffix = set(['.cu'])
    items = os.path.splitext(path)
    assert len(items) > 1
    return items[-1] in cuda_suffix


def get_build_directory(name):
    """
    Return paddle extension root directory, default specific by
    `PADDLE_EXTENSION_DIR`
    """
    # Bug fix: this previously read `os.envsiron`, which does not exist and
    # raised an AttributeError on every call.
    root_extensions_directory = os.environ.get('PADDLE_EXTENSION_DIR')
    if root_extensions_directory is None:
        # TODO(Aurelius84): consider wind32/macOs
        here = os.path.abspath(__file__)
        root_extensions_directory = os.path.realpath(here)
        warnings.warn(
            "$PADDLE_EXTENSION_DIR is not set, using path: {} by default."
            .format(root_extensions_directory))

    return root_extensions_directory
6,870
2,337
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
import unittest
from group import Group
from contact import Contact


def is_alert_present(wd):
    """Return True if a JavaScript alert is currently open in the browser."""
    try:
        wd.switch_to_alert().text
        return True
    except Exception:  # WebDriver raises when no alert is present
        return False


class HW_new_contact(unittest.TestCase):
    """End-to-end UI test: log in, create a group, then create a contact."""

    def setUp(self):
        # marionette=False keeps compatibility with the legacy Firefox driver.
        self.wd = WebDriver(capabilities={"marionette": False})
        self.wd.implicitly_wait(60)

    @staticmethod
    def _fill(wd, field_name, value):
        # Click, clear and type into a form field located by its name attribute.
        field = wd.find_element_by_name(field_name)
        field.click()
        field.clear()
        field.send_keys(value)

    @staticmethod
    def _select(wd, xpath):
        # Select a dropdown option (by xpath) unless it is already selected.
        option = wd.find_element_by_xpath(xpath)
        if not option.is_selected():
            option.click()

    def test_HW_new_contact(self):
        wd = self.wd
        self.open_webpage(wd)
        self.login(wd, username="admin", password="secret")
        wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
        self.link_to_group_page(wd)
        self.create_a_new_group(wd, Group(name="group.Chupakabra",
                                          header="group.Chupakabra Header",
                                          footer="group.Chupakabra footer"))
        self.group_verification(wd)
        self.new_contac_page(wd)
        self.contact_information(wd, Contact(
            firstname="Abra", middlename="Kadabra", lastname="Chupakabra",
            nickname="Chupakabra", title="My Title", company="Home",
            address="557 Mayfair Walk Ave, Las Vegas NV 89173",
            home="7023542185", mobile="6504655622", work="7023336669",
            fax="1234567890", email="test@test.com", email2="test@gmail.com",
            email3="test.mail.ru", homepage="yahoo.com",
            address2="123 new address test", phone2="123 home address",
            notes="my notes bla bla bla"))
        self.submit_cotact_information(wd)
        wd.find_element_by_link_text("Logout").click()

    def submit_cotact_information(self, wd):
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()

    def contact_information(self, wd, contact):
        """Fill every field of the "add contact" form, in form order."""
        # Plain text inputs.
        # BUG FIX: the original filled "email2" twice in a row; once is enough.
        for field_name, value in [
            ("firstname", contact.firstname),
            ("middlename", contact.middlename),
            ("lastname", contact.lastname),
            ("nickname", contact.nickname),
            ("title", contact.title),
            ("company", contact.company),
            ("address", contact.address),
            ("home", contact.home),
            ("mobile", contact.mobile),
            ("work", contact.work),
            ("fax", contact.fax),
            ("email", contact.email),
            ("email2", contact.email2),
            ("email3", contact.email3),
            ("homepage", contact.homepage),
        ]:
            self._fill(wd, field_name, value)
        # select[1]/select[2] + byear: birthday dropdowns and year field
        # (option indices presumably day 17 / month 12 — confirm against form).
        self._select(wd, "//div[@id='content']/form/select[1]//option[17]")
        self._select(wd, "//div[@id='content']/form/select[2]//option[12]")
        self._fill(wd, "byear", "1986")
        # select[3]/select[4] + ayear: anniversary dropdowns and year field.
        self._select(wd, "//div[@id='content']/form/select[3]//option[7]")
        self._select(wd, "//div[@id='content']/form/select[4]//option[3]")
        self._fill(wd, "ayear", "2018")
        # select[5]: group membership dropdown.
        self._select(wd, "//div[@id='content']/form/select[5]//option[2]")
        # Secondary address, secondary phone and free-form notes.
        for field_name, value in [
            ("address2", contact.address2),
            ("phone2", contact.phone2),
            ("notes", contact.notes),
        ]:
            self._fill(wd, field_name, value)

    def new_contac_page(self, wd):
        wd.find_element_by_link_text("add new").click()

    def group_verification(self, wd):
        wd.find_element_by_link_text("group page").click()

    def create_a_new_group(self, wd, group):
        wd.find_element_by_name("new").click()
        self._fill(wd, "group_name", group.name)
        self._fill(wd, "group_header", group.header)
        self._fill(wd, "group_footer", group.footer)
        wd.find_element_by_name("submit").click()

    def link_to_group_page(self, wd):
        wd.find_element_by_link_text("groups").click()

    def login(self, wd, username="admin", password="secret"):
        self._fill(wd, "user", username)
        self._fill(wd, "pass", password)

    def open_webpage(self, wd):
        wd.get("http://localhost/addressbook/addressbook/edit.php")

    def tearDown(self):
        self.wd.quit()


if __name__ == '__main__':
    unittest.main()
8,095
2,805
## using output of king and plink --miss identify worse performing duplicate for exclusion
## this script is run as follows
## python ExcludeDuplicates.py <king output> <plink --miss output> <output file>
##
## Ported from Python 2 (print statements and the removed `file()` builtin
## prevented this script from running under Python 3).
import sys


def load_missingness(path):
    """Read a plink --missing report; map "FID^IID" -> F_MISS (5th column).

    The header row is stored too ("FID^IID" -> "F_MISS") but is never looked
    up, so it is harmless.
    """
    missingness = {}
    with open(path) as handle:
        for line in handle:
            fields = line.strip().split()
            missingness[fields[0] + "^" + fields[1]] = fields[4]
    return missingness


def load_duplicate_pairs(path):
    """Read KING output; map each "FID^IID" to the list of its duplicates.

    Both directions of every pair are stored so that 3-way (and larger)
    duplicate clusters can be reconstructed.
    """
    duplicates = {}
    with open(path) as handle:
        for line in handle:
            fields = line.strip().split()
            sample1 = fields[0] + "^" + fields[1]
            sample2 = fields[2] + "^" + fields[3]
            duplicates.setdefault(sample1, []).append(sample2)
            duplicates.setdefault(sample2, []).append(sample1)
    return duplicates


def unique_duplicate_groups(duplicates):
    """Collapse the pairwise map into a sorted list of ";"-joined groups.

    Each group string lists its members in sorted order, so the same cluster
    seen from different members collapses to a single entry.
    """
    groups = set()
    for sample, partners in duplicates.items():
        members = sorted(partners + [sample])
        groups.add(";".join(members))
    return sorted(groups)


def main():
    print("Reading in sample missingness from", sys.argv[2])
    missingness = load_missingness(sys.argv[2])
    print("Missingness info read for ", str(len(missingness)), "samples")
    print("\n\n")

    print("Identifying duplicates from", sys.argv[1])
    duplicates = load_duplicate_pairs(sys.argv[1])
    groups = unique_duplicate_groups(duplicates)
    print(str(len(groups)), "groups of duplicate samples")

    print("Writing list of samples to exclude to", sys.argv[3])
    with open(sys.argv[3], "w") as output:
        # Keep the sample with the least missingness; exclude all others.
        for group in groups:
            samples = group.split(";")
            scored = []
            for sample in samples:
                if sample in missingness:
                    # BUG FIX: compare missingness numerically. The original
                    # took min() over the raw strings, which mis-orders
                    # scientific-notation values such as "1e-05" vs "0.5".
                    scored.append((float(missingness[sample]), sample))
                else:
                    print("Couldn't find sample: ", sample)
            if scored:
                # BUG FIX: the original used an index computed on the scored
                # list to remove from the full sample list; the two diverge
                # whenever a sample is missing from the missingness report.
                keep = min(scored)[1]
                for sample in samples:
                    if sample != keep:
                        output.write("\t".join(sample.split("^")) + "\n")


if __name__ == "__main__":
    main()
2,189
794
import numpy as np
import pandas as pd

from .jscatter import Scatter, component_idx_to_name
from .utils import minmax_scale


def test_component_idx_to_name():
    """Only component indices 2 and 3 map to the two free value slots."""
    assert 'valueA' == component_idx_to_name(2)
    assert 'valueB' == component_idx_to_name(3)
    # Everything outside {2, 3} — including None — has no component name.
    assert None == component_idx_to_name(4)
    assert None == component_idx_to_name(1)
    assert None == component_idx_to_name(None)


def test_scatter_numpy():
    """Scatter accepts plain NumPy arrays for x and y."""
    x = np.random.rand(500)
    y = np.random.rand(500)
    scatter = Scatter(x, y)
    widget = scatter.widget
    widget_data = np.asarray(widget.points)
    # One row per point: [x, y, valueA, valueB].
    assert (500, 4) == widget_data.shape
    # x and y are normalized into [-1, 1].
    assert np.allclose(minmax_scale(x, (-1,1)), widget_data[:,0])
    assert np.allclose(minmax_scale(y, (-1,1)), widget_data[:,1])
    # With no encodings configured, both value columns stay zero.
    assert np.sum(widget_data[:,2:]) == 0


def get_df():
    """Build a 500-row test frame.

    Columns: continuous 'a'/'b' for positions, numeric 'c'/'d', a
    categorical 'group' (letters A..H), and integer 'connect' /
    'connect_order' driving point connections and their ordering.
    """
    num_groups = 8
    data = np.random.rand(500, 7)
    data[:,2] *= 100
    data[:,3] *= 100
    data[:,3] = data[:,3].astype(int)
    # Group codes 0..num_groups-1.
    data[:,4] = np.round(data[:,4] * (num_groups - 1)).astype(int)
    # 100 connection ids, 5 points each; order cycles 0..4.
    data[:,5] = np.repeat(np.arange(100), 5).astype(int)
    data[:,6] = np.resize(np.arange(5), 500).astype(int)
    df = pd.DataFrame(
        data,
        columns=['a', 'b', 'c', 'd', 'group', 'connect', 'connect_order']
    )
    # Map integer group codes to letters ('A', 'B', ...) as a category.
    df['group'] = df['group'].astype('int').astype('category').map(lambda c: chr(65 + c), na_action=None)
    df['connect'] = df['connect'].astype('int')
    df['connect_order'] = df['connect_order'].astype('int')
    return df


def test_scatter_pandas():
    """Scatter accepts a DataFrame plus column names for x and y."""
    df = get_df()
    scatter = Scatter(data=df, x='a', y='b')
    widget = scatter.widget
    widget_data = np.asarray(widget.points)
    assert (500, 4) == np.asarray(widget.points).shape
    assert np.allclose(minmax_scale(df['a'].values, (-1,1)), widget_data[:,0])
    assert np.allclose(minmax_scale(df['b'].values, (-1,1)), widget_data[:,1])


def test_scatter_point_encoding_updates():
    """color/opacity/size encodings register and fill the value columns."""
    df = get_df()
    scatter = Scatter(data=df, x='a', y='b')
    widget = scatter.widget
    widget_data = np.asarray(widget.points)

    # Fresh scatter: no encodings, value columns untouched.
    assert len(scatter._encodings.data) == 0
    assert np.sum(widget_data[:,2:]) == 0

    # First encoding occupies component 2 (valueA).
    scatter.color(by='group')
    widget_data = np.asarray(widget.points)
    assert 'color' in scatter._encodings.visual
    assert 'group' in scatter._encodings.data
    assert np.sum(widget_data[:,2]) > 0
    assert np.sum(widget_data[:,3]) == 0

    # Second encoding occupies component 3 (valueB).
    scatter.opacity(by='c')
    widget_data = np.asarray(widget.points)
    assert 'opacity' in scatter._encodings.visual
    assert 'c' in scatter._encodings.data
    assert np.sum(widget_data[:,3]) > 0

    # Re-using the same data column ('c') shares the existing component.
    scatter.size(by='c')
    widget_data = np.asarray(widget.points)
    assert 'size' in scatter._encodings.visual
    assert 'c' in scatter._encodings.data
    assert np.sum(widget_data[:,3]) > 0


def test_scatter_connection_encoding_updates():
    """connect() appends connection-id and order columns to the points."""
    df = get_df()
    scatter = Scatter(data=df, x='a', y='b')
    widget = scatter.widget

    # Connection ids add a fifth column.
    scatter.connect(by='connect')
    widget_data = np.asarray(widget.points)
    assert widget_data.shape == (500, 5)
    assert np.all(
        df['connect'].values == widget_data[:,4].astype(df['connect'].dtype)
    )

    # Connection ordering adds a sixth column.
    scatter.connect(order='connect_order')
    widget_data = np.asarray(widget.points)
    assert widget_data.shape == (500, 6)
    assert np.all(
        df['connect_order'].values == widget_data[:,5].astype(df['connect_order'].dtype)
    )
3,375
1,272
import logging
import sys
from typing import Iterable

# 3rd party imports
import numpy as np
# import matplotlib.pyplot as plt
from scipy.io.wavfile import read as wavread

# local imports
from .dio import dio
from .stonemask import stonemask
from .harvest import harvest
from .cheaptrick import cheaptrick
from .d4c import d4c
from .d4cRequiem import d4cRequiem
from .get_seeds_signals import get_seeds_signals
from .synthesis import synthesis
from .synthesisRequiem import synthesisRequiem
from .swipe import swipe


class World(object):
    """Object-oriented front end for the WORLD vocoder.

    Analysis (``encode``) decomposes speech into F0, a smooth spectrogram
    and aperiodicity; synthesis (``decode``) recombines them, with simple
    modifications (pitch/duration scaling, spectral warping) in between.
    """

    def _extract_source(self, fs: int, x: np.ndarray, f0_method: str,
                        f0_floor: int, f0_ceil: int, channels_in_octave: int,
                        target_fs: int, frame_period: int,
                        allowed_range: float = None) -> dict:
        """Run the selected F0 estimator and return its source dict.

        :param allowed_range: forwarded to dio only, and only when not None
        :return: dict with 'temporal_positions', 'f0' and 'vuv'
        :raises ValueError: for an unknown f0_method (previously a bare
            ``Exception``; ValueError is still caught by ``except Exception``)
        """
        if f0_method == 'dio':
            if allowed_range is None:
                source = dio(x, fs, f0_floor, f0_ceil, channels_in_octave,
                             target_fs, frame_period)
            else:
                source = dio(x, fs, f0_floor=f0_floor, f0_ceil=f0_ceil,
                             channels_in_octave=channels_in_octave,
                             target_fs=target_fs, frame_period=frame_period,
                             allowed_range=allowed_range)
            # StoneMask refines dio's raw F0 estimate.
            source['f0'] = stonemask(x, fs, source['temporal_positions'],
                                     source['f0'])
        elif f0_method == 'harvest':
            source = harvest(x, fs, f0_floor, f0_ceil, frame_period)
        elif f0_method == 'swipe':
            source = swipe(fs, x, plim=[f0_floor, f0_ceil], sTHR=0.3)
        else:
            raise ValueError('unknown f0_method: {}'.format(f0_method))
        return source

    def get_f0(self, fs: int, x: np.ndarray, f0_method: str = 'harvest',
               f0_floor: int = 71, f0_ceil: int = 800,
               channels_in_octave: int = 2, target_fs: int = 4000,
               frame_period: int = 5) -> tuple:
        """Extract the F0 contour only.

        :param fs: sample frequency
        :param x: signal
        :param f0_method: f0 extraction method: dio, harvest, swipe
        :param f0_floor: smallest f0
        :param f0_ceil: largest f0
        :param channels_in_octave: filter channels per octave (dio only)
        :param target_fs: downsampled frequency for f0 extraction (dio only)
        :param frame_period: frame shift in ms
        :return: (temporal_positions, f0, vuv) tuple
        """
        source = self._extract_source(fs, x, f0_method, f0_floor, f0_ceil,
                                      channels_in_octave, target_fs,
                                      frame_period)
        return source['temporal_positions'], source['f0'], source['vuv']

    def get_spectrum(self, fs: int, x: np.ndarray, f0_method: str = 'harvest',
                     f0_floor: int = 71, f0_ceil: int = 800,
                     channels_in_octave: int = 2, target_fs: int = 4000,
                     frame_period: int = 5, fft_size=None) -> dict:
        """Extract the pitch-synchronous WORLD spectrogram.

        :param fs: sampling frequency
        :param x: signal (in float)
        :param f0_method: dio, harvest, swipe
        :param f0_floor: f0 min
        :param f0_ceil: f0 max
        :param channels_in_octave: channels per octave (dio only)
        :param target_fs: downsampled frequency for f0 extraction (dio only)
        :param frame_period: frame shift in ms
        :param fft_size: Fourier transform length
        :return: dict with f0, temporal_positions, fs and the two spectrograms
        """
        source = self._extract_source(fs, x, f0_method, f0_floor, f0_ceil,
                                      channels_in_octave, target_fs,
                                      frame_period)
        spectral = cheaptrick(x, fs, source, fft_size=fft_size)
        return {'f0': source['f0'],
                'temporal_positions': source['temporal_positions'],
                'fs': fs,
                'ps spectrogram': spectral['ps spectrogram'],
                'spectrogram': spectral['spectrogram']}

    def encode_w_gvn_f0(self, fs: int, x: np.ndarray, source: dict,
                        fft_size=None, is_requiem: bool = False) -> dict:
        """Do WORLD pitch-synchronous analysis with a given F0 contour.

        :param fs: sampling rate
        :param x: signal
        :param source: dict with source['temporal_positions'] (seconds),
            source['f0'] contour and source['vuv'] voiced/unvoiced flags
        :param fft_size: length of Fourier transform
        :param is_requiem: use the D4C "requiem" aperiodicity estimator
        :return: a dictionary containing WORLD's components
        """
        # CheapTrick is only valid for f0 >= 3 * fs / fft_size.
        # NOTE(review): unvoiced frames typically carry f0 == 0, which would
        # trip this assert — confirm expected caller contract.
        assert np.all(source['f0'] >= 3 * fs / fft_size)
        spectral = cheaptrick(x, fs, source, fft_size=fft_size)
        if is_requiem:
            source = d4cRequiem(x, fs, source, fft_size=fft_size)
        else:
            source = d4c(x, fs, source, fft_size_for_spectrum=fft_size)
        return {'temporal_positions': source['temporal_positions'],
                'vuv': source['vuv'],
                'f0': source['f0'],
                'fs': fs,
                'spectrogram': spectral['spectrogram'],
                'aperiodicity': source['aperiodicity'],
                'coarse_ap': source['coarse_ap'],
                'is_requiem': is_requiem
                }

    def encode(self, fs: int, x: np.ndarray, f0_method: str = 'harvest',
               f0_floor: int = 71, f0_ceil: int = 800,
               channels_in_octave: int = 2, target_fs: int = 4000,
               frame_period: int = 5, allowed_range: float = 0.1,
               fft_size=None, is_requiem: bool = False) -> dict:
        """Encode speech into F0, spectrogram and aperiodicity.

        :param fs: sample frequency
        :param x: signal
        :param f0_method: f0 extraction method: dio, harvest, swipe
        :param f0_floor: smallest f0
        :param f0_ceil: largest f0
        :param channels_in_octave: number of channels per octave (dio only)
        :param target_fs: downsampled frequency for f0 extraction (dio only)
        :param frame_period: frame shift in ms
        :param allowed_range: dio F0-fix tolerance
        :param fft_size: length of Fourier transform
        :param is_requiem: use the D4C "requiem" aperiodicity estimator
        :return: a dictionary containing WORLD components
        """
        if fft_size is not None:  # was `!= None`
            # CheapTrick constrains the lowest analyzable F0 by fft_size.
            f0_floor = 3.0 * fs / fft_size
        source = self._extract_source(fs, x, f0_method, f0_floor, f0_ceil,
                                      channels_in_octave, target_fs,
                                      frame_period,
                                      allowed_range=allowed_range)
        spectral = cheaptrick(x, fs, source, fft_size=fft_size)
        if is_requiem:
            source = d4cRequiem(x, fs, source, fft_size=fft_size)
        else:
            source = d4c(x, fs, source, fft_size_for_spectrum=fft_size)
        return {'temporal_positions': source['temporal_positions'],
                'vuv': source['vuv'],
                'fs': spectral['fs'],
                'f0': source['f0'],
                'aperiodicity': source['aperiodicity'],
                'ps spectrogram': spectral['ps spectrogram'],
                'spectrogram': spectral['spectrogram'],
                'is_requiem': is_requiem
                }

    def scale_pitch(self, dat: dict, factor: float) -> dict:
        """Multiply the F0 contour by `factor` (in place) and return dat."""
        dat['f0'] *= factor
        return dat

    def set_pitch(self, dat: dict, time: np.ndarray, value: np.ndarray) -> dict:
        """Replace the F0 contour with given time/value pairs (unimplemented)."""
        # TODO: need to resample to set values at given temporal positions
        # (which are presumably shared with the spectrogram).
        raise NotImplementedError

    def scale_duration(self, dat: dict, factor: float) -> dict:
        """Scale event times by `factor` to speed up or slow down speech.

        :param dat: WORLD components (F0, spectrogram, aperiodicity)
        :param factor: scaling factor
        :return: dat with scaled temporal_positions (modified in place)
        """
        dat['temporal_positions'] *= factor
        return dat

    def modify_duration(self, dat: dict, from_time: Iterable,
                        to_time: Iterable) -> dict:
        """Piecewise-linearly remap event times from `from_time` anchors to
        `to_time` anchors; a trailing to_time of -1 means "keep the end".
        """
        end = dat['temporal_positions'][-1]
        # BUG FIX: the original wrote `np.all(np.diff(t)) > 0`, which only
        # checked the diffs were non-zero; check strict monotonicity instead.
        assert np.all(np.diff(from_time) > 0)
        assert np.all(np.diff(to_time) > 0)
        assert from_time[0] > 0
        assert from_time[-1] < end
        to_time = np.asarray(to_time, dtype=float)
        if to_time[-1] == -1:
            to_time[-1] = end
        # BUG FIX: np.interp requires xp and fp to have equal length; the
        # original padded only from_time, so the call always raised.
        # NOTE(review): endpoints anchored 0 -> 0 and end -> end — confirm
        # this matches the intended mapping.
        from_time = np.r_[0, from_time, end]
        to_time = np.r_[0, to_time, end]
        dat['temporal_positions'] = np.interp(dat['temporal_positions'],
                                              from_time, to_time)
        # BUG FIX: return dat for consistency with the other modifiers.
        return dat

    def warp_spectrum(self, dat: dict, factor: float) -> dict:
        """Warp each spectral slice along the frequency axis by `factor`."""
        dat['spectrogram'][:] = np.array(
            [np.interp((np.arange(0, len(s)) / len(s)) ** factor,
                       (np.arange(0, len(s)) / len(s)), s)
             for s in dat['spectrogram'].T]).T
        return dat

    def decode(self, dat: dict) -> dict:
        """Combine WORLD components (F0, spectrogram, aperiodicity) into a
        sound signal.

        :param dat: contains WORLD components
        :return: dat with the synthesized waveform added under 'out'
        """
        if dat['is_requiem']:
            seeds_signals = get_seeds_signals(dat['fs'])
            y = synthesisRequiem(dat, dat, seeds_signals)
        else:
            y = synthesis(dat, dat)
        # Normalize only if synthesis clipped above full scale.
        m = np.max(np.abs(y))
        if m > 1.0:
            logging.info('rescaling waveform')
            y /= m
        dat['out'] = y
        return dat

    def draw(self, x: np.ndarray, dat: dict):
        """Visualize WORLD components against the original and synthesized
        signals (example utility; imports matplotlib lazily)."""
        from matplotlib import pyplot as plt
        fs = dat['fs']
        time = dat['temporal_positions']
        y = dat['out']
        fig, ax = plt.subplots(nrows=5, figsize=(8, 6), sharex=True)

        ax[0].set_title('input signal and resynthesized-signal')
        ax[0].plot(np.arange(len(x)) / fs, x, alpha=0.5)
        ax[0].plot(np.arange(len(y)) / fs, y, alpha=0.5)
        ax[0].set_xlabel('samples')
        ax[0].legend(['original', 'synthesis'])

        # Guard against log10(0) in the magnitude plots.
        X = dat['ps spectrogram']
        X = np.where(X == 0, sys.float_info.epsilon, X)
        ax[1].set_title('pitch-synchronous spectrogram')
        ax[1].imshow(20 * np.log10(np.abs(X[:X.shape[0] // 2, :])),
                     cmap=plt.cm.gray_r, origin='lower',
                     extent=[0, len(x) / fs, 0, fs / 2], aspect='auto')
        ax[1].set_ylabel('frequency (Hz)')

        ax[2].set_title('phase spectrogram')
        ax[2].imshow(np.diff(np.unwrap(np.angle(X[:X.shape[0] // 2, :]),
                                       axis=1), axis=1),
                     cmap=plt.cm.gray_r, origin='lower',
                     extent=[0, len(x) / fs, 0, fs / 2], aspect='auto')
        ax[2].set_ylabel('frequency (Hz)')

        ax[3].set_title('WORLD spectrogram')
        Y = dat['spectrogram']
        Y = np.where(Y < sys.float_info.epsilon, sys.float_info.epsilon, Y)
        ax[3].imshow(20 * np.log10(Y), cmap=plt.cm.gray_r, origin='lower',
                     extent=[0, len(x) / fs, 0, fs / 2], aspect='auto')
        ax[3].set_ylabel('frequency (Hz)')

        ax[4].set_title('WORLD fundamental frequency')
        ax[4].plot(time, dat['f0'])
        # BUG FIX: the y axis holds F0 in Hz; 'time (s)' belongs on x.
        ax[4].set_xlabel('time (s)')
        ax[4].set_ylabel('frequency (Hz)')
        plt.show()
10,917
3,611
"""Utilities to help building Docker images."""

import argparse
import os
import subprocess
from typing import List, Optional

from universal_build import build_utils

FLAG_DOCKER_IMAGE_PREFIX = "docker_image_prefix"


def parse_arguments(
    input_args: List[str] = None, argument_parser: argparse.ArgumentParser = None
) -> dict:
    """Parses all arguments and returns a sanitized & augmented list of arguments.

    Sanitized means that, for example, the version is already checked and set
    depending on our build guidelines. If arguments are not valid, exit the
    script run.

    Args:
        input_args (List[str], optional): List of arguments that are used instead
            of the arguments passed to the process. Defaults to `None`.
        argument_parser (argparse.ArgumentParser, optional): An argument parser
            which is passed as a parents parser to the default ArgumentParser to
            be able to use additional flags besides the default ones.

    Returns:
        dict: The parsed default arguments that are already checked for validity.
    """
    if argument_parser is None:
        argument_parser = argparse.ArgumentParser()

    argument_parser.add_argument(
        "--" + FLAG_DOCKER_IMAGE_PREFIX.replace("_", "-"),
        help="Provide a prefix for a Docker image, e.g. 'mltooling/' or even a repository path. When leaving blank, the default Dockerhub Repository is used.",
        required=False,
        default="",
    )

    return build_utils.parse_arguments(
        input_args=input_args, argument_parser=argument_parser
    )


def check_image(
    image: str, trivy: bool = True, exit_on_error: bool = True
) -> subprocess.CompletedProcess:
    """Run vulnerability checks on Dockerimage.

    Args:
        image (str): The name of the docker image to check.
        trivy (bool, optional): Activate trivy vulnerability check.
            Defaults to `True`.
        exit_on_error (bool, optional): If `True`, exit process as soon as an
            error occurs.

    Returns:
        subprocess.CompletedProcess: The result of the trivy run, or a dummy
            result with returncode -1 when no check was executed.
    """
    build_utils.log("Run vulnerability checks on docker image:")
    if trivy and build_utils.command_exists("trivy", exit_on_error=exit_on_error):
        return build_utils.run(
            f"trivy image --timeout=20m0s --exit-code 1 --severity HIGH,CRITICAL {image}",
            exit_on_error=exit_on_error,
        )
    return subprocess.CompletedProcess(args="", returncode=-1, stdout="", stderr="")
    # TODO: Implement dockl container scan


def lint_dockerfile(
    hadolint: bool = True, dockerfile: str = "Dockerfile", exit_on_error: bool = True
) -> None:
    """Run hadolint on the Dockerfile.

    Args:
        hadolint (bool, optional): Activate hadolint dockerfile linter.
            Defaults to `True`.
        dockerfile (str, optional): Specify a specific Dockerfile. If not
            specified, the default `Dockerfile` will be used.
        exit_on_error (bool, optional): Exit process if an error occurs.
            Defaults to `True`.
    """
    build_utils.log("Run linters and style checks:")

    if hadolint and build_utils.command_exists("hadolint", exit_on_error=exit_on_error):
        config_file_arg = ""
        # Pick up a repo-local hadolint configuration when present.
        if os.path.exists(".hadolint.yml"):
            config_file_arg = "--config=.hadolint.yml"

        build_utils.run(
            f"hadolint {config_file_arg} {dockerfile}", exit_on_error=exit_on_error
        )


def get_image_name(name: str, tag: str, image_prefix: str = "") -> str:
    """Get a valid versioned image name.

    Args:
        name (str): Name of the docker image.
        tag (str): Version to use for the tag.
        image_prefix (str, optional): The prefix added to the name to indicate
            an organization on DockerHub or a completely different repository.

    Returns:
        str: a valid docker image name based on: prefix/name:tag
    """
    versioned_tag = name.strip() + ":" + tag.strip()
    if image_prefix:
        versioned_tag = image_prefix.strip().rstrip("/") + "/" + versioned_tag
    return versioned_tag


def build_docker_image(
    name: str,
    version: str,
    build_args: str = "",
    docker_image_prefix: str = "",
    dockerfile: Optional[str] = None,
    additional_build_args: str = "",
    exit_on_error: bool = True,
) -> subprocess.CompletedProcess:
    """Build a docker image from a Dockerfile in the working directory.

    Args:
        name (str): Name of the docker image.
        version (str): Version to use as tag.
        build_args (str, optional): Add additional build arguments for docker
            build.
        docker_image_prefix (str, optional): The prefix added to the name to
            indicate an organization on DockerHub or a completely different
            repository.
        dockerfile (str, optional): Specify a specific Dockerfile. If not
            specified, the default `Dockerfile` will be used.
        exit_on_error (bool, optional): If `True`, exit process as soon as an
            error occurs.

    Returns:
        subprocess.CompletedProcess: Returns the CompletedProcess object of the
            `docker build ...` command.
    """
    # Check if docker exists on the system
    build_utils.command_exists("docker", exit_on_error=exit_on_error)

    versioned_tag = get_image_name(name=name, tag=version)
    latest_tag = get_image_name(name=name, tag="latest")

    # Assemble the command from parts so every flag is space-separated.
    # BUG FIX: the original concatenated `dockerfile_command + "-t "`,
    # producing e.g. `-f Dockerfile-t image:tag` whenever a custom
    # Dockerfile was given.
    command_parts = ["docker", "build"]
    if dockerfile:
        command_parts += ["-f", dockerfile]
    command_parts += ["-t", versioned_tag, "-t", latest_tag]
    if build_args:
        command_parts.append(build_args)
    # NOTE(review): additional_build_args is accepted but was never used by
    # the original implementation; kept ignored so caller behavior does not
    # change — confirm whether it should be appended to the build command.
    command_parts.append("./")

    completed_process = build_utils.run(
        " ".join(command_parts), exit_on_error=exit_on_error
    )

    if completed_process.returncode > 0:
        build_utils.log(f"Failed to build Docker image {versioned_tag}")
        return completed_process

    if docker_image_prefix:
        remote_versioned_tag = get_image_name(
            name=name, tag=version, image_prefix=docker_image_prefix
        )
        build_utils.run(
            "docker tag " + versioned_tag + " " + remote_versioned_tag,
            exit_on_error=exit_on_error,
        )

    return completed_process


def release_docker_image(
    name: str, version: str, docker_image_prefix: str, exit_on_error: bool = True
) -> subprocess.CompletedProcess:
    """Push a Docker image to a repository.

    Args:
        name (str): The name of the image. Must not be prefixed!
        version (str): The tag used for the image.
        docker_image_prefix (str): The prefix added to the name to indicate an
            organization on DockerHub or a completely different repository.
        exit_on_error (bool, optional): Exit process if an error occurs.
            Defaults to `True`.

    Returns:
        subprocess.CompletedProcess: Returns the CompletedProcess object of the
            `docker push ...` command.
    """
    # Check if docker exists on the system
    build_utils.command_exists("docker", exit_on_error=exit_on_error)

    if not docker_image_prefix:
        build_utils.log(
            "The flag --docker-image-prefix cannot be blank when pushing a Docker image."
        )
        build_utils.exit_process(build_utils.EXIT_CODE_GENERAL)

    versioned_tag = get_image_name(name=name, tag=version)
    remote_versioned_tag = get_image_name(
        name=name, tag=version, image_prefix=docker_image_prefix
    )
    build_utils.run(
        "docker tag " + versioned_tag + " " + remote_versioned_tag,
        exit_on_error=exit_on_error,
    )
    completed_process = build_utils.run(
        "docker push " + remote_versioned_tag, exit_on_error=exit_on_error
    )

    if completed_process.returncode > 0:
        build_utils.log(f"Failed to release Docker image {name}:{version}")
        # NOTE(review): the original continues to push the latest tag even
        # after a failed versioned push; behavior kept — confirm intended.

    # Only push version with latest tag if no suffix is added (pre-release)
    if "-" not in version:
        remote_latest_tag = get_image_name(
            name=name, tag="latest", image_prefix=docker_image_prefix
        )
        build_utils.log(
            "Release Docker image with latest tag as well: " + remote_latest_tag
        )

        build_utils.run(
            "docker tag " + versioned_tag + " " + remote_latest_tag,
            exit_on_error=exit_on_error,
        )
        build_utils.run("docker push " + remote_latest_tag, exit_on_error=exit_on_error)

    return completed_process
8,165
2,377
version https://git-lfs.github.com/spec/v1 oid sha256:ea33786bb4be2c91d879beaff23346f37c5b4b5b8504df61a909e3570d67eb08 size 5150
129
89
# Generated by Django 2.0.5 on 2018-05-22 21:02 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('status', '0003_auto_20180522_1745'), ('gear', '0005_weapon_reach'), ] operations = [ migrations.CreateModel( name='Consumable', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='Untitled', max_length=255)), ('ability', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='status.Ability')), ], ), migrations.RemoveField( model_name='weapon', name='tiles', ), migrations.AddField( model_name='weapon', name='ability', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='status.Ability'), ), migrations.AlterField( model_name='inventory', name='weapons', field=models.ManyToManyField(blank=True, to='gear.Weapon'), ), migrations.AddField( model_name='inventory', name='consumables', field=models.ManyToManyField(blank=True, to='gear.Consumable'), ), ]
1,415
449
# Golden parse result for CPU-utilization output (field names match Cisco
# IOS "show processes cpu sorted" — presumably a parser test fixture;
# verify against the matching test case).
# Top level: per-process rows keyed by sort rank under "sort", aggregate
# 5-sec/1-min/5-min CPU figures, and the processes with non-zero CPU.
expected_output = {
    "sort": {
        1: {
            "invoked": 3321960,
            "usecs": 109,
            "tty": 0,
            "one_min_cpu": 0.54,
            "process": "PIM Process",
            "five_min_cpu": 0.48,
            "runtime": 362874,
            "pid": 368,
            "five_sec_cpu": 1.03,
        },
        2: {
            "invoked": 1466728,
            "usecs": 2442,
            "tty": 0,
            "one_min_cpu": 0.87,
            "process": "IOSv e1000",
            "five_min_cpu": 2.77,
            "runtime": 3582279,
            "pid": 84,
            "five_sec_cpu": 0.55,
        },
        3: {
            "invoked": 116196,
            "usecs": 976,
            "tty": 0,
            "one_min_cpu": 0.07,
            "process": "OSPF-1 Hello",
            "five_min_cpu": 0.07,
            "runtime": 113457,
            "pid": 412,
            "five_sec_cpu": 0.15,
        },
    },
    "five_sec_cpu_total": 4,
    "five_min_cpu": 9,
    "one_min_cpu": 4,
    "nonzero_cpu_processes": ["PIM Process", "IOSv e1000", "OSPF-1 Hello"],
    "five_sec_cpu_interrupts": 0,
}
1,121
470
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from threading import Thread

from oslo_log import log
from six.moves.queue import Queue

from watcher_metering.publisher.base import PublisherServerBase
from watcher_metering.publisher.worker import Worker

LOG = log.getLogger(__name__)


class Publisher(PublisherServerBase):
    """Receives metric messages and hands them to a self-sizing worker pool."""

    def __init__(self, use_nanoconfig_service, publisher_endpoint,
                 nanoconfig_service_endpoint, nanoconfig_update_endpoint,
                 nanoconfig_profile, metrics_store, max_queue_size,
                 max_worker, min_worker=5):
        """
        :param use_nanoconfig_service: Indicates whether or not it should use a
            nanoconfig service
        :type use_nanoconfig_service: bool
        :param publisher_endpoint: Publisher server URI
        :type publisher_endpoint: str
        :param nanoconfig_service_endpoint: Nanoconfig service URI
        :type nanoconfig_service_endpoint: str
        :param nanoconfig_update_endpoint: Nanoconfig update service URI
        :type nanoconfig_update_endpoint: str
        :param nanoconfig_profile: Nanoconfig profile URI
        :type nanoconfig_profile: str
        :param metrics_store: Storage backend handed to each worker
        :param max_queue_size: Max size for the message queue
        :type max_queue_size: int
        :param max_worker: Max number of worker to be spawned at a given time
        :type max_worker: int
        :param min_worker: Min number of worker to be spawned at a given time
        :type min_worker: int
        """
        super(Publisher, self).__init__(
            use_nanoconfig_service,
            publisher_endpoint,
            nanoconfig_service_endpoint,
            nanoconfig_update_endpoint,
            nanoconfig_profile,
        )
        self.max_queue_size = max_queue_size
        self.metrics_store = metrics_store
        self.min_worker = min_worker
        self.max_worker = max_worker

        self.msg_queue = Queue(self.max_queue_size)
        self.workers = []

    @property
    def num_workers(self):
        # Current size of the worker pool.
        return len(self.workers)

    def on_receive(self, msg):
        """Queue an incoming message after (re)sizing the worker pool."""
        LOG.debug('[Publisher] Queue msg size = %s | workers = %s',
                  self.msg_queue.qsize(), self.num_workers)
        try:
            self.check_workers_alive()
            self.adjust_pool_size()
        except OSError as exc:
            # Pool maintenance failed; the message is still queued below.
            LOG.exception(exc)
            LOG.error("[Publisher] Error upon receiving a message")

        self.msg_queue.put(msg)

    def check_workers_alive(self):
        """Replace every dead worker thread with a fresh one."""
        # Because we can create new workers in this loop, we iterate a copy
        # --> We could otherwise loop onto the newly added workers...
        for worker_thread in self.workers[:]:
            if not worker_thread.is_alive():
                self.workers.remove(worker_thread)
                self.start_worker()

    def adjust_pool_size(self):
        """Grow or shrink the pool to track the current queue backlog."""
        needed_size = self.msg_queue.qsize() + self.min_worker
        # Only react to large swings to avoid thrashing the pool.
        if abs(needed_size - self.num_workers) > self.min_worker * 2:
            LOG.debug(("[Publisher] Auto adjust pool size needed size is `%s` "
                       "and the current size is `%s`"),
                      needed_size, self.num_workers)
            # Shrink until we are back within bounds.
            while self.num_workers > min(self.min_worker, needed_size):
                self.stop_worker()

            # Create enough, but not too many
            while self.num_workers < min(self.max_worker, needed_size):
                self.start_worker()

    def start_worker(self):
        """Spawn one worker bound to the shared queue and metrics store."""
        LOG.debug("[Publisher] starting worker")
        worker = Worker(self.msg_queue, self.metrics_store)
        worker.start()
        self.workers.append(worker)

    def stop_worker(self):
        """Stop the most recently started worker, if any."""
        if self.num_workers:
            LOG.debug("[Publisher] stopping worker")
            worker = self.workers.pop(-1)  # Pops the last worker
            worker.stop()

    def stop(self):
        """Stop the server, then stop every worker concurrently and join."""
        super(Publisher, self).stop()
        join_threads = []
        # BUG FIX: `self.workers` is a list, so the original
        # `self.workers.get(key)` raised AttributeError at shutdown;
        # iterate the worker objects directly instead.
        for worker in self.workers:
            stopper = Thread(target=worker.stop)
            stopper.start()
            join_threads.append(stopper)

        for join_thread in join_threads:
            join_thread.join()
4,702
1,370
#!/usr/bin/env python from setuptools import setup setup( name="earthlyw", version="0.1", packages=[ "ibidem", "ibidem.earthlyw", ], install_requires=[ "setuptools", "colorlog<6", "appdirs<2", "requests<3", ], extras_require={ "dev": [ "tox", "pytest", 'pytest-xdist', 'pytest-sugar', 'pytest-html', 'pytest-cov', ] }, namespace_packages=["ibidem"], zip_safe=True, # Metadata author="Morten Lied Johansen", author_email="mortenjo@ifi.uio.no", license="LGPL", keywords="ibidem earthly", url="https://github.com/mortenlj/earthlyw", # Entry points entry_points={ "console_scripts": [ "earthlyw = ibidem.earthlyw.main:main", ], }, )
873
311
#!/usr/bin/env python
# -*- coding= UTF-8 -*-
# Fad

from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4
# setup the empty canvas
from io import FileIO as file  # NOTE(review): deliberately shadows the py2 `file` builtin name
from reportlab.platypus import Flowable
# from Common.pyPdf import PdfFileWriter, PdfFileReader
from PyPDF2 import PdfFileWriter, PdfFileReader
from reportlab.lib import colors
from reportlab.platypus import Table, TableStyle, Paragraph
# from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.units import inch
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from num2words import num2words
# from configuration import Config
from Common.ui.util import formatted_number
from Common.ui.util import get_temp_filename


class flowable_rect(Flowable):
    """Flowable that draws a small checkbox with a caption (optionally two)."""

    def __init__(self, text, text2="", chck=0):
        Flowable.__init__(self)
        self.width = 10
        self.height = 10
        self.text = text
        self.text2 = text2
        # NOTE(review): the `chck` argument is ignored — self.chck is always 0.
        # Probably intended `self.chck = chck`; confirm with callers.
        self.chck = 0

    def draw(self):
        # First box plus caption on the flowable's baseline.
        self.canv.rect(0, 0, self.width, self.height, fill=0)
        self.canv.drawString(13, 0, self.text)
        if self.text2 != "":
            # Optional second box plus caption, 15pt above the first.
            self.canv.rect(0, 15, self.width, self.height, fill=0)
            self.canv.drawString(13, 15, self.text2)


def pdFview(filename, invoice):
    """Generate the disbursement PDF for *invoice*.

    Draws the invoice data on a per-page overlay and merges that overlay
    onto each page of the template PDF (static/encaissement_source.pdf).
    Writes `<filename>.pdf` (or a temp file when *filename* is falsy) and
    returns the destination path.
    """
    styles = getSampleStyleSheet()
    # styleN = styles["BodyText"]
    styleBH = styles["Normal"]
    if not filename:
        filename = get_temp_filename('pdf')
    PDFSOURCE = 'static/encaissement_source.pdf'  # template that gets overlaid
    TMP_FILE = 'static/tmp.pdf'  # scratch overlay file, rewritten for every page
    DEFAULT_FONT_SIZE = 11  # NOTE(review): currently unused
    FONT_BOLD = 'Helvetica-Bold'
    FONT = 'Helvetica'
    # FONT = 'Courier-Bold'
    # A simple function to return a leading 0 on any single digit int.
    # (NOTE(review): comment above is a leftover; no such helper exists here.)

    # Input (template) PDF
    input1 = PdfFileReader(file(PDFSOURCE, "rb"))
    # Output PDF
    output = PdfFileWriter()
    # Number of pages in the template
    n_pages = input1.getNumPages()
    # For each page: build a one-page overlay, then merge it onto the template page.
    y = 750
    x = 40
    recever_name = Paragraph('''{}'''.format(invoice.recever_name), styleBH)
    description = Paragraph('''{}'''.format(invoice.description), styleBH)
    date_valeur = invoice.date.strftime("%d - %b - %Y")
    for i in range(n_pages):
        # Fetch page i of the template document (input1)
        page = input1.getPage(i)
        p = canvas.Canvas(TMP_FILE, pagesize=A4)
        # Header: document number and date
        p.setFont(FONT_BOLD, 12)
        p.drawString(x + 300, y - 60, "DECAISEMENT N° :")
        p.drawString(x + 300, y - 80, "BAMAKO le ")
        p.setFont(FONT, 12)
        p.drawString(x + 420, y - 60, invoice.number)
        p.drawString(x + 380, y - 80, date_valeur)
        # Main 4x4 table: designation / amount / recipient
        ldata = []
        ht = invoice.amount
        amount = str(formatted_number(ht))
        ldata.append(['', "DESIGNATION", 'MONTANT', 'NOM'])
        ldata.append(["MONTANT", description, amount, recever_name])
        ldata.append(["TAUX", "", "", "MONTANT"])
        ldata.append(["VALEUR", "", "", amount])
        row = 0.8
        col = 1.5
        btable = Table(
            ldata, colWidths=[col * inch, 2.8 * inch, col * inch, col * inch],
            rowHeights=[0.5 * inch, row * inch, row * inch, row * inch])
        btable.setStyle(
            TableStyle(
                [("BOX", (0, 0), (-1, -1), 0.25, colors.black),
                 ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
                 ('ALIGN', (0, 1), (-1, -1), "RIGHT"),
                 ('BACKGROUND', (0, 0), (-1, 0), colors.white),
                 ('TEXTCOLOR', (0, 0), (-1, 0), colors.black),
                 ('FONTSIZE', (0, 0), (-1, 0), 14),
                 ('FONTNAME', (0, 0), (-1, -1), FONT_BOLD),
                 # ('BACKGROUND', (1, 1), (1, 1), colors.black),
                 ('ALIGN', (1, 0), (1, -1), 'LEFT')])
        )
        a_w = 800
        a_h = y - 320
        w, h = btable.wrap(a_w, a_h)  # NOTE(review): w, h are unused
        btable.drawOn(p, 40, a_h)
        # Amount written out in words (French), wrapped over two lines
        ht_en_lettre = num2words(ht, lang='fr')
        y = a_h - 15
        ht_en_lettre1, ht_en_lettre2 = controle_caratere(ht_en_lettre + " franc CFA", 55, 40)
        p.drawString(x, y - 30, "Arrêté la présente facture à la somme de : {}".format(ht_en_lettre1.title()))
        p.drawString(x, y - 45, (ht_en_lettre2))
        y -= 90
        # Signature line
        p.drawString(x + 230, y - 20, str(invoice.num_client))
        p.setFont(FONT_BOLD, 12)
        p.drawString(x, y, "Signature Client")
        p.drawString(x + 220, y, "Numéro Client")
        p.drawString(x + 440, y, "Signature")
        p.showPage()
        # Save the overlay page to TMP_FILE
        p.save()
        # Re-open the overlay as the "watermark"
        watermark = PdfFileReader(file(TMP_FILE, "rb"))
        # Merge template page + overlay
        page.mergePage(watermark.getPage(0))
        # Append the merged page to the output document
        output.addPage(page)
    # Write the final PDF
    file_dest = filename + ".pdf"
    output_stream = file(file_dest, u"wb")
    output.write(output_stream)
    output_stream.close()
    return file_dest


def controle_caratere(lettre, nb_controle, nb_limite):
    """Split *lettre* over two lines when it exceeds *nb_controle* characters.

    Words are accumulated into the first line while it is at most *nb_limite*
    characters long; remaining words go to the second line. Returns the pair
    (line1, line2); line2 is empty when no split was needed. NOTE(review):
    on the split path both lines come back with a leading space.
    """
    lettre = lettre  # NOTE(review): no-op assignment
    if len(lettre) <= nb_controle:
        ch = lettre
        ch2 = u""
        return ch, ch2
    else:
        ch = ch2 = u""
        for n in lettre.split(u" "):
            if len(ch) <= nb_limite:
                ch = ch + u" " + n
            else:
                ch2 = ch2 + u" " + n
        return ch, ch2
5,547
2,069
from __future__ import print_function, absolute_import

import random

import torch.utils.data as data

from pose.utils.osutils import *
from pose.utils.transforms import *
from scipy.io import loadmat
import argparse


class Real_Animal_All(data.Dataset):
    """Keypoint dataset over the behaviorDiscovery2.0 real-animal videos.

    Builds train/valid frame lists from per-video .mat ranges/landmarks and
    pre-saved train/valid index files, then serves (image, heatmap, meta)
    triples with optional scale/rotation/flip/color augmentation.
    """

    def __init__(self, is_train=True, is_aug=False, **kwargs):
        print()
        print("==> real_animal_all")
        self.img_folder = kwargs['image_path']  # root image folders
        self.is_train = is_train  # training set or test set
        self.inp_res = kwargs['inp_res']  # network input resolution (square)
        self.out_res = kwargs['out_res']  # heatmap output resolution (square)
        self.sigma = kwargs['sigma']  # gaussian radius for heatmap targets
        self.scale_factor = kwargs['scale_factor']
        self.rot_factor = kwargs['rot_factor']
        self.label_type = kwargs['label_type']
        self.animal = ['horse', 'tiger'] if kwargs['animal'] == 'all' else [kwargs['animal']]
        # train on single or all animal categories
        self.train_on_all_cat = kwargs['train_on_all_cat']  # train on single or mul, decide mean file to load
        self.is_aug = is_aug

        # create train/val split
        self.train_img_set = []
        self.valid_img_set = []
        self.train_pts_set = []
        self.valid_pts_set = []
        self.load_animal()
        self.mean, self.std = self._compute_mean()

    def load_animal(self):
        # generate train/val data
        for animal in sorted(self.animal):
            img_list = []  # img_list contains all image paths
            anno_list = []  # anno_list contains all anno lists
            range_path = os.path.join(self.img_folder, 'behaviorDiscovery2.0/ranges', animal, 'ranges.mat')
            landmark_path = os.path.join(self.img_folder, 'behaviorDiscovery2.0/landmarks', animal)
            range_file = loadmat(range_path)
            frame_num = 0
            # Pre-computed per-video split indices into img_list/anno_list.
            train_idxs = np.load('./data/real_animal/' + animal + '/train_idxs_by_video.npy')
            valid_idxs = np.load('./data/real_animal/' + animal + '/valid_idxs_by_video.npy')
            for video in range_file['ranges']:
                # range_file['ranges'] is a numpy array [Nx3]: shot_id, start_frame, end_frame
                shot_id = video[0]
                landmark_path_video = os.path.join(landmark_path, str(shot_id) + '.mat')
                if not os.path.isfile(landmark_path_video):
                    # No landmark annotations for this shot: skip the whole video.
                    continue
                landmark_file = loadmat(landmark_path_video)
                for frame in range(video[1], video[2] + 1):  # ??? video[2]+1
                    frame_id = frame - video[1]
                    # Frame files are zero-padded to 8 digits.
                    img_name = animal + '/' + '0' * (8 - len(str(frame))) + str(frame) + '.jpg'
                    img_list.append([img_name, shot_id, frame_id])
                    coord = landmark_file['landmarks'][frame_id][0][0][0][0]
                    vis = landmark_file['landmarks'][frame_id][0][0][0][1]
                    landmark = np.hstack((coord, vis))  # (x, y, visibility) per joint
                    landmark_18 = landmark[:18, :]  # keep first 18 joints
                    if animal == 'horse':
                        anno_list.append(landmark_18)
                    elif animal == 'tiger':
                        # Re-order tiger joints to match the horse joint layout.
                        landmark_18 = landmark_18[
                            np.array([1, 2, 3, 4, 5, 6, 7, 8, 15, 16, 17, 18, 13, 14, 9, 10, 11, 12]) - 1]
                        anno_list.append(landmark_18)
                    frame_num += 1
            for idx in range(train_idxs.shape[0]):
                train_idx = train_idxs[idx]
                self.train_img_set.append(img_list[train_idx])
                self.train_pts_set.append(anno_list[train_idx])
            for idx in range(valid_idxs.shape[0]):
                valid_idx = valid_idxs[idx]
                self.valid_img_set.append(img_list[valid_idx])
                self.valid_pts_set.append(anno_list[valid_idx])
            print('Animal:{}, number of frames:{}, train: {}, valid: {}'.format(animal, frame_num,
                                                                                train_idxs.shape[0],
                                                                                valid_idxs.shape[0]))
        # NOTE(review): img_list here is the variable from the LAST loop
        # iteration, so "Total number of frames" only counts the last animal.
        print('Total number of frames:{}, train: {}, valid {}'.format(len(img_list), len(self.train_img_set),
                                                                      len(self.valid_img_set)))

    def _compute_mean(self):
        """Load (or compute and cache) per-channel RGB mean/std normalization."""
        animal = 'all' if self.train_on_all_cat else self.animal[0]  # which mean file to load
        meanstd_file = './data/synthetic_animal/' + animal + '_combineds5r5_texture' + '/mean.pth.tar'
        if isfile(meanstd_file):
            print('load from mean file:', meanstd_file)
            meanstd = torch.load(meanstd_file)
        else:
            # NOTE(review): this fallback references self.train_list and
            # self.img_list, which are never defined on this class — it will
            # raise AttributeError if the mean file is missing. Presumably it
            # should iterate self.train_img_set instead; confirm before relying
            # on the regeneration path.
            print("generate mean file")
            mean = torch.zeros(3)
            std = torch.zeros(3)
            for index in self.train_list:
                a = self.img_list[index][0]
                img_path = os.path.join(self.img_folder, 'behaviorDiscovery2.0', a)
                img = load_image_ori(img_path)  # CxHxW
                mean += img.view(img.size(0), -1).mean(1)
                std += img.view(img.size(0), -1).std(1)
            mean /= len(self.train_list)
            std /= len(self.train_list)
            meanstd = {
                'mean': mean,
                'std': std,
            }
            torch.save(meanstd, meanstd_file)
        print(' Real animal mean: %.4f, %.4f, %.4f' % (meanstd['mean'][0], meanstd['mean'][1], meanstd['mean'][2]))
        print(' Real animal std: %.4f, %.4f, %.4f' % (meanstd['std'][0], meanstd['std'][1], meanstd['std'][2]))
        return meanstd['mean'], meanstd['std']

    def __getitem__(self, index):
        """Return (input image, target heatmaps, meta dict) for one frame."""
        sf = self.scale_factor
        rf = self.rot_factor
        img_list = self.train_img_set if self.is_train else self.valid_img_set
        anno_list = self.train_pts_set if self.is_train else self.valid_pts_set
        try:
            a = img_list[index][0]
        except IndexError:
            # NOTE(review): only logs the index; `a` stays unbound and the
            # next line will raise NameError for an out-of-range index.
            print(index)
        img_path = os.path.join(self.img_folder, 'behaviorDiscovery2.0', a)
        img = load_image_ori(img_path)  # CxHxW
        pts = anno_list[index].astype(np.float32)
        # Coordinates of visible joints only (x>0 / y>0).
        x_vis = pts[:, 0][pts[:, 0] > 0]
        y_vis = pts[:, 1][pts[:, 1] > 0]
        try:
            # generate bounding box using keypoints
            height, width = img.size()[1], img.size()[2]
            y_min = float(max(np.min(y_vis) - 15, 0.0))
            y_max = float(min(np.max(y_vis) + 15, height))
            x_min = float(max(np.min(x_vis) - 15, 0.0))
            x_max = float(min(np.max(x_vis) + 15, width))
        except ValueError:
            # NOTE(review): np.min on an empty array (no visible joints) lands
            # here; x_min/x_max etc. stay unbound and the code below will fail.
            print(img_path, index)

        # Generate center and scale for image cropping,
        # adapted from human pose https://github.com/princeton-vl/pose-hg-train/blob/master/src/util/dataset/mpii.lua
        c = torch.Tensor(((x_min + x_max) / 2.0, (y_min + y_max) / 2.0))
        s = max(x_max - x_min, y_max - y_min) / 200.0 * 1.25

        # For single-animal pose estimation with a centered/scaled figure
        nparts = pts.shape[0]
        pts = torch.Tensor(pts)
        r = 0
        if self.is_aug and self.is_train:
            # print('augmentation')
            # Random scale jitter; random rotation only 60% of the time.
            s = s * torch.randn(1).mul_(sf).add_(1).clamp(1 - sf, 1 + sf)[0]
            r = torch.randn(1).mul_(rf).clamp(-2 * rf, 2 * rf)[0] if random.random() <= 0.6 else 0

            # Flip
            if random.random() <= 0.5:
                img = torch.from_numpy(fliplr(img.numpy())).float()
                pts = shufflelr_ori(pts, width=img.size(2), dataset='real_animal')
                c[0] = img.size(2) - c[0]

            # Color
            img[0, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
            img[1, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
            img[2, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)

        # Prepare image and groundtruth map
        inp = crop_ori(img, c, s, [self.inp_res, self.inp_res], rot=r)
        inp = color_normalize(inp, self.mean, self.std)

        # Generate ground truth
        tpts = pts.clone()  # joints in heatmap (out_res) coordinates
        tpts_inpres = pts.clone()  # joints in input (inp_res) coordinates
        target = torch.zeros(nparts, self.out_res, self.out_res)
        target_weight = tpts[:, 2].clone().view(nparts, 1)
        for i in range(nparts):
            if tpts[i, 1] > 0:
                tpts[i, 0:2] = to_torch(transform(tpts[i, 0:2] + 1, c, s, [self.out_res, self.out_res], rot=r))
                tpts_inpres[i, 0:2] = to_torch(transform(tpts_inpres[i, 0:2] + 1, c, s, [self.inp_res, self.inp_res], rot=r))
                target[i], vis = draw_labelmap_ori(target[i], tpts[i] - 1, self.sigma, type=self.label_type)
                target_weight[i, 0] *= vis  # zero out joints pushed off-crop

        # Meta info
        meta = {'index': index, 'center': c, 'scale': s,
                'pts': pts, 'tpts': tpts, 'target_weight': target_weight, 'pts_256': tpts_inpres}

        return inp, target, meta

    def __len__(self):
        if self.is_train:
            return len(self.train_img_set)
        else:
            return len(self.valid_img_set)


def real_animal_all(**kwargs):
    # Factory used by the training framework's dataset registry.
    return Real_Animal_All(**kwargs)


real_animal_all.njoints = 18  # ugly but works
9,073
3,167
import sklearn
from sklearn.cluster import KMeans

from src.features.feature_selection import PCA_Variants2Gene_FeatureSelection


class SLEuth(sklearn.base.TransformerMixin, sklearn.base.ClusterMixin):
    """PCA-based feature selection chained into a KMeans clustering model.

    Usage protocol:
      1. ``fit_transform(X)`` fits the PCA feature selector and returns the
         projected data.
      2. ``transform(X)`` projects further SNP data with the fitted selector.
      3. ``fit_predict(X)`` runs KMeans on the projected data.
      4. ``predict(X)`` assigns new SNP profiles to the learned clusters.
    """

    def __init__(self, cluster_num, variants_genes_path="../../data/interim/variants_top56_genes.csv",
                 variance_threshold=0.9, init='random', n_init=10, max_iter=300, tol=1e-4, random_state=40):
        """
        :param cluster_num: number of clusters (and centroids) for KMeans.
        :param variants_genes_path: CSV with ["Gene(s)", "Variant ID"] columns
            mapping the variants the user wants to keep.
        :param variance_threshold: fraction of variance (e.g. 0.9 for 90%) used
            to pick the minimum number of PCA components.
        :param init: 'random' chooses k observations at random as initial centroids.
        :param n_init: number of KMeans restarts with different centroid seeds.
        :param max_iter: maximum KMeans iterations per run.
        :param tol: KMeans convergence tolerance.
        :param random_state: seed for reproducible clustering.
        """
        self.pca_variants_fs = PCA_Variants2Gene_FeatureSelection(variants_genes_path, variance_threshold)
        self.km = KMeans(n_clusters=cluster_num,
                         init=init,
                         n_init=n_init,
                         max_iter=max_iter,
                         tol=tol,
                         random_state=random_state)

    def fit_transform(self, X, y=None, **fit_params):
        """Fit the PCA feature selector on X (samples x SNP sites) and project it."""
        return self.pca_variants_fs.fit_transform(X, y, **fit_params)

    def transform(self, X, y=None):
        """Project X with the already-fitted PCA feature selector."""
        return self.pca_variants_fs.transform(X, y)

    def fit_predict(self, X, y=None):
        """Cluster the PCA-projected X and return the cluster labels."""
        return self.km.fit_predict(self.transform(X), y)

    def predict(self, X):
        """Assign PCA-projected X to the nearest learned cluster centroids."""
        return self.km.predict(self.transform(X))
2,376
716
# List-search exercise: report at which positions two user-supplied values
# occur in `p`, and which of the two was found first.
p = [1, 4, 9, 10, 20, 25]
e1 = int(input('Primeiro elemento: '))
e2 = int(input('Segundo elemento: '))

primeiro = 0  # 0 = nothing found yet, 1 = e1 was found first, 2 = e2 was found first
# Idiom: enumerate() replaces the manual while/index loop (and the unused
# `achou` flag from the original was dropped).
for x, valor in enumerate(p):
    if valor == e1:
        print(f'Elemento 1 encontrado na posição {x} da lista!')
        if primeiro == 0:
            primeiro = 1
    if valor == e2:
        print(f'Elemento 2 encontrado na posição {x} da lista!')
        if primeiro == 0:
            primeiro = 2

# BUG FIX: the original unconditionally printed "Foi encontrado primeiro o 0
# elemento!" when neither element was in the list.
if primeiro:
    print(f'Foi encontrado primeiro o {primeiro} elemento!')
else:
    print('Nenhum dos elementos foi encontrado na lista!')
492
199
from lk_db.ents.Ent import Ent


class EntTime(Ent):
    """Marker entity subclass; no behavior beyond what Ent provides."""
    pass
62
25
""" Visual Genome in Scene Graph Generation by Iterative Message Passing split """ import os import cv2 import json import h5py import pickle import numpy as np import scipy.sparse import os.path as osp from datasets.imdb import imdb from model.utils.config import cfg from IPython import embed class vg_sggimp(imdb): """ Visual Genome with sgg split """ def __init__(self, split): """ Args: split: integer, 0 is training, 1 is val, 2 is test """ imdb.__init__(self, 'vg_sggimp') # TODO: add file existence asserts # load files self.data_dir = osp.join(cfg.DATA_DIR, 'visual_genome') self.cache_dir = osp.join(cfg.DATA_DIR, 'cache') self.anno_dir = osp.join(self.data_dir, 'sggimp') self.img_dir = osp.join(self.data_dir, 'VG_100K') with open(osp.join(self.anno_dir, 'VG-SGG-dicts.json'), 'r') as f: self.vg_dicts = json.load(f) with open(osp.join(self.anno_dir, 'image_data.json'), 'r') as f: self.img_meta = json.load(f) self.vg_h5 = h5py.File(osp.join(self.anno_dir, 'VG-SGG.h5')) # filter boxes # filter_corrupted_imgs() # corrupted files: 1592.jpg 1722.jpg 4616.jpg 4617.jpg del self.img_meta[1591], self.img_meta[1720] del self.img_meta[4613], self.img_meta[4613] self.imgs_path = [] for meta in self.img_meta: fn = '{}.jpg'.format(meta['image_id']) img_path = osp.join(self.img_dir, fn) if osp.exists(img_path): self.imgs_path.append(img_path) self.imgs_path = np.array(self.imgs_path) self.img_meta = np.array(self.img_meta) assert len(self.imgs_path) == 108073 # there are 108073 images now # load statistics self.idx_to_labels = dict(map(lambda x: (int(x[0]), x[1]), self.vg_dicts['idx_to_label'].items())) self.idx_to_labels.update({0: 'background'}) self.idx_to_predicates = dict(map(lambda x: (int(x[0]), x[1]), self.vg_dicts['idx_to_predicate'].items())) self.idx_to_predicates.update({0: '__irrelevant__'}) self.nr_predicates = len(self.idx_to_predicates) self._classes = tuple(self.idx_to_labels.values()) # shape : (NumOfImages, ) self.img_to_first_box = 
self.vg_h5['img_to_first_box'].value self.img_to_last_box = self.vg_h5['img_to_first_box'].value self.img_to_first_rel = self.vg_h5['img_to_first_rel'].value self.img_to_last_rel = self.vg_h5['img_to_last_rel'].value # shape: (NumOfBoxes, 4) self.bboxes = self.vg_h5['boxes_%s' % cfg.BOX_SCALE_H5].value # covert from xcenter, ycenter, w, h to x0, y0, x1, y1 self.bboxes[:, :2] = self.bboxes[:, :2] - np.floor(self.bboxes[:, 2:] / 2) self.bboxes[:, 2:] += self.bboxes[:, :2] - 1 # shape: (NumOfBoxes, ) self.bbox_labels = self.vg_h5['labels'].value # predicates, shape: (NumOfRelationships, ) self.predicates = self.vg_h5['predicates'].value # box relationships, shape: (NumOfRelationships, 2) # specify the ids of two boxes related to the relationship # e.g. bbox_rels[0] is [boxid1, boxid2] self.bbox_rels = self.vg_h5['relationships'].value # split, shape (NumOfImages, ) self.split_indicator = self.vg_h5['split'].value self.split_data(split) self.filter_invalid_box() # set imdb shit self._image_index = np.arange(len(self.img_meta)) self._image_ext = '.jpg' def image_id_at(self, idx): """ Args: idx: integer, image index """ return self.img_meta[idx]['image_id'] def image_path_at(self, idx): """ Args: idx: integer, image index """ return self.imgs_path[idx] def split_data(self, split): """ Args: split: integer, 0,1,2, train val test """ split_mask = self.split_indicator == split self._filter(split_mask) def filter_invalid_box(self): """ delelte those image without boxes """ valid_mask = self.img_to_first_box >= 0 assert np.all( valid_mask == (self.img_to_last_box >= 0) ) self._filter(valid_mask) def _filter(self, mask): """ Args: mask: numpy array of boolean """ self.img_to_first_box = self.img_to_first_box[mask] self.img_to_last_box = self.img_to_last_box[mask] self.img_to_first_rel = self.img_to_first_rel[mask] self.img_to_last_rel = self.img_to_last_rel[mask] self.imgs_path = self.imgs_path[mask] self.img_meta = self.img_meta[mask] def gt_roidb(self): cache_path = 
osp.join(self.cache_dir, '%s_roidb.pkl' % self._name) if osp.exists(cache_path): print('load roidb from cache pickle file') with open(cache_path, 'rb') as f: roidb = pickle.load(f) return roidb roidb = [self._load_vg_anno(i) for i in self.image_index] with open(cache_path, 'wb') as f: pickle.dump(roidb, f) return roidb @staticmethod def get_size_after_resizing(self, height, width, scale): if height > width: return int(scale), int(width / height * scale) else: return int(height / width * scale), int(scale) def _load_vg_anno(self, idx): """ load visual genome annotations of image with index `idx` you should know the difference between image index and image id image id is in annotation file, image index is the index of img_meta Args: idx: integer, index of image """ idx_roidb = {} # image annotations height, width = self.img_meta[idx]['height'], self.img_meta[idx]['width'] img_scales = max(height, width) / cfg.BOX_SCALE_H5 # bounding boxes annotations bboxes = self.bboxes[self.img_to_first_box[idx]: self.img_to_last_box[idx] + 1, :] # bboxes was in cfg.BBOX_SCALE_H5, supposed 1024 # original image max size: max(h, w) # original bboxes = bboxes in 1024-size image * max(h,w)/ 1024 bboxes = bboxes * img_scales bboxes = bboxes.astype('int32') bbox_labels = self.bbox_labels[self.img_to_first_box[idx]: self.img_to_last_box[idx] + 1] overlaps = np.zeros((bboxes.shape[0], self.num_classes)) for ci, o in enumerate(overlaps): o[bbox_labels[ci]] = 1. 
overlaps = scipy.sparse.csr_matrix(overlaps) seg_areas = np.multiply(bboxes[:, 2] - bboxes[:, 0] + 1, bboxes[:, 3] - bboxes[:, 1] + 1) # relation annotations rels = [] first_rel_idx = self.img_to_first_rel[idx] last_rel_idx = self.img_to_last_rel[idx] if first_rel_idx >= 0: assert last_rel_idx >= 0 predicates = self.predicates[first_rel_idx: last_rel_idx + 1] bbox_rels = self.bbox_rels[first_rel_idx: last_rel_idx + 1] # img_to_first_box validness has been checked bbox_rels -= self.img_to_first_box[idx] assert bbox_rels.shape[0] == predicates.shape[0] for ri, predicate in enumerate(predicates): rels.append([bbox_rels[ri][0], predicate, bbox_rels[ri][1]]) rels = np.array(rels) idx_roidb.update( { 'boxes': bboxes, 'gt_classes': bbox_labels, 'gt_rels': rels, 'gt_overlaps': overlaps, 'seg_areas': seg_areas, 'flipped': False, 'width': width, 'height': height } ) return idx_roidb if __name__ == '__main__': fuck = vg_sggimp(0) embed(header='myvg.py in lib/datasets')
8,084
2,879
# SL030 RFID reader driver for skpang supplied SL030 Mifare reader
# (c) 2013-2014 Thinking Binaries Ltd, David Whale

#===============================================================================
# CONFIGURATION
#
# You can change these configuration items either by editing them in this
# file, or by refering to the module by name inside your own program.
# e.g.
#   import rfid
#   rfid.CFGEN_GPIO = False

# set to True to detect card presence by using GPIO
# set to False to detect card presence by reading card status
CFGEN_GPIO = True

# Set to the GPIO required to monitor the tag detect (OUT) line
CFG_TAG_DETECT = 4

# The I2C address of the SL030 RFID tag reader
CFG_ADDRESS = 0x50

# How often to poll (in seconds) for a tag present
CFG_TAG_PRESENT_POLL_TIME = 0.01

# How often to poll (in seconds) for a tag absent
CFG_TAG_ABSENT_POLL_TIME = 0.5

# Set to True to throw an exception when an error is printed
# Set to False to just print the error
CFGEN_EXCEPTIONS = True


# The function called when an error occurs in this module
# you can replace this with a function of your own to handle errors
def error(message):
    """Report a driver error; raises ValueError when CFGEN_EXCEPTIONS is set."""
    # Parameter renamed from `str`, which shadowed the builtin of the same name.
    print("ERROR:" + message)
    if CFGEN_EXCEPTIONS:
        raise ValueError(message)


#===============================================================================
# SETUP

try:
    import ci2c  # python2
except ImportError:
    from . import ci2c  # python3

import time

CMD_SELECT_MIFARE = 0x01
CMD_GET_FIRMWARE = 0xF0
WR_RD_DELAY = 0.05

ci2c.initDefaults()


#===============================================================================
# UTILITIES

# Human-readable names for the tag-type codes the SL030 reports.
# (Table lookup replaces the previous long elif chain.)
_TYPE_NAMES = {
    0x01: "mifare 1k, 4byte UID",
    0x02: "mifare 1k, 7byte UID",
    0x03: "mifare UltraLight, 7 byte UID",
    0x04: "mifare 4k, 4 byte UID",
    0x05: "mifare 4k, 7 byte UID",
    0x06: "mifare DesFire, 7 byte UID",  # BUG FIX: was misspelt "DesFilre"
    0x0A: "other",
}


def typename(type):
    """Return the human-readable name for a tag-type code."""
    try:
        return _TYPE_NAMES[type]
    except KeyError:
        return "unknown:" + str(type)
class SL030:
    """Driver object for one SL030 RFID reader on the I2C bus.

    The last successfully read tag's UID and type code are cached on the
    instance (`self.uid`, `self.type`) by readMifare().
    """

    def __init__(self, gpio=None):
        """
        :param gpio: optional alternative GPIO implementation (or a shared
            application-wide GPIO object); defaults to RPi.GPIO.
        """
        self.type = None  # tag type code from the last readMifare()
        self.uid = None   # tag UID bytes from the last readMifare()
        self.GPIO = gpio
        if CFGEN_GPIO:
            if gpio is None:
                # use default RPi.GPIO, if nothing else provided
                import RPi.GPIO as GPIO
                GPIO.setmode(GPIO.BCM)
                self.GPIO = GPIO
            # BUG FIX: previously used the bare name `GPIO.IN`, which is
            # unbound (NameError) whenever a custom gpio object was supplied;
            # always go through self.GPIO.
            self.GPIO.setup(CFG_TAG_DETECT, self.GPIO.IN)

    def tagIsPresent(self):
        """Return True while a tag is within range of the reader."""
        if CFGEN_GPIO:
            # tag-detect line reads low (False) when a tag is present
            return self.GPIO.input(CFG_TAG_DETECT) == False
        else:
            # BUG FIX: previously called self.select_mifare(), a method that
            # does not exist anywhere in this driver; the status-poll path is
            # a mifare read attempt.
            return self.readMifare()

    def waitTag(self):
        """Block, polling until a tag comes into range."""
        while not self.tagIsPresent():
            time.sleep(CFG_TAG_PRESENT_POLL_TIME)

    def waitNoTag(self):
        """Block, polling until no tag is in range."""
        while self.tagIsPresent():
            time.sleep(CFG_TAG_ABSENT_POLL_TIME)

    def validateVer(self, ver):
        """Sanity-check a firmware version buffer (expected to start with 'S')."""
        first = ver[0]
        if first != ord('S'):
            if first == ord('S') + 0x80:
                error("validateVer:Corruption from device detected")
            else:
                error("validateVer:unrecognised device")

    def tostr(self, ver):
        """Convert a sequence of byte values into a string."""
        # idiom: join instead of building the string with += in a loop
        return "".join(chr(b) for b in ver)

    def getFirmware(self):
        """Query and return the reader's firmware version string, or None."""
        # Tx ADDRESS, 1, CMD_GET_FIRMWARE
        result = ci2c.write(CFG_ADDRESS, [1, CMD_GET_FIRMWARE])
        time.sleep(WR_RD_DELAY)
        if result != 0:
            # BUG FIX: the read/write words in these two messages were swapped
            error("getFirmware:Cannot write, result=" + str(result))
            return None
        result, buf = ci2c.read(CFG_ADDRESS, 15)
        if result != 0:
            error("getFirmware:Cannot read, result=" + str(result))
            return None
        ver = buf[3:]
        self.validateVer(ver)
        return self.tostr(ver)

    def readMifare(self):
        """Attempt to select a mifare tag; cache uid/type and return True on success."""
        result = ci2c.write(CFG_ADDRESS, [1, CMD_SELECT_MIFARE])
        time.sleep(WR_RD_DELAY)
        if result != 0:
            # BUG FIX: the read/write words in these two messages were swapped
            error("readMifare:Cannot write, result=" + str(result))
            return False
        result, buf = ci2c.read(CFG_ADDRESS, 15)
        if result != 0:
            error("readMifare:Cannot read, result=" + str(result))
            return False
        length = buf[0]
        # buf[1] echoes the command byte; unused here
        status = buf[2]
        if (status != 0x00):
            self.uid = None
            self.type = None
            return False
        # uid length varies on type, and type is after uuid
        # NOTE(review): buf[3:length] assumes `length` counts from the start of
        # the buffer, not from the uid — confirm against the SL030 datasheet.
        uid = buf[3:length]
        tag_type = buf[length]  # renamed local (was `type`, shadowing the builtin)
        self.type = tag_type
        self.uid = uid
        return True

    def getUID(self):
        """Return the raw UID bytes of the last tag read (or None)."""
        return self.uid

    def getUniqueId(self):
        """Return the last tag's UID as an upper-case hex string."""
        return "".join("%02X" % b for b in self.uid)

    def getType(self):
        """Return the numeric type code of the last tag read (or None)."""
        return self.type
#===============================================================================
# class-less interface
#
# A thin module-level facade over one shared SL030 instance, so that beginners
# can call plain functions without dealing with objects. It also keeps the
# simpler operations front-and-centre with short documentation strings.

instance = SL030()


def tagIsPresent():
    """Return True while a tag is in range of the reader."""
    return instance.tagIsPresent()


def waitTag():
    """Block until a tag comes into range."""
    instance.waitTag()


def waitNoTag():
    """Block until no tag is in range any more."""
    instance.waitNoTag()


def readMifare():
    """Attempt a mifare read; returns False if the tag is not a mifare."""
    return instance.readMifare()


def getUID():
    """Return the unique ID of the last card read, as raw bytes."""
    return instance.getUID()


def getUniqueId():
    """Return the unique ID of the last card read, as a printable string."""
    return instance.getUniqueId()


def getType():
    """Return the numeric type code of the last card read."""
    return instance.getType()


def getTypeName():
    """Return a descriptive name for the type of the last card read."""
    code = instance.getType()
    return typename(code)

# END
6,213
2,134
# parser.py - parses a given sentence using a given grammar definition

import sys, os
import argparse

from utils import load_grammar


def get_parser(grammar_file, *args, **kwargs):
    """Load a sentence parser from the given grammar file."""
    return load_grammar(grammar_file, *args, **kwargs)


def tokenize(sentence):
    """Break a sentence string down into whitespace-separated tokens."""
    return sentence.split()


def parse(parser, sentence):
    """Return the best parse trees for *sentence* using *parser*."""
    return parser.nbest_parse(tokenize(sentence))


def main():
    """CLI entry point: parse the command-line sentence and print its trees."""
    # Renamed from `parser`, which was later rebound to the grammar parser —
    # two unrelated objects sharing one name.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('grammar', help="file in local folder with grammar")
    arg_parser.add_argument('sentence', nargs="*", help="sentence to be parsed")
    arg_parser.add_argument('-t', '--trace', type=int, help="parser debug trace level")
    arg_parser.add_argument('-c', '--cache', action="store_true", help="cache grammar or not")
    args = arg_parser.parse_args()

    sentence = " ".join(args.sentence)
    grammar_parser = get_parser(args.grammar, trace=args.trace, cache=args.cache)
    trees = parse(grammar_parser, sentence)

    # BUG FIX: the original used Python-2 print statements, a syntax error
    # under Python 3; print() calls behave identically on both.
    if not trees:
        print("No parse trees found")
        return
    for tree in trees:
        print(tree)


if __name__ == "__main__":
    main()
1,293
389
import re
from typing import List, Any, Generator, Tuple, Pattern, Optional, Callable, Dict


class Token:
    """
    A Token class.

    Parameters
    ----------
    lex: str
        Token's lexeme.
    token_type: Enum
        Token's type.
    """

    def __init__(self, lex, token_type, line=0, column=0):
        """
        :param lex: str
        :param token_type: Enum
        :param line: int
        :param column: int
        """
        self.lex: str = lex
        self.token_type: Any = token_type
        self.line: int = line
        self.column: int = column

    def __str__(self):
        return f'{self.token_type}: {self.lex}'

    def __repr__(self):
        return str(self)

    @property
    def is_valid(self):
        # Always True here; subclasses may override to flag invalid tokens.
        return True


class Lexer:
    """Regex-table-driven tokenizer.

    The (name, regex) rules are compiled into a single alternation pattern;
    which rule wins at a position follows `re` alternation order.
    """

    def __init__(self, table: List[Tuple[str, str]], eof: str,
                 token_rules: Optional[Dict[str, Callable[['Lexer'], Optional[Token]]]] = None,
                 error_handler: Optional[Callable[['Lexer'], None]] = None):
        if token_rules is None:
            token_rules = {}
        if error_handler is None:
            error_handler = self.error
        self.lineno: int = 1  # Current line number
        self.column: int = 1  # Current column in the line
        self.position: int = 0  # Current position in recognition
        self.text = ''  # current text
        self.token: Token = Token('', '', 0, 0)  # Current token in recognition
        self.pattern: Pattern = self._build_regex(table)
        self.token_rules = token_rules  # type: Dict[str, Callable[['Lexer'], Optional[Token]]]
        self.contain_errors: bool = False
        # NOTE(review): redundant — error_handler was already defaulted to
        # self.error above, so this conditional can never pick self.error.
        self.error_handler = error_handler if error_handler is not None else self.error
        self.eof: str = eof
        self._errors: List[Tuple[int, int, str]] = []

    def tokenize(self, text: str) -> Generator[Token, None, None]:
        # Yields one Token per match, then a final EOF token ('$', self.eof).
        self.text = text
        while self.position < len(text):
            match = self.pattern.match(text, pos=self.position)
            if match is None:
                # No rule matched here: record the offending character and
                # delegate. NOTE(review): the error handler MUST advance
                # self.position (the default self.error does), otherwise this
                # loop spins forever on the same character.
                self.contain_errors = True
                self.token = Token(text[self.position], None, self.lineno, self.column)
                self.error_handler(self)
                continue
            lexeme = match.group()
            # Named group -> rule name; unnamed rules fall back to the lexeme itself.
            token_type = match.lastgroup if match.lastgroup is not None else match.group()
            self.token = Token(lexeme, token_type, self.lineno, self.column)
            if token_type in self.token_rules:
                # Custom rule hook: may transform the token or suppress it
                # (return None). NOTE(review): this branch `continue`s without
                # the position/column update below, so the rule callback appears
                # responsible for advancing the lexer itself — confirm with the
                # rule implementations.
                token = self.token_rules[token_type](self)
                if token is not None and isinstance(token, Token):
                    yield token
                continue
            yield self.token
            self.position = match.end()
            self.column += len(match.group())
        yield Token('$', self.eof, self.lineno, self.column)

    @property
    def errors(self, clean: bool = True):
        # NOTE(review): properties cannot take arguments, so `clean` is always
        # True when accessed as `lexer.errors` — the (r, c, m) branch is dead.
        return [(m if clean else (r, c, m)) for r, c, m in sorted(self._errors)]

    def add_error(self, line: int, col: int, error_msg: str):
        self._errors.append((line, col, error_msg))

    @staticmethod
    def error(lexer: 'Lexer') -> None:
        # Default error handler: record the error, then skip past the bad lexeme.
        lexer.add_error(
            lexer.token.line, lexer.token.column,
            f'Tokenization error: unexpected symbol "{lexer.token.lex}" '
            f'at line "{lexer.token.line}" and column "{lexer.token.column}"'
        )
        lexer.position += len(lexer.token.lex)
        lexer.column += len(lexer.token.lex)

    @staticmethod
    def _build_regex(table: List[Tuple[str, str]]) -> Pattern:
        # Each named rule becomes a named group so match.lastgroup identifies it.
        return re.compile('|'.join(
            [('(?P<%s>%s)' % (name, regex) if name is not None else '(%s)' % regex)
             for name, regex in table]))

    def __call__(self, text: str) -> List[Token]:
        return list(self.tokenize(text))
3,821
1,155
import random

from django.core.management.base import BaseCommand
from pandas import Series

from src.cache.cache import put_labelled_logs
from src.core.core import get_encoded_logs
from src.jobs.models import Job
from src.jobs.tasks import prediction_task
from src.runtime.tasks import create_prediction_job
from src.utils.django_orm import duplicate_orm_row


class Command(BaseCommand):
    """Ablation-style explanation experiment: randomise selected prefix
    values in a trained job's encoded logs, retrain on the perturbed copy,
    and compare the resulting classification metrics against the original."""

    help = 'tries to deliver an explanation of a random prediction of the trained model'

    def handle(self, *args, **kwargs):
        # Hard-coded job under inspection -- assumes a Job with pk=439
        # exists (TODO confirm; filter(...)[0] raises IndexError otherwise).
        TARGET_JOB = 439
        initial_job_obj = Job.objects.filter(pk=TARGET_JOB)[0]

        # todo: return performances
        print('Initial Job:', initial_job_obj.evaluation.classificationmetrics)  # TODO future bug

        # Work on copies so the originals can be compared against below.
        training_df_old, test_df_old = get_encoded_logs(initial_job_obj)
        training_df = training_df_old.copy()
        test_df = test_df_old.copy()

        # todo: what should I randomise?
        TARGETS = [
            [('prefix_1', 2)],  # <- simple pattern
            [('prefix_2', 3)],  # <- simple pattern
            [('prefix_3', 2), ('prefix_4', 3), ]  # <- complex pattern
        ]
        for target in TARGETS:
            if len(target) == 1:
                # Simple pattern: wherever the column equals the target value,
                # replace it with a random value observed in that column.
                target = target[0]
                for df in [training_df, test_df]:
                    m_col = df[target[0]]
                    del df[target[0]]
                    target_values1 = list(set(m_col.values))
                    df[target[0]] = m_col.apply(
                        lambda x: x if (x != target[1]) else random.choice(target_values1)
                    )
            elif len(target) > 1:
                # Complex pattern: only rows matching ALL (column, value)
                # pairs get every targeted column re-randomised at once.
                for df in [training_df, test_df]:
                    m_col = df[[column for column, _ in target]]
                    possible_values = {}
                    for column, _ in target:
                        possible_values[column] = list(set(df[column]))
                        del df[column]
                    df[[column for column, _ in target]] = m_col.apply(
                        lambda x:
                        x if any([x[column] != value for column, value in target])
                        else Series({
                            column: random.choice(possible_values[column])
                            for column, value in target
                        }),
                        axis=1)
            else:
                raise Exception('target list with unexpected value')

        # The perturbation must have changed something, otherwise the
        # retrain comparison below would be meaningless.
        assert not training_df.equals(training_df_old)
        assert not test_df.equals(test_df_old)

        # todo: save new dataset in memory and create split to use it
        # Clone the split and its logs so the perturbed run does not clobber
        # the original job's artifacts.
        initial_split_obj = initial_job_obj.split
        new_split = duplicate_orm_row(initial_split_obj)
        train_log = duplicate_orm_row(new_split.train_log)
        test_log = duplicate_orm_row(new_split.test_log)  # TODO future bug creates shadows
        train_log.name = 'RETRAIN' + train_log.name
        train_log.path = 'cache/log_cache/' + train_log.name
        train_log.properties = {}
        test_log.name = 'RETRAIN' + test_log.name
        test_log.path = 'cache/log_cache/' + test_log.name
        test_log.properties = {}
        new_split.train_log = train_log
        new_split.test_log = test_log
        new_split.additional_columns = None
        new_split.save()

        prediction_job = create_prediction_job(initial_job_obj, initial_job_obj.encoding.prefix_length)
        prediction_job.split = new_split
        prediction_job.split.save()
        prediction_job.save()

        # Inject the perturbed dataframes into the cache used by the task.
        put_labelled_logs(prediction_job, training_df, test_df)

        # todo: build model
        # Run synchronously; do_publish_result=False keeps it out of the queue.
        prediction_task(prediction_job.id, do_publish_result=False)
        prediction_job.refresh_from_db()

        # todo: return performances
        print('Retrain Job:', prediction_job.evaluation.classificationmetrics)
        print('Done, cheers!')
3,975
1,123
#emacs, this is -*-Python-*- mode from __future__ import division from __future__ import with_statement import contextlib import threading, Queue class ChainLink(object): """essentially a linked list of threads""" def __init__(self): self._queue = Queue.Queue() self._lock = threading.Lock() # start: vars access controlled by self._lock self._next = None # end: vars access controlled by self._lock def fire(self, buf): """fire a listener in new thread. Threadsafe""" self._queue.put( buf ) def append_link(self, chain ): if not isinstance(chain,ChainLink): raise ValueError("%s is not instance of ChainLink"%(str(chain),)) with self._lock: if self._next is None: self._next = chain return else: next = self._next next.append_link( chain ) def get_buf(self,blocking=True): """called from client thread to get a buffer""" if blocking: return self._queue.get() else: return self._queue.get_nowait() def end_buf(self,buf): """called from client thread to release a buffer""" with self._lock: next = self._next if next is not None: next.fire(buf) else: pool = buf.get_pool() pool.return_buffer( buf ) @contextlib.contextmanager def use_buffer_from_chain(link,blocking=True): """manage access to the buffer""" buf = link.get_buf(blocking=blocking) try: yield buf finally: link.end_buf(buf)
1,646
471
from flask import Flask, render_template, request, redirect, logging, make_response, json

from ethw3 import genkey, create_chain_data, verify_chain_data, create_acct, mine, history_slice
from utils_s3 import load_from_fetchlist

# Initialize flask and other global variables
app = Flask(__name__)

# Module-level request state: each POST handler stores its results here and
# redirects to '/', which renders them once and then resets everything.
# NOTE(review): this flash-message-via-globals pattern is not safe under
# concurrent requests -- two users' state can interleave.
address, username, addr, priv, contVer, web3Ver = None,None,None,None,None,None
sig = []
txHash = []
status,status2 = 0,0
recordDict, matchedData = {}, {}
entryList = []


@app.route('/')
def render_index():
    """Render the landing page with recent chain history, then reset the
    result globals so they are displayed only once."""
    mine()
    # Local recordDict intentionally shadows the module-level one.
    recordDict, history = load_from_fetchlist(history=True)
    history = history_slice(history, 20)
    global address, sig, txHash, username, status, contVer, web3Ver, addr, priv, status2, matchedData
    # Snapshot current values for rendering before clearing the globals.
    # NOTE(review): _address is captured but never passed to the template.
    _sig, _address, _txHash, _username, _status, _contVer, _web3Ver, _addr, _priv, _status2, _matchedData = sig, address, txHash, username, status, contVer, web3Ver, addr, priv, status2, matchedData
    address, username, contVer, web3Ver, addr, priv = tuple([None]*6)
    sig, txHash = [], []
    status, status2 = 0,0
    matchedData = {}
    return render_template("index.html", entryList = recordDict, history = history,
                           txhash = _txHash, address = _addr, username = _username,
                           sig=_sig, privkey=_priv, showStatus = _status,
                           web3Ver = _web3Ver, contVer = _contVer,
                           status2 = _status2, matchedData = _matchedData)


@app.route('/submit', methods=['POST'])
def hash_to_chain():
    """Sign an uploaded file and push its record to the chain; sets a short-
    lived 'ID' cookie with the submitting account's address."""
    global sig, txHash, username, address
    sig, address, txHash, username = None,None,None,None
    test = 'check' in request.form  # checkbox toggles test mode
    fsFile = request.files["file"].read()
    pkey = request.form.get("pkey")
    username = request.form.get("name")
    cookie_value = create_acct(pkey).address
    if username == "":
        username = "Anonymous"
    username, sig, address, txHash = create_chain_data(fsFile, pkey, username, test)
    resp = make_response(redirect('/', code=302))
    resp.set_cookie("ID", cookie_value, max_age=60*1)  # 1 minute
    return resp


@app.route("/verify", methods=["POST"])
def verify_from_chain():
    """Verify an uploaded file + signature against the chain."""
    global status, contVer, web3Ver
    status, contVer, web3Ver = None,None,None
    test = 'check2' in request.form
    print(test)
    fsFile = request.files["file"].read()
    sig = request.form.get("sig")
    if fsFile == b'':
        # Nothing was uploaded.
        web3Ver = "nofile"
        status = 0
        return redirect("/", code=302)
    status, contVer, web3Ver = verify_chain_data(fsFile, sig, test)
    return redirect("/", code=302)


@app.route('/generate', methods=['POST', "GET"])
def generatekeypair():
    """Generate a fresh keypair and stash it for display on '/'."""
    global addr, priv
    addr, priv = genkey()
    return redirect("/", code=302)


@app.route("/whosigned", methods=["POST"])
def direct_verify():
    """Look up who signed, given only a signature (no file)."""
    global status2, contVer, matchedData
    # NOTE(review): `status` is not listed in the global statement above, so
    # this assignment creates an unused local -- `status2` was likely meant.
    status, contVer = None,None
    test = 'check3' in request.form
    sig = request.form.get("sig")
    status2, contVer, matchedData= verify_chain_data(sig=sig, test=test)
    return redirect("/", code=302)


if __name__ == '__main__':
    app.run(debug=True)

# When served by gunicorn, route the app's logging through gunicorn's
# error logger so levels and handlers match.
gunicorn_logger = logging.getLogger('gunicorn.error')
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
3,439
1,064
import json
import os
from datetime import date
from typing import List, Dict

from d3m_metadata.metadata import PrimitiveMetadata, PrimitiveFamily, PrimitiveAlgorithmType
from d3m import index

from dsbox.planner.common.primitive import Primitive
from dsbox.schema.profile_schema import DataProfileType as dpt
from collections import defaultdict


class D3MPrimitiveLibrary(object):
    '''Creates a primitive library based on primitives_repo or d3m.index'''

    def __init__(self):
        self.api_version = ''
        # List of all primitives, except those in black list
        self.primitives : List[Primitive] = []
        # List of black listed primitives, e.g. pickling problems
        self.black_list_package : List[str] = []
        # Lookup tables, populated by _setup()
        self.primitive_by_package : Dict[str, Primitive] = {}
        self.primitives_by_family : Dict[PrimitiveFamily, List[Primitive]] = defaultdict(list)
        self.primitives_by_type : Dict[PrimitiveAlgorithmType, List[Primitive]] = defaultdict(list)

    def has_api_version(self, primitives_repo_dir, api_version):
        '''Return True if the repo checkout contains a directory for api_version.'''
        return api_version in os.listdir(primitives_repo_dir)

    def load_from_directory(self, primitives_repo_dir, api_version=''):
        '''Load primitive description from filesystem. E.g. from repo
        https://gitlab.datadrivendiscovery.org/jpl/primitives_repo

        If api_version is empty, the latest 'vYYYY.M.D' directory is used.
        Mainly useful for debugging.
        '''
        listing = os.listdir(primitives_repo_dir)
        if api_version:
            if not api_version in listing:
                # Fix: the original raised 'API version {} not found' without
                # ever calling .format(), so the placeholder stayed literal.
                raise ValueError('API version {} not found'.format(api_version))
        else:
            # Pick the newest 'vYYYY.M.D' directory by date.
            date_str = [x[1:] for x in listing if x.startswith('v')]
            if not date_str:
                raise ValueError('No API version found under {}'.format(primitives_repo_dir))
            dates = [date(*(map(int, x.split('.')))) for x in date_str]
            vdate = sorted(dates)[-1]
            api_version = 'v{}.{}.{}'.format(vdate.year, vdate.month, vdate.day)
        self.api_version = api_version
        api_dir = os.path.join(primitives_repo_dir, self.api_version)
        # Repo layout: <api_version>/<team>/<module>/<version>/primitive.json
        for team in os.listdir(api_dir):
            team_dir = os.path.join(api_dir, team)
            for module in os.listdir(team_dir):
                module_dir = os.path.join(team_dir, module)
                version = self._get_latest_version(os.listdir(module_dir))
                primitive_file = os.path.join(module_dir, version, 'primitive.json')
                with open(primitive_file) as fp:
                    d3m_metadata = PrimitiveMetadata(json.load(fp))
                    primitive = self._create_primitive_desc(d3m_metadata)
                    if primitive.cls in self.black_list_package:
                        print('Black listing primitive: {}'.format(primitive.name))
                    else:
                        self.primitives.append(primitive)
        self._setup()

    def load_from_d3m_index(self):
        '''Load primitive description from installed python packages'''
        for primitive_path, primitive_type in index.search().items():
            primitive = self._create_primitive_desc(primitive_type.metadata)
            if primitive.cls in self.black_list_package:
                print('Black listing primitive: {}'.format(primitive.name))
            else:
                self.primitives.append(primitive)
        self._setup()

    def get_primitives_by_family(self, family : PrimitiveFamily) -> List[Primitive]:
        return self.primitives_by_family[family]

    def has_primitive_by_package(self, path):
        return path in self.primitive_by_package

    def get_primitive_by_package(self, path):
        # Raises KeyError for unknown packages; guard with has_primitive_by_package.
        return self.primitive_by_package[path]

    def augment_with_primitive_profiler(self, profiler_json_file):
        '''Augment primitive with its requirements using Daniel's primitive
        profiler output'''
        with open(profiler_json_file) as fp:
            primitive_profiles = json.load(fp)
        for package, profile in primitive_profiles.items():
            if not self.has_primitive_by_package(package):
                print('Cannot find class: {}'.format(package))
                continue
            primitive = self.get_primitive_by_package(package)
            if 'Requirements' in profile:
                # Note: Cannot use {PrimitivePrecodition[x] : True for x in ...}, because extra "POSTIVE_VALUES"
                primitive.addPrecondition({x : True for x in profile['Requirements']})
            if 'Error' in profile:
                primitive.addErrorCondition({x : True for x in profile['Error']})

    def add_custom_primitive(self, class_str):
        '''Register a primitive given its fully qualified class name.
        Returns the Primitive on success, or None if import/lookup fails.'''
        mod, cls = class_str.rsplit('.', 1)
        try:
            import importlib
            module = importlib.import_module(mod)
            primitive_type = getattr(module, cls)
            primitive = self._create_primitive_desc(primitive_type.metadata)
            # Modify to actual python path
            primitive.cls = class_str
            self.primitives.append(primitive)
            self.primitive_by_package[class_str] = primitive
            return primitive
        except Exception as e:
            print('Failed to add primitive: {}'.format(e))
            return None

    def _get_latest_version(self, versions : List[str]):
        '''Return the element of ``versions`` with the highest dotted version
        number; both '1.2.3' and 'v1.2.3' spellings are accepted.'''
        version_tuples = [v.split('.') if not v.startswith('v') else v[1:].split('.')
                          for v in versions]
        version_tuples = list(map(lambda x : list(map(int, x)), version_tuples))
        latest_tuple = sorted(version_tuples)[-1]
        # Fix: the local was named ``index``, shadowing the module-level
        # ``from d3m import index`` import.
        latest_position = version_tuples.index(latest_tuple)
        return versions[latest_position]

    def _create_primitive_desc(self, d3m : PrimitiveMetadata):
        '''Wrap d3m metadata in our Primitive description object.'''
        primitive = Primitive(d3m.query()['id'], d3m.query()['name'], d3m.query()['python_path'])
        primitive.d3m_metadata = d3m
        return primitive

    def load_black_list(self, jsonfile):
        """Black list primitives that do not work properly"""
        with open(jsonfile) as json_data:
            black_list = json.load(json_data)
        names = []
        for pdict in black_list:
            # pid = pdict['Id']
            name = pdict["Name"]
            cls = pdict["Class"]
            self.black_list_package.append(cls)
            names.append(name)
        print('Primitives to black list: {}'.format(names))

    def is_black_listed(self, cls):
        return cls in self.black_list_package

    def _setup(self):
        '''Build the package/family/type lookup tables from self.primitives.'''
        for p in self.primitives:
            self.primitive_by_package[p.cls] = p
            self.primitives_by_family[p.getFamily()].append(p)
            types = p.getAlgorithmTypes()
            for entry in types:
                # Fix: the original tested isinstance(types[0], str) inside
                # the per-entry loop; the current entry is what matters.
                if isinstance(entry, str):
                    self.primitives_by_type[entry].append(p)
                else:
                    self.primitives_by_type[entry.value].append(p)


class PrimitiveLibrary(object):
    """ Creates a Library of Primitives given the location of a library json """

    def __init__(self, location):
        self.primitives = []
        self.json = self.loadjson(location)
        for p in self.json:
            prim = Primitive(p['Id'], p['Name'], p['Class'])
            for precstr in p.get('Requirements', []):
                prec = self.parseProfile(precstr)
                if prec:
                    prim.addPrecondition(prec)
            for effectstr in p.get('Effects', []):
                effect = self.parseProfile(effectstr)
                if effect:
                    prim.addEffect(effect)
            prim.type = p.get('LearningType', None)
            prim.task = p.get('Task', None)
            prim.column_primitive = p.get('RequiresColumnData', False)
            # Modeling primitives are always persistent.
            prim.is_persistent = (prim.task == "Modeling") or (not p.get('NotPersistent', False))
            prim.unified_interface = p.get('UnifiedInterface', False)
            prim.init_args = p.get('InitArguments', [])
            prim.init_kwargs = p.get('InitKeywordArguments', {})
            self.primitives.append(prim)

    def parseProfile(self, profile):
        '''Map a profile string (optionally '!'-negated) to a
        {DataProfileType: bool} pair; returns None for unknown names.'''
        value = True
        if profile.startswith('!'):
            value = False
            profile = profile[1:]
        if hasattr(dpt, profile):
            return {getattr(dpt, profile): value}
        return None

    def loadjson(self, jsonfile):
        '''Load and return the parsed library json.'''
        with open(jsonfile) as json_data:
            d = json.load(json_data)
        return d

    def getPrimitivesByEffect(self, effect, value):
        '''Primitives that produce ``effect == value`` without requiring it.'''
        plist = []
        for primitive in self.primitives:
            if (primitive.preconditions.get(effect, False) != value and
                    primitive.effects.get(effect, False) == value):
                plist.append(primitive)
        return plist
8,800
2,497
# Program 64 : Capitalize the First Character of a String


def capitalize_first(text: str) -> str:
    """Return *text* with its first character upper-cased and the rest
    lower-cased (str.capitalize semantics; empty input yields '')."""
    return text.capitalize()


def main() -> None:
    """Read one line from stdin and print its capitalized form."""
    print(capitalize_first(input()))


# Fix: guard the stdin read so importing this module no longer blocks on
# (or crashes without) standard input; CLI behavior is unchanged.
if __name__ == '__main__':
    main()
134
45
# -*- coding: utf-8 -*-
'''
Created on Fri Nov 16 09:36:50 2018

@author: Visa Suomi
Turku University Hospital
November 2018

@description:

This model is used to predict radiation dose from pre-treatment patient parameters
'''

#%% clear variables

# NOTE: IPython magics below -- this file is meant to be executed
# cell-by-cell in Spyder/IPython, not run as a plain Python script.
%reset -f
%clear

#%% import necessary libraries

import keras as k
import pandas as pd
import numpy as np
import sklearn as sk
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.utils.class_weight import compute_sample_weight
import scipy as sp
import time
import os

from save_load_variables import save_load_variables
from plot_regression_performance import plot_regression_performance
from analyse_statistics import analyse_statistics
from analyse_correlation import analyse_correlation
from analyse_feature_correlation import analyse_feature_correlation

#%% define logging and data display format

pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format

#%% read data

df = pd.read_csv(r'C:\Users\visa\Documents\TYKS\Machine learning\Radiation dose\radiation-dose\radiation_dose_data.csv', sep = ',')
df_orig = df.copy()

#%% check for duplicates

duplicates = any(df.duplicated())

#%% create synthetic features

# body surface area from weight ('paino') and height ('pituus');
# coefficients match the Du Bois formula (TODO confirm intended formula)
df['BSA'] = 0.007184 * df['paino'].pow(0.425) * df['pituus'].pow(0.725)

#%% calculate nan percent for each label

nan_percent = pd.DataFrame(df.isnull().mean() * 100, columns = ['% of NaN'])

# drop nan values

#df = df.dropna()
df = df.dropna(subset = ['paino'])
#df = df.dropna(subset = ['AHA_cto'])
#df = df.dropna(subset = ['Patient_sex'])
#df = df.dropna(subset = ['FN2BA'])
#df = df.dropna(subset = ['I20.81_I21.01_I21.11_or_I21.41'])
#df = df.dropna(subset = ['add_stent_2_tai_yli'])
#df = df.dropna(subset = ['n_tmp_3'])
#df = df.dropna(subset = ['sten_post_100'])
#df = df.dropna(subset = ['suonia_2_tai_yli'])
#df = df.dropna(subset = ['pituus'])

#%% randomise and divive data for cross-validation

# split data: 80 % training, 10 % validation, 10 % testing
split_ratio = 0.2
training_set, holdout_set = train_test_split(df, test_size = split_ratio)
validation_set, testing_set = train_test_split(holdout_set, test_size = 0.5)
del holdout_set

# obtain sizes (used below to positionally re-split after imputation)
n_training = training_set.shape[0]
n_validation = validation_set.shape[0]
n_testing = testing_set.shape[0]

#%% calculate correlation and standard deviation matrices

std_mat, corr_mat, most_corr = analyse_correlation(training_set, 13, 'Korjattu_DAP_GYcm2')

#%% analyse individual feature correlations

# last argument presumably marks categorical features -- TODO confirm
analyse_feature_correlation(training_set, 'paino', 'Korjattu_DAP_GYcm2', False)
analyse_feature_correlation(training_set, 'AHA_cto', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'Patient_sex', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'FN2BA', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'I20.81_I21.01_I21.11_or_I21.41', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'add_stent_2_tai_yli', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'n_tmp_3', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'sten_post_100', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'suonia_2_tai_yli', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'pituus', 'Korjattu_DAP_GYcm2', False)

#%% analyse target

analyse_statistics(training_set[['Korjattu_DAP_GYcm2']])

#%% replace missing values in all datasets

# create dictionary for impute values based only on training data
# (mean for continuous features, mode for categorical ones)
impute_values = {'BSA': training_set['BSA'].mean(),
                 'paino': training_set['paino'].mean(),
                 'pituus': training_set['pituus'].mean(),
                 'ind_pci_in_stemi': training_set['ind_pci_in_stemi'].mode()[0],
                 'ind_flap_failure': training_set['ind_flap_failure'].mode()[0],
                 'ind_nstemi': training_set['ind_nstemi'].mode()[0],
                 'ind_diag': training_set['ind_diag'].mode()[0],
                 'ind_uap': training_set['ind_uap'].mode()[0],
                 'ind_heart_failure': training_set['ind_heart_failure'].mode()[0],
                 'ind_stemi_other': training_set['ind_stemi_other'].mode()[0],
                 'ind_stable_ap': training_set['ind_stable_ap'].mode()[0],
                 'ind_arrhythmia_settl': training_set['ind_arrhythmia_settl'].mode()[0],
                 'suonia_2_tai_yli': training_set['suonia_2_tai_yli'].mode()[0],
                 'lm_unprotected': training_set['lm_unprotected'].mode()[0],
                 'im': training_set['im'].mode()[0],
                 'lada': training_set['lada'].mode()[0],
                 'ladb': training_set['ladb'].mode()[0],
                 'ladc': training_set['ladc'].mode()[0],
                 'lcxa': training_set['lcxa'].mode()[0],
                 'lcxb': training_set['lcxb'].mode()[0],
                 'lcxc': training_set['lcxc'].mode()[0],
                 'ld1': training_set['ld1'].mode()[0],
                 'ld2': training_set['ld2'].mode()[0],
                 'lita': training_set['lita'].mode()[0],
                 'lm': training_set['lm'].mode()[0],
                 'lom1': training_set['lom1'].mode()[0],
                 'lom2': training_set['lom2'].mode()[0],
                 'lpd': training_set['lpd'].mode()[0],
                 'lpl': training_set['lpl'].mode()[0],
                 'ram_rv': training_set['ram_rv'].mode()[0],
                 'rcaa': training_set['rcaa'].mode()[0],
                 'rcab': training_set['rcab'].mode()[0],
                 'rcac': training_set['rcac'].mode()[0],
                 'rita': training_set['rita'].mode()[0],
                 'rpd': training_set['rpd'].mode()[0],
                 'rpl': training_set['rpl'].mode()[0],
                 'vgrca_ag': training_set['vgrca_ag'].mode()[0],
                 'vglca1_ag': training_set['vglca1_ag'].mode()[0],
                 'vglca2_ag': training_set['vglca2_ag'].mode()[0],
                 'restenosis': training_set['restenosis'].mode()[0],
                 'stent_dimension': training_set['stent_dimension'].mean(),
                 'ball_dimension': training_set['ball_dimension'].mean(),
                 'add_stent_1': 0,
                 'add_stent_2_tai_yli': 0}

# combine datasets for imputing
# NOTE(review): the positional re-split below relies on append() keeping
# training rows first, then validation, then testing -- TODO confirm
df = training_set.append([validation_set, testing_set])

# impute data
for key, val in impute_values.items():
    df[key] = df[key].fillna(val)
del key, val

#%% fill in mutually exclusive categorical values

# obtain categorical impute values (mode of the one-hot group, training only)
sten_post_training = training_set[['sten_post_0', 'sten_post_25', 'sten_post_60',
                                   'sten_post_85', 'sten_post_100']].idxmax(axis = 1)
impute_values['sten_post'] = sten_post_training.mode()[0]
sten_pre_training = training_set[['sten_pre_100', 'sten_pre_85', 'sten_pre_60']].idxmax(axis = 1)
impute_values['sten_pre'] = sten_pre_training.mode()[0]
AHA_training = training_set[['AHA_a', 'AHA_b1', 'AHA_b2', 'AHA_c', 'AHA_cto']].idxmax(axis = 1)
impute_values['AHA'] = AHA_training.mode()[0]
del sten_post_training, sten_pre_training, AHA_training

# impute data: collapse each one-hot group to a single label, fill missing
# with the training mode, and expand back to one-hot columns
sten_post = df[['sten_post_0', 'sten_post_25', 'sten_post_60',
                'sten_post_85', 'sten_post_100']].idxmax(axis = 1)
sten_post = sten_post.fillna(impute_values['sten_post'])
sten_post = pd.get_dummies(sten_post).astype(int)
sten_post = sten_post[['sten_post_0', 'sten_post_25', 'sten_post_60',
                       'sten_post_85', 'sten_post_100']]
df[['sten_post_0', 'sten_post_25', 'sten_post_60',
    'sten_post_85', 'sten_post_100']] = sten_post

sten_pre = df[['sten_pre_100', 'sten_pre_85', 'sten_pre_60']].idxmax(axis = 1)
sten_pre = sten_pre.fillna(impute_values['sten_pre'])
sten_pre = pd.get_dummies(sten_pre).astype(int)
sten_pre = sten_pre[['sten_pre_100', 'sten_pre_85', 'sten_pre_60']]
df[['sten_pre_100', 'sten_pre_85', 'sten_pre_60']] = sten_pre

AHA = df[['AHA_a', 'AHA_b1', 'AHA_b2', 'AHA_c', 'AHA_cto']].idxmax(axis = 1)
AHA = AHA.fillna(impute_values['AHA'])
AHA = pd.get_dummies(AHA).astype(int)
AHA = AHA[['AHA_a', 'AHA_b1', 'AHA_b2', 'AHA_c', 'AHA_cto']]
df[['AHA_a', 'AHA_b1', 'AHA_b2', 'AHA_c', 'AHA_cto']] = AHA

del sten_post, sten_pre, AHA

#%% check for nan values

# interactive sanity check only; the result is not stored
df.isnull().values.any()

#%% split impute data back to training, validation and testing

training_set = df[:n_training]
validation_set = df[n_training:n_training+n_validation]
testing_set = df[-n_testing:]

#%% define feature and target labels

feature_labels = ['paino', 'FN2BA', 'Patient_sex', 'Aiempi_ohitusleikkaus',
                  'suonia_2_tai_yli', 'add_stent_2_tai_yli',
                  'sten_post_85', 'sten_post_100',
                  'I20.81_I21.01_I21.11_or_I21.41', 'I35.0',
                  'ind_nstemi', 'ind_pci_in_stemi', 'ind_stable_ap',
                  'AHA_a', 'AHA_b1', 'AHA_b2', 'AHA_c', 'AHA_cto']

#feature_labels = ['BSA', 'AHA_cto', 'FN2BA',
#                  'add_stent_2_tai_yli',
#                  'sten_post_100', 'suonia_2_tai_yli']

#feature_labels = ['paino', 'pituus', 'Patient_sex', 'Age',
#                  'I20.81_I21.01_I21.11_or_I21.41', 'I35.0', 'FN1AC', 'FN2BA',
#                  'FN2AA', 'TFC00', 'n_tmp_1', 'n_tmp_2', 'n_tmp_3',
#                  'ind_pci_in_stemi', 'ind_flap_failure', 'ind_nstemi',
#                  'ind_diag', 'ind_uap', 'ind_heart_failure', 'ind_stemi_other',
#                  'ind_stable_ap', 'ind_arrhythmia_settl', 'suonia_2_tai_yli',
#                  'lm_unprotected', 'Aiempi_ohitusleikkaus', 'im', 'lada',
#                  'ladb', 'ladc', 'lcxa', 'lcxb', 'lcxc', 'ld1', 'ld2', 'lita',
#                  'lm', 'lom1', 'lom2', 'lpd', 'lpl', 'ram_rv', 'rcaa', 'rcab',
#                  'rcac', 'rita', 'rpd', 'rpl', 'vgrca_ag', 'vglca1_ag',
#                  'vglca2_ag', 'restenosis', 'stent_dimension', 'ball_dimension',
#                  'add_stent_1', 'add_stent_2_tai_yli', 'sten_post_0',
#                  'sten_post_25', 'sten_post_60', 'sten_post_85', 'sten_post_100',
#                  'sten_pre_100', 'sten_pre_85', 'sten_pre_60', 'AHA_a', 'AHA_b1',
#                  'AHA_b2', 'AHA_c', 'AHA_cto', 'IVUS', 'OCT']

target_label = ['Korjattu_DAP_GYcm2']

#%% extract features and targets

training_features = training_set[feature_labels]
validation_features = validation_set[feature_labels]
testing_features = testing_set[feature_labels]

training_targets = training_set[target_label]
validation_targets = validation_set[target_label]
testing_targets = testing_set[target_label]

#%% calculate sample weights

# bin the continuous target into 10 histogram bins and weight samples so
# rare dose ranges count more during training
hist, bin_edges = np.histogram(training_targets, bins = 10)
classes = training_targets.apply(lambda x: pd.cut(x, bin_edges, labels = False,
                                                  include_lowest = True)).values
sample_weights = compute_sample_weight('balanced', classes)

#%% scale features

# one of 'z-score', 'log', 'box-cox'; statistics come from training data only
feature_transform = 'z-score'

if feature_transform == 'z-score':
    t_mean = training_features.mean()
    t_std = training_features.std()
    training_features = (training_features - t_mean) / t_std
    validation_features = (validation_features - t_mean) / t_std
    testing_features = (testing_features - t_mean) / t_std

if feature_transform == 'log':
    training_features = np.log1p(training_features)
    validation_features = np.log1p(validation_features)
    testing_features = np.log1p(testing_features)

if feature_transform == 'box-cox':
    lmbda = 0.15
    training_features = sp.special.boxcox1p(training_features, lmbda)
    validation_features = sp.special.boxcox1p(validation_features, lmbda)
    testing_features = sp.special.boxcox1p(testing_features, lmbda)

#%% scale targets (for skewed data)

target_transform = 'log'

if target_transform == 'log':
    training_targets = np.log1p(training_targets)
    validation_targets = np.log1p(validation_targets)
    testing_targets = np.log1p(testing_targets)

if target_transform == 'box-cox':
    lmbda = 0.15
    training_targets = sp.special.boxcox1p(training_targets, lmbda)
    validation_targets = sp.special.boxcox1p(validation_targets, lmbda)
    testing_targets = sp.special.boxcox1p(testing_targets, lmbda)

#%% build and train neural network model

# define parameters
learning_rate = 0.001
n_epochs = 150
n_neurons = 64
n_layers = 2
batch_size = 5
l1_reg = 0.0
l2_reg = 0.01
batch_norm = False
dropout = None

# guard for re-running this cell after the sample-weight cell was skipped
if 'sample_weights' not in locals():
    sample_weights = None

# build model
# drop any model left over from a previous run of this cell
if 'model' in locals():
    del model

model = k.models.Sequential()
model.add(k.layers.Dense(n_neurons, input_shape = (training_features.shape[1],),
                         kernel_regularizer = k.regularizers.l1_l2(l1 = l1_reg, l2 = l2_reg),
                         activation = 'relu'))
if batch_norm is True:
    model.add(k.layers.BatchNormalization())
if dropout is not None:
    model.add(k.layers.Dropout(dropout))
i = 1
while i < n_layers:
    model.add(k.layers.Dense(n_neurons,
                             kernel_regularizer = k.regularizers.l1_l2(l1 = l1_reg, l2 = l2_reg),
                             activation = 'relu'))
    if batch_norm is True:
        model.add(k.layers.BatchNormalization())
    if dropout is not None:
        model.add(k.layers.Dropout(dropout))
    i += 1
# single linear output unit for regression
model.add(k.layers.Dense(1))
model.compile(optimizer = k.optimizers.Adam(lr = learning_rate),
              loss = 'mean_squared_error',
              metrics = ['mean_absolute_error'])
model.summary()

# train model
class PrintDot(k.callbacks.Callback):
    # compact progress indicator: one dot per epoch, newline every 100
    def on_epoch_end(self, epoch, logs):
        if epoch % 100 == 0:
            print('')
        print('.', end = '')

timestr = time.strftime('%Y%m%d-%H%M%S')
history = model.fit(training_features, training_targets, verbose = 0,
                    callbacks = [PrintDot()], batch_size = batch_size,
                    epochs = n_epochs, sample_weight = sample_weights,
                    validation_data = (validation_features, validation_targets))

#%% evaluate model performance

# calculate loss metrics
training_loss, training_error = model.evaluate(training_features, training_targets)
validation_loss, validation_error = model.evaluate(validation_features, validation_targets)

# make predictions
training_predictions = model.predict(training_features)
training_predictions = pd.DataFrame(training_predictions, columns = target_label,
                                    index = training_features.index, dtype = float)

validation_predictions = model.predict(validation_features)
validation_predictions = pd.DataFrame(validation_predictions, columns = target_label,
                                      index = validation_features.index, dtype = float)

# convert log targets to linear units (for skewed data)
if target_transform == 'log':
    training_targets_lin = np.expm1(training_targets)
    validation_targets_lin = np.expm1(validation_targets)
    training_predictions_lin = np.expm1(training_predictions)
    validation_predictions_lin = np.expm1(validation_predictions)

# convert box-cox targets to linear units (for skewed data)
if target_transform == 'box-cox':
    training_targets_lin = sp.special.inv_boxcox1p(training_targets, lmbda)
    validation_targets_lin = sp.special.inv_boxcox1p(validation_targets, lmbda)
    training_predictions_lin = sp.special.inv_boxcox1p(training_predictions, lmbda)
    validation_predictions_lin = sp.special.inv_boxcox1p(validation_predictions, lmbda)

# plot training performance
if (target_transform == 'log') or (target_transform == 'box-cox'):
    f1 = plot_regression_performance(history, training_targets_lin, training_predictions_lin,
                                     validation_targets_lin, validation_predictions_lin)
else:
    f1 = plot_regression_performance(history, training_targets, training_predictions,
                                     validation_targets, validation_predictions)

#%% save model

# directory name encodes timestamp and rounded training/validation errors
model_dir = 'Keras models\\%s_TE%d_VE%d' % (timestr, round(training_error),
                                            round(validation_error))

if not os.path.exists(model_dir):
    os.makedirs(model_dir)

f1.savefig(model_dir + '\\' + 'evaluation_metrics.pdf', dpi = 600, format = 'pdf',
           bbox_inches = 'tight', pad_inches = 0)

# snapshot every parameter and dataset needed to reproduce this run
variables_to_save = {'learning_rate': learning_rate,
                     'n_epochs': n_epochs,
                     'n_neurons': n_neurons,
                     'n_layers': n_layers,
                     'batch_size': batch_size,
                     'l1_reg': l1_reg,
                     'l2_reg': l2_reg,
                     'batch_norm': batch_norm,
                     'dropout': dropout,
                     'nan_percent': nan_percent,
                     'duplicates': duplicates,
                     'most_corr': most_corr,
                     'corr_mat': corr_mat,
                     'std_mat': std_mat,
                     'split_ratio': split_ratio,
                     'sample_weights': sample_weights,
                     'feature_transform': feature_transform,
                     'target_transform': target_transform,
                     'timestr': timestr,
                     'history': history,
                     'model_dir': model_dir,
                     'df': df,
                     'df_orig': df_orig,
                     'impute_values': impute_values,
                     'feature_labels': feature_labels,
                     'target_label': target_label,
                     'n_training': n_training,
                     'n_validation': n_validation,
                     'n_testing': n_testing,
                     'training_set': training_set,
                     'training_features': training_features,
                     'training_targets': training_targets,
                     'validation_set': validation_set,
                     'validation_features': validation_features,
                     'validation_targets': validation_targets,
                     'testing_set': testing_set,
                     'testing_features': testing_features,
                     'testing_targets': testing_targets}

save_load_variables(model_dir, variables_to_save, 'variables', 'save')

model.save(model_dir + '\\' + 'keras_model.h5')
18,487
6,623
from dolfin import *

# FFC/compiler settings: uflacs representation and aggressive C++ optimisation
# for the generated assembly kernels.
parameters['form_compiler']['representation'] = 'uflacs'
parameters['form_compiler']['cpp_optimize'] = True
parameters['form_compiler']['cpp_optimize_flags'] = '-O3 -ffast-math -march=native'
# Needed because the forms below integrate over interior facets (dS) in parallel.
parameters['ghost_mode'] = 'shared_facet'

# Load the mesh (and its markers, below) from an HDF5 file.
mesh_file = 'cell_grid.h5'
comm = mpi_comm_world()
h5 = HDF5File(comm, mesh_file, 'r')
mesh = Mesh()
h5.read(mesh, 'mesh', False)
# The mesh comes in micro meters. Below it is more convenient to work in cm.
mesh.coordinates()[:] *= 1E-4

# Facets in the mesh have tags 0, 1, 2, 3. One is for interfaces between
# cells and exterior, 3 are cell-cell interfaces. Two is used for marking
# boundary facets of the domain - this is where typically zero DirichletBCs
# are applied for the potential.
surfaces = MeshFunction('size_t', mesh, mesh.topology().dim()-1)
h5.read(surfaces, 'facet')

# The domain is split into 2 subdomains marked as 1 and 2 (cell interior,
# cell exterior). These differ by conductivities.
volumes = MeshFunction('size_t', mesh, mesh.topology().dim())
h5.read(volumes, 'physical')

cell = mesh.ufl_cell()
# We have 3 spaces: S for sigma = -kappa*grad(u) [~electric field]
#                   U for potential u
#                   Q for transmembrane potential p
# (RT/DG/trace is the usual mixed Hdiv formulation of such problems.)
Sel = FiniteElement('RT', cell, 1)
Vel = FiniteElement('DG', cell, 0)
Qel = FiniteElement('Discontinuous Lagrange Trace', cell, 0)

W = FunctionSpace(mesh, MixedElement([Sel, Vel, Qel]))
sigma, u, p = TrialFunctions(W)
tau, v, q = TestFunctions(W)

# Grounding for potential: the trace unknown is pinned to zero on the
# domain boundary (facet tag 2).
bcs = [DirichletBC(W.sub(2), Constant(0), surfaces, 2)]

# Make measures aware of subdomains: dx split by volume tag, dS/ds by facet tag.
dx = Measure('dx', domain=mesh, subdomain_data=volumes)
dS = Measure('dS', domain=mesh, subdomain_data=surfaces)
ds = Measure('ds', domain=mesh, subdomain_data=surfaces)

# Normal of the INTERIOR surface. Note that 1, 2 marking of volume makes
# 2 cells the '+' cells w.r.t to surface and n('+') would therefore be their
# outer normal (that is an outer normal of the outside). ('-') makes the
# orientation right.
n = FacetNormal(mesh)('-')

# Now onto the weak form.
# Electric properties of membrane and interior/exterior.
C_m = Constant(1)     # 1 mu F / cm^2 @ 1  (membrane capacitance, cell-exterior interface)
C_mcc = Constant(1.1) # @ 3                (capacitance on cell-cell interfaces)
cond_int = Constant(5)   # 5 mS / cm  (intracellular conductivity)
cond_ext = Constant(20)  # 20 mS / cm (extracellular conductivity)
# Time step
dt_fem = Constant(1E-3)  # ms

# The source term as a function Q is coming from ODE solver. Here it is
# just some random function.
Q = FunctionSpace(mesh, Qel)
p0 = interpolate(Constant(1), Q)
# And additional source on the boundary is the ionic current. For simplicity:
I_ion = p0
# The source term for cell-cell interface:
I_gap = 2*p0

# The system: mixed Darcy-like block for (sigma, u) plus the membrane coupling
# terms on facet sets 1 (cell-exterior) and 3 (cell-cell), which carry the
# implicit-Euler time discretisation of the transmembrane potential p.
a = ((1/cond_int)*inner(sigma, tau)*dx(1)+(1/cond_ext)*inner(sigma, tau)*dx(2)
     - inner(div(tau), u)*dx(1) - inner(div(tau), u)*dx(2)
     + inner(p('+'), dot(tau('+'), n))*dS(1) + inner(p('+'), dot(tau('+'), n))*dS(3)
     - inner(div(sigma), v)*dx(1) - inner(div(sigma), v)*dx(2)
     + inner(q('+'), dot(sigma('+'), n))*dS(1) + inner(q('+'), dot(sigma('+'), n))*dS(3)
     - (C_m/dt_fem)*inner(q('+'), p('+'))*dS(1)
     - (C_mcc/dt_fem)*inner(q('+'), p('+'))*dS(3))

L = (inner(q('+'), I_ion('+')-(C_m/dt_fem)*p0('+'))*dS(1)
     + inner(q('+'), I_gap('+')-(C_mcc/dt_fem)*p0('+'))*dS(3))

# Additional terms to set to zero the dofs of W.sub(2) which are not on
# the interfaces (trace dofs live on every facet, so inactive ones must be
# constrained to keep the system non-singular).
a -= inner(p('+'), q('+'))*dS(0) + inner(p, q)*ds(2)
L -= inner(Constant(0)('+'), q('+'))*dS(0) + inner(Constant(0), q)*ds(2)

A, b = PETScMatrix(), PETScVector()
assemble_system(a, L, bcs, A_tensor=A, b_tensor=b)

info("size(A) = %d" % A.size(0))
3,610
1,405
from django.test import TestCase from customers.gems_utils import Gems class GemUtilsCase(TestCase): def setUp(self): self.gems = Gems() pass
165
51
import click
import pandas
import pickle  # NOTE(review): unused here — presumably needed by a sibling command; confirm before removing.
import json

from clients import s3, redis


# One mutually-exclusive flag group: all three options write into the same
# `upload` parameter ('s3_and_redis' is the default).
@click.command()
@click.option('--both', 'upload', flag_value='s3_and_redis', default=True,
              help='Upload metadata to both s3 and Redis')
@click.option('--s3', 'upload', flag_value='only_s3',
              help='Upload metadata only to s3')
@click.option('--redis', 'upload', flag_value='only_redis',
              help='Upload metadata only to Redis')
def products(upload):
    # (No docstring on purpose: click would surface it as the command's --help
    # text, which would change the CLI's user-visible behaviour.)
    #
    # Loads the HS product hierarchy CSV from s3 and fans it out as JSON to
    # Redis keys and/or s3 objects, depending on the `upload` flag.
    csv = s3.get('metadata/hs.csv')
    # Column ids are kept as strings so leading zeros survive.
    df = pandas.read_csv(
        csv,
        sep=';',
        header=0,
        names=['id', 'name_pt', 'name_en', 'profundidade_id', 'profundidade'],
        converters={
            "id": str
        }
    )

    products = {}
    product_sections = {}
    product_chapters = {}

    # First pass: collect sections ('Seção') and chapters ('Capítulo') so the
    # second pass can embed them into each product. Redis writes happen inline
    # unless uploading to s3 only.
    for _, row in df.iterrows():
        if row['profundidade'] == 'Seção':
            product_section_id = row['id']
            product_section = {
                'id': product_section_id,
                'name_pt': row["name_pt"],
                'name_en': row["name_en"],
            }
            if upload != 'only_s3':
                redis.set('product_section/' + str(product_section_id),
                          json.dumps(product_section, ensure_ascii=False))
            product_sections[product_section_id] = product_section
        elif row['profundidade'] == 'Capítulo':
            # Chapters are keyed without their 2-char section prefix.
            # NOTE(review): here the chapter key is row['id'][2:], but the
            # second pass looks chapters up via row['id'][2:4] — this only
            # agrees when chapter ids are exactly 2 chars long; verify against
            # the hs.csv layout.
            product_chapter_id = row['id'][2:]
            product_chapter = {
                'id': product_chapter_id,
                'name_pt': row["name_pt"],
                'name_en': row["name_en"],
            }
            if upload != 'only_s3':
                redis.set('product_chapter/' + str(product_chapter_id),
                          json.dumps(product_chapter, ensure_ascii=False))
            product_chapters[product_chapter_id] = product_chapter

    # Second pass: positions ('Posição') become products, each embedding its
    # section and chapter metadata collected above.
    for _, row in df.iterrows():
        if row['profundidade'] == 'Posição':
            product_id = row['id'][2:]
            product_section_id = row["id"][:2]
            product_chapter_id = row["id"][2:4]
            product = {
                'id': row['id'][2:],
                'name_pt': row["name_pt"],
                'name_en': row["name_en"],
                'product_section': product_sections[product_section_id],
                'product_chapter': product_chapters[product_chapter_id],
            }
            products[product_id] = product
            if upload != 'only_s3':
                redis.set('product/' + str(product_id),
                          json.dumps(product, ensure_ascii=False))

    # Bulk JSON dumps to s3 (skipped when uploading to Redis only).
    if upload != 'only_redis':
        s3.put('product.json', json.dumps(products, ensure_ascii=False))
        s3.put('product_section.json', json.dumps(
            product_sections, ensure_ascii=False))
        s3.put('product_chapter.json', json.dumps(
            product_chapters, ensure_ascii=False))
    click.echo("Products loaded.")
2,855
866
#!/usr/bin/env python3
import argparse
import os
import io
import subprocess
import sys

from tabulate import tabulate


def parse_args():
    """Parse CLI options and return them as a plain dict."""
    # Fixed: the original description was three adjacent string literals with
    # no separators, rendering as one run-together sentence in --help.
    desc = (
        'Performance testing script for OasisLMF input file generation. '
        'This script expects a set of nested sub directories each containing '
        'acc.csv, loc.csv, keys.csv'
    )
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('-t', '--time-threshold', default=300, type=int,
                        help='Maximum time for each file generation test')
    parser.add_argument('-d', '--test-directory', default='.', type=str,
                        help='File path of the test data directory')
    parser.add_argument('-o', '--output-directory', default='/tmp/oasis-files', type=str,
                        help='Filepath to generate oasisfiles in')
    parser.add_argument('-l', '--log-output', default='/var/report/oasisfiles_benchmark.log', type=str,
                        help='Log file path')
    parser.add_argument('-a', '--extra-oasislmf-args', default='', type=str,
                        help='Additional arguments to run Oasislmf with')
    return vars(parser.parse_args())


def run_command(cmd_str):
    """Run a shell-free command, echo its combined output, and return it.

    Raises CalledProcessError if the command exits non-zero (after printing
    its output, so failures are still visible in the benchmark log).
    """
    resp = subprocess.run(cmd_str.split(),
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT,
                          check=False)
    stdout = resp.stdout.decode('utf-8').strip()
    print(stdout)
    resp.check_returncode()
    return stdout


# NOTE: the misspelled name `pasrse_gen_output` is kept intentionally —
# it is this module's public name and may be imported elsewhere.
def pasrse_gen_output(stdout_string):
    """Extract per-function timings from `oasislmf ... --verbose` output.

    Returns a dict mapping function name -> seconds, plus a 'total' entry.
    Each 'COMPLETED' line ends with '<func> in <seconds>s'; the final such
    line carries the overall run time.
    """
    runtime_list = [l for l in stdout_string.split('\n') if 'COMPLETED' in l]
    t_breakdown = dict()

    # Last COMPLETED line holds the total; strip the trailing 's' unit.
    total = runtime_list.pop().rsplit(' ').pop()
    t_breakdown['total'] = float(total[:-1])

    for l in runtime_list:
        line = l.split(' ')
        func = line[-3]
        elapsed = line[-1]
        t_breakdown[func] = float(elapsed[:-1])
    return t_breakdown


def tabulate_data(test_results, output_fp=None):
    """Render `test_results` ({size: {func: seconds}}) as an RST table.

    Prints the table, and also writes it to `output_fp` when given
    (creating the parent directory if necessary).
    """
    input_sizes = sorted(list(test_results.keys()))
    time_values = dict()

    # Row labels are the short (unqualified) function names.
    func_names = test_results[input_sizes[0]].keys()
    for f in func_names:
        name = f.split('.')[-1]
        time_values[name] = list()

    # One column of timings per portfolio size, in ascending size order.
    for n in input_sizes:
        for f in func_names:
            name = f.split('.')[-1]
            time_values[name].append(test_results[n][f])

    timing_tbl = tabulate(
        [[k] + time_values[k] for k in time_values],
        headers=['portfolio size'] + input_sizes,
        tablefmt="rst")

    # if set write the test summary table to a log file
    if output_fp:
        log_path = os.path.abspath(output_fp)
        log_dir = os.path.dirname(log_path)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        with io.open(log_path, 'w') as log:
            log.write(timing_tbl)
    print(timing_tbl)


def run_tests(test_dir, run_dir, log_fp, oasis_args, threshold=None):
    '''
    Run `oasislmf model generate-oasis-files` once per test sub-directory,
    smallest portfolio first, collecting the per-function timing breakdown.
    If `threshold` is set, exit(1) as soon as any run's total exceeds it.

    Output of each run entry in `results`

    In [3]: example_run
    Out[3]:
    {'total': 88.63,
     'oasislmf.manager.__init__': 0.0,
     'oasislmf.model_preparation.gul_inputs.get_gul_input_items': 16.05,
     'oasislmf.model_preparation.gul_inputs.write_items_file': 3.84,
     'oasislmf.model_preparation.gul_inputs.write_coverages_file': 1.88,
     'oasislmf.model_preparation.gul_inputs.write_gul_input_files': 5.94,
     'oasislmf.model_preparation.summaries.get_summary_mapping': 0.8,
     'oasislmf.model_preparation.summaries.write_mapping_file': 6.77,
     'oasislmf.model_preparation.il_inputs.get_il_input_items': 30.42,
     'oasislmf.model_preparation.il_inputs.write_fm_policytc_file': 8.49,
     'oasislmf.model_preparation.il_inputs.write_fm_profile_file': 1.59,
     'oasislmf.model_preparation.il_inputs.write_fm_programme_file': 7.52,
     'oasislmf.model_preparation.il_inputs.write_fm_xref_file': 2.98,
     'oasislmf.model_preparation.il_inputs.write_il_input_files': 21.44}
    '''
    sub_dirs = next(os.walk(test_dir))[1]
    test_data = dict()
    results = dict()

    for d in sub_dirs:
        loc_fp = os.path.join(test_dir, d, 'loc.csv')
        acc_fp = os.path.join(test_dir, d, 'acc.csv')
        keys_fp = os.path.join(test_dir, d, 'keys.csv')

        # Portfolio size = data rows in loc.csv (minus the header).
        # Fixed: the file handle was previously leaked via a bare open().
        with open(loc_fp) as loc_file:
            n_sample = sum(1 for line in loc_file) - 1

        cmd_str = (f'oasislmf model generate-oasis-files -x {loc_fp} -y {acc_fp} '
                   f'-z {keys_fp} --oasis-files-dir {run_dir} {oasis_args} --verbose')
        test_data[n_sample] = cmd_str

    for t in sorted(test_data.keys()):
        print('Running: ')
        print(f"cmd = {test_data[t]}")
        print(f'size = {t}')
        print(f't_max = {threshold}')
        stdout = run_command(test_data[t])
        run = pasrse_gen_output(stdout)
        results[t] = run
        print(f"t_total = {run['total']}\n")

        # If given check that threshold isn't exceeded
        if threshold:
            if run['total'] > threshold:
                print('FAILED\n')
                tabulate_data(results, log_fp)
                sys.exit(1)
            else:
                print('PASSED\n')

    tabulate_data(results, log_fp)
    return results


if __name__ == "__main__":
    args = parse_args()
    run_tests(args['test_directory'],
              args['output_directory'],
              args['log_output'],
              args['extra_oasislmf_args'],
              args['time_threshold'])
5,158
1,884
#!/bin/env python from app import create_app, socketio from app.db_setup import init_db app = create_app(debug=False) init_db() if __name__ == '__main__': socketio.run(app, port=5001)
191
74
# from tkinter import * # root = Tk() # frametop = Frame(root) # framebottom = Frame(root) # frameleft = Frame(framebottom) # frameright = Frame(framebottom) # text = Text(frametop) # scroll = Scrollbar(frametop, command=text.yview) # btn1 = Button(frameleft, text="Course") # btn2 = Button(frameleft, text="Abscences") # btn3 = Button(frameright, text="Notes") # btn4 = Button(frameright, text="Return") # text['yscrollcommand'] = scroll.set # frametop.pack(side=TOP, fill=BOTH, expand=1) # framebottom.pack(side=BOTTOM, fill=BOTH, expand=1) # frameleft.pack(side=LEFT, fill=BOTH, expand=1) # frameright.pack(side=RIGHT, fill=BOTH, expand=1) # text.pack(side=TOP, fill=BOTH, padx=5, pady=5, expand=1) # scroll.pack(side=BOTTOM, fill=BOTH, padx=5, pady=5, expand=1) # btn1.pack(side=TOP, fill=BOTH, padx=5, pady=5, expand=1) # btn2.pack(side=BOTTOM, fill=BOTH, padx=5, pady=5, expand=1) # btn3.pack(side=TOP, fill=BOTH, padx=5, pady=5, expand=1) # btn4.pack(side=BOTTOM, fill=BOTH, padx=5, pady=5, expand=1) # root.mainloop() from tkinter import * root = Tk() button_frame = Frame(root) button_frame.pack(fill=X, side=BOTTOM) reset_button = Button(button_frame, text='Reset') run_button = Button(button_frame, text='Run') button_frame.columnconfigure(0, weight=1) button_frame.columnconfigure(1, weight=1) reset_button.grid(row=0, column=0, sticky=W+E) run_button.grid(row=0, column=1, sticky=W+E) root.mainloop()
1,427
612
import os import logging import shutil from optimus.i18n.manager import I18NManager def test_update_catalogs_all( minimal_i18n_settings, caplog, temp_builds_dir, fixtures_settings ): """ Update every catalogs """ basepath = temp_builds_dir.join("i18n_update_catalogs_all") # Copy sample project to temporary dir samplename = "minimal_i18n" samplepath = os.path.join(fixtures_settings.fixtures_path, samplename) destination = os.path.join(basepath.strpath, samplename) shutil.copytree(samplepath, destination) # Get manager with settings settings = minimal_i18n_settings(destination) manager = I18NManager(settings) updated = manager.update_catalogs() assert updated == ["en_US", "fr_FR"] assert caplog.record_tuples == [ ( "optimus", logging.INFO, "Updating catalog (PO) for language 'en_US' to {}".format( manager.get_po_filepath("en_US") ), ), ( "optimus", logging.INFO, "Updating catalog (PO) for language 'fr_FR' to {}".format( manager.get_po_filepath("fr_FR") ), ), ] def test_update_catalogs_one( minimal_i18n_settings, caplog, temp_builds_dir, fixtures_settings ): """ Update only default locale catalog """ basepath = temp_builds_dir.join("i18n_update_catalogs_one") # Copy sample project to temporary dir samplename = "minimal_i18n" samplepath = os.path.join(fixtures_settings.fixtures_path, samplename) destination = os.path.join(basepath.strpath, samplename) shutil.copytree(samplepath, destination) # Get manager with settings settings = minimal_i18n_settings(destination) manager = I18NManager(settings) updated = manager.update_catalogs([settings.LANGUAGE_CODE]) assert updated == [settings.LANGUAGE_CODE] assert os.path.exists(manager.get_po_filepath(settings.LANGUAGE_CODE)) is True assert caplog.record_tuples == [ ( "optimus", logging.INFO, "Updating catalog (PO) for language 'en_US' to {}".format( manager.get_po_filepath(settings.LANGUAGE_CODE) ), ), ]
2,270
731
"""This module contains examples of stream_func where f_type is 'element' and stream_func has a single input stream, and a single output stream, and the operation is stateful. The state captures information in the past input streams; this information is required to append values to the tails of the output streams. The functions on static Python data structures are of the form: element, state -> element, state These functions typically have the following structure: (1) Extract variables from the state. (2) Compute the output and the new state. (3) Return (output, new_state) """ if __package__ is None: import sys from os import path sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) ) from functools import partial from Stream import Stream from Operators import stream_func from stream_test import * # Functions: element, state -> element, state def cumulative_sum(v, cumulative): """ This function is used to output a stream where the n-th value on the output stream is the cumulative sum of the first n values of the input stream. The state of the input stream is cumulative. When used to create a stream, cumulative is the sum of all the values in the input stream received so far. v is the next value received in the input stream. """ cumulative += v return (cumulative, cumulative) def average(v, state): """ This function is used to output a stream where the n-th value on the output stream is the average of the first n values of the input stream. The state of the input stream is the pair (n,cum). When used to create a stream, n is the number of values received on the input stream, and cum is the sum of all the values in the input stream received so far. v is the next value received in the input stream. """ n, cum = state n += 1 cum += v mean = cum/float(n) state = (n, cum) return (mean, state) # Functions: stream -> stream. # Each element of the output stream is f() applied to the corresponding # element of the input stream. 
stream_cumulative = partial(stream_func, f_type='element', f=cumulative_sum, num_outputs=1, state=0) stream_average = partial(stream_func, f_type='element', f=average, num_outputs=1, state=(0,0.0)) def test(): # Create stream x and give it names 'x'. x = Stream('input') # v is the stream returned by stream_cumulative(x) and # w is the stream returned by stream_cumulative(v). v = stream_cumulative(x) w = stream_cumulative(v) # avg is the stream returned by stream_average(x) avg = stream_average(x) # Give names to streams. This is helpful in reading output. v.set_name('cumulative sum of input') w.set_name('cumulative sum of cumulative sum of input') avg.set_name('average of input') check(v, [3, 8, 18, 20, 25, 36]) check(w, [3, 11, 29, 49, 74, 110]) check(avg, [3.0, 4.0, 6.0, 5.0, 5.0, 6.0]) print print 'add values [3, 5, 10] to the tail of the input stream.' # Add values to the tail of stream x. x.extend([3, 5, 10]) # Print the N most recent values of streams x, v, w. x.print_recent() v.print_recent() w.print_recent() avg.print_recent() print print 'add values [2, 5, 11] to the tail of the input stream.' # Add more values to the tail of stream x. x.extend([2, 5, 11]) # Print the N most recent values of streams x, v, w. x.print_recent() v.print_recent() w.print_recent() avg.print_recent() check_empty() if __name__ == '__main__': test()
3,707
1,186
# mpqa3_to_dict helps to convert MPQA stand-off format to python dictionaries. # It provides the following functionalities: # 1) Clean up the MPQA 3.0 corpus # 2) Convert an MPQA document to a dictionary # 3) Convert an entire corpus to a dictionary import os import re HAS_LIST_OF_IDS = [ # These attributes may have any number of ids. (>= 0) "nested-source", "attitude-link", "insubstantial", "sTarget-link", "newETarget-link", "eTarget-link", "target-speech-link" ] class mpqa3_to_dict: """ mpqa3_to_dict helps to clean up the corpus and convert MPQA stand-off format to python dictionaries. """ corpus_name = "" # Name of the corpus from which the documents were drawn. mpqa_dir = "mpqa_dataprocessing\\database.mpqa.cleaned" # mpqa root directory def __init__(self, corpus_name="", mpqa_dir="mpqa_dataprocessing\\database.mpqa.cleaned"): self.corpus_name = corpus_name self.mpqa_dir = mpqa_dir def __cleanup_data(self, anno_lines): ### IT ACTUALLY DOES NOTHING AT THE MOMENT !!! """ It cleans up the annotation lines by correcting misspelled values, attributes and more. :param anno_lines: a list of the annotation lines of a document :return: The cleaned up list of the annotation lines of the document """ return anno_lines def doc_to_dict(self, docname, cleaning=True): """ It converts an MPQA document to a python dictionary. :param docname: The name of the document to be converted. :param cleaning: It cleans up the data, if set to true. :return: A python dictionary representing the document. """ # example: ./docs/20011024/21.53.09-11428 with open(os.path.join(self.mpqa_dir, "docs", docname)) as doc_file: doc_text = doc_file.read() # example: ./man_anns/20011024/21.53.09-11428/gateman.mpqa.lre.3.0 anno_lines = [] with open(os.path.join(self.mpqa_dir, "man_anns", docname, "gateman.mpqa.lre.3.0")) as anno_file: anno_lines = anno_file.readlines() if cleaning: # Clean up the data, if requested. 
anno_lines = self.__cleanup_data(anno_lines) # Final output output = { "agent": [], "expressive-subjectivity": [], "direct-subjective": [], "objective-speech-event": [], "attitude": [], "targetFrame": [], "sTarget": [], "eTarget": [], "sentence": [], "supplementaryAttitude": [], "supplementaryExpressive-subjectivity": [], "target-speech": [], "annotations": {} } # Process all annotation lines for anno in anno_lines: if len(anno) < 1: # If the line is empty then skip it. continue if anno[0] == '#': # If it is a comment then skip it. continue # Parsing the main components of an annotation line. line_id, span, anno_type, attributes = anno.split('\t') # Converting span to a tuple of ints. span = span.split(',') span = (int(span[0]), int(span[1])) # Removes ' \n' at the end of the string. attributes = attributes.strip() # A temporary variable for an annotation line before knowing its ID. temp_dict = { "anno-type": anno_type, "head": doc_text[span[0]:span[1]], "line-id": int(line_id), "span-in-doc": span, } # Process all attributes if len(attributes) == 0: # example: split annotation continue # Splits with the whitespaces out of the quotes as the delimeter attributes = attributes.strip() attributes = re.split(r' (?=([^"]*"[^"]*")*[^"]*$)', attributes) for attribute in attributes: key, val = attribute.split('=') key, val = key.strip(), val.strip() val = val[1:-1] # Removes double quotation marks if key in HAS_LIST_OF_IDS: temp_dict[key] = [] if val == "none" or val == "" else [v.strip() for v in val.split(',')] else: temp_dict[key] = val # We probably know the identifier assigned to the annotation by now # except some of the agnets and the sentences id = temp_dict.pop("id", line_id) # Updating the final output output["annotations"][id] = temp_dict if anno_type in output: output[anno_type].append(id) else: # If there's a new type of annotation, warn us in red! 
output[anno_type] = [id] print("\033[91m <UNKNOWN ANNO: {}>\033[00m".format(anno_type)) # Set sentence-id, sentence and span-in-sentence for key in output["annotations"].keys(): if key in output["sentence"]: continue # Skip changing sentences # Search for the corresponding sentence for sentence_id in output["sentence"]: # Checks if the annotation is whithin this sentence if output["annotations"][sentence_id]["span-in-doc"][0] <= output["annotations"][key]["span-in-doc"][0]\ and output["annotations"][sentence_id]["span-in-doc"][1] >= output["annotations"][key]["span-in-doc"][1]: output["annotations"][key]["sentence-id"] = sentence_id output["annotations"][key]["text"] = output["annotations"][sentence_id]["head"] output["annotations"][key]["span-in-sentence"] = ( output["annotations"][key]["span-in-doc"][0] - output["annotations"][sentence_id]["span-in-doc"][0], output["annotations"][key]["span-in-doc"][1] - output["annotations"][sentence_id]["span-in-doc"][0] ) break return output def corpus_to_dict(self, doclist=None, doclist_filename='doclist.3.0', cleaning=True): """ It converts an entire list of MPQA documents to a python dictionary. :param doclist: The list of document names to be converted. If set, doclist_filename will be ignored. :param doclist_filename: The name of the file which contains a list of the document names. :param cleaning: It cleans up the data, if set to true. :return: A python dictionary representing the corpus. """ if doclist is None: doclist = self.__doclistfile_to_doclist(doclist_filename) output = { "corpus": self.corpus_name, # Name of the corpus from which the documents were drawn. "doclist": doclist, # List of the document names. "docs": {} # Dictionary of document annotations in dictionary format. 
} for docname in doclist: output["docs"][docname] = self.doc_to_dict(docname, cleaning) return output def __doclistfile_to_doclist(self, doclist_filename='doclist.3.0'): """ An auxiliary function for converting a file of a list of document names to a list of document names. :param doclist_filename: The name of the file which contains a list of the document names. :return: A python list containing the document names. """ # example: ./doclist.3.0 doclist = [] with open(os.path.join(self.mpqa_dir, doclist_filename)) as doclist_file: for doc in doclist_file.readlines(): doclist.append(doc[:-1]) # Removes \n at the end of the line return doclist
7,815
2,269
# # All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or # its licensors. # # For complete copyright and license terms please see the LICENSE at the root of this # distribution (the "License"). All use of this software is governed by the License, # or, if provided, by the license below or the license accompanying this file. Do not # remove or modify any license notices. This file is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # import service import CloudCanvas import survey_utils import survey_common import validation_utils import validation_common from boto3.dynamodb.conditions import Key @service.api def get(request, survey_id): cognito_identity_id = request.event["cognitoIdentityId"] params = {} params['KeyConditionExpression'] = Key('user_id').eq(cognito_identity_id) & Key('survey_id').eq(survey_id) params['IndexName'] = 'UserAnswersIndex' params['ProjectionExpression'] = 'submission_id' query_result = survey_utils.get_answer_table().query(**params) submission_ids = [] for submission in query_result['Items']: submission_ids.append(submission['submission_id']) return { 'submission_id_list': submission_ids }
1,283
388
#Crie um programa que tenha uma tupla com várias palavras (não usar acentos). Depois disso, você deve mostrar, para cada palavra, quais são as suas vogais. palavras=('SOPA','BATATAS','CACAU','CASTANHA','LASANHA','GOSTOSURAS','TRAVESSURAS','PARMEGIANA') for p in palavras: print(f'\n As Vogais de {p} são: ',end='') for letra in p: if letra in 'AEIOU': print(letra, end=' ')
402
160
import os
from django.conf import *
from django.shortcuts import render_to_response, render
from django.http import HttpResponse
from .models import Data, MovingAvg, Movements, Sigma
from datetime import datetime
from django.template import RequestContext


def index(request):
    # Builds a per-ticker market summary (latest price plus the latest
    # 'market'-series movement stats and one entry per moving-average span)
    # and renders it with the marketgrab/index.html template.
    ticker = Data.objects.values_list('ticker').distinct()
    market = []
    for t in ticker:
        t = t[0]  # values_list yields 1-tuples; unwrap to the ticker string.
        price = Data.objects.filter(ticker=t).latest('date').aclose_price
        date = Data.objects.filter(ticker=t).latest('date').date
        move_price = Movements.objects.filter(ticker=t, series='market').latest('date').price
        move_percent = Movements.objects.filter(ticker=t, series='market').latest('date').percent
        move_zscore = Movements.objects.filter(ticker=t, series='market').latest('date').zvalue
        spans = MovingAvg.objects.values_list('span').distinct()
        i = {
            'index':t,
            'price':price,
            'date':date,
            'move_price':move_price,
            'move_percent':round(move_percent, 4),
            'move_zscore':round(move_zscore, 4),
            'hist':'marketgrab/'+t+'_hist.png'  # static histogram image path
        }
        market.append(i)
        for s in spans:
            s = s[0]  # unwrap the 1-tuple span value
            avg_price = Movements.objects.filter(ticker=t, series=s).latest('date').price
            avg_percent = Movements.objects.filter(ticker=t, series=s).latest('date').percent
            zscore = Movements.objects.filter(ticker=t, series=s).latest('date').zvalue
            a = {
                'ticker':t,
                'span':s,
                'price':avg_price,
                'percent':round(avg_percent, 4),
                'zscore':round(zscore, 4)
            }
            # Attaches the span entry to this ticker's dict in `market`.
            # NOTE(review): `.next()` is Python 2 only — under Python 3 this
            # raises AttributeError (use next(...)). Also, the generator just
            # re-finds the dict `i` appended above, so `i[str(s) + '_avg'] = a`
            # would be equivalent; confirm target Python version.
            (item for item in market if item['index']==t).next()[str(s) + '_avg'] = a
    context = RequestContext(request, {'market':market})
    return render_to_response('marketgrab/index.html', context_instance = context)


def detail(request, t):
    # Placeholder detail view: plain-text response that only reports whether
    # any data exists for ticker `t`.
    if Data.objects.filter(ticker=t).exists():
        response = "You're looking at the details for %s."
    else:
        response = "Sorry, cannot find data for %s."
    return HttpResponse(response % t)


def graphs(request):
    # Lists every PNG under public/static/marketgrab and renders them in the
    # graphs template.
    # NOTE(review): `settings` is only available here via the star import
    # `from django.conf import *` above — fragile; prefer an explicit
    # `from django.conf import settings`.
    path = os.path.abspath(os.path.join(settings.BASE_DIR, '..', 'public/static/marketgrab'))
    images = []
    for f in os.listdir(path):
        if f.endswith("png"):
            images.append("marketgrab/%s" % f)
    context = {'images': images}
    return render(request, 'marketgrab/graphs.html', context)
2,534
785
# Method_#1 #Regex_Pattern = r"\S\S\s\S\S\s\S\S" # Do not delete 'r'. # Method_#2 Regex_Pattern = r"(\S\S\s){2}(\S\S){1}" import re print(str(bool(re.search(Regex_Pattern, input()))).lower())
195
97
import os import shutil class TempFolder: def __init__(self, folder: str, **kwargs): self.folder = folder self.lock = kwargs.pop("LOCK", None) def lockit(self): if self.lock is not None: self.lock.acquire() def unlockit(self): if self.lock is not None: self.lock.release() def __enter__(self): self.lockit() i = 0 self.folder += f"_{i}" while os.path.isdir(self.folder): self.folder = self.folder.rsplit("_", 1)[0] i += 1 self.folder += f"_{i}" os.mkdir(self.folder) self.unlockit() return os.path.join(".", self.folder) def __exit__(self, type, value, traceback): self.lockit() shutil.rmtree(self.folder, ignore_errors=False) self.unlockit()
842
275
regno='1941012661' year=2019 # print('My Regd. No is %s and I have taken admission in B. Tech. In %d.' %(regno, year)) print('My Regd. No is', regno,'and I have taken admission in B. Tech. In', year,'.' )
205
90
""" Описывается база данных для хранения, промежуточных результатов """ from django.db import models class SpeechApiModel(models.Model): encoded_data = models.TextField() ext = models.CharField(max_length=300) # blank=True означает, что поле может быть пустым при заполнении бд model = models.CharField(max_length=300, blank=True) vocab = models.TextField(blank=True) result = models.TextField(blank=True) def __str__(self): return self.model
483
157
"""setuptools entry point.""" from codecs import open from os import path from setuptools import find_packages, setup HERE = path.abspath(path.dirname(__file__)) with open(path.join(HERE, "README.rst"), encoding="utf-8") as f: LONG_DESCRIPTION = f.read() with open(path.join(HERE, "src", "den", "VERSION")) as version_file: VERSION = version_file.read().strip() setup( name="den", version=VERSION, description="Den is a home for your home's data.", long_description=LONG_DESCRIPTION, author="Kris Molendyke", author_email="kris@k20e.com", url="https://git.io/k20e", license="MIT", classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6" ], keywords="nest thermostat smoke alarm camera weather propane monitor", packages=find_packages("src"), package_dir={"": "src"}, install_requires=["backoff>=1.3.2", "influxdb>=3.0", "python-forecastio>=1.3.5", "requests>=2.0"], extras_require={ "dev": [ "tox", "yapf", ], "doc": [ "Sphinx", "alabaster", "sphinx-argparse", "sphinx-autobuild", ], "notebook": ["jupyter", ], "test": [ "coverage", "prospector", "mock", "responses", ], }, package_data={}, include_package_data=True, data_files=[], test_suite="tests", python_requires=">=2.7", entry_points={"console_scripts": ["den = den.__main__:main", ], }, )
1,690
567
''' Created on 31 Jul 2009 @author: charanpal ''' from __future__ import print_function import sys import os import numpy from contextlib import contextmanager import numpy.random as rand import logging import scipy.linalg import scipy.sparse as sparse import scipy.special import pickle from apgl.util.Parameter import Parameter class Util(object): ''' A class with some general useful function that don't fit in anywhere else. Not very OO unfortunately. ''' def __init__(self): ''' Constructor ''' pass @staticmethod def histogram(v): """ Compute a histogram based on all unique elements in vector v """ if v.ndim != 1: raise ValueError("Input must be a dimension 1 vector") uniqElements = numpy.unique(v) numElements = uniqElements.shape[0] hist = numpy.zeros(numElements) for i in range(0, numElements): hist[i] = sum(v == uniqElements[i]) return (hist, uniqElements) @staticmethod def mode(v): """ Returns the mode of a 1D vectors, and the 1st more frequent element if more than 1 """ if v.ndim != 1: raise ValueError("Input must be a dimension 1 vector") uniqElements = numpy.unique(v) freqs = numpy.zeros(uniqElements.shape[0]) for i in range(uniqElements.shape[0]): freqs[i] = numpy.sum(v == uniqElements[i]) return uniqElements[numpy.argmax(freqs)] @staticmethod def sampleWithoutReplacement(sampleSize, totalSize): """ Create a list of integers from 0 to totalSize, and take a random sample of size sampleSize. The sample ordered. 
""" perm = rand.permutation(totalSize) perm = perm[0:sampleSize] perm = numpy.sort(perm) return perm @staticmethod def randNormalInt(mean, sd, min, max): """ Returns a normally distributed integer within a range (inclusive of min, max) """ i = round(rand.normal(mean, sd)); while i<min or i>max: i = round(random.normal(mean, sd)); return i @staticmethod def computeMeanVar(X): mu = numpy.mean(X, 0) X2 = X - mu sigma = numpy.dot(X2.T, X2)/X.shape[0] return (mu, sigma) @staticmethod def iterationStr(i, step, maxIter, preStr="Iteration: "): outputStr = "" if maxIter == 1: outputStr = preStr + str(i) + " (1.0)" elif i % step == 0: #frm = inspect.stack()[1] #mod = inspect.getmodule(frm[0]) #logging.info(mod.__name__ + ": " + str(i) + " (" + str(float(i)/maxIter) + ")") outputStr = preStr + str(i) + " (" + str("%.3f" % (float(i)/(maxIter-1))) + ")" elif i == maxIter-1: outputStr = preStr + str(i) + " (" + str("%.3f" % (float(i)/(maxIter-1))) + ")" else: raise ValueError("Got invalid input: " + str((i, step, maxIter))) return outputStr @staticmethod def printIteration(i, step, maxIter, preStr="Iteration: "): if i % step == 0 or i==maxIter-1: logging.debug(Util.iterationStr(i, step, maxIter, preStr)) @staticmethod def printConciseIteration(i, step, maxIter, preStr="Iteration: "): if i==0: print(Util.iterationStr(i, step, maxIter, preStr), end=""), elif i!=maxIter-1: print(Util.iterationStr(i, step, maxIter, " "), end="") else: print(Util.iterationStr(i, step, maxIter, " ")) @staticmethod def abstract(): """ This is a method to be put in abstract methods so that they are identified as such when called. 
""" import inspect caller = inspect.getouterframes(inspect.currentframe())[1][3] raise NotImplementedError("Method " + caller + ' must be implemented in subclass') @staticmethod def rank(A, tol=1e-8): """ Kindly borrowed from the following forum thread: http://mail.scipy.org/pipermail/numpy-discussion/2008-February/031218.html """ s = numpy.linalg.svd(A, compute_uv=False) return numpy.sum(numpy.where(s>tol, 1, 0)) @staticmethod def randomChoice(V, n=1): """ Make a random choice from a vector V of values which are unnormalised probabilities. Return the corresponding index. For example if v = [1, 2, 4] then the probability of the indices repectively are [1/7, 2/7, 4/7]. The parameter n is the number of random choices to make. If V is a matrix, then the rows are taken as probabilities, and a choice is made for each row. """ Parameter.checkClass(V, numpy.ndarray) if V.shape[0]==0: return -1 if V.ndim == 1: cumV = numpy.cumsum(V) p = numpy.random.rand(n)*cumV[-1] return numpy.searchsorted(cumV, p) elif V.ndim == 2: cumV = numpy.cumsum(V, 1) P = numpy.random.rand(V.shape[0], n)*numpy.array([cumV[:, -1]]).T inds = numpy.zeros(P.shape, numpy.int) for i in range(P.shape[0]): inds[i, :] = numpy.searchsorted(cumV[i, :], P[i, :]) return inds else: raise ValueError("Invalid number of dimensions") @staticmethod def fitPowerLaw(x, xmin): """ Take a sample of data points which are drawn from a power law probability distribution (p(x) = (x/xmin)**-alpha) and return the exponent. This works best for continuous data. """ x = x[x >= xmin] n = x.shape[0] lnSum = n / numpy.sum(numpy.log(x/xmin)) #gamma = 1 + lnSum gamma = lnSum return gamma @staticmethod def fitDiscretePowerLaw(x, xmins = None): """ Take a sample of discrete data points which are drawn from a power law probability distribution (p(x) = x-alpha / zeta(alpha, xmin)) and return the exponent. If xmins is supplied then it searches through the set of xmins rather than using all possible xmins. 
Most of the time it helps to keep xmins low. Returns the goodness of fit, best alpha and xmin. If there is only 1 unique value of x then -1, -1 min(x) is returned. """ xmax = numpy.max(x) if xmins == None: xmin = numpy.max(numpy.array([numpy.min(x), 1])) xmins = numpy.arange(xmin, xmax) #Note that x must have at least 2 unique elements if xmins.shape[0] == 0: return -1, -1, numpy.min(x) alphas = numpy.arange(1.5, 3.5, 0.01) ksAlpha = numpy.zeros((xmins.shape[0], 2)) for j in range(xmins.shape[0]): xmin = xmins[j] z = x[x >= xmin] n = z.shape[0] sumLogx = numpy.sum(numpy.log(z)) likelyhoods = numpy.zeros(alphas.shape[0]) for i in range(alphas.shape[0]): likelyhoods[i] = -n*numpy.log(scipy.special.zeta(alphas[i], xmin)) -alphas[i]*sumLogx k = numpy.argmax(likelyhoods) #Compute KS statistic cdf = numpy.cumsum(numpy.bincount(z)[xmin:xmax]/float(n)) fit = numpy.arange(xmin, xmax)**-alphas[k] /scipy.special.zeta(alphas[k], xmin) fit = numpy.cumsum(fit) ksAlpha[j, 0] = numpy.max(numpy.abs(cdf - fit)) ksAlpha[j, 1] = alphas[k] i = numpy.argmin(ksAlpha[:, 0]) return ksAlpha[i, 0], ksAlpha[i, 1], xmins[i] @staticmethod def entropy(v): """ Compute the information entropy of a vector of random vector observations using the log to the base 2. """ items = numpy.unique(v) infEnt = 0 for i in items: prob = numpy.sum(v==i)/float(v.shape[0]) infEnt -= prob * numpy.log2(prob) return infEnt @staticmethod def expandIntArray(v): """ Take a vector of integers and expand it into a vector with counts of the corresponding integers. For example, with v = [1, 3, 2, 4], the expanded vector is [0, 1, 1, 1, 2, 2, 3, 3, 3, 3]. 
""" Parameter.checkClass(v, numpy.ndarray) Parameter.checkList(v, Parameter.checkInt, [0, float('inf')]) w = numpy.zeros(numpy.sum(v), numpy.int) currentInd = 0 for i in range(v.shape[0]): w[currentInd:currentInd+v[i]] = i currentInd += v[i] return w @staticmethod def random2Choice(V, n=1): """ Make a random binary choice from a vector V of values which are unnormalised probabilities. Return the corresponding index. For example if v = [1, 2] then the probability of the indices repectively are [1/3, 2/3]. The parameter n is the number of random choices to make. If V is a matrix, then the rows are taken as probabilities, and a choice is made for each row. """ Parameter.checkClass(V, numpy.ndarray) if V.ndim == 1 and V.shape[0] != 2: raise ValueError("Function only works on binary probabilities") if V.ndim == 2 and V.shape[1] != 2: raise ValueError("Function only works on binary probabilities") if V.ndim == 1: cumV = numpy.cumsum(V) p = numpy.random.rand(n)*cumV[-1] cumV2 = numpy.ones(n)*cumV[0] - p return numpy.array(cumV2 <= 0, numpy.int) elif V.ndim == 2: cumV = numpy.cumsum(V, 1) P = numpy.random.rand(V.shape[0], n)*numpy.array([cumV[:, -1]]).T cumV2 = numpy.outer(cumV[:, 0], numpy.ones(n)) - P return numpy.array(cumV2 <= 0, numpy.int) else: raise ValueError("Invalid number of dimensions") @staticmethod def loadPickle(filename): """ Loads a pickled file with the given filename. """ file = open(filename, 'rb') obj = pickle.load(file) file.close() #logging.debug("Loaded " + filename + " with object " + str(type(obj))) return obj @staticmethod def savePickle(obj, filename, overwrite=True, debug=False): if os.path.isfile(filename) and not overwrite: raise IOError("File exists: " + filename) file = open(filename, 'wb') pickle.dump(obj, file) file.close() if debug: logging.debug("Saved " + filename + " object type " + str(type(obj))) @staticmethod def incompleteCholesky(X, k): """ Compute the incomplete cholesky decomposition of positive semi-define square matrix X. 
Use an approximation of k rows. """ if X.shape[0] != X.shape[1]: raise ValueError("X must be a square matrix") ell = X.shape[0] R = numpy.zeros((k, ell)) d = numpy.diag(X) aInd = numpy.argmax(d) a = d[aInd] nu = numpy.zeros(k) for j in range(k): nu[j] = numpy.sqrt(a) for i in range(ell): R[j, i] = (X[aInd, i] - R[:, i].T.dot(R[:, aInd]))/nu[j] d[i] = d[i] - R[j, i]**2 aInd = numpy.argmax(d) a = d[aInd] return R @staticmethod def incompleteCholesky2(X, k): """ Compute the incomplete cholesky decomposition of positive semi-define square matrix X. Use an approximation of k rows. """ ell = X.shape[0] A = numpy.zeros((ell, k)) Xj = X Xaj = numpy.zeros((ell, k)) for j in range(k): d = numpy.diag(Xj) ind = numpy.argmax(d) A[ind, j] = 1/numpy.sqrt(Xj[ind, ind]) Xaj[:, j] = Xj.dot(A[:, j]) Xj = Xj - numpy.outer(Xaj[:, j], Xaj[:, j])/numpy.dot(A[:, j].T, Xaj[:, j]) return Xaj.T @staticmethod def indEig(s, U, inds): """ Take the output of numpy.linalg.eig and return the eigenvalue and vectors sorted in order indexed by ind. """ U = U[:, inds] s = s[inds] return s, U @staticmethod def indSvd(P, s, Q, inds): """ Take the output of numpy.linalg.svd and return the eigenvalue and vectors sorted in order indexed by ind. """ if inds.shape[0] != 0: P = P[:, inds] s = s[inds] Q = Q.conj().T Q = Q[:, inds] else: P = numpy.zeros((P.shape[0], 0)) s = numpy.zeros(0) Q = Q.conj().T Q = numpy.zeros((Q.shape[0], 0)) return P, s, Q @staticmethod def svd(A, eps=10**-8, tol=10**-8): """ Wrapper for 'svd_from_eigh' to work on the smallest dimention of A """ if A.shape[0] > A.shape[1]: return Util.svd_from_eigh(A, eps) else: P, s, Qh = Util.svd_from_eigh(A.conj().T, eps, tol) return Qh.conj().T, s.conj(), P.conj().T @staticmethod def svd_from_eigh(A, eps=10**-8, tol=10**-8): """ Find the SVD of an ill conditioned matrix A. This uses numpy.linalg.eig but conditions the matrix so is not as precise as numpy.linalg.svd, but can be useful if svd does not coverge. 
Uses the eigenvectors of A^T*A and return singular vectors corresponding to nonzero singular values. Note: This is slightly different to linalg.svd which returns zero singular values. """ AA = A.conj().T.dot(A) lmbda, Q = scipy.linalg.eigh(AA + eps*numpy.eye(A.shape[1])) lmbda = lmbda-eps inds = numpy.arange(lmbda.shape[0])[lmbda>tol] lmbda, Q = Util.indEig(lmbda, Q, inds) sigma = lmbda**0.5 P = A.dot(Q) / sigma Qh = Q.conj().T if __debug__: if not scipy.allclose(A, (P*sigma).dot(Qh), atol=tol): logging.warn(" SVD obtained from EVD is too poor") Parameter.checkArray(P, softCheck=True, arrayInfo="P in svd_from_eigh()") if not Parameter.checkOrthogonal(P, tol=tol, softCheck=True, arrayInfo="P in svd_from_eigh()", investigate=True): print("corresponding sigma: ", sigma) Parameter.checkArray(sigma, softCheck=True, arrayInfo="sigma in svd_from_eigh()") Parameter.checkArray(Qh, softCheck=True, arrayInfo="Qh in svd_from_eigh()") if not Parameter.checkOrthogonal(Qh.conj().T, tol=tol, softCheck=True, arrayInfo="Qh.H in svd_from_eigh()"): print("corresponding sigma: ", sigma) return P, sigma, Qh @staticmethod def safeSvd(A, eps=10**-8, tol=10**-8): """ Compute the SVD of a matrix using scipy.linalg.svd, and if convergence fails revert to Util.svd. """ # check input matrix if __debug__: if not Parameter.checkArray(A, softCheck = True): logging.info("... 
in Util.safeSvd") try: # run scipy.linalg.svd try: P, sigma, Qh = scipy.linalg.svd(A, full_matrices=False) except scipy.linalg.LinAlgError as e: logging.warn(str(e)) raise Exception('SVD decomposition has to be computed from EVD decomposition') # --- only when the SVD decomposition comes from scipy.linalg.svd --- # clean output singular values (sometimes scipy.linalg.svd returns NaN or negative singular values, let's remove them) inds = numpy.arange(sigma.shape[0])[sigma > tol] if inds.shape[0] < sigma.shape[0]: P, sigma, Q = Util.indSvd(P, sigma, Qh, inds) Qh = Q.conj().T # an expensive check but we really need it # rem: A*s = A.dot(diag(s)) ; A*s[:,new] = diag(s).dot(A) if not scipy.allclose(A, (P*sigma).dot(Qh)): logging.warn(" After cleaning singular values from scipy.linalg.svd, the SVD decomposition is too far from the original matrix") # numpy.savez("matrix_leading_to_bad_SVD.npz", A) raise Exception('SVD decomposition has to be computed from EVD decomposition') # check scipy.linalg.svd output matrices (expensive) if __debug__: badAnswerFromScipySvd = False if not Parameter.checkArray(P, softCheck=True, arrayInfo="P in Util.safeSvd()"): badAnswerFromScipySvd = True if not Parameter.checkArray(sigma, softCheck = True, arrayInfo="sigma in Util.safeSvd()"): badAnswerFromScipySvd = True if not Parameter.checkArray(Qh, softCheck = True, arrayInfo="Qh in Util.safeSvd()"): badAnswerFromScipySvd = True if badAnswerFromScipySvd: logging.warn(" After cleaning singular values from scipy.linalg.svd, the SVD decomposition still contains 'NaN', 'inf' or complex values") raise Exception('SVD decomposition has to be computed from EVD decomposition') except Exception as inst: if inst.args != ('SVD decomposition has to be computed from EVD decomposition',): raise logging.warn(" Using EVD method to compute the SVD.") P, sigma, Qh = Util.svd(A, eps, tol) # check Util.svd output matrices (expensive) if __debug__: badAnswerFromUtilSvd = False if not Parameter.checkArray(P, 
softCheck = True): logging.info("... in P in Util.safeSvd") badAnswerFromUtilSvd = True # print nan_rows in P: numpy.isnan(P).sum(0).nonzero() if not Parameter.checkArray(sigma, softCheck = True): logging.info("... in sigma in Util.safeSvd") badAnswerFromUtilSvd = True # print numpy.isnan(sigma).nonzero() if not Parameter.checkArray(Qh, softCheck = True): logging.info("... in Q in Util.safeSvd") badAnswerFromUtilSvd = True # blop = numpy.isnan(Qh).sum(1) # print blop.nonzero() # print blop[blop.nonzero()] if badAnswerFromUtilSvd: logging.warn(" SVD decomposition obtained from EVD decomposition contains 'NaN', 'inf' or real values") from sandbox.util.ProfileUtils import ProfileUtils if ProfileUtils.memory() > 10**9: ProfileUtils.memDisplay(locals()) return P, sigma, Qh @staticmethod def safeEigh(a, b=None, lower=True, eigvals_only=False, overwrite_a=False, overwrite_b=False, turbo=True, eigvals=None, type=1): """ Compute the EigenDecomposition of a hermitian matrix using scipy.linalg.eigh, and if convergence fails revert to scipy.linalg.eig. """ try: return scipy.linalg.eigh(a, b=b, lower=lower, eigvals_only=eigvals_only, overwrite_a=overwrite_a, overwrite_b=overwrite_b, turbo=turbo, eigvals=eigvals) #, type=type) I do not know how to manage it except: if __debug__: logging.warning(" scipy.linalg.eigh raised an error, scipy.linalg.eig() is used instead") lmbda, q = scipy.linalg.eig(a, b=b, overwrite_a=overwrite_a, overwrite_b=overwrite_b) if eigvals == None: eigvals = (0, len(lmbda)) if eigvals_only: return lmbda[eigvals[0]:eigvals[1]] else : return lmbda[eigvals[0]:eigvals[1]], q[eigvals[0]:eigvals[1]] @staticmethod def powerLawProbs(alpha, zeroVal=0.5, maxInt=100): """ Generate a vector of power law probabilities such that p(x) = C x^-alpha for some C and 0 < x <= maxInt. The value of zeroVal^-alpha is the probability to assign to x==0. 
""" p = numpy.arange(0, maxInt, dtype=numpy.float) p[0] = zeroVal p = p ** -alpha p /= p.sum() return p @staticmethod def matrixPower(A, n): """ Compute the matrix power of A using the exponent n. The computation simply evaluated the eigendecomposition of A and then powers the eigenvalue matrix accordingly. Warning: if at least one eigen-value is negative, n should be an integer. """ Parameter.checkClass(A, numpy.ndarray) tol = 10**-10 lmbda, V = scipy.linalg.eig(A) lmbda[numpy.abs(lmbda) <= tol] = 0 lmbda[numpy.abs(lmbda) > tol] = lmbda[numpy.abs(lmbda) > tol]**n if n >= 0: return (V*lmbda).dot(numpy.linalg.inv(V)) else: A = scipy.linalg.pinv(A) n = numpy.abs(n) lmbda, V = scipy.linalg.eig(A) lmbda[numpy.abs(lmbda) > tol] = lmbda[numpy.abs(lmbda) > tol]**n return (V*lmbda).dot(numpy.linalg.inv(V)) @staticmethod def matrixPowerh(A, n): """ Compute the matrix power of A using the exponent n. The computation simply evaluated the eigendecomposition of A and then powers the eigenvalue matrix accordingly. This version assumes that A is hermitian. Warning: if at least one eigen-value is negative, n should be an integer. """ Parameter.checkClass(A, numpy.ndarray) tol = 10**-10 lmbda, V = scipy.linalg.eigh(A) lmbda[numpy.abs(lmbda) < tol] = 0 lmbda[numpy.abs(lmbda) > tol] = lmbda[numpy.abs(lmbda) > tol]**n # next line uses the fact that eigh claims returning an orthonormal basis (even if #one sub-space is of dimension >=2) (to be precise, it claims using dsyevd which claims returning an orthonormal matrix) return (V*lmbda).dot(V.T) @staticmethod def extendArray(A, newShape, val=0): """ Take a 2D matrix A and extend the shape to newShape adding zeros to the right and bottom of it. One can optionally pass in scalar or array val and this will be broadcast into the new array. 
""" tempA = numpy.ones(newShape)*val tempA[0:A.shape[0], 0:A.shape[1]] = A return tempA @staticmethod def distanceMatrix(U, V): """ Compute a distance matrix between n x d matrix U and m x d matrix V, such that D_ij = ||u_i - v_i||. """ if U.shape[1] != V.shape[1]: raise ValueError("Arrays must have the same number of columns") normU = numpy.sum(U**2, 1) normV = numpy.sum(V**2, 1) D = numpy.outer(normU, numpy.ones(V.shape[0])) - 2*U.dot(V.T) + numpy.outer(numpy.ones(U.shape[0]), normV) #Fix for slightly negative numbers D[D<0] = 0 try: D **= 0.5 except FloatingPointError: numpy.set_printoptions(suppress=True, linewidth=200, threshold=2000) print(D.shape) print(D) raise return D @staticmethod def cumMin(v): """ Find the minimum element of a 1d array v for each subarray, starting with the 1st elemnt. """ u = numpy.zeros(v.shape[0]) for i in range(v.shape[0]): u[i] = numpy.min(v[0:i+1]) return u @staticmethod def argsort(seq): """ Find the indices of a sequence after being sorted. Code taken from http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python """ return sorted(range(len(seq)), key = seq.__getitem__) @staticmethod @contextmanager def suppressStdout(): with open(os.devnull, "w") as devnull: old_stdout = sys.stdout sys.stdout = devnull try: yield finally: sys.stdout = old_stdout @staticmethod @contextmanager def suppressStderr(): with open(os.devnull, "w") as devnull: old_stderr = sys.stderr sys.stderr = devnull try: yield finally: sys.stderr = old_stderr @staticmethod def powerEigs(A, eps=0.001): """ Compute the largest eigenvector of A using power iteration. Returns the eigenvector and corresponding eigenvalue. 
""" v = numpy.random.rand(A.shape[1]) oldV = v error = eps+1 while error > eps: v = A.dot(v) v = v/numpy.sqrt((v**2).sum()) error = numpy.linalg.norm(oldV - v) oldV = v return v.T.dot(A).dot(v), v @staticmethod def argmaxN(a, N): """ Return the top N elements of numpy array a """ b = numpy.zeros(N, numpy.int) tempA = a.copy() minA = numpy.min(a) for i in range(N): idx = numpy.argmax(tempA) b[i] = idx tempA[idx] = minA return b
25,807
8,300
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
.. codeauthor: Albert Weichselbraun <albert.weichselbraun@htwchur.ch>
.. codeauthor:: Heinz-Peter Lang <lang@weblyzard.com>
'''
from __future__ import print_function
from __future__ import unicode_literals

from eWRT.ws.rest import MultiRESTClient

from weblyzard_api.client import (
    WEBLYZARD_API_URL, WEBLYZARD_API_USER, WEBLYZARD_API_PASS)


class JesajaNg(MultiRESTClient):
    '''
    Client for the Jesaja keyword service, which extracts associations
    (i.e. keywords) from text documents.
    '''
    URL_PATH = 'rest'

    def __init__(self, url=WEBLYZARD_API_URL, usr=WEBLYZARD_API_USER,
                 pwd=WEBLYZARD_API_PASS, default_timeout=None,
                 use_random_server=True):
        '''
        :param url: URL of the jeremia web service
        :param usr: optional user name
        :param pwd: optional password
        '''
        MultiRESTClient.__init__(self, service_urls=url, user=usr, password=pwd,
                                 default_timeout=default_timeout,
                                 use_random_server=use_random_server)

    def set_keyword_profile(self, profile_name, keyword_calculation_profile):
        '''
        Register a keyword calculation profile on the server.

        :param profile_name: the name of the keyword profile
        :param keyword_calculation_profile: the full keyword calculation \
            profile: a dict with keys such as ``valid_pos_tags``, \
            ``required_pos_tags``, ``corpus_name``, \
            ``min_phrase_significance``, ``num_keywords``, \
            ``skip_underrepresented_keywords``, ``keyword_algorithm``, \
            ``min_token_count``, ``min_ngram_length``, ``max_ngram_length``, \
            ``stoplists`` and ``groundAnnotations``.

        .. note:: ``Available keyword_algorithms``

            * ``com.weblyzard.backend.jesaja.algorithm.keywords.YatesKeywordSignificanceAlgorithm``
            * ``com.weblyzard.backend.jesaja.algorithm.keywords.LogLikelihoodKeywordSignificanceAlgorithm``
        '''
        endpoint = 'set_keyword_profile/{}'.format(profile_name)
        return self.request(endpoint, keyword_calculation_profile)

    def add_csv(self, matview_id, keyword_count_map):
        '''
        Upload reference keyword counts for Jesaja.

        :param matview_id: matview_id for which the documents are relevant
        :param keyword_count_map: a map of keywords and the corresponding \
            counts, e.g. ``{'the': 222, 'a': 200, ...}``
        '''
        if matview_id is None:
            raise ValueError('Please specify the matview for which the documents are designated.')
        return self.request('add_csv/{}'.format(matview_id), keyword_count_map)

    def add_documents(self, matview_id, xml_documents):
        '''
        Upload reference documents for Jesaja.

        :param matview_id: matview_id for which the documents are relevant
        :param xml_documents: a list of weblyzard_xml documents \
            ``[xml_content, ...]``
        '''
        if matview_id is None:
            raise ValueError('Please specify the matview for which the documents are designated.')
        return self.request('add_documents/{}'.format(matview_id), xml_documents)

    def get_keyword_annotations(self, matview_id, xml_documents):
        '''
        :param matview_id: the matview id for which the keywords are computed
        :param xml_documents: a list of weblyzard_xml documents \
            ``[xml_content, ...]``
        '''
        if not self.has_matview(matview_id):
            raise Exception(
                'Cannot compute keywords - unknown matview {}'.format(matview_id))
        return self.request('get_nek_annotations/{}'.format(matview_id),
                            xml_documents)

    def get_keywords(self, matview_id, xml_documents):
        '''
        :param matview_id: the matview id for which the keywords are computed
        :param xml_documents: a list of weblyzard_xml documents \
            ``[xml_content, ...]``
        '''
        if not self.has_matview(matview_id):
            raise Exception(
                'Cannot compute keywords - unknown matview {}'.format(matview_id))
        return self.request('get_keywords/{}'.format(matview_id), xml_documents)

    def has_matview(self, matview_id):
        '''Return True when a profile is assigned to the given matview.'''
        return matview_id in self.list_matviews()

    def has_corpus(self, matview_id):
        '''Return True when at least one completed shard exists for the matview.'''
        completed = self.request(
            'list_shards/complete/{}'.format(matview_id))
        return len(completed[matview_id]) > 0

    def remove_matview_profile(self, matview_id):
        '''Remove the profile assigned to the given matview, if one exists.'''
        if not self.has_matview(matview_id):
            print('No profile {} found'.format(matview_id))
            return
        endpoint = 'remove_profile/{}/{}'.format(matview_id, matview_id)
        return self.request(endpoint, return_plain=True)

    def get_corpus_size(self, matview_id):
        '''Return the total word count over all completed shards of the matview.'''
        completed = self.request(
            'list_shards/complete/{}'.format(matview_id))
        return sum(shard['wordCount'] for shard in completed[matview_id])

    def list_profiles(self):
        '''Return all available keyword profiles.'''
        return self.request('list_profiles')

    def list_matviews(self):
        '''Return all matview ids that have a profile assigned.'''
        return self.request('list_matview_profiles')

    def get_cache_stats(self):
        '''Return cache statistics as plain text.'''
        return self.request('get_cache_stats', return_plain=True)

    def get_cached_corpora(self):
        '''Return the list of cached corpora.'''
        return self.request('get_cached_corpora')

    def set_stoplist(self, name, stoplist):
        '''
        :param name: name of the stopword list
        :param stoplist: a list of stopwords for the keyword computation
        '''
        return self.request('set_stoplist/{}'.format(name), stoplist)

    def set_matview_profile(self, matview_id, profile_name):
        '''
        Determines which profile to use for the given matview
        '''
        return self.request('set_matview_profile/{}/{}'.format(
            matview_id, profile_name))

    def list_stoplists(self):
        '''
        :returns: a list of all available stopword lists.
        '''
        return self.request('list_stoplists')

    def rotate_shard(self, matview_id=None):
        '''
        :param matview_id: an optional matview_id of the shard to be rotated

        .. note:: All shards are automatically rotated every 24 hourse. Call \
            this method to speed up the availablilty of a shart
        '''
        if matview_id:
            return self.request('rotate_shard/{}'.format(matview_id))
        return self.request('rotate_shard')
7,242
2,082
class Config(object):
    """Base configuration (development defaults)."""
    DEBUG = True
    DEVELOPMENT = True
    # NOTE(review): hard-coded secret key -- move to an environment variable
    # before deploying anywhere public.
    SECRET_KEY = 'do-i-really-need-this'
    SQLALCHEMY_DATABASE_URI = 'sqlite:///testing.db'
    # Disable the SQLAlchemy modification-tracking event system (saves memory)
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Filesystem root for served karaoke media files
    KARAOKE_MEDIA_ROOT = "/path/to/app/static/media"


class ProductionConfig(Config):
    """Production overrides: debugging off, separate database."""
    DEVELOPMENT = False
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = 'sqlite:///production.db'
395
159
import datetime, os
from django.contrib.auth.models import User
from products.lib.data_load import LoadProducts
from zendesk.lib.load_tickets import LoadTickets
from tasks.engine.maintenance import Maintenance
from tasks.models import LastRun


class TaskRunner:

    def __init__(self):
        """Set up admin credentials and per-component run intervals (seconds)."""
        self.admin_username = 'admin'
        self.admin_email = 'admin@email.com'
        self.admin_pass = os.environ['ADMIN_PASS']
        self.when_to_run = {
            'refresh_pivotal_products_table': 3600,  # Every Hour
            'table_maintenance': 86400,              # Once a day
            'load_ticket_data': 900,                 # Every 15 minutes
        }

    def update_last_run_time(self, component):
        """Record that *component* just ran: create its row on first run,
        otherwise stamp it with the current time."""
        existing = LastRun.objects.filter(component=component)
        if existing.count() == 0:
            LastRun(component=component).save()
        else:
            existing.update(last_run=datetime.datetime.now())

    def check_last_run_table(self, component):
        """Return the number of seconds since *component* last ran.

        When no record exists, the elapsed time is measured from a fixed
        epoch (2000-01-01), which is large enough to make every task due.
        """
        epoch = datetime.datetime.strptime('2000-01-01 00:00:00',
                                           "%Y-%m-%d %H:%M:%S")
        elapsed = (datetime.datetime.now() - epoch).total_seconds()
        rows = LastRun.objects.filter(component=component).values('last_run')
        for row in rows:
            elapsed = (datetime.datetime.now() - row['last_run']).total_seconds()
        return elapsed

    def create_super_user(self):
        """Create the admin superuser when it does not exist yet."""
        missing = User.objects.filter(username=self.admin_username).count() == 0
        if missing:
            User.objects.create_superuser(self.admin_username,
                                          self.admin_email,
                                          self.admin_pass)

    def run_refresh_pivotal_products_table(self):
        """Reload the pivotal products table once its interval has elapsed."""
        component = 'refresh_pivotal_products_table'
        if self.check_last_run_table(component) <= self.when_to_run[component]:
            return
        LoadProducts().load_data_to_db()
        self.update_last_run_time(component)

    def run_check_for_table_maintenance(self):
        """Run table maintenance once its interval has elapsed."""
        component = 'table_maintenance'
        if self.check_last_run_table(component) <= self.when_to_run[component]:
            return
        Maintenance().run_table_maintenance()
        self.update_last_run_time(component)

    def run_load_ticket_table(self):
        """Pull fresh ticket data once its interval has elapsed."""
        component = 'load_ticket_data'
        if self.check_last_run_table(component) <= self.when_to_run[component]:
            return
        LoadTickets().extract_data()
        self.update_last_run_time(component)

    def run_task(self):
        """Entry point: ensure the superuser exists, then run every due task."""
        self.create_super_user()
        self.run_refresh_pivotal_products_table()
        self.run_check_for_table_maintenance()
        self.run_load_ticket_table()
3,342
1,013
# -*- coding: utf-8 -*- """ Created on Sat Oct 23 10:51:14 2018 @author: peter """ from sklearn.feature_extraction.text import TfidfVectorizer import os from sklearn.model_selection import train_test_split from sklearn.neural_network import MLPClassifier from sklearn import metrics import urllib.parse from sklearn.externals import joblib def loadFile(name):#读取文件 directory = str(os.getcwd()) filepath = os.path.join(directory, name) with open(filepath, 'r', encoding='UTF-8') as f: data = f.readlines() data = list(set(data)) result = [] for d in data: d = str(urllib.parse.unquote(d)) result.append(d) return result badQueries = loadFile('badqueries.txt')#读取恶意请求 validQueries = loadFile('goodqueries.txt')#读取正常请求 #去重 badQueries = list(set(badQueries)) validQueries = list(set(validQueries)) allQueries = badQueries + validQueries #打标签 yBad = [1 for i in range(0, len(badQueries))] yGood = [0 for i in range(0, len(validQueries))] y = yBad + yGood queries = allQueries #TF-IDF进行特征提取 vectorizer = TfidfVectorizer(min_df = 0.0, analyzer="char", sublinear_tf=True, ngram_range=(1,3)) X = vectorizer.fit_transform(queries) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) badCount = len(badQueries) validCount = len(validQueries) #加载mlp模型 mlp = joblib.load("mlp-module.m") predicted=mlp.predict(X_test) print("Bad samples: %d" % badCount) print("Good samples: %d" % validCount) print("Accuracy: %f" % mlp.score(X_test, y_test)) print("Precision: %f" % metrics.precision_score(y_test, predicted)) print("Recall: %f" % metrics.recall_score(y_test, predicted))
1,667
670
import random
import string

from sqlalchemy.orm import Session

import models, schemas


def get_brand(db: Session, brand_id: int):
    """Return the Brand with the given primary key, or None when absent."""
    return db.query(models.Brand).filter(models.Brand.id == brand_id).first()


def get_brand_by_name(db: Session, name: str):
    """Return the first Brand with the given name, or None when absent."""
    return db.query(models.Brand).filter(models.Brand.name == name).first()


def get_brands(db: Session, skip: int = 0, limit: int = 10):
    """Return a page of Brands (offset *skip*, at most *limit* rows)."""
    return db.query(models.Brand).offset(skip).limit(limit).all()


def create_brand(db: Session, brand: schemas.BrandCreate):
    """Persist a new Brand and return the refreshed instance."""
    db_brand = models.Brand(name=brand.name)
    db.add(db_brand)
    db.commit()
    db.refresh(db_brand)
    return db_brand


def get_discounts_by_brand(db: Session, brand_id: int, skip: int = 0, limit: int = 100):
    """Return a page of all Discounts owned by the given brand.

    NOTE(review): raises AttributeError when brand_id does not exist
    (db_brand is None) -- confirm callers guarantee a valid id.
    """
    db_brand = get_brand(db, brand_id)
    return db.query(
        models.Discount).filter(models.Discount.owner_id == db_brand.id).offset(skip).limit(limit).all()


def get_discounts_by_brand_name(db: Session, brand_name: str, active: bool = True, skip: int = 0, limit: int = 100):
    """Return a page of Discounts for the named brand, filtered by activity.

    NOTE(review): raises AttributeError for an unknown brand name -- see
    get_discounts_by_brand.
    """
    db_brand = get_brand_by_name(db, brand_name)
    return db.query(models.Discount).filter(
        models.Discount.owner_id == db_brand.id).filter(
        models.Discount.is_active == active).offset(skip).limit(
        limit).all()


def get_active_discounts_by_brand(db: Session, brand_id: int, skip: int = 0, limit: int = 100):
    """Return a page of still-active Discounts owned by the given brand."""
    db_brand = get_brand(db, brand_id)
    return db.query(models.Discount).filter(
        models.Discount.owner_id == db_brand.id).filter(
        models.Discount.is_active == True).offset(skip).limit(limit).all()


def get_discount(db: Session, brand_id: int):
    """Claim one active discount for the brand.

    The returned discount is marked inactive (consumed) before returning;
    returns None when the brand has no active discounts left.
    """
    active_discounts = get_active_discounts_by_brand(db, brand_id)
    if len(active_discounts) > 0:
        discount = active_discounts[0]
        # consume the code so it cannot be handed out twice
        discount.is_active = False
        db.commit()
    else:
        discount = None
    return discount


def get_discounts(db: Session, skip: int = 0, limit: int = 100):
    """Return a page of all Discounts regardless of owner or activity."""
    return db.query(models.Discount).offset(skip).limit(limit).all()


def create_brand_discounts(db: Session, number: int, brand_id: int, length: int = 8):
    """Generate *number* random discount codes of *length* characters for a brand.

    Returns a human-readable confirmation message.

    NOTE(review): `random.choices` is not cryptographically secure; consider
    the `secrets` module if codes must be unguessable. Codes are also not
    checked for uniqueness.
    """
    db_brand = get_brand(db, brand_id)
    for i in range(number):
        code = ''.join(
            random.choices(string.ascii_uppercase + string.digits, k=length))
        description = f"Use the following code to get a discount with {db_brand.name}"
        db_discount = models.Discount(code=code,
                                      description=description,
                                      is_active=True,
                                      owner_id=brand_id)
        db.add(db_discount)
        # commit/refresh per generated code so each row gets its DB defaults
        # (assumed from the original layout -- TODO confirm placement)
        db.commit()
        db.refresh(db_discount)
    return f"Successfully generated {number} discount codes for {db_brand.name}."
3,176
982
#!/usr/bin/env python3 from mdpyformat import * import pprintex header_md("""Python object primer for Python3 / meta classes""" ) header_md("""Introduction""", nesting = 2) print_md(""" Python is good at creating the illusion of being a simple programming language. Sometimes this illusion fails, like when you have to deal with the import/module system [my attempts to get it](https://github.com/MoserMichael/pythonimportplayground). Another area of complexity is the object system, last week I tried to understand [python enums](https://docs.python.org/3/library/enum.html), it turns that they are built on top of [meta classes](https://github.com/python/cpython/blob/2c56c97f015a7ea81719615ddcf3c745fba5b4f3/Lib/enum.py#L511), So now I have come to realize, that I really don't know much about python and its object system. The purpose of this text is to figure out, how the python object system ticks. """) header_md("""The Python object system""", nesting=2) header_md("""How objects are represented""", nesting=3) print_md(""" Lets look at a simple python class Foo with a single base class Base, and see how objects are created and represented in memory """) eval_and_quote(""" # The base class. All Python3 classes have the base class of type object. # The long form is therefore # class Base(object): # However Pylint will tell you, that this long form is redundant class Base: # Class variables are shared between all instances of the class Base, and declared like this: base_class_var = "Base" # The object constructor/init method, Note the first 'self' argument, which refers to the object instance. def __init__(self): print("calling Base.__init__") # Object variables are specific to a given instance of Base # Each object has a builtin hash member: __dict__ this one lists all object members (including those added by the base class __init__ method) self.obj_var_base = 10 # An object method - needs to access the object instance, which is passed as first 'self' argument. 
def show_base(self): print_md("obj_var_base: ", self.obj_var_base) # A class method/static method is called without an object instance. @staticmethod def make_base(): return Base() # class Foo with a base class Base class Foo(Base): # Class variables are shared between all instances of the class Foo, and declared like this: class_var = 42 class_var2 = 43 # The object constructor/init method, Note the first 'self' argument, which is the object instance. def __init__(self): # When not calling the base class __init__ method: the base class object variables are not added to the object !!! # The base class __init__ adds the 'obj_var_base' member to the __dict__ member of this object instance. # By convention: you first init the base classes, before initialising the derived class. super().__init__() print("calling Foo.__init__") # Object variables are specific to a given instance of Foo # Each object has a builtin hash member: __dict__ this one lists all object members (including those added by the base class __init__ method) # Define object variable: obj_var_a self.obj_var_a=42 # Define object variable: obj_var_b self.obj_var_b="name" # An object method - needs to access the object instance, which is passed as first 'self' argument. def show_derived(self): print_md("obj_var_a:", self.obj_var_a, "obj_var_b:", self.obj_var_b) # A class method/static method is called without an object instance. @staticmethod def make_foo(): return Foo() # Make a new object instance of type Foo class. foo_obj=Foo() """) print_md("The memory address of object foo_obj is returned by the [id built-in](https://docs.python.org/3/library/functions.html#id)") eval_and_quote('print("id(foo_obj) : ", id(foo_obj))') print_md("If two variables have the same object id value, then they both refer to the very same object/instance!") print_md(""" Each user defined object has a __dict__ attribute, this is a dictionary that lists all the object instance variables. 
This also includes instance members that were added by the __init__ method of the base class !! """) eval_and_quote("""print("foo_obj.__dict__ : ", foo_obj.__dict__)""") print_md(""" So you see that the following is exactly the same thing: """) eval_and_quote("""assert id(foo_obj.obj_var_a) == id( foo_obj.__dict__['obj_var_a'] ) """) print_md(""" Wait, but where does the __dict__ attribute come from? The [built-in getattr](https://docs.python.org/3/library/functions.html#getattr) function can return this built-in __dict__ attribute! Interesting: the python notation object.member_name can mean different things: 1) for built-in attributes it means a call to getattr 2) for object instances (assigned in the __init__ method of the class) it means a call to retrieve the __dict__ attribute, and then a lookup of the variable name in that dictionary. """) print_md( """foo_obj.__dict__ and getattr(foo_obj,'__dict__',None) is the same thing! """) eval_and_quote("""assert id(foo_obj.__dict__) == id( getattr(foo_obj,'__dict__',None) )""") print_md(""" The getattr builtin function has a good part, its return value can be checked for None. This can be used, in order to check if the argument is an object with a __dict__ attribute. 
""") eval_and_quote("""base_obj = object()""") print_md("An object of built-in type ", type(base_obj), " doesn't have a __dict__ member") eval_and_quote("""assert getattr(base_obj, '__dict__', None) is None""") eval_and_quote("""int_obj = 42""") print_md("An object of built-in type ", type(int_obj), " doesn't have a __dict__ member") eval_and_quote("""assert getattr(int_obj, '__dict__', None) is None""") print_md(""" The [dir builtin](https://docs.python.org/3/library/functions.html#dir) function does different things, depending on the argument, for regular objects it returns a "list that contains the object’s attributes’ names, the names of its class’s attributes, and recursively of the attributes of its class’s base classes.", all this is sorted alphabetically. """) eval_and_quote("""print("dir(foo_obj) : ", dir(foo_obj))""") # doesn't have __slots__, how odd. #print_md("foo_obj.__slots__ : ", foo_obj.__slots__) header_md("""How classes are represented""", nesting=3) print_md("""The built-in function [type](https://docs.python.org/3/library/functions.html#type), is returning the class of an object, when applied to a variable (to be more exact: type is a built-in class, and not a built-in function, more on that later)""") eval_and_quote(""" # Make a new object instance of type Foo class. foo_obj=Foo() print("class of object foo_obj - type(foo_obj): ", type(foo_obj)) # That's the same as showing the __class__ member of the variable (in Python3) print("foo_obj.__class__ :", foo_obj.__class__) """) print_md(""" The class is an object, it's purpose is to hold the static data that is shared between all object instances. Each object has a built-in __class__ attribute, that refers to this class object. Note that the name of the class includes the module name, __main__ if the class is defined in the file given as argument to the python interpreter. 
Also note that the type built-in of type(foo_obj) is really the same as: str(foo_obj.__class__) (for Python3) """) print_md(""" Again, the built in attribute __class__ can also be accessed with the getattr built-in function. """) eval_and_quote( """ print("foo_obj.__class__ and getattr(foo_obj,'__class__',None) is the same thing!") assert id(foo_obj.__class__) == id( getattr(foo_obj,'__class__',None) ) """) print_md("""The __name__ and __qualname__ built-in attributes return the name of the class, without the module name """) eval_and_quote( """ print("foo_boj.__class__.__name__ : ", foo_obj.__class__.__name__) print("foo_boj.__class__.__qualname__ : ", foo_obj.__class__.__qualname__)""" ) print_md(""" To get the immediate base class list as declared in that particular class. """) eval_and_quote( """print("foo_obj.__class__.__bases__ :", foo_obj.__class__.__bases__)""") print_md(""" The __mro__ member is a list of types that stands for 'method resoultion order', when searching for an instance method, this list is searched in order to resolve the method name. The Python runtime creates this lists by enumerating all of its base classes recursively, in depth first traversal order. 
For each class it follows the base classes, from the left ot the right This list is used to resolve a member function 'member_function' of an object, when you call it via: obj_ref.member_function() """) eval_and_quote( """print("foo_obj.__class__.__mro__ :", foo_obj.__class__.__mro__) """ ) print_md("Computing the method resolution order by hand") eval_and_quote(""" # function to a class hierarchy, in depth first search order (like what you get in MRO - method resolution order) def show_type_hierarchy(type_class): def show_type_hierarchy_imp(type_class, nesting): if len(type_class.__bases__) == 0: return prefix = "\t" * nesting print( prefix + "type:", type_class.__name__ , "base types:", ",".join( map( lambda ty : ty.__name__, type_class.__bases__) ) ) #print( prefix + "str(", type_class.__name__ , ").__dict__ : ", type_class.__dict__ ) for base in type_class.__bases__: show_type_hierarchy_imp(base, nesting+1) if not inspect.isclass(type_class): print("object ", str(type_class), " is not class") return print("show type hierarchy of class:") show_type_hierarchy_imp(type_class, 0) class LevelOneFirst: pass class LevelOneSecond: pass class LevelOneThird: pass class LevelTwoFirst(LevelOneFirst, LevelOneSecond): pass class LevelThree(LevelTwoFirst,LevelOneThird): pass show_type_hierarchy(LevelThree) print("LevelThree.__mro__:", LevelThree.__mro__) """) eval_and_quote(""" print("*** mro in detail:") for cls in foo_obj.__class__.__mro__: print_md("\tclass-in-mro: ", str(cls), "id:", id(cls), "cls.__dict__: ", cls.__dict__) print("*** eof mro in detail") """) print_md(""" The class object has a __dict__ too - here you will see all the class variables (for Foo these are class_var and class_var2) and class methods (defined with @staticmethod), but also the object methods (with the self parameter) """) eval_and_quote( """print("foo_obj.__class__.__dict__ : ", foo_obj.__class__.__dict__)""" ) # doen't have slots, how odd. 
#print_md("foo_obj.__class__.__slots__ : ", foo_obj.__class__.__slots__) print_md(""" Again, the [dir](https://docs.python.org/3/library/functions.html#dir) built-in function does different things, depending on the argument type for a class object it returns a "list that contains the names of its attributes, and recursively of the attributes of its bases" That means it displays both the names of static variables, and the names of the static functions, for the class and it's base classes. Note that the names are sorted. """) eval_and_quote("""print("dir(foo_obj.__class__) : ", dir( foo_obj.__class__ ) )""") print_md(""" The class object derives from built-in class type, you can check if an object is a class by checking if it is an instance of class 'type'! """) # check that foo_obj.__class__ is a type - it is derived from built-in class type eval_and_quote(""" assert isinstance(foo_obj.__class__, type) # same thing as assert inspect.isclass(foo_obj.__class__) # an object is not derived from class type. assert not isinstance(foo_obj, type) # same thng as assert not inspect.isclass(foo_obj) """) print_md( """ Now there is much more: there is the inspect module that returns it all, a kind of rosetta stone of the python object model. inspect.getmembers returns everything! You can see the source of inspect.getmembers [here](https://github.com/python/cpython/blob/3.10/Lib/inspect.py) """) eval_and_quote("""print("inspect.getmembers(foo_obj): ", inspect.getmembers(foo_obj))""") print_md(""" Attention! 
the type of the object is the class of the object (remember: the classes is an object, where the __dict__ member holds the class variables) """) eval_and_quote(""" print("type(foo_obj) : ", type(foo_obj)) # same thing in python3 print("str(foo_obj.__class__) : ", str(foo_obj.__class__) )""") print_md(""" Let's look at both the type and identity of all these objects: """) eval_and_quote("""print("id(foo_obj) : ", id(foo_obj), " str(foo_obj) : ", str(foo_obj))""") print_md(""" The following expressions refer to the same thing: the type of the object foo_obj, also known as the class of foo_obj """) eval_and_quote(""" print("type(foo_obj) :", type(foo_obj), " id(type(foo_obj)) :", id(type(foo_obj)), " type(foo_obj).__name__ : ", type(foo_obj).__name__ ) print("str(foo_obj.__class__) :", str(foo_obj.__class__), " id(foo_obj.__class__) :", id(foo_obj.__class__), "foo_obj.__class__.__name__ : ", foo_obj.__class__.__name__) print("str(Foo) :", str(Foo), " id(Foo) :", id( Foo ), "Foo.__name__ :", Foo.__name__) assert id(Foo) == id(type(foo_obj)) assert id(type(foo_obj)) == id(foo_obj.__class__) """) print_md(""" The Foo class members """) eval_and_quote(""" print("foo_obj.__class__.__dict__ :", foo_obj.__class__.__dict__) print("Foo.__dict__ :", Foo.__dict__) # everything accessible form the class print("dir(foo_obj.__class__) :", dir( foo_obj.__class__)) """) print_md(""" The following expressions refer to the same thing: the meta-type of the foo_obj. 
""") eval_and_quote(""" print("type(foo_obj.__class__.__class__):", type(foo_obj.__class__.__class__), " id( foo_obj.__class__.__class__ ) :" , id( foo_obj.__class__.__class__ ) , "foo_obj.__class__.__class__.__name__ : ", foo_obj.__class__.__class__.__name__ ) print("type(Foo) :", type(Foo), " id(type(Foo)) : ", id( type( Foo ) ), " Foo.__class__.__name__ :", Foo.__class__.__name__) print("type(Foo.__class__) :", type(Foo.__class__), " id(type(Foo.__class__)) : ", id( type( Foo.__class__ ) ), " Foo.__class__.__name__ :", Foo.__class__.__name__) print("type(Foo.__class__.__class__) :", type(Foo.__class__.__class__), " id(type(Foo.__class__.__class__)) :", id( type( Foo.__class__.__class__ ) ) ) assert type(Foo) == type(Foo.__class__) assert type(Foo.__class__) == type(Foo.__class__.__class__) """) print_md(""" The type of the type is the metaclass - the metaclass constructs the Class object! (the class of an object is also an object!) """) eval_and_quote(""" print("type( type( foo_obj ) ) :", type( type( foo_obj ) ) ) print("str( foo_obj.__class__.__class__ ) :", str(foo_obj.__class__.__class__) ) """) # result: eval_and_quote(""" print(" metaclass members: foo_obj.__class__.__class__.__dict__ : ", foo_obj.__class__.__class__.__dict__) print(" everything accessible form metaclass: dir( foo_obj.__class__.__class__ ) : ", dir( foo_obj.__class__.__class__) ) """) print_md(""" Wow, any class can tell all of its derived classes! I wonder how that works... """) eval_and_quote("""print("Base.__subclasses__() : ", Base.__subclasses__())""") header_md("""Object creation""", nesting=3) print_md(""" Objects recap: The object instance holds the __dict__ attribute of the object instance, it's value is a dictionary that holds the object instance members. The class is an object that is shared between all object instances, and it holds the static data (class variables, class methods) What happens upon: foo = Foo() ? 
Take the type of Foo - the metaclass of Foo, the metaclass both knows how to create an instance of the class Foo, and the object instances. A metaclass is derived from built-in class 'type', The 'type' constructor with three argument creates a new class object. [see reference](https://docs.python.org/3/library/functions.html#type) class_obj = Foo The metaclass is used as a 'callable' - it has a __call__ method, and can therefore be called as if it were a function (see more about callables in the course on [decorators](https://github.com/MoserMichael/python-obj-system/blob/master/decorator.md)) Now this __call__ method creates and initialises the object instance. The implementation of __call__ now does two steps: - Class creation is done in the [__new__](https://docs.python.org/3/reference/datamodel.html#object.__new__) method of the metaclass. The __new__ method creates the Foo class, it is called exactly once, upon class declaration (you will see this shortly, in the section on custom meta classes) - It uses the Foo class and calls its to create and initialise the object (call the __new__ method of the Foo class, in order to create an instance of Foo, then calls the __init__ instance method of the Foo class, on order to initialise it). This all done by the __call__ method of the metaclass. instance_of_foo = meta_class_obj.__call__() (actually that was a bit of a simplification... 
) """) eval_and_quote(""" # same as: foo_obj = Foo() foo_obj = Foo.__call__() print("foo_obj : ", foo_obj) print("foo_obj.__dict__ : ", foo_obj.__dict__) """) print_md("This is the same as:") eval_and_quote(""" class_obj = Foo instance_of_foo = class_obj() print("instance_of_foo : ", instance_of_foo) print("instance_of_foo.__dict__ : ", instance_of_foo.__dict__) """) header_md("""Custom metaclasses""", nesting = 2) header_md("""Metaclasses for implementing singleton objects""", nesting = 3) print_md(""" An object can define a different way of creating itself, it can define a custom metaclass, which will do exactly the same object creation steps described in the last section. Let's examine a custom metaclass for creating singleton objects. """) eval_and_quote(""" # metaclass are always derived from the type class. # the type class has functions to create class objects # the type class has also a default implementation of the __call__ method, for creating object instances. class Singleton_metaclass(type): # invoked to create the class object instance (for holding static data) # this function is called exactly once, in order to create the class instance! def __new__(meta_class, name, bases, cls_dict, **kwargs): print("Singleton_metaclass: __new__ meta_class:", meta_class, "name:", name, "bases:", bases, "cls_dict:", cls_dict, f'kwargs: {kwargs}') class_instance = super().__new__(meta_class, name, bases, cls_dict) print("Singleton_metaclass: __new__ return value: ", class_instance, "type(class_instance):", type(class_instance)) # the class class variable __singleton_instance__ will hold a reference to the one an only object instance of this class. class_instance.__singleton_instance__ = None return class_instance def __call__(cls, *args, **kwargs): # we get here to create an object instance. the class object has already been created. print("Singleton_metaclass: __call__ args:", *args, f'kwargs: {kwargs}') # check if the singleton has already been created. 
if cls.__singleton_instance__ is None: # create the one an only instance object. instance = cls.__new__(cls) # initialise the one and only instance object instance.__init__(*args, **kwargs) # store the singleton instance object in the class variable __singleton_instance__ cls.__singleton_instance__ = instance # return the singleton instance return cls.__singleton_instance__ import math # the metaclass specifier tells python to use the Singleton_metaclass, for the creation of an instance of type SquareRootOfTwo class SquareRootOfTwo(metaclass=Singleton_metaclass): # the __init__ method is called exactly once, when the first instance of the singleton is created. # the square root of two is computed exactly once. def __init__(self): self.value = math.sqrt(2) print("SquareRootOfTwo.__init__ self:", self) print("creating the objects instances...") sqrt_root_two_a = SquareRootOfTwo() print("sqrt_two_a id(sqrt_root_two_a):", id(sqrt_root_two_a), "type(sqrt_root_two_a):", type(sqrt_root_two_a), "sqrt_root_two_a.value:", sqrt_root_two_a.value) sqrt_root_two_b = SquareRootOfTwo() print("sqrt_two_b id(sqrt_root_two_b)", id(sqrt_root_two_b), "type(sqrt_root_two_b):", type(sqrt_root_two_b), "sqrt_root_two_b.value:", sqrt_root_two_b.value) # all singleton objects of the same class are referring to the same object assert id(sqrt_root_two_a) == id(sqrt_root_two_b) """) header_md("""Passing arguments to metaclasses""", nesting = 3) print_md("""" Lets extend the previous singleton creating metaclass, so that it can pass parameters to the __init__ method of the object, these parameters are defined together with the metaclass specifier. """) eval_and_quote(""" # metaclass are always derived from the type class. # The type class has functions to create class objects # The type class has also a default implementation of the __call__ method, for creating object instances. 
class Singleton_metaclass_with_args(type): # invoked to create the class object instance (for holding static data) # this function is called exactly once, in order to create the class instance! def __new__(meta_class, name, bases, cls_dict, **kwargs): print("Singleton_metaclass_with_args: __new__ meta_class:", meta_class, "name:", name, "bases:", bases, "cls_dict:", cls_dict, f'kwargs: {kwargs}') class_instance = super().__new__(meta_class, name, bases, cls_dict) print("Singleton_metaclass_with_args: __new__ return value: ", class_instance, "type(class_instance):", type(class_instance)) # the class class variable __singleton_instance__ will hold a reference to the one an only object instance of this class. class_instance.__singleton_instance__ = None # the keywords that have been specified, are passed into the class creation method __new__. # save them as a class variable, so as to pass them to the object constructor! class_instance.__kwargs__ = kwargs return class_instance def __call__(cls, *args, **kwargs): # we get here to create an object instance. the class object has already been created. print("Singleton_metaclass_with_args: __call__ args:", *args, f'kwargs: {kwargs}') # check if the singleton has already been created. if cls.__singleton_instance__ is None: # create the one an only instance object. instance = cls.__new__(cls) # initialise the one and only instance object # pass it the keyword parameters specified for the class! 
instance.__init__(*args, **cls.__kwargs__) # store the singleton instance object in the class variable __singleton_instance__ cls.__singleton_instance__ = instance # return the singleton instance return cls.__singleton_instance__ import math class AnySquareRoot: def __init__(self, arg_val): self.value = math.sqrt(arg_val) # the metaclass specifier tells python to use the Singleton_metaclass, for the creation of an instance of type SquareRootOfTwo class SquareRootOfTwo(AnySquareRoot, metaclass=Singleton_metaclass_with_args, arg_num=2): # the init method is called with arg_num specified in the class definition (value of 2) def __init__(self, arg_num): super().__init__(arg_num) class SquareRootOfThree(AnySquareRoot, metaclass=Singleton_metaclass_with_args, arg_num=3): # the init method is called with arg_num specified in the class definition (value of 3) def __init__(self, arg_num): super().__init__(arg_num) print("creating the objects instances...") sqrt_root_two_a = SquareRootOfTwo() print("sqrt_two_a id(sqrt_root_two_a):", id(sqrt_root_two_a), "type(sqrt_root_two_a):", type(sqrt_root_two_a), "sqrt_root_two_a.value:", sqrt_root_two_a.value) sqrt_root_two_b = SquareRootOfTwo() print("sqrt_two_b id(sqrt_root_two_b)", id(sqrt_root_two_b), "type(sqrt_root_two_b):", type(sqrt_root_two_b), "sqrt_root_two_b.value:", sqrt_root_two_b.value) # all singleton objects of the same class are referring to the same object assert id(sqrt_root_two_a) == id(sqrt_root_two_b) sqrt_root_three_a = SquareRootOfThree() print("sqrt_three_a id(sqrt_root_three_a):", id(sqrt_root_three_a), "type(sqrt_root_three_a):", type(sqrt_root_three_a), "sqrt_root_three_a.value:", sqrt_root_three_a.value) sqrt_root_three_b = SquareRootOfThree() print("sqrt_three_b id(sqrt_root_three_b)", id(sqrt_root_three_b), "type(sqrt_root_three_b):", type(sqrt_root_three_b), "sqrt_root_three_b.value:", sqrt_root_three_b.value) # all singleton objects of the same class are referring to the same object assert 
id(sqrt_root_three_a) == id(sqrt_root_three_b) """) header_md("""Metaclasses in the Python3 standard library""", nesting=2) print_md(""" This section lists examples of meta-classes in the python standard library. Looking at the standard library of a language is often quite useful, when learning about the intricacies of a programming language. """) header_md("""ABCMeta class""", nesting=3) print_md("""The purpose of this metaclass is to define abstract base classes (also known as ABC's), as defined in [PEP 3119](https://www.python.org/dev/peps/pep-3119/), the documentation for the metaclass [ABCMeta class](https://docs.python.org/3/library/abc.html#abc.ABCMeta). A python metaclass imposes a different behavior for builtin function [isinstance](https://docs.python.org/3/library/functions.html#isinstance) and [issubclass](https://docs.python.org/3/library/functions.html#issubclass) Only classes that are [registered](https://docs.python.org/3/library/abc.html#abc.ABCMeta.register) with the metaclass, are reported as being subclasses of the given metaclass. The referenced PEP explains, why this is needed, i didn't quite understand the explanation. Would be helpful if the reader can clarify this issue. """) header_md("""Enum classes""", nesting=3) print_md("""Python has support for [enum classes](https://docs.python.org/3/library/enum.html). An enum class lists a set of integer class variables, these variables can then be accessed both by their name, and by their integer value. 
An example usage: Note that the class doesn't have a constructor, everything is being taken care of by the baseclass [enum.Enum](https://docs.python.org/3/library/enum.html#enum.Enum) which is making use of a meta-class in he definition of the Enum class [here](https://docs.python.org/3/library/enum.html), this metaclass [EnumMeta source code](https://github.com/python/cpython/blob/f6648e229edf07a1e4897244d7d34989dd9ea647/Lib/enum.py#L161) then creates a behind the scene dictionary, that maps the integer values to their constant names. The advantage is, that you get an exception, when accessing an undefined constant, or name. There are also more things there, please refer to the linked [documentation](https://docs.python.org/3/library/enum.html) """) eval_and_quote(""" import enum class Rainbow(enum.Enum): RED=1 ORANGE=2 YELLOW=3 GREEN=4 BLUE=5 INDIGO=6 VIOLET=7 color=Rainbow.GREEN print("type(Rainbow.GREEN):", type(Rainbow.GREEN)) print("The string rep Rainbow.Green.name:", Rainbow.GREEN.name, "type(Rainbow.GREEN.name):", type(Rainbow.GREEN.name)) print("The integer rep Rainbow.GREEN.value: ", Rainbow.GREEN.value, "type(Rainbow.GREEN.value):", type(Rainbow.GREEN.value)) print("Access by name: Rainbow['GREEN']:", Rainbow['GREEN']) print("Access by value: Rainbow(4):", Rainbow(4)) # which is the same thing assert id(Rainbow['GREEN']) == id(Rainbow(4)) """) header_md("""Conclusion""", nesting=2) print_md(""" Python meta-classes and decorators are very similar in their capabilities. Both are tools for [metaprogramming](https://en.wikipedia.org/wiki/Metaprogramming), tools for modifying the program text, and treating and modifying code, as if it were data. I would argue, that decorators are most often the easiest way of achieving the same goal. 
However some things, like hooking the classification of classes and objects (implementing class methods [__instancecheck__ and __subclasscheck__](https://docs.python.org/3/reference/datamodel.html#customizing-instance-and-subclass-checks), can only be done with meta-classes. I hope, that this course has given you a better understanding, of what is happening under the hood, which would be a good thing. """) print_md("*** eof tutorial ***")
28,847
8,851
import speech_recognition as sr  # speech-recognition backend
import pyttsx3  # text-to-speech engine
import json
import series_counter as s_c

# Module-level TTS engine shared by every speak() call.
engine = pyttsx3.init()


def speak(text):
    """Say `text` out loud and block until playback finishes."""
    engine.say(text)
    engine.runAndWait()


speak('hi user')


def Voice_recognize(wait_time, noice_duration):
    """Listen on the default microphone and return the recognized text, lower-cased.

    wait_time      -- seconds of silence that terminate an utterance (pause_threshold)
    noice_duration -- seconds used to calibrate against ambient noise
    Retries (recursively) until Google's recognizer returns something.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        speak('i am ready for your command')
        r.pause_threshold = wait_time  # wait for the user to pause this long
        r.adjust_for_ambient_noise(source, duration=noice_duration)
        audio = r.listen(source)
    try:
        # Google Web Speech API with the default language.
        output = r.recognize_google(audio).lower()
        speak('you said:' + output)  # use this before using "language" attribute <lang = langs["Hindi"]>
    except sr.UnknownValueError:
        # Unrecognizable speech: tell the user and listen again.
        speak("Your last command couldn't be heard")
        # BUG FIX: the retry used to call Voice_recognize() with no arguments,
        # which raised TypeError instead of listening again.
        output = Voice_recognize(wait_time, noice_duration)
    return output


class speech_to_text:
    """Format one recognized utterance and append it to the results file."""

    def __init__(self, count, output, results_loc):
        self.count = count              # sequence number of this utterance
        self.output = output            # recognized text
        self.results_loc = results_loc  # path of the results text file

    def counter(self):
        self.present_count = self.count

    def result_formatter(self):
        # "<count> <text> \n" -- counter() must run before this.
        self.final_result = self.present_count + " "
        self.final_result += self.output + " \n"

    def result_to_txt(self):
        with open(self.results_loc, "a+", encoding='utf8') as mfopener:
            mfopener.write(self.final_result)

    def result_returner(self):
        return self.final_result


class test_printer:
    """Build the echo line shown on the console."""

    def __init__(self, actual):
        self.actual = actual

    def printer(self):
        return f"You said: {self.actual}\n"


class run_utils:
    """Drive one capture cycle: resolve paths, listen, persist, report."""

    def __init__(self, present_count):
        self.present_count = present_count

    def file_paths(self):
        # storage/path.json maps logical names to file locations (used by the
        # mailing system as well).  Read-only access is sufficient here.
        with open("./storage/path.json", "r") as file:
            data = json.load(file)
        self.results_loc = data["results"]

    def run_Vr(self):
        # Run the Voice_recognize function with the default tuning.
        wait_time, duration = 5, 0.5
        self.output = Voice_recognize(wait_time, duration)

    def run_all_s2t(self):
        # Run every step of the speech_to_text pipeline in order.
        s2t = speech_to_text(self.present_count, self.output, self.results_loc)
        s2t.counter()
        s2t.result_formatter()
        s2t.result_to_txt()
        self.return_result = s2t.result_returner()

    def run_printer(self):
        # BUG FIX: this used to rebind self.printer to the helper's bound
        # method, shadowing the printer() method below (dead code).  Store the
        # rendered text instead and let printer() return it.
        tst_print = test_printer(self.output)
        self.printer_output = tst_print.printer()

    def printer(self):
        return self.printer_output


def run_all(present_count):
    """Run one full capture cycle and return the console line."""
    r_utils = run_utils(present_count)
    r_utils.file_paths()
    r_utils.run_Vr()
    r_utils.run_all_s2t()
    r_utils.run_printer()
    return r_utils.printer()


def run():
    """Entry point: fetch the next sequence number and run one cycle."""
    present_count = s_c.run()
    print(run_all(present_count))


if __name__ == "__main__":
    run()
3,508
1,095
# MIT License # # Copyright (c) 2019 SSL-Roots # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# coding: UTF-8
# defense.py defines actions that never kick the ball (goal-front and
# zone defense behaviors for the consai2 robot-soccer stack).

import math
import rospy
import sys,os
from consai2_msgs.msg import BallInfo, RobotInfo
from consai2_msgs.msg import ControlTarget
from geometry_msgs.msg import Pose2D
sys.path.append(os.pardir)
from field import Field
from observer import Observer
import role
import tool


# Dispatch on the defense role ID and delegate to the matching behavior.
def defense_decision(my_role, ball_info, control_target, my_pose, defense_num, robot_info, zone_enable=False):
    # Goal-front defense roles
    if role.ROLE_ID['ROLE_DEFENSE_GOAL_1'] <= my_role <= role.ROLE_ID['ROLE_DEFENSE_GOAL_2']:
        return defense_goal(my_pose, ball_info, control_target, my_role, defense_num)
    # Zone defense roles
    elif role.ROLE_ID['ROLE_DEFENSE_ZONE_1'] <= my_role <= role.ROLE_ID['ROLE_DEFENSE_ZONE_4']:
        return defense_zone(my_pose, ball_info, control_target, my_role, defense_num,
                robot_info['their'], zone_enable)
    # Unexpected role: stay where we are.
    else:
        control_target.path = []
        control_target.path.append(my_pose)
        return control_target


def defense_goal(my_pose, ball_info, control_target, my_role, defense_num):
    # Goal-front defense tracks along the penalty-area boundary.
    # The target is the intersection of the ball-to-goal line with the
    # penalty-area edge; depending on where the ball is, the robot guards
    # the left, front, or right edge of the area.

    # Stand-off distance from the penalty area.
    MARGIN_LINE = 0.2
    # Spacing between the two robots when defending in pairs.
    MARGIN_ROBOT = 0
    # Temporary speed-up offset used when switching between edges.
    MARGIN_FOR_SPEED = 0.5

    # With two or more defenders, offset each robot to its own side.
    if defense_num > 1:
        if my_role == role.ROLE_ID["ROLE_DEFENSE_GOAL_1"]:
            MARGIN_ROBOT = 0.15
        else:
            MARGIN_ROBOT = -0.15

    # Ball position
    ball_pose = ball_info.pose

    # Flags: which sector the ball is in.
    ball_is_center = False
    ball_is_left = False
    ball_is_right = False
    # Flags: which side the robot is on (NOTE(review): set nowhere below — unused).
    my_pose_is_left = False
    my_pose_is_right = False
    # Flags: which edge to move to (NOTE(review): set nowhere below — unused).
    target_is_center = False
    target_is_left = False
    target_is_right = False

    # Penalty-area geometry from the Field singleton.
    # Front-left corner of our penalty area
    left_penalty_corner = Field.penalty_pose('our', 'upper_front')
    # Front-right corner of our penalty area
    right_penalty_corner = Field.penalty_pose('our', 'lower_front')
    # Left edge's intersection with the goal line
    left_penalty_goalside = Field.penalty_pose('our', 'upper_back')
    # Right edge's intersection with the goal line
    right_penalty_goalside = Field.penalty_pose('our', 'lower_back')
    # Center of our goal
    goal_center = Field.goal_pose('our', 'center')

    # Angle from goal center to the left penalty corner
    angle_to_left_penalty_corner = tool.get_angle(goal_center, left_penalty_corner)
    # Angle from goal center to the right penalty corner
    angle_to_right_penalty_corner = tool.get_angle(goal_center, right_penalty_corner)
    # Angle from the robot to the ball (the robot faces the ball).
    angle_to_ball = tool.get_angle(my_pose, ball_pose)

    # Frame centered on the left corner, x-axis pointing away from the goal.
    trans_left = tool.Trans(left_penalty_corner, angle_to_left_penalty_corner)
    tr_left_ball_pose = trans_left.transform(ball_pose)
    # Frame centered on the right corner, x-axis pointing away from the goal.
    trans_right = tool.Trans(right_penalty_corner, angle_to_right_penalty_corner)
    tr_right_ball_pose = trans_right.transform(ball_pose)

    # Classify the ball position in those frames.
    if tr_left_ball_pose.y > 0:
        ball_is_left = True
    elif tr_right_ball_pose.y < 0:
        ball_is_right = True
    else:
        ball_is_center = True

    # ---------------------------------------------------------
    # Kicker and dribbler stay off for this behavior.
    control_target.kick_power = 0.0
    control_target.dribble_power = 0.0

    # Ball is in front of the goal.
    if ball_is_center:
        # Intersection of the front edge segment with the goal-to-ball segment.
        target_pose = tool.get_intersection(left_penalty_corner, right_penalty_corner,
                goal_center, ball_pose)
        if target_pose is not None:
            # Stay out of the penalty area: +MARGIN_LINE
            target_pose.x += MARGIN_LINE
            # Robot is behind the front edge:
            if my_pose.x < left_penalty_corner.x:
                # dash forward to the front edge,
                target_pose.x += MARGIN_FOR_SPEED
                # sliding along the penalty-area boundary.
                if my_pose.y > 0:
                    target_pose.y = left_penalty_corner.y + MARGIN_LINE
                else:
                    target_pose.y = right_penalty_corner.y - MARGIN_LINE
            else:
                target_pose.y += MARGIN_ROBOT
        else:
            target_pose = Pose2D()
    # Ball is on the left side.
    elif ball_is_left:
        # Intersection of the left edge segment with the goal-to-ball segment.
        target_pose = tool.get_intersection(left_penalty_corner, left_penalty_goalside,
                goal_center, ball_pose)
        if target_pose is not None:
            # Stay out of the penalty area: +MARGIN_LINE
            target_pose.y += MARGIN_LINE
            # Robot is not on the left side yet:
            if my_pose.y < left_penalty_corner.y:
                # not on the left AND behind the area -> go around via the right edge
                if my_pose.x < left_penalty_corner.x and my_pose.y < 0:
                    target_pose.x = left_penalty_corner.x + MARGIN_FOR_SPEED
                    target_pose.y = right_penalty_corner.y - MARGIN_LINE
                # otherwise dash to the left edge
                else:
                    target_pose.x = left_penalty_corner.x + MARGIN_LINE
                    target_pose.y += MARGIN_FOR_SPEED
            else:
                target_pose.x -= MARGIN_ROBOT
        else:
            target_pose = Pose2D()
    # Ball is on the right side.
    elif ball_is_right:
        target_pose = tool.get_intersection(right_penalty_corner, right_penalty_goalside,
                goal_center, ball_pose)
        if target_pose is not None:
            # Stay out of the penalty area: -MARGIN_LINE
            target_pose.y -= MARGIN_LINE
            # Robot is not on the right side yet:
            if my_pose.y > right_penalty_corner.y:
                # not on the right AND behind the area -> go around via the left edge
                if my_pose.x < left_penalty_corner.x and my_pose.y > 0:
                    target_pose.x = left_penalty_corner.x + MARGIN_FOR_SPEED
                    target_pose.y = left_penalty_corner.y + MARGIN_LINE
                # otherwise dash to the right edge
                else:
                    target_pose.x = right_penalty_corner.x + MARGIN_LINE
                    target_pose.y -= MARGIN_FOR_SPEED
            else:
                target_pose.x += MARGIN_ROBOT
        else:
            target_pose = Pose2D()

    # Never leave the field behind the goal line.
    if target_pose.x < goal_center.x:
        target_pose.x = goal_center.x

    # Face the ball.
    target_pose.theta = angle_to_ball

    control_target.path = []
    control_target.path.append(target_pose)

    return control_target


def defense_zone(my_pose, ball_info, control_target, my_role, defense_num,
        their_robot_info, zone_enable):
    # Number of robots reserved for goal-front defense.
    GOAL_DEFENSE_NUM = 2
    # Remaining defenders are available for zone defense.
    ZONE_DEFENSE_NUM = defense_num - GOAL_DEFENSE_NUM
    # First ROLE_ID assigned to zone defense.
    ZONE_START_ROLE_ID = role.ROLE_ID["ROLE_DEFENSE_ZONE_1"]
    # Waiting pose for the zone-offence robot.
    ZONE_OFFENCE_POSE = Pose2D(3,0,0)
    # Margin around the center line (avoids creeping forward at their kickoff).
    MARGIN_CENTER = 0.6
    # Small forward push when the ball is in the opponent half.
    MARGIN_LITTLE_FORWARD = 1.0
    # Dribbler power used while receiving a moving ball.
    DRIBBLE_POWER = 0.6

    # Ball position
    ball_pose = ball_info.pose

    # Field geometry.
    field_width = Field.field('width')
    half_field_width = float(field_width) / 2
    quarter_field_width = float(field_width) / 4
    field_length = Field.field('length')
    # Quarter length, negated -> middle of our own half.
    half_our_field_length = -float(field_length) / 4
    goal_center = Field.goal_pose('our', 'center')
    # Penalty-area front corners.
    left_penalty_corner = Field.penalty_pose('our', 'upper_front')
    right_penalty_corner = Field.penalty_pose('our', 'lower_front')

    # Robot faces the ball.
    angle_to_ball = tool.get_angle(my_pose, ball_pose)
    # Angle from goal center to the ball.
    angle_to_ball_from_goal = tool.get_angle(goal_center, ball_pose)

    # Zone index for this robot (stays None if no zone applies).
    zone_id = None
    # Movement target, defaults to the origin pose.
    target_pose = Pose2D()

    # ---------------------------------------------------------
    # Kicker and dribbler start off.
    control_target.kick_power = 0.0
    control_target.dribble_power = 0.0

    # Is this robot promoted to zone offence?
    my_role_is_offence = False

    # When the ball is in the opponent half, one zone defender is promoted
    # to zone offence.  MARGIN_CENTER keeps us from pushing forward at
    # their kickoff.
    if ZONE_DEFENSE_NUM > 1 and ball_pose.x > MARGIN_CENTER:
        # One fewer zone defender...
        ZONE_DEFENSE_NUM -= 1
        # ...and the zone role IDs shift by one.
        ZONE_START_ROLE_ID = role.ROLE_ID["ROLE_DEFENSE_ZONE_2"]
        # ROLE_DEFENSE_ZONE_1 becomes the zone-offence robot.
        # NOTE(review): `is` identity comparison on an int relies on CPython
        # small-int caching; `==` would be the safe spelling.
        if my_role is role.ROLE_ID["ROLE_DEFENSE_ZONE_1"]:
            my_role_is_offence = True

    # This robot plays zone offence.
    if my_role_is_offence:
        zone_id = 0
        target_pose = ZONE_OFFENCE_POSE
        # The attacker goes for the ball, so wait on the opposite side
        # to pick up loose balls.
        if ball_pose.y > 0:
            target_pose.y = - quarter_field_width
        else:
            target_pose.y = quarter_field_width
        # Face the ball.
        target_pose.theta = angle_to_ball

    # Regular zone defenders.
    if ZONE_DEFENSE_NUM > 0 and not my_role_is_offence:
        step = float(field_width) / (ZONE_DEFENSE_NUM * 2)
        # Field-width split into 2*N equal slices (boundary coordinates).
        split_field = [i * step - half_field_width \
                for i in range(0,(ZONE_DEFENSE_NUM * 2 + 1))]
        # Center y-coordinate of each zone (odd boundary indices).
        split_field_center = [i * step - half_field_width \
                for i in range(0,(ZONE_DEFENSE_NUM * 2)) \
                if i % 2 != 0]

        # try guards against stale role IDs indexing past the arrays.
        try:
            # With 8 robots alive and no zone offence, zone_id is 0..3.
            zone_id = my_role - ZONE_START_ROLE_ID
            # Default target: the center of our zone.
            target_pose.y = split_field_center[zone_id]

            # Opponents inside our zone: in our half AND within our y-band.
            # Empty list when none match.
            invader_pose = [i.pose for i in their_robot_info \
                    if split_field[zone_id * 2] < i.pose.y < split_field[(zone_id + 1) * 2] and \
                    i.pose.x < 0]

            # Ball inside our zone (and zone behavior enabled): shadow the
            # ball from 0.9 m goal-side.
            if(zone_enable and \
                    ball_pose.x < 0 and \
                    split_field[zone_id * 2] < ball_pose.y < split_field[(zone_id + 1) * 2]):
                trans = tool.Trans(ball_pose, angle_to_ball_from_goal)
                target_pose = trans.inverted_transform(Pose2D(-0.9, 0, 0))
            # No ball but an invader in our zone: cut the invader-ball line
            # 0.5 m in front of the invader.
            elif zone_enable and invader_pose != []:
                angle_to_ball_from_invader = tool.get_angle(invader_pose[0], ball_pose)
                trans = tool.Trans(invader_pose[0], angle_to_ball_from_invader)
                target_pose = trans.inverted_transform(Pose2D(0.5, 0, 0))
            else:
                # Ball in opponent half: push slightly forward.
                if ball_pose.x > MARGIN_CENTER:
                    target_pose.x = half_our_field_length + MARGIN_LITTLE_FORWARD
                else:
                    target_pose.x = half_our_field_length
        except IndexError:
            # Bad zone_id: hold position.
            target_pose = my_pose
        target_pose.theta = angle_to_ball

    # If the ball is coming toward us, move to receive it.
    if zone_id != None:
        receive_ball_result, receive_target_pose = update_receive_ball(ball_info, my_pose, zone_id)
        if receive_ball_result:
            # Spin up the dribbler to trap the ball.
            control_target.dribble_power = DRIBBLE_POWER
            target_pose = receive_target_pose

    # Never enter the penalty area (0.2/0.3 m safety margins).
    if((left_penalty_corner.y + 0.2 > target_pose.y > right_penalty_corner.y - 0.2) and \
            target_pose.x < left_penalty_corner.x + 0.3):
        target_pose.x = half_our_field_length

    control_target.path = []
    control_target.path.append(target_pose)

    return control_target


# Per-zone "currently receiving" latch, shared across calls.
class Receiving(object):
    # NOTE(review): "_recenving" looks like a typo for "_receiving", but it is
    # internal and used consistently, so it is left as-is.
    _recenving = [False] * role.ZONE_DEFENSE_NUM

    @classmethod
    def update_receiving(cls, zone_id, param):
        Receiving._recenving[zone_id] = param

    @classmethod
    def receiving(cls, zone_id):
        return Receiving._recenving[zone_id]


def update_receive_ball(ball_info, my_pose, zone_id):
    # Decide whether this zone robot should intercept the moving ball, and
    # where.  Returns (should_receive, target_pose).
    # Ball position
    ball_pose = ball_info.pose
    # Ball velocity
    ball_vel = ball_info.velocity
    # Distance from the ball's travel line below which we can receive...
    _can_receive_dist = 1.0
    # ...with hysteresis so the decision does not chatter.
    _can_receive_hysteresis = 0.3

    result = False
    target_pose = Pose2D()

    # Only a moving ball can be received.
    if Observer.ball_is_moving():
        # Frame aligned with the ball's velocity vector, centered on the ball.
        angle_velocity = tool.get_angle_from_center(ball_vel)
        trans = tool.Trans(ball_pose, angle_velocity)
        tr_pose = trans.transform(my_pose)
        # Perpendicular distance from the ball's travel line.
        fabs_y = math.fabs(tr_pose.y)

        # Latch on when clearly close enough...
        if Receiving.receiving(zone_id) == False and \
                fabs_y < _can_receive_dist - _can_receive_hysteresis:
            Receiving.update_receiving(zone_id, True)
        # ...latch off when clearly too far.
        elif Receiving.receiving(zone_id) == True and \
                fabs_y > _can_receive_dist + _can_receive_hysteresis:
            Receiving.update_receiving(zone_id, False)

        # Receivable and downstream of the ball: step onto the travel line.
        if Receiving.receiving(zone_id) and tr_pose.x > 0.0:
            tr_pose.y = 0.0
            inv_pose = trans.inverted_transform(tr_pose)
            angle_to_ball = tool.get_angle(inv_pose, ball_pose)
            target_pose = Pose2D(inv_pose.x, inv_pose.y, angle_to_ball)
            result = True

    return result, target_pose
14,431
6,555
#
# Fuse features per member per family: for each FID.MID, average all face
# encodings across the feature dimension and save the result as avg_encoding.npy.
# Any features can be fused. Here is link to ArcFace features,
# https://www.dropbox.com/s/5rbj68dqud2folu/FIDs-features.tar.gz?dl=0
#
import pickle
from pathlib import Path

import numpy as np
from tqdm import tqdm

from src.tools.features import l2_norm

dir_features = str(Path("./").home() / "datasets/rfiw2021/rfiw2021-data/FIDs-features/")
dir_out = ""
ext = "pkl"  # one of ["pkl", "npy"]

# Validate once, before the loop (was an exit(0) inside the inner loop).
if ext not in ("pkl", "npy"):
    raise ValueError(f"extension {ext} is unrecognizable. Options: [pkl, npy]")

# BUG FIX: the condition was inverted — an empty dir_out stayed empty instead
# of defaulting to the input directory, as the comment intends.
# assume input/output directories are the same if no output is specified
dir_out = dir_features if len(dir_out) == 0 else dir_out

path_features = Path(dir_features)
dir_contents = list(path_features.glob("F????"))  # family directories (F0001, ...)
normalize_features = True
do_pickle2numpy = True  # also write .npy copies of .pkl features (optional convenience)

# Average-fuse all embeddings for each MID.
for fid in tqdm(dir_contents):  # for each family (FID)
    print(f"FID: {fid}")
    for mid in fid.glob("MID*"):  # for each member (MID)
        print(f"Fusing: {mid}")
        if not mid.is_dir():
            continue
        fout = mid / "avg_encoding.npy"
        features = []
        for face_feat in mid.glob(f"*face*.{ext}"):  # for each face encoding
            if ext == "pkl":
                try:
                    with open(str(face_feat), "rb") as fin:
                        feature = np.array(pickle.load(fin))
                    if do_pickle2numpy:
                        np.save(str(face_feat).replace(".pkl", ".npy"), feature)
                except (OSError, pickle.UnpicklingError, ValueError):
                    print(
                        f"WARNING: Exception thrown converting pickle to npy. {face_feat}"
                    )
                    # BUG FIX: skip this file; previously a stale (or undefined)
                    # `feature` from the prior iteration could be appended.
                    continue
            else:  # ext == "npy"
                feature = np.load(str(face_feat))
            features.append(feature)

        if features:
            # BUG FIX: the mean was only computed when normalize_features was
            # True; otherwise `features` stayed a list and the shape check
            # below crashed.  Fuse first, then optionally L2-normalize.
            fused = np.mean(features, axis=0)
            if normalize_features:
                fused = l2_norm(fused[None, ...])[0]
            if fused.shape[0] == 512:  # sanity check: expected embedding size
                np.save(fout, fused)
            else:
                print(f"ERROR saving: {fout}")
2,432
756
# Python > Collections > Company Logo
# Print the number of character occurrences in descending order.
#
# https://www.hackerrank.com/challenges/most-commons/problem
#
from collections import Counter
from itertools import groupby


def top_three(name):
    """Return up to three (letter, count) pairs for `name`.

    Pairs are ordered by descending count; letters with equal counts are
    ordered alphabetically (the HackerRank tie-break rule).
    """
    result = []
    # most_common() is already sorted by count, so groupby collapses ties.
    for count, grp in groupby(Counter(name).most_common(), key=lambda item: item[1]):
        for letter in sorted(ch for ch, _ in grp):
            result.append((letter, count))
            if len(result) == 3:
                return result
    return result


def main():
    """Read the company name from stdin and print the top three letters."""
    for letter, count in top_three(input()):
        print(letter, count)


if __name__ == "__main__":
    main()
454
156
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ = ['CdcArgs', 'Cdc'] @pulumi.input_type class CdcArgs: def __init__(__self__, *, database_id: pulumi.Input[str], database_name: pulumi.Input[str], keyspace: pulumi.Input[str], table: pulumi.Input[str], tenant_name: pulumi.Input[str], topic_partitions: pulumi.Input[int]): """ The set of arguments for constructing a Cdc resource. :param pulumi.Input[str] database_id: Astra database to create the keyspace. :param pulumi.Input[str] database_name: Astra database name. :param pulumi.Input[str] keyspace: Initial keyspace name. For additional keyspaces, use the astra_keyspace resource. :param pulumi.Input[str] table: Astra database table. :param pulumi.Input[str] tenant_name: Streaming tenant name :param pulumi.Input[int] topic_partitions: Number of partitions in cdc topic. """ pulumi.set(__self__, "database_id", database_id) pulumi.set(__self__, "database_name", database_name) pulumi.set(__self__, "keyspace", keyspace) pulumi.set(__self__, "table", table) pulumi.set(__self__, "tenant_name", tenant_name) pulumi.set(__self__, "topic_partitions", topic_partitions) @property @pulumi.getter(name="databaseId") def database_id(self) -> pulumi.Input[str]: """ Astra database to create the keyspace. """ return pulumi.get(self, "database_id") @database_id.setter def database_id(self, value: pulumi.Input[str]): pulumi.set(self, "database_id", value) @property @pulumi.getter(name="databaseName") def database_name(self) -> pulumi.Input[str]: """ Astra database name. 
""" return pulumi.get(self, "database_name") @database_name.setter def database_name(self, value: pulumi.Input[str]): pulumi.set(self, "database_name", value) @property @pulumi.getter def keyspace(self) -> pulumi.Input[str]: """ Initial keyspace name. For additional keyspaces, use the astra_keyspace resource. """ return pulumi.get(self, "keyspace") @keyspace.setter def keyspace(self, value: pulumi.Input[str]): pulumi.set(self, "keyspace", value) @property @pulumi.getter def table(self) -> pulumi.Input[str]: """ Astra database table. """ return pulumi.get(self, "table") @table.setter def table(self, value: pulumi.Input[str]): pulumi.set(self, "table", value) @property @pulumi.getter(name="tenantName") def tenant_name(self) -> pulumi.Input[str]: """ Streaming tenant name """ return pulumi.get(self, "tenant_name") @tenant_name.setter def tenant_name(self, value: pulumi.Input[str]): pulumi.set(self, "tenant_name", value) @property @pulumi.getter(name="topicPartitions") def topic_partitions(self) -> pulumi.Input[int]: """ Number of partitions in cdc topic. """ return pulumi.get(self, "topic_partitions") @topic_partitions.setter def topic_partitions(self, value: pulumi.Input[int]): pulumi.set(self, "topic_partitions", value) @pulumi.input_type class _CdcState: def __init__(__self__, *, connector_status: Optional[pulumi.Input[str]] = None, data_topic: Optional[pulumi.Input[str]] = None, database_id: Optional[pulumi.Input[str]] = None, database_name: Optional[pulumi.Input[str]] = None, keyspace: Optional[pulumi.Input[str]] = None, table: Optional[pulumi.Input[str]] = None, tenant_name: Optional[pulumi.Input[str]] = None, topic_partitions: Optional[pulumi.Input[int]] = None): """ Input properties used for looking up and filtering Cdc resources. :param pulumi.Input[str] connector_status: Streaming tenant name :param pulumi.Input[str] data_topic: Streaming tenant name :param pulumi.Input[str] database_id: Astra database to create the keyspace. 
:param pulumi.Input[str] database_name: Astra database name. :param pulumi.Input[str] keyspace: Initial keyspace name. For additional keyspaces, use the astra_keyspace resource. :param pulumi.Input[str] table: Astra database table. :param pulumi.Input[str] tenant_name: Streaming tenant name :param pulumi.Input[int] topic_partitions: Number of partitions in cdc topic. """ if connector_status is not None: pulumi.set(__self__, "connector_status", connector_status) if data_topic is not None: pulumi.set(__self__, "data_topic", data_topic) if database_id is not None: pulumi.set(__self__, "database_id", database_id) if database_name is not None: pulumi.set(__self__, "database_name", database_name) if keyspace is not None: pulumi.set(__self__, "keyspace", keyspace) if table is not None: pulumi.set(__self__, "table", table) if tenant_name is not None: pulumi.set(__self__, "tenant_name", tenant_name) if topic_partitions is not None: pulumi.set(__self__, "topic_partitions", topic_partitions) @property @pulumi.getter(name="connectorStatus") def connector_status(self) -> Optional[pulumi.Input[str]]: """ Streaming tenant name """ return pulumi.get(self, "connector_status") @connector_status.setter def connector_status(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "connector_status", value) @property @pulumi.getter(name="dataTopic") def data_topic(self) -> Optional[pulumi.Input[str]]: """ Streaming tenant name """ return pulumi.get(self, "data_topic") @data_topic.setter def data_topic(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "data_topic", value) @property @pulumi.getter(name="databaseId") def database_id(self) -> Optional[pulumi.Input[str]]: """ Astra database to create the keyspace. 
""" return pulumi.get(self, "database_id") @database_id.setter def database_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "database_id", value) @property @pulumi.getter(name="databaseName") def database_name(self) -> Optional[pulumi.Input[str]]: """ Astra database name. """ return pulumi.get(self, "database_name") @database_name.setter def database_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "database_name", value) @property @pulumi.getter def keyspace(self) -> Optional[pulumi.Input[str]]: """ Initial keyspace name. For additional keyspaces, use the astra_keyspace resource. """ return pulumi.get(self, "keyspace") @keyspace.setter def keyspace(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "keyspace", value) @property @pulumi.getter def table(self) -> Optional[pulumi.Input[str]]: """ Astra database table. """ return pulumi.get(self, "table") @table.setter def table(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "table", value) @property @pulumi.getter(name="tenantName") def tenant_name(self) -> Optional[pulumi.Input[str]]: """ Streaming tenant name """ return pulumi.get(self, "tenant_name") @tenant_name.setter def tenant_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "tenant_name", value) @property @pulumi.getter(name="topicPartitions") def topic_partitions(self) -> Optional[pulumi.Input[int]]: """ Number of partitions in cdc topic. 
""" return pulumi.get(self, "topic_partitions") @topic_partitions.setter def topic_partitions(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "topic_partitions", value) class Cdc(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, database_id: Optional[pulumi.Input[str]] = None, database_name: Optional[pulumi.Input[str]] = None, keyspace: Optional[pulumi.Input[str]] = None, table: Optional[pulumi.Input[str]] = None, tenant_name: Optional[pulumi.Input[str]] = None, topic_partitions: Optional[pulumi.Input[int]] = None, __props__=None): """ `Cdc` enables cdc for an Astra Serverless table. ## Example Usage ```python import pulumi import pulumiverse_astra as astra streaming_tenant_1 = astra.StreamingTenant("streamingTenant-1", tenant_name="terraformtest", topic="terraformtest", region="useast-4", cloud_provider="gcp", user_email="seb@datastax.com") cdc_1 = astra.Cdc("cdc-1", database_id="5b70892f-e01a-4595-98e6-19ecc9985d50", database_name="sai_test", table="test", keyspace="sai_test", topic_partitions=3, tenant_name=streaming_tenant_1.tenant_name, opts=pulumi.ResourceOptions(depends_on=[streaming_tenant_1])) ``` ## Import ```sh $ pulumi import astra:index/cdc:Cdc example databaseId/keyspace/table/tenantName ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] database_id: Astra database to create the keyspace. :param pulumi.Input[str] database_name: Astra database name. :param pulumi.Input[str] keyspace: Initial keyspace name. For additional keyspaces, use the astra_keyspace resource. :param pulumi.Input[str] table: Astra database table. :param pulumi.Input[str] tenant_name: Streaming tenant name :param pulumi.Input[int] topic_partitions: Number of partitions in cdc topic. """ ... 
@overload def __init__(__self__, resource_name: str, args: CdcArgs, opts: Optional[pulumi.ResourceOptions] = None): """ `Cdc` enables cdc for an Astra Serverless table. ## Example Usage ```python import pulumi import pulumiverse_astra as astra streaming_tenant_1 = astra.StreamingTenant("streamingTenant-1", tenant_name="terraformtest", topic="terraformtest", region="useast-4", cloud_provider="gcp", user_email="seb@datastax.com") cdc_1 = astra.Cdc("cdc-1", database_id="5b70892f-e01a-4595-98e6-19ecc9985d50", database_name="sai_test", table="test", keyspace="sai_test", topic_partitions=3, tenant_name=streaming_tenant_1.tenant_name, opts=pulumi.ResourceOptions(depends_on=[streaming_tenant_1])) ``` ## Import ```sh $ pulumi import astra:index/cdc:Cdc example databaseId/keyspace/table/tenantName ``` :param str resource_name: The name of the resource. :param CdcArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(CdcArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, database_id: Optional[pulumi.Input[str]] = None, database_name: Optional[pulumi.Input[str]] = None, keyspace: Optional[pulumi.Input[str]] = None, table: Optional[pulumi.Input[str]] = None, tenant_name: Optional[pulumi.Input[str]] = None, topic_partitions: Optional[pulumi.Input[int]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.plugin_download_url is None: opts.plugin_download_url = _utilities.get_plugin_download_url() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = CdcArgs.__new__(CdcArgs) if database_id is None and not opts.urn: raise TypeError("Missing required property 'database_id'") __props__.__dict__["database_id"] = database_id if database_name is None and not opts.urn: raise TypeError("Missing required property 'database_name'") __props__.__dict__["database_name"] = database_name if keyspace is None and not opts.urn: raise TypeError("Missing required property 'keyspace'") __props__.__dict__["keyspace"] = keyspace if table is None and not opts.urn: raise TypeError("Missing required property 'table'") __props__.__dict__["table"] = table if tenant_name is None and not opts.urn: raise TypeError("Missing required property 'tenant_name'") __props__.__dict__["tenant_name"] = tenant_name if 
topic_partitions is None and not opts.urn: raise TypeError("Missing required property 'topic_partitions'") __props__.__dict__["topic_partitions"] = topic_partitions __props__.__dict__["connector_status"] = None __props__.__dict__["data_topic"] = None super(Cdc, __self__).__init__( 'astra:index/cdc:Cdc', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, connector_status: Optional[pulumi.Input[str]] = None, data_topic: Optional[pulumi.Input[str]] = None, database_id: Optional[pulumi.Input[str]] = None, database_name: Optional[pulumi.Input[str]] = None, keyspace: Optional[pulumi.Input[str]] = None, table: Optional[pulumi.Input[str]] = None, tenant_name: Optional[pulumi.Input[str]] = None, topic_partitions: Optional[pulumi.Input[int]] = None) -> 'Cdc': """ Get an existing Cdc resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] connector_status: Streaming tenant name :param pulumi.Input[str] data_topic: Streaming tenant name :param pulumi.Input[str] database_id: Astra database to create the keyspace. :param pulumi.Input[str] database_name: Astra database name. :param pulumi.Input[str] keyspace: Initial keyspace name. For additional keyspaces, use the astra_keyspace resource. :param pulumi.Input[str] table: Astra database table. :param pulumi.Input[str] tenant_name: Streaming tenant name :param pulumi.Input[int] topic_partitions: Number of partitions in cdc topic. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _CdcState.__new__(_CdcState) __props__.__dict__["connector_status"] = connector_status __props__.__dict__["data_topic"] = data_topic __props__.__dict__["database_id"] = database_id __props__.__dict__["database_name"] = database_name __props__.__dict__["keyspace"] = keyspace __props__.__dict__["table"] = table __props__.__dict__["tenant_name"] = tenant_name __props__.__dict__["topic_partitions"] = topic_partitions return Cdc(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="connectorStatus") def connector_status(self) -> pulumi.Output[str]: """ Streaming tenant name """ return pulumi.get(self, "connector_status") @property @pulumi.getter(name="dataTopic") def data_topic(self) -> pulumi.Output[str]: """ Streaming tenant name """ return pulumi.get(self, "data_topic") @property @pulumi.getter(name="databaseId") def database_id(self) -> pulumi.Output[str]: """ Astra database to create the keyspace. """ return pulumi.get(self, "database_id") @property @pulumi.getter(name="databaseName") def database_name(self) -> pulumi.Output[str]: """ Astra database name. """ return pulumi.get(self, "database_name") @property @pulumi.getter def keyspace(self) -> pulumi.Output[str]: """ Initial keyspace name. For additional keyspaces, use the astra_keyspace resource. """ return pulumi.get(self, "keyspace") @property @pulumi.getter def table(self) -> pulumi.Output[str]: """ Astra database table. """ return pulumi.get(self, "table") @property @pulumi.getter(name="tenantName") def tenant_name(self) -> pulumi.Output[str]: """ Streaming tenant name """ return pulumi.get(self, "tenant_name") @property @pulumi.getter(name="topicPartitions") def topic_partitions(self) -> pulumi.Output[int]: """ Number of partitions in cdc topic. """ return pulumi.get(self, "topic_partitions")
19,174
5,794
import os
from torch.optim import Adam, SGD
import skopt
import torch

from utils.data_utils import select_data
from utils.visualization_utils import plot_data_and_fit
from learning_models.logistic import Logistic

# Hyper-parameter search (Bayesian optimisation via skopt) for a logistic
# growth model fitted to cumulative COVID-19 fatality counts.
# Alternative Italian per-region dataset kept below for reference.
# df_file = os.path.join(os.getcwd(), "dati-regioni", "dpc-covid19-ita-regioni.csv")
df_file = os.path.join(os.getcwd(), "train.csv")

area = ["China"]  # list(df["denominazione_regione"].unique())
area_col_name = "Country/Region"  # "denominazione_regione"
value_col_name = "Fatalities"  # "deceduti"
groupby_cols = ["Date"]  # ["Data"]

configs = {"optimizer": SGD, "n_epochs": 20000}

_x, _y = select_data(df_file, area, area_col_name, value_col_name, groupby_cols, file_sep=",")
validation_index = 50  # fixme china only
x = _x[:validation_index]
y = _y[:validation_index]
LOGISTIC_MODEL = Logistic((x, y), configs)


def train(params):
    """Fit the global logistic model with the sampled hyper-params; return its loss."""
    return LOGISTIC_MODEL.fit(params)


SPACE = [skopt.space.Real(1e-9, 1e-3, name='lrw', prior='log-uniform'),
         skopt.space.Real(1e-9, 1e-2, name='lrb', prior='log-uniform'),
         skopt.space.Real(5e-5, 9e-1, name='lrm', prior='log-uniform'),
         skopt.space.Real(-3.0, 3.0, name='initial_w', prior='uniform'),
         skopt.space.Real(-5.0, 5.0, name='initial_b', prior='uniform'),
         skopt.space.Real(min(y)/5, 10*max(y), name='initial_m', prior='uniform'),
         ]


@skopt.utils.use_named_args(SPACE)
def objective(**params):
    """skopt objective: forward the named search-space sample to `train`."""
    all_params = {**params}
    return train(all_params)


res_gp = skopt.gp_minimize(objective, SPACE, n_calls=50)  # n_calls is the number of repeated trials
# print(res_gp)
score = "Best score=%.4f" % res_gp.fun
result = """Best parameters:
- lrw=%.9f
- lrb=%.9f
- lrm=%.9f
- initial_w=%.6f
- initial_b=%.6f
- initial_m=%.6f""" % (res_gp.x[0], res_gp.x[1], res_gp.x[2],
                       res_gp.x[3], res_gp.x[4], res_gp.x[5])
print(score)
print(result)

# FIX: base_path was only assigned on a commented-out line, which made the
# uses below raise NameError. Assign it before use.
base_path = os.path.join(os.getcwd(), "regioni")
if not os.path.exists(base_path):
    os.mkdir(base_path)

log_file = os.path.join(base_path, area[0] + "_best_results.txt")
with open(log_file, "w") as f:
    f.write(score)
    f.write("\n")  # FIX: separate score from parameters instead of fusing them
    f.write(result)

y_hat = LOGISTIC_MODEL(LOGISTIC_MODEL.x).detach().numpy()
data = (LOGISTIC_MODEL.x.detach().numpy(), LOGISTIC_MODEL.y.detach().numpy())

future_days = 10  # number of future days (past the fit window) to predict
future_x = torch.tensor([i+len(y) for i in range(future_days)]).view(-1, 1).float()
future_y = LOGISTIC_MODEL(future_x).detach().numpy()
# FIX: message previously hard-coded "30 days" regardless of future_days.
print("Error in next %d days" % future_days)
print(LOGISTIC_MODEL.eval(future_x, torch.tensor(_y[validation_index:validation_index+future_days]).float()))
future_x = future_x.detach().numpy()

save_plot_path = os.path.join(base_path, area[0] + ".png")
# plot_data_and_fit(data, fitted_data=(x, w_hat), future_data=(future_x, future_w), save_path=save_plot_path, plot_name=area[0])
data = [_x, _y]
plot_data_and_fit(data, fitted_data=(x, y_hat), future_data=(future_x, future_y), save_path=save_plot_path, plot_name=area[0])
2,960
1,238
#!/usr/bin/env python
"""Chaos test for Kafka producer deduplication.

Produces `count` messages to a fresh topic with an external Java producer
while killing (or cutting TCP connections to) the partition leader mid-run,
then consumes the topic back and reports missing, unacked and duplicated
messages plus ordering anomalies.

Usage: <script> topic_prefix test_num count action_mark test_type
"""

from confluent_kafka import Producer, Consumer, KafkaError
import sys
import time
import subprocess
from datetime import datetime
import threading
from collections import defaultdict
import re
import uuid


def log(text, to_file=False):
    """Print `text`; optionally also append it to the global results file."""
    global output_file
    print(text)
    if to_file:
        output_file.write(f"{text}\n")
        output_file.flush()


def log_order(text):
    """Print `text` and append it, timestamped, to the ordering log file."""
    global order_file
    time_now = datetime.now().strftime('%H:%M:%S')
    print(text)
    order_file.write(f"{time_now}: {text}\n")


def create_cluster():
    """(Re)create the test cluster via the setup shell script."""
    subprocess.call(["./setup-dedup-test-run.sh"])


def kill_tcp_connections_of_leader():
    """Cut client TCP connections to the current leader's port for ~10s via tcpkill."""
    global leader
    port = ""
    if leader == "kafka1":
        port = "9092"
    elif leader == "kafka2":
        port = "9093"
    elif leader == "kafka3":
        port = "9094"
    cmd = f"sudo timeout 10s sudo tcpkill -i docker0 -9 port {port}"
    subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)


# not used at this time
def blackhole_leader():
    """Blackhole-route the leader's IP for 5 seconds, then restore the route."""
    global leader
    ip = ""
    if leader == "kafka1":
        ip = "172.17.0.3"
    elif leader == "kafka2":
        ip = "172.17.0.4"
    elif leader == "kafka3":
        ip = "172.17.0.5"
    cmd = f"sudo ip route add blackhole {ip}"
    subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    time.sleep(5)
    cmd = f"sudo ip route delete blackhole {ip}"
    subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)


def create_topic(topic):
    """Create `topic` via shell script; return the leader node name (e.g. "kafka2")."""
    bash_command = f"bash create-topic-print-leader.sh {topic}"
    process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    leader_num = output.decode('ascii').replace('\n', '')
    leader = f"kafka{leader_num}"
    return leader


def get_live_nodes():
    """Return the list of live node names reported by the cluster script."""
    bash_command = "bash ../cluster/list-live-nodes.sh"
    process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    nodes_line = output.decode('ascii').replace('\n', '')
    return nodes_line.split(' ')


def kill_partition_leader():
    """Kill the broker currently leading the test partition."""
    global leader
    subprocess.call(["./execute-chaos.sh", "kill-specific-node", leader])


def start_downed_broker():
    """Restart the broker that was previously killed."""
    global leader
    subprocess.call(["./execute-chaos.sh", "start-specific-node", leader])


def get_broker_ips():
    """Return broker IPs as a comma-separated bootstrap.servers string."""
    bash_command = "bash ../cluster/list-broker-ips.sh"
    process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    nodes_line = output.decode('ascii').replace('\n', '')
    return nodes_line.rstrip(' ').replace(' ', ',')


def produce_with_java(topic, count, bootstrap_servers, pos_acked_file_path, neg_acked_file_path, enable_idempotency):
    """Run the Java producer, trigger the chaos action mid-run, then load its ack files.

    Populates the global `messages_sent` (one empty offset-list per payload),
    `messages_pos_acked` and `messages_neg_acked` sets from the files the
    Java producer writes.
    """
    global messages_sent, messages_pos_acked, messages_neg_acked
    cmd = f"java -jar ../KafkaDedup/build/libs/KafkaDedup-all-1.0.jar {topic} {count} {bootstrap_servers} {pos_acked_file_path} {neg_acked_file_path} {enable_idempotency}"
    process = subprocess.Popen(
        cmd.split(),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    action_taken = False
    while True:
        out = process.stdout.readline().decode("ascii")
        if out == '' and process.poll() is not None:
            break
        if out != '':
            # readline() keeps the trailing newline; compare stripped text.
            line = out.rstrip('\n')
            if line.startswith("50000") and not action_taken:
                action_taken = True
                if test_type == "kill-leader":
                    print(f"Killing partition leader: {leader}")
                    r = threading.Thread(target=kill_partition_leader)
                    r.start()
                else:
                    print(f"Preparing to kill client connections to partition leader: {leader}")
                    r = threading.Thread(target=kill_tcp_connections_of_leader)
                    r.start()
            elif line == "FINISHED":
                # FIX: previously compared the raw line (including '\n') to
                # "FINISHED", which never matched; the loop only ended on EOF.
                break
            print(out)
    for i in range(0, count):
        messages_sent[str(i)] = list()
    # load pos acked / neg acked (FIX: close the files instead of leaking handles)
    with open(pos_acked_file_path) as f:
        for line in f:
            messages_pos_acked.add(str(int(line)))
    with open(neg_acked_file_path) as f:
        for line in f:
            messages_neg_acked.add(str(int(line)))


# NOTE: the legacy in-process confluent_kafka producer (delivery_report /
# produce) that was kept here as commented-out code has been removed; the
# Java producer above is the one in use.


def partition_assignment(consumer, partitions):
    """on_assign callback: rewind every assigned partition to offset 0."""
    for p in partitions:
        p.offset = 0
    log("Partition assigned")
    consumer.assign(partitions)


def read():
    """Consume the topic from the beginning, recording offsets and order anomalies.

    Appends each received message's offset to `messages_sent[payload]` and
    counts duplicate / non-duplicate jumps forwards and backwards in the
    payload sequence.
    """
    global received_count, messages_sent, topic, duplicate_jump_forward, duplicate_jump_back, jump_forward, jump_back
    bootstrap_servers = get_broker_ips()
    log(f"Consumer bootstrap.servers: {bootstrap_servers}")
    consumer = Consumer({
        'bootstrap.servers': bootstrap_servers,
        'group.id': str(uuid.uuid1()),
        'api.version.request': True,
        'enable.auto.commit': True,
        'auto.offset.reset': 'earliest'
    })
    log(f"Subscribing to {topic}")
    consumer.subscribe([topic], on_assign=partition_assignment)
    no_msg_count = 0
    last_payload = -1
    while True:
        try:
            msg = consumer.poll(2.0)
            if msg is None:
                log("No messages")
                no_msg_count += 1
                if no_msg_count > 30:
                    log("Aborting test, no messages to consume")
                    sys.exit(1)
                continue
            no_msg_count = 0
            if msg.error():
                if msg.error().code() == KafkaError._PARTITION_EOF:
                    log("Consumed all messages")
                    break
                else:
                    log(msg.error())
                    break
            received_count += 1
            msg_offset = msg.offset()
            current_payload = int(msg.value())
            current_payload_str = str(current_payload)
            # "seen" means this payload was already consumed once -> duplicate.
            seen = len(messages_sent[current_payload_str]) > 0
            if seen:
                if last_payload >= current_payload:
                    duplicate_jump_back += 1
                    jump = str(last_payload - current_payload)
                    log_order(f"Test run: {test_run} DUPLICATE BLOCK - JUMP BACKWARDS {jump} ({str(last_payload)} -> {str(current_payload)})")
                elif last_payload + 1 < current_payload:
                    duplicate_jump_forward += 1
                    jump = str(current_payload - last_payload)
                    log_order(f"Test run: {test_run} DUPLICATE BLOCK - JUMP FORWARDS {jump} ({str(last_payload)} -> {str(current_payload)})")
            if not seen:
                if last_payload >= current_payload:
                    jump_back += 1
                    jump = str(last_payload - current_payload)
                    log_order(f"Test run: {test_run} JUMP BACKWARDS {jump} ({str(last_payload)} -> {str(current_payload)})")
                elif last_payload + 1 < current_payload:
                    jump_forward += 1
                    jump = str(current_payload - last_payload)
                    log_order(f"Test run: {test_run} JUMP FORWARDS {jump} ({str(last_payload)} -> {str(current_payload)})")
            if current_payload_str in messages_sent:
                messages_sent[current_payload_str].append(msg_offset)
            last_payload = current_payload
            if received_count % 50000 == 0:
                log(f"Received: {received_count} Curr Offset: {msg_offset}")
        except Exception as ex:
            template = "An exception of type {0} occurred. Arguments:{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            log(message)
    log(f"Read phase complete with message {msg.offset()}")
    consumer.close()


topic_prefix = sys.argv[1]
test_num = int(sys.argv[2])
count = int(sys.argv[3])
action_mark = int(sys.argv[4])
test_type = sys.argv[5]
leader = ""

# create log files
# FIX: the results/order files were previously opened twice (a "w" handle for
# the header that was never closed, then a second "a" handle for appends),
# which could lose or interleave the header. Open each file once and reuse
# the handle (log()/log_order() use these globals).
start_time = datetime.now().strftime('%H:%M:%S')  # currently unused; kept for reference
output_file = open(f"test-output/{topic_prefix}_dedup_output.txt", "w")
output_file.write("DedupEnabled,TestRun,SendCount,AckCount,PosAckCount,NegAckCount,Received,NotReceived,ReceivedNoAck,MsgsWithDups,DJF,DJB,JF,JB\n")
output_file.flush()
order_file = open(f"test-output/{topic_prefix}_order_output.txt", "w")
# FIX: header previously had no trailing newline, fusing it with the first entry.
order_file.write("Log of duplicate blocks and out-of-order messages\n")
order_file.flush()

dedup_enabled_values = ["false", "true"]
timeout_values = [60000, 0]

for i in range(2):
    test_run = 1
    dedup_enabled = dedup_enabled_values[i]
    timeout = timeout_values[i]
    log(f"Running {test_num} runs with deduplication enabled = {dedup_enabled}")
    create_cluster()
    while test_run <= test_num:
        # run test
        topic = f"{topic_prefix}_{str(test_run)}_dedup_{dedup_enabled}"
        leader = create_topic(topic)
        duplicate_jump_forward = 0
        duplicate_jump_back = 0
        jump_forward = 0
        jump_back = 0
        partitions = list()
        log(f"")
        log(f"Test Run #{test_run} on topic {topic} ------------")
        # - WRITE PHASE --------------------
        log("-------------------------------------------------")
        log("WRITE PHASE")
        log("-------------------------------------------------")
        messages_sent = defaultdict(list)
        messages_pos_acked = set()
        messages_neg_acked = set()
        pos_acked_file = f"producer-output/{topic}_pos_acked.txt"
        neg_acked_file = f"producer-output/{topic}_neg_acked.txt"
        try:
            bootstrap_servers = get_broker_ips()
            produce_with_java(topic, count, bootstrap_servers, pos_acked_file, neg_acked_file, dedup_enabled)
            log("Produce ended")
        except KeyboardInterrupt:
            log("Producer cancelled")
            sys.exit(1)
        except Exception as ex:
            template = "An exception of type {0} occurred. Arguments:{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            log("The Java producer has failed!!!")
            log(message)
            sys.exit(1)
        # - READ PHASE --------------------
        if test_type == "kill-leader":
            start_downed_broker()
            time.sleep(10)
        log("-------------------------------------------------")
        log("READ PHASE")
        log("-------------------------------------------------")
        received_count = 0
        try:
            read()
        except KeyboardInterrupt:
            log("Reader cancelled")
            sys.exit(1)
        not_received = 0
        received_no_ack = 0
        msgs_with_dups = 0
        received = 0
        for msg_val, msg_ids in messages_sent.items():
            received += len(msg_ids)
            if len(msg_ids) == 0 and msg_val in messages_pos_acked:
                not_received += 1
            elif len(msg_ids) == 1 and msg_val not in messages_pos_acked:
                received_no_ack += 1
            elif len(msg_ids) > 1:
                msgs_with_dups += 1
        send_count = len(messages_sent)
        ack_count = len(messages_pos_acked) + len(messages_neg_acked)
        pos_ack_count = len(messages_pos_acked)
        neg_ack_count = len(messages_neg_acked)
        log("Results --------------------------------------------")
        log(f"Final send count: {str(send_count)}")
        log(f"Final ack count: {str(ack_count)}")
        log(f"Final positive ack count: {str(pos_ack_count)}")
        log(f"Final negative ack count: {str(neg_ack_count)}")
        log(f"Messages received: {str(received)}")
        log(f"Acked messages missing: {str(not_received)}")
        log(f"Non-acked messages received: {str(received_no_ack)}")
        log(f"Duplicates: {msgs_with_dups}")
        log(f"Duplicate Jump Forward: {duplicate_jump_forward}")
        log(f"Duplicate Jump Back: {duplicate_jump_back}")
        log(f"Non-Duplicate Jump Forward: {jump_forward}")
        log(f"Non-Duplicate Jump Back: {jump_back}")
        log("----------------------------------------------------")
        log(f"{dedup_enabled},{str(test_run)},{str(send_count)},{str(ack_count)},{str(pos_ack_count)},{str(neg_ack_count)},{str(received)},{str(not_received)},{str(received_no_ack)},{str(msgs_with_dups)},{str(duplicate_jump_forward)},{str(duplicate_jump_back)},{str(jump_forward)},{str(jump_back)}", True)
        time.sleep(20)
        test_run += 1
16,181
5,188
"""Tests for string representations of Quantities and Units, i.e. __repr__ and __str__"""
from units import unit
from units.predefined import define_units
from units.quantity import Quantity
from units.registry import REGISTRY


def test_quantity_repr():
    """Developer-friendly string representation of quantities."""
    assert repr(Quantity(1, unit('m'))) == "Quantity(1, LeafUnit('m', True))"


def test_quantity_str():
    """User-friendly string representation of quantities."""
    assert str(Quantity(1, unit('m'))) == "1.00 m"


def test_leaf_unit_repr():
    """Developer-friendly string representation of leaf units."""
    assert repr(unit('m')) == "LeafUnit('m', True)"


def test_leaf_unit_str():
    """User-friendly string representation of leaf units"""
    assert str(unit('s')) == "s"


def test_composed_unit_repr():
    """Developer-friendly string representation of composed units."""
    # The order of units in the numerator is not guaranteed, so accept
    # either of the two possible renderings.
    test_repr = (repr(unit('m') *
                      unit('g') /
                      unit('s')))
    # non-deterministic
    assert test_repr in ["ComposedUnit([LeafUnit('g', True), " +
                         "LeafUnit('m', True)], " +
                         "[LeafUnit('s', True)], 1)",
                         "ComposedUnit([LeafUnit('m', True), " +
                         "LeafUnit('g', True)], " +
                         "[LeafUnit('s', True)], 1)"]


def test_composed_unit_str():
    """User-friendly string representation of composed units."""
    test_str = (str(unit('m') *
                    unit('g') /
                    unit('s')))
    assert test_str in ["g * m / s", "m * g / s"]  # non-deterministic.


def test_named_composed_unit_repr():
    """Developer-friendly string representation of named units."""
    assert (repr(unit('km')) ==
            "NamedComposedUnit('km', " +
            "ComposedUnit([LeafUnit('m', True)], " +
            "[], 1000), False)")


def test_named_composed_unit_str():
    """User-friendly string representation of named units."""
    assert str(unit('mi')) == 'mi'


def setup_module(module):
    # Disable warning about not using module.
    # pylint: disable=W0613
    """Called by py.test before running any of the tests here."""
    # Populates the unit registry with the predefined units ('m', 'km', ...)
    # that every test above looks up.
    define_units()


def teardown_module(module):
    # Disable warning about not using module.
    # pylint: disable=W0613
    """Called after running all of the tests here."""
    # Clear the shared registry so units defined here don't leak into
    # other test modules.
    REGISTRY.clear()
2,374
742
from rest_framework.permissions import BasePermission, SAFE_METHODS


class IsOwnerOrReadOnly(BasePermission):
    """Allow reads to anyone; restrict writes to the listing's seller."""

    message = 'You must be the owner of this object'

    def has_object_permission(self, request, view, obj):
        # Safe (read-only) HTTP methods are always permitted; otherwise
        # the requesting user must be the object's seller.
        is_read_only = request.method in SAFE_METHODS
        return is_read_only or obj.seller == request.user


class IsBuyerOrSeller(BasePermission):
    """Allow reads to anyone; restrict writes to either party of the listing."""

    message = 'You must either be the buyer or the seller of this listing'

    def has_object_permission(self, request, view, obj):
        # Safe (read-only) HTTP methods are always permitted.
        if request.method in SAFE_METHODS:
            return True
        # Writes require the requester to be the seller or the buyer.
        is_seller = obj.seller == request.user
        is_buyer = obj.buyer == request.user
        return is_seller or is_buyer
643
192
#!/usr/bin/python # # FishPi - An autonomous drop in the ocean # # Simple test of PWM motor and servo drive # import logging import raspberrypi from time import sleep from drive_controller import AdafruitDriveController if __name__ == "__main__": logger = logging.getLogger() logger.setLevel(logging.DEBUG) console = logging.StreamHandler() logger.addHandler(console) print "testing drive controller..." drive = AdafruitDriveController(debug=True, i2c_bus=raspberrypi.i2c_bus()) print "run ahead..." drive.set_throttle(0.5) sleep(0.5) drive.set_throttle(1.0) sleep(0.5) drive.set_throttle(0.5) sleep(2) print "run 0%..." drive.set_throttle(-1.0) sleep(2) drive.set_throttle(0.0) sleep(2) print "run reverse for 2 sec" drive.set_throttle(-0.5) sleep(0.5) drive.set_throttle(-1.0) sleep(2) print "and back to neutral..." drive.set_throttle(0.0) sleep(5)
969
365
import logging

import numpy as np
import pandas as pd
import scipy.stats as ss
from scipy.linalg import eig
from numba import jit

import sg_covid_impact

logger = logging.getLogger(__name__)

np.seterr(all="raise")  # Raise errors on floating point errors


def process_complexity(df, dataset, year, geo_type, cluster, PCI=False):
    """Calculate complexity variables aggregated over the columns.

    Calculates: size, complexity index, complexity outlook index

    Args:
        df (pandas.DataFrame): Long dataframe
         Expected columns: `{"geo_nm", "geo_cd", cluster, "value"}`
        year (str): Year
        dataset (str): Name of dataset
        geo_type (str): Type of regional geography
        cluster (str): Name of cluster column to use to pivot on
        PCI (bool, optional): If True, calculate product complexity by
         transposing input  # TODO refactor outside of function

    Returns:
        pandas.DataFrame
    """
    X = (
        df.pipe(pivot_area_cluster, cluster).fillna(0)
        # Transpose if PCI
        .pipe(lambda x: x.T if PCI else x)
    )
    X.index.name = "cluster"

    size = X.sum(1).to_frame("size")
    complexity = (
        X.pipe(create_lq, binary=True)
        .pipe(calc_eci, sign_correction=X.sum(1))
        .pipe(lambda x: x.rename(columns={"eci": "pci"}) if PCI else x)
    )
    outlook = X.pipe(complexity_outlook_index).to_frame("coi" if not PCI else "poi")
    return (
        size.join(complexity)
        .join(outlook)
        .assign(year=year, geo_type=geo_type, source=dataset, cluster_type=cluster)
    )


def _melt_keep_index(df, value_name="value"):
    """Fully melt a dataframe keeping index, setting new index as all but `value`"""
    id_vars = df.index.names
    return (
        df.reset_index()
        .melt(id_vars=id_vars, value_name=value_name)
        .set_index([*id_vars, df.columns.name])
    )


def process_complexity_unit(df, dataset, year, geo_type, cluster):
    """Calculate unaggregated complexity analysis variables

    Calculates: raw value, location quotient, RCA?, distance,
     opportunity outlook gain

    Args:
        df (pandas.DataFrame): Long dataframe
         Expected columns: `{"geo_nm", "geo_cd", cluster, "value"}`
        year (str): Year
        dataset (str): Name of dataset
        geo_type (str): Type of regional geography
        cluster (str): Name of cluster column to use to pivot on

    Returns:
        pandas.DataFrame
    """
    X = df.pipe(pivot_area_cluster, cluster).fillna(0)
    X.columns.name = "cluster"

    # Index: year, location, cluster, geo_type
    # value, LQ, RCA?, distance, OOG
    value = X.pipe(_melt_keep_index, "value")
    lq = X.pipe(create_lq).pipe(_melt_keep_index, "lq")
    has_rca = (lq > 1).rename(columns={"lq": "has_rca"})
    d = X.pipe(distance).pipe(_melt_keep_index, "distance")
    omega = 1 - X.pipe(proximity_density).pipe(_melt_keep_index, "omega")
    oog = opportunity_outlook_gain(X).pipe(_melt_keep_index, "oog")

    # FIX: dropped the trailing `.pipe(preview)` — the `preview` import
    # (mi_scotland.utils.pandas) was commented out at the top of the module,
    # so the call raised NameError at runtime.
    return (
        pd.concat([value, lq, has_rca, d, omega, oog], axis=1)
        .assign(year=year, geo_type=geo_type, source=dataset, cluster_type=cluster)
    )


@jit(nopython=True)
def _proximity_matrix(M):
    """`proximity_matrix` helper function"""
    n_c, n_p = M.shape
    phi = np.empty((n_p, n_p), dtype=np.float64)
    k = M.sum(0)  # Ubiquity
    for i in range(n_p):
        Mci = M[:, i]
        for j in range(n_p):
            # Matrix is symmetric: fill both (i, j) and (j, i) in one pass.
            if j > i:
                continue
            Mcj = M[:, j]
            m = max([k[i], k[j]])
            if m == 0:
                v = np.nan
            else:
                v = (Mci * Mcj).sum() / m
            phi[i, j] = v
            phi[j, i] = v
    return phi


def proximity_matrix(X, threshold=1):
    """Calculates proximity matrix

    Proximity between entries calculates the probability that given a revealed
    comparative advantage (RCA) in entity `j`, a location also has a RCA in
    entity `i`. The same probability is calculated with `i` and `j` permuted,
    and the minimum of the two probabilities is then taken.

    .. math::
        \\large{ \\phi_{ij} =
        \\min\\left\\{\\mathbb{P}(\\text{RCA}_i \\geq 1 |
        \\text{RCA}_j \\geq 1),
        \\mathbb{P}(\\text{RCA}_j \\geq 1 | \\text{RCA}_i \\geq
        1)\\right\\} } \\\\
        \\large{ \\phi_{ij} = \\frac{\\sum_c M_{ci} * M_{cj}}{\\max(k_i, k_j)} }
        k = \\sum_i M_{i, j}

    Args:
        X (pandas.DataFrame): Activity matrix [m x n]
        threshold (float, optional): Binarisation threshold for location quotient.

    Returns:
        pandas.DataFrame [n x n]
    """
    M = create_lq(X, binary=True, threshold=threshold)
    return pd.DataFrame(_proximity_matrix(M.values), index=M.columns, columns=M.columns)


def proximity_density(X, threshold=1):
    """Calculate proximity density

    .. math:
        \\omega_{ik} = \\frac{ \\sum_j M_{ij} \\phi_{jk}}{\\sum_j \\phi_{jk}}

    Args:
        X (pandas.DataFrame): Activity matrix [m x n]
        threshold (float, optional): Binarisation threshold for location quotient.

    Returns:
        pandas.DataFrame [m x n]
    """
    M = create_lq(X, binary=True, threshold=threshold)
    phi = proximity_matrix(X, threshold)
    return (M @ phi) / phi.sum(axis=0)


def distance(X, threshold=1):
    """Distance: 1 - proximity density w/ existing capabilities as NaN

    Args:
        X (pandas.DataFrame): [locations x activities]
        threshold (float, optional): Binarisation threshold for location quotient.

    Returns:
        pandas.DataFrame [locations x activites]
    """
    M = create_lq(X, threshold, binary=True)
    phi = proximity_matrix(X, threshold)
    # Mask activities a location already has (M == 1) with NaN.
    return (((1 - M) @ phi) / phi.sum(axis=1)) * M.applymap(
        lambda x: np.nan if x == 1 else 1
    )


def complexity_outlook_index(X, threshold=1):
    """Calculate economic complexity outlook index

    Args:
        X (pandas.DataFrame): [locations x activities]
        threshold (float, optional): Binarisation threshold for location quotient.

    Returns:
        pandas.Series [locations]
    """
    M = create_lq(X, threshold, binary=True)
    d = distance(X, threshold)
    PCI = calc_eci(M.T, sign_correction=X.sum(0))
    # calc_eci may drop all-zero rows/cols; re-align M and d to what survived.
    if PCI.shape[0] != M.shape[1]:
        M = M.loc[:, PCI.index]
        d = d.loc[:, PCI.index]
    return ((1 - d) * (1 - M) * PCI.values.T).sum(axis=1)


def opportunity_outlook_gain(X, threshold=1):
    """Calculate opportunity outlook gain

    Value for existing capabilities is NaN.

    Args:
        X (pandas.DataFrame): [locations x activities]
        threshold (float, optional): Binarisation threshold for location quotient.

    Returns:
        pandas.DataFrame [locations x activites]
    """
    M = create_lq(X, threshold, binary=True)
    phi = proximity_matrix(X, threshold)
    d = distance(X, threshold)
    PCI = calc_eci(M.T, sign_correction=X.sum(0))
    # calc_eci may drop all-zero rows/cols; re-align to what survived.
    if PCI.shape[0] != M.shape[1]:
        M = M.loc[:, PCI.index]
        phi = phi.loc[PCI.index, PCI.index]
        d = d.loc[:, PCI.index]
    return (
        (1 - M) * PCI.values.T @ (phi / phi.sum(0)) - ((1 - d) * PCI.values.T)
    ) * M.applymap(lambda x: np.nan if x == 1 else 1)


def pivot_area_cluster(df, cluster, aggfunc=sum):
    """Convert long data into a matrix, pivoting on `cluster`

    For example, take BRES/IDBR data at Local authority (LAD) geographic level
    and SIC4 sectoral level to create matrix with elements representing the
    activity level for a given LAD-SIC4 combination.

    Args:
        df (pandas.DataFrame): Long dataframe
         Expected Columns: `{"geo_nm", "geo_cd", cluster}`
        cluster (str): Column of the sector type to pivot on
        aggfunc (function, optional): Aggregation function passed to
         `pandas.DataFrame.pivot_table`.

    Returns:
        pandas.DataFrame: [number areas x number cluster]

    Note: Fills missing values with zero
    """
    return (
        df
        # Fill missing values with zeros
        .fillna(0)
        # Pivot to [areas x sectors]
        .pivot_table(
            index=["geo_cd", "geo_nm"],
            columns=cluster,
            values="value",
            fill_value=0,
            aggfunc=aggfunc,
        )
    )


def create_lq(X, threshold=1, binary=False):
    """Calculate the location quotient.

    Divides the share of activity in a location by the share of activity in
    the UK total.

    Args:
        X (pandas.DataFrame): Rows are locations, columns are sectors,
         and values are activity in a given sector at a location.
        threshold (float, optional): Binarisation threshold.
        binary (bool, optional): If True, binarise matrix at `threshold`.

    Returns:
        pandas.DataFrame

    #UTILS
    """
    Xm = X.values
    with np.errstate(invalid="ignore"):  # Accounted for divide by zero
        X = pd.DataFrame(
            (Xm * Xm.sum()) / (Xm.sum(1)[:, np.newaxis] * Xm.sum(0)),
            index=X.index,
            columns=X.columns,
        ).fillna(0)
    return (X > threshold).astype(float) if binary else X


def calc_fitness(X, n_iters):
    """Calculate the fitness metric of economic complexity

    Args:
        X (pandas.DataFrame): Rows are locations, columns are sectors,
         and values are activity in a given sector at a location.
        n_iters (int): Number of iterations to calculate fitness for

    Returns:
        pandas.DataFrame

    #UTILS
    """
    X = _drop_zero_rows_cols(X)
    x = np.ones(X.shape[0])
    for n in range(1, n_iters):
        x = (X.values / (X.values / x[:, np.newaxis]).sum(0)).sum(1)
        x = x / x.mean()
    return pd.DataFrame(np.log(x), index=X.index, columns=["fitness"])


def calc_fit_plus(X, n_iters, correction=True):
    """Calculate the fitness+ (ECI+) metric of economic complexity

    Args:
        X (pandas.Dataframe): Rows are locations, columns are sectors,
         and values are activity in a given sector at a location.
        n_iters (int): Number of iterations to calculate fitness for
        correction (bool, optional): If true, apply logarithmic correction.

    Returns:
        pandas.Dataframe

    #UTILS
    """
    X = _drop_zero_rows_cols(X)
    # Geometric mean is undefined for a boolean (0/1) matrix, so fall back
    # to the arithmetic mean in that case.
    if X.dtypes[0] == bool:
        norm_mean = np.mean
    else:
        norm_mean = ss.gmean
    x = X.values.sum(axis=1)
    x = x / norm_mean(x)
    for n in range(1, n_iters):
        x = (X.values / (X.values / x[:, np.newaxis]).sum(0)).sum(1)
        x = x / norm_mean(x)

    if correction:
        x = np.log(x) - np.log((X / X.sum(0)).sum(1))
    else:
        pass  # x = np.log(x)
    return pd.DataFrame(x, index=X.index, columns=["fit_p"])


def calc_eci(X, sign_correction=None):
    """Calculate the original economic complexity index (ECI).

    Args:
        X (pandas.DataFrame): Rows are locations, columns are sectors,
         and values are activity in a given sector at a location.
        sign_correction (pd.Series, optional): Array to correlate with ECI
         to calculate sign correction. Typically, ubiquity.
         If None, uses the sum over columns of the input data.

    Returns:
        pandas.DataFrame

    #UTILS
    """
    X = _drop_zero_rows_cols(X)
    C = np.diag(1 / X.sum(1))  # Diagonal entries k_C
    P = np.diag(1 / X.sum(0))  # Diagonal entries k_P
    H = C @ X.values @ P @ X.T.values
    w, v = eig(H, left=False, right=True)
    # The second eigenvector of H carries the complexity ranking.
    eci = pd.DataFrame(v[:, 1].real, index=X.index, columns=["eci"])

    # Positively correlate `sign_correction` (some proxy for diversity) w/ ECI
    if sign_correction is None:
        sign_correction = X.sum(1)
    else:
        sign_correction = sign_correction.loc[X.index]
    sign = np.sign(np.corrcoef(sign_correction, eci.eci.values)[0, 1])
    logger.info(f"CI sign: {sign}")

    return (eci - eci.mean()) / eci.std() * sign


def _drop_zero_rows_cols(X):
    """Drop regions/entities with no activity

    Fully zero column/row means ECI cannot be calculated
    """
    nz_rows = X.sum(1) > 0
    has_zero_rows = nz_rows.sum() != X.shape[0]
    if has_zero_rows:
        logger.warning(f"Dropping all zero rows: {X.loc[~nz_rows].index.values}")
        X = X.loc[nz_rows]

    nz_cols = X.sum(0) > 0
    has_zero_cols = nz_cols.sum() != X.shape[1]
    if has_zero_cols:
        logger.warning(f"Dropping all zero cols: {X.loc[:, ~nz_cols].columns.values}")
        X = X.loc[:, nz_cols]
    return X


def simple_diversity(X):
    """Generate two simple measures of diversity

    The first measure is the number of areas engaging in an activity
    The second measure is the number of areas with a revealed comparative
    advantage

    Args:
        X (pandas.DataFrame): Rows are locations, columns are sectors,
         and values are activity in a given sector at a location.

    Returns:
        pandas.DataFrame

    #UTILS
    """
    div_1 = X.pipe(lambda x: np.sum(x > 0, axis=1)).to_frame("div_n_active")
    div_2 = (
        X.pipe(create_lq, binary=True, threshold=1).sum(axis=1).to_frame("div_n_RCA")
    )
    return pd.concat([div_1, div_2], axis=1)
13,246
4,470
'''Ex 019 - A teacher wants to raffle one of his four students to erase the
board. Write a program that helps him, reading the students' names and
printing the chosen one's name on screen.'''
print('-' * 15, '>Ex 19<', '-' * 15)
from random import choice  # random.choice draws the winner.
# Reading the input data (the four students' names).
# Note: user-facing prompt/output strings are intentionally in Portuguese.
aluno1 = str(input('Digite o nome do aluno:'))
aluno2 = str(input('Digite o nome do aluno:'))
aluno3 = str(input('Digite o nome do aluno:'))
aluno4 = str(input('Digite o nome do aluno:'))
# Building a list so one of the entered names can be drawn.
lista = [aluno1, aluno2, aluno3, aluno4]
# Using choice to draw one element from the list.
escolhido = choice(lista)
# Printing the result for the user.
print('O escolhido foi {}'. format(escolhido))
768
281
from sklearn.model_selection import StratifiedKFold from evalml.preprocessing.data_splitters.balanced_classification_sampler import ( BalancedClassificationSampler ) from evalml.preprocessing.data_splitters.base_splitters import ( BaseUnderSamplingSplitter ) from evalml.preprocessing.data_splitters.training_validation_split import ( TrainingValidationSplit ) class BalancedClassificationDataTVSplit(BaseUnderSamplingSplitter): """Data splitter for generating training and validation split using Balanced Classification Data Sampler.""" def __init__(self, balanced_ratio=4, min_samples=100, min_percentage=0.1, test_size=0.25, shuffle=True, random_seed=0): """Create Balanced Classification Data TV splitter Arguments: balanced_ratio (float): The largest majority:minority ratio that is accepted as 'balanced'. For instance, a 4:1 ratio would be represented as 4, while a 6:5 ratio is 1.2. Must be greater than or equal to 1 (or 1:1). Defaults to 4. min_samples (int): The minimum number of samples that we must have for any class, pre or post sampling. If a class must be downsampled, it will not be downsampled past this value. To determine severe imbalance, the minority class must occur less often than this and must have a class ratio below min_percentage. Must be greater than 0. Defaults to 100. min_percentage (float): The minimum percentage of the minimum class to total dataset that we tolerate, as long as it is above min_samples. If min_percentage and min_samples are not met, treat this as severely imbalanced, and we will not resample the data. Must be between 0 and 0.5, inclusive. Defaults to 0.1. test_size (float): The size of the test split. Defaults to 0.25. shuffle (bool): Whether or not to shuffle the data before splitting. Defaults to True. random_seed (int): The seed to use for random sampling. Defaults to 0. 
""" self.sampler = BalancedClassificationSampler(balanced_ratio=balanced_ratio, min_samples=min_samples, min_percentage=min_percentage, random_seed=random_seed) super().__init__(sampler=self.sampler, n_splits=1, random_seed=random_seed) self.shuffle = shuffle self.test_size = test_size self.balanced_ratio = balanced_ratio self.min_samples = min_samples self.min_percentage = min_percentage self.splitter = TrainingValidationSplit(test_size=test_size, shuffle=shuffle, random_seed=random_seed) class BalancedClassificationDataCVSplit(BaseUnderSamplingSplitter): """Data splitter for generating k-fold cross-validation split using Balanced Classification Data Sampler.""" def __init__(self, balanced_ratio=4, min_samples=100, min_percentage=0.1, n_splits=3, shuffle=True, random_seed=0): """Create Balanced Classification Data CV splitter Arguments: balanced_ratio (float): The largest majority:minority ratio that is accepted as 'balanced'. For instance, a 4:1 ratio would be represented as 4, while a 6:5 ratio is 1.2. Must be greater than or equal to 1 (or 1:1). Defaults to 4. min_samples (int): The minimum number of samples that we must have for any class, pre or post sampling. If a class must be downsampled, it will not be downsampled past this value. To determine severe imbalance, the minority class must occur less often than this and must have a class ratio below min_percentage. Must be greater than 0. Defaults to 100. min_percentage (float): The minimum percentage of the minimum class to total dataset that we tolerate, as long as it is above min_samples. If min_percentage and min_samples are not met, treat this as severely imbalanced, and we will not resample the data. Must be between 0 and 0.5, inclusive. Defaults to 0.1. n_splits (int): The number of splits to use for cross validation. Defaults to 3. shuffle (bool): Whether or not to shuffle the data before splitting. Defaults to True. random_seed (int): The seed to use for random sampling. Defaults to 0. 
""" self.sampler = BalancedClassificationSampler(balanced_ratio=balanced_ratio, min_samples=min_samples, min_percentage=min_percentage, random_seed=random_seed) super().__init__(sampler=self.sampler, n_splits=n_splits, random_seed=random_seed) self.shuffle = shuffle self.balanced_ratio = balanced_ratio self.min_samples = min_samples self.min_percentage = min_percentage self.splitter = StratifiedKFold(n_splits=n_splits, shuffle=shuffle, random_state=random_seed)
4,807
1,358
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import * class AlipayOpenAuthUserauthTokenCreateModel(object): def __init__(self): self._scopes = None self._user_id = None @property def scopes(self): return self._scopes @scopes.setter def scopes(self, value): self._scopes = value @property def user_id(self): return self._user_id @user_id.setter def user_id(self, value): self._user_id = value def to_alipay_dict(self): params = dict() if self.scopes: if hasattr(self.scopes, 'to_alipay_dict'): params['scopes'] = self.scopes.to_alipay_dict() else: params['scopes'] = self.scopes if self.user_id: if hasattr(self.user_id, 'to_alipay_dict'): params['user_id'] = self.user_id.to_alipay_dict() else: params['user_id'] = self.user_id return params @staticmethod def from_alipay_dict(d): if not d: return None o = AlipayOpenAuthUserauthTokenCreateModel() if 'scopes' in d: o.scopes = d['scopes'] if 'user_id' in d: o.user_id = d['user_id'] return o
1,327
442
from .data import CovidData import datetime as dt from matplotlib.offsetbox import AnchoredText import pandas as pd import seaborn as sns import geopandas as gpd import matplotlib.pyplot as plt plt.style.use('ggplot') def pan_duration(date): """Return the duration in days of the pandemic. As calculated from the gov.uk API. It subtracts the first date entry in the API data from the most recent date entry. Args: date (datetime): DataFrame column (i.e Series) containing date field as downloaded from the gov.uk API by get_national_data() method from CovidData Class. Returns: datetime: Duration of pandemic in days as datetime object. """ return (date[0] - date[-1]).days def validate_input(df): """Check that input into the plotting functions is of the correct type. Args: df (Pandas DataFrame): this is intended to be the plotting parameter Raises: TypeError: if parameter is not a DataFrame """ # if for_function == 'deaths' or for_function == 'cases': # expected_cols = {'cases_cumulative', 'cases_demographics', # 'cases_newDaily', 'case_rate', 'date', # 'death_Demographics', 'name', 'vac_firstDose', # 'vac_secondDose'} if not isinstance(df, pd.DataFrame): raise TypeError('Parameter must be DataFrame, use get_regional_data' + ' method from CovidData class.') # if set(df.columns) != expected_cols: # raise ValueError('Incorrect features. Expecting output from' # + ' get_regional_data method from CovidData class') def my_path(): """Find correct path at module level for geo_data files. Returns: [type]: [description] """ from pathlib import Path base = Path(__file__).resolve().parent / 'geo_data' return base def daily_case_plot(df, pan_duration=pan_duration, save=False): """Create a matplotlib plot of case numbers in the UK. Calculated over the duration of the pandemic.Display text information giving the most recent daily number, the highest daily number and the date recorded, the total cumulative number of cases and the duration of the pandemic in days. 
Args: df (DataFrame): containing covid data retrieved from CovidData class using get_national_data() or get_UK_data() method. pan_duration (function, optional): Defaults to pan_duration. save (bool, optional): set True to save plot. Defaults to False. Returns: - Matplotlib plot, styled using matplotlib template 'ggplot' """ # Create Variables we wish to plot cases = df['case_newCases'].to_list() date = df['date'].to_list() cumulative = df['case_cumulativeCases'].to_list() # Find date of highest number of daily cases high, arg_high = max(cases), cases.index(max(cases)) high_date = date[arg_high].strftime('%d %b %Y') duration = pan_duration(date=date) # Create matplotlib figure and specify size fig = plt.figure(figsize=(12, 10)) plt.style.use('ggplot') ax = fig.add_subplot() # Plot varibles ax.plot(date, cases) # Style and label plot ax.set_xlabel('Date') ax.set_ylabel('Cases') ax.fill_between(date, cases, alpha=0.3) ax.set_title('Number of people who tested positive for Covid-19 (UK)', fontsize=18) at = AnchoredText(f"Most recent new cases\n{cases[0]:,.0f}\ \nMax new cases\n{high:,.0f}: {high_date}\ \nCumulative cases\n{cumulative[0]:,.0f}\ \nPandemic duration\n{duration} days", prop=dict(size=16), frameon=True, loc='upper left') at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2") ax.add_artist(at) ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data', xy=(0.25, 0.0175), xycoords='figure fraction', fontsize=12, color='#555555') plt.style.use('ggplot') if save: plt.savefig(f"{date[0].strftime('%Y-%m-%d')}-case_numbers_plot"); plt.show() def regional_plot_cases(save=False): """Plot regional case numbers on a map of the UK. Function collects data using CovidData get_regional_data method. Args: save (bool, optional): If true will save plot. Defaults to False. 
Returns: Plot of regional case numbers on map of UK """ # Collect data regions = CovidData().get_regional_data() scotland = CovidData(nation='scotland').get_national_data() wales = CovidData(nation='wales').get_national_data() ni = CovidData(nation='northern ireland').get_national_data() regions = regions.assign(case_newCases=regions['cases_newDaily']) # Set date to plot date_selector = regions['date'][0] regions_date = regions.loc[regions['date'] == date_selector] scotland_date = \ scotland.loc[scotland['date'] == date_selector, ['date', 'name', 'case_newCases']] wales_date = wales.loc[wales['date'] == date_selector, ['date', 'name', 'case_newCases']] ni_date = ni.loc[ni['date'] == date_selector, ['date', 'name', 'case_newCases']] # Combine regional data into single dataframe final_df = pd.concat([regions_date, scotland_date, wales_date, ni_date], axis=0) file_path = my_path() / 'NUTS_Level_1_(January_2018)_Boundaries.shp' # Check required file exists try: # Read shape file geo_df = gpd.read_file(file_path) except: # bare except is not good practice, this should be changed print('Ensure you have imported geo_data sub-folder') geo_df['nuts118nm'] = \ geo_df['nuts118nm'].replace(['North East (England)', 'North West (England)', 'East Midlands (England)', 'West Midlands (England)', 'South East (England)', 'South West (England)'], ['North East', 'North West', 'East Midlands', 'West Midlands', 'South East', 'South West']) merged = geo_df.merge(final_df, how='left', left_on="nuts118nm", right_on="name") # Column to plot feature = 'case_newCases' # Plot range feature_min, feature_max = merged['case_newCases'].min(), \ merged['case_newCases'].max() # Create plot fig, ax = plt.subplots(1, figsize=(12, 10)) # Set style and labels ax.axis('off') ax.set_title(f'Number of new cases per region {date_selector}', fontdict={'fontsize': '18', 'fontweight': '3'}) ax.annotate('Source: gov.uk' + ' https://api.coronavirus.data.gov.uk/v1/data', xy=(0.25, .05), xycoords='figure fraction', 
fontsize=12, color='#555555') # Create colorbar sm = plt.cm.ScalarMappable(cmap='Reds', norm=plt.Normalize(vmin=feature_min, vmax=feature_max)) fig.colorbar(sm) # Create map merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax, edgecolor='0.8'); plt.show() if save: image = merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax, edgecolor='0.8'); image.figure.savefig(f'{date_selector}-regional_cases_plot') def regional_plot_rate(save=False): """Plot regional case rate per 100,000 on a map of the UK. Function collects data using CovidData get_regional_data method. Args: save (bool, optional): If true will save plot. Defaults to False. Returns: Plot of regional case rate on map of UK. """ # Collect data regions = CovidData().get_regional_data() scotland = CovidData(nation='scotland').get_national_data() wales = CovidData(nation='wales').get_national_data() ni = CovidData(nation='northern ireland').get_national_data() # Set date to plot date_selector = regions['date'][5] regions_date = regions.loc[regions['date'] == date_selector] scotland_date = scotland.loc[scotland['date'] == date_selector, ['date', 'name', 'case_rate']] wales_date = wales.loc[wales['date'] == date_selector, ['date', 'name', 'case_rate']] ni_date = ni.loc[ni['date'] == date_selector, ['date', 'name', 'case_rate']] # Combine regional data into single dataframe final_df = pd.concat([regions_date, scotland_date, wales_date, ni_date], axis=0) file_path = my_path() / 'NUTS_Level_1_(January_2018)_Boundaries.shp' # Check required file exists try: # Read shape file geo_df = gpd.read_file(file_path) except: # bare except should be changed, will do so in later interation print('Ensure you have imported geo_data sub-folder') geo_df['nuts118nm'] = \ geo_df['nuts118nm'].replace(['North East (England)', 'North West (England)', 'East Midlands (England)', 'West Midlands (England)', 'South East (England)', 'South West (England)'], ['North East', 'North West', 'East Midlands', 'West Midlands', 'South 
East', 'South West']) merged = geo_df.merge(final_df, how='left', left_on="nuts118nm", right_on="name") # Column to plot feature = 'case_rate' # Plot range feature_min, feature_max = merged['case_rate'].min(),\ merged['case_rate'].max() # Create plot fig, ax = plt.subplots(1, figsize=(12, 10)) # Set style and labels ax.axis('off') ax.set_title('Regional rate per 100,000 (new cases)', fontdict={'fontsize': '20', 'fontweight': '3'}) ax.annotate('Source: gov.uk' + ' https://api.coronavirus.data.gov.uk/v1/data', xy=(0.25, .05), xycoords='figure fraction', fontsize=12, color='#555555') # Create colorbar sm = plt.cm.ScalarMappable(cmap='Reds', norm=plt.Normalize(vmin=feature_min, vmax=feature_max)) fig.colorbar(sm) # Create map merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax, edgecolor='0.8'); plt.show() if save: image = merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax, edgecolor='0.8'); image.figure.savefig(f'{date_selector}-regional_rate_plot') def heatmap_cases(df): """Create heatmap of case numbers for duration of pandemic. Args: df (DataFrame): Covid case data retrieved by calling CovidData class method. Returns: Seaborn heatmap plot of case numbers for each day of the pandemic. 
""" # Variables to plot cases = df['case_newCases'].to_list() date = df['date'].to_list() # Create new DataFrame containing two columns: date and case numbers heat_df = pd.DataFrame({'date': date, 'cases': cases}, index=date) # Separate out date into year month and day heat_df['year'] = heat_df.index.year heat_df["month"] = heat_df.index.month heat_df['day'] = heat_df.index.day # Use groupby to convert data to wide format for heatmap plot x = heat_df.groupby(["year", "month", "day"])["cases"].sum() df_wide = x.unstack() # Plot data sns.set(rc={"figure.figsize": (12, 10)}) # Reverse colormap so that dark colours represent higher numbers cmap = sns.cm.rocket_r ax = sns.heatmap(df_wide, cmap=cmap) ax.set_title('Heatmap of daily cases since start of pandemic', fontsize=20) ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data', xy=(0.25, 0.01), xycoords='figure fraction', fontsize=12, color='#555555') plt.show() def local_rate_plot(save=False): """Plot local case rate per 100,000 on a map of the UK. Function collects data using CovidData get_regional_data method. Args: save (bool, optional): If true will save plot. Defaults to False. 
Returns: Plot of local case rate on map of UK """ # Find latest data recent_date = CovidData().get_regional_data() recent_date = recent_date['date'][5] # Select latest data from local data local = CovidData().get_local_data(date=recent_date) date_selector = recent_date local_date = local.loc[local['date'] == date_selector, ['date', 'name', 'case_rate']] file_path = my_path() / "Local_Authority_Districts.shp" # Check required file exists try: # Read shape file geo_df = gpd.read_file(file_path) except: # bare except should be changed, will do so in later interation print('Ensure you have imported geo_data sub-folder') local_date['name'] = \ local_date['name'].replace(['Cornwall and Isles of Scilly'], ['Cornwall']) merged = geo_df.merge(local_date, how='outer', left_on="lad19nm", right_on="name") # Column to plot feature = 'case_rate' # Plot range vmin, vmax = merged['case_rate'].min(), merged['case_rate'].max() # Create plot fig, ax = plt.subplots(1, figsize=(12, 10)) # Set style and labels ax.axis('off') ax.set_title(f'Local rate per 100,000 {recent_date}', fontdict={'fontsize': '20', 'fontweight': '3'}) ax.annotate('Source: gov.uk' + ' https://api.coronavirus.data.gov.uk/v1/data', xy=(0.25, .05), xycoords='figure fraction', fontsize=12, color='#555555') # Create colorbar sm = plt.cm.ScalarMappable(cmap='Reds', norm=plt.Normalize(vmin=vmin, vmax=vmax)) fig.colorbar(sm) # Create map merged.plot(column=feature, cmap='Reds', linewidth=0.2, ax=ax, edgecolor='0.8') plt.show() if save: image = merged.plot(column=feature, cmap='Reds', linewidth=0.2, ax=ax, edgecolor='0.8'); image.figure.savefig(f'{date_selector}-local_rate_plot') def local_cases_plot(save=False): """Plot local case numbers on a map of the UK. Function collects data using CovidData get_regional_data method. Args: save (bool, optional): If true will save plot. Defaults to False. 
""" # Find latest data recent_date = CovidData().get_regional_data() recent_date = recent_date['date'][0] # Select latest data from local data local = CovidData().get_local_data(date=recent_date) date_selector = recent_date local_date = local.loc[local['date'] == date_selector, ['date', 'name', 'case_newDaily']] file_path = my_path() / "Local_Authority_Districts.shp" # Check required file exists try: # Read shape file geo_df = gpd.read_file(file_path) except: # bare except should be changed, will do so in later interation print('Ensure you have imported geo_data sub-folder') local_date['name'] = \ local_date['name'].replace(['Cornwall and Isles of Scilly'], ['Cornwall']) merged = geo_df.merge(local_date, how='outer', left_on="lad19nm", right_on="name") # Column to plot feature = 'case_newDaily' # Plot range vmin, vmax = merged['case_newDaily'].min(), \ merged['case_newDaily'].max() # Create plot fig, ax = plt.subplots(1, figsize=(12, 10)) # Set style and labels ax.axis('off') ax.set_title(f'Number of new cases by local authority {recent_date}', fontdict={'fontsize': '20', 'fontweight': '3'}) ax.annotate('Source: gov.uk' + ' https://api.coronavirus.data.gov.uk/v1/data', xy=(0.25, .05), xycoords='figure fraction', fontsize=12, color='#555555') # Create colorbar sm = plt.cm.ScalarMappable(cmap='Reds', norm=plt.Normalize(vmin=vmin, vmax=vmax)) fig.colorbar(sm) # Create map merged.plot(column=feature, cmap='Reds', linewidth=0.2, ax=ax, edgecolor='0.8') plt.show() if save: image = merged.plot(column=feature, cmap='Reds', linewidth=0.2, ax=ax, edgecolor='0.8'); image.figure.savefig(f'{date_selector}-local_cases_plot') def case_demographics(df): """Produce a plot of the age demographics of cases across England. 
Args: df (DataFrame): this must be the dataframe produced by the get_regional_data method from the CovidData class Returns: Plot of case numbers broken down by age """ validate_input(df) df_list = df.loc[:, ['cases_demographics', 'date']] age_df = [] for i in range(df_list.shape[0]): if df_list.iloc[i, 0]: temp_df = pd.DataFrame(df_list.iloc[i, 0]) temp_df['date'] = df_list.iloc[i, 1] temp_df = temp_df.pivot(values='rollingRate', columns='age', index='date') age_df.append(temp_df) data = pd.concat(age_df) data.index = pd.to_datetime(data.index) data = \ data.assign(under_15=(data['00_04']+data['05_09']+data['10_14'])/3, age_15_29=(data['15_19']+data['20_24']+data['25_29'])/3, age_30_39=(data['30_34']+data['35_39'])/2, age_40_49=(data['40_44']+data['45_49'])/2, age_50_59=(data['50_54']+data['55_59'])/2) data.drop(columns=['00_04', '00_59', '05_09', '10_14', '15_19', '20_24', '25_29', '30_34', '35_39', '40_44', '45_49', '50_54', '55_59', '60_64', '65_69', '70_74', '75_79', '80_84', '85_89', '90+', 'unassigned'], inplace=True) date = data.index[0].strftime('%d-%b-%y') ready_df = data.resample('W').mean() ready_df.plot(figsize=(15, 10), subplots=True, layout=(3, 3), title=f'{date} - England case rate per 100,000 by age' + ' (weekly)') plt.style.use('ggplot') plt.show() def vaccine_demographics(df): """Plot of the age demographics of third vaccine uptake across England. Args: df ([DataFrame]): this must be the dataframe produced by the get_regional_data method from the CovidData class Returns: Plot of cumulative third vaccination numbers broken down by age. 
""" validate_input(df) df_list = df.loc[:, ['vac_demographics', 'date']] age_df = [] for i in range(df_list.shape[0]): if df_list.iloc[i, 0]: temp_df = pd.DataFrame(df_list.iloc[i, 0]) temp_df['date'] = df_list.iloc[i, 1] temp_df =\ temp_df.pivot(values= 'cumVaccinationThirdInjectionUptakeByVaccinationDatePercentage', columns='age', index='date') age_df.append(temp_df) data = pd.concat(age_df) data.index = pd.to_datetime(data.index) date = data.index[0].strftime('%d-%b-%y') ready_df = data.resample('W').mean() ready_df.plot(figsize=(15, 10), subplots=True, layout=(6, 3), title=f'{date} - England vaccine booster uptake (%) by age' + ' (weekly)') plt.style.use('ggplot') plt.show() def death_demographics(df): """Plot of the age demographics of rate of deaths across England. Args: df (DataFrame): this must be the dataframe produced by the get_regional_data method from the CovidData class Returns: Plot of death rate per 100,000 broken down by age. """ validate_input(df) df_list = df.loc[:, ['death_Demographics', 'date']] age_df = [] for i in range(df_list.shape[0]): if df_list.iloc[i, 0]: temp_df = pd.DataFrame(df_list.iloc[i, 0]) temp_df['date'] = df_list.iloc[i, 1] temp_df = temp_df.pivot(values='rollingRate', columns='age', index='date') age_df.append(temp_df) data = pd.concat(age_df) data.index = pd.to_datetime(data.index) data = \ data.assign(under_15=(data['00_04']+data['05_09']+data['10_14'])/3, age_15_29=(data['15_19']+data['20_24']+data['25_29'])/3, age_30_39=(data['30_34']+data['35_39'])/2, age_40_49=(data['40_44']+data['45_49'])/2, age_50_59=(data['50_54']+data['55_59'])/2) data.drop(columns=['00_04', '00_59', '05_09', '10_14', '15_19', '20_24', '25_29', '30_34', '35_39', '40_44', '45_49', '50_54', '55_59', '60_64', '65_69', '70_74', '75_79', '80_84', '85_89', '90+'], inplace=True) date = data.index[0].strftime('%d-%b-%y') ready_df = data.resample('W').mean() ready_df.plot(figsize=(15, 10), subplots=True, layout=(3, 3), title=f'{date} - England death rate per 
100,000 by age' + ' (weekly)') plt.style.use('ggplot') plt.show() def daily_deaths(df, pan_duration=pan_duration, save=False): """Plot number of people died per day within 28 days of 1st +ve test. COVID-19 deaths over time, from the start of the pandemic March 2020. Args: df (DataFrame): requires data from get_uk_data method pan_duration (function, optional): use pre specified pan_duration. Defaults to pan_duration. save (bool, optional): [description]. Defaults to False. Returns: Matplotlib plot, styled using matplotlib template 'ggplot' """ daily_deaths = df['death_dailyDeaths'].to_list() date = df['date'].to_list() # cumulative = df['case_cumulativeCases'].to_list() # Find date of highest number of daily cases high, arg_high = max(daily_deaths), daily_deaths.index(max(daily_deaths)) # daily = df['death_dailyDeaths'][0] high_date = date[arg_high].strftime('%d %b %Y') # added the number of death for the last seven days duration = pan_duration(date=date) # Create matplotlib figure and specify size fig = plt.figure(figsize=(12, 10)) plt.style.use('ggplot') ax = fig.add_subplot() # Plot varibles ax.plot(date, daily_deaths) # Style and label plot ax.set_xlabel('Date') ax.set_ylabel('Daily deaths') ax.fill_between(date, daily_deaths, alpha=0.3) ax.set_title('Deaths within 28 days of positive test (UK)', fontsize=18) at = AnchoredText(f"Most recent daily deaths\n{daily_deaths[0]:,.0f}\ \nMax daily deaths\n{high:,.0f}: {high_date}\ \nPandemic duration\n{duration} days", prop=dict(size=16), frameon=True, loc='upper left') # \nCumulative cases\n{cumulative[0]:,.0f}\ at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2") ax.add_artist(at) ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data', xy=(0.25, 0.0175), xycoords='figure fraction', fontsize=12, color='#555555') if save: plt.savefig(f"casenumbers{date[0].strftime('%Y-%m-%d')}") plt.show() def cumulative_deaths(df, pan_duration=pan_duration, save=False): """Plot cum number of people who died within 
28 days of +ve test. Total COVID-19 deaths over time, from the start of the pandemic March 2020. Args: df (DataFrame): containing covid data retrieved from CovidData pan_duration ([function], optional): Defaults to pan_duration. save (bool, optional): True to save plot. Defaults to False. Returns: Matplotlib plot, styled using matplotlib template 'ggplot' """ df = df.fillna(0) cum_deaths = df["death_cumulativeDeaths"].to_list() date = df['date'].to_list() # cumulative = df['death_cumulativeDeaths'].to_list() # Find date of highest number of daily cases high, arg_high = max(cum_deaths), cum_deaths.index(max(cum_deaths)) # daily = df["death_cumulativeDeaths"][0] high_date = date[arg_high].strftime('%d %b %Y') # added the number of death for the last seven days duration = pan_duration(date=date) # Create matplotlib figure and specify size fig = plt.figure(figsize=(12, 10)) ax = fig.add_subplot() # Plot varibles ax.plot(date, cum_deaths) # Style and label plot ax.set_xlabel('Date') ax.set_ylabel('Cumulative deaths') ax.fill_between(date, cum_deaths, alpha=0.3) ax.set_title('Cumulative deaths within 28 days of positive test (UK)', fontsize=18) at = AnchoredText(f"Last cumulative deaths\n{high:,.0f}: {high_date}\ \nPandemic duration\n{duration} days", prop=dict(size=16), frameon=True, loc='upper left') # \nCumulative cases\n{cumulative[0]:,.0f}\ at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2") ax.add_artist(at) ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data', xy=(0.25, 0.0175), xycoords='figure fraction', fontsize=12, color='#555555') plt.style.use('ggplot') if save: plt.savefig(f"casenumbers{date[0].strftime('%Y-%m-%d')}") plt.show() def regional_plot_death_rate(save=False): """Plot regional deaths rate per 100,000 on a map of the UK. Function collects data using CovidData get_regional_data method. Args: save (bool, optional): True will save plot. Defaults to False. 
Returns: Plot of regional case rate on map of UK """ # Collect data regions = CovidData().get_regional_data() scotland = CovidData(nation='scotland').get_national_data() wales = CovidData(nation='wales').get_national_data() ni = CovidData(nation='northern ireland').get_national_data() # Set date to plot date_selector = regions['date'][7] regions_date = regions.loc[regions['date'] == date_selector] scotland_date = scotland.loc[scotland['date'] == date_selector, ['date', 'name', 'death_newDeathRate']] wales_date = wales.loc[wales['date'] == date_selector, ['date', 'name', 'death_newDeathRate']] ni_date = ni.loc[ni['date'] == date_selector, ['date', 'name', 'death_newDeathRate']] # Combine regional data into single dataframe final_df = pd.concat([regions_date, scotland_date, wales_date, ni_date], axis=0) file_path = my_path() / 'NUTS_Level_1_(January_2018)_Boundaries.shp' # Check required file exists try: # Read shape file geo_df = gpd.read_file(file_path) except: # bare except should be changed, will do so in later interation print('Ensure you have imported geo_data sub-folder') geo_df['nuts118nm'] = \ geo_df['nuts118nm'].replace(['North East (England)', 'North West (England)', 'East Midlands (England)', 'West Midlands (England)', 'South East (England)', 'South West (England)'], ['North East', 'North West', 'East Midlands', 'West Midlands', 'South East', 'South West']) merged = geo_df.merge(final_df, how='left', left_on="nuts118nm", right_on="name") # Column to plot feature = 'death_newDeathRate' # Plot range feature_min, feature_max = merged['death_newDeathRate'].min(),\ merged['death_newDeathRate'].max() # Create plot fig, ax = plt.subplots(1, figsize=(12, 10)) # Set style and labels ax.axis('off') ax.set_title('Regional rate per 100,000 (new deaths)', fontdict={'fontsize': '20', 'fontweight': '3'}) ax.annotate('Source: gov.uk \ https://api.coronavirus.data.gov.uk/v1/data', xy=(0.25, .05), xycoords='figure fraction', fontsize=12, color='#555555') # Create colorbar 
sm = plt.cm.ScalarMappable(cmap='Reds', norm=plt.Normalize(vmin=feature_min, vmax=feature_max)) fig.colorbar(sm) # Create map merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax, edgecolor='0.8') plt.show() if save: image = merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax, edgecolor='0.8') image.figure.savefig(f'caserates{date_selector}') def regional_deaths_demo(save=False): """Plot number of deaths in the UK. Plot by age category (>60 , <60). Function collects data using CovidData get_regional_data method. Args: save (bool, optional): True will save plot. Defaults to False. Returns: Plot of regional deaths by age category (UK) """ CovidDataE = CovidData("england") regional = CovidDataE.get_regional_data() regional = \ regional.drop(regional.columns.difference(["date", "death_Demographics"]), 1) regional # remove empty lists in 'death_Demographcs column' regional = regional[regional["death_Demographics"].astype(bool)] # transform the regional dataframe to have 'age_categories' as columns # with 'deaths' values and 'date' as rows age_df = [] for i in range(regional.shape[0]): if regional.iloc[i, 1]: temp_df = pd.DataFrame(regional.iloc[i, 1]) temp_df['date'] = regional.iloc[i, 0] temp_df = temp_df.pivot(values='deaths', columns=['age'], index='date') age_df.append(temp_df) final_death_data = pd.concat(age_df) # create a dataframe with columns 'age category' and 'number of deaths' age_cat = ['00_04', '00_59', '05_09', '10_14', '15_19', '20_24', '25_29', '30_34', '35_39', '40_44', '45_49', '50_54', '55_59', '60+', '60_64', '65_69', '70_74', '75_79', '80_84', '85_89', '90+'] deaths = [] for ele in age_cat: x = final_death_data[ele].sum() deaths.append(x) deaths_df = pd.DataFrame(list(zip(age_cat, deaths)), columns=['age category', 'number of deaths']) # group age categories to have only <60 old years and 60+ cat_1 = deaths_df.loc[deaths_df['age category'] == '00_59'] cat_2 = deaths_df.loc[deaths_df['age category'] == '60+'] below_60 = 
cat_1['number of deaths'].sum() above_60 = cat_2['number of deaths'].sum() lst1 = ['<60', '60+'] lst2 = [below_60, above_60] final_deaths_age_cat = pd.DataFrame(list(zip(lst1, lst2)), columns=['age category', 'number of deaths']) # getting highest number of deaths for each age category # PLOTTING A BAR PLOT OF NUMBER OF DEATHS vs AGE CATEGORY fig = plt.figure(figsize=(12, 10)) ax = fig.add_subplot() # Plot varibles ax.bar(final_deaths_age_cat['age category'], final_deaths_age_cat['number of deaths']) # plot(date, cum_deaths) # Style and label plot ax.set_xlabel('Age category') ax.set_ylabel('Number of deaths') ax.fill_between(final_deaths_age_cat['age category'], final_deaths_age_cat['number of deaths'], alpha=0.3) ax.set_title('Number of deaths per age category (England)', fontsize=18) at = AnchoredText(f"Number of deaths:\ \nAge <60: {below_60}\ \nAge >60: {above_60}", prop=dict(size=16), frameon=True, loc='upper left') # \nCumulative cases\n{cumulative[0]:,.0f}\ at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2") ax.add_artist(at) ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data', xy=(0.25, 0.0175), xycoords='figure fraction', fontsize=12, color='#555555') plt.style.use('ggplot') plt.show() if save: date = dt.now() plt.savefig(f"casenumbers{date.strftime('%Y-%m-%d')}") def collect_hosp_data(country='england'): """Collect data for hosp and vac functions. Args: country (str, optional): Select country data. Defaults to 'england'. Returns: DataFrame: data in correct format for hosp and vac functions """ if country == 'england': hosp_data = CovidData("england").get_national_data() hosp_data["date"] = hosp_data["date"].astype('datetime64[ns]') hosp_data = hosp_data.fillna(0) return hosp_data else: hosp_uk = CovidData("england").get_uk_data() hosp_uk["date"] = hosp_uk["date"].astype('datetime64[ns]') hosp_uk = hosp_uk.fillna(0) return hosp_uk def hosp_cases_plot(): """Heatmap for the the daily number of hospital cases (England). 
Args: No args required, collects own data. Returns : Seaborn heatmap plot for the number of hospital cases per day of the pandemic. """ hosp_data = collect_hosp_data() hosp_cases_col = ["date", "hosp_hospitalCases"] hosp_data1 = hosp_data.loc[:, hosp_cases_col] hosp_data1.loc[:, ["Day"]] = hosp_data1["date"].apply(lambda x: x.day) hosp_data1["date"] = hosp_data1.date.dt.strftime("%Y-%m") newpivot = hosp_data1.pivot_table("hosp_hospitalCases", index="date", columns="Day") cmap = sns.cm.rocket_r plt.figure(figsize=(16, 9)) hm2 = sns.heatmap(newpivot, cmap=cmap) hm2.set_title("Heatmap of the daily number of hospital cases (England)", fontsize=14) hm2.set_xlabel("Day", fontsize=12) hm2.set_ylabel("Month and Year", fontsize=12) def hosp_newadmissions_plot(): """Heatmap for the the daily number of new hospital admissions (England). Args: No args required, collects own data. Returns : Seaborn heatmap plot for the number of new hospital admissions per day of the pandemic. """ hosp_data = collect_hosp_data() hosp_cases_col = ["date", "hosp_newAdmissions"] hosp_data2 = hosp_data.loc[:, hosp_cases_col] hosp_data2["Day"] = hosp_data2.date.apply(lambda x: x.day) hosp_data2["date"] = hosp_data2.date.dt.strftime("%Y-%m") newpivot = hosp_data2.pivot_table("hosp_newAdmissions", index="date", columns="Day") cmap = sns.cm.rocket_r plt.figure(figsize=(16, 9)) hm1 = sns.heatmap(newpivot, cmap=cmap) hm1.set_title("Heatmap of the daily number of new hospital admissions" + " (England)", fontsize=14) hm1.set_xlabel("Day", fontsize=12) hm1.set_ylabel("Month and Year", fontsize=12) def hosp_newadmissionschange_plot(): """Change in hospital admissions (England). Plot difference between the number of new hospital admissions during the latest 7-day period and the previous non-overlapping week. Args: No args required, collects own data. Returns : Lineplot of this difference over the months. 
""" hosp_data = collect_hosp_data() hosp_cases_col = ["date", "hosp_newAdmissionsChange"] hosp_data3 = hosp_data.loc[:, hosp_cases_col] x = hosp_data3["date"].dt.strftime("%Y-%m") y = hosp_data3["hosp_newAdmissionsChange"] fig, ax = plt.subplots(1, 1, figsize=(20, 3)) sns.lineplot(x=x, y=y, color="g") ax.set_title("Daily new admissions change (England)", fontsize=14) ax.invert_xaxis() ax.set_xlabel("Date", fontsize=12) ax.set_ylabel("New Admissions Change", fontsize=12) def hosp_occupiedbeds_plot(): """Plot daily number of COVID-19 patients in mechanical ventilator beds. Plots information for England. Args: No args required, collects own data. Returns : - Lineplot of this difference over the months. """ hosp_data = collect_hosp_data() hosp_cases_col = ["date", "hosp_covidOccupiedMVBeds"] hosp_data4 = hosp_data.loc[:, hosp_cases_col] fig, ax = plt.subplots(1, 1, figsize=(20, 3)) sns.lineplot(x=hosp_data4["date"].dt.strftime("%Y-%m"), y=hosp_data4["hosp_covidOccupiedMVBeds"], ax=ax, color="b") ax.set_title("Daily number of COVID occupied Mechanical Ventilator beds" + " (England)", fontsize=14) ax.invert_xaxis() ax.set_xlabel("Date", fontsize=12) ax.set_ylabel("Number of occupied MV beds", fontsize=12) def hosp_casesuk_plot(): """Heatmap for the the daily number of hospital cases in UK. Args: No args required, collects own data. Returns : Seaborn heatmap plot for the number of hospital cases per day of the pandemic. 
""" hosp_uk = collect_hosp_data(country='uk') hosp_cases_col = ["date", "hosp_hospitalCases"] hosp_data1 = hosp_uk.loc[:, hosp_cases_col] hosp_data1["Day"] = hosp_data1["date"].apply(lambda x: x.day) hosp_data1["date"] = hosp_data1.date.dt.strftime("%Y-%m") newpivot = hosp_data1.pivot_table("hosp_hospitalCases", index="date", columns="Day") cmap = sns.cm.rocket_r plt.figure(figsize=(16, 9)) hm2 = sns.heatmap(newpivot, cmap=cmap) hm2.set_title("Heatmap of the daily number of hospital cases in the UK", fontsize=14) hm2.set_xlabel("Day", fontsize=12) hm2.set_ylabel("Month and Year", fontsize=12) def hosp_newadmissionsuk_plot(): """Heatmap for the the daily number of new hospital admissions (UK). Args: No args required, collects own data. Returns : Seaborn heatmap plot for the number of new hospital admissions per day of the pandemic (UK). """ hosp_uk = collect_hosp_data(country='uk') hosp_cases_col = ["date", "hosp_newAdmissions"] hosp_data2 = hosp_uk.loc[:, hosp_cases_col] hosp_data2["Day"] = hosp_data2.date.apply(lambda x: x.day) hosp_data2["date"] = hosp_data2.date.dt.strftime("%Y-%m") newpivot = hosp_data2.pivot_table("hosp_newAdmissions", index="date", columns="Day") cmap = sns.cm.rocket_r plt.figure(figsize=(16, 9)) hm1 = sns.heatmap(newpivot, cmap=cmap) hm1.set_title("Heatmap of the daily number of new hospital admissions" + " in the UK", fontsize=14) hm1.set_xlabel("Day", fontsize=12) hm1.set_ylabel("Month and Year", fontsize=12) def hosp_occupiedbedsuk_plot(): """Plot daily number of COVID-19 patients in mechanical ventilator beds. Plots information for UK. Args: No args required, collects own data. Returns : - Lineplot of this difference over the months. 
""" hosp_uk = collect_hosp_data(country='uk') hosp_cases_col = ["date", "hosp_covidOccupiedMVBeds"] hosp_data4 = hosp_uk.loc[:, hosp_cases_col] fig, ax = plt.subplots(1, 1, figsize=(20, 3)) sns.lineplot(x=hosp_data4["date"].dt.strftime("%Y-%m"), y=hosp_data4["hosp_covidOccupiedMVBeds"], ax=ax, color="b") ax.set_title("Daily number of COVID occupied Mechanical Ventilator" + " beds in the UK", fontsize=14) ax.invert_xaxis() ax.set_xlabel("Date", fontsize=12) ax.set_ylabel("Number of occupied MV beds", fontsize=12) def vaccine_percentage(df): """Plot the percentage of the vaccinated population over time. Args: df (DataFrame): Requires data returned by get_uk_data or get_national_data methods Retuns: Plot of total percentage of population vaccinated """ df['date'] = df['date'].astype('datetime64[ns]') plt.figure(figsize=(14, 7)) plot1 = sns.lineplot(x='date', y='vac_total_perc', data=df) plt.ylim(0, 100) plot1.set_xlabel("Covid pandemic, up to date", fontsize=12) plot1.set_ylabel("Percentage", fontsize=12) plot1.set_title('Percentage of the vaccinated population over time', fontsize=14) # print(plot1) def vaccine_doses_plot(df): """Pllot both the first and second doses of vaccines. Daily information. 
Args: df (DataFrame): Requires data returned by get_national_data Returns: Plots of first and second vaccine doses since start of pandemic records """ df['date'] = df['date'].astype('datetime64[ns]') keep_col = ['date', 'vac_first_dose', 'vac_second_dose'] vaccines_melted = df[keep_col] vaccines_melted = vaccines_melted.melt('date', var_name="vaccine_doses", value_name='count') plt.figure(figsize=(14, 7)) plot = sns.lineplot(x='date', y='count', hue='vaccine_doses', data=vaccines_melted) plt.grid() plt.ylim(0, 50000000) plot.set_ylabel("count", fontsize=12) plot.set_xlabel("Covid pandemic, up to date", fontsize=12) plot.set_title('daily amount of first and second doses' + ' of vaccination administered', fontsize=14) # use hue = column to categorise the data # print(plot) def first_vaccination_hm(df): """Plot a heatmap of the first vaccine dose (daily). Args: df (DataFrame): Requires data returned by get_national_data Returns: Heatmap of first vaccine doses over time """ df['date'] = df['date'].astype('datetime64[ns]') df = df.fillna(0) keep_col_hm = ['date', 'vac_first_dose'] vaccines_hm = df.loc[:, keep_col_hm] vaccines_hm["Day"] = vaccines_hm.date.apply(lambda x: x.strftime("%d")) vaccines_hm.pivot_table(index="Day", columns="date", values="vac_first_dose") vaccines_hm.date = vaccines_hm.date.dt.strftime('%Y-%m') keep_colu = ['date', 'Day', 'vac_first_dose'] vaccines_hm = vaccines_hm[keep_colu] pivoted = vaccines_hm.pivot(columns='Day', index='date', values='vac_first_dose') pivoted = pivoted.fillna(0) plt.figure(figsize=(16, 9)) cmap = sns.cm.rocket_r plot_hm1 = sns.heatmap(pivoted, cmap=cmap) plot_hm1.set_title('heatmap of the first vaccination dose' + ' administered daily', fontsize=14) plot_hm1.set_ylabel('Year and month', fontsize=12) # print(plot_hm1) def second_vaccination_hm(df): """Plot a heatmap of the second vaccine dose (daily). 
Args: df (DataFrame): Requires data returned by get_national_data Returns: Heatmap of second vaccine doses over time """ df['date'] = df['date'].astype('datetime64[ns]') df = df.fillna(0) keep_col_hm = ['date', 'vac_second_dose'] vaccines_hm = df.loc[:, keep_col_hm] vaccines_hm["Day"] = vaccines_hm.date.apply(lambda x: x.strftime("%d")) vaccines_hm.pivot_table(index="Day", columns="date", values="vac_second_dose") vaccines_hm.date = vaccines_hm.date.dt.strftime('%Y-%m') keep_colu = ['date', 'Day', 'vac_second_dose'] vaccines_hm = vaccines_hm[keep_colu] pivoted = vaccines_hm.pivot(columns='Day', index='date', values='vac_second_dose') pivoted = pivoted.fillna(0) plt.figure(figsize=(16, 9)) cmap = sns.cm.rocket_r plot_hm2 = sns.heatmap(pivoted, cmap=cmap) plot_hm2.set_title('heatmap of the second vaccination dose' + ' administered daily', fontsize=14) plot_hm2.set_ylabel('Year and month', fontsize=12) # print(plot_hm2) def vaccines_across_regions(vaccines2): """Plot graph of the vaccination uptake percentage by English regions. Args: vaccines2 (DataFrame): data from get_regional_data required Returns: plot of vaccine uptake by regions in England """ keep_fd = ['date', 'name', 'vac_firstDose'] vaccines2['date'] = vaccines2['date'].astype('datetime64[ns]') vaccines_fd = vaccines2.loc[:, keep_fd] vaccines_fd.fillna(0, inplace=True) vaccines_fd plt.figure(figsize=(16, 9)) plot_fd = sns.lineplot(x='date', y='vac_firstDose', hue='name', data=vaccines_fd) plt.ylim(0, 100) plt.grid() plot_fd.set_ylabel("percentage", fontsize=12) plot_fd.set_xlabel("Covid pandemic, up to date", fontsize=12) plot_fd.set_title('Vaccination uptake by region', fontsize=14) # print(plot_fd)
46,745
16,417
# -*- coding: utf-8 -*- from __future__ import absolute_import import logging from contextlib import contextmanager from System.Runtime.InteropServices import Marshal @contextmanager def autorelease(comobj): """COM auto release contextmanager""" try: yield comobj finally: Marshal.ReleaseComObject(comobj) def is_com_obj(comobj): # return type(comobj).__name__ == '__ComObject' try: return Marshal.IsComObject(comobj) except: return False #separte logger for comrelease to avoid spamming of log file logger = logging.getLogger().getChild("comrelease") logger.setLevel(logging.INFO) #comment out this line for comrelease debugging #FIXME: log comrelease in separate file? class AutoReleasingComObject(object): ''' Wraps a given ComObject and allows to auto-release all ComObject-instances created by accessing attributes/methods of the ComObject. The AutoReleasingComObject-instance can be used as context-manager, which will release all generated ComObjects on exiting the with-context. By default, the wrapped ComObject in this AutoReleasingComObject-instance is also released. This is configurable by the release_self-parameter. 
''' def __init__(self, comobj, release_self=True): ''' Constructor ''' # if not type(comobj).__name__ == '__ComObject': # # FIXME: raise ERROR # #raise AttributeError("AutoReleasingComObject expects to wrap a ComObject.") # self._is_comobj = False # pass self._is_comobj = is_com_obj(comobj) self._comobj = comobj self._release_self = release_self self._accessed_com_attributes = [] self._within_context = False logger.debug("Com-Release: created %s", self) # Magic methods: https://rszalski.github.io/magicmethods/ ##### COMPARISION and OPERATORS ##### def __eq__(self, other): ''' Return true if containig ComObject is equal ''' if isinstance(other, AutoReleasingComObject): return self._comobj == other._comobj else: return self._comobj == other def __ne__(self, other): ''' Return true if containig ComObject is not equal ''' if isinstance(other, AutoReleasingComObject): return self._comobj != other._comobj else: return self._comobj != other # assignment is used with events def __iadd__(self, other): self._comobj += other def __isub__(self, other): self._comobj -= other ##### CLASS REPRESENTATION ##### # def __str__(self): #__str__ not required as __repr__ returns a string def __repr__(self): try: return "<AutoReleasingComObject for %s>" % (self._comobj) except SystemError: #in rare situations the com object is already released and logging calls __repr__ which throws SystemError return "<AutoReleasingComObject for <DISPOSED COM OBJECT>>" def __dir__(self): #this is essential for interactive python console # return dir(self._comobj) return sorted(set( dir(type(self)) + self.__dict__.keys() + dir(self._comobj) )) ##### ATTRIBUTE ACCESS ##### def __setattr__(self, attr, value): ''' Only allow to write attributes starting with _ All other attributes are written to wrapped ComObject ''' if attr.startswith("_"): super(AutoReleasingComObject, self).__setattr__(attr, value) else: setattr(self._comobj, attr, value) def __getattr__(self, attr): ''' Provides access to attributes and 
methods of the ComObject If attr is a ComObject, an AutoReleasingComObject-instance will be returned. If attr is another value, this value is returned. If attr is a method, a wrapper-method will be returned, which will create an AutoReleasingComObject-instance (or return a non-Com-value) after the method-call The ComObjects which are generated by attribute/method-access are stored. All these ComObjects can be released be calling dispose. Dispose will go down the AutoReleasingComObject-tree to automatically release all ComObjects accessed in this ComObject-tree. ''' # FIXME: hack to allow: .item[1]=xxx if attr.lower() == "item": return self value = getattr(self._comobj, attr) logger.debug("Com-Release: access to attribute %s", attr) if type(value).__name__ != 'DispCallable': # attribute did not return a function # create auto-release-object or directly return the value return self.create_and_register_auto_release_com_object(value) else: # attribute is actually a function # Return wrapper which creates auto-release-object after it has been called. # WrappedDispCallable additionally allows array-access to the function, so that # foo.item(1), foo.item[1], foo.item(1,2), foo.item[1,2] # all work. return WrappedDispCallable(self, attr) ##### CUSTOM SEQUENCES ##### #TODO: #def __len__(self): #def __delitem__(self, key): #def __reversed__(self): #def __contains__(self, item): def __getitem__(self, key): ''' If wrapped ComObject is subscriptable, the allow array access. ComObject are wrapped as AutoReleasingComObject, before they are returned. ''' if hasattr(self._comobj, 'Item'): return_value = self._comobj.Item(key) return self.create_and_register_auto_release_com_object(return_value) else: raise TypeError('\'%s\' object is not subscriptable' % type(self._comobj)) # FIXME: setting value should also be possible through: .item[1]=value def __setitem__(self, key, value): ''' If wrapped ComObject is subscriptable, the allow array access. 
ComObject are wrapped as AutoReleasingComObject, before they are returned. ''' if hasattr(self._comobj, 'Item'): self._comobj[key] = value else: raise TypeError('\'%s\' object is not subscriptable' % type(self._comobj)) def __iter__(self): ''' If wrapped ComObject is iterable, the return an iterator. The iterator will wrap each ComObject as AutoReleasingComObject, before they are returned. ''' if hasattr(self._comobj, 'Item') and hasattr(self._comobj, 'Count'): for i in range(self._comobj.Count): yield AutoReleasingComObject(self._comobj.Item(i+1)) else: raise TypeError('iteration over non-sequence of type %s' % type(self._comobj)) ##### REFLECTION ##### #TODO: #def __instancecheck__(self, instance): #def __subclasscheck__(self, subclass): ##### CALLABLE OBJECTS ##### def __call__(self, *args, **kwargs): value = self._comobj(*args, **kwargs) return self.create_and_register_auto_release_com_object(value) ##### CONTEXT MANAGER ##### def __enter__(self): ''' Allow usage as context-manager (with-statement). ''' logger.info("Com-Release: entering context for %s", self) self._within_context = True return self def __exit__(self, exc_type, exc_val, exc_tb): ''' Allow usage as context-manager (with-statement). After exiting, all accessed ComObjects are released. By default, the wrapped ComObject in the AutoReleasingComObject-instance is also released. This is configurable by the release_self-parameter. ''' logger.info("Com-Release: exiting context for %s", self) self._within_context = False self.dispose() ##### AutoReleasingComObject Functionality ##### def create_and_register_auto_release_com_object(self, com_obj): ''' creates an AutoReleasingComObject-instance if com_obj is a ComObject, or returns the given value ''' if is_com_obj(com_obj): if self._is_comobj: auto_release_com_obj = AutoReleasingComObject(com_obj, release_self=True) logger.debug("Com-Release: created com-object %s", com_obj) else: # self is no com-Object, but the attribute is. 
# Hence, attribute is not generated here and should not be disposed. # therefore: release_self=False logger.debug("Com-Release: accessed existing com-object %s", com_obj) auto_release_com_obj = AutoReleasingComObject(com_obj, release_self=False) self._accessed_com_attributes.append(auto_release_com_obj) return auto_release_com_obj else: # value is no ComObject return com_obj def dispose(self): ''' Releases all ComObjects which were generated during the lifetime of the AutoReleasingComObject-instance. ComObjects are generated by accessing attributes or methods and are stored internally. The attribute/method-access (see __getattr__) wraps these ComObjects in AutoReleasingComObject-instances. This allows to store ComObjects which are generated further down the ComObjects-tree. Dispose will go down this AutoReleasingComObject-tree and call dispose on these instances as well. Therefore, all ComObjects accessed in the object-tree are released by a single dispose-call. ''' if self._within_context: logger.debug("Com-Release: dispose aborted on %s", self) return # release ComObjects generated further down the object-tree logger.debug("Com-Release: dispose on %s", self) for auto_release_com_obj in self._accessed_com_attributes: auto_release_com_obj.dispose() self._accessed_com_attributes = [] # release wrapped ComObject if self._release_self: logger.debug("Com-Release: releasing %s", self) Marshal.ReleaseComObject(self._comobj) class WrappedDispCallable(object): ''' A WrappedDispCallable instance represents a VBA-DispCallable-object and mimics its logic. The representing function can be called by function-call or through array-access. If the returned object is a ComObject, the object is wrapped as AutoReleasingComObject. ''' def __init__(self, auto_release_comobj, method): ''' Initialize WrappedDispCallable with an AutoReleasingComObject-instance and an attribute (string). 
''' self._auto_release_comobj = auto_release_comobj self._method = method def __call__(self, *args, **kwargs): ''' Calls the DispCallable-function and returns its result. Arguments, which are AutoReleasingComObject-instances are replaced by its ComObject before calling the function. If the returned object is a ComObject, the object is wrapped as AutoReleasingComObject. ''' args_converted = [x._comobj if isinstance(x, AutoReleasingComObject) else x for x in args] kwargs_converted = {key: value._comobj if isinstance(value, AutoReleasingComObject) else value for key,value in kwargs.items()} # call ComObject's method return_value = getattr(self._auto_release_comobj._comobj, self._method)(*args_converted, **kwargs_converted) # return wrapped ComObject return self._auto_release_comobj.create_and_register_auto_release_com_object(return_value) def __getitem__(self, key): ''' Allow array-access to the DispCallable-function, so that all of the following calls work and return the same result foo.item(1), foo.item[1] foo.item(1,2), foo.item[1,2] ''' if type(key) == tuple: # convert tuple to argument list return self.__call__(*key) else: return self.__call__(key) class AutoReleasingComIterator(object): def __init__(self, comobj, autorelease_callback): self._comobj = comobj self._autorelease_callback = autorelease_callback self._index = 0 def __iter__(self): return self def __next__(self): self._index += 1 if self._index > self._comobj.Count: raise StopIteration item = self.Item(self._index) return self._autorelease_callback(item)
12,969
3,542
"""Drop the ``credentials`` collection from the Atlas cluster.

Connects to MongoDB Atlas with the credentials configured below and removes
the ``credentials`` collection from the ``credentials`` database.
"""
from pymongo import MongoClient

# TODO: supply real credentials (ideally from environment variables) before
# running -- empty strings will fail Atlas authentication.
username = ""
password = ""

# Build the connection string once.  The previous extra client constructed
# from the literal '<username>:<password>' placeholder URI was dead code
# (immediately overwritten) and has been removed.
url = (
    f'mongodb+srv://{username}:{password}'
    '@cluster0.27gwi.mongodb.net/Cluster0?retryWrites=true&w=majority'
)
client = MongoClient(url)

# db = client.business
db = client.credentials
# Drop the 'credentials' collection inside the 'credentials' database.
db.credentials.drop()
389
142
""" Blurring of images =================== An example showing various processes that blur an image. """ import scipy.misc from scipy import ndimage import matplotlib.pyplot as plt face = scipy.misc.face(gray=True) blurred_face = ndimage.gaussian_filter(face, sigma=3) very_blurred = ndimage.gaussian_filter(face, sigma=5) local_mean = ndimage.uniform_filter(face, size=11) plt.figure(figsize=(9, 3)) plt.subplot(131) plt.imshow(blurred_face, cmap=plt.cm.gray) plt.axis('off') plt.subplot(132) plt.imshow(very_blurred, cmap=plt.cm.gray) plt.axis('off') plt.subplot(133) plt.imshow(local_mean, cmap=plt.cm.gray) plt.axis('off') plt.subplots_adjust(wspace=0, hspace=0., top=0.99, bottom=0.01, left=0.01, right=0.99) plt.show()
750
332
#! /usr/bin/env python
"""Merge one labelled pylint run into pylintReport.json.

Reads 'pylint.out' (parseable pylint output), counts message severities,
extracts the overall score, and stores everything in 'pylintReport.json'
keyed by source filename and the label supplied on the command line.
Repeated runs with different labels accumulate in the same report file.
"""

import json
from optparse import OptionParser

# BUGFIX: usage said "message" but the positional argument is a label.
usage = "usage: %prog [options] label"
parser = OptionParser(usage)
(options, args) = parser.parse_args()
if len(args) != 1:
    parser.error("You must supply a label")
label = args[0]

# Load any existing report so results accumulate across runs.
try:
    with open('pylintReport.json', 'r') as reportFile:
        report = json.load(reportFile)
except IOError:
    report = {}

warnings = 0
errors = 0
comments = 0
refactors = 0
score = 0
scorePrefix = 'Your code has been rated at '

with open('pylint.out', 'r') as pylintFile:
    for line in pylintFile:
        if line.startswith(scorePrefix):
            # BUGFIX: str.strip(chars) removes a *set of characters* from
            # both ends, not a prefix string -- slice the prefix off instead.
            score = line[len(scorePrefix):].split('/')[0]
            try:
                # 'filename' is only bound after a message line has been
                # parsed; a NameError here means the score line appeared
                # before any filename was seen.
                if filename not in report:
                    report[filename] = {}
                if label not in report[filename]:
                    report[filename][label] = {}
                if filename and label:
                    report[filename][label]['score'] = score
            except NameError:
                # Parenthesised single-arg print works on Python 2 and 3.
                print("Score of %s found, but no filename" % score)
        # Message lines look like: 'path.py:42: [C0103, name] message'
        parts = line.split(':')
        if len(parts) != 3:
            continue
        try:
            newFilename, lineNumber, rawMessage = parts
            newFilename = newFilename.strip()
            if not newFilename:
                # Don't update filename if we didn't find one
                continue
            lineNumber = int(lineNumber)
            filename = newFilename
            rmParts = rawMessage.split(']', 1)
            rawCode = rmParts[0].strip()
            message = rmParts[1].strip()
            severity = rawCode[1:2]   # e.g. 'C' from '[C0103, ...'
            code = rawCode[2:6]       # numeric message id
            shortMsg = rawCode[7:]
            msgParts = shortMsg.split(',')
            objectName = msgParts[1].strip()
            if severity == 'R':
                refactors += 1
            elif severity == 'W':
                warnings += 1
            elif severity == 'E':
                errors += 1
            elif severity == 'C':
                comments += 1
            if filename not in report:
                report[filename] = {}
            if label not in report[filename]:
                report[filename][label] = {}
            if 'events' not in report[filename][label]:
                report[filename][label]['events'] = []
            report[filename][label]['events'].append(
                (lineNumber, severity, code, objectName, message))
            report[filename][label]['refactors'] = refactors
            report[filename][label]['warnings'] = warnings
            report[filename][label]['errors'] = errors
            report[filename][label]['comments'] = comments
        # BUGFIX: also catch IndexError -- a line with three colon-parts but
        # no ']' (or no comma after the code) previously crashed the script.
        except (ValueError, IndexError):
            continue

with open('pylintReport.json', 'w') as reportFile:
    json.dump(report, reportFile, indent=2)
    reportFile.write('\n')
2,845
778
# -*- coding: utf-8 -*- """Unit test package for jinja_inflection."""
71
30
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.plugins.utils import downcast_plugins
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin

from smartsnippets_inherit.models import InheritPageContent
from smartsnippets_inherit.forms import InheritPageForm
from smartsnippets_inherit.settings import USE_BOOTSTRAP_ACE
from smartsnippets.settings import inherit_variable_pattern
from smartsnippets.models import Variable, SmartSnippetPointer

from contextlib import contextmanager
from itertools import chain


@contextmanager
def current_page(request, page):
    """Temporarily swap ``request.current_page`` to *page* for the context.

    Restores the original value (or ``None`` if it was unset) on exit,
    even if rendering inside the block raises.
    """
    original_page = getattr(request, 'current_page', None)
    try:
        setattr(request, 'current_page', page)
        yield
    finally:
        setattr(request, 'current_page', original_page)


class PageInheritPlugin(CMSPluginBase):
    """CMS plugin that renders placeholder content inherited from another page,
    optionally overriding smart-snippet variables for this placement."""

    model = InheritPageContent
    name = "Inherit Content from Page"
    render_template = 'smartsnippets/plugin.html'
    change_form_template = 'admin/smartsnippets_inherit/plugininherit_change_form.html'
    admin_preview = False
    form = InheritPageForm
    # Only page placeholders may host this plugin.
    page_only = True

    def render_inherited(self, context, instance):
        """Render the inherited placeholder's plugins and return the HTML.

        Returns '' when the source page is unpublished or has no matching
        placeholder.  Overridden variable values are injected into the
        template context only for the duration of the render.
        """
        content = ''
        if not instance.from_page.published:
            return content
        inherited = instance.get_placeholder()
        if not inherited:
            return content
        # prepare variables to be passed to the context with different values
        new_vars = {}
        for overwrite_var in instance.overwrite_variables.all():
            var = overwrite_var.to_variable()
            # Context key pattern that the snippet render looks up per-variable.
            context_var = inherit_variable_pattern.format(identifier=var.pk)
            new_vars[context_var] = var.formatted_value

        # Render as if the request were for the source page, so page-relative
        # lookups inside the snippets resolve against instance.from_page.
        with current_page(context.get('request'), instance.from_page):
            # inject new variables in context
            # so that snippet plugin render can pick them up
            context.update({name: value for name, value in new_vars.items()})
            # render plugins from the inherited section
            # with the updated context
            content = inherited.render(context, None)
            # remove overwritten data from context
            for name in new_vars.keys():
                if name in context:
                    del context[name]
        return content

    def render(self, context, instance, placeholder):
        """Standard CMSPluginBase entry point: expose rendered HTML as 'content'."""
        context.update({'content': self.render_inherited(context, instance)})
        return context

    def get_form(self, request, obj=None, **kwargs):
        """Attach the current page and ACE-theme flag to the admin form class."""
        formCls = super(PageInheritPlugin, self).get_form(
            request, obj, **kwargs)
        formCls.current_page = self.cms_plugin_instance.page or self.page
        formCls.use_ace_theme = USE_BOOTSTRAP_ACE
        return formCls

    def change_view(self, request, object_id, *args, **kwargs):
        """Admin change view, extended with the overridable snippet plugins
        found in the inherited placeholder (for display in the change form)."""
        extra_context = kwargs.get('extra_context', None) or {}
        try:
            plugin = InheritPageContent.objects.get(id=object_id)
            placeholder = plugin.get_placeholder()
            extra_context.update({
                'snippet_plugins': self.get_inherited_snippets(placeholder)
            })
        except (InheritPageContent.DoesNotExist, ):
            # No such plugin instance yet (e.g. add view) -- render without extras.
            pass
        kwargs['extra_context'] = extra_context
        return super(PageInheritPlugin, self).change_view(
            request, object_id, *args, **kwargs)

    def get_inherited_snippets(self, placeholder):
        """Return overridable SmartSnippetPointer plugins for *placeholder*.

        Walks from the placeholder's page up through its cached ancestors and
        returns, position-sorted, the snippet pointers (with variables) from
        the first ancestor whose same-named slot actually contains plugins.
        Returns [] when nothing suitable is found.
        """
        if not placeholder or not placeholder.page:
            return []

        def can_be_overwritten(plg):
            # Only exact SmartSnippetPointer instances that define variables
            # can have their values overridden.
            return (
                plg.__class__ is SmartSnippetPointer and
                plg.variables.exists()
            )

        page = placeholder.page
        slot = placeholder.slot
        pages = chain([page], page.get_cached_ancestors(ascending=True))
        for ancestor in pages:
            placeholder = ancestor.placeholders.filter(slot=slot)[:1]
            if not placeholder:
                continue
            placeholder = placeholder[0]
            plugins = downcast_plugins(placeholder.get_plugins())
            if not plugins:
                continue
            # First ancestor with content wins -- mirrors CMS inheritance.
            return sorted(
                filter(can_be_overwritten, plugins),
                key=lambda plg: plg.position,
            )
        return []


plugin_pool.register_plugin(PageInheritPlugin)
4,376
1,163
from django.conf import settings
from rest_framework.exceptions import ParseError
from rest_framework.parsers import BaseParser

import orjson

__all__ = ["ORJSONParser"]


class ORJSONParser(BaseParser):
    """DRF request parser that decodes JSON bodies with the orjson library."""

    media_type = "application/json"

    def parse(self, stream, media_type=None, parser_context=None):
        """Decode the request body into native Python objects.

        :param stream:
            A stream-like object representing the body of the request.
        :param media_type:
            If provided, this is the media type of the incoming request
            content specified in the `Content-Type` HTTP header.
        :param parser_context:
            If supplied, this argument will be a dictionary containing
            any additional context that may be required to parse the
            request content (view, request, args, kwargs by default).
        :return:
            Python native instance of the JSON string.
        :raises ParseError: when the body is not valid JSON.
        """
        context = parser_context if parser_context else {}
        charset = context.get("encoding", settings.DEFAULT_CHARSET)
        raw = stream.read()
        try:
            return orjson.loads(raw.decode(charset))
        except orjson.JSONDecodeError as exc:
            raise ParseError(f"JSON parse error - {exc}")
1,389
341
from djangosanetesting.cases import TemplateTagTestCase


class TestTagLib(TemplateTagTestCase):
    """Tests for the 'dsttesttags' tag library (the {% table %} tag)."""

    # Tag libraries loaded into every rendered template for this case.
    preload = ('dsttesttags',)

    def test_tag_error(self):
        # {% table %} with no arguments must raise a template syntax error.
        self.assert_raises(self.TemplateSyntaxError, self.render_template,
                           '{% table %}')

    def test_tag_output(self):
        # Each argument becomes a row; underscores split into cells.
        self.assert_equal(self.render_template('{% table x_y z %}'),
                          u'<table><tr><td>x</td><td>y</td></tr><tr><td>z</td></tr></table>')


class TestFilterLib(TemplateTagTestCase):
    """Tests for the 'dsttestfilters' filter library (the ihatebs filter)."""

    preload = ('dsttestfilters',)

    def test_filter_output(self):
        # ihatebs replaces 'b' characters with 'a'.
        self.assert_equal(self.render_template('{{ a|ihatebs }}', a='abc'),
                          u'aac')


class TestBoth(TestTagLib, TestFilterLib):
    """Combined case: both libraries preloaded; also verifies that dropping
    either library from ``preload`` breaks rendering."""

    preload = ('dsttesttags', 'dsttestfilters')

    def _call_test_render(self):
        # Template that needs the tag AND the filter library to render.
        return self.render_template('{% table b %}{{ a|ihatebs }}',
                                    a='a_bb_d b')

    def test_both_output(self):
        self.assert_equal(self._call_test_render(),
                          u'<table><tr><td>b</td></tr>'
                          '</table>a_aa_d a')

    def test_preload_none(self):
        # With no libraries preloaded, both constructs are unknown.
        self.preload = ()
        self.assert_raises(self.TemplateSyntaxError, self._call_test_render)

    def test_preload_tags_only(self):
        # Filter library missing -> ihatebs is an unknown filter.
        self.preload = ('dsttesttags',)
        self.assert_raises(self.TemplateSyntaxError, self._call_test_render)

    def test_preload_filters_only(self):
        # Tag library missing -> table is an unknown tag.
        self.preload = ('dsttestfilters',)
        self.assert_raises(self.TemplateSyntaxError, self._call_test_render)


class TestMisc(TemplateTagTestCase):
    """Context handling and error cases not tied to a specific library."""

    def test_context(self):
        # Unbound variables render as empty string; bound values are used.
        self.assert_equal(self.render_template('{{ cvar }}'), u'')
        self.assert_equal(self.render_template('{{ cvar }}', cvar=123),
                          u'123')

    def test_nonexistent_taglib(self):
        # Preloading an unknown library must fail at template compile time.
        self.preload = ('nonexistent',)
        self.assert_raises(self.TemplateSyntaxError, self.render_template,
                           'sthing')
1,942
633
# An example WSGI for use with mod_wsgi, edit as necessary # See http://mercurial.selenic.com/wiki/modwsgi for more information # Path to repo or hgweb config to serve (see 'hg help hgweb') config = "/path/to/repo/or/config" # Uncomment and adjust if Mercurial is not installed system-wide # (consult "installed modules" path from 'hg debuginstall'): #import sys; sys.path.insert(0, "/path/to/python/lib") # Uncomment to send python tracebacks to the browser if an error occurs: #import cgitb; cgitb.enable() # enable demandloading to reduce startup time from mercurial import demandimport; demandimport.enable() from mercurial.hgweb import hgweb application = hgweb(config)
680
213
"""Semi-automated uploader for iGEM wiki template pages.

For every file under ``production/<folder>`` this opens the matching wiki
edit page in a browser, copies the file's contents to the clipboard, and
waits for the user to paste and confirm before moving on.
"""
import os
from subprocess import run
import pyperclip
import webbrowser
from urllib import parse

# Root directory containing one sub-folder per asset type ('js', 'css', ...).
location = 'production'


def runOnSingleFolder(folder):
    """Walk ``location/folder`` and upload each file interactively.

    Args:
        folder: Sub-folder name, also used as the wiki template path segment.
    """
    file_list = os.listdir(os.path.join(location, folder))
    for fname in file_list:   # renamed from 'file' -- avoid shadowing builtin
        # BUGFIX/generalization: the old slice file[:-(len(folder) + 1)]
        # assumed every file's extension spells the folder name (e.g. '.js'
        # in folder 'js') and silently mangled any other name; splitext
        # strips whatever extension is actually present.
        file_noextend = os.path.splitext(fname)[0]
        url = (f'https://2021.igem.org/wiki/index.php?title=Template:'
               f'BNUZ-China/{folder}/{parse.quote(file_noextend)}&action=edit')
        webbrowser.open(url)
        print(url)
        with open(os.path.join(location, folder, fname), encoding='utf-8') as f:
            content = f.read()
        pyperclip.copy(content)
        # NOTE(review): prompt text mentions "js" even when uploading css.
        print('相应js代码已经复制,请粘贴至打开的网页,完成后请回车')
        input()  # block until the user confirms the paste


runOnSingleFolder('js')
runOnSingleFolder('css')
727
264
from .mscff_model import MSCFF
31
13
from .InputLayerStructure import InputLayerStructure from .LayerStructure import LayerStructure
96
24
from ursina import * app = Ursina() snake = Entity(model='cube', texture = 'assets\snake', scale=0.4, z=-1, collider='box') ground = Entity(model='cube', texture='grass',rotation=(90,0,0),scale=(5,1,5), z=1) apple = Entity(model='cube', texture='assets\\apple', scale=0.4, position=(1,-1,-1), collider='mesh') body = [Entity(model='cube', scale =0.2, texture='assets\\body') for i in range(14)] camera.orthographic = True camera.fov = 8 from random import randint dx = dy = 0 def update(): info = snake.intersects() if info.hit: apple.x = randint(-4,4)/2 apple.y = randint(-4,4)/2 new = Entity(model='cube', z = -1, scale=0.2, texture='assets\\body') body.append(new) for i in range(len(body)-1,0,-1): pos = body[i-1].position body[i].position = pos body[0].x = snake.x body[0].y = snake.y snake.x += time.dt * dx snake.y += time.dt * dy def input(key): global dx,dy for x,y,z in zip(['d','a'],[2,-2],[270,90]): if key==x: snake.rotation_z = z dx = y dy = 0 for x,y,z in zip(['w','s'],[2,-2],[180,0]): if key == x: snake.rotation_z = z dy = y dx = 0 app.run()
1,180
513
import json from cp_request import Attribute, NamedEntity, Unit, Value from cp_request.named_entity import NamedEntityEncoder, NamedEntityDecoder class TestNamedEntity: def test_entity(self): e1 = NamedEntity(name="one", reference="http://one.one") e2 = NamedEntity(name="one", reference="http://one.one") assert e1 == e2 assert e1 != {} assert repr(e1) == "NamedEntity(name='one', reference='http://one.one')" assert str(e1) == "NamedEntity(name='one', reference='http://one.one')" def test_serialization(self): e1 = NamedEntity(name="one", reference="http://one.one") e_json = json.dumps(e1, cls=NamedEntityEncoder) e2 = json.loads(e_json, cls=NamedEntityDecoder) assert e1 == e2 def test_entity_attributes(self): concentration = Attribute.create_from( name='concentration', value=Value( value=0.25, unit=Unit( reference='http://purl.obolibrary.org/obo/UO_0000064' ) )) e1 = NamedEntity( name="one", reference="http://one.one", attributes=[concentration]) assert e1.is_bound() e2 = NamedEntity( name="one", reference="http://one.one", attributes=[concentration]) assert e1 == e1 assert e1 == e2 assert e1 != {} assert repr( e1) == "NamedEntity(name='one', reference='http://one.one', attributes=[BoundAttribute(name='concentration', value=Value(value=0.25, unit=Unit(reference='http://purl.obolibrary.org/obo/UO_0000064')))])" assert str( e1) == "NamedEntity(name='one', reference='http://one.one', attributes=[BoundAttribute(name='concentration', value=Value(value=0.25, unit=Unit(reference='http://purl.obolibrary.org/obo/UO_0000064')))])" def test_entity_attribute_serialization(self): concentration = Attribute.create_from( name='concentration', value=Value( value=0.25, unit=Unit( reference='http://purl.obolibrary.org/obo/UO_0000064' ) )) e1 = NamedEntity( name="one", reference="http://one.one", attributes=[concentration]) e_json = json.dumps(e1, cls=NamedEntityEncoder) e2 = json.loads(e_json, cls=NamedEntityDecoder) assert e1 == e2 def test_entity_unbound_attributes(self): 
concentration = Attribute.create_from( name='concentration', value=Value( value=0.25, unit=Unit( reference='http://purl.obolibrary.org/obo/UO_0000064' ) )) timepoint = Attribute.create_from( name='timepoint', unit=Unit(reference='http://purl.obolibrary.org/obo/UO_0000027') ) e1 = NamedEntity( name="one", reference="http://one.one", attributes=[concentration, timepoint]) assert not e1.is_bound()
3,153
981
import tensorflow as tf from utils.decorators import shape_check @shape_check def photometric_loss_l1(synt_target, orig_target, reduce=True): """ :param synt_target: scaled synthesized target image [batch, numsrc, height/scale, width/scale, 3] :param orig_target: scaled original target image [batch, height/scale, width/scale, 3] :param reduce: whether to reduce loss to batch size or not :return: photo_loss [batch] """ orig_target = tf.expand_dims(orig_target, axis=1) # create mask to ignore black region synt_target_gray = tf.reduce_mean(synt_target, axis=-1, keepdims=True) error_mask = tf.equal(synt_target_gray, 0) # orig_target: [batch, 1, height/scale, width/scale, 3] # axis=1 broadcasted in subtraction # photo_error: [batch, numsrc, height/scale, width/scale, 3] photo_error = tf.abs(synt_target - orig_target) photo_error = tf.where(error_mask, tf.constant(0, dtype=tf.float32), photo_error) if reduce: # reduce to average per example photo_error = tf.reduce_mean(photo_error, axis=[1, 2, 3, 4]) return photo_error @shape_check def photometric_loss_l2(synt_target, orig_target, reduce=True): """ :param synt_target: scaled synthesized target image [batch, numsrc, height/scale, width/scale, 3] :param orig_target: scaled original target image [batch, height/scale, width/scale, 3] :param reduce: whether to reduce loss to batch size or not :return: photo_loss [batch] """ orig_target = tf.expand_dims(orig_target, axis=1) # create mask to ignore black region synt_target_gray = tf.reduce_mean(synt_target, axis=-1, keepdims=True) error_mask = tf.equal(synt_target_gray, 0) # orig_target: [batch, 1, height/scale, width/scale, 3] # axis=1 broadcasted in subtraction # photo_error: [batch, numsrc, height/scale, width/scale, 3] photo_error = tf.square(synt_target - orig_target) photo_error = tf.where(error_mask, tf.constant(0, dtype=tf.float32), photo_error) if reduce: # reduce to average per example photo_error = tf.reduce_mean(photo_error, axis=[1, 2, 3, 4]) return photo_error 
@shape_check def photometric_loss_ssim(synt_target, orig_target, reduce=True): """ :param synt_target: scaled synthesized target image [batch, numsrc, height/scale, width/scale, 3] :param orig_target: scaled original target image [batch, height/scale, width/scale, 3] :param reduce: whether to reduce loss to batch size or not :return: photo_loss [batch] """ numsrc = synt_target.get_shape().as_list()[1] orig_target = tf.expand_dims(orig_target, axis=1) orig_target = tf.tile(orig_target, [1, numsrc, 1, 1, 1]) # create mask to ignore black region synt_target_gray = tf.reduce_mean(synt_target, axis=-1, keepdims=True) error_mask = tf.equal(synt_target_gray, 0) x = orig_target # [batch, numsrc, height/scale, width/scale, 3] y = synt_target # [batch, numsrc, height/scale, width/scale, 3] c1 = 0.01 ** 2 c2 = 0.03 ** 2 ksize = [1, 3, 3] # TODO IMPORTANT! # tf.nn.avg_pool results in error like ['NoneType' object has no attribute 'decode'] # when training model with gradient tape in eager mode, # but no error in graph mode by @tf.function # Instead, tf.keras.layers.AveragePooling3D results in NO error in BOTH modes # mu_x, mu_y: [batch, numsrc, height/scale, width/scale, 3] average_pool = tf.keras.layers.AveragePooling3D(pool_size=ksize, strides=1, padding="SAME") mu_x = average_pool(x) mu_y = average_pool(y) # mu_x = tf.nn.avg_pool(x, ksize=ksize, strides=1, padding='SAME') # mu_y = tf.nn.avg_pool(y, ksize=ksize, strides=1, padding='SAME') sigma_x = average_pool(x ** 2) - mu_x ** 2 sigma_y = average_pool(y ** 2) - mu_y ** 2 sigma_xy = average_pool(x * y) - mu_x * mu_y ssim_n = (2 * mu_x * mu_y + c1) * (2 * sigma_xy + c2) ssim_d = (mu_x ** 2 + mu_y ** 2 + c1) * (sigma_x + sigma_y + c2) ssim = ssim_n / ssim_d ssim = tf.clip_by_value((1 - ssim) / 2, 0, 1) ssim = tf.where(error_mask, tf.constant(0, dtype=tf.float32), ssim) if reduce: # reduce to average per example ssim = tf.reduce_mean(ssim, axis=[1, 2, 3, 4]) return ssim
4,282
1,598
# -*- coding: utf-8 -*- ''' Created on 2015-08-21 @author: xhj ''' import requests import StringIO import gzip import threading from loginer import Loginer import time from my_log import WeiboSearchLog import os import traceback from bs4 import BeautifulSoup import re from Queue import Queue import datetime from store_model import Single_weibo_store, UserInfo, UserInfo_store, \ UserInfo_loc, UserInfo_loc_store, Bie_Ming_store, \ UserInfo_for_regester_time_store, UserInfo_for_regester_time from mongoengine.errors import NotUniqueError import random from craw_page_parse import Crawler_with_proxy, crawl_set_time_with_keyword import sys from urllib import quote, quote_plus from mongoengine.queryset.visitor import Q import json reload(sys) sys.setdefaultencoding('utf8') # # 通过 nickname 抓取 uid class crawl_uid_from_nickname(threading.Thread): file_write_lock = threading.Lock() def __init__(self, nicknam_list, thread_name='crawl_uid_from_nickname'): threading.Thread.__init__(self) self.nickname_list = nicknam_list self.url_queue = Queue() self.second_url_queue = Queue() pass # http://weibo.cn/search/user/?keyword=孔庆东&page=1 def init_url_queue(self): for nickname in self.nickname_list: url = "http://weibo.cn/search/user/?keyword=" + nickname + "&page=1" self.url_queue.put(url) pass # 抓取并解析页面 def crawl(self, url, is_again=True): loginer = Loginer() cookie = loginer.get_cookie() proxy = loginer.get_proxy() craw_object = Crawler_with_proxy(url, cookie, proxy) WeiboSearchLog().get_scheduler_logger().info(self.name + " start to crawl ! 
" + url) uid_or_uname = "" try: page = craw_object.get_page() uid_or_uname = page_parser_from_search_for_uid(page) except: print traceback.format_exc() crawl_set_time_with_keyword.del_proxy_lock.acquire() if proxy == loginer.get_proxy(): loginer.del_proxy() WeiboSearchLog().get_scheduler_logger().warning(self.name + " proxy exception , change proxy !") crawl_set_time_with_keyword.del_proxy_lock.release() if is_again: return self.crawl(url, is_again=False) else: self.second_url_queue.put(url) return uid_or_uname return uid_or_uname def run(self): self.init_url_queue() while not self.url_queue.empty() or not self.second_url_queue.empty(): url = "" if not self.url_queue.empty(): url = self.url_queue.get() else: url = self.second_url_queue.get() uid_or_uname = self.crawl(url) op_url = url[url.find("keyword="):] nickname = op_url[op_url.find('=') + 1:op_url.find('&')] crawl_uid_from_nickname.file_write_lock.acquire() file_w = open("at_nickname_to_(uid_or_uname).txt", 'a') file_w.write("[uid_or_uname:" + uid_or_uname + "][nickname:" + nickname + "]" + '\n') file_w.flush() file_w.close() crawl_uid_from_nickname.file_write_lock.release() pass # # 通过 uid_or_uname 抓取 用户信息 class crawl_userinfo_from_uname_or_uid(threading.Thread): def __init__(self, uid_or_uname_list, thread_name='crawl_userinfo_from_uname_or_uid'): threading.Thread.__init__(self, name=thread_name) self.uid_or_uname_list = uid_or_uname_list self.url_queue = Queue() self.second_url_queue = Queue() pass # http://weibo.cn/breakingnews?f=search_0 def init_url_queue(self): global UserInfo_store for uid_or_nickname in self.uid_or_uname_list: if len(UserInfo_store.objects(Q(uid_or_uname=str(uid_or_nickname)) | Q(nickname=str(uid_or_nickname)))) != 0 or\ len(Bie_Ming_store.objects(Q(uid_or_uname=str(uid_or_nickname)) | Q(bie_ming=str(uid_or_nickname)))) != 0: continue self.url_queue.put(uid_or_nickname) print "crawl size ::::::::: ", self.url_queue.qsize() pass # 抓取并解析页面 def crawl(self, uid_or_nickname, 
is_again=False): # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ url = '' if len(UserInfo_store.objects(Q(uid_or_uname=str(uid_or_nickname)) | Q(nickname=str(uid_or_nickname)))) != 0 or\ len(Bie_Ming_store.objects(Q(uid_or_uname=str(uid_or_nickname)) | Q(bie_ming=str(uid_or_nickname)))) != 0: WeiboSearchLog().get_scheduler_logger().info("already in the database : " + uid_or_nickname) return "nothing" quote_uid_or_nickname = "" try: quote_uid_or_nickname = quote_plus(str(uid_or_nickname.strip())) except: print traceback.format_exc() print uid_or_nickname # url = "http://weibo.cn/" + uid_or_nickname + "?f=search_0" if quote_uid_or_nickname == uid_or_nickname: url = "http://weibo.cn/" + uid_or_nickname + "?f=search_0" else: url = "http://weibo.cn/n/" + quote_uid_or_nickname # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ loginer = Loginer() cookie = loginer.get_cookie() proxy = loginer.get_proxy() craw_object = Crawler_with_proxy(url, cookie, proxy) WeiboSearchLog().get_scheduler_logger().info(self.name + " start to crawl ! " + url) user_info = "" try: page = craw_object.get_page() user_info = page_parser_from_search_for_UserInfo(page, url) except: if is_again: return self.crawl(url, is_again=False) else: return user_info return user_info # uid_or_uname = StringField(unique=True) # nickname = StringField() # is_persion = StringField() # check_or_not = StringField() # fensi = StringField() def store_userinfo_to_db(self, uid_or_nickname, user_info): if type(user_info) is str: WeiboSearchLog().get_scheduler_logger().info(self.name + " nothing ! 
:" + user_info) return unique_user_info = UserInfo_store(uid_or_uname=user_info.uid_or_uname, nickname=user_info.nickname, is_persion=user_info.is_persion, check_or_not=user_info.check_or_not, fensi=user_info.fensi, sex=user_info.sex, location=user_info.location, check_info=user_info.check_info, weibo_all_nums=user_info.weibo_all_nums, guan_zhu_nums=user_info.guan_zhu_nums ) # Bie_Ming_store if unique_user_info['uid_or_uname'] != uid_or_nickname: bie_ming = Bie_Ming_store(uid_or_uname=unique_user_info['uid_or_uname'] , bie_ming=uid_or_nickname) sign = 0 try: unique_user_info.save() except NotUniqueError: sign = 1 WeiboSearchLog().get_scheduler_logger().info(self.name + " insert to database, not unique ! " + unique_user_info['uid_or_uname'] + " crawl: " + uid_or_nickname) except: sign = 2 WeiboSearchLog().get_scheduler_logger().info(self.name + " insert to database, something wrong !") if sign == 0: WeiboSearchLog().get_scheduler_logger().info(self.name + " insert to database, success success success success!") try: bie_ming.save() except NotUniqueError: WeiboSearchLog().get_scheduler_logger().info(self.name + " bieming already in database" + unique_user_info['uid_or_uname'] + " crawl: " + uid_or_nickname) return except: WeiboSearchLog().get_scheduler_logger().info(self.name + " bieming insert to database, something wrong !") return pass def run(self): self.init_url_queue() while not self.url_queue.empty() or not self.second_url_queue.empty(): uid_or_nickname = "" if not self.url_queue.empty(): uid_or_nickname = self.url_queue.get() else: uid_or_nickname = self.second_url_queue.get() user_info = self.crawl(uid_or_nickname) # print user_info.to_string() if not user_info == "nothing" : self.store_userinfo_to_db(uid_or_nickname, user_info) pass # # 通过 uid_or_uname 抓取 用户信息 (位置信息) # 这里主要是uid,可以抓取到生日信息 class crawl_userinfo_2_from_uid(threading.Thread): def __init__(self, uid_or_uname_list, thread_name='crawl_userinfo_from_uname_or_uid'): threading.Thread.__init__(self) 
self.uid_or_uname_list = uid_or_uname_list self.url_queue = Queue() self.second_url_queue = Queue() pass # http://weibo.cn/1806760610/info def init_url_queue(self): for uid_or_nickname in self.uid_or_uname_list: url = "http://weibo.cn/" + uid_or_nickname + "/info" self.url_queue.put(url) pass # 抓取并解析页面 def crawl(self, url, is_again=True): loginer = Loginer() cookie = loginer.get_cookie() proxy = loginer.get_proxy() craw_object = Crawler_with_proxy(url, cookie, proxy) WeiboSearchLog().get_scheduler_logger().info(self.name + " start to crawl ! " + url) user_info_loc = "" try: page = craw_object.get_page() user_info_loc = page_parser_from_search_for_UserInfoLoc(page, url) except: print traceback.format_exc() crawl_set_time_with_keyword.del_proxy_lock.acquire() if proxy == loginer.get_proxy(): loginer.del_proxy() WeiboSearchLog().get_scheduler_logger().warning(self.name + " proxy exception , change proxy !") crawl_set_time_with_keyword.del_proxy_lock.release() if is_again: return self.crawl(url, is_again=False) else: self.second_url_queue.put(url) return user_info_loc return user_info_loc # uid_or_uname = StringField(unique=True) # nickname = StringField() # is_persion = StringField() # check_or_not = StringField() # fensi = StringField() def store_userinfo_loc_to_db(self, user_info_loc): unique_user_info_loc = UserInfo_loc_store(uid=user_info_loc.uid, nickname=user_info_loc.nickname, location=user_info_loc.location, sex=user_info_loc.sex, birth=user_info_loc.birth, intro=user_info_loc.intro, check_or_not=user_info_loc.check_or_not, check_info=user_info_loc.check_info) try: unique_user_info_loc.save() except NotUniqueError: pass except: WeiboSearchLog().get_scheduler_logger().info(self.name + " insert to database, something wrong !") pass WeiboSearchLog().get_scheduler_logger().info(self.name + " insert to database, success !") pass def run(self): self.init_url_queue() while not self.url_queue.empty() or not self.second_url_queue.empty(): url = "" if not 
self.url_queue.empty(): url = self.url_queue.get() else: url = self.second_url_queue.get() user_info_loc = self.crawl(url) # print user_info.to_string() self.store_userinfo_loc_to_db(user_info_loc) pass # 要从 网页端 进行抓取,为了提取用户的注册时间 class crawl_userinfo_3_for_regester_time(threading.Thread): def __init__(self, uid_or_uname_list, thread_name='crawl_userinfo_for_regester_times'): threading.Thread.__init__(self) self.uid_or_uname_list = uid_or_uname_list self.url_queue = Queue() self.second_url_queue = Queue() pass # http://weibo.cn/1806760610/info def init_url_queue(self): for uid_or_nickname in self.uid_or_uname_list: url = "http://weibo.com/" + uid_or_nickname + "/info" self.url_queue.put(url) pass # 抓取并解析页面 def crawl(self, url, is_again=True): loginer = Loginer() cookie = loginer.get_cookie() proxy = loginer.get_proxy() craw_object = Crawler_with_proxy(url, cookie, proxy) WeiboSearchLog().get_scheduler_logger().info(self.name + " start to crawl ! " + url) userInfo_for_regester_time = "" try: page = craw_object.get_page() userInfo_for_regester_time = page_parser_from_search_for_UserInfo_for_regester_time(page, url) except: print traceback.format_exc() crawl_set_time_with_keyword.del_proxy_lock.acquire() if proxy == loginer.get_proxy(): loginer.del_proxy() WeiboSearchLog().get_scheduler_logger().warning(self.name + " proxy exception , change proxy !") crawl_set_time_with_keyword.del_proxy_lock.release() if is_again: return self.crawl(url, is_again=False) else: self.second_url_queue.put(url) return userInfo_for_regester_time return userInfo_for_regester_time # uid_or_uname = StringField(unique=True) # nickname = StringField() # is_persion = StringField() # check_or_not = StringField() # fensi = StringField() def store_userinfo_loc_to_db(self, userInfo_for_regester_time): unique_user_info = UserInfo_for_regester_time_store(uid=userInfo_for_regester_time.uid, nickname=userInfo_for_regester_time.nickname, \ location=userInfo_for_regester_time.location, 
sex=userInfo_for_regester_time.sex, \ birth=userInfo_for_regester_time.birth, regester_time=userInfo_for_regester_time.regester_time) try: unique_user_info.save() except NotUniqueError: pass except: WeiboSearchLog().get_scheduler_logger().info(self.name + " insert to database, something wrong !") pass WeiboSearchLog().get_scheduler_logger().info(self.name + " insert to database, success !") pass def run(self): self.init_url_queue() while not self.url_queue.empty() or not self.second_url_queue.empty(): url = "" if not self.url_queue.empty(): url = self.url_queue.get() else: url = self.second_url_queue.get() userInfo_for_regester_time = self.crawl(url) # print user_info.to_string() self.store_userinfo_loc_to_db(userInfo_for_regester_time) pass ############################################ 页面解析 ########################################################### # http://weibo.cn/1806760610/info def page_parser_from_search_for_UserInfoLoc(page, url): bs_all = BeautifulSoup(page) div_all = bs_all.findAll('div', attrs={'class':'c'}) nickname = "" location = "" sex = "" birth = "" intro = "" check_or_not = u'否' check_info = "" op_uid = url[url.find('.cn'):] uid = op_uid[op_uid.find('/') + 1:op_uid.rfind('/')] for div in div_all: for str_in in str(div.getText(u'\n')).split(u'\n'): en_str = str_in.encode('utf-8') if(en_str.startswith(u"昵称")): nickname = en_str[en_str.find(':') + 1:] elif(en_str.startswith(u"地区")): location = en_str[en_str.find(':') + 1:] elif(en_str.startswith(u"性别")): sex = en_str[en_str.find(':') + 1:] elif(en_str.startswith(u"生日")): birth = en_str[en_str.find(':') + 1:] elif(en_str.startswith(u"简介")): intro = en_str[en_str.find(':') + 1:] elif(en_str.startswith(u"认证信息")): check_or_not = u'是' check_info = en_str return UserInfo_loc(uid, nickname, location, sex, birth, intro, check_or_not, check_info) pass # http://weibo.cn/1730330447?f=search_0 # http://weibo.cn/breakingnews?f=search_0 # 解析获取 UserInfo def page_parser_from_search_for_UserInfo(page, url): out_soup = 
BeautifulSoup(page) div_u_first = "" for div_u_one in out_soup.findAll('div', attrs={'class':'u'}): if u"资料" in div_u_one.getText() and u"私信" in div_u_one.getText(): div_u_first = div_u_one break # 获取 uid_or_uname, uid_or_uname = "" for a_one in div_u_first.findAll("a"): if u"送Ta会员" in a_one.getText() and u"uid=" in a_one.attrs["href"]: a_one_href = a_one.attrs["href"] uid_or_uname = a_one_href[a_one_href.find("uid=") + 4:] break # op_url = url[url.find(".cn"):] # uid_or_uname = op_url[op_url.find('/')+1:op_url.find('?')] # 新添加----------------------------------start sex = "" location = "" check_info = "" weibo_all_nums = "" guan_zhu_nums = "" # 新添加----------------------------------end # is_persion,check_or_not is_persion = "" check_or_not = "" div_class_ut = div_u_first.find('div', attrs={'class':'ut'}) # nickname nickname = "" span_class_ctt = div_class_ut.findAll('span', attrs={'class':'ctt'}) for span_class_ctt_one in span_class_ctt: span_class_ctt_one_text = span_class_ctt_one.getText() if u"关注" in span_class_ctt_one_text: if str(span_class_ctt_one_text).find("男") != -1: nickname = span_class_ctt_one_text[:span_class_ctt_one_text.find(u'男') - 1] sex = "男" location = span_class_ctt_one_text[span_class_ctt_one_text.find(u'男') + 2:span_class_ctt_one_text.find(u'关注') - 1] if str(span_class_ctt_one_text).find("女") != -1: nickname = span_class_ctt_one_text[:span_class_ctt_one_text.find(u'女') - 1] sex = "女" location = span_class_ctt_one_text[span_class_ctt_one_text.find(u'女') + 2:span_class_ctt_one_text.find(u'关注') - 1 ] if u"认证" in span_class_ctt_one_text: check_info = span_class_ctt_one_text[span_class_ctt_one_text.find(u"认证:") + 1:] pass # op_span_class_ctt_one_html = span_class_ctt_one_html[2:] # nickname_candidate = op_span_class_ctt_one_html[op_span_class_ctt_one_html.find('>')+1:op_span_class_ctt_one_html.find('<')] # # if str(nickname_candidate).find("男") != -1: # # nickname_candidate = nickname_candidate[:nickname_candidate.find(u'男')-1] # # if 
str(nickname_candidate).find("女") != -1: # # nickname_candidate = nickname_candidate[:nickname_candidate.find(u'女')-1] # nickname = nickname_candidate imag_alt_V = div_class_ut.find('img', attrs={'alt':'V'}) if imag_alt_V is not None: if u"5337" in str(imag_alt_V.attrs['src']): is_persion = "no" else: is_persion = "yes" check_or_not = "yes" else: is_persion = "yes" check_or_not = "no" # ,fensi fensi = "" div_tip2_second_leval = div_u_first.find('div', attrs={'class':'tip2'}) a_all = div_tip2_second_leval.findAll('a') for a_one in a_all: a_text = a_one.getText() if u"粉丝" in a_text: fensi = a_text[a_text.find('[') + 1:a_text.find(']')] if u"关注" in a_text: guan_zhu_nums = a_text[a_text.find('[') + 1:a_text.find(']')] for span_class_tc in div_tip2_second_leval.findAll('span'): span_class_tc_text = span_class_tc.getText() if u"微博" in span_class_tc_text: weibo_all_nums = span_class_tc_text[span_class_tc_text.find('[') + 1:span_class_tc_text.find(']')] user_info = UserInfo(uid_or_uname, nickname, is_persion, check_or_not, fensi, sex, location, check_info, weibo_all_nums, guan_zhu_nums) return user_info pass # # 解析页面,获取搜索的第一个,uid def page_parser_from_search_for_uid(page): out_soup = BeautifulSoup(page) table_first = out_soup.find('table') td_first = table_first.find('td', attrs={'valign':'top'}) a_href = td_first.find('a').attrs['href'] uid_or_uname = a_href[a_href.rfind('/') + 1:a_href.find('?')] return uid_or_uname # # 通过 http://weibo.com/1802646764/info 来抓取用户信息,主要是为了抓取用户的注册时间 def page_parser_from_search_for_UserInfo_for_regester_time(page, url): uid = url[url.find('com/') + 4:url.rfind('/')] nickname = "" location = "" sex = "" birth = "" regester_time = "" soup = BeautifulSoup(page) for script in soup.findAll('script'): text = script.text if 'FM.view(' in text: text = text[8:] if text.endswith(')'): text = text[:-1] if text.endswith(');'): text = text[:-2] data = json.loads(text) inner_html = data.get('html') if inner_html is None: continue inner_soup = 
BeautifulSoup(inner_html) # pf_items = inner_soup.findAll('div', attrs={'class': 'pf_item clearfix'}) li_1_clearfix_all = inner_soup.findAll('li', attrs={'class':'li_1 clearfix'}) for one_li in li_1_clearfix_all: this_text = one_li.getText() # print this_text if u'昵称' in this_text: nickname = this_text[this_text.find(u'昵称') + 3:].strip() continue if u'所在地' in this_text: location = this_text[this_text.find(u'所在地') + 4:].strip() continue if u'性别' in this_text: sex = this_text[this_text.find(u'性别') + 3:].strip() continue if u'生日' in this_text: birth = this_text[this_text.find(u'生日') + 3:].strip() continue if u'注册时间' in this_text: regester_time = this_text[this_text.find(u'注册时间') + 5:].strip() continue userInfo_for_regester_time = UserInfo_for_regester_time(uid,nickname,location,sex,birth,regester_time) # userInfo_for_regester_time.print_self() return userInfo_for_regester_time
23,127
8,152
from datetime import datetime, time from enum import Enum from typing import Optional, Sequence, Union from pydantic import BaseModel class Direction(Enum): enum_1 = 1 enum_2 = 2 class PetLocationSet(BaseModel): where: Direction
246
75
########################## #### # WARNING: THIS FILE IS DEPRECATED AND IS ONLY RETAINED FOR INFORMATIONAL PURPOSES # ../dumb_topic_client is the up-to-date sample program ### ######################### from tweepy.streaming import StreamListener from tweepy import OAuthHandler from tweepy import Stream import requests cred_file = "oauth_tokens.txt" seen_tweets = set() class RetweetListener(StreamListener): def on_status(self,status): #print(status.text.encode('utf8')) if hasattr(status,'retweeted_status'): rt_status = status.retweeted_status # print status.retweeted_status.id, status.retweeted_status.retweet_count if rt_status.retweet_count > 10000 and rt_status.id not in seen_tweets: print rt_status.id, rt_status.retweet_count, resp = requests.post("http://XXX/tweet/%s"%rt_status.id) print resp.status_code seen_tweets.add(rt_status.id) return True def on_error(self,status_code): print status_code if __name__ == '__main__': oauth = json.load(open('oauth_tokens.txt')) listener = RetweetListener() auth = OAuthHandler(oauth["consumer_key"],oauth["consumer_secret"]) auth.set_access_token(oauth["access_token"],oauth["access_token_secret"]) stream = Stream(auth,listener) stream.sample(languages=['en'])
1,303
443
#!/usr/bin/env python # -*- coding:utf-8 -*- # @Filename: DROP3.py # @Author: Daniel Puente Ramírez # @Time: 31/12/21 16:00 # @Version: 5.0 import copy from sys import maxsize import numpy as np import pandas as pd from sklearn.neighbors import NearestNeighbors from .utils import transform class DROP3: """ Wilson, D. R., & Martinez, T. R. (2000). Reduction techniques for instance-based learning algorithms. Machine learning, 38(3), 257-286. Parameters ---------- nearest_neighbors : int, default=3 Number to use as nearest neighbors when computing distances. power_parameter : int, default=2 Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. """ def __init__(self, nearest_neighbors=3, power_parameter=2): """ The function takes in two parameters, nearest_neighbors and power_parameter, and assigns them to the attributes nearest_neighbors and power_parameter :param nearest_neighbors: The number of nearest neighbors to use when calculating the weights, defaults to 3 (optional) :param power_parameter: This is the exponent that is used to calculate the weights, defaults to 2 (optional) """ self.nearest_neighbors = nearest_neighbors self.power_parameter = power_parameter self.x_attr = None def filter(self, samples, y): """ Implementation of DROP3. The Decremental Reduction Optimization Procedure (DROP) algorithms base their selection rule in terms of the partner and associate concept. At the very beginning a Wilson Editing algorithm is performed in order to remove any noise that may ve contained in the data. Followed by the DROP algorithm, in which an instance will be removed is its associates are correctly classified without the instance. :param samples: DataFrame. :param y: DataFrame. :return: the input dataset with the remaining samples. 
""" ( initial_distances, initial_samples, initial_targets, knn, samples_info, ) = self._create_variables(samples, y) self._find_associates( initial_distances, initial_samples, initial_targets, knn, samples_info ) initial_distances.sort(key=lambda x: x[2], reverse=True) removed = 0 size = len(initial_distances) for index_x in range(size): x_sample = initial_distances[index_x - removed][0] with_, without = self._with_without(tuple(x_sample), samples_info) if without >= with_: initial_distances = ( initial_distances[: index_x - removed] + initial_distances[index_x - removed + 1:] ) removed += 1 for a_associate_of_x in samples_info[(tuple(x_sample))][1]: a_neighs, remaining_samples = self._remove_from_neighs( a_associate_of_x, initial_distances, samples_info, x_sample ) knn = NearestNeighbors( n_neighbors=self.nearest_neighbors + 2, n_jobs=1, p=self.power_parameter, ) knn.fit(remaining_samples) _, neigh_ind = knn.kneighbors([a_associate_of_x]) possible_neighs = [initial_distances[x][0] for x in neigh_ind[0]] self._find_new_neighs( a_associate_of_x, a_neighs, possible_neighs, samples_info ) new_neigh = a_neighs[-1] samples_info[tuple(new_neigh)][1].append(a_associate_of_x) samples = pd.DataFrame( [x for x, _, _ in initial_distances], columns=self.x_attr ) y = pd.DataFrame([x for _, x, _ in initial_distances]) return samples, y def _create_variables(self, samples, y): """ > It takes in the samples and targets, and returns the initial distances, samples, targets, knn, and samples_info :param samples: the data :param y: the target variable :return: initial_distances, initial_samples, initial_targets, knn, samples_info """ self.x_attr = samples.keys() samples = transform(samples, y) s = copy.deepcopy(samples) initial_samples = s["data"] initial_targets = s["target"] initial_samples, samples_index = np.unique( ar=initial_samples, return_index=True, axis=0 ) initial_targets = initial_targets[samples_index] knn = NearestNeighbors( n_neighbors=self.nearest_neighbors + 2, 
n_jobs=1, p=self.power_parameter ) knn.fit(initial_samples) samples_info = { tuple(x): [[], [], y] for x, y in zip(initial_samples, initial_targets) } initial_distances = [] return initial_distances, initial_samples, initial_targets, knn, samples_info @staticmethod def _find_new_neighs(a_associate_of_x, a_neighs, possible_neighs, samples_info): """ > The function takes a sample, finds its neighbors, and then checks if any of the neighbors are not already in the list of neighbors. If they are not, then they are added to the list of neighbors :param a_associate_of_x: the sample we are looking for neighbors for :param a_neighs: the list of neighbors of a_associate_of_x :param possible_neighs: a list of all the possible neighbors of a given point :param samples_info: a dictionary with the following structure: """ for pos_neigh in possible_neighs[1:]: was_in = False for old_neigh in a_neighs: if np.array_equal(old_neigh, pos_neigh): was_in = True break if not was_in: a_neighs.append(pos_neigh) break samples_info[tuple(a_associate_of_x)][0] = a_neighs @staticmethod def _remove_from_neighs( a_associate_of_x, initial_distances, samples_info, x_sample ): """ > It removes the sample `x_sample` from the list of neighbors of `a_associate_of_x` and returns the updated list of neighbors of `a_associate_of_x` and the updated list of remaining samples :param a_associate_of_x: the sample that is the associate of x :param initial_distances: a list of tuples of the form (sample, distance, associate) :param samples_info: a dictionary of the form {(x,y):[neighs,distances,associate]} :param x_sample: the sample we want to find the nearest neighbor for :return: the new list of neighbors of a_associate_of_x, and the list of remaining samples. 
""" a_neighs = samples_info[tuple(a_associate_of_x)][0] index_to_use = 0 for index_a, neigh in enumerate(a_neighs): index_to_use = index_a if np.array_equal(neigh, x_sample): break a_neighs = a_neighs[:index_to_use] + a_neighs[index_to_use + 1:] remaining_samples = [x for x, _, _ in initial_distances] return a_neighs, remaining_samples @staticmethod def _find_associates( initial_distances, initial_samples, initial_targets, knn, samples_info ): """ For each sample in the initial set, find the closest sample from the other class and store it in the initial_distances list :param initial_distances: a list of lists, each list containing a sample, its target, and its distance to the nearest sample of a different class :param initial_samples: the samples that we want to find the nearest neighbors for :param initial_targets: the labels of the initial samples :param knn: the k-nearest neighbors model :param samples_info: a dictionary that stores the neighbors of each sample and the samples that are neighbors of each sample """ for x_sample, x_target in zip(initial_samples, initial_targets): min_distance = maxsize for y_sample, y_label in zip(initial_samples, initial_targets): if x_target != y_label: xy_distance = np.linalg.norm(x_sample - y_sample) if xy_distance < min_distance: min_distance = xy_distance initial_distances.append([x_sample, x_target, min_distance]) _, neigh_ind = knn.kneighbors([x_sample]) x_neighs = [initial_samples[x] for x in neigh_ind[0][1:]] samples_info[tuple(x_sample)][0] = x_neighs for neigh in x_neighs[:-1]: samples_info[tuple(neigh)][1].append(x_sample) @staticmethod def _with_without(x_sample, samples_info): """ For each sample in the dataset, we find its associates and then for each associate, we find its neighbors. We then find the class with the most number of neighbors and compare it with the class of the associate. If they are the same, we increment the `with_` variable. 
If they are not the same, we increment the `without` variable :param x_sample: the sample we're looking at :param samples_info: a dictionary of the form {(x,y):[neighbors, associates, target]} :return: The number of times the target class of the sample is the most common class among its neighbors, with and without the sample itself. """ index_a = 0 with_ = 0 without = 0 x_associates = samples_info[x_sample][1] associates_targets = [samples_info[tuple(x)][2] for x in x_associates] associates_neighs = [samples_info[tuple(x)][0] for x in x_associates] for _, a_target, a_neighs in zip( x_associates, associates_targets, associates_neighs ): neighs_targets = np.ravel( np.array([samples_info[tuple(x)][2] for x in a_neighs]) ).astype(int) neighs_targets = neighs_targets.tolist() count = np.bincount(neighs_targets[:-1]) max_class = np.where(count == np.amax(count))[0][0] if max_class == a_target: with_ += 1 for index_a, neigh in enumerate(a_neighs): if np.array_equal(neigh, x_sample): break count = np.bincount( neighs_targets[:index_a] + neighs_targets[index_a + 1:] ) max_class = np.where(count == np.amax(count))[0][0] if max_class == a_target: without += 1 return with_, without
11,186
3,267
from django.apps import AppConfig from orchestra.core import accounts, administration from orchestra.core.translations import ModelTranslation class IssuesConfig(AppConfig): name = 'orchestra.contrib.issues' verbose_name = "Issues" def ready(self): from .models import Queue, Ticket accounts.register(Ticket, icon='Ticket_star.png') administration.register(Queue, dashboard=False) ModelTranslation.register(Queue, ('verbose_name',))
485
136
import numpy as np import matplotlib.pyplot as plt x = np.random.rand(10, 10) plt.imshow(x, cmap=plt.cm.hot) # 显示右边颜色条 plt.colorbar() plt.savefig('imshow_demo.png')
178
92
# This test code was written by the `hypothesis.extra.ghostwriter` module # and is provided under the Creative Commons Zero public domain dedication. from pathlib import Path from hypothesis import given, strategies as st import attack_surface_pypy.core.exceptions @given(message=st.text()) def test_fuzz_InvalidFileDataError(message): attack_surface_pypy.core.exceptions.InvalidFileDataError(message=message) @given(message=st.integers() | st.floats()) def test_fuzz_TimeoutExceededError(message): attack_surface_pypy.core.exceptions.TimeoutExceededError(message=message) @given(message=st.from_regex(r'^vm-\w{6,10}$')) def test_fuzz_VMNotFoundError(message): attack_surface_pypy.core.exceptions.VMNotFoundError(message=message) @given(message=st.one_of(st.builds(Path), st.text())) def test_fuzz_LoaderFileNotFoundError(message): attack_surface_pypy.core.exceptions.loader.LoaderFileNotFoundError(message=message)
942
317
#! /usr/bin/python3 from Crypto.Cipher import AES from random import randint # https://www.cryptopals.com/sets/4/challenges/27 # Recover the key from CBC with IV=Key import sys sys.path.append('..') from cryptopals import ctr,xor,random_aes_key,cbc_decrypt,cbc_encrypt def random_aes_key(blocksize=16): return random_str(blocksize,blocksize) def detect_high_ascii(text): for c in text: if c >= 0x80: return True return False def f1(plaintext): global key aes_ecb = AES.new(key, AES.MODE_ECB) return cbc_encrypt(aes_ecb,plaintext,IV) def f2(ciphertext): global key aes_ecb = AES.new(key, AES.MODE_ECB) plaintext = cbc_decrypt(aes_ecb,ciphertext,IV) if detect_high_ascii(plaintext): return plaintext else: return False def blockfy(data, blocklen=16): return [data[i:i+blocklen] for i in range(0,len(data),blocklen)] def main(): blocksize = 16 global key global IV #key = random_aes_key(blocksize) key = "YELLOW SUBMARINE" IV = bytearray(key,"ascii") INPUT = bytearray("A"*32,"ascii") ciphertext = f1(INPUT) temp = blockfy(ciphertext) x = temp[0] x.extend(bytearray(16)) x.extend(temp[0]) #x = temp[0] + "\x00"*16 + temp[0] r = f2(x) if r: error = r else: print ("Bad luck!") exit() p = blockfy(error) k = xor(p[0],p[2]) print (k) main()
1,445
588
import os import sys DIRNAME = os.path.abspath(os.path.dirname(__file__)) rel = lambda *x: os.path.abspath(os.path.join(DIRNAME, *x)) PROJECT_DIR = rel('..') activate_this = rel('env', 'bin', 'activate_this.py') # Activate virtualenv execfile(activate_this, {'__file__': activate_this}) os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' os.environ['PYTHON_EGG_CACHE'] = '/srv/python_eggs/' # Need to add upper-level dir to syspath to reproduce dev Django environ sys.path.append(PROJECT_DIR) from django.core.handlers.wsgi import WSGIHandler application = WSGIHandler()
577
220
"""Functions for generating random data with injected relationships""" from itertools import product import os import json import re import random import numpy as np from numpy import random as rd from scipy.special import comb from ntp.util.util_kb import load_from_list def gen_relationships(n_pred, n_rel, body_predicates=1): """ Generates random relationships between predicates of the form goal predicate <-- {set of body predicates}. Goal predicates have a higher number than body predicates. Args: n_pred: number of total predicates n_rel: number of relationships body_predicates: number of body predicates for each relationship Returns: Dict, entries where keys are goal predicates and values are list of body predicates """ relationship_dict = {} n_rel_possible = comb(n_pred, body_predicates + 1) pred_probs = [comb(i, body_predicates)/n_rel_possible for i in range(n_pred)] relationship_head_array = list(rd.choice(n_pred, size=n_rel, replace=False, p=pred_probs)) relationship_body_array = [set(rd.choice(range(relationship_head_array[i]), size=body_predicates, replace=False)) for i in range(len(relationship_head_array))] for i in range(n_rel): relationship_dict[relationship_head_array[i]] = relationship_body_array[i] return relationship_dict def gen_simple(n_pred, relationship_dict, p_normal, p_relationship, n_constants, order=1): """ Generates random truth values for predicates for a set number of constants, and given some relationships Args: n_pred: number of total predicates relationship_dict: Dict of relationships p_normal: probability of predicate truth given no relationship/relationship body not true p_relationship: probability of goal predicate truth given body predicate truth n_constants: number of constants order: order of predicate (unary, binary) Returns: Numpy array where value j, i corresponds to the truth value of predicate i for constant j """ # Checks whether body predicates for a particular relationship hold for a particular constant def 
body_holds(data, body_predicates, constant): holds = True for predicate in body_predicates: if data[index + (predicate,)] != 1: holds = False break return holds data = np.zeros([n_constants] * order + [n_pred]) for predicate in range(n_pred): for index in product(*[range(n_constants) for i in range(order)]): if predicate in relationship_dict: if body_holds(data, relationship_dict[predicate], index): data[index + (predicate,)] = rd.binomial(1, p_relationship) continue # Set variable normally if predicate from relationship doesn't hold data[index + (predicate,)] = rd.binomial(1, p_normal) return data def write_data(data): """Convert numpy array of data into list of strings that the ntp algorithm can read""" shape = np.shape(data) text_list = [] for pred in range(shape[-1]): for index in product(*[range(dim_size) for dim_size in shape[:-1]]): if data[index + (pred,)] == 1: write_string = "Predicate" + str(pred) + "(" for const in index: write_string += "Constant" + str(const) + "," write_string = write_string[:-1] + ").\n" text_list.append(write_string) return text_list def write_relationships(relationships, path): """write relationship dict to file""" with open(path, "w") as f: json.dump(relationships, f) return def write_simple_templates(n_rules, body_predicates=1, order=1): """Generate rule template of form C < A ^ B of varying size and order""" text_list = [] const_term = "(" for i in range(order): const_term += chr(ord('X') + i) + "," const_term = const_term[:-1] + ")" write_string = "{0} #1{1} :- #2{1}".format(n_rules, const_term) if body_predicates > 1: for i in range(body_predicates - 1): write_string += ", #" + str(i + 3) + const_term text_list.append(write_string) return text_list def gen_transitivity(n_preds, n_rules, n_constants, p_base, max_iterations=1): """Generate data with transitivity relationships, and also rule templates""" # active predicate is predicate 0 WLOG active_values = np.random.binomial(1, p_base, size=[n_constants, n_constants]) edges = 
[(i, j) for i in range(n_constants) for j in range(n_constants) if active_values[i, j] == 1] closure = set(edges) while True: new_edges = set((x,w) for x,y in closure for q,w in closure if q == y) closure_until_now = closure | new_edges if closure_until_now == closure: break closure = closure_until_now edges = list(closure) active_values[tuple(np.transpose(edges))] = 1 values = np.random.binomial(1, p_base, size=[n_constants, n_constants, n_preds]) values[:, :, 0] = active_values fact_list = write_data(values) template = "{0} #1(X, Z) :- #1(X, Y), #1(Y, Z).".format(n_rules) return fact_list, template def text_to_id(fact): """Given a fact in text form, convert to predicate and constant numbers""" reduced = re.sub("[^0-9\(,]", '', fact) reduced_split = tuple(re.split("[\(,]", reduced)) predicate = int(reduced_split[0]) constants = tuple([int(constant_text) for constant_text in reduced_split[1:]]) return predicate, constants def gen_constant_dict(train_list): """Convert list of facts in text form to a dictionary of predicate truth values by constant""" constant_dict = {} for fact in train_list: predicate, constants = text_to_id(fact) if not constants in constant_dict: constant_dict[constants] = set([predicate]) else: constant_dict[constants].add(predicate) return constant_dict def test_fact_active(constant_dict, constants, predicate, relationships): """Given relationships, determine whether the truth value of a fact could be predicted by a relationship""" if predicate in relationships: if all(body_pred in constant_dict[constants] for body_pred in relationships[predicate]): return True return False def count_active(constant_dict, relationships): """Given relationships and a dataset of constants, determine for how many facts the truth value could be predicted by a relationship""" active_facts = 0 for constants, predicates in constant_dict.items(): for predicate in relationships: if predicate in predicates and all(body_pred in predicates for body_pred in 
relationships[predicate]): active_facts += 1 return active_facts def gen_test_kb(train_list, n_test, test_active_only=False, relationships=None): """Given a list of facts, choose some facts to be split off to a test dataset in such a way that there is at least one training fact left for each constant""" constant_dict = gen_constant_dict(train_list) random.shuffle(train_list) constant_set = set() new_train_list = [] test_list = [] for fact in train_list: predicate, constants = text_to_id(fact) if test_active_only: if test_fact_active(constant_dict, constants, predicate, relationships) and len(test_list) < n_test: test_list.append(fact) continue else: if all(constant in constant_set for constant in constants) and len(test_list) < n_test: test_list.append(fact) continue else: for constant in constants: constant_set.add(constant) new_train_list.append(fact) train_list = new_train_list test_kb = load_from_list(test_list) return test_kb, train_list
8,057
2,381
from django.urls import reverse from rest_framework import status from rest_framework.test import APITestCase from accounts.models import Account from accounts.serializers import AccountSerializer from .utils import create_accounts class AccountViewsTests(APITestCase): def test_create_account(self): """ Ensure we can create a new account object. """ url = reverse('accounts-list') data = {'name': 'Test'} response = self.client.post(url, data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Account.objects.count(), 1) self.assertEqual(Account.objects.get().name, 'Test') def test_account_list(self): """ Ensure GET endpoint is returning all serialized accounts. """ create_accounts(10) url = reverse('accounts-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data, AccountSerializer(Account.objects.all(), many=True).data)
1,116
312
#!/usr/bin/python3 FILE_PATH = "./littleschoolbus.bmp" with open(FILE_PATH,"rb") as f: bytes = bytearray(f.read()) result = "" for byte in bytes[54:]: result += str(byte & 1) print(result)
202
85