Dataset columns: content (string, lengths 0 to 1.05M), origin (string, 2 classes), type (string, 2 classes)
#!/usr/bin/env false
"""Generate script to activate project."""

# Internal packages (absolute references, distributed with Python)
# External packages (absolute references, NOT distributed with Python)
# Library modules (absolute references, NOT packaged, in project)
# from src_gen.script.bash.briteonyx.source import generate as gen
# from src_gen.script.bash.briteonyx.structure import *
from src_gen.script.bash.source import generate as gen
from src_gen.script.bash.structure import *

# Project modules (relative references, NOT packaged, in project)


def _abort_if_activated():
    return [
        if_(
            string_is_not_null(dq(vr("BO_Project"))),
            indent(),
            "1>&2 ",
            echo(
                dq(
                    "Aborting, this project is already activated as ",
                    sq(vr("BO_Project")),
                )
            ),
            eol(),
            indent(),
            abort_script(),
        ),
        fi(),
    ]


def _abort_if_missing_pwd():
    return [
        if_(
            string_is_null(dq(vr("PWD"))),
            indent(),
            "1>&2 ",
            echo(
                dq(
                    "Aborting, missing environment variable ",
                    sq(vn("PWD")),
                )
            ),
            eol(),
            indent(),
            abort_script(),
        ),
        fi(),
    ]


def _activate_python_virtual_environment(pve_activate_script, script, status):
    return [
        comment("Activate Python virtual environment (PVE)"),
        _capture_environment("PVE-prior"),
        source_or_abort(pve_activate_script, script, status),
        _capture_environment("PVE-after"),
    ]


def _capture_environment(file_name):
    return [
        "(",
        set_("-o", "posix"),
        seq(),
        set_(),
        ")",
        pipe(),
        command("sort", ">", x(vr("PWD"), "/BO-", file_name, ".env")),
        eol(),
    ]


def _comments():
    return [
        # TODO: Redesign to automatically wrap comment paragraphs at a set line length
        comment(
            "Activate the BriteOnyx framework to manage this project directory tree"
        ),
        comment(),
        note("We MUST NOT EVER ", cc(exit()), " during BriteOnyx activation!"),
        comment(),
        comment("We cannot use a `trap` here"),
        comment("because it will remain active"),
        comment("within the shell"),
        comment("that will `source` this script."),
        comment(),
        comment("Please see HowTo-use_this_project.md for details."),
        rule(),
    ]


def _create_random_tmpdir():
    local = "_result"
    # TODO: Consider capturing this special variable
    tmpdir = "TMPDIR"
    user = "USER"
    return [
        comment("Create random temporary directory"),
        # TODO: Consider creating method for 'mktemp'
        if_(
            string_equals(dq(vr("BO_OS")), "macOS"),
            indent(),
            assign(
                vn(local),
                substitute("mktemp", "-d", "-t", dq("BO-", vr(user))),
            ),
            eol(),
        ),
        else_(
            indent(),
            assign(
                vn(local),
                substitute(
                    "mktemp", "-d", "-t", dq("BO-", vr(user), "-XXXXXXX")
                ),
            ),
            eol(),
        ),
        fi(),
        if_(
            directory_exists(dq(vr(local))),
            indent(),
            assign(vn(tmpdir), vr(local)),
            eol(),
            indent(),
            log_info("Created temporary directory ", sq(vr(tmpdir))),
            eol(),
        ),
        fi(),
        if_(
            directory_exists(dq(vr(tmpdir))),
            indent(),
            remembering(tmpdir),
            eol(),
            indent(),
            export(vn(tmpdir)),
            eol(),
        ),
        else_(
            indent(),
            log_error(
                "Aborting, failed to establish temporary directory ",
                sq(vr(tmpdir)),
            ),
            eol(),
            indent(),
            abort_script(),
        ),
        fi(),
    ]


def _declare_remembering():
    return [
        exported_function(
            "remembering",
            indent(),
            comment("Log that we are remembering variable $1"),
            indent(),
            integer_equal("$#", 0),
            and_(),
            eol(),
            indent(2),
            log_error("Variable name is required"),
            and_(),
            eol(),
            indent(2),
            abort_script(),
            indent(),
            command("local", "-r", "Name=$1"),
            eol(),
            indent(),
            log_debug("Remembering ", vr("Name"), " = '${!Name}'"),
            eol(),
        ),
    ]


def _detect_operating_system():
    # TODO: Make appropriate constants
    local = "_result"
    return [
        comment("Detect operating system"),
        todo("Write as function"),
        todo("Add detection of various Linux, when we care"),
        assign(vn(local), substitute("uname")),
        eol(),
        if_(
            string_equals(dq(vr(local)), "Darwin"),
            indent(),
            export(vn("BO_OS"), "macOS"),
            eol(),
        ),
        else_(
            indent(),
            export(vn("BO_OS"), "UNKNOWN"),
            eol(),
        ),
        fi(),
        remembering("BO_OS"),
        eol(),
    ]


def _remember_paths():
    project_path = x(
        vr("BO_Project"), "/BriteOnyx/bin", ":", vr("BO_Project"), "/bin"
    )
    return [
        note("We can now use BriteOnyx Bash functionality."),
        line(),
        comment("BriteOnyx scripts"),
        comment("must precede"),
        comment("project-specific scripts"),
        comment("on the PATH"),
        comment("so that collisions fail fast."),
        comment("Any collision should be resolved"),
        comment("by renaming"),
        comment("the project-specific script"),
        comment("to avoid that collision."),
        line(),
        export(vn("BO_PathProject"), project_path),
        eol(),
        line(),
        export_if_null("BO_PathSystem", vr("PATH")),
        eol(),
        export_if_null("BO_PathUser", x(vr("HOME"), "/bin")),
        eol(),
        line(),
        remembering("BO_PathProject"),
        eol(),
        remembering("BO_PathSystem"),
        eol(),
        remembering("BO_PathUser"),
        eol(),
    ]


def _remember_project_root():
    return [
        export(vn("BO_Project"), vr("PWD")),
        eol(),
        remembering("BO_Project"),
        eol(),
    ]


def build():
    script = "_Script"
    status = "_Status"
    alias_sample = x(vr("BO_Project"), "/cfg/sample/alias.bash")
    briteonyx_alias_script = x(
        vr("BO_Project"), "/BriteOnyx/bin/lib/alias.bash"
    )
    briteonyx_declare_script = x(
        vr("BO_Project"), "/BriteOnyx/bin/lib/declare.bash"
    )
    configure_python_script = x(
        vr("BO_Project"), "/BriteOnyx/bin/lib/configure-Python.bash"
    )
    context_sample = x(vr("BO_Project"), "/cfg/sample/context.bash")
    log4bash_script = x(vr("PWD"), "/BriteOnyx/bin/lib/declare-log4bash.bash")
    log_directory = x(vr("BO_Project"), "/log")
    project_alias_script = x(vr("BO_Project"), "/alias.bash")
    project_context_script = x(vr("BO_Project"), "/context.bash")
    project_declare_script = x(vr("BO_Project"), "/bin/lib/declare.bash")
    pve_activate_script = x(
        vr("BO_Project"), "/BriteOnyx/bin/lib/pve-activate.bash"
    )
    set_path_script = x(vr("BO_Project"), "/BriteOnyx/bin/lib/set_path.bash")
    return [
        header_activation(),
        _comments(),
        _abort_if_activated(),
        line(),
        _abort_if_missing_pwd(),
        line(),
        _capture_environment("incoming"),
        line(),
        source_or_abort(log4bash_script, script, status),
        line(),
        log_info("Activating ", sq(vr("PWD")), " as the current project"),
        eol(),
        line(),
        _declare_remembering(),
        line(),
        _remember_project_root(),
        remembering("INTERACTIVE_MODE"),
        eol(),
        line(),
        source_or_abort(briteonyx_declare_script, script, status),
        line(),
        _remember_paths(),
        line(),
        source_or_abort(set_path_script, script, status),
        line(),
        _detect_operating_system(),
        line(),
        _create_random_tmpdir(),
        line(),
        command("maybe_create_directory_tree", log_directory),
        eol(),
        line(),
        _activate_python_virtual_environment(
            pve_activate_script, script, status
        ),
        line(),
        maybe_copy_file(alias_sample, project_alias_script),
        eol(),
        maybe_copy_file(context_sample, project_context_script),
        eol(),
        line(),
        maybe_source_or_abort(project_declare_script, script, status),
        line(),
        source_or_abort(project_context_script, script, status),
        line(),
        source_or_abort(briteonyx_alias_script, script, status),
        line(),
        source_or_abort(project_alias_script, script, status),
        line(),
        _capture_environment("outgoing"),
        log_good("BriteOnyx has successfully activated this project"),
        eol(),
        log_info("To get started, try executing the 'cycle' alias..."),
        eol(),
        line(),
        disabled_content_footer(),
    ]


def generate(directory):
    gen(build(), directory, "activate.bash")


"""DisabledContent
source(configure_python_script),
eol(),
source(briteonyx_alias_script),
eol(),
line(),
"""
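
# Hedged usage sketch: the `#!/usr/bin/env false` shebang deliberately blocks
# direct execution, so a caller would import this module and invoke generate()
# itself; the output directory below is an illustrative placeholder, not a
# path taken from the source.
if __name__ == "__main__":
    generate("/path/to/project/BriteOnyx/bin")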
nilq/baby-python
python
from HTML import Curve_Write_HTML
from Data import Curve_Write_Data
from SVG import Curve_Write_SVG


class Curve_Write(Curve_Write_HTML, Curve_Write_Data, Curve_Write_SVG):
    ##!
    ##!
    ##!
    def sifsdifdsgf(self):
        return 0
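
# The aggregate above relies on multiple inheritance: each Curve_Write_* mixin
# contributes its own write methods. A self-contained sketch of the same
# pattern, with hypothetical mixin names (not the real HTML/Data/SVG classes):
class _DemoHTMLWriter:
    def write_html(self, points):
        return "<ul>%s</ul>" % "".join("<li>%s</li>" % p for p in points)


class _DemoDataWriter:
    def write_data(self, points):
        return list(points)


class _DemoCurveWriter(_DemoHTMLWriter, _DemoDataWriter):
    """Each method resolves, via the MRO, to the mixin that defines it."""


assert _DemoCurveWriter().write_data(range(3)) == [0, 1, 2]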
nilq/baby-python
python
""" Python script containing methods for building machine learning model """ import utils.text_processing as tp def classifier(X, y, tokenizer, config): word_ind_dict = tokenizer.word_index glove_path = config.get("glove_path") vocab_size = config.get("vocab_size") seq_len = config.get("seq_len") embed_dim = config.get("embed_dim") num_words = min(vocab_size, len(word_ind_dict) + 1) embed_matrix = tp.get_embedding_matrix( glove_path, word_ind_dict, num_words, embed_dim, vocab_size ) embed_layer = tp.get_embedding_layer(num_words, embed_dim, embed_matrix, seq_len)
nilq/baby-python
python
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/diff-img.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor.FileDescriptor(
    name='proto/diff-img.proto',
    package='Diff',
    syntax='proto3',
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
    serialized_pb=b'\n\x14proto/diff-img.proto\x12\x04\x44iff\",\n\x0b\x44iffRequest\x12\r\n\x05\x66irst\x18\x01 \x01(\t\x12\x0e\n\x06second\x18\x02 \x01(\t\")\n\x0c\x44iffResponse\x12\x0b\n\x03res\x18\x01 \x01(\t\x12\x0c\n\x04ssim\x18\x02 \x01(\x02\x32=\n\x07\x44iffImg\x12\x32\n\x07getDiff\x12\x11.Diff.DiffRequest\x1a\x12.Diff.DiffResponse\"\x00\x62\x06proto3'
)


_DIFFREQUEST = _descriptor.Descriptor(
    name='DiffRequest',
    full_name='Diff.DiffRequest',
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name='first', full_name='Diff.DiffRequest.first', index=0,
            number=1, type=9, cpp_type=9, label=1,
            has_default_value=False, default_value=b"".decode('utf-8'),
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key),
        _descriptor.FieldDescriptor(
            name='second', full_name='Diff.DiffRequest.second', index=1,
            number=2, type=9, cpp_type=9, label=1,
            has_default_value=False, default_value=b"".decode('utf-8'),
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax='proto3',
    extension_ranges=[],
    oneofs=[],
    serialized_start=30,
    serialized_end=74,
)


_DIFFRESPONSE = _descriptor.Descriptor(
    name='DiffResponse',
    full_name='Diff.DiffResponse',
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name='res', full_name='Diff.DiffResponse.res', index=0,
            number=1, type=9, cpp_type=9, label=1,
            has_default_value=False, default_value=b"".decode('utf-8'),
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key),
        _descriptor.FieldDescriptor(
            name='ssim', full_name='Diff.DiffResponse.ssim', index=1,
            number=2, type=2, cpp_type=6, label=1,
            has_default_value=False, default_value=float(0),
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax='proto3',
    extension_ranges=[],
    oneofs=[],
    serialized_start=76,
    serialized_end=117,
)

DESCRIPTOR.message_types_by_name['DiffRequest'] = _DIFFREQUEST
DESCRIPTOR.message_types_by_name['DiffResponse'] = _DIFFRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

DiffRequest = _reflection.GeneratedProtocolMessageType('DiffRequest', (_message.Message,), {
    'DESCRIPTOR': _DIFFREQUEST,
    '__module__': 'proto.diff_img_pb2'
    # @@protoc_insertion_point(class_scope:Diff.DiffRequest)
})
_sym_db.RegisterMessage(DiffRequest)

DiffResponse = _reflection.GeneratedProtocolMessageType('DiffResponse', (_message.Message,), {
    'DESCRIPTOR': _DIFFRESPONSE,
    '__module__': 'proto.diff_img_pb2'
    # @@protoc_insertion_point(class_scope:Diff.DiffResponse)
})
_sym_db.RegisterMessage(DiffResponse)


_DIFFIMG = _descriptor.ServiceDescriptor(
    name='DiffImg',
    full_name='Diff.DiffImg',
    file=DESCRIPTOR,
    index=0,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
    serialized_start=119,
    serialized_end=180,
    methods=[
        _descriptor.MethodDescriptor(
            name='getDiff',
            full_name='Diff.DiffImg.getDiff',
            index=0,
            containing_service=None,
            input_type=_DIFFREQUEST,
            output_type=_DIFFRESPONSE,
            serialized_options=None,
            create_key=_descriptor._internal_create_key,
        ),
    ])
_sym_db.RegisterServiceDescriptor(_DIFFIMG)

DESCRIPTOR.services_by_name['DiffImg'] = _DIFFIMG

# @@protoc_insertion_point(module_scope)
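
# Hedged usage sketch: the classes created above are ordinary protobuf message
# types, so they can be constructed and round-tripped through the proto3 wire
# format directly (the field values here are illustrative).
if __name__ == '__main__':
    req = DiffRequest(first='a.png', second='b.png')
    wire = req.SerializeToString()        # proto3 binary encoding
    parsed = DiffRequest()
    parsed.ParseFromString(wire)
    assert parsed.first == 'a.png'
    resp = DiffResponse(res='diff.png', ssim=0.97)
    print(resp)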
nilq/baby-python
python
"""Example training a nobrainer model for brain extraction.""" import nobrainer # Instantiate object to perform real-time data augmentation on training data. # This object is similar to `keras.preprocessing.image.ImageDataGenerator` but # works with volumetric data. volume_data_generator = nobrainer.VolumeDataGenerator( samplewise_minmax=True, rot90_x=True, rot90_y=True, rot90_z=True, flip_x=True, flip_y=True, flip_z=True, salt_and_pepper=True, gaussian=True, reduce_contrast=True, binarize_y=True) # Instantiate TensorFlow model. model = nobrainer.HighRes3DNet( n_classes=2, # Two classes for brain extraction (i.e., brain vs not brain) optimizer='Adam', learning_rate=0.01, # Model-specific options. one_batchnorm_per_resblock=True, dropout_rate=0.25) # Read in filepaths to features and labels. filepaths = nobrainer.read_csv("features_labels.csv") # Most GPUs do not have enough memory to represent a 256**3 volume during # training, so we train on blocks of data. Here, we set the shape of the # blocks. block_shape = (128, 128, 128) # Train model. nobrainer.train( model=model, volume_data_generator=volume_data_generator, filepaths=filepaths, volume_shape=(256, 256, 256), block_shape=block_shape, strides=block_shape, batch_size=1, # number of blocks per training step n_epochs=1, # number of passes through the training set prefetch=4) # prefetch this many full volumes.
nilq/baby-python
python
from bflib import dice, movement, units
from bflib.attacks import AttackSet, Bite, Gaze
from bflib.attacks import specialproperties
from bflib.characters import specialabilities
from bflib.characters.classes.fighter import Fighter
from bflib.monsters import listing
from bflib.monsters.appearingset import AppearingSet
from bflib.monsters.reptilians.base import Reptilian
from bflib.sizes import Size
from bflib.tables.attackbonus import AttackBonusTable
from bflib.treasuretypes import TreasureType


@listing.register_type
@listing.register_monster
class Basilisk(Reptilian):
    name = "Basilisk"
    hit_dice = dice.D8(6)
    attack_bonus = AttackBonusTable.get_by_hit_dice(hit_dice.amount)
    attack_sets = [
        AttackSet(Bite(dice.D10(1))),
        AttackSet(Gaze(None), special_properties=specialproperties.Petrify),
    ]
    base_armor_class = 16
    morale = 9
    movement = movement.MovementSet(
        walk=units.FeetPerGameTurn(20),
        turning_distance=units.Feet(10))
    no_appearing = AppearingSet(
        dice_dungeon=dice.D6(1), dice_wild=dice.D6(1), dice_lair=dice.D6(1))
    save_as = Fighter.level_table.levels[hit_dice.amount].saving_throws_set
    size = Size.Large
    special_abilities = specialabilities.CombatFrenzy,
    treasure_type = TreasureType.F
    weight = units.Pound(300)
    xp = 610
nilq/baby-python
python
import requests
from bs4 import BeautifulSoup

address = []


def get_html(url):
    r = requests.get(url, headers={
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/97.0.4692.99 Safari/537.36',
        'accept': '*/*'})
    return r


def get_address(html):
    soup = BeautifulSoup(html, 'html.parser')
    items = soup.find('tbody').find_all('tr')
    for item in items:
        address.append(item.find('a').get_text(strip=True))


def repeat():
    for i in range(1, 5):
        url = "https://etherscan.io/accounts/" + str(i)
        html = get_html(url)
        get_address(html.text)


repeat()
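
# One failure mode worth guarding against (a hardening sketch of my own, not
# part of the original script): when etherscan rate-limits or changes layout,
# the page may lack a <tbody>, and soup.find('tbody') returns None.
def get_address_safe(html):
    soup = BeautifulSoup(html, 'html.parser')
    tbody = soup.find('tbody')
    if tbody is None:  # rate-limited, blocked, or layout change
        return
    for item in tbody.find_all('tr'):
        link = item.find('a')
        if link is not None:
            address.append(link.get_text(strip=True))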
nilq/baby-python
python
# Copyright 2018 Braxton Mckee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typed_python import *
import typed_python._types as _types

from nativepython.runtime import Runtime
import unittest


def Compiled(f):
    f = Function(f)
    return Runtime.singleton().compile(f)


class TestPointerToCompilation(unittest.TestCase):
    def test_pointer_operations(self):
        T = ListOf(int)

        def testfun(x: T):
            pointer = x.pointerUnsafe(0)

            pointer.set(20)
            (pointer + 1).set(20)
            (pointer + 2).set((pointer + 1).get() + 1)
            (pointer + 3).initialize((pointer + 2).get())
            (pointer + 4).cast(float).set(1.0)
            return pointer[3]

        compiledFun = Compiled(testfun)

        l1 = T(list(range(10)))
        l2 = T(list(range(10)))

        self.assertEqual(testfun(l1), l1[3])
        self.assertEqual(compiledFun(l2), l2[3])

        self.assertEqual(l1, l2)

        self.assertEqual(l1[0], 20)
        self.assertEqual(l1[1], 20)
        self.assertEqual(l1[2], 21)
        self.assertEqual(l1[3], 21)
        self.assertEqual(l1[4], 0x3ff0000000000000)  # hex representation of 64 bit float 1.0

    def test_bytecount(self):
        def testfun(x):
            return _types.bytecount(type(x))

        self.assertEqual(testfun(0), 8)

        def check(x):
            self.assertEqual(
                testfun(x),
                Runtime.singleton().compile(testfun, {'x': type(x)})(x)
            )

        check(0)
        check(0.0)
        check(ListOf(int)([10]))
        check(Tuple(int, int, int)((10, 10, 10)))

    def test_pointer_subtraction(self):
        T = ListOf(int)

        def testfun(x: T):
            pointer = x.pointerUnsafe(0)
            return (pointer + 1) - pointer

        compiledFun = Compiled(testfun)

        self.assertEqual(testfun(T()), 1)
        self.assertEqual(compiledFun(T()), 1)
nilq/baby-python
python
from blinker import NamedSignal, signal
from sagas.nlu.events import ResultDataset, RequestMeta
from sagas.conf.conf import cf
import sagas.tracker_fn as tc
from pprint import pprint

watch = signal('watch')
# evts=[watch]


@watch.connect
def console_watch(sender, **kw):
    import datetime
    from sagas.nlu.nlu_tools import NluTools

    ds: ResultDataset = kw['dataset']
    meta: RequestMeta = kw['meta']
    print(f"****** watch {sender}")
    tc.emp('magenta', meta)

    tools = NluTools()
    if cf.is_enabled('print_tree'):
        tools.main_domains(meta.sents, lang=meta.lang,
                           engine=meta.engine, print_domains=False)
    return datetime.datetime.now()
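
# Hedged sketch of the emitting side: blinker passes keyword arguments through
# to every connected receiver. `ds` and `meta` stand for ResultDataset and
# RequestMeta instances built elsewhere (their constructors are not shown in
# this module), so the call is left as a comment:
#
#     results = watch.send('viz_sample', dataset=ds, meta=meta)
#     for receiver, return_value in results:
#         print(receiver, return_value)  # console_watch returns a datetime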
nilq/baby-python
python
import numpy as np


class SpatioTemporalSignal(object):
    """
    This class is used to create a group of N signals that interact in space
    and time. The way that this interaction is carried out can be fully
    determined by the user through a spatiotemporal matrix of vectors that
    specify how the signals mix.
    """

    def __init__(self, dt=0.1, delay=10, Tmax=100, Nseries=2):
        # Initialize time parameters
        self.dt = dt
        self.delay = delay
        self.Tmax = Tmax
        self.Nseries = Nseries

        # Put time in proper units
        self.NTmax = int(self.Tmax * 1.0 / self.dt)
        self.Ndelay = int(self.delay * 1.0 / self.dt)
        self.time = np.arange(self.NTmax) * self.dt

        # Initialize series
        self.series = np.zeros((self.Nseries, self.NTmax))

        # Initialize interaction
        self.interaction = np.zeros((self.Nseries, self.Nseries, self.NTmax))

    def set_initial_conditions(self, initial):
        """Set the initial conditions"""
        self.series[..., 0] = initial

    def construct_series(self):
        """
        This is the function that constructs the series with a given
        interaction. Doesn't work for one-dimensional series.
        """
        for t in range(self.NTmax - 1):
            # First let's set the correct delay
            if t + 1 > self.Ndelay:
                delay_aux = self.Ndelay
            else:
                delay_aux = t + 1

            # Update signal_index
            for series_idx in range(self.Nseries):
                # Initialize vector to save time contributions
                vec_aux = np.zeros(self.Nseries)

                # Accumulate time contributions
                for delay_index in range(delay_aux):
                    aux1 = self.series[:, t - delay_index]
                    aux2 = self.interaction[series_idx, :, delay_index]
                    vec_aux += aux1 * aux2

                # Combine time contributions and normalize
                self.series[series_idx, t + 1] = np.sum(vec_aux) / delay_aux

    def construct_series_verbose(self):
        """
        This is the function that constructs the series with a given
        interaction, printing every intermediate value as it goes.
        """
        for t in range(self.NTmax - 1):
            print('------------')
            print('Time t', t)
            # First let's set the correct delay
            if t + 1 > self.Ndelay:
                delay_aux = self.Ndelay
            else:
                delay_aux = t + 1

            # Update signal_index
            for series_idx in range(self.Nseries):
                print('series_idx', series_idx)
                print('delay_aux of delay', delay_aux, self.Ndelay)
                # Initialize vector to save time contributions
                vec_aux = np.zeros(self.Nseries)

                # Accumulate time contributions
                for delay_index in range(delay_aux):
                    aux1 = self.series[:, t - delay_index]
                    aux2 = self.interaction[series_idx, :, delay_index]
                    print('series', aux1)
                    print('interactions', aux2)
                    vec_aux += aux1 * aux2

                # Combine time contributions and normalize
                print('Contribution ', vec_aux)
                print('Total contribution (BN) ', np.sum(vec_aux))
                self.series[series_idx, t + 1] = np.sum(vec_aux) / delay_aux
                print('next value series', self.series[series_idx, t + 1])

    def set_interaction(self, interaction_matrix):
        """
        This function is used whenever the user wants to pass a particular
        interaction matrix
        """
        self.interaction = interaction_matrix


class TrigonometricMix(SpatioTemporalSignal):
    """
    This should allow us to initialize mixed signals easier
    """

    def __init__(self, dt=0.1, delay=10, Tmax=100, Nseries=2,
                 phase_m=None, frequency_m=None):
        """
        Overrides the initialization but also gets the frequency and phase
        matrices that are sufficient to determine a trigonometric mix.
        """
        super(TrigonometricMix, self).__init__(dt, delay, Tmax, Nseries)
        self.phase_matrix = phase_m
        self.frequency_matrix = frequency_m

        # Create trigonometric matrix
        aux = []
        for phase, frequency in zip(self.phase_matrix.flatten(),
                                    self.frequency_matrix.flatten()):
            # Each kernel is a cosine at the given frequency, offset by its phase
            aux.append(np.cos(frequency * self.time + phase))

        # Transform to array and reshape
        aux = np.array(aux)
        self.interaction = aux.reshape((self.Nseries, self.Nseries, self.NTmax))


def main():
    print('This is all right')
    return SpatioTemporalSignal()


if __name__ == '__main__':
    x = main()
nilq/baby-python
python
from . import Token


class TreeNode(object):
    # dictionary mapping {str: TreeNode}
    id_treeNodes = {}

    @staticmethod
    def getTreeNode(idx):
        return TreeNode.id_treeNodes[idx]

    def __init__(self, idx, tkn):
        self._id = idx
        self._tkn = tkn
        self._children = {}
        TreeNode.id_treeNodes[idx] = self

    def addChild(self, dep, child):
        try:
            tns = self._children[dep]
        except KeyError:
            self._children[dep] = {child}  # start a new set for this dependency
        else:
            tns.add(child)  # set.add mutates in place (and returns None)
        return None

    def getId(self):
        return self._id

    def getToken(self):
        return self._tkn

    def getChildren(self):
        return self._children

    def compareTo(self, z):
        if not isinstance(z, TreeNode):
            raise ValueError
        return self._tkn.compareTo(z._tkn)

    def equals(self, o):
        return self.compareTo(o) == 0

    def toString(self):
        return self._tkn.toString()

    def getTreeStr(self):
        id_str = {}
        if len(self._children) > 0:
            for dep in self._children.keys():
                nodes = self._children[dep]
                s = ''
                for node in nodes:
                    if dep.startswith('prep_') or dep.startswith('conj_'):
                        s = dep[5:] + ' '
                    s = s + node.getTreeStr()
                    id_str[node.getId()] = s
        id_str[self._id] = self._tkn.getLemma()
        result = ' '.join([id_str[x] for x in id_str.keys()])
        return result
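
# Hedged smoke test: _FakeToken is a stand-in defining only the methods
# TreeNode actually calls (compareTo, toString, getLemma), not the real Token
# class imported above.
class _FakeToken:
    def __init__(self, lemma):
        self.lemma = lemma

    def compareTo(self, other):
        return (self.lemma > other.lemma) - (self.lemma < other.lemma)

    def toString(self):
        return self.lemma

    def getLemma(self):
        return self.lemma


if __name__ == '__main__':
    root = TreeNode(0, _FakeToken('eat'))
    child = TreeNode(1, _FakeToken('apple'))
    root.addChild('dobj', child)
    print(root.getTreeStr())  # child phrases print before the head lemma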
nilq/baby-python
python
from flask import Flask

from chatpy.api import API
from chatpy.auth import TokenAuthHandler

app = Flask(__name__)
app.config.from_object('config')

chatwork = API(auth_handler=TokenAuthHandler(app.config['CHATWORK_TOKEN']))

from app.endpoint import *  # noqa: E402,F401 -- endpoints need `app` defined first
nilq/baby-python
python
#!/usr/bin/python
# -*- coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = '''
---
module: aci_fabric_scheduler

short_description: This module creates ACI schedulers.

version_added: "2.8"

description:
- With this module you can create schedule policies that can be a shell, a oneTime execution, or recurring.

options:
  name:
    description:
    - The name of the Scheduler.
    required: yes
    aliases: [ name, scheduler_name ]
  description:
    description:
    - Description for the Scheduler.
    aliases: [ descr ]
  recurring:
    description:
    - Set to "True" to make the Scheduler recurring, or "False" for a oneTime
      execution. For a shell, just exclude this option from the task.
    type: bool
    default: 'no'
  windowname:
    description:
    - This is the name for your recurring or oneTime execution window.
  concurCap:
    description:
    - This is the number of devices that can be executed on at a time.
    type: int
  maxTime:
    description:
    - This is the MAX amount of time a process can be executed.
  date:
    description:
    - This is the date and time that the scheduler will execute.
  hour:
    description:
    - This sets the hour of execution.
  minute:
    description:
    - This sets the minute of execution, used in conjunction with hour.
  day:
    description:
    - This sets the day when execution will take place.
    default: "every-day"
    choices: ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'even-day', 'odd-day', 'every-day']
  state:
    description:
    - Use C(present) or C(absent) for adding or removing.
    - Use C(query) for listing an object or multiple objects.
    default: present
    choices: [ absent, present, query ]

extends_documentation_fragment: aci

author:
- Steven Gerhart (@sgerhart)
'''

EXAMPLES = '''
- name: Simple Scheduler (Empty)
  aci_fabric_scheduler:
    host: "{{ inventory_hostname }}"
    username: "{{ user }}"
    password: "{{ pass }}"
    validate_certs: no
    name: simpleScheduler
    state: present

- name: Remove Simple Scheduler
  aci_fabric_scheduler:
    host: "{{ inventory_hostname }}"
    username: "{{ user }}"
    password: "{{ pass }}"
    validate_certs: no
    name: simpleScheduler
    state: absent

- name: One Time Scheduler
  aci_fabric_scheduler:
    host: "{{ inventory_hostname }}"
    username: "{{ user }}"
    password: "{{ pass }}"
    validate_certs: no
    name: OneTime
    windowname: OneTime
    recurring: False
    concurCap: 20
    date: "2018-11-20T24:00:00"
    state: present

- name: Recurring Scheduler
  aci_fabric_scheduler:
    host: "{{ inventory_hostname }}"
    username: "{{ user }}"
    password: "{{ pass }}"
    validate_certs: no
    name: Recurring
    windowname: Recurring
    recurring: True
    concurCap: 20
    hour: 13
    minute: 30
    day: Tuesday
    state: present
'''

RETURN = '''
current:
  description: The existing configuration from the APIC after the module has finished
  returned: success
  type: list
  sample:
    [
        {
            "fvTenant": {
                "attributes": {
                    "descr": "Production environment",
                    "dn": "uni/tn-production",
                    "name": "production",
                    "nameAlias": "",
                    "ownerKey": "",
                    "ownerTag": ""
                }
            }
        }
    ]
error:
  description: The error information as returned from the APIC
  returned: failure
  type: dict
  sample:
    {
        "code": "122",
        "text": "unknown managed object class foo"
    }
raw:
  description: The raw output returned by the APIC REST API (xml or json)
  returned: parse error
  type: str
  sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
  description: The actual/minimal configuration pushed to the APIC
  returned: info
  type: list
  sample:
    {
        "fvTenant": {
            "attributes": {
                "descr": "Production environment"
            }
        }
    }
previous:
  description: The original configuration from the APIC before the module has started
  returned: info
  type: list
  sample:
    [
        {
            "fvTenant": {
                "attributes": {
                    "descr": "Production",
                    "dn": "uni/tn-production",
                    "name": "production",
                    "nameAlias": "",
                    "ownerKey": "",
                    "ownerTag": ""
                }
            }
        }
    ]
proposed:
  description: The assembled configuration from the user-provided parameters
  returned: info
  type: dict
  sample:
    {
        "fvTenant": {
            "attributes": {
                "descr": "Production environment",
                "name": "production"
            }
        }
    }
filter_string:
  description: The filter string used for the request
  returned: failure or debug
  type: str
  sample: ?rsp-prop-include=config-only
method:
  description: The HTTP method used for the request to the APIC
  returned: failure or debug
  type: str
  sample: POST
response:
  description: The HTTP response from the APIC
  returned: failure or debug
  type: str
  sample: OK (30 bytes)
status:
  description: The HTTP status from the APIC
  returned: failure or debug
  type: int
  sample: 200
url:
  description: The HTTP url used for the request to the APIC
  returned: failure or debug
  type: str
  sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''

import json
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule


def main():
    argument_spec = aci_argument_spec()
    argument_spec.update(
        name=dict(type='str', aliases=['name', 'scheduler_name']),  # Not required for querying all objects
        description=dict(type='str', aliases=['descr']),
        windowname=dict(type='str', aliases=['windowname']),
        recurring=dict(type='bool'),
        concurCap=dict(type='int'),  # Number of devices it will run against concurrently
        maxTime=dict(type='str'),  # The amount of minutes a process will be able to run (unlimited or dd:hh:mm:ss)
        date=dict(type='str', aliases=['date']),  # The date the process will run YYYY-MM-DDTHH:MM:SS
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        hour=dict(type='int'),
        minute=dict(type='int'),
        day=dict(type='str', default='every-day', choices=['Monday', 'Tuesday', 'Wednesday',
                                                           'Thursday', 'Friday', 'Saturday',
                                                           'Sunday', 'every-day', 'even-day',
                                                           'odd-day']),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'absent', ['name']],
            ['state', 'present', ['name']],
        ],
    )

    state = module.params['state']
    name = module.params['name']
    windowname = module.params['windowname']
    recurring = module.params['recurring']
    date = module.params['date']
    hour = module.params['hour']
    minute = module.params['minute']
    maxTime = module.params['maxTime']
    concurCap = module.params['concurCap']
    day = module.params['day']
    description = module.params['description']

    if recurring:
        child_configs = [dict(trigRecurrWindowP=dict(attributes=dict(name=windowname,
                                                                     hour=hour,
                                                                     minute=minute,
                                                                     procCap=maxTime,
                                                                     concurCap=concurCap,
                                                                     day=day,)))]
    elif recurring is False:
        child_configs = [dict(trigAbsWindowP=dict(attributes=dict(name=windowname,
                                                                  procCap=maxTime,
                                                                  concurCap=concurCap,
                                                                  date=date,)))]
    else:
        child_configs = []

    aci = ACIModule(module)
    aci.construct_url(
        root_class=dict(
            aci_class='trigSchedP',
            aci_rn='fabric/schedp-{0}'.format(name),
            target_filter={'name': name},
            module_object=name,
        ),
    )

    aci.get_existing()

    if state == 'present':
        aci.payload(
            aci_class='trigSchedP',
            class_config=dict(
                name=name,
                descr=description,
            ),
            child_configs=child_configs,
        )

        aci.get_diff(aci_class='trigSchedP')

        aci.post_config()

    elif state == 'absent':
        aci.delete_config()

    aci.exit_json()


if __name__ == "__main__":
    main()
nilq/baby-python
python
# Copyright (c) 2021, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

from ..libs.client import ProMortClient
from ..libs.client import ProMortAuthenticationError

from argparse import ArgumentError
import sys, requests
from urllib.parse import urljoin
from functools import reduce


class SlideImporter(object):
    def __init__(self, host, user, passwd, session_id, logger):
        self.promort_client = ProMortClient(host, user, passwd, session_id)
        self.logger = logger

    def _get_case_label(self, slide_label):
        return slide_label.split('-')[0]

    def _import_case(self, case_label):
        response = self.promort_client.post(
            api_url='api/cases/',
            payload={'id': case_label}
        )
        if response.status_code == requests.codes.CREATED:
            self.logger.info('Case created')
        elif response.status_code == requests.codes.CONFLICT:
            self.logger.info('Case already exists')
        elif response.status_code == requests.codes.BAD:
            self.logger.error('ERROR while creating Case: {0}'.format(response.text))
            sys.exit('ERROR while creating Case')

    def _import_slide(self, slide_label, case_label, omero_id=None, mirax_file=False,
                      omero_host=None, ignore_duplicated=False):
        if mirax_file:
            file_type = 'MIRAX'
        else:
            file_type = 'OMERO_IMG'
        response = self.promort_client.post(
            api_url='api/slides/',
            payload={'id': slide_label, 'case': case_label, 'omero_id': omero_id,
                     'image_type': file_type}
        )
        if response.status_code == requests.codes.CREATED:
            self.logger.info('Slide created')
            if omero_id is not None and omero_host is not None:
                self._update_slide(slide_label, omero_id, mirax_file, omero_host)
        elif response.status_code == requests.codes.CONFLICT:
            if ignore_duplicated:
                self.logger.info('Slide already exists')
                if omero_id is not None and omero_host is not None:
                    self._update_slide(slide_label, omero_id, mirax_file, omero_host)
            else:
                self.logger.error('A slide with the same ID already exists')
                sys.exit('ERROR: duplicated slide')
        elif response.status_code == requests.codes.BAD:
            self.logger.error('ERROR while creating Slide: {0}'.format(response.text))
            sys.exit('ERROR while creating Slide')

    def _update_slide(self, slide_label, omero_id, mirax_file, omero_host):
        if mirax_file:
            join_items = (omero_host, 'ome_seadragon/mirax/deepzoom/get/',
                          '{0}_metadata.json'.format(slide_label))
        else:
            join_items = (omero_host, 'ome_seadragon/deepzoom/get/',
                          '{0}_metadata.json'.format(omero_id))
        ome_url = reduce(urljoin, join_items)
        response = requests.get(ome_url)
        if response.status_code == requests.codes.OK:
            slide_mpp = response.json()['image_mpp']
            response = self.promort_client.put(
                api_url='api/slides/{0}/'.format(slide_label),
                payload={'image_microns_per_pixel': slide_mpp, 'omero_id': omero_id}
            )
            self.logger.info('Slide updated')

    def run(self, args):
        if args.case_label is None and not args.extract_case:
            raise ArgumentError(args.case_label,
                                message='ERROR! Must specify a case label or enable the extract-case flag')
        if args.case_label is not None:
            if args.extract_case:
                self.logger.info('Using label passed through CLI, ignoring the extract-case flag')
            case_label = args.case_label
        else:
            case_label = self._get_case_label(args.slide_label)
        try:
            self.promort_client.login()
        except ProMortAuthenticationError:
            self.logger.critical('Authentication error, exit')
            sys.exit('Authentication error, exit')
        self._import_case(case_label)
        self._import_slide(args.slide_label, case_label, args.omero_id, args.mirax,
                           args.omero_host, args.ignore_duplicated)
        self.logger.info('Import job completed')
        self.promort_client.logout()


help_doc = """
TBD
"""


def implementation(host, user, passwd, session_id, logger, args):
    slide_importer = SlideImporter(host, user, passwd, session_id, logger)
    slide_importer.run(args)


def make_parser(parser):
    parser.add_argument('--slide-label', type=str, required=True, help='slide label')
    parser.add_argument('--case-label', type=str, required=False, help='case label')
    parser.add_argument('--omero-id', type=int,
                        help='OMERO ID, only required if the slide was previously uploaded to an OMERO server')
    parser.add_argument('--omero-host', type=str,
                        help='OMERO host used to retrieve slide details (if omero-id was specified)')
    parser.add_argument('--mirax', action='store_true', help='slide is a 3DHISTECH MIRAX')
    parser.add_argument('--extract-case', action='store_true', help='extract case ID from slide label')
    parser.add_argument('--ignore-duplicated', action='store_true',
                        help='if enabled, trying to import an existing slide will not produce an error')


def register(registration_list):
    registration_list.append(('slides_importer', help_doc, make_parser, implementation))
nilq/baby-python
python
import billboard
import json
import urllib.request
from urllib.parse import quote

apikey = 'APIKEY'

# make the empty dictionary
songs = {}

# loop through the years we're interested in
for x in range(1960, 2016):
    # another dictionary inside
    songs[x] = {}
    # get the chart for the last week of that year
    chart = billboard.ChartData('hot-100', '%s-12-19' % str(x))
    # for every song on the chart, keep its rank, title, and artist
    for song in chart:
        songs[x][song.rank] = {}
        songs[x][song.rank]['rank'] = song.rank
        songs[x][song.rank]['title'] = song.title
        songs[x][song.rank]['artist'] = song.artist
        # look up the song in musixmatch
        api_url = "http://api.musixmatch.com/ws/1.1/matcher.track.get?apikey=%s&q_artist=%s&q_track=%s" % (
            apikey, quote(song.artist, safe=''), quote(song.title, safe=''))
        url = urllib.request.urlopen(api_url).read().decode('UTF-8')
        result = json.loads(url)
        songs[x][song.rank]['musixmatch'] = result
        # use lyrics id to get lyrics info and store that instead of all the junk from musixmatch
        api_url_lyrics = "http://api.musixmatch.com/ws/1.1/matcher.lyrics.get?apikey=%s&q_track=%s&q_artist=%s" % (
            apikey, quote(song.title, safe=''), quote(song.artist, safe=''))
        url_lyrics = urllib.request.urlopen(api_url_lyrics).read().decode('UTF-8')
        lyrics = json.loads(url_lyrics)
        # checks against any songs not in MusixMatch database and any songs without lyrics
        if result['message']['header']['status_code'] != 404 and result['message']['body']['track']['has_lyrics'] == 1:
            lyrics_id = result['message']['body']['track']['lyrics_id']
            get_lyrics = lyrics['message']['body']['lyrics']['lyrics_body']
            songs[x][song.rank]['lyrics'] = get_lyrics

# dump all the data to a json file (readable output)
with open('song-data.json', 'w') as out_file:
    for x in sorted(songs):
        out_file.write('>')
        json.dump(x, out_file)
        out_file.write('\n')
        for y in songs[x]:
            if 'lyrics' in songs[x][y]:
                out_file.write('(')
                json.dump(y, out_file)
                out_file.write(') ' + songs[x][y]['title'] + ' - ' + songs[x][y]['artist'])
                out_file.write('\n')
                json.dump(songs[x][y]['lyrics'].replace('\n', ' '), out_file)
                out_file.write('\n')
        out_file.write('\n')
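
# Hedged hardening sketch (the helper name is mine, not from the script):
# every Musixmatch response carries the same message.header envelope, and
# validating both responses before indexing avoids KeyErrors on missing
# tracks or lyrics.
def extract_lyrics(track_result, lyrics_result):
    """Return lyrics text, or None when either API response is unusable."""
    if track_result['message']['header']['status_code'] == 404:
        return None
    if not track_result['message']['body']['track']['has_lyrics']:
        return None
    if lyrics_result['message']['header']['status_code'] != 200:
        return None
    return lyrics_result['message']['body']['lyrics']['lyrics_body']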
nilq/baby-python
python
#! /usr/bin/env python

"""Perform massive transformations on a document tree created from the LaTeX
of the Python documentation, and dump the ESIS data for the transformed tree.
"""

import errno
import esistools
import re
import string
import sys
import xml.dom
import xml.dom.minidom

ELEMENT = xml.dom.Node.ELEMENT_NODE
ENTITY_REFERENCE = xml.dom.Node.ENTITY_REFERENCE_NODE
TEXT = xml.dom.Node.TEXT_NODE


class ConversionError(Exception):
    pass


ewrite = sys.stderr.write
try:
    # We can only do this trick on Unix (if tput is on $PATH)!
    if sys.platform != "posix" or not sys.stderr.isatty():
        raise ImportError
    import commands
except ImportError:
    bwrite = ewrite
else:
    def bwrite(s, BOLDON=commands.getoutput("tput bold"),
               BOLDOFF=commands.getoutput("tput sgr0")):
        ewrite("%s%s%s" % (BOLDON, s, BOLDOFF))


PARA_ELEMENT = "para"

DEBUG_PARA_FIXER = 0

if DEBUG_PARA_FIXER:
    def para_msg(s):
        ewrite("*** %s\n" % s)
else:
    def para_msg(s):
        pass


def get_first_element(doc, gi):
    for n in doc.childNodes:
        if n.nodeName == gi:
            return n


def extract_first_element(doc, gi):
    node = get_first_element(doc, gi)
    if node is not None:
        doc.removeChild(node)
    return node


def get_documentElement(node):
    result = None
    for child in node.childNodes:
        if child.nodeType == ELEMENT:
            result = child
    return result


def set_tagName(elem, gi):
    elem.nodeName = elem.tagName = gi


def find_all_elements(doc, gi):
    nodes = []
    if doc.nodeName == gi:
        nodes.append(doc)
    for child in doc.childNodes:
        if child.nodeType == ELEMENT:
            if child.tagName == gi:
                nodes.append(child)
            for node in child.getElementsByTagName(gi):
                nodes.append(node)
    return nodes


def find_all_child_elements(doc, gi):
    nodes = []
    for child in doc.childNodes:
        if child.nodeName == gi:
            nodes.append(child)
    return nodes


def find_all_elements_from_set(doc, gi_set):
    return __find_all_elements_from_set(doc, gi_set, [])


def __find_all_elements_from_set(doc, gi_set, nodes):
    if doc.nodeName in gi_set:
        nodes.append(doc)
    for child in doc.childNodes:
        if child.nodeType == ELEMENT:
            __find_all_elements_from_set(child, gi_set, nodes)
    return nodes


def simplify(doc, fragment):
    # Try to rationalize the document a bit, since these things are simply
    # not valid SGML/XML documents as they stand, and need a little work.
    documentclass = "document"
    inputs = []
    node = extract_first_element(fragment, "documentclass")
    if node is not None:
        documentclass = node.getAttribute("classname")
    node = extract_first_element(fragment, "title")
    if node is not None:
        inputs.append(node)
    # update the name of the root element
    node = get_first_element(fragment, "document")
    if node is not None:
        set_tagName(node, documentclass)
    while 1:
        node = extract_first_element(fragment, "input")
        if node is None:
            break
        inputs.append(node)
    if inputs:
        docelem = get_documentElement(fragment)
        inputs.reverse()
        for node in inputs:
            text = doc.createTextNode("\n")
            docelem.insertBefore(text, docelem.firstChild)
            docelem.insertBefore(node, text)
        docelem.insertBefore(doc.createTextNode("\n"), docelem.firstChild)
    while fragment.firstChild and fragment.firstChild.nodeType == TEXT:
        fragment.removeChild(fragment.firstChild)


def cleanup_root_text(doc):
    discards = []
    skip = 0
    for n in doc.childNodes:
        prevskip = skip
        skip = 0
        if n.nodeType == TEXT and not prevskip:
            discards.append(n)
        elif n.nodeName == "COMMENT":
            skip = 1
    for node in discards:
        doc.removeChild(node)


DESCRIPTOR_ELEMENTS = (
    "cfuncdesc", "cvardesc", "ctypedesc",
    "classdesc", "memberdesc", "memberdescni", "methoddesc", "methoddescni",
    "excdesc", "funcdesc", "funcdescni", "opcodedesc",
    "datadesc", "datadescni",
    )


def fixup_descriptors(doc, fragment):
    sections = find_all_elements(fragment, "section")
    for section in sections:
        find_and_fix_descriptors(doc, section)


def find_and_fix_descriptors(doc, container):
    children = container.childNodes
    for child in children:
        if child.nodeType == ELEMENT:
            tagName = child.tagName
            if tagName in DESCRIPTOR_ELEMENTS:
                rewrite_descriptor(doc, child)
            elif tagName == "subsection":
                find_and_fix_descriptors(doc, child)


def rewrite_descriptor(doc, descriptor):
    #
    # Do these things:
    #   1. Add an "index='no'" attribute to the element if the tagName
    #      ends in 'ni', removing the 'ni' from the name.
    #   2. Create a <signature> from the name attribute
    #   2a. Create an <args> if it appears to be available.
    #   3. Create additional <signature>s from <*line{,ni}> elements,
    #      if found.
    #   4. If a <versionadded> is found, move it to an attribute on the
    #      descriptor.
    #   5. Move remaining child nodes to a <description> element.
    #   6. Put it back together.
    #
    # 1.
    descname = descriptor.tagName
    index = 1
    if descname[-2:] == "ni":
        descname = descname[:-2]
        descriptor.setAttribute("index", "no")
        set_tagName(descriptor, descname)
        index = 0
    desctype = descname[:-4]  # remove 'desc'
    linename = desctype + "line"
    if not index:
        linename = linename + "ni"
    # 2.
    signature = doc.createElement("signature")
    name = doc.createElement("name")
    signature.appendChild(doc.createTextNode("\n "))
    signature.appendChild(name)
    name.appendChild(doc.createTextNode(descriptor.getAttribute("name")))
    descriptor.removeAttribute("name")
    # 2a.
    if descriptor.hasAttribute("var"):
        if descname != "opcodedesc":
            raise RuntimeError, \
                  "got 'var' attribute on descriptor other than opcodedesc"
        variable = descriptor.getAttribute("var")
        if variable:
            args = doc.createElement("args")
            args.appendChild(doc.createTextNode(variable))
            signature.appendChild(doc.createTextNode("\n "))
            signature.appendChild(args)
        descriptor.removeAttribute("var")
    newchildren = [signature]
    children = descriptor.childNodes
    pos = skip_leading_nodes(children)
    if pos < len(children):
        child = children[pos]
        if child.nodeName == "args":
            # move <args> to <signature>, or remove if empty:
            child.parentNode.removeChild(child)
            if len(child.childNodes):
                signature.appendChild(doc.createTextNode("\n "))
                signature.appendChild(child)
    signature.appendChild(doc.createTextNode("\n "))
    # 3, 4.
    pos = skip_leading_nodes(children, pos)
    while pos < len(children) \
          and children[pos].nodeName in (linename, "versionadded"):
        if children[pos].tagName == linename:
            # this is really a supplemental signature, create <signature>
            oldchild = children[pos].cloneNode(1)
            try:
                sig = methodline_to_signature(doc, children[pos])
            except KeyError:
                print oldchild.toxml()
                raise
            newchildren.append(sig)
        else:
            # <versionadded added=...>
            descriptor.setAttribute(
                "added", children[pos].getAttribute("version"))
        pos = skip_leading_nodes(children, pos + 1)
    # 5.
    description = doc.createElement("description")
    description.appendChild(doc.createTextNode("\n"))
    newchildren.append(description)
    move_children(descriptor, description, pos)
    last = description.childNodes[-1]
    if last.nodeType == TEXT:
        last.data = string.rstrip(last.data) + "\n "
    # 6.
    # should have nothing but whitespace and signature lines in <descriptor>;
    # discard them
    while descriptor.childNodes:
        descriptor.removeChild(descriptor.childNodes[0])
    for node in newchildren:
        descriptor.appendChild(doc.createTextNode("\n "))
        descriptor.appendChild(node)
    descriptor.appendChild(doc.createTextNode("\n"))


def methodline_to_signature(doc, methodline):
    signature = doc.createElement("signature")
    signature.appendChild(doc.createTextNode("\n "))
    name = doc.createElement("name")
    name.appendChild(doc.createTextNode(methodline.getAttribute("name")))
    methodline.removeAttribute("name")
    signature.appendChild(name)
    if len(methodline.childNodes):
        args = doc.createElement("args")
        signature.appendChild(doc.createTextNode("\n "))
        signature.appendChild(args)
        move_children(methodline, args)
    signature.appendChild(doc.createTextNode("\n "))
    return signature


def move_children(origin, dest, start=0):
    children = origin.childNodes
    while start < len(children):
        node = children[start]
        origin.removeChild(node)
        dest.appendChild(node)


def handle_appendix(doc, fragment):
    # must be called after simplify() if document is multi-rooted to begin with
    docelem = get_documentElement(fragment)
    toplevel = docelem.tagName == "manual" and "chapter" or "section"
    appendices = 0
    nodes = []
    for node in docelem.childNodes:
        if appendices:
            nodes.append(node)
        elif node.nodeType == ELEMENT:
            appnodes = node.getElementsByTagName("appendix")
            if appnodes:
                appendices = 1
                parent = appnodes[0].parentNode
                parent.removeChild(appnodes[0])
                parent.normalize()
    if nodes:
        map(docelem.removeChild, nodes)
        docelem.appendChild(doc.createTextNode("\n\n\n"))
        back = doc.createElement("back-matter")
        docelem.appendChild(back)
        back.appendChild(doc.createTextNode("\n"))
        while nodes and nodes[0].nodeType == TEXT \
              and not string.strip(nodes[0].data):
            del nodes[0]
        map(back.appendChild, nodes)
        docelem.appendChild(doc.createTextNode("\n"))


def handle_labels(doc, fragment):
    for label in find_all_elements(fragment, "label"):
        id = label.getAttribute("id")
        if not id:
            continue
        parent = label.parentNode
        parentTagName = parent.tagName
        if parentTagName == "title":
            parent.parentNode.setAttribute("id", id)
        else:
            parent.setAttribute("id", id)
        # now, remove <label id="..."/> from parent:
        parent.removeChild(label)
        if parentTagName == "title":
            parent.normalize()
            children = parent.childNodes
            if children[-1].nodeType == TEXT:
                children[-1].data = string.rstrip(children[-1].data)


def fixup_trailing_whitespace(doc, wsmap):
    queue = [doc]
    while queue:
        node = queue[0]
        del queue[0]
        if wsmap.has_key(node.nodeName):
            ws = wsmap[node.tagName]
            children = node.childNodes
            children.reverse()
            if children[0].nodeType == TEXT:
                data = string.rstrip(children[0].data) + ws
                children[0].data = data
            children.reverse()
            # hack to get the title in place:
            if node.tagName == "title" \
               and node.parentNode.firstChild.nodeType == ELEMENT:
                node.parentNode.insertBefore(doc.createTextNode("\n "),
                                             node.parentNode.firstChild)
        for child in node.childNodes:
            if child.nodeType == ELEMENT:
                queue.append(child)


def normalize(doc):
    for node in doc.childNodes:
        if node.nodeType == ELEMENT:
            node.normalize()


def cleanup_trailing_parens(doc, element_names):
    d = {}
    for gi in element_names:
        d[gi] = gi
    rewrite_element = d.has_key
    queue = []
    for node in doc.childNodes:
        if node.nodeType == ELEMENT:
            queue.append(node)
    while queue:
        node = queue[0]
        del queue[0]
        if rewrite_element(node.tagName):
            children = node.childNodes
            if len(children) == 1 \
               and children[0].nodeType == TEXT:
                data = children[0].data
                if data[-2:] == "()":
                    children[0].data = data[:-2]
        else:
            for child in node.childNodes:
                if child.nodeType == ELEMENT:
                    queue.append(child)


def contents_match(left, right):
    left_children = left.childNodes
    right_children = right.childNodes
    if len(left_children) != len(right_children):
        return 0
    for l, r in map(None, left_children, right_children):
        nodeType = l.nodeType
        if nodeType != r.nodeType:
            return 0
        if nodeType == ELEMENT:
            if l.tagName != r.tagName:
                return 0
            # should check attributes, but that's not a problem here
            if not contents_match(l, r):
                return 0
        elif nodeType == TEXT:
            if l.data != r.data:
                return 0
        else:
            # not quite right, but good enough
            return 0
    return 1


def create_module_info(doc, section):
    # Heavy.
    node = extract_first_element(section, "modulesynopsis")
    if node is None:
        return
    set_tagName(node, "synopsis")
    lastchild = node.childNodes[-1]
    if lastchild.nodeType == TEXT \
       and lastchild.data[-1:] == ".":
        lastchild.data = lastchild.data[:-1]
    modauthor = extract_first_element(section, "moduleauthor")
    if modauthor:
        set_tagName(modauthor, "author")
        modauthor.appendChild(doc.createTextNode(
            modauthor.getAttribute("name")))
        modauthor.removeAttribute("name")
    platform = extract_first_element(section, "platform")
    if section.tagName == "section":
        modinfo_pos = 2
        modinfo = doc.createElement("moduleinfo")
        moddecl = extract_first_element(section, "declaremodule")
        name = None
        if moddecl:
            modinfo.appendChild(doc.createTextNode("\n "))
            name = moddecl.attributes["name"].value
            namenode = doc.createElement("name")
            namenode.appendChild(doc.createTextNode(name))
            modinfo.appendChild(namenode)
            type = moddecl.attributes.get("type")
            if type:
                type = type.value
                modinfo.appendChild(doc.createTextNode("\n "))
                typenode = doc.createElement("type")
                typenode.appendChild(doc.createTextNode(type))
                modinfo.appendChild(typenode)
        versionadded = extract_first_element(section, "versionadded")
        if versionadded:
            modinfo.setAttribute("added", versionadded.getAttribute("version"))
        title = get_first_element(section, "title")
        if title:
            children = title.childNodes
            if len(children) >= 2 \
               and children[0].nodeName == "module" \
               and children[0].childNodes[0].data == name:
                # this is it; morph the <title> into <short-synopsis>
                first_data = children[1]
                if first_data.data[:4] == " ---":
                    first_data.data = string.lstrip(first_data.data[4:])
                set_tagName(title, "short-synopsis")
                if children[-1].nodeType == TEXT \
                   and children[-1].data[-1:] == ".":
                    children[-1].data = children[-1].data[:-1]
                section.removeChild(title)
                section.removeChild(section.childNodes[0])
                title.removeChild(children[0])
                modinfo_pos = 0
            else:
                ewrite("module name in title doesn't match"
                       " <declaremodule/>; no <short-synopsis/>\n")
        else:
            ewrite("Unexpected condition: <section/> without <title/>\n")
        modinfo.appendChild(doc.createTextNode("\n "))
        modinfo.appendChild(node)
        if title and not contents_match(title, node):
            # The short synopsis is actually different,
            # and needs to be stored:
            modinfo.appendChild(doc.createTextNode("\n "))
            modinfo.appendChild(title)
        if modauthor:
            modinfo.appendChild(doc.createTextNode("\n "))
            modinfo.appendChild(modauthor)
        if platform:
            modinfo.appendChild(doc.createTextNode("\n "))
            modinfo.appendChild(platform)
        modinfo.appendChild(doc.createTextNode("\n "))
        section.insertBefore(modinfo, section.childNodes[modinfo_pos])
        section.insertBefore(doc.createTextNode("\n "), modinfo)
        #
        # The rest of this removes extra newlines from where we cut out
        # a lot of elements. A lot of code for minimal value, but keeps
        # the generated *ML from being too funny looking.
        #
        section.normalize()
        children = section.childNodes
        for i in range(len(children)):
            node = children[i]
            if node.nodeName == "moduleinfo":
                nextnode = children[i+1]
                if nextnode.nodeType == TEXT:
                    data = nextnode.data
                    if len(string.lstrip(data)) < (len(data) - 4):
                        nextnode.data = "\n\n\n" + string.lstrip(data)


def cleanup_synopses(doc, fragment):
    for node in find_all_elements(fragment, "section"):
        create_module_info(doc, node)


def fixup_table_structures(doc, fragment):
    for table in find_all_elements(fragment, "table"):
        fixup_table(doc, table)


def fixup_table(doc, table):
    # create the table head
    thead = doc.createElement("thead")
    row = doc.createElement("row")
    move_elements_by_name(doc, table, row, "entry")
    thead.appendChild(doc.createTextNode("\n "))
    thead.appendChild(row)
    thead.appendChild(doc.createTextNode("\n "))
    # create the table body
    tbody = doc.createElement("tbody")
    prev_row = None
    last_was_hline = 0
    children = table.childNodes
    for child in children:
        if child.nodeType == ELEMENT:
            tagName = child.tagName
            if tagName == "hline" and prev_row is not None:
                prev_row.setAttribute("rowsep", "1")
            elif tagName == "row":
                prev_row = child
    # save the rows:
    tbody.appendChild(doc.createTextNode("\n "))
    move_elements_by_name(doc, table, tbody, "row", sep="\n ")
    # and toss the rest:
    while children:
        child = children[0]
        nodeType = child.nodeType
        if nodeType == TEXT:
            if string.strip(child.data):
                raise ConversionError("unexpected free data in <%s>: %r"
                                      % (table.tagName, child.data))
            table.removeChild(child)
            continue
        if nodeType == ELEMENT:
            if child.tagName != "hline":
                raise ConversionError(
                    "unexpected <%s> in table" % child.tagName)
            table.removeChild(child)
            continue
        raise ConversionError(
            "unexpected %s node in table" % child.__class__.__name__)
    # nothing left in the <table>; add the <thead> and <tbody>
    tgroup = doc.createElement("tgroup")
    tgroup.appendChild(doc.createTextNode("\n "))
    tgroup.appendChild(thead)
    tgroup.appendChild(doc.createTextNode("\n "))
    tgroup.appendChild(tbody)
    tgroup.appendChild(doc.createTextNode("\n "))
    table.appendChild(tgroup)
    # now make the <entry>s look nice:
    for row in table.getElementsByTagName("row"):
        fixup_row(doc, row)


def fixup_row(doc, row):
    entries = []
    map(entries.append, row.childNodes[1:])
    for entry in entries:
        row.insertBefore(doc.createTextNode("\n "), entry)
#     row.appendChild(doc.createTextNode("\n "))


def move_elements_by_name(doc, source, dest, name, sep=None):
    nodes = []
    for child in source.childNodes:
        if child.nodeName == name:
            nodes.append(child)
    for node in nodes:
        source.removeChild(node)
        dest.appendChild(node)
        if sep:
            dest.appendChild(doc.createTextNode(sep))


RECURSE_INTO_PARA_CONTAINERS = (
    "chapter", "abstract", "enumerate",
    "section", "subsection", "subsubsection",
    "paragraph", "subparagraph", "back-matter",
    "howto", "manual",
    "item", "itemize", "fulllineitems", "enumeration", "descriptionlist",
    "definitionlist", "definition",
    )

PARA_LEVEL_ELEMENTS = (
    "moduleinfo", "title", "verbatim", "enumerate", "item",
    "interpreter-session", "back-matter", "interactive-session",
    "opcodedesc", "classdesc", "datadesc",
    "funcdesc", "methoddesc", "excdesc", "memberdesc", "memberdescni",
    "funcdescni", "methoddescni", "excdescni",
    "tableii", "tableiii", "tableiv", "localmoduletable",
    "sectionauthor", "seealso", "itemize",
    # include <para>, so we can just do it again to get subsequent paras:
    PARA_ELEMENT,
    )

PARA_LEVEL_PRECEEDERS = (
    "setindexsubitem", "author",
    "stindex", "obindex", "COMMENT", "label", "input", "title",
    "versionadded", "versionchanged", "declaremodule",
    "modulesynopsis", "moduleauthor", "indexterm", "leader",
    )


def fixup_paras(doc, fragment):
    for child in fragment.childNodes:
        if child.nodeName in RECURSE_INTO_PARA_CONTAINERS:
            fixup_paras_helper(doc, child)
    descriptions = find_all_elements(fragment, "description")
    for description in descriptions:
        fixup_paras_helper(doc, description)


def fixup_paras_helper(doc, container, depth=0):
    # document is already normalized
    children = container.childNodes
    start = skip_leading_nodes(children)
    while len(children) > start:
        if children[start].nodeName in RECURSE_INTO_PARA_CONTAINERS:
            # Something to recurse into:
            fixup_paras_helper(doc, children[start])
        else:
            # Paragraph material:
            build_para(doc, container, start, len(children))
            if DEBUG_PARA_FIXER and depth == 10:
                sys.exit(1)
        start = skip_leading_nodes(children, start + 1)


def build_para(doc, parent, start, i):
    children = parent.childNodes
    after = start + 1
    have_last = 0
    BREAK_ELEMENTS = PARA_LEVEL_ELEMENTS + RECURSE_INTO_PARA_CONTAINERS
    # Collect all children until \n\n+ is found in a text node or a
    # member of BREAK_ELEMENTS is found.
    for j in range(start, i):
        after = j + 1
        child = children[j]
        nodeType = child.nodeType
        if nodeType == ELEMENT:
            if child.tagName in BREAK_ELEMENTS:
                after = j
                break
        elif nodeType == TEXT:
            pos = string.find(child.data, "\n\n")
            if pos == 0:
                after = j
                break
            if pos >= 1:
                child.splitText(pos)
                break
    else:
        have_last = 1
    if (start + 1) > after:
        raise ConversionError(
            "build_para() could not identify content to turn into a paragraph")
    if children[after - 1].nodeType == TEXT:
        # we may need to split off trailing white space:
        child = children[after - 1]
        data = child.data
        if string.rstrip(data) != data:
            have_last = 0
            child.splitText(len(string.rstrip(data)))
    para = doc.createElement(PARA_ELEMENT)
    prev = None
    indexes = range(start, after)
    indexes.reverse()
    for j in indexes:
        node = parent.childNodes[j]
        parent.removeChild(node)
        para.insertBefore(node, prev)
        prev = node
    if have_last:
        parent.appendChild(para)
        parent.appendChild(doc.createTextNode("\n\n"))
        return len(parent.childNodes)
    else:
        nextnode = parent.childNodes[start]
        if nextnode.nodeType == TEXT:
            if nextnode.data and nextnode.data[0] != "\n":
                nextnode.data = "\n" + nextnode.data
        else:
            newnode = doc.createTextNode("\n")
            parent.insertBefore(newnode, nextnode)
            nextnode = newnode
            start = start + 1
        parent.insertBefore(para, nextnode)
        return start + 1


def skip_leading_nodes(children, start=0):
    """Return index into children of a node at which paragraph building should
    begin or a recursive call to fixup_paras_helper() should be made (for
    subsections, etc.).

    When the return value >= len(children), we've built all the paras we can
    from this list of children.
    """
    i = len(children)
    while i > start:
        # skip over leading comments and whitespace:
        child = children[start]
        nodeType = child.nodeType
        if nodeType == TEXT:
            data = child.data
            shortened = string.lstrip(data)
            if shortened:
                if data != shortened:
                    # break into two nodes: whitespace and non-whitespace
                    child.splitText(len(data) - len(shortened))
                    return start + 1
                return start
            # all whitespace, just skip
        elif nodeType == ELEMENT:
            tagName = child.tagName
            if tagName in RECURSE_INTO_PARA_CONTAINERS:
                return start
            if tagName not in PARA_LEVEL_ELEMENTS + PARA_LEVEL_PRECEEDERS:
                return start
        start = start + 1
    return start


def fixup_rfc_references(doc, fragment):
    for rfcnode in find_all_elements(fragment, "rfc"):
        rfcnode.appendChild(doc.createTextNode(
            "RFC " + rfcnode.getAttribute("num")))


def fixup_signatures(doc, fragment):
    for child in fragment.childNodes:
        if child.nodeType == ELEMENT:
            args = child.getElementsByTagName("args")
            for arg in args:
                fixup_args(doc, arg)
                arg.normalize()
            args = child.getElementsByTagName("constructor-args")
            for arg in args:
                fixup_args(doc, arg)
                arg.normalize()


def fixup_args(doc, arglist):
    for child in arglist.childNodes:
        if child.nodeName == "optional":
            # found it; fix and return
            arglist.insertBefore(doc.createTextNode("["), child)
            optkids = child.childNodes
            while optkids:
                k = optkids[0]
                child.removeChild(k)
                arglist.insertBefore(k, child)
            arglist.insertBefore(doc.createTextNode("]"), child)
            arglist.removeChild(child)
            return fixup_args(doc, arglist)


def fixup_sectionauthors(doc, fragment):
    for sectauth in find_all_elements(fragment, "sectionauthor"):
        section = sectauth.parentNode
        section.removeChild(sectauth)
        set_tagName(sectauth, "author")
        sectauth.appendChild(doc.createTextNode(
            sectauth.getAttribute("name")))
        sectauth.removeAttribute("name")
        after = section.childNodes[2]
        title = section.childNodes[1]
        if title.nodeName != "title":
            after = section.childNodes[0]
        section.insertBefore(doc.createTextNode("\n "), after)
        section.insertBefore(sectauth, after)


def fixup_verbatims(doc):
    for verbatim in find_all_elements(doc, "verbatim"):
        child = verbatim.childNodes[0]
        if child.nodeType == TEXT \
           and string.lstrip(child.data)[:3] == ">>>":
            set_tagName(verbatim, "interactive-session")


def add_node_ids(fragment, counter=0):
    fragment.node_id = counter
    for node in fragment.childNodes:
        counter = counter + 1
        if node.nodeType == ELEMENT:
            counter = add_node_ids(node, counter)
        else:
            node.node_id = counter
    return counter + 1


REFMODINDEX_ELEMENTS = ('refmodindex', 'refbimodindex',
                        'refexmodindex', 'refstmodindex')


def fixup_refmodindexes(fragment):
    # Locate <ref*modindex>...</> co-located with <module>...</>, and
    # remove the <ref*modindex>, replacing it with index=index on the
    # <module> element.
    nodes = find_all_elements_from_set(fragment, REFMODINDEX_ELEMENTS)
    d = {}
    for node in nodes:
        parent = node.parentNode
        d[parent.node_id] = parent
    del nodes
    map(fixup_refmodindexes_chunk, d.values())


def fixup_refmodindexes_chunk(container):
    # node is probably a <para>; let's see how often it isn't:
    if container.tagName != PARA_ELEMENT:
        bwrite("--- fixup_refmodindexes_chunk(%s)\n" % container)
    module_entries = find_all_elements(container, "module")
    if not module_entries:
        return
    index_entries = find_all_elements_from_set(container, REFMODINDEX_ELEMENTS)
    removes = []
    for entry in index_entries:
        children = entry.childNodes
        if len(children) != 0:
            bwrite("--- unexpected number of children for %s node:\n"
                   % entry.tagName)
            ewrite(entry.toxml() + "\n")
            continue
        found = 0
        module_name = entry.getAttribute("module")
        for node in module_entries:
            if len(node.childNodes) != 1:
                continue
            this_name = node.childNodes[0].data
            if this_name == module_name:
                found = 1
                node.setAttribute("index", "yes")
        if found:
            removes.append(entry)
    for node in removes:
        container.removeChild(node)


def fixup_bifuncindexes(fragment):
    nodes = find_all_elements(fragment, 'bifuncindex')
    d = {}
    # make sure that each parent is only processed once:
    for node in nodes:
        parent = node.parentNode
        d[parent.node_id] = parent
    del nodes
    map(fixup_bifuncindexes_chunk, d.values())


def fixup_bifuncindexes_chunk(container):
    removes = []
    entries = find_all_child_elements(container, "bifuncindex")
    function_entries = find_all_child_elements(container, "function")
    for entry in entries:
        function_name = entry.getAttribute("name")
        found = 0
        for func_entry in function_entries:
            t2 = func_entry.childNodes[0].data
            if t2[-2:] != "()":
                continue
            t2 = t2[:-2]
            if t2 == function_name:
                func_entry.setAttribute("index", "yes")
                func_entry.setAttribute("module", "__builtin__")
                if not found:
                    found = 1
                    removes.append(entry)
    for entry in removes:
        container.removeChild(entry)


def join_adjacent_elements(container, gi):
    queue = [container]
    while queue:
        parent = queue.pop()
        i = 0
        children = parent.childNodes
        nchildren = len(children)
        while i < (nchildren - 1):
            child = children[i]
            if child.nodeName == gi:
                if children[i+1].nodeName == gi:
                    ewrite("--- merging two <%s/> elements\n" % gi)
                    child = children[i]
                    nextchild = children[i+1]
                    nextchildren = nextchild.childNodes
                    while len(nextchildren):
                        node = nextchildren[0]
                        nextchild.removeChild(node)
                        child.appendChild(node)
                    parent.removeChild(nextchild)
                    continue
            if child.nodeType == ELEMENT:
                queue.append(child)
            i = i + 1


_token_rx = re.compile(r"[a-zA-Z][a-zA-Z0-9.-]*$")


def write_esis(doc, ofp, knownempty):
    for node in doc.childNodes:
        nodeType = node.nodeType
        if nodeType == ELEMENT:
            gi = node.tagName
            if knownempty(gi):
                if node.hasChildNodes():
                    raise ValueError, \
                          "declared-empty node <%s> has children" % gi
                ofp.write("e\n")
            for k, value in node.attributes.items():
                if _token_rx.match(value):
                    dtype = "TOKEN"
                else:
                    dtype = "CDATA"
                ofp.write("A%s %s %s\n" % (k, dtype, esistools.encode(value)))
            ofp.write("(%s\n" % gi)
            write_esis(node, ofp, knownempty)
            ofp.write(")%s\n" % gi)
        elif nodeType == TEXT:
            ofp.write("-%s\n" % esistools.encode(node.data))
        elif nodeType == ENTITY_REFERENCE:
            ofp.write("&%s\n" % node.nodeName)
        else:
            raise RuntimeError, "unsupported node type: %s" % nodeType


def convert(ifp, ofp):
    events = esistools.parse(ifp)
    toktype, doc = events.getEvent()
    fragment = doc.createDocumentFragment()
    events.expandNode(fragment)

    normalize(fragment)
    simplify(doc, fragment)
    handle_labels(doc, fragment)
    handle_appendix(doc, fragment)
fixup_trailing_whitespace(doc, { "abstract": "\n", "title": "", "chapter": "\n\n", "section": "\n\n", "subsection": "\n\n", "subsubsection": "\n\n", "paragraph": "\n\n", "subparagraph": "\n\n", }) cleanup_root_text(doc) cleanup_trailing_parens(fragment, ["function", "method", "cfunction"]) cleanup_synopses(doc, fragment) fixup_descriptors(doc, fragment) fixup_verbatims(fragment) normalize(fragment) fixup_paras(doc, fragment) fixup_sectionauthors(doc, fragment) fixup_table_structures(doc, fragment) fixup_rfc_references(doc, fragment) fixup_signatures(doc, fragment) add_node_ids(fragment) fixup_refmodindexes(fragment) fixup_bifuncindexes(fragment) # Take care of ugly hacks in the LaTeX markup to avoid LaTeX and # LaTeX2HTML screwing with GNU-style long options (the '--' problem). join_adjacent_elements(fragment, "option") # d = {} for gi in events.parser.get_empties(): d[gi] = gi if d.has_key("author"): del d["author"] if d.has_key("rfc"): del d["rfc"] knownempty = d.has_key # try: write_esis(fragment, ofp, knownempty) except IOError, (err, msg): # Ignore EPIPE; it just means that whoever we're writing to stopped # reading. The rest of the output would be ignored. All other errors # should still be reported, if err != errno.EPIPE: raise def main(): if len(sys.argv) == 1: ifp = sys.stdin ofp = sys.stdout elif len(sys.argv) == 2: ifp = open(sys.argv[1]) ofp = sys.stdout elif len(sys.argv) == 3: ifp = open(sys.argv[1]) import StringIO ofp = StringIO.StringIO() else: usage() sys.exit(2) convert(ifp, ofp) if len(sys.argv) == 3: fp = open(sys.argv[2], "w") fp.write(ofp.getvalue()) fp.close() ofp.close() if __name__ == "__main__": main()
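# Usage sketch (hedged: this is Python 2-era CPython doc-conversion tooling and
# the script/file names below are illustrative; the invocations mirror the
# argv handling in main() above):
#
#     python thisscript.py                        # ESIS on stdin -> fixed ESIS on stdout
#     python thisscript.py input.esis             # read file -> stdout
#     python thisscript.py input.esis out.esis    # read file, buffer, then write file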
nilq/baby-python
python
import importlib.util
import os


def vyLoadModuleFromFilePath(filePath, moduleName=None):
    if moduleName is None:
        replacements = [
            ('/', '.'),
            ('\\', '.'),
            ('-', '_'),
            (' ', '_'),
        ]
        filePathSansExt = os.path.splitext(filePath)[0]
        for issue, replacement in replacements:
            filePathSansExt = filePathSansExt.replace(issue, replacement)
        moduleName = filePathSansExt
    spec = importlib.util.spec_from_file_location(moduleName, filePath)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
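# Usage sketch (hedged: the path and module contents are hypothetical; any
# importable .py file on disk would behave the same way):
#
#     helpers = vyLoadModuleFromFilePath('tools/my-helpers.py')
#     # The derived module name would be 'tools.my_helpers'; attributes of the
#     # loaded module are then available as usual:
#     helpers.some_function()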
nilq/baby-python
python
from ._plugin import Workplane
from ._plugin import extend
nilq/baby-python
python
"""Testing File for roman_ssg_util""" import os import shutil import roman_ssg_util def setup_test(): """Setup Tests for tests""" os.chdir(os.getcwd()) if os.path.isdir("dist"): shutil.rmtree("dist") if os.path.isdir("testCustomDirectory"): shutil.rmtree("testCustomDirectory") def test_get_local_files(): """Test get_local_files function""" tempArr = ["File1.txt"] if os.name == "posix": assert roman_ssg_util.get_local_files(tempArr) == [ os.getcwd() + "/" + "File1.txt" ] else: assert roman_ssg_util.get_local_files(tempArr) == [ os.getcwd() + "\\" + "File1.txt" ] def test_create_css_file(): """Test Create CSS File function""" roman_ssg_util.create_css_file(True) assert os.path.isfile("./main.css") os.remove("main.css") def test_create_css_file_fail(): """Test fail of Create CSS File function""" roman_ssg_util.create_css_file(False) assert not os.path.isfile("./main.css") def test_write_to_file(): """Test write_to_file function""" if os.name == "posix": filePath = os.getcwd() + "/" + "example.txt" else: filePath = os.getcwd() + "\\" + "example.txt" roman_ssg_util.write_to_file( "en-CA", filePath, 0, ["example.txt"], ) assert os.path.isfile("example.html") os.remove("example.html") def test_conversion_func_file_non_custom_dir(): """Test Conversion File function without Custom Directory""" fileArr = [] fileArr.append("example.txt") fileArr.append("test.md") if os.name == "posix": roman_ssg_util.conversion_func_file( "en-CA", False, fileArr, "", os.getcwd() + "/" + "dist", ) else: roman_ssg_util.conversion_func_file( "en-CA", False, fileArr, "", os.getcwd() + "\\" + "dist", ) assert os.path.isfile("example.html") assert os.path.isfile("test.html") def test_conversion_func_file_custom_dir(): """Test Conversion File function with Custom Directory""" os.chdir("..") setup_test() fileArr = [] fileArr.append("example.txt") fileArr.append("test.md") os.mkdir("testCustomDirectory") if os.name == "posix": roman_ssg_util.conversion_func_file( "en-CA", True, fileArr, os.getcwd() + "/" + "testCustomDirectory", os.getcwd() + "/" + "dist", ) else: roman_ssg_util.conversion_func_file( "en-CA", True, fileArr, os.getcwd() + "\\" + "testCustomDirectory", os.getcwd() + "\\" + "dist", ) assert os.path.isfile("example.html") assert os.path.isfile("test.html") def test_converstion_func_folder_non_custom_dir(): """Test Conversion Folder function without Custom Directory""" os.chdir("..") setup_test() arrayOfFiles = [] arrayOfFiles.append("Silver Blaze.txt") arrayOfFiles.append("The Adventure of the Six Napoleans.txt") arrayOfFiles.append("The Adventure of the Speckled Band.txt") arrayOfFiles.append("The Naval Treaty.txt") arrayOfFiles.append("The Red Headed League.txt") if os.name == "posix": roman_ssg_util.conversion_func_folder( "en-CA", os.getcwd() + "/" + "Sherlock-Holmes-Selected-Stories", False, arrayOfFiles, "", os.getcwd() + "/" + "dist", ) else: roman_ssg_util.conversion_func_folder( "en-CA", os.getcwd() + "\\" + "Sherlock-Holmes-Selected-Stories", False, arrayOfFiles, "", os.getcwd() + "\\" + "dist", ) assert os.path.isfile("Silver Blaze.html") assert os.path.isfile("The Adventure of the Six Napoleans.html") assert os.path.isfile("The Adventure of the Speckled Band.html") assert os.path.isfile("The Naval Treaty.html") assert os.path.isfile("The Red Headed League.html") def test_converstion_func_folder_custom_dir(): """Test Conversion Folder function with Custom Directory""" os.chdir("..") setup_test() arrayOfFiles = [] arrayOfFiles.append("Silver Blaze.txt") arrayOfFiles.append("The Adventure of the Six 
Napoleans.txt") arrayOfFiles.append("The Adventure of the Speckled Band.txt") arrayOfFiles.append("The Naval Treaty.txt") arrayOfFiles.append("The Red Headed League.txt") if os.path.isdir("testCustomDirectory"): shutil.rmtree("testCustomDirectory") os.mkdir("testCustomDirectory") else: os.mkdir("testCustomDirectory") if os.name == "posix": roman_ssg_util.conversion_func_folder( "en-CA", os.getcwd() + "/" + "Sherlock-Holmes-Selected-Stories", True, arrayOfFiles, os.getcwd() + "/" + "testCustomDirectory", os.getcwd() + "/" + "dist", ) else: roman_ssg_util.conversion_func_folder( "en-CA", os.getcwd() + "\\" + "Sherlock-Holmes-Selected-Stories", True, arrayOfFiles, os.getcwd() + "\\" + "testCustomDirectory", os.getcwd() + "\\" + "dist", ) assert os.path.isfile("Silver Blaze.html") assert os.path.isfile("The Adventure of the Six Napoleans.html") assert os.path.isfile("The Adventure of the Speckled Band.html") assert os.path.isfile("The Naval Treaty.html") assert os.path.isfile("The Red Headed League.html") os.chdir("..") shutil.rmtree("testCustomDirectory")
nilq/baby-python
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # */AIPND/intropylab-classifying-images/check_images.py # # DONE: 0. Fill in your information in the programming header below # PROGRAMMER: Aimee Ukasick # DATE CREATED: 11 April 2018 # REVISED DATE: <=(Date Revised - if any) # PURPOSE: Check images & report results: read them in, predict their # content (classifier), compare prediction to actual value labels # and output results # # Use argparse Expected Call with <> indicating expected user input: # python check_images.py --dir <directory with images> --arch <model> # --dogfile <file that contains dognames> # Example call: # python check_images.py --dir pet_images/ --arch vgg --dogfile dognames.txt ## # Imports python modules import argparse # Imports time() and sleep() functions from time module from time import time, sleep from os import listdir # Imports classifier function for using CNN to classify images from classifier import classifier # Main program function defined below def main(): # DONE: 1. Define start_time to measure total program runtime by # collecting start time start_time = time() # DONE: 2. Define get_input_args() function to create & retrieve command # line arguments print("***** calling get_input_args *****") in_args = get_input_args() #print_command_line_args(in_args) # DONE: 3. Define get_pet_labels() function to create pet image labels by # creating a dictionary with key=filename and value=file label to be used # to check the accuracy of the classifier function print("***** calling get_pet_labels *****") petlabels_dict = get_pet_labels(in_args.dir) # print("***** print_petlabels_dict(petlabels_dict) *****") # print_petlabels_dict(petlabels_dict) # DONE: 4. Define classify_images() function to create the classifier # labels with the classifier function using in_arg.arch, comparing the # labels, and creating a dictionary of results (result_dic) # print("***** calling classify_images *****") # result_dic = classify_images(in_args.dir, petlabels_dict, in_args.arch) # print("***** printing my code classify_images result_dic *****") # print_result_dic(result_dic) print("***** calling classify_images_udacity *****") result_dic = classify_images_udacity(in_args.dir, petlabels_dict, in_args.arch) # print("***** printing classify_images_udacity result_dic *****") # print_result_dic(result_dic) # DONE: 5. Define adjust_results4_isadog() function to adjust the results # dictionary(result_dic) to determine if classifier correctly classified # images as 'a dog' or 'not a dog'. This demonstrates if the model can # correctly classify dog images as dogs (regardless of breed) # print("***** calling adjust_results4_isadog *****") # adjust_results4_isadog(result_dic, in_args.dogfile) # print("***** printing my adjust_results4_isadog *****") # print_adjust_results4_isadog(result_dic) print("***** calling adjust_results4_isadog_udacity *****") adjust_results4_isadog_udacity(result_dic, in_args.dogfile) # print("***** printing my adjust_results4_isadog_udacity *****") # print_adjust_results4_isadog(result_dic) # DONE: 6. Define calculates_results_stats() function to calculate # results of run and puts statistics in a results statistics # dictionary (results_stats_dic) print("***** calculates_results_stats *****") results_stats_dic = calculates_results_stats(result_dic) print("***** check_results_stats *****") check_results_stats(results_stats_dic, result_dic) # DONE: 7. Define print_results() function to print summary results, # incorrect classifications of dogs and breeds if requested. 
print_results(result_dic, results_stats_dic, in_args.arch, True, True) # DONE: 1. Define end_time to measure total program runtime # by collecting end time end_time = time() # DONE: 1. Define tot_time to computes overall runtime in # seconds & prints it in hh:mm:ss format tot_time = end_time - start_time hours = int((tot_time / 3600)) minutes = int(((tot_time % 3600) / 60)) seconds = int(((tot_time % 3600) % 60)) print("\n** Total Elapsed Runtime:", str(hours) + ":" + str(minutes) + ":" + str(seconds)) # TODO: 2.-to-7. Define all the function below. Notice that the input # paramaters and return values have been left in the function's docstrings. # This is to provide guidance for acheiving a solution similar to the # instructor provided solution. Feel free to ignore this guidance as long as # you are able to acheive the desired outcomes with this lab. def get_input_args(): """ Retrieves and parses the command line arguments created and defined using the argparse module. This function returns these arguments as an ArgumentParser object. 3 command line arguements are created: dir - Path to the pet image files(default- 'pet_images/') arch - CNN model architecture to use for image classification(default- pick any of the following vgg, alexnet, resnet) dogfile - Text file that contains all labels associated to dogs(default- 'dognames.txt' Parameters: None - simply using argparse module to create & store command line arguments Returns: parse_args() -data structure that stores the command line arguments object """ parser = argparse.ArgumentParser() # arg 1 - path to folder with default parser.add_argument('--dir', type=str, default='pet_images/', help='path to the folder that contains the images; default is pet_images') # arg 2 - CNN model architecture to use for image classification parser.add_argument('--arch', type=str, default='vgg', help='CNN model to use for image classification; default is vgg') # arg 3 - file that contains all labels associated to dogs parser.add_argument('--dogfile', type=str, default='dognames.txt', help='file that contains all labels associated to dogs;default is dognames.txt') # Assigns variable in_args to parse_args() in_args = parser.parse_args() return in_args def get_pet_labels(image_dir): """ Creates a dictionary of pet labels based upon the filenames of the image files. Reads in pet filenames and extracts the pet image labels from the filenames and returns these label as petlabel_dic. This is used to check the accuracy of the image classifier model. The pet image labels are in all lower letters, have a single space separating each word in the multi-word pet labels, and that they correctly represent the filenames. 
Parameters: image_dir - The (full) path to the folder of images that are to be classified by pretrained CNN models (string) Returns: petlabels_dic - Dictionary storing image filename (as key) and Pet Image Labels (as value) """ filename_list = listdir(image_dir) #print("\nPrints 10 filenames from folder ", image_dir) # for idx in range(0, 10, 1): #print("%2d file: %-25s" % (idx + 1, filename_list[idx])) petlabels_dic = dict() for filename in filename_list: if filename not in petlabels_dic: # d['mynewkey'] = 'mynewvalue' name = filename.split(".")[0] name = name.replace("_", " ").lower() final_name = ''.join( char for char in name if not char.isdigit()).rstrip(" ") petlabels_dic[filename] = final_name else: print("WARNING: ", filename, " already exists in dictionary!") #udacity solution # in_files = listdir(image_dir) # petlabels_dic2 = dict() # for idx in range(0, len(in_files), 1): # if in_files[idx][0] != ".": #only for Mac # image_name = in_files[idx].split("_") # pet_label = "" # for word in image_name: # if word.isalpha(): # pet_label += word.lower() + " " # # pet_label = pet_label.strip() # # if in_files[idx] not in petlabels_dic2: # petlabels_dic2[in_files[idx]] = pet_label # # else: # print("Warning: Duplicate files exist in directory", # in_files[idx]) # # print("\n PRINTING petlabels_dic2") # print_petlabels_dict(petlabels_dic2) return petlabels_dic def classify_images(images_dir, petlabel_dic, model): """ Creates classifier labels with classifier function, compares labels, and creates a dictionary containing both labels and comparison of them to be returned. PLEASE NOTE: This function uses the classifier() function defined in classifier.py within this function. The proper use of this function is in test_classifier.py Please refer to this program prior to using the classifier() function to classify images in this function. 
Parameters: images_dir - The (full) path to the folder of images that are to be classified by pretrained CNN models (string) petlabel_dic - Dictionary that contains the pet image(true) labels that classify what's in the image, where its key is the pet image filename & its value is pet image label where label is lowercase with space between each word in label model - pre-trained CNN whose architecture is indicated by this parameter, values must be: resnet alexnet vgg (string) Returns: results_dic - Dictionary with key as image filename and value as a List (index)idx 0 = pet image label (string) idx 1 = classifier label (string) idx 2 = 1/0 (int) where 1 = match between pet image and classifier labels and 0 = no match between labels """ results_dic = {} for filename in petlabel_dic.keys(): pet_label = petlabel_dic[filename] path = images_dir + "/" + filename classifier_label = classifier(path, model) classifier_label = classifier_label.lower() # remove leading and trailing whitespaces classifier_label = classifier_label.strip() found_index = classifier_label.find(pet_label) # if found, make sure the pet_label is a whole standalone word within # the classifier_label and not part of another word # example: cat can be part of polecat, which is a skunk, and that # would result in incorrect classification is_whole_word_match = 0 #if found_index >= 0: # remove whitespace after comma # c_label = classifier_label.replace(", ", ",") # create list from classifier_label # label_list = c_label.split(",") # if pet_label in label_list: # is_whole_word_match = 1 if found_index >= 0: conda = found_index == 0 and len(pet_label) == len( classifier_label) condb = found_index == 0 or classifier_label[ found_index - 1] == " " condc = found_index + len(pet_label) == len(classifier_label) condd = classifier_label[found_index + len(pet_label): found_index + len(pet_label) + 1] in ( ",", " ") if conda or (condb and (condc or condd)): is_whole_word_match = 1 value_list = [pet_label, classifier_label, is_whole_word_match] if pet_label not in results_dic: results_dic[pet_label] = value_list return results_dic def classify_images_udacity(images_dir, petlabel_dic, model): """ Creates classifier labels with classifier function, compares labels, and creates a dictionary containing both labels and comparison of them to be returned. PLEASE NOTE: This function uses the classifier() function defined in classifier.py within this function. The proper use of this function is in test_classifier.py Please refer to this program prior to using the classifier() function to classify images in this function. 
Parameters: images_dir - The (full) path to the folder of images that are to be classified by pretrained CNN models (string) petlabel_dic - Dictionary that contains the pet image(true) labels that classify what's in the image, where its key is the pet image filename & its value is pet image label where label is lowercase with space between each word in label model - pre-trained CNN whose architecture is indicated by this parameter, values must be: resnet alexnet vgg (string) Returns: results_dic - Dictionary with key as image filename and value as a List (index)idx 0 = pet image label (string) idx 1 = classifier label (string) idx 2 = 1/0 (int) where 1 = match between pet image and classifier labels and 0 = no match between labels """ results_dic = dict() for key in petlabel_dic: model_label = classifier(images_dir+key, model) model_label = model_label.lower() model_label = model_label.strip() # defines truth as pet image label and tries to find it using find() # string function to find it within classifier_label(model_label) truth = petlabel_dic[key] found = model_label.find(truth) if found >= 0: conda = found == 0 and len(truth) == len( model_label) condb = found == 0 or model_label[ found - 1] == " " condc = found + len(truth) == len(model_label) condd = model_label[found + len(truth): found + len(truth) + 1] in ( ",", " ") if conda or (condb and (condc or condd)): if key not in results_dic: results_dic[key] = [truth, model_label, 1] # found within a word/term not a label existing on its own else: if key not in results_dic: results_dic[key] = [truth, model_label, 0] return results_dic def adjust_results4_isadog(results_dic, dogsfilename): """ Adjusts the results dictionary to determine if classifier correctly classified images 'as a dog' or 'not a dog' especially when not a match. Demonstrates if model architecture correctly classifies dog images even if it gets dog breed wrong (not a match). Parameters: results_dic - Dictionary with key as image filename and value as a List (index)idx 0 = pet image label (string) idx 1 = classifier label (string) idx 2 = 1/0 (int) where 1 = match between pet image and classifier labels and 0 = no match between labels --- where idx 3 & idx 4 are added by this function --- idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and 0 = pet Image 'is-NOT-a' dog. idx 4 = 1/0 (int) where 1 = Classifier classifies image 'as-a' dog and 0 = Classifier classifies image 'as-NOT-a' dog. dogsfile - A text file that contains names of all dogs from ImageNet 1000 labels (used by classifier model) and dog names from the pet image files. This file has one dog name per line dog names are all in lowercase with spaces separating the distinct words of the dog name. This file should have been passed in as a command line argument. (string - indicates text file's name) Returns: None - results_dic is mutable data type so no return needed. 
""" # a match between classifier label and pet label match dogsfilename entry # if pet label in dogsfilename list, idx 3 = 1 # if classifier label in dogsfilename list, idx 4 is 1 dogsname_dic = dict() try: with open(dogsfilename) as f: for line in f: line = line.rstrip() if line not in dogsname_dic: dogsname_dic[line] = 1 else: print("WARNING: duplicate dog name: " + line) print("dogsname_dic length = ", len(dogsname_dic)) except BaseException as be: print("***** ERROR *****") print(be) for filename in results_dic: #pet label image IS of dog/found in dognames_dic pet_label = results_dic[filename][0] classifier_label = results_dic[filename][1] if pet_label in dogsname_dic: if classifier_label in dogsname_dic: #if classifier_label in dognames_dic, extend by 1, 1 results_dic[filename].extend((1, 1)) else: #classifier is not a dog; extend by 1.0 results_dic[filename].extend((1, 0)) else: if classifier_label in dogsname_dic: results_dic[filename].extend((0, 1)) else: results_dic[filename].extend((0, 0)) def adjust_results4_isadog_udacity(results_dic, dogsfile): """ Adjusts the results dictionary to determine if classifier correctly classified images 'as a dog' or 'not a dog' especially when not a match. Demonstrates if model architecture correctly classifies dog images even if it gets dog breed wrong (not a match). Parameters: results_dic - Dictionary with key as image filename and value as a List (index)idx 0 = pet image label (string) idx 1 = classifier label (string) idx 2 = 1/0 (int) where 1 = match between pet image and classifier labels and 0 = no match between labels --- where idx 3 & idx 4 are added by this function --- idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and 0 = pet Image 'is-NOT-a' dog. idx 4 = 1/0 (int) where 1 = Classifier classifies image 'as-a' dog and 0 = Classifier classifies image 'as-NOT-a' dog. dogsfile - A text file that contains names of all dogs from ImageNet 1000 labels (used by classifier model) and dog names from the pet image files. This file has one dog name per line dog names are all in lowercase with spaces separating the distinct words of the dog name. This file should have been passed in as a command line argument. (string - indicates text file's name) Returns: None - results_dic is mutable data type so no return needed. """ dognames_dic = dict() with open(dogsfile, "r") as infile: line = infile.readline() while line != "": line = line.rstrip() if line not in dognames_dic: dognames_dic[line] = 1 else: print("Warning: duplicate dognames", line) line = infile.readline() for key in results_dic: if results_dic[key][0] in dognames_dic: if results_dic[key][1] in dognames_dic: results_dic[key].extend((1, 1)) else: results_dic[key].extend((1, 0)) else: if results_dic[key][1] in dognames_dic: results_dic[key].extend((0, 1)) else: results_dic[key].extend((0, 0)) def calculates_results_stats(results_dic): """ Calculates statistics of the results of the run using classifier's model architecture on classifying images. Then puts the results statistics in a dictionary (results_stats) so that it's returned for printing as to help the user to determine the 'best' model for classifying images. Note that the statistics calculated as the results are either percentages or counts. 
Parameters: results_dic - Dictionary with key as image filename and value as a List (index)idx 0 = pet image label (string) idx 1 = classifier label (string) idx 2 = 1/0 (int) where 1 = match between pet image and classifer labels and 0 = no match between labels idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and 0 = pet Image 'is-NOT-a' dog. idx 4 = 1/0 (int) where 1 = Classifier classifies image 'as-a' dog and 0 = Classifier classifies image 'as-NOT-a' dog. Returns: results_stats - Dictionary that contains the results statistics (either a percentage or a count) where the key is the statistic's name (starting with 'pct' for percentage or 'n' for count) and the value is the statistic's value """ #{'Beagle_01141.jpg': ['beagle', 'walker hound, walker foxhound', 0, 1, 1]} # key = statistic's name (e.g. n_correct_dogs, pct_correct_dogs, n_correct_breed, pct_correct_breed) # value = statistic's value (e.g. 30, 100%, 24, 80%) # example_dictionary = {'n_correct_dogs': 30, 'pct_correct_dogs': 100.0, 'n_correct_breed': 24, 'pct_correct_breed': 80.0} results_stats = dict() # sets all counters to initial values of zero so they can be incremented # while processing through the images in results_dic results_stats['n_dogs_img'] = 0 results_stats['n_match'] = 0 results_stats['n_correct_dogs'] = 0 results_stats['n_correct_notdogs'] = 0 results_stats['n_correct_breed'] = 0 for key in results_dic: # labels match exactly if results_dic[key][2] == 1: results_stats['n_match'] += 1 # pet image label is a dog AND labels match - counts correct breed if sum(results_dic[key][2:]) == 3: results_stats['n_correct_breed'] += 1 # pet image label is a dog - counts num dog images if results_dic[key][3] == 1: results_stats['n_dogs_img'] += 1 # classifier classifies image as Dog (& pet image is a dog) # counts number of correct dog classifications if results_dic[key][4] == 1: results_stats['n_correct_dogs'] += 1 # pet image label is NOT a dog else: # classifier classifies image as NOT a Dog # (& pet image is NOT a dog) # counts number of correct dog classifications if results_dic[key][4] == 0: results_stats['n_correct_notdogs'] += 1 # calc num total images results_stats['n_images'] = len(results_dic) # calc num of not-a-dog images using images & dog images counts results_stats['n_notdogs_img'] = (results_stats['n_images'] - results_stats['n_dogs_img']) # calc % correct matches results_stats['pct_match'] = (results_stats['n_match'] / results_stats['n_images']) * 100.0 # calc % correct matches results_stats['pct_correct_dogs'] = (results_stats['n_correct_dogs'] / results_stats['n_dogs_img']) * 100.0 # calc % correct breed of dog results_stats['pct_correct_breed'] = (results_stats['n_correct_breed'] / results_stats['n_dogs_img']) * 100.0 # calc % correct not-a-dog images # uses conditional statement for when no 'not a dog' images were submitted if results_stats['n_notdogs_img'] > 0: results_stats['pct_correct_notdogs'] = (results_stats[ 'n_correct_notdogs'] / results_stats['n_notdogs_img']) *100.0 else: results_stats['pct_correct_notdogs'] = 0.0 return results_stats def print_results(results_dic, results_stats, model, print_incorrect_dogs = False, print_incorrect_breed = False): """ Prints summary results on the classification and then prints incorrectly classified dogs and incorrectly classified dog breeds if user indicates they want those printouts (use non-default values) Parameters: results_dic - Dictionary with key as image filename and value as a List (index)idx 0 = pet image label (string) idx 1 = classifier label 
                            (string)
                    idx 2 = 1/0 (int)  where 1 = match between pet image and
                            classifier labels and 0 = no match between labels
                    idx 3 = 1/0 (int)  where 1 = pet image 'is-a' dog and
                            0 = pet Image 'is-NOT-a' dog.
                    idx 4 = 1/0 (int)  where 1 = Classifier classifies image
                            'as-a' dog and 0 = Classifier classifies image
                            'as-NOT-a' dog.
      results_stats - Dictionary that contains the results statistics (either
                      a percentage or a count) where the key is the statistic's
                      name (starting with 'pct' for percentage or 'n' for count)
                      and the value is the statistic's value
      model - pretrained CNN whose architecture is indicated by this parameter,
              values must be: resnet alexnet vgg (string)
      print_incorrect_dogs - True prints incorrectly classified dog images and
                             False doesn't print anything(default) (bool)
      print_incorrect_breed - True prints incorrectly classified dog breeds and
                              False doesn't print anything(default) (bool)
    Returns:
           None - simply printing results.
    """
    # OLD STRING FORMAT see following link:
    # https://docs.python.org/2/library/stdtypes.html#string-formatting
    # NEW STRING FORMAT see following link:
    # https://docs.python.org/3/library/string.html#format-string-syntax
    print("\nResults Summary for Model Architecture: ", model.upper())
    print("%20s: %3d" % ("N Images", results_stats['n_images']))
    print("%20s: %3d" % ("N Dog Images", results_stats['n_dogs_img']))
    print("%20s: %3d" % ("N Not-Dog Images", results_stats['n_notdogs_img']))

    # prints summary stats on model run
    print(" ")
    for key in results_stats:
        if key[0] == 'p':
            print("%20s: %5.1f" % (key, results_stats[key]))

    if (print_incorrect_dogs and
            ((results_stats['n_correct_dogs'] +
              results_stats['n_correct_notdogs']) !=
             results_stats['n_images'])):
        print("\nINCORRECT Dog/NOT Dog Assignments:")
        for key in results_dic:
            if sum(results_dic[key][3:]) == 1:
                print("Real: {0} Classifier: {1}".format(
                    results_dic[key][0], results_dic[key][1]))

    if (print_incorrect_breed and
            (results_stats['n_correct_dogs'] !=
             results_stats['n_correct_breed'])):
        print("\nINCORRECT Dog Breed Assignment:")
        for key in results_dic:
            if sum(results_dic[key][3:]) == 2 and results_dic[key][2] == 0:
                print("Real: {0} Classifier: {1}".format(
                    results_dic[key][0], results_dic[key][1]))


def print_result_dic(result_dic):
    # temp code to print out result_dic
    print("\nprint_result_dic")
    print("\nMATCH:")
    n_match = 0
    n_notmatch = 0
    for key in result_dic:
        if result_dic[key][2] == 1:
            n_match += 1
            print("Pet Label: %-26s Classifier Label: %-30s" %
                  (result_dic[key][0], result_dic[key][1]))
    print("\nNOT A MATCH:")
    for key in result_dic:
        if result_dic[key][2] == 0:
            n_notmatch += 1
            print("Pet Label: %-26s Classifier Label: %-30s" %
                  (result_dic[key][0], result_dic[key][1]))
    print("\n# Total Images:", n_match + n_notmatch,
          "# Matches:", n_match, " # NOT MATCH:", n_notmatch)


def print_petlabels_dict(petlabels_dict):
    print("petlabels_dict has ", len(petlabels_dict), " key-value pairs. ")
    prnt = 0
    for key in petlabels_dict:
        print("{} key: {} ; value: {}".format((prnt + 1), key,
                                              petlabels_dict[key]))
        prnt += 1


def print_command_line_args(in_args):
    print("arg1 --dir: ", in_args.dir, "; arg2 --arch: ", in_args.arch,
          "; arg3 --dogfile: ", in_args.dogfile)


def print_adjust_results4_isadog(result_dic):
    match = 0
    nomatch = 0
    print("\nMATCH:")
    for key in result_dic:
        if result_dic[key][2] == 1:
            match += 1
            print("Pet Label: %-26s Classifier Label: %-30s PetLabelDog: "
                  "%1d ClassLabelDog: %1d" %
                  (result_dic[key][0], result_dic[key][1],
                   result_dic[key][3], result_dic[key][4]))
    print("\nNOT A MATCH:")
    for key in result_dic:
        if result_dic[key][2] == 0:
            nomatch += 1
            print("Pet Label: %-26s Classifier Label: %-30s PetLabelDog: "
                  "%1d ClassLabelDog: %1d" %
                  (result_dic[key][0], result_dic[key][1],
                   result_dic[key][3], result_dic[key][4]))
    print("\n# Total Images:", match + nomatch,
          "# Matches:", match, " # NOT MATCH:", nomatch)


def check_results_stats(results_stats, result_dic):
    n_images = len(result_dic)
    n_pet_dog = 0
    n_class_cdog = 0
    n_class_cnotd = 0
    n_match_breed = 0
    for key in result_dic:
        if result_dic[key][2] == 1:
            if result_dic[key][3] == 1:
                n_pet_dog += 1
                if result_dic[key][4] == 1:
                    n_class_cdog += 1
                    n_match_breed += 1
            else:
                if result_dic[key][4] == 0:
                    n_class_cnotd += 1
        else:
            if result_dic[key][3] == 1:
                n_pet_dog += 1
            if result_dic[key][4] == 0:
                n_class_cnotd += 1
    n_pet_notd = n_images - n_pet_dog
    pct_corr_dog = (n_class_cdog / n_pet_dog) * 100
    pct_corr_notdog = (n_class_cnotd / n_pet_notd) * 100
    pct_corr_breed = (n_match_breed / n_pet_dog) * 100
    print("\n ** Function's Stats:")
    print("N images: %2d N Dog Images: %2d N Not Dog Images: %2d \nPct Corr "
          "dog: %5.1f Pct Correct not-a-dog: %5.1f Pct Correct Breed: %5.1f"
          % (results_stats['n_images'], results_stats['n_dogs_img'],
             results_stats['n_notdogs_img'],
             results_stats['pct_correct_dogs'],
             results_stats['pct_correct_notdogs'],
             results_stats['pct_correct_breed']))
    print("\n ** Check Stats:")
    print("N images: %2d N Dog Images: %2d N Not Dog Images: %2d \nPct Corr "
          "dog: %5.1f Pct Correct not-a-dog: %5.1f Pct Correct Breed: %5.1f"
          % (n_images, n_pet_dog, n_pet_notd, pct_corr_dog, pct_corr_notdog,
             pct_corr_breed))


# Call to main function to run the program
if __name__ == "__main__":
    main()
nilq/baby-python
python
# -*- coding: utf-8 -*- # Copyright (C) 2017 Intel Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json class RDMonitorPayload(object): def __init__(self, dt=None, rt=None, di=None, with_rts=[], st=None, groups=[], mid=None, purl=None, local_path=None): self.dt = dt self.rt = rt self.di = di self.with_rts = with_rts self.st = st self.mid = mid self.groups = groups self.purl = purl self.local_path = local_path def equals(self, obj): if self == obj: return True if obj is None or getattr(self, '__class__') != getattr( obj, '__class__'): return False other = obj if other.mid.__eq__(self.mid): return True else: return False def hash_code(self): return 0 if self.mid is None else self.mid.__hash__() def to_json(self): bf = [] # bf.append("{") if self.dt is not None: bf.append("\"dt\":\"" + self.dt + "\"") if self.rt is not None: bf.append("\"rt\":\"" + self.rt + "\"") if self.di is not None: bf.append("\"di\":\"" + str(self.di) + "\"") if len(self.with_rts) != 0 : bf.append("\"with_rts\":" + json.dumps(self.with_rts)) if self.st is not None: bf.append("\"st\":\"" + str(self.st) + "\"") if self.mid is not None: bf.append("\"mid\":\"" + self.mid + "\"") if len(self.groups) != 0: bf.append("\"groups\":" + json.dumps(self.groups)) if self.purl is not None: bf.append("\"purl\":\"" + self.purl + "\"") # if self.local_path is not None: # bf.append("\"local_path\":\"" + self.local_path + "\"") # bf.append("}") return '{' + ','.join(bf) + '}'
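# Usage sketch (hedged: the field values below are made up for illustration;
# empty fields are skipped by to_json, as the method above shows):
#
#     payload = RDMonitorPayload(dt='device', rt='oic.r.temperature',
#                                di='d1', mid='m1', purl='/providers/m1')
#     print(payload.to_json())
#     # -> {"dt":"device","rt":"oic.r.temperature","di":"d1","mid":"m1","purl":"/providers/m1"}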
nilq/baby-python
python
import os import sys import hashlib def e(s): if type(s) == str: return s return s.encode('utf-8') def d(s): if type(s) == unicode: return s return unicode(s, 'utf-8') def mkid(s): return hashlib.sha1(e(s)).hexdigest()[:2*4] def running_in_virtualenv(): return hasattr(sys, 'real_prefix') def running_in_tools_labs(): return os.path.exists('/etc/wmflabs-project') class Logger(object): def __init__(self): self._mode = 'INFO' def progress(self, message): message = e(message) if not sys.stderr.isatty(): return if self._mode == 'PROGRESS': print >>sys.stderr, '\r', print >>sys.stderr, message, self._mode = 'PROGRESS' def info(self, message): message = e(message) if self._mode == 'PROGRESS': print >>sys.stderr print >>sys.stderr, message self._mode = 'INFO'
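# Usage sketch (hedged: illustrative values; this module targets Python 2, as
# the `unicode` builtin and `print >>` statements show):
#
#     log = Logger()
#     log.progress('processing item 1...')  # rewrites the same line on a TTY
#     log.info('done')                      # always printed on its own line
#     ident = mkid(u'some-title')           # first 8 hex chars of the SHA-1 digest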
nilq/baby-python
python
import unittest import pprint import os from numpy import testing import invest_natcap.fisheries.fisheries_hst as main import invest_natcap.fisheries.fisheries_hst_io as io pp = pprint.PrettyPrinter(indent=4) workspace_dir = './invest-data/test/data/fisheries/' data_dir = './invest-data/Fisheries' inputs_dir = os.path.join(data_dir, 'input/Habitat_Scenario_Tool') outputs_dir = os.path.join(workspace_dir, 'output') class TestConvertSurvivalMatrix(unittest.TestCase): def setUp(self): self.args = { 'workspace_dir': workspace_dir, 'sexsp': 'No', 'population_csv_uri': os.path.join( inputs_dir, 'pop_params.csv'), 'habitat_dep_csv_uri': os.path.join( inputs_dir, 'habitat_dep_params.csv'), 'habitat_chg_csv_uri': os.path.join( inputs_dir, 'habitat_chg_params.csv'), 'gamma': 0.5, } self.check = { 'workspace_dir': workspace_dir, 'sexsp': 'No', 'population_csv_uri': os.path.join( outputs_dir, 'pop_params_spreadsheet_mod.csv'), 'habitat_dep_csv_uri': os.path.join( inputs_dir, 'habitat_dep_params.csv'), 'habitat_chg_csv_uri': os.path.join( inputs_dir, 'habitat_chg_params.csv'), 'gamma': 0.5, } def test_convert_spreadsheet(self): ''' Test an example from the provided spreadsheet ''' # Fetch pre and post variables vars_dict = io.fetch_args(self.args) check = io.fetch_args(self.check) # Run operation guess = main.convert_survival_matrix(vars_dict) # Check for correctness testing.assert_array_almost_equal( guess['Surv_nat_xsa_mod'], check['Surv_nat_xsa']) if __name__ == '__main__': unittest.main()
nilq/baby-python
python
""" Decorators """ import sys from contextlib import contextmanager import mock from maya_mock.cmds import MockedCmdsSession from maya_mock.pymel import MockedPymelSession, MockedPymelNode, MockedPymelPort @contextmanager def _patched_sys_modules(data): """ Temporary override sys.modules with provided data. This will take control of the import process. :param dict data: The data to overrides. """ # Hold sys.modules old_data = {key: sys.modules.get(key) for key in data} # Patch sys.modules for key, val in data.items(): sys.modules[key] = val yield # Restore sys.modules for key, val in old_data.item(): if val is None: sys.modules.pop(key) else: sys.modules[key] = val def _create_cmds_module_mock(cmds): """ Create a MagicMock for the cmds module. """ kwargs = {"cmds": cmds} module_maya = mock.MagicMock(**kwargs) return module_maya @contextmanager def mock_cmds(session): """ Context that temporary intercept maya.session with our mock. Use this to run complex maya operations in a mocked env. Usage: >>> with mock_cmds(session) as session: >>> cmds.createNode('transform1') :param MockedSession session: The session to mock. :return: A context :rtype: contextmanager.GeneratorContextManager """ cmds = ( session if isinstance(session, MockedCmdsSession) else MockedCmdsSession(session) ) # Prepare sys.modules patch module_maya = _create_cmds_module_mock(cmds) new_sys = {"maya": module_maya, "maya.cmds": cmds} with _patched_sys_modules(new_sys): yield cmds def _create_pymel_module_mock(pymel): """ Create a pymel module mock from a mocked pymel session. :param MockedPymelSession pymel: A mocked pymel session :return: A MagicMock :rtype: mock.MagicMock """ kwargs = { "core.PyNode": MockedPymelNode, "core.Attribute": MockedPymelPort, } for attr in dir(pymel): if not attr.startswith("_"): kwargs["core.{}".format(attr)] = getattr(pymel, attr) module_pymel = mock.MagicMock(**kwargs) return module_pymel @contextmanager def mock_pymel(session): """ Context that temporary intercept maya.cmds with our mock. Use this to run complex maya operations in a mocked env. Usage: >>> with mock_pymel(session) as pymel: >>> pymel.createNode('transform') :param MockedPymelSession session: The session to mock. :return: A context :rtype: contextmanager.GeneratorContextManager """ pymel = ( session if isinstance(session, MockedPymelSession) else MockedPymelSession(session) ) # Prepare sys.modules patch module_pymel = _create_pymel_module_mock(pymel) sys_data = { "pymel": module_pymel, "pymel.core": module_pymel.core, "pymel.core.PyNode": module_pymel.core.PyNode, "pymel.core.Attribute": module_pymel.core.Attribute, } with _patched_sys_modules(sys_data): yield pymel
nilq/baby-python
python
# -*- coding: utf-8 -*-
from .torsimany import main

main()
nilq/baby-python
python
from cumulusci.tasks.metadata_etl.base import ( BaseMetadataETLTask, BaseMetadataSynthesisTask, BaseMetadataTransformTask, MetadataSingleEntityTransformTask, MetadataOperation, ) from cumulusci.tasks.metadata_etl.duplicate_rules import SetDuplicateRuleStatus from cumulusci.tasks.metadata_etl.layouts import AddRelatedLists from cumulusci.tasks.metadata_etl.permissions import AddPermissionSetPermissions from cumulusci.tasks.metadata_etl.value_sets import AddValueSetEntries from cumulusci.tasks.metadata_etl.sharing import SetOrgWideDefaults flake8 = ( BaseMetadataETLTask, BaseMetadataSynthesisTask, BaseMetadataTransformTask, MetadataSingleEntityTransformTask, AddRelatedLists, AddPermissionSetPermissions, AddValueSetEntries, SetOrgWideDefaults, MetadataOperation, SetDuplicateRuleStatus, )
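# Note (an inference, not documented in the source): the `flake8` tuple above
# appears to exist only so the re-exported names are referenced once, keeping
# flake8 from flagging the imports as unused.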
nilq/baby-python
python
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Modifications copyright (C) 2021 Immanuel Weber # derived from https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/callbacks/lr_monitor.py def get_scheduler_names(schedulers): names = [] for scheduler in schedulers: sch = scheduler["scheduler"] if scheduler["name"] is not None: name = scheduler["name"] else: opt_name = "lr-" + sch.optimizer.__class__.__name__ i, name = 1, opt_name # Multiple scheduler of the same type while True: if name not in names: break i, name = i + 1, f"{opt_name}-{i}" param_groups = sch.optimizer.param_groups if len(param_groups) != 1: for i in range(len(param_groups)): names.append(f"{name}/pg{i + 1}") else: names.append(name) return names def get_lrs(schedulers, scheduler_names, interval): latest_stat = {} for name, scheduler in zip(scheduler_names, schedulers): if scheduler["interval"] == interval or interval == "any": opt = scheduler["scheduler"].optimizer param_groups = opt.param_groups for i, pg in enumerate(param_groups): suffix = f"/pg{i + 1}" if len(param_groups) > 1 else "" lr = {f"{name}{suffix}": pg.get("lr")} latest_stat.update(lr) else: print(f"warning: interval {scheduler['interval']} not supported yet.") return latest_stat
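# Usage sketch (hedged: a standalone illustration that assumes torch is
# installed; the optimizer and scheduler choices are arbitrary):
#
#     import torch
#
#     opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
#     schedulers = [{
#         "scheduler": torch.optim.lr_scheduler.StepLR(opt, step_size=10),
#         "name": None,          # auto-named "lr-SGD"
#         "interval": "epoch",
#     }]
#     names = get_scheduler_names(schedulers)      # ["lr-SGD"]
#     stats = get_lrs(schedulers, names, "epoch")  # {"lr-SGD": 0.1}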
nilq/baby-python
python
features = [1, 2, 3]
prices = [1, 2, 3]
theta = [0, 0]

LEARNING_RATE = 0.01
NO_TRAINING_EXAMPLES = len(features)
EPSILON = 0.000000001


# Cost function to calculate half the average of the squared errors for the given theta
def cost(features, prices, theta):
    total = 0
    for i in range(NO_TRAINING_EXAMPLES):
        total += (predict(features[i], theta) - prices[i]) ** 2
    return total / (2 * NO_TRAINING_EXAMPLES)


# Prediction function to find the price given the feature and theta
def predict(feature, theta):
    return theta[0] + theta[1] * feature


# Gradient descent algorithm to find the value of theta that makes the prediction
# most accurate, i.e. causes the cost function to be minimal
def gradient_descent(features, prices, theta):
    old_cost = cost(features, prices, theta)
    while True:
        # Evaluate the partial derivatives for theta0 and theta1
        sum0 = 0
        sum1 = 0
        for i in range(NO_TRAINING_EXAMPLES):
            sum0 += predict(features[i], theta) - prices[i]
            sum1 += (predict(features[i], theta) - prices[i]) * features[i]
        # Update both thetas simultaneously
        theta[0] = theta[0] - (LEARNING_RATE / NO_TRAINING_EXAMPLES) * sum0
        theta[1] = theta[1] - (LEARNING_RATE / NO_TRAINING_EXAMPLES) * sum1
        new_cost = cost(features, prices, theta)
        # Test for convergence
        if abs(old_cost - new_cost) < EPSILON:
            break
        old_cost = new_cost
    return theta


print(gradient_descent(features, prices, theta))
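# Usage sketch (an illustration, not part of the original script): because the
# toy data satisfies prices == features exactly, the true minimizer is roughly
# theta = [0, 1], so a prediction for an unseen feature stays on the line.
fitted = gradient_descent(features, prices, [0, 0])
print("prediction for feature 4:", predict(4, fitted))  # expected to be close to 4.0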
nilq/baby-python
python
__all__ = ('WeakMap',) from .docs import has_docs from .removed_descriptor import RemovedDescriptor from .weak_core import WeakReferer, add_to_pending_removals @has_docs class _WeakMapCallback: """ Callback used by ``WeakMap``-s. Attributes ---------- _parent : ``WeakReferer`` to ``WeakMap`` The parent weak map. """ __slots__ = ('_parent', ) @has_docs def __new__(cls, parent): """ Creates a new ``_WeakMapCallback`` bound to the given ``WeakMap``. Parameters ---------- parent : ``WeakMap`` The parent weak map. """ parent = WeakReferer(parent) self = object.__new__(cls) self._parent = parent return self @has_docs def __call__(self, reference): """ Called when an element of the respective weak map is garbage collected. Parameters ---------- reference : ``WeakReferer`` Weakreference to the respective object. """ parent = self._parent() if parent is None: return if parent._iterating: add_to_pending_removals(parent, reference) else: try: dict.__delitem__(parent, reference) except KeyError: pass @has_docs class WeakMap(dict): """ Weak map is a mix of weak dictionaries and weak sets. Can be used to retrieve an already existing weakreferenced value from itself. Attributes ---------- _pending_removals : `None`, `set` of ``WeakReferer`` Pending removals of the weak map if applicable. _iterating : `int` Whether the weak map is iterating and how much times. _callback : ``_WeakMapCallback`` Callback added to the ``WeakMap``'s weak keys. Class Attributes ---------------- MAX_REPR_ELEMENT_LIMIT : `int` = `50` The maximal amount of items to render by ``.__repr__``. Notes ----- ``WeakMap``-s are weakreferable. """ __slots__ = ('__weakref__', '_pending_removals', '_iterating', '_callback') MAX_REPR_ELEMENT_LIMIT = 50 @has_docs def _commit_removals(self): """ Commits the pending removals of the weak map if applicable. """ if self._iterating: return pending_removals = self._pending_removals if pending_removals is None: return for reference in pending_removals: try: dict.__delitem__(self, reference) except KeyError: pass self._pending_removals = None # __class__ -> same @has_docs def __contains__(self, key): """Returns whether the weak map contains the given key.""" try: reference = WeakReferer(key) except TypeError: return False return dict.__contains__(self, reference) # __delattr__ -> same @has_docs def __delitem__(self, key): """Deletes the given key from the weak map""" try: reference = WeakReferer(key) except TypeError: raise KeyError(key) from None try: dict.__delitem__(self, reference) except KeyError as err: raise KeyError(key) from None # __dir__ -> same # __doc__ -> same def __eq__(self, other): """returns whether the two weak maps are equal.""" if isinstance(other, type(self)): return dict.__eq__(self, other) if isinstance(other, set): pass elif hasattr(type(other), '__iter__'): other = set(other) else: return NotImplemented self_set = set(iter(self)) return self_set == other # __format__ -> same # __ge__ -> same # __getattribute__ -> same @has_docs def __getitem__(self, key): """Gets the already existing key from the weak map, which matches the given one.""" try: reference = WeakReferer(key) except TypeError: raise KeyError(key) from None reference = dict.__getitem__(self, reference) key = reference() if (key is None): if self._iterating: add_to_pending_removals(self, reference) else: dict.__delitem__(self, reference) raise KeyError(key) return key # __gt__ -> same # __hash__ -> same @has_docs def __init__(self, iterable=None): """ Creates a new ``WeakMap`` from the given iterable. 
Parameters ---------- iterable : `None`, `iterable` = `None`, Optional Iterable to update the created map with. """ self._pending_removals = None self._iterating = 0 self._callback = _WeakMapCallback(self) if (iterable is not None): self.update(iterable) # __init_subclass__ -> same @has_docs def __iter__(self): """ Iterates over the weak map's elements. This method is an iterable generator, """ self._iterating += 1 try: for reference in dict.__iter__(self): key = reference() if (key is None): add_to_pending_removals(self, reference) continue yield key continue finally: self._iterating -= 1 self._commit_removals() # __le__ -> same @has_docs def __len__(self): """Returns the length of the weak map.""" length = dict.__len__(self) pending_removals = self._pending_removals if (pending_removals is not None): length -= len(pending_removals) return length # __lt__ -> same def __ne__(self, other): """returns whether the two weak maps are equal.""" if isinstance(other, type(self)): return dict.__ne__(self, other) if isinstance(other, set): pass elif hasattr(type(other), '__iter__'): other = set(other) else: return NotImplemented self_set = set(iter(self)) return self_set != other # __new__ -> same @has_docs def __reduce__(self): """Reduces the map to a picklable object.""" return (type(self), list(self)) @has_docs def __reduce_ex__(self, version): """Reduces the map to a picklable object.""" return type(self).__reduce__(self) @has_docs def __repr__(self): """Returns the weak map's representation.""" result = [self.__class__.__name__, '({'] if len(self): limit = self.MAX_REPR_ELEMENT_LIMIT collected = 0 for reference in dict.__iter__(self): key = reference() if (key is None): add_to_pending_removals(self, reference) continue result.append(repr(key)) result.append(', ') collected +=1 if collected != limit: continue leftover = len(self) - collected if leftover: result.append('...}, ') result.append(str(leftover)) result.append(' truncated)') else: result[-1] = '})' break else: result[-1] = '})' self._commit_removals() else: result.append('})') return ''.join(result) # __setattr__ -> same __setitem__ = RemovedDescriptor() # __sizeof__ -> same __str__ = __repr__ # __subclasshook__ -> same @has_docs def clear(self): """ Clear's the weak map. """ dict.clear(self) self._pending_removals = None @has_docs def copy(self): """ Copies the weak map. Returns ------- new : ``WeakMap`` """ new = dict.__new__(type(self)) new._iterating = 0 new._pending_removals = None new._callback = callback = _WeakMapCallback(new) for reference in dict.__iter__(self): key = reference() if (key is None): add_to_pending_removals(self, reference) continue reference = WeakReferer(key, callback) dict.__setitem__(new, reference, reference) continue self._commit_removals() return new @has_docs def get(self, key, default=None): """ Gets the key of the weak map, which matches the given one. Parameters ---------- key : `Any` A key to match. default : `Any` = `None`, Optional Default value to return if the given `key` could not be matched. Returns ------- real_key : `Any`, `default` The matched key. If no key was matched returns the `default` value. 
""" try: reference = WeakReferer(key) except TypeError: return default real_reference = dict.get(self, reference, reference) if real_reference is reference: return default real_key = real_reference() if (real_key is not None): return real_key if self._iterating: add_to_pending_removals(self, real_reference) else: dict.__delitem__(self, real_reference) return default items = RemovedDescriptor() keys = RemovedDescriptor() @has_docs def pop(self, key, default=...): """ Pops a key from the weak map which matches the given one. Parameters ---------- key : `Any` A key to match. default : `Any`, Optional Default value to return if the given `key` could not be matched. Returns ------- real_key : `Any`, `default` The matched key. If no key was matched and `default` value is given, then returns that. Raises ------ KeyError If `key` could not be matched and `default` value is was not given either. """ try: reference = WeakReferer(key) except TypeError: pass else: real_reference = dict.pop(self, reference, ...) if (real_reference is not ...): real_key = real_reference() if (real_key is not None): return real_key if self._iterating: add_to_pending_removals(self, real_reference) else: dict.__delitem__(self, real_reference) if default is ...: raise KeyError(key) return default popitem = RemovedDescriptor() setdefault = RemovedDescriptor() @has_docs def update(self, iterable): """ Updates the map with the given iterable. Parameters ---------- iterable : `iterable` The iterable to update the map with. Raises ------ TypeError If the given value is not `iterable`, or any of it's elements is not weakreferable. """ if hasattr(type(iterable), '__iter__'): # Make sure, we have unique elements, so convert other to set for element in iterable: self.set(element) else: raise TypeError( f'Parameter `iterable` must be an iterable, got {iterable.__class__.__name__}; {iterable!r}.' ) values = RemovedDescriptor() @has_docs def set(self, key): """ Sets a key to the ``WeakMap`` and then returns it. If they given key is already present in the ``WeakMap``, returns that instead. Parameters ---------- key : `Any` A key to match. Returns ------- real_key : `Any` The matched key, or the given one. Raises ------ TypeError If `key` not supports weakreferencing. """ reference = WeakReferer(key, self._callback) real_reference = dict.get(self, reference, None) if (real_reference is not None): real_key = real_reference() if (real_key is not None): return real_key dict.__setitem__(self, reference, reference) return key
nilq/baby-python
python
import dash import dash_table import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output, State import numpy as np import pandas as pd import plotly.graph_objects as go from database import get_data_cache, get_all_food_data external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css', '/assets/style.css'] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) app.config.suppress_callback_exceptions = True colors = ['#000000', '#FC6D41', '#274228', '#274228', '#7FB800', '#955E42', '#000000', '#F0A202', '#706C61', '#65743A']
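# Continuation sketch (an assumption, not in the original dump): the file above only
# creates the Dash app and its colour palette, so a placeholder layout and an entry
# point are added here to make the module runnable on its own.
app.layout = html.Div([
    html.H1('Food dashboard'),                               # placeholder page title
    dcc.Graph(id='placeholder-graph', figure=go.Figure()),   # empty figure until callbacks fill it
])

if __name__ == '__main__':
    app.run_server(debug=True)  # Dash development server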
nilq/baby-python
python
# Generated by Django 3.1.7 on 2021-04-07 15:46 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('main', '0001_initial'), ] operations = [ migrations.CreateModel( name='Rubric', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(db_index=True, max_length=20, unique=True, verbose_name='Название')), ('order', models.SmallIntegerField(db_index=True, default=0, verbose_name='Порядок')), ], ), migrations.CreateModel( name='SubRubric', fields=[ ], options={ 'verbose_name': 'Подрубрика', 'verbose_name_plural': 'Подрубрики', 'ordering': ('super_rubric__order', 'super_rubric__name', 'order', 'name'), 'proxy': True, 'indexes': [], 'constraints': [], }, bases=('main.rubric',), ), migrations.CreateModel( name='SuperRubric', fields=[ ], options={ 'verbose_name': 'Надрубрика', 'verbose_name_plural': 'Надрубрики', 'ordering': ('order', 'name'), 'proxy': True, 'indexes': [], 'constraints': [], }, bases=('main.rubric',), ), migrations.AddField( model_name='rubric', name='super_rubric', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='main.superrubric', verbose_name='Надрубрика'), ), ]
nilq/baby-python
python
# based on django-markdownify # https://github.com/erwinmatijsen/django-markdownify # https://django-markdownify.readthedocs.io/en/latest/settings.html from functools import partial from django import template from django.conf import settings import bleach from markdownx.utils import markdownify as markdownx_markdownify def markdownify(value): # Get the settings or set defaults if not set # Bleach settings whitelist_tags = getattr(settings, 'MARKDOWNX_WHITELIST_TAGS', bleach.sanitizer.ALLOWED_TAGS) whitelist_attrs = getattr(settings, 'MARKDOWNX_WHITELIST_ATTRS', bleach.sanitizer.ALLOWED_ATTRIBUTES) whitelist_styles = getattr(settings, 'MARKDOWNX_WHITELIST_STYLES', bleach.sanitizer.ALLOWED_STYLES) whitelist_protocols = getattr(settings, 'MARKDOWNX_WHITELIST_PROTOCOLS', bleach.sanitizer.ALLOWED_PROTOCOLS) # Markdown settings strip = getattr(settings, 'MARKDOWNX_STRIP', True) # Bleach Linkify linkify = None linkify_text = getattr(settings, 'MARKDOWNX_LINKIFY_TEXT', True) if linkify_text: linkify_parse_email = getattr(settings, 'MARKDOWNX_LINKIFY_PARSE_EMAIL', False) linkify_callbacks = getattr(settings, 'MARKDOWNX_LINKIFY_CALLBACKS', None) linkify_skip_tags = getattr(settings, 'MARKDOWNX_LINKIFY_SKIP_TAGS', None) linkifyfilter = bleach.linkifier.LinkifyFilter linkify = [partial(linkifyfilter, callbacks=linkify_callbacks, skip_tags=linkify_skip_tags, parse_email=linkify_parse_email )] # Convert markdown to html html = markdownx_markdownify(value) #.replace('&amp;', '&') # Sanitize html if wanted if getattr(settings, 'MARKDOWNX_BLEACH', True): cleaner = bleach.Cleaner(tags=whitelist_tags, attributes=whitelist_attrs, styles=whitelist_styles, protocols=whitelist_protocols, strip=strip, filters=linkify, ) html = cleaner.clean(html) return html
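# Optional registration sketch (an assumption: the module imports django.template but
# the dump above never registers a filter). This exposes markdownify() to templates as
# {{ value|markdownify }}, mirroring how django-markdownify is normally used.
register = template.Library()
register.filter('markdownify', markdownify)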
nilq/baby-python
python
""" Artificial Images Simulator ============================ The Artificial Images Simulator (AIS) class was developed to generate artificial star images, similar to those images that would be acquired by using the acquisition system of the instrument. To accomplish this, the AIS models as star flux as a 2D gaussian distribution. Then, the star flux is added to an image with a background level given by counts distribution of an image of the SPARC4 cameras, as a function of its operation mode. """ import openpyxl import astropy.io.fits as fits from PSF import Point_Spread_Function from BGI import Background_Image from HDR import Header from CHC import (Concrete_Channel_1, Concrete_Channel_2, Concrete_Channel_3, Concrete_Channel_4) class Artificial_Image_Simulator: """Create an image cube with the star flux distribution. Parameters ---------- star_magitude : float Magnitude of the star sky_magnitude: float Magnitude of the sky gaussian_stddev: int Number of pixels of the gaussian standard deviation ccd_operation_mode: dictionary A python dictionary with the CCD operation mode. The allowed keywords values for the dictionary are * em_mode: {0, 1} Use the 0 for the Conventional Mode and 1 for the EM Mode * em_gain: float Electron Multiplying gain * preamp: {1, 2} Pre-amplification * hss: {0.1, 1, 10, 20, 30} Horizontal Shift Speed (readout rate) in MHz * bin: int Number of the binned pixels * t_exp: float Exposure time in seconds ccd_temp: float, optional CCD temperature serial_number: {9914, 9915, 9916 or 9917}, optional CCD serial number bias_level: int, optional Bias level, in ADU, of the image image_dir: str, optional Directory where the image should be saved Yields ------ image cube: array like An image cube in the FITS format with the star flux distribution Notes ----- Explicar o código; background; passo-a-passo Examples -------- Incluir exemplos References ---------- .. [#Bernardes_2018] Bernardes, D. V., Martioli, E., and Rodrigues, C. V., “Characterization of the SPARC4 CCDs”, <i>Publications of the Astronomical Society of the Pacific</i>, vol. 130, no. 991, p. 95002, 2018. doi:10.1088/1538-3873/aacb1e. 
""" def __init__(self, star_magnitude, sky_magnitude, gaussian_std, ccd_operation_mode, channel, bias_level=500, image_dir=''): """Initialize the class.""" if type(star_magnitude) not in [int, float]: raise ValueError('The star flux must be a number: ' + f'{star_magnitude}') elif star_magnitude <= 0: raise ValueError( f'The star flux must be greater than zero: {star_magnitude}') else: self.star_magnitude = star_magnitude if type(sky_magnitude) not in [int, float]: raise ValueError(f'The sky flux must be a number: {sky_magnitude}') elif sky_magnitude <= 0: raise ValueError( f'The sky flux must be greater than zero: {sky_magnitude}') else: self.sky_magnitude = sky_magnitude if type(gaussian_std) is not int: raise ValueError( f'The gaussian standard deviation must be \ an integer: {gaussian_std}') elif gaussian_std <= 0: raise ValueError( f'The gaussian standard deviation must be greater \ than zero: {gaussian_std}') else: self.gaussian_std = gaussian_std if channel in [1, 2, 3, 4]: self.channel = channel else: raise ValueError( 'There is no camera with the provided' + f'serial number: {channel}') if type(bias_level) is not int: raise ValueError( f'The bias level must be an integer: {bias_level}') elif bias_level <= 0: raise ValueError(f'The bias level must be positive: {bias_level}') else: self.bias_level = bias_level if type(image_dir) is not str: raise ValueError( f'The directory path must be a string: {image_dir}') else: if image_dir != '': if '/' not in image_dir[-1]: image_dir += '/' self.image_dir = image_dir self._verify_ccd_operation_mode(ccd_operation_mode) self._configure_gain(ccd_operation_mode) self._configure_image_name(ccd_operation_mode) CHC = 0 if channel == 1: CHC = Concrete_Channel_1(ccd_operation_mode['ccd_temp'], sparc4_acquisition_mode='phot') elif channel == 2: CHC = Concrete_Channel_2(ccd_operation_mode['ccd_temp'], sparc4_acquisition_mode='phot') elif channel == 3: CHC = Concrete_Channel_3(ccd_operation_mode['ccd_temp'], sparc4_acquisition_mode='phot') elif channel == 4: CHC = Concrete_Channel_4(ccd_operation_mode['ccd_temp'], sparc4_acquisition_mode='phot') self.CHC = CHC self.PSF = Point_Spread_Function( CHC, ccd_operation_mode, self.ccd_gain, self.gaussian_std) self.BGI = Background_Image(CHC, ccd_operation_mode, self.ccd_gain, self.bias_level) self.HDR = Header(ccd_operation_mode, self.ccd_gain, CHC.get_serial_number()) def _verify_ccd_operation_mode(self, ccd_operation_mode): """Verify if the provided CCD operation mode is correct.""" em_mode = ccd_operation_mode['em_mode'] em_gain = ccd_operation_mode['em_gain'] hss = ccd_operation_mode['hss'] preamp = ccd_operation_mode['preamp'] binn = ccd_operation_mode['binn'] t_exp = ccd_operation_mode['t_exp'] ccd_temp = ccd_operation_mode['ccd_temp'] dic_keywords_list = [ 'binn', 'ccd_temp', 'em_gain', 'em_mode', 'hss', 'preamp', 't_exp'] for key in ccd_operation_mode.keys(): if key not in dic_keywords_list: raise ValueError( f'The name provided is not a CCD parameter: {key}') if list(ccd_operation_mode.keys()).sort() != dic_keywords_list.sort(): raise ValueError( 'There is a missing parameter of the CCD operation mode') if em_mode not in [0, 1]: raise ValueError( f'Invalid value for the EM mode: {em_mode}') if em_mode == 0: if em_gain != 1: raise ValueError( 'The EM Gain must be 1 for the Conventional' + f' Mode: {em_gain}') else: if em_gain not in [float, int]: raise ValueError( f'The EM gain must be a number: {em_gain}') elif em_gain < 2 or em_gain > 300: raise ValueError( f'EM gain out of range [2, 300]: {em_gain}') 
if preamp not in [1, 2]: raise ValueError( f'Invalid value for the pre-amplification: {preamp}') if hss not in [0.1, 1, 10, 20, 30]: raise ValueError( f'Invalid value for the Readout rate: {hss}') if binn not in [1, 2]: raise ValueError( f'Invalid value for the binning: {bin}') if type(t_exp) not in [float, int]: raise ValueError( f'The exposure time must be a number: {t_exp}') elif ccd_operation_mode['t_exp'] < 1e-5: raise ValueError( f'Invalid value for the exposure time: {t_exp}') if type(ccd_temp) not in [float, int]: raise ValueError( f'The CCD temperature must be a number: {ccd_temp}') if ccd_temp < -80 or ccd_temp > 20: raise ValueError( f'CCD temperature out of range [-80, 20]: {ccd_temp}') def get_channel_ID(self): """Return the ID for the respective SPARC4 channel.""" return self.CHC.get_channel_ID() def _configure_image_name(self, ccd_operation_mode, include_star_mag=False): """Create the image name. The image name will be created based on the provided information Parameters ---------- include_star_flux: bool, optional Indicate if it is needed to include the star flux value in the image name """ dic = ccd_operation_mode em_gain = '_G' + str(dic['em_gain']) em_mode = 'CONV' if dic['em_mode'] == 1: em_mode = 'EM' hss = '_HSS' + str(dic['hss']) preamp = '_PA' + str(dic['preamp']) binn = '_B' + str(dic['binn']) t_exp = '_TEXP' + str(dic['t_exp']) self.image_name = em_mode + hss + preamp + binn + t_exp + em_gain if include_star_mag: star_flux = '_S' + str(self.star_magnitude) self.image_name += star_flux def _configure_gain(self, ccd_operation_mode): """Configure the CCD gain based on its operation mode.""" em_mode = ccd_operation_mode['em_mode'] hss = ccd_operation_mode['hss'] preamp = ccd_operation_mode['preamp'] tab_index = 0 if hss == 0.1: tab_index = 23 elif hss == 1: tab_index = 19 if em_mode == 1: tab_index = 15 elif hss == 10: tab_index = 11 elif hss == 20: tab_index = 7 elif hss == 30: tab_index = 3 else: raise ValueError('Unexpected value for the readout rate: {hss}') if preamp == 2: tab_index += 2 spreadsheet = openpyxl.load_workbook( f'code/RNC/spreadsheet/Channel {self.channel}' + '/Read_noise_and_gain_values.xlsx').active self.ccd_gain = spreadsheet.cell(tab_index, 5).value def create_artificial_image(self): """Create the artificial star image. This function will sum the background image with the star SPF image to create an artificil image, similar to those acquired by the SPARC4 cameras. Returns ------- Star Image: A FITS file with the calculated artificial image """ background = self.BGI.create_background_image() star_PSF = self.PSF.create_star_PSF() header = self.HDR.create_header() fits.writeto(self.image_dir + self.image_name + '.fits', background + star_PSF, overwrite=True, header=header)
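# Usage sketch for the class above (parameter values are illustrative assumptions;
# note that _configure_gain reads a spreadsheet under code/RNC/, which must exist).
ccd_operation_mode = {
    'em_mode': 0,     # conventional mode
    'em_gain': 1,     # must be 1 in conventional mode
    'preamp': 1,
    'hss': 1,         # readout rate in MHz
    'binn': 1,
    't_exp': 1.0,     # seconds
    'ccd_temp': -70,
}

ais = Artificial_Image_Simulator(star_magnitude=12.0,
                                 sky_magnitude=20.0,
                                 gaussian_std=3,
                                 ccd_operation_mode=ccd_operation_mode,
                                 channel=1)
ais.create_artificial_image()  # writes a FITS image named after the operation mode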
nilq/baby-python
python
from rest_framework import viewsets from .models import RESP, REGONError, REGON, JSTConnection, Institution, ESP from .serializers import RESPSerializer, REGONSerializer, REGONErrorSerializer, JSTConnectionSerializer, \ InstitutionSerializer, ESPSerializer class InstitutionViewSet(viewsets.ReadOnlyModelViewSet): queryset = Institution.objects.prefetch_related('esp_set', 'regon_data__regonerror_set').\ select_related('jstconnection', 'regon_data', 'resp').all() serializer_class = InstitutionSerializer class ESPViewSet(viewsets.ReadOnlyModelViewSet): queryset = ESP.objects.select_related('institution').all() serializer_class = ESPSerializer class RESPViewSet(viewsets.ReadOnlyModelViewSet): queryset = RESP.objects.select_related('institution').all() serializer_class = RESPSerializer class REGONViewSet(viewsets.ReadOnlyModelViewSet): queryset = REGON.objects.prefetch_related('regonerror_set').select_related('institution').all() serializer_class = REGONSerializer class REGONErrorViewSet(viewsets.ReadOnlyModelViewSet): queryset = REGONError.objects.select_related('regon').all() serializer_class = REGONErrorSerializer class JSTConnectionViewSet(viewsets.ReadOnlyModelViewSet): queryset = JSTConnection.objects.select_related('institution', 'jst').all() serializer_class = JSTConnectionSerializer
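# Routing sketch (an assumption -- the project's URL configuration is not part of the
# dump): the usual way to expose these read-only viewsets is through a DefaultRouter.
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register('institutions', InstitutionViewSet)
router.register('esps', ESPViewSet)
router.register('resps', RESPViewSet)
router.register('regons', REGONViewSet)
router.register('regon-errors', REGONErrorViewSet)
router.register('jst-connections', JSTConnectionViewSet)

# in urls.py: urlpatterns = [path('api/', include(router.urls))]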
nilq/baby-python
python
# coding=utf-8 from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde # Generated with OTLEnumerationCreator. To modify: extend, do not edit class KlEleAansluitvermogen(KeuzelijstField): """Keuzelijst met gangbare waarden voor elektrisch aansluitvermogen.""" naam = 'KlEleAansluitvermogen' label = 'Elektrisch aansluitvermogen' objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlEleAansluitvermogen' definition = 'Keuzelijst met gangbare waarden voor elektrisch aansluitvermogen.' codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlEleAansluitvermogen' options = { '16A-230Vdriefasig-6.4kVA': KeuzelijstWaarde(invulwaarde='16A-230Vdriefasig-6.4kVA', label='16A 230Vdriefasig-6.4kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/16A-230Vdriefasig-6.4kVA'), '16A-230Veenfasig-3.7kVA': KeuzelijstWaarde(invulwaarde='16A-230Veenfasig-3.7kVA', label='16A 230Veenfasig-3.7kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/16A-230Veenfasig-3.7kVA'), '16A-400Vdriefasig-11.1kVA': KeuzelijstWaarde(invulwaarde='16A-400Vdriefasig-11.1kVA', label='16A 400Vdriefasig-11.1kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/16A-400Vdriefasig-11.1kVA'), '20A-230Vdriefasig-8kVA': KeuzelijstWaarde(invulwaarde='20A-230Vdriefasig-8kVA', label='20A 230Vdriefasig-8kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/20A-230Vdriefasig-8kVA'), '20A-230Veenfasig-4.6kVA': KeuzelijstWaarde(invulwaarde='20A-230Veenfasig-4.6kVA', label='20A 230Veenfasig-4.6kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/20A-230Veenfasig-4.6kVA'), '20A-400Vdriefasig-13.9kVA': KeuzelijstWaarde(invulwaarde='20A-400Vdriefasig-13.9kVA', label='20A 400Vdriefasig-13.9kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/20A-400Vdriefasig-13.9kVA'), '25A-230Vdriefasig-10kVA': KeuzelijstWaarde(invulwaarde='25A-230Vdriefasig-10kVA', label='25A 230Vdriefasig-10kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/25A-230Vdriefasig-10kVA'), '25A-230Veenfasig-5.8kVA': KeuzelijstWaarde(invulwaarde='25A-230Veenfasig-5.8kVA', label='25A 230Veenfasig-5.8kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/25A-230Veenfasig-5.8kVA'), '25A-400Vdriefasig-17.3kVA': KeuzelijstWaarde(invulwaarde='25A-400Vdriefasig-17.3kVA', label='25A 400Vdriefasig-17.3kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/25A-400Vdriefasig-17.3kVA'), '32A-230Vdriefasig-12.7kVA': KeuzelijstWaarde(invulwaarde='32A-230Vdriefasig-12.7kVA', label='32A 230Vdriefasig-12.7kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/32A-230Vdriefasig-12.7kVA'), '32A-230Veenfasig-7.4kVA': KeuzelijstWaarde(invulwaarde='32A-230Veenfasig-7.4kVA', label='32A 230Veenfasig-7.4kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/32A-230Veenfasig-7.4kVA'), '32A-400Vdriefasig-22.2kVA': KeuzelijstWaarde(invulwaarde='32A-400Vdriefasig-22.2kVA', label='32A 400Vdriefasig-22.2kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/32A-400Vdriefasig-22.2kVA'), '40A-230Vdriefasig-15.9kVA': 
KeuzelijstWaarde(invulwaarde='40A-230Vdriefasig-15.9kVA', label='40A 230Vdriefasig-15.9kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/40A-230Vdriefasig-15.9kVA'), '40A-230Veenfasig-9.2kVA': KeuzelijstWaarde(invulwaarde='40A-230Veenfasig-9.2kVA', label='40A 230Veenfasig-9.2kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/40A-230Veenfasig-9.2kVA'), '40A-400Vdriefasig-27.7kVA': KeuzelijstWaarde(invulwaarde='40A-400Vdriefasig-27.7kVA', label='40A 400Vdriefasig-27.7kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/40A-400Vdriefasig-27.7kVA'), '50A-230Vdriefasig-19.9kVA': KeuzelijstWaarde(invulwaarde='50A-230Vdriefasig-19.9kVA', label='50A 230Vdriefasig-19.9kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/50A-230Vdriefasig-19.9kVA'), '50A-230Veenfasig-11.5kVA': KeuzelijstWaarde(invulwaarde='50A-230Veenfasig-11.5kVA', label='50A 230Veenfasig-11.5kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/50A-230Veenfasig-11.5kVA'), '50A-400Vdriefasig-34.6kVA': KeuzelijstWaarde(invulwaarde='50A-400Vdriefasig-34.6kVA', label='50A 400Vdriefasig-34.6kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/50A-400Vdriefasig-34.6kVA'), '63A-230Vdriefasig-25.1kVA': KeuzelijstWaarde(invulwaarde='63A-230Vdriefasig-25.1kVA', label='63A 230Vdriefasig-25.1kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/63A-230Vdriefasig-25.1kVA'), '63A-230Veenfasig-14.5kVA': KeuzelijstWaarde(invulwaarde='63A-230Veenfasig-14.5kVA', label='63A 230Veenfasig-14.5kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/63A-230Veenfasig-14.5kVA'), '63A-400Vdriefasig-43.6kVA': KeuzelijstWaarde(invulwaarde='63A-400Vdriefasig-43.6kVA', label='63A 400Vdriefasig-43.6kVA', objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/63A-400Vdriefasig-43.6kVA') }
nilq/baby-python
python
# Generated by Django 2.0.2 on 2018-04-26 17:00 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0002_auto_20180406_1917'), ] operations = [ migrations.AlterField( model_name='profile', name='img_src', field=models.ImageField(blank=True, default='sampleavatar.png', upload_to=''), ), ]
nilq/baby-python
python
# -*- coding: utf-8 -*- ''' :file: score.py :author: -Farmer :url: https://blog.farmer233.top :date: 2021/09/20 20:06:29 ''' # cjcx/cjcx_cxDgXscj.html?doType=query&gnmkdm=N305005&su=2018133209 from school_sdk.client.api import BaseCrawler class Score(BaseCrawler): def __init__(self, user_client) -> None: super().__init__(user_client) self.endpoints: dict = self.school.config['url_endpoints'] self.raw_score = None self.score_dict:dict = {} self.score_list:list = [] def get_score(self, **kwargs): return self.get_score_dict(**kwargs) def get_score_list(self, **kwargs): """获取成绩清单-列表 Returns: list: 成绩列表 """ if not self.score_list: self.parse(**kwargs) return self.score_list def get_score_dict(self, **kwargs): """获取成绩清单-字典 Returns: dict: 成绩字典清单 """ if not self.score_dict: self.parse(**kwargs) return self.score_dict def parse(self, **kwargs): """解析数据 """ if self.raw_score is None: self.load_score(**kwargs) self._parse(self.raw_score) def load_score(self, **kwargs) -> None: """加载课表 """ self.raw_score = self._get_score(**kwargs) def _get_score(self, year: int, term: int = 1, **kwargs): """获取教务系统成绩 Args: year (int): 学年 term (int, optional): 学期. Defaults to 1. Returns: json: json数据 """ url = self.endpoints['SCORE']['API'] params = { 'doType': 'query', 'gnmkdm': 'N305005', 'su': self.account } data = { 'xnm': year, 'xqm': self.TERM.get(term, 3), '_search': False, 'nd': self.t, 'queryModel.showCount': 500, 'queryModel.currentPage': 1, 'queryModel.sortName': None, 'queryModel.sortOrder': 'asc', 'time': 4, } res = self.post(url=url, params=params, data=data, **kwargs) return res.json() def _parse(self, raw: dict): # kcmc -> 课程名称 # kcxzmc -> 课程性质名称 # kcbj -> 课程标记 # jsxm -> 教师姓名 # khfsmc -> 考核方式 # ksxz -> 考试性质 # xf -> 学分 # kkbmmc -> 开课部门名称 # cj -> 成绩 # njdm_id -> 年级代码 """解析教务系统成绩 Args: raw (dict): 教务系统的原始数据 """ items = raw.get('items') for item in items: format_item = { "course_name": item.get('kcmc'), 'course_nature': item.get('kcxzmc'), 'course_target': item.get('kcbj'), 'teacher': item.get('jsxm'), 'exam_method': item.get('khfsmc'), 'exam_nature': item.get('ksxz'), 'exam_result': item.get('cj'), 'credit': item.get('xf'), 'course_group': item.get('kkbmmc'), 'grade': item.get('njdm_id') } self.score_list.append(format_item) self.score_dict.setdefault(item.get('kcmc'), format_item)
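# Usage sketch (the user_client construction is an assumption -- the dump only shows
# that Score wraps an already-authenticated client from the surrounding school_sdk
# package):
score = Score(user_client)                       # user_client: a logged-in client object
print(score.get_score(year=2020, term=1))        # grades keyed by course name
print(score.get_score_list(year=2020, term=1))   # the same records as a list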
nilq/baby-python
python
# --------------------------------------------------------
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License
# --------------------------------------------------------
import os
import random

from PIL import Image
from PIL import ImageFile
from torch.utils.data import Dataset

from .transforms import transform_train, transform_test

ImageFile.LOAD_TRUNCATED_IMAGES = True


class CommonDataset(Dataset):
    def __init__(self, is_train: bool = True):
        self.data = []          # list of (relative_path, label) pairs, filled by subclasses
        self.domain_id = []     # per-sample domain index, parallel to self.data
        self.image_root = ''
        self.transform = transform_train() if is_train else transform_test()
        self._domains = None
        self.num_domain = 1

    @property
    def domains(self):
        return self._domains

    def __getitem__(self, index):
        # domain = random.randint(0, self.num_domain - 1)
        # path, label = self.data[domain][index]
        domain = self.domain_id[index]
        path, label = self.data[index]
        path = os.path.join(self.image_root, path)

        with Image.open(path) as image:
            image = image.convert('RGB')

        if self.transform is not None:
            image = self.transform(image)

        return {
            'image': image,
            'label': label,
            'domain': domain
        }

    def __len__(self):
        return len(self.data)  # number of (path, label) records
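# Minimal subclass sketch (directory layout and label scheme are assumptions):
# CommonDataset leaves data, domain_id and image_root empty, so a concrete dataset
# is expected to fill them in.
class SingleDomainDataset(CommonDataset):
    def __init__(self, image_root: str, samples, is_train: bool = True):
        super().__init__(is_train)
        self.image_root = image_root
        self.data = list(samples)               # [(relative_path, label), ...]
        self.domain_id = [0] * len(self.data)   # one domain only
        self._domains = ['default']
        self.num_domain = 1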
nilq/baby-python
python
# """""" from os import getpid import logging.handlers from .snippet import T2I def mapped_level(name): levels = {'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'fatal': logging.FATAL} return levels[name] if name in levels else logging.WARNING; def mapped_when(name): when = ['S', 'M', 'H', 'D'] return name if name in when else when[-1] def mapped_backup_count(name): return T2I(name, default=7) def mapped_interval(name): return T2I(name, 1) def init_logging_parameters(**kwargs): logger = logging.getLogger() filename = kwargs.get('log.file', 'default.{}.log'.format(getpid())) level = mapped_level(kwargs.get('log.level', 'warning')) backup = mapped_backup_count(kwargs.get('log.backup', 7)) when = mapped_when(kwargs.get('log.when', 'D').upper()) interval = mapped_interval(kwargs.get('log.interval', 1)) handler = logging.handlers.TimedRotatingFileHandler(filename, backupCount = backup, when = when, interval = interval) formatter = logging.Formatter('[%(asctime)s]+%(levelname)s: %(message)s', '%Y-%m-%d %H:%M:%S') handler.setFormatter(formatter) logger.setLevel(level) logger.addHandler(handler)
nilq/baby-python
python
""" October 2018 Simulations of a Ramsey experiment in the presence of flux 1/f noise """ import time import numpy as np import qutip as qtp from pycqed.measurement import detector_functions as det from scipy.interpolate import interp1d import scipy import matplotlib.pyplot as plt import logging from pycqed.simulations import cz_superoperator_simulation_withdistortions_newdevice_singlequbitphases_newcode_fluxnoise2 as czu from pycqed.tests import test_ramsey_simulations as tests def time_evolution(H_vec, c_ops, sim_step): ''' Arguments: H: list of Hamiltonians at different times, each on for a time = sim_step c_ops: list of collapse operators. if an element of the list is a single operator, then it is a time-independent one, otherwise, if it's a 2-list, then the first el. is the operator and the second one is a list of time-dependent coefficients. Note that in the first case the coefficient is included in the operator sim_step: time for which each H[t] is on. ''' exp_L_total=1 for i in range(len(H_vec)): H=H_vec[i] if c_ops != []: c_ops_temp=[] for c in range(len(c_ops)): if isinstance(c_ops[c],list): c_ops_temp.append(c_ops[c][0]*c_ops[c][1][i]) # c_ops are already in the H_0 basis else: c_ops_temp.append(c_ops[c]) liouville_exp_t=(qtp.liouvillian(H,c_ops_temp)*sim_step).expm() else: liouville_exp_t=(-1j*H*sim_step).expm() exp_L_total=liouville_exp_t*exp_L_total return exp_L_total def freq_shift_from_fluxbias(frequency,frequency_target,fluxbias_q0,positive_arc): ''' frequency_target = max frequency of the qubit positive_arc (bool) for single and double-sided ''' if frequency > frequency_target: logging.warning('Detuning can only be negative. Freq = {}, Freq_max = {}'.format(frequency,frequency_target)) frequency = frequency_target if positive_arc: sign = 1 else: sign = -1 # formula obtained for omega = omega_0 * sqrt(abs(cos(pi Phi/Phi_0))) frequency_biased = frequency - np.pi/2 * (frequency_target**2/frequency) * np.sqrt(1 - (frequency**4/frequency_target**4)) * fluxbias_q0 * sign - \ - np.pi**2/2 * frequency_target * (1+(frequency**4/frequency_target**4)) / (frequency/frequency_target)**3 * fluxbias_q0**2 # with sigma up to circa 1e-3 \mu\Phi_0 the second order is irrelevant return frequency_biased def calc_populations(U): hadamard_singleq = qtp.Qobj([[1,1,0], [1,-1,0], [0,0,0]])/np.sqrt(2) hadamard_q0 = qtp.tensor(qtp.qeye(3),hadamard_singleq) if U.type == 'oper': U_pi2_pulsed = hadamard_q0 * U * hadamard_q0 populations = {'population_in_0': np.abs(U_pi2_pulsed[0,0])**2, 'population_in_1': np.abs(U_pi2_pulsed[0,1])**2} elif U.type == 'super': U_pi2_pulsed = qtp.to_super(hadamard_q0) * U * qtp.to_super(hadamard_q0) populations = {'population_in_0': np.real(U_pi2_pulsed[0,0]), 'population_in_1': np.real(U_pi2_pulsed[0,10])} return populations class ramsey_experiment(det.Soft_Detector): def __init__(self, fluxlutman, noise_parameters_CZ, control_parameters_ramsey): """ Detector for simulating a Ramsey experiment. Args: fluxlutman (instr): an instrument that contains the parameters required to generate the waveform for the trajectory, and the hamiltonian as well. 
noise_parameters_CZ: instrument that contains the noise parameters, plus some more control_parameters_ramsey: instrument containing some parameters for ramsey that are passed via notebook """ super().__init__() self.value_names = ['population_in_0','population_in_1'] self.value_units = ['%', '%'] self.fluxlutman = fluxlutman self.noise_parameters_CZ = noise_parameters_CZ self.control_parameters_ramsey = control_parameters_ramsey def acquire_data_point(self, **kw): ramsey = self.control_parameters_ramsey.ramsey() # True for Ram-Z, False for Echo-Z sigma = self.control_parameters_ramsey.sigma() # width of the Gaussian distribution of the fluxbias detuning = self.control_parameters_ramsey.detuning_ramsey() # how much the freq of q0 is offset from the sweetspot t = self.control_parameters_ramsey.pulse_length() # separation time between the two pi/2 pulses qoi_plot = list() # used to verify convergence properties. If len(n_sampling_gaussian_vec)==1, it is useless n_sampling_gaussian_vec = [101] # 11 guarantees excellent convergence. We choose it odd so that the central point of the Gaussian is included. # ALWAYS choose it odd for n_sampling_gaussian in n_sampling_gaussian_vec: # If sigma=0 there's no need for sampling weights=[] if sigma != 0: samplingpoints_gaussian = np.linspace(-5*sigma,5*sigma,n_sampling_gaussian) # after 5 sigmas we cut the integral delta_x = samplingpoints_gaussian[1]-samplingpoints_gaussian[0] values_gaussian = czu.gaussian(samplingpoints_gaussian,mean=0,sigma=sigma) else: samplingpoints_gaussian = np.array([0]) delta_x = 1 values_gaussian = np.array([1]) U_final_vec = list() for j_q0 in range(len(samplingpoints_gaussian)): fluxbias_q0 = samplingpoints_gaussian[j_q0] if sigma != 0: weight=values_gaussian[j_q0]*delta_x weights.append(weight) else: weight=1 weights.append(weight) f_q0_sweetspot = self.fluxlutman.q_freq_01() f_q0_detuned = f_q0_sweetspot + detuning H=[] if ramsey: # the freq shift takes a different sign at first order on the two sides of Echo-Z positive = [True] else: positive = [True, False] for pos in positive: f_q0_biased = freq_shift_from_fluxbias(f_q0_detuned,f_q0_sweetspot,fluxbias_q0,positive_arc=pos) freq_rotating_frame_detuned = f_q0_biased-f_q0_sweetspot-detuning H.append(czu.coupled_transmons_hamiltonian_new(w_q0=freq_rotating_frame_detuned, w_q1=0, alpha_q0=-2*freq_rotating_frame_detuned, alpha_q1=0, J=0)) # convenient way of getting the uncpupled Hamiltonian for one qubit sim_step = t/len(positive) c_ops=[] U_final = time_evolution(H, c_ops, sim_step) if U_final.type == 'oper': U_final = qtp.to_super(U_final) U_final_vec.append(U_final*weight) weights = np.array(weights) U_superop_average = np.sum(np.array(U_final_vec)) # computing resulting superoperator qoi = calc_populations(U_superop_average) quantities_of_interest = [qoi['population_in_0']*100, qoi['population_in_1']*100] qoi_vec=np.array(quantities_of_interest) qoi_plot.append(qoi_vec) qoi_plot = np.array(qoi_plot) ### Plot to study the convergence properties of averaging over a Gaussian # for i in range(len(qoi_plot[0])): # czu.plot(x_plot_vec=[n_sampling_gaussian_vec], # y_plot_vec=[qoi_plot[:,i]], # title='Study of convergence of average', # xlabel='n_sampling_gaussian points',ylabel=self.value_names[i]) return qoi_plot[0,0], qoi_plot[0,1]
nilq/baby-python
python
from .cloudchain import checkconfig from .cloudchain import CloudChainConfigError from .cloudchain import CloudChainError from .cloudchain import CloudChain from .cloudchain import decryptcreds from .cloudchain import encryptcreds from .cloudchain import endpoint_url from .cloudchain import getconn from .cloudchain import keyalias from .cloudchain import read_configfile from .cloudchain import readcreds from .cloudchain import region_name from .cloudchain import savecreds from .cloudchain import tablename from .cloudchain import get_default_cloud_chain
nilq/baby-python
python
import logging from typing import List, Dict from bs4 import BeautifulSoup, element from .row_utils import ( movies_utils, series_utils, books_utils, comics_utils, music_utils, videogames_utils, ) logger = logging.getLogger(__name__) def get_rows_from_topchart(soup: BeautifulSoup) -> List[element.ResultSet]: """Returns a list of rows from a topchart.""" return soup.find("ol", {"class": "elto-list"}).find_all( "li", {"class": "elto-item"} ) def get_topchart_infos(soup: BeautifulSoup, category: str) -> List[Dict]: """Returns a list of dict containing data of a topchart.""" rows = get_rows_from_topchart(soup) if category == "films": return [movies_utils.get_movies_infos_from_row(x) for x in rows] elif category == "series": return [series_utils.get_series_infos_from_row(x) for x in rows] elif category == "jeuxvideo": return [videogames_utils.get_videogames_infos_from_row(x) for x in rows] elif category == "livres": return [books_utils.get_books_infos_from_row(x) for x in rows] elif category == "bd": return [comics_utils.get_comics_infos_from_row(x) for x in rows] elif category == "musique": return [music_utils.get_music_infos_from_row(x) for x in rows] else: logger.error(f"Category {category} not supported.") return [] def get_topchart_order(category: str) -> List: """Returns the order of columns for a topchart based on its category.""" if category == "films": return movies_utils.get_order_movies_columns() elif category == "series": return series_utils.get_order_series_columns() elif category == "jeuxvideo": return videogames_utils.get_order_videogames_columns() elif category == "livres": return books_utils.get_order_books_columns() elif category == "bd": return comics_utils.get_order_comics_columns() elif category == "musique": return music_utils.get_order_music_columns() else: logger.error(f"Category {category} not supported.") return []
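# Hypothetical driver sketch (the URL and the use of requests are assumptions): fetch
# a SensCritique top chart page, then extract and order its rows with the helpers above.
import requests

page = requests.get('https://www.senscritique.com/films/tops/top111')  # example URL
soup = BeautifulSoup(page.text, 'html.parser')

rows = get_topchart_infos(soup, 'films')   # list of dicts, one per ranked title
columns = get_topchart_order('films')      # column order for export
print(columns)
print(rows[:3])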
nilq/baby-python
python
#!/usr/bin/env python3 """ For each family and device, obtain a tilegrid and save it in the database """ import os from os import path import subprocess import extract_tilegrid import database def main(): devices = database.get_devices() for family in sorted(devices["families"].keys()): for device in sorted(devices["families"][family]["devices"].keys()): output_file = path.join(database.get_db_subdir(family, device), "tilegrid.json") subprocess.check_call(["./get_device_tilegrid.sh", device]) extract_tilegrid.main(["extract_tilegrid", device, "../minitests/simple/wire.dump", output_file]) if __name__ == "__main__": main()
nilq/baby-python
python
import os import random import string import pytest from check_mk_web_api import WebApi, CheckMkWebApiException api = WebApi( os.environ['CHECK_MK_URL'], os.environ['CHECK_MK_USER'], os.environ['CHECK_MK_SECRET'] ) def setup(): api.delete_all_hosts() api.delete_all_hostgroups() api.delete_all_servicegroups() for group in api.get_all_contactgroups(): if group != 'all': api.delete_contactgroup(group) for user_id in api.get_all_users(): if user_id != 'cmkadmin' and user_id != os.environ['CHECK_MK_USER']: api.delete_user(user_id) for folder in api.get_all_folders(): if folder != '': api.delete_folder(folder) def test_add_host(): api.add_host('host00') assert 'host00' in api.get_all_hosts() def test_add_duplicate_host(): with pytest.raises(CheckMkWebApiException): api.add_host('host00') api.add_host('host00') def test_edit_host(): api.add_host('host00', ipaddress='192.168.0.100') assert api.get_host('host00')['attributes']['ipaddress'] == '192.168.0.100' api.edit_host('host00', ipaddress='192.168.0.101') assert api.get_host('host00')['attributes']['ipaddress'] == '192.168.0.101' def test_unset_host_attribute(): api.add_host('host00', ipaddress='192.168.0.100') assert api.get_host('host00')['attributes']['ipaddress'] == '192.168.0.100' api.edit_host('host00', unset_attributes=['ipaddress']) assert 'ipaddress' not in api.get_host('host00')['attributes'] def test_edit_nonexistent_host(): with pytest.raises(CheckMkWebApiException): api.edit_host('host00', ipaddress='192.168.0.101') def test_get_host(): api.add_host('host00') assert api.get_host('host00')['hostname'] == 'host00' def test_get_nonexistent_host(): with pytest.raises(CheckMkWebApiException): api.get_host('host00') def test_get_all_hosts(): api.add_host('host00') api.add_host('host01') all_hosts = api.get_all_hosts() assert len(all_hosts) == 2 assert 'host00' in all_hosts assert 'host01' in all_hosts def test_get_hosts_by_folder(): api.add_folder('test') api.add_host('host00', 'test') api.add_host('host01', 'test') hosts = api.get_hosts_by_folder('test') assert len(hosts) == 2 assert 'host00' in hosts assert 'host01' in hosts def test_delete_host(): api.add_host('host00') assert len(api.get_all_hosts()) == 1 api.delete_host('host00') assert len(api.get_all_hosts()) == 0 def test_delete_nonexistent_host(): with pytest.raises(CheckMkWebApiException): api.delete_host('host00') def test_delete_all_hosts(): api.add_host('host00') api.add_host('host01') assert len(api.get_all_hosts()) == 2 api.delete_all_hosts() assert len(api.get_all_hosts()) == 0 def test_discover_services(): api.add_host('localhost') api.discover_services('localhost') def test_discover_services_for_nonexistent_host(): with pytest.raises(CheckMkWebApiException): api.discover_services('localhost') def test_get_user(): api.add_user('user00', 'User 00', 'p4ssw0rd') assert api.get_user('user00')['alias'] == 'User 00' def test_get_all_users(): api.add_user('user00', 'User 00', 'p4ssw0rd') api.add_user('user01', 'User 01', 'p4ssw0rd') users = api.get_all_users() assert 'user00' in users assert 'user01' in users def test_add_user(): api.add_user('user00', 'User 00', 'p4ssw0rd') assert 'user00' in api.get_all_users() def test_add_automation_user(): api.add_automation_user('automation00', 'Automation 00', 's3cr3t1234') assert 'automation00' in api.get_all_users() def test_add_duplicate_user(): with pytest.raises(CheckMkWebApiException): api.add_user('user00', 'User 00', 'p4ssw0rd') api.add_user('user00', 'User 00', 'p4ssw0rd') def test_add_duplicate_automation_user(): with 
pytest.raises(CheckMkWebApiException): api.add_automation_user('automation00', 'Automation 00', 's3cr3t1234') api.add_automation_user('automation00', 'Automation 00', 's3cr3t1234') def test_edit_user(): api.add_user('user00', 'User 00', 'p4ssw0rd') assert api.get_all_users()['user00']['alias'] == 'User 00' api.edit_user('user00', {'alias': 'User 0'}) assert api.get_all_users()['user00']['alias'] == 'User 0' def test_unset_user_attribute(): api.add_user('user00', 'User 00', 'p4ssw0rd', pager='49123456789') assert api.get_all_users()['user00']['pager'] == '49123456789' api.edit_user('user00', {}, unset_attributes=['pager']) assert 'pager' not in api.get_all_users()['user00'] def test_edit_nonexistent_user(): with pytest.raises(CheckMkWebApiException): api.edit_user('user00', {}) def test_delete_user(): api.add_user('user00', 'User 00', 'p4ssw0rd') assert 'user00' in api.get_all_users() api.delete_user('user00') assert 'user00' not in api.get_all_users() def test_delete_nonexistent_user(): with pytest.raises(CheckMkWebApiException): api.delete_user('user00') def test_get_folder(): api.add_folder('productive') assert api.get_folder('productive') def test_get_nonexistent_folder(): with pytest.raises(CheckMkWebApiException): assert api.get_folder('productive') def test_get_all_folders(): api.add_folder('productive') api.add_folder('testing') folders = api.get_all_folders() assert 'productive' in folders assert 'testing' in folders def test_add_folder(): api.add_folder('productive') assert 'productive' in api.get_all_folders() def test_edit_folder(): api.add_folder('productive', snmp_community='public') assert api.get_folder('productive')['attributes']['snmp_community'] == 'public' api.edit_folder('productive', snmp_community='private') assert api.get_folder('productive')['attributes']['snmp_community'] == 'private' def test_edit_nonexistent_folder(): with pytest.raises(CheckMkWebApiException): assert api.edit_folder('productive') def test_delete_folder(): api.add_folder('productive') assert 'productive' in api.get_all_folders() api.delete_folder('productive') assert 'productive' not in api.get_all_folders() def test_delete_nonexistent_folder(): with pytest.raises(CheckMkWebApiException): api.delete_folder('productive') def test_get_contactgroup(): api.add_contactgroup('user', 'User') assert api.get_contactgroup('user') def test_get_all_contactgroups(): api.add_contactgroup('user', 'User') api.add_contactgroup('admin', 'Admin') groups = api.get_all_contactgroups() assert 'user' in groups assert 'admin' in groups def test_get_nonexistent_contactgroup(): with pytest.raises(KeyError): api.get_contactgroup('user') def test_add_contactgroup(): api.add_contactgroup('user', 'User') assert api.get_contactgroup('user')['alias'] == 'User' def test_add_duplicate_contactgroup(): with pytest.raises(CheckMkWebApiException): api.add_contactgroup('user', 'User') api.add_contactgroup('user', 'User') def test_edit_contactgroup(): api.add_contactgroup('user', 'User') assert api.get_contactgroup('user')['alias'] == 'User' api.edit_contactgroup('user', 'Users') assert api.get_contactgroup('user')['alias'] == 'Users' def test_edit_nonexisting_contactgroup(): with pytest.raises(CheckMkWebApiException): api.edit_contactgroup('user', 'Users') def test_delete_contactgroup(): api.add_contactgroup('user', 'User') assert 'user' in api.get_all_contactgroups() api.delete_contactgroup('user') assert 'user' not in api.get_all_contactgroups() def test_delete_nonexistent_contactgroup(): with pytest.raises(CheckMkWebApiException): 
api.delete_contactgroup('user') def test_get_hostgroup(): api.add_hostgroup('vm', 'VM') api.get_hostgroup('vm') def test_get_all_hostgroups(): api.add_hostgroup('vm', 'VM') api.add_hostgroup('physical', 'Physical') groups = api.get_all_hostgroups() assert 'vm' in groups assert 'physical' in groups def test_get_nonexistent_hostgroup(): with pytest.raises(KeyError): api.get_hostgroup('vm') def test_add_hostgroup(): api.add_hostgroup('vm', 'VM') assert api.get_hostgroup('vm')['alias'] == 'VM' def test_add_duplicate_hostgroup(): with pytest.raises(CheckMkWebApiException): api.add_hostgroup('vm', 'VM') api.add_hostgroup('vm', 'VM') def test_edit_hostgroup(): api.add_hostgroup('vm', 'VM') assert api.get_hostgroup('vm')['alias'] == 'VM' api.edit_hostgroup('vm', 'VMs') assert api.get_hostgroup('vm')['alias'] == 'VMs' def test_edit_nonexisting_hostgroup(): with pytest.raises(CheckMkWebApiException): api.edit_hostgroup('vm', 'VM') def test_delete_hostgroup(): api.add_hostgroup('vm', 'VM') assert 'vm' in api.get_all_hostgroups() api.delete_hostgroup('vm') assert 'vm' not in api.get_all_hostgroups() def test_delete_nonexistent_hostgroup(): with pytest.raises(CheckMkWebApiException): api.delete_hostgroup('vm') def test_get_servicegroup(): api.add_servicegroup('db', 'Database') assert api.get_servicegroup('db') def test_get_all_servicegroups(): api.add_servicegroup('db', 'Database') api.add_servicegroup('web', 'Webserver') groups = api.get_all_servicegroups() assert 'db' in groups assert 'web' in groups def test_get_nonexistent_servicegroup(): with pytest.raises(KeyError): api.get_servicegroup('db') def test_add_servicegroup(): api.add_servicegroup('db', 'Database') assert api.get_servicegroup('db')['alias'] == 'Database' def test_add_duplicate_servicegroup(): with pytest.raises(CheckMkWebApiException): api.add_servicegroup('db', 'Database') api.add_servicegroup('db', 'Database') def test_edit_servicegroup(): api.add_servicegroup('db', 'Database') assert api.get_servicegroup('db')['alias'] == 'Database' api.edit_servicegroup('db', 'Databases') assert api.get_servicegroup('db')['alias'] == 'Databases' def test_edit_nonexisting_servicegroup(): with pytest.raises(CheckMkWebApiException): api.edit_servicegroup('db', 'Database') def test_delete_servicegroup(): api.add_servicegroup('db', 'Database') assert 'db' in api.get_all_servicegroups() api.delete_servicegroup('db') assert 'db' not in api.get_all_servicegroups() def test_delete_nonexistent_servicegroup(): with pytest.raises(CheckMkWebApiException): api.delete_servicegroup('db') def test_get_hosttags(): assert api.get_hosttags() def test_get_ruleset(): assert api.get_ruleset('checkgroup_parameters:hw_fans_perc') def test_get_nonexistent_rulesets(): with pytest.raises(CheckMkWebApiException): api.get_ruleset('nonexistent') def test_set_nonexistent_rulesets(): with pytest.raises(CheckMkWebApiException): api.set_ruleset('nonexistent', {}) def test_get_rulesets(): assert api.get_rulesets() def test_get_site(): assert api.get_site('cmk') def test_set_site(): random_alias = 'alias_' + ''.join(random.choices(string.ascii_uppercase + string.digits, k=10)) config = api.get_site('cmk')['site_config'] config['alias'] = random_alias api.set_site('cmk', config) assert api.get_site('cmk')['site_config']['alias'] == random_alias @pytest.mark.skip(reason="bug in Check_Mk") def test_login_site(): api.add_user('user00', 'User 00', 'p4ssw0rd') api.login_site('cmk', 'user00', 'p4ssw0rd') @pytest.mark.skip(reason="bug in Check_Mk") def test_logout_site(): 
api.add_user('user00', 'User 00', 'p4ssw0rd') api.login_site('cmk', 'user00', 'p4ssw0rd') api.logout_site('cmk')
nilq/baby-python
python
from functools import reduce hashes = [None] + [i for i in range(1, 11)] index = 0 hash_list = [(lambda _h=h: _h, i == index, []) for i, h in enumerate(hashes)] print(hashes) print("zip = {0}".format(zip(hash_list[::2], hash_list[1::2] + [hash_list[::2][-1]]))) while len(hash_list) > 1: hash_list = [ ( lambda _left=left, _right=right: _left() + _right(), left_f or right_f, (left_l if left_f else right_l) + [dict(side=1, hash=right) if left_f else dict(side=0, hash=left)], ) for (left, left_f, left_l), (right, right_f, right_l) in zip(hash_list[::2], hash_list[1::2] + [hash_list[::2][-1]]) ] def _sum(a): return a['left']+a['right'] def check_merkle_link(tip_hash, link): if link['index'] >= 2**len(link['branch']): raise ValueError('index too large') return reduce(lambda c, e: _sum( dict(left=e[1], right=c) if (link['index'] >> e[0]) & 1 else dict(left=c, right=e[1]) ), enumerate(link['branch']), tip_hash) print(hash_list) res = [x['hash']() for x in hash_list[0][2]] print(res) check = check_merkle_link(0,dict(branch=res, index=index)) print(check)
nilq/baby-python
python
import numpy as np import pandas as pd from dataset import MultiDataset, RegressionDataset def scatter_path_array(path_data, size, rank): all_lst = [] for row, item in path_data.iterrows(): path, num = item['path'], int(item['num']) all_lst.extend([[path, i] for i in range(num)]) all_lst = np.array(all_lst, dtype=object) all_lst = np.random.permutation(all_lst) all_lst = all_lst[int(len(all_lst) / size) * rank:int(len(all_lst) / size) * (rank + 1):] return all_lst[:, 0], all_lst[:, 1] class Dataproc(): def __init__(self, size, rank, config): self.verbose = 10 if rank == 0 else 0 self.config = config np.random.seed(7) path_data = pd.read_csv(self.config['csv_path'], index_col=0) # path_data = path_data[path_data.apply(lambda x: int(x['dir_name'][2:5]) < 759, axis=1)] path_data = path_data.reindex(np.random.permutation(path_data.index)).reset_index(drop=True) rate = self.config['train_rate'] protein_name_list = set(path_data['dir_name'].unique()) similar_protein = {'T0356', 'T0456', 'T0483', 'T0292', 'T0494', 'T0597', 'T0291', 'T0637', 'T0392', 'T0738', 'T0640', 'T0308', 'T0690', 'T0653', 'T0671', 'T0636', 'T0645', 'T0532', 'T0664', 'T0699', 'T0324', 'T0303', 'T0418', 'T0379', 'T0398', 'T0518'} protein_name_list = protein_name_list - similar_protein protein_name_list = np.sort(list(protein_name_list)) protein_name_list = np.random.permutation(protein_name_list) self.protein_name = {'train': protein_name_list[:int(len(protein_name_list) * rate)], 'test': protein_name_list[int(len(protein_name_list) * rate):]} self.data_dict = {} train_data = path_data.ix[path_data['dir_name'].isin(self.protein_name['train'])] test_data = path_data.ix[path_data['dir_name'].isin(self.protein_name['test'])] native_data = train_data[train_data['gdtts'] == 1] other_data = train_data[train_data['gdtts'] != 1] # random # other_data = other_data.groupby('dir_name').apply(lambda x: x.sample(frac=self.config['data_frac'])) # upper # other_data = other_data.groupby('dir_name').apply( # lambda x: x.sort_values('label_list')[int(x.shape[0] * (1 - self.config['data_frac'])):x.shape[0]]) # lower other_data = other_data.groupby('dir_name').apply( lambda x: x.sort_values('gdtts')[:int(x.shape[0] * self.config['data_frac'])]) train_data = pd.concat([native_data, other_data]) path, index = scatter_path_array(path_data=train_data, size=size, rank=rank) self.data_dict.update({'train': {'path': path, 'index': index}}) path, index = scatter_path_array(path_data=test_data, size=size, rank=rank) self.data_dict.update({'test': {'path': path, 'index': index}}) if self.config['scop']: scop_path_data = pd.read_csv('./scop_e_40_path_list.csv', index_col=0) path, index = scatter_path_array( path_data=scop_path_data, size=size, rank=rank) self.data_dict['train']['path'] = np.append(self.data_dict['train']['path'], path) self.data_dict['train']['index'] = np.append(self.data_dict['train']['index'], index) def get_protein_name_dict(self): return self.protein_name def get_classification_dataset(self, key): dataset = MultiDataset(path=self.data_dict[key]['path'], index=self.data_dict[key]['index'], config=self.config) return dataset def get_regression_dataset(self, key): dataset = RegressionDataset(path=self.data_dict[key]['path'], index=self.data_dict[key]['index'], config=self.config) return dataset
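# Usage sketch (config values are illustrative; the dataset classes may require extra
# keys not shown here, and size/rank mimic a single-process run of what is otherwise an
# MPI-style scatter):
config = {
    'csv_path': 'protein_path_list.csv',  # CSV with 'path', 'num', 'dir_name', 'gdtts' columns
    'train_rate': 0.8,                    # fraction of proteins used for training
    'data_frac': 0.5,                     # fraction of non-native decoys kept per protein
    'scop': False,                        # skip the extra SCOP data
}
proc = Dataproc(size=1, rank=0, config=config)
train_set = proc.get_regression_dataset('train')
test_set = proc.get_regression_dataset('test')
print(proc.get_protein_name_dict().keys())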
nilq/baby-python
python
from . import controller from . import model
nilq/baby-python
python
#------------------------------------------------------------------------------ # Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ # ShardingNumberKey.py # This script demonstrates how to use sharding keys with a sharded database. # The sample schema provided does not include support for running this demo. A # sharded database must first be created. Information on how to create a # sharded database can be found in the documentation: # https://www.oracle.com/pls/topic/lookup?ctx=dblatest&id=SHARD # # This script requires cx_Oracle 6.1 and higher but it is recommended to use # cx_Oracle 7.3 and higher in order to avoid a set of known issues when using # sharding capabilities. #------------------------------------------------------------------------------ import cx_Oracle import SampleEnv pool = cx_Oracle.SessionPool(SampleEnv.GetMainUser(), SampleEnv.GetMainPassword(), SampleEnv.GetConnectString(), min=1, max=5, increment=1) def ConnectAndDisplay(shardingKey): print("Connecting with sharding key:", shardingKey) with pool.acquire(shardingkey=[shardingKey]) as conn: cursor = conn.cursor() cursor.execute("select sys_context('userenv', 'db_name') from dual") name, = cursor.fetchone() print("--> connected to database", name) ConnectAndDisplay(100) ConnectAndDisplay(167)
nilq/baby-python
python
''' Comp_slice is a terminal fork of intra_blob. - It traces blob axis by cross-comparing vertically adjacent Ps: horizontal slices across an edge blob. These low-M high-Ma blobs are vectorized into outlines of adjacent flat or high-M blobs. (high match: M / Ma, roughly corresponds to low gradient: G / Ga) - Vectorization is clustering of Ps + their derivatives (derPs) into PPs: patterns of Ps that describe an edge. This process is a reduced-dimensionality (2D->1D) version of cross-comp and clustering cycle, common across this project. As we add higher dimensions (2D alg, 3D alg), this dimensionality reduction is done in salient high-aspect blobs (likely edges / contours in 2D or surfaces in 3D) to form more compressed "skeletal" representations of full-D patterns. Most functions should be replaced by casting generic Search, Compare, Cluster functions ''' from collections import deque import sys import numpy as np from class_cluster import ClusterStructure, NoneType, comp_param, Cdm from comp_blob import ave_min, ave_inv # import warnings # to detect overflow issue, in case of infinity loop # warnings.filterwarnings('error') ave_g = 30 # change to Ave from the root intra_blob? flip_ave = .1 flip_ave_FPP = 0 # flip large FPPs only (change to 0 for debug purpose) div_ave = 200 ave_dX = 10 # difference between median x coords of consecutive Ps ave_Dx = 10 ave_mP = 8 # just a random number right now. ave_rmP = .7 # the rate of mP decay per relative dX (x shift) = 1: initial form of distance ave_ortho = 20 ave_ga = 0.78 # ga at 22.5 degree # comp_PP ave_mPP = 0 ave_rM = .7 # comp_param ave_comp = 0 # comp_PPP ave_mPPP = 5 class CP(ClusterStructure): # comp_pixel: I = int Dy = int Dx = int G = int M = int # comp_angle: Dydy = int Dxdy = int Dydx = int Dxdx = int Ga = int Ma = int # comp_dx: Mdx = int Ddx = int # new: L = int x0 = int x = int # median x dX = int # shift of average x between P and _P, if any y = int # for visualization only sign = NoneType # sign of gradient deviation dert_ = list # array of pixel-level derts: (p, dy, dx, g, m), extended in intra_blob upconnect_ = list downconnect_cnt = int derP = object # derP object reference # only in Pd: Pm = object # reference to root P dxdert_ = list # only in Pm: Pd_ = list class CderP(ClusterStructure): layer1 = dict # derP params mP = int dP = int P = object # lower comparand _P = object # higher comparand PP = object # FPP if flip_val, contains this derP # from comp_dx fdx = NoneType distance = int # d_ave_x class CPP(CP, CderP): layer1 = dict # between PPs: upconnect_ = list downconnect_cnt = int fPPm = NoneType # PPm if 1, else PPd; not needed if packed in PP_ fdiv = NoneType box = list # for visualization only, original box before flipping dert__ = list mask__ = bool # PP params derP__ = list P__ = list PPmm_ = list PPdm_ = list # PPd params derPd__ = list Pd__ = list # comp_dx params PPmd_ = list PPdd_ = list # comp_PP derPPm_ = list derPPd_ = list distance = int mmPP = int dmPP = int mdPP = int ddPP = int PPPm = object PPPd = object neg_mmPP = int neg_mdPP = int class CderPP(ClusterStructure): layer01 = dict layer1 = dict layer11 = dict PP = object _PP = object mmPP = int dmPP = int mdPP = int ddPP = int class CPPP(CPP, CderPP): layer01 = dict layer1 = dict layer11 = dict PPm_ = list PPd_ = list derPPm_ = list derPPd_ = list mmPP = int dmPP = int mdPP = int ddPP = int # Functions: ''' leading '_' denotes higher-line variable or structure, vs. same-type lower-line variable or structure trailing '_' denotes array name, vs. 
same-name elements of that array.
'__' is a 2D array
leading 'f' denotes flag

rough workflow:
- intra_blob -> slice_blob(blob) -> derP_ -> PP,
  if flip_val(PP is FPP): pack FPP in blob.PP_ -> flip FPP.dert__ -> slice_blob(FPP) -> pack PP in FPP.PP_
  else (PP is PP): pack PP in blob.PP_
'''

def slice_blob(blob, verbose=False):  # where should we use this Ave?
    '''
    Slice_blob converts selected smooth-edge blobs (high G, low Ga or low M, high Ma) into sliced blobs,
    adding horizontal blob slices: Ps or 1D patterns
    '''
    dert__ = blob.dert__
    mask__ = blob.mask__
    height, width = dert__[0].shape
    if verbose: print("Converting to image...")

    for fPPd in range(2):  # run twice, 1st loop fPPd=0: form PPs, 2nd loop fPPd=1: form PPds

        P__, derP__, Pd__, derPd__ = [], [], [], []
        zip_dert__ = zip(*dert__)
        _P_ = form_P_(list(zip(*next(zip_dert__))), mask__[0], 0)  # 1st upper row
        P__ += _P_  # frame of Ps

        for y, dert_ in enumerate(zip_dert__, start=1):  # scan top down
            if verbose: print(f"\rProcessing line {y + 1}/{height}, ", end=""); sys.stdout.flush()

            P_ = form_P_(list(zip(*dert_)), mask__[y], y)  # horizontal clustering - lower row
            derP_ = scan_P_(P_, _P_)    # tests for x overlap between Ps, calls comp_slice
            Pd_ = form_Pd_(P_)          # form Pds within Ps
            derPd_ = scan_Pd_(P_, _P_)  # adds upconnect_ in Pds and calls derPd_2_PP_derPd_, same as derP_2_PP_

            derP__ += derP_; derPd__ += derPd_  # frame of derPs
            P__ += P_; Pd__ += Pd_
            _P_ = P_  # set current lower row P_ as next upper row _P_

        form_PP_root(blob, derP__, P__, derPd__, Pd__, fPPd)  # form PPs in blob or in FPP
        comp_PP_(blob, fPPd)
        # if not isinstance(blob, CPP):
        #     draw_PP_(blob)


def form_P_(idert_, mask_, y):  # segment dert__ into P__, in horizontal then vertical order; sum dert params into P params

    P_ = []  # rows of derPs
    _dert = list(idert_[0])  # first dert
    dert_ = [_dert]  # pack 1st dert
    _mask = mask_[0]  # mask bit per dert

    if ~_mask:
        # initialize P with first dert
        P = CP(I=_dert[0], Dy=_dert[1], Dx=_dert[2], G=_dert[3], M=_dert[4],
               Dydy=_dert[5], Dxdy=_dert[6], Dydx=_dert[7], Dxdx=_dert[8], Ga=_dert[9], Ma=_dert[10],
               x0=0, L=1, y=y, dert_=dert_)

    for x, dert in enumerate(idert_[1:], start=1):  # left to right in each row of derts
        mask = mask_[x]  # pixel mask

        if mask:  # masks: if 1,_0: P termination, if 0,_1: P initialization, if 0,_0: P accumulation:
            if ~_mask:  # _dert is not masked, dert is masked, terminate P:
                P.x = P.x0 + (P.L - 1) // 2
                P_.append(P)
        else:  # dert is not masked
            if _mask:  # _dert is masked, initialize P params:
                # initialize P from the current dert (not _dert), and pack it as the new dert_:
                P = CP(I=dert[0], Dy=dert[1], Dx=dert[2], G=dert[3], M=dert[4],
                       Dydy=dert[5], Dxdy=dert[6], Dydx=dert[7], Dxdx=dert[8], Ga=dert[9], Ma=dert[10],
                       x0=x, L=1, y=y, dert_=[dert])
            else:  # _dert is not masked, accumulate P params with (p, dy, dx, g, m, day, dax, ga, ma) = dert
                P.accumulate(I=dert[0], Dy=dert[1], Dx=dert[2], G=dert[3], M=dert[4],
                             Dydy=dert[5], Dxdy=dert[6], Dydx=dert[7], Dxdx=dert[8], Ga=dert[9], Ma=dert[10], L=1)
                P.dert_.append(dert)
        _mask = mask

    if ~_mask:  # terminate last P in a row
        P.x = P.x0 + (P.L - 1) // 2
        P_.append(P)

    return P_


def form_Pd_(P_):  # form Pds from Pm derts by dx sign, otherwise same as form_P

    Pd__ = []
    for iP in P_:
        if (iP.downconnect_cnt > 0) or (iP.upconnect_):  # form Pds only if P has at least one connect, else they won't be compared
            P_Ddx = 0  # sum of Ddx across Pds
            P_Mdx = 0  # sum of Mdx across Pds
            Pd_ = []   # Pds in P
            _dert = iP.dert_[0]  # 1st dert
            dert_ = [_dert]
            _sign = _dert[2] > 0
            # initialize P with first dert
            P = CP(I=_dert[0], Dy=_dert[1], Dx=_dert[2], G=_dert[3], M=_dert[4],
                   Dydy=_dert[5], Dxdy=_dert[6], Dydx=_dert[7], Dxdx=_dert[8], Ga=_dert[9], Ma=_dert[10],
                   x0=iP.x0, dert_=dert_, L=1, y=iP.y, sign=_sign, Pm=iP)
            x = 1  # relative x within P

            for dert in iP.dert_[1:]:
                sign = dert[2] > 0
                if sign == _sign:  # same Dx sign
                    # accumulate P params from the current dert, (p, dy, dx, g, m, dyy, dyx, dxy, dxx, ga, ma) = dert
                    P.accumulate(I=dert[0], Dy=dert[1], Dx=dert[2], G=dert[3], M=dert[4],
                                 Dydy=dert[5], Dxdy=dert[6], Dydx=dert[7], Dxdx=dert[8], Ga=dert[9], Ma=dert[10], L=1)
                    P.dert_.append(dert)
                else:  # sign change, terminate P
                    if P.Dx > ave_Dx:
                        # cross-comp of dx in P.dert_
                        comp_dx(P); P_Ddx += P.Ddx; P_Mdx += P.Mdx
                    P.x = P.x0 + (P.L - 1) // 2
                    Pd_.append(P)
                    # reinitialize params from the current dert
                    P = CP(I=dert[0], Dy=dert[1], Dx=dert[2], G=dert[3], M=dert[4],
                           Dydy=dert[5], Dxdy=dert[6], Dydx=dert[7], Dxdx=dert[8], Ga=dert[9], Ma=dert[10],
                           x0=iP.x0 + x, dert_=[dert], L=1, y=iP.y, sign=sign, Pm=iP)
                _sign = sign
                x += 1

            # terminate last P
            if P.Dx > ave_Dx:
                comp_dx(P); P_Ddx += P.Ddx; P_Mdx += P.Mdx
            P.x = P.x0 + (P.L - 1) // 2
            Pd_.append(P)
            # update Pd params in P
            iP.Pd_ = Pd_; iP.Ddx = P_Ddx; iP.Mdx = P_Mdx
            Pd__ += Pd_

    return Pd__


def scan_P_(P_, _P_):  # test for x overlap between Ps, call comp_slice

    derP_ = []
    for P in P_:  # lower row
        for _P in _P_:  # upper row
            # test for x overlap between P and _P in 8 directions
            if (P.x0 - 1 < (_P.x0 + _P.L) and (P.x0 + P.L) + 1 > _P.x0):  # all Ps here are positive
                fcomp = [1 for derP in P.upconnect_ if P is derP.P]  # upconnect could be derP or dirP
                if not fcomp:
                    derP = comp_slice(_P, P)  # form vertical and directional derivatives
                    derP_.append(derP)
                    P.upconnect_.append(derP)
                    _P.downconnect_cnt += 1
            elif (P.x0 + P.L) < _P.x0:  # stop scanning the rest of lower P_ if there is no overlap
                break
    return derP_


def scan_Pd_(P_, _P_):  # test for x overlap between Pds

    derPd_ = []
    for P in P_:  # lower row
        for _P in _P_:  # upper row
            for Pd in P.Pd_:  # lower row Pds
                for _Pd in _P.Pd_:  # upper row Pds
                    # test for same sign & x overlap between Pd and _Pd in 8 directions
                    if (Pd.x0 - 1 < (_Pd.x0 + _Pd.L) and (Pd.x0 + Pd.L) + 1 > _Pd.x0) and (Pd.sign == _Pd.sign):
                        fcomp = [1 for derPd in Pd.upconnect_ if Pd is derPd.P]  # upconnect could be derP or dirP
                        if not fcomp:
                            derPd = comp_slice(_Pd, Pd)
                            derPd_.append(derPd)
                            Pd.upconnect_.append(derPd)
                            _Pd.downconnect_cnt += 1
                    elif (Pd.x0 + Pd.L) < _Pd.x0:  # stop scanning the rest of lower P_ if there is no overlap
                        break
    return derPd_


def form_PP_root(blob, derP__, P__, derPd__, Pd__, fPPd):
    '''
    form vertically contiguous patterns of patterns by the sign of derP, in blob or in FPP
    '''
    blob.derP__ = derP__; blob.P__ = P__
    blob.derPd__ = derPd__; blob.Pd__ = Pd__
    if fPPd:
        derP_2_PP_(blob.derP__, blob.PPdm_, 1)   # cluster by derPm dP sign
        derP_2_PP_(blob.derPd__, blob.PPdd_, 1)  # cluster by derPd dP sign, not used
    else:
        derP_2_PP_(blob.derP__, blob.PPmm_, 0)   # cluster by derPm mP sign
        derP_2_PP_(blob.derPd__, blob.PPmd_, 0)  # cluster by derPd mP sign, not used


def derP_2_PP_(derP_, PP_, fPPd):
    '''
    first row of derP_ has downconnect_cnt == 0, higher rows may also have them
    '''
    for derP in reversed(derP_):  # bottom-up to follow upconnects, derP is stored top-down
        if not derP.P.downconnect_cnt and not isinstance(derP.PP, CPP):  # root derP was not terminated in prior call
            PP = CPP()  # init
            accum_PP(PP, derP)

            if derP._P.upconnect_:  # derP has upconnects
                upconnect_2_PP_(derP, PP_, fPPd)  # form PPs across _P upconnects
            else:
                PP_.append(derP.PP)


def upconnect_2_PP_(iderP, PP_, fPPd):
    '''
    compare sign of lower-layer iderP to the sign of its upconnects to form contiguous same-sign PPs
    '''
    confirmed_upconnect_ = []

    for derP in iderP._P.upconnect_:  # potential upconnects from previous call
        if derP not in iderP.PP.derP__:  # this may occur after PP merging

            if fPPd:
                same_sign = (iderP.dP > 0) == (derP.dP > 0)  # comp dP sign
            else:
                same_sign = (iderP.mP > 0) == (derP.mP > 0)  # comp mP sign

            if same_sign:  # upconnect derP has different PP, merge them
                if isinstance(derP.PP, CPP) and (derP.PP is not iderP.PP):
                    merge_PP(iderP.PP, derP.PP, PP_)
                else:  # accumulate derP in current PP
                    accum_PP(iderP.PP, derP)
                    confirmed_upconnect_.append(derP)
            else:
                if not isinstance(derP.PP, CPP):  # sign changed, derP is root derP unless it already has FPP/PP
                    PP = CPP()
                    accum_PP(PP, derP)
                    derP.P.downconnect_cnt = 0  # reset downconnect count for root derP

                iderP.PP.upconnect_.append(derP.PP)  # add new initialized PP as upconnect of current PP
                derP.PP.downconnect_cnt += 1  # add downconnect count to newly initialized PP

            if derP._P.upconnect_:
                upconnect_2_PP_(derP, PP_, fPPd)  # recursively compare sign of next-layer upconnects
            elif derP.PP is not iderP.PP and derP.P.downconnect_cnt == 0:
                PP_.append(derP.PP)  # terminate PP (not iPP) at the sign change

    iderP._P.upconnect_ = confirmed_upconnect_

    if not iderP.P.downconnect_cnt:
        PP_.append(iderP.PP)  # iPP is terminated after all upconnects are checked


def merge_PP(_PP, PP, PP_):  # merge PP into _PP

    for derP in PP.derP__:
        if derP not in _PP.derP__:
            _PP.derP__.append(derP)  # add derP to PP
            derP.PP = _PP  # update reference
            _PP.accum_from(derP)  # accumulate params

    if PP in PP_:
        PP_.remove(PP)  # remove merged PP


def accum_Dert(Dert: dict, **params) -> None:
    Dert.update({param: Dert[param] + value for param, value in params.items()})


def accum_PP(PP, derP):  # accumulate params in PP

    PP.accum_from(derP)     # accumulate derP params
    PP.accum_from(derP.P)   # accumulate derP's P base params into PP
    PP.derP__.append(derP)  # add derP to PP
    derP.PP = PP  # update reference


def comp_dx(P):  # cross-comp of dx s in P.dert_

    Ddx = 0
    Mdx = 0
    dxdert_ = []
    _dx = P.dert_[0][2]  # first dx
    for dert in P.dert_[1:]:
        dx = dert[2]
        ddx = dx - _dx
        if (dx > 0) == (_dx > 0):  # parenthesized same-sign test; the unparenthesized form would chain comparisons
            mdx = min(dx, _dx)
        else:
            mdx = -min(abs(dx), abs(_dx))
        dxdert_.append((ddx, mdx))  # no dx: already in dert_
        Ddx += ddx  # P-wide cross-sign, P.L is too short to form sub_Ps
        Mdx += mdx
        _dx = dx
    P.dxdert_ = dxdert_
    P.Ddx = Ddx
    P.Mdx = Mdx


def comp_slice(_P, P):  # forms vertical derivatives of derP params, and conditional ders from norm and DIV comp

    layer1 = dict({'I': .0, 'Da': .0, 'G': .0, 'M': .0, 'Dady': .0, 'Dadx': .0,
                   'Ga': .0, 'Ma': .0, 'L': .0, 'Mdx': .0, 'Ddx': .0, 'x': .0})
    mP, dP = 0, 0
    absG = max(1, P.G + (ave_g * P.L)); _absG = max(1, _P.G + (ave_g * _P.L))  # use max to avoid zero division
    absGa = max(1, P.Ga + (ave_ga * P.L)); _absGa = max(1, _P.Ga + (ave_ga * _P.L))

    # compare base params to get layer1
    for param_name in layer1:
        if param_name == 'Da':
            sin = P.Dy / absG; cos = P.Dx / absG
            _sin = _P.Dy / _absG; _cos = _P.Dx / _absG
            param = [sin, cos]
            _param = [_sin, _cos]
        elif param_name == 'Dady':
            sin = P.Dydy / absGa; cos = P.Dxdy / absGa
            _sin = _P.Dydy / _absGa; _cos = _P.Dxdy / _absGa
            param = [sin, cos]
            _param = [_sin, _cos]
        elif param_name == 'Dadx':
            sin = P.Dydx / absGa; cos = P.Dxdx / absGa
            _sin = _P.Dydx / _absGa; _cos = _P.Dxdx / _absGa
            param = [sin, cos]
            _param = [_sin, _cos]
        elif param_name == "x":
            _param = _P.dX  # _dX
            param = P.x     # dX
        elif param_name == "L" or param_name == "M":
            hyp = np.hypot(P.x, 1)  # ratio of local segment of long (vertical) axis to dY = 1
            _param = getattr(_P, param_name)
            param = getattr(P, param_name) / hyp  # orthogonal L & M are reduced by hyp
        else:
            param = getattr(P, param_name)
            _param = getattr(_P, param_name)

        dm = comp_param(param, _param, param_name, ave_min)  # add ave_min, * P.L is not needed?
        layer1[param_name] = dm
        mP += dm.m
        dP += dm.d
    '''
    s, x0, Dx, Dy, G, M, L, Ddx, Mdx = P.sign, P.x0, P.Dx, P.Dy, P.G, P.M, P.L, P.Ddx, P.Mdx  # params per comp branch
    _s, _x0, _Dx, _Dy, _G, _M, _dX, _L, _Ddx, _Mdx = _P.sign, _P.x0, _P.Dx, _P.Dy, _P.G, _P.M, _P.dX, _P.L, _P.Ddx, _P.Mdx

    dX = (x0 + (L-1) / 2) - (_x0 + (_L-1) / 2)  # x shift: d_ave_x, or from offsets: abs(x0 - _x0) + abs(xn - _xn)?
    ddX = dX - _dX  # long axis curvature, if > ave: ortho eval per P, else per PP_dX?
    mdX = min(dX, _dX)  # dX is inversely predictive of mP?
    hyp = np.hypot(dX, 1)  # ratio of local segment of long (vertical) axis to dY = 1

    L /= hyp  # orthogonal L is reduced by hyp
    dL = L - _L; mL = min(L, _L)  # L: positions / sign, dderived: magnitude-proportional value
    M /= hyp  # orthogonal M is reduced by hyp
    dM = M - _M; mM = min(M, _M)  # use abs M? no Mx, My: non-core, lesser and redundant bias?

    # G + Ave was wrong because Dy, Dx are summed as signed, resulting G is different from summed abs G
    G = np.hypot(P.Dy, P.Dx)
    if G == 0: G = 1
    _G = np.hypot(_P.Dy, _P.Dx)
    if _G == 0: _G = 1

    sin = P.Dy / G; _sin = _P.Dy / _G
    cos = P.Dx / G; _cos = _P.Dx / _G
    sin_da = (cos * _sin) - (sin * _cos)
    cos_da = (cos * _cos) + (sin * _sin)
    da = np.arctan2(sin_da, cos_da)
    ma = ave_ga - abs(da)

    dP = dL + dM + da  # -> directional PPd, equal-weight params, no rdn?
    mP = mL + mM + ma  # -> complementary PPm, rdn *= Pd | Pm rolp?
    mP -= ave_mP * ave_rmP ** (dX / L)  # dX / L is relative x-distance between P and _P,
    '''
    mP -= ave_mP * ave_rmP ** (P.dX / P.L)

    derP = CderP(mP=mP, dP=dP, P=P, _P=_P, layer1=layer1)
    P.derP = derP

    return derP


def comp_slice_full(_P, P):  # forms vertical derivatives of derP params, and conditional ders from norm and DIV comp

    x0, Dx, Dy, L = P.x0, P.Dx, P.Dy, P.L  # params per comp branch, add angle params
    _x0, _Dx, _Dy, _dX, _L = _P.x0, _P.Dx, _P.Dy, _P.dX, _P.L

    dX = (x0 + (L-1) / 2) - (_x0 + (_L-1) / 2)  # x shift: d_ave_x, or from offsets: abs(x0 - _x0) + abs(xn - _xn)?
    if dX > ave_dX:  # internal comp is higher-power, else two-input comp not compressive?
        xn = x0 + L - 1
        _xn = _x0 + _L - 1
        mX = min(xn, _xn) - max(x0, _x0)  # overlap = abs proximity: summed binary x match
        rX = dX / mX if mX else dX * 2    # average dist / prox, | prox / dist, | mX / max_L?

    ddX = dX - _dX  # long axis curvature, if > ave: ortho eval per P, else per PP_dX?
    mdX = min(dX, _dX)  # dX is inversely predictive of mP?

    # does this read better, or would it be better to stick to the old code?
    difference = P.difference(_P)        # P - _P
    match = P.min_match(_P)              # min of P and _P
    abs_match = P.abs_min_match(_P)      # min of abs(P) and abs(_P)

    dL = difference['L']  # L: positions / sign, dderived: magnitude-proportional value
    mL = match['L']
    dM = difference['M']  # use abs M? no Mx, My: non-core, lesser and redundant bias?
    mM = match['M']       # min is value distance for opposite-sign comparands, vs. value overlap for same-sign comparands

    dDy = difference['Dy']  # Dy per sub_P by intra_comp(dx), vs. less vertically specific dI
    mDy = abs_match['Dy']
    # no comp G: Dy, Dx are more specific:
    dDx = difference['Dx']  # same-sign Dx if Pd
    mDx = abs_match['Dx']

    if dX * P.G > ave_ortho:  # estimate params of P locally orthogonal to long axis, maximizing lateral diff and vertical match
        # diagram: https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/orthogonalization.png
        # Long axis is a curve of connections between ave_xs: mid-points of consecutive Ps.
        # Ortho virtually rotates P to connection-orthogonal direction:
        hyp = np.hypot(dX, 1)  # ratio of local segment of long (vertical) axis to dY = 1
        L = L / hyp  # orthogonal L
        # combine derivatives in proportion to the contribution of their axes to orthogonal axes:
        # contribution of Dx should increase with hyp(dX, dY=1), this is original direction of Dx:
        Dy = (Dy / hyp + Dx * hyp) / 2  # estimated along-axis D
        Dx = (Dy * hyp + Dx / hyp) / 2  # estimated cross-axis D
        '''
        alternatives:
        oDy = (Dy * hyp - Dx / hyp) / 2; oDx = (Dx / hyp + Dy * hyp) / 2
        or:
        oDy = hypot(Dy / hyp, Dx * hyp); oDx = hypot(Dy * hyp, Dx / hyp)
        '''
        # recompute difference and match
        dL = _L - L
        mL = min(_L, L)
        dDy = _Dy - Dy
        mDy = min(abs(_Dy), abs(Dy))
        dDx = _Dx - Dx
        mDx = min(abs(_Dx), abs(Dx))

    if (Dx > 0) != (_Dx > 0): mDx = -mDx
    if (Dy > 0) != (_Dy > 0): mDy = -mDy

    dDdx, dMdx, mDdx, mMdx = 0, 0, 0, 0
    if P.dxdert_ and _P.dxdert_:  # from comp_dx
        fdx = 1
        dDdx = difference['Ddx']
        mDdx = abs_match['Ddx']
        if (P.Ddx > 0) != (_P.Ddx > 0): mDdx = -mDdx
        # Mdx is signed:
        dMdx = match['Mdx']
        mMdx = -abs_match['Mdx']
        if (P.Mdx > 0) != (_P.Mdx > 0): mMdx = -mMdx
    else:
        fdx = 0

    # coeff = 0.7 for semi redundant parameters, 0.5 for fully redundant parameters:
    dP = ddX + dL + 0.7 * (dM + dDx + dDy)  # -> directional PPd, equal-weight params, no rdn?
    # correlation: dX -> L, oDy, !oDx, ddX -> dL, odDy ! odDx? dL -> dDx, dDy?
    if fdx: dP += 0.7 * (dDdx + dMdx)

    mP = mdX + mL + 0.7 * (mM + mDx + mDy)  # -> complementary PPm, rdn *= Pd | Pm rolp?
    if fdx: mP += 0.7 * (mDdx + mMdx)
    mP -= ave_mP * ave_rmP ** (dX / L)  # dX / L is relative x-distance between P and _P,

    derP = CderP(P=P, _P=_P, mP=mP, dP=dP, dX=dX, mL=mL, dL=dL, mDx=mDx, dDx=dDx, mDy=mDy, dDy=dDy)
    P.derP = derP

    if fdx:
        derP.fdx = 1; derP.dDdx = dDdx; derP.mDdx = mDdx; derP.dMdx = dMdx; derP.mMdx = mMdx
    '''
    min comp for rotation: L, Dy, Dx, no redundancy?
    mParam weighting by relative contribution to mP, /= redundancy?
    div_f, nvars: if abs dP per PPd, primary comp L, the rest is normalized?
    '''
    return derP

'''
radial comp extension for co-internal blobs:
!= sign comp x sum( adj_blob_) -> intra_comp value, isolation value, cross-sign merge if weak,
else: == sign comp x ind( adj_adj_blob_) -> same-sign merge | composition:
borrow = adj_G * rA: default sum div_comp S -> relative area and distance to adjj_blob_
internal sum comp if mA: in thin lines only? comp_norm_G or div_comp_G -> rG?
isolation = decay + contrast: G - G * (rA * ave_rG: decay) - (rA * adj_G: contrast, = lend | borrow, no need to compare vG?)
if isolation: cross adjj_blob composition eval,
else: cross adjj_blob merge eval: blob merger if internal match (~raG) - isolation, rdn external match:
blob compos if external match (~rA?) + isolation,
Also eval comp_slice over fork_?
rng+ should preserve resolution: rng+_dert_ is dert layers, rng_sum -> rng+, der+: whole rng, rng_incr -> angle / past vs next g,
rdn Rng | rng_ eval at rng term, Rng -= lost coord bits mag, always > discr?
Add comp_PP_recursive
'''

# draft of comp_PP, following structure of comp_blob
def comp_PP_(blob, fPPd):

    for fPd in [0, 1]:
        if fPPd:  # cluster by d sign
            if fPd:  # using derPd (PPdd)
                PP_ = blob.PPdd_
            else:    # using derPm (PPdm)
                PP_ = blob.PPdm_
            for PP in PP_:
                if len(PP.derPPd_) == 0:  # PP didn't search in a prior function call
                    comp_PP_recursive(PP, PP.upconnect_, derPP_=[], fPPd=fPPd)
            form_PPP_(PP_, fPPd)
        else:  # cluster by m sign
            if fPd:  # using derPd (PPmd)
                PP_ = blob.PPmd_
            else:    # using derPm (PPmm)
                PP_ = blob.PPmm_
            for PP in PP_:
                if len(PP.derPPm_) == 0:  # PP didn't search in a prior function call
                    comp_PP_recursive(PP, PP.upconnect_, derPP_=[], fPPd=fPPd)
            form_PPP_(PP_, fPPd)


def comp_PP_recursive(PP, upconnect_, derPP_, fPPd):

    derPP_pair_ = [[derPP.PP, derPP._PP] for derPP in derPP_]
    for _PP in upconnect_:
        if [_PP, PP] in derPP_pair_:  # derPP.PP = _PP, derPP._PP = PP
            derPP = derPP_[derPP_pair_.index([_PP, PP])]
        elif [PP, _PP] not in derPP_pair_:  # the same pair of PP and _PP wasn't checked prior to this function call
            derPP = comp_PP(PP, _PP)  # comp_PP
            derPP_.append(derPP)

        if "derPP" in locals():  # derPP exists
            accum_derPP(PP, derPP, fPPd)  # accumulate derPP
            if fPPd:  # PP cluster by d
                mPP = derPP.mdPP  # match of PPs' d
            else:     # PP cluster by m
                mPP = derPP.mmPP  # match of PPs' m

            if mPP > 0:  # replace PP with _PP to continue the search
                comp_PP_recursive(_PP, _PP.upconnect_, derPP_, fPPd)

            elif fPPd and PP.neg_mdPP + PP.mdPP > ave_mPP:  # evaluation to extend PPd comparison
                PP.distance += len(_PP.Pd__)  # approximate, using number of Py; not so sure
                PP.neg_mdPP += derPP.mdPP
                comp_PP_recursive(PP, _PP.upconnect_, derPP_, fPPd)

            elif not fPPd and PP.neg_mmPP + PP.mmPP > ave_mPP:  # evaluation to extend PPm comparison
                PP.distance += len(_PP.P__)  # approximate, using number of Py; not so sure
                PP.neg_mmPP += derPP.mmPP
                comp_PP_recursive(PP, _PP.upconnect_, derPP_, fPPd)


# draft
def form_PPP_(PP_, fPPd):

    PPP_ = []
    for PP in PP_:
        if fPPd:
            mPP = PP.mdPP  # match of PP's d
            PPP = PP.PPPd
        else:
            mPP = PP.mmPP  # match of PP's m
            PPP = PP.PPPm

        if mPP > ave_mPPP and not isinstance(PPP, CPPP):
            PPP = CPPP()  # init new PPP
            accum_PPP(PPP, PP, fPPd)  # accum PP into PPP
            form_PPP_recursive(PPP_, PPP, PP.upconnect_, checked_ids=[PP.id], fPPd=fPPd)
            PPP_.append(PPP)  # pack PPP after scanning all upconnects

    return PPP_


def form_PPP_recursive(PPP_, PPP, upconnect_, checked_ids, fPPd):

    for _PP in upconnect_:
        if _PP.id not in checked_ids:
            checked_ids.append(_PP.id)

            if fPPd:
                _mPP = _PP.mdPP  # match of _PPs' d
            else:
                _mPP = _PP.mmPP  # match of _PPs' m

            if _mPP > 0:  # _PP.mPP > 0
                if fPPd:
                    _PPP = _PP.PPPd
                else:
                    _PPP = _PP.PPPm

                if isinstance(_PPP, CPPP):  # _PP's PPP exists, merge with current PPP
                    PPP_.remove(_PPP)  # remove the merging PPP from PPP_
                    merge_PPP(PPP, _PPP, fPPd)
                else:
                    accum_PPP(PPP, _PP, fPPd)  # accum PP into PPP
                    if _PP.upconnect_:  # continue with _PP upconnects
                        form_PPP_recursive(PPP_, PPP, _PP.upconnect_, checked_ids, fPPd)


def accum_PPP(PPP, PP, fPPd):

    PPP.accum_from(PP)  # accumulate params, including layer1

    if fPPd:
        PPP.PPd_.append(PP)  # add PPd to PPP's PPd_
        PP.PPPd = PPP        # update PPP reference of PP
        for derPPd in PP.derPPd_:  # accumulate derPPd params, layer01 and layer11
            PPP.accum_from(derPPd)
            PPP.derPPd_.append(derPPd)
    else:
        PPP.PPm_.append(PP)  # add PPm to PPP's PPm_
        PP.PPPm = PPP        # update PPP reference of PP
        for derPPm in PP.derPPm_:  # accumulate derPPm params, layer01 and layer11
            PPP.accum_from(derPPm)
            PPP.derPPm_.append(derPPm)


def merge_PPP(PPP, _PPP, fPPd):

    if fPPd:
        for _PP in _PPP.PPd_:
            if _PP not in PPP.PPd_:
                accum_PPP(PPP, _PP, fPPd)
    else:
        for _PP in _PPP.PPm_:
            if _PP not in PPP.PPm_:
                accum_PPP(PPP, _PP, fPPd)


def comp_PP(PP, _PP):

    # compare PP and _PP base params to get layer01 of derPP
    # ------------------------------------------------------
    layer01 = dict({'I': .0, 'Da': .0, 'G': .0, 'M': .0, 'Dady': .0, 'Dadx': .0,
                    'Ga': .0, 'Ma': .0, 'L': .0, 'Mdx': .0, 'Ddx': .0, 'x': .0})
    mP, dP = 0, 0
    absG = max(1, PP.G + (ave_g * PP.L)); _absG = max(1, _PP.G + (ave_g * _PP.L))  # use max to avoid zero division
    absGa = max(1, PP.Ga + (ave_ga * PP.L)); _absGa = max(1, _PP.Ga + (ave_ga * _PP.L))

    for param_name in layer01:
        if param_name == 'Da':  # sin and cos components
            sin = PP.Dy / absG; cos = PP.Dx / absG
            _sin = _PP.Dy / _absG; _cos = _PP.Dx / _absG
            param = [sin, cos]
            _param = [_sin, _cos]
        elif param_name == 'Dady':  # sin and cos components
            sin = PP.Dydy / absGa; cos = PP.Dxdy / absGa
            _sin = _PP.Dydy / _absGa; _cos = _PP.Dxdy / _absGa
            param = [sin, cos]
            _param = [_sin, _cos]
        elif param_name == 'Dadx':  # sin and cos components
            sin = PP.Dydx / absGa; cos = PP.Dxdx / absGa
            _sin = _PP.Dydx / _absGa; _cos = _PP.Dxdx / _absGa
            param = [sin, cos]
            _param = [_sin, _cos]
        elif param_name == "x":
            _param = _PP.dX  # _dX
            param = PP.x     # dX
        elif param_name == "L" or param_name == "M":
            hyp = np.hypot(PP.x, 1)  # ratio of local segment of long (vertical) axis to dY = 1
            _param = getattr(_PP, param_name)
            param = getattr(PP, param_name) / hyp  # orthogonal L & M are reduced by hyp
        else:
            param = getattr(PP, param_name)
            _param = getattr(_PP, param_name)

        dm = comp_param(param, _param, param_name, ave_mPP)
        layer01[param_name] = dm
        mP += dm.m
        dP += dm.d

    # compare layer1 to get layer11
    # -----------------------------
    layer11 = dict({'I': .0, 'Da': .0, 'G': .0, 'M': .0, 'Dady': .0, 'Dadx': .0,
                    'Ga': .0, 'Ma': .0, 'L': .0, 'Mdx': .0, 'Ddx': .0, 'x': .0})
    mmPP, dmPP, mdPP, ddPP = 0, 0, 0, 0

    for i, ((param_name, dm), (_param_name, _dm)) in enumerate(zip(PP.layer1.items(), _PP.layer1.items())):
        f_comp = 0
        if param_name in ['Da', 'Dady', 'Dadx']:  # angle, needs conversion to vector form
            if dm.m > ave_comp and _dm.m > ave_comp:  # check da.m of prior layer
                f_comp = 1
                # da is computed from normalized dy and dx; do we still need to normalize it again here in layer1?
                sin, cos = np.sin(dm.d), np.cos(dm.d)
                _sin, _cos = np.sin(_dm.d), np.cos(_dm.d)
                param_d = [sin, cos]; param_m = dm.m
                _param_d = [_sin, _cos]; _param_m = _dm.m
        else:
            if dm.m > ave_comp and _dm.m > ave_comp:  # check m of prior layer
                f_comp = 1
                param_d = dm.d; param_m = dm.m
                _param_d = _dm.d; _param_m = _dm.m

        if f_comp:
            dmd = comp_param(param_d, _param_d, param_name, ave_mPP)  # dm of d
            dmm = comp_param(param_m, _param_m, param_name, ave_mPP)  # dm of m
            layer11[param_name] = [dmd, dmm]  # layer 2 in list, storing dm of each d and m
            mdPP += dmd.m  # m from dm of d
            ddPP += dmd.d  # d from dm of d
            mmPP += dmm.m  # m from dm of m
            dmPP += dmm.d  # d from dm of m
        else:
            dmd = Cdm()
            dmm = Cdm()

    if PP.mP > ave_comp and PP.dP > ave_comp and _PP.mP > ave_comp and _PP.dP > ave_comp:
        dmmP = comp_param(PP.mP, _PP.mP, [], ave_mPP)  # dm of mP
        dmdP = comp_param(PP.dP, _PP.dP, [], ave_mPP)  # dm of dP
        mdPP += dmdP.m  # match of compared PPs' d components
        ddPP += dmdP.d  # difference of compared PPs' d components
        mmPP += dmmP.m  # match of compared PPs' m components
        dmPP += dmmP.d  # difference of compared PPs' m components

    mmPP -= ave_mPP  # match of compared PPs' m components
    dmPP -= ave_mPP  # difference of compared PPs' m components

    derPP = CderPP(PP=PP, _PP=_PP, mmPP=mmPP, dmPP=dmPP, mdPP=mdPP, ddPP=ddPP, layer01=layer01, layer11=layer11)
    '''
    # match of compared PPs' m components
    mmPP = match['mP'] + match['mx'] + match['mL'] + match['mDx'] + match['mDy'] - ave_mPP
    # difference of compared PPs' m components
    dmPP = difference['mP'] + difference['mx'] + difference['mL'] + difference['mDx'] + difference['mDy'] - ave_mPP
    # match of compared PPs' d components
    mdPP = match['dP'] + match['dx'] + match['dL'] + match['dDx'] + match['dDy']
    # difference of compared PPs' d components
    ddPP = difference['dP'] + difference['dx'] + difference['dL'] + difference['dDx'] + difference['dDy']
    derPP = CderPP(PP=PP, _PP=_PP, mmPP=mmPP, dmPP=dmPP, mdPP=mdPP, ddPP=ddPP)
    '''
    return derPP


def accum_derPP(PP, derPP, fPPd):

    if fPPd:  # PP cluster by d
        PP.derPPd_.append(derPP)
    else:     # PP cluster by m
        PP.derPPm_.append(derPP)
    PP.accum_from(derPP)
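# A note on the same-sign test fixed in comp_dx above (a minimal, self-contained
# sketch of a Python language fact, not part of the original module): the
# unparenthesized form `dx > 0 == _dx > 0` chains as
# `(dx > 0) and (0 == _dx) and (_dx > 0)`, which is False for any nonzero dx:
dx, _dx = 3.0, 2.0
assert ((dx > 0) == (_dx > 0)) is True   # both positive: same sign, as intended
assert (dx > 0 == _dx > 0) is False      # chained comparison: 0 == _dx fails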
__version__ = "0.2"

from PyQNX6.core import *
import numpy
import six

from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import collections_abc
from chainer.utils import type_check


def _tensordot(a, b, a_axes, b_axes, c_axes=None):
    a_col_ndim = len(a_axes[1])
    b_row_ndim = len(b_axes[0])
    if a_col_ndim != b_row_ndim:
        raise ValueError('axes count mismatch')
    if a.ndim < a_col_ndim or b.ndim < b_row_ndim:
        raise ValueError('dimension of input tensors must be '
                         'greater equal to dot-axes count ({})'
                         .format(a_col_ndim))
    for a_axis, b_axis in zip(a_axes[1], b_axes[0]):
        if a.shape[a_axis] != b.shape[b_axis]:
            raise ValueError('shape mismatch')

    xp = cuda.get_array_module(a)
    y = xp.tensordot(a, b, axes=(tuple(a_axes[1]), tuple(b_axes[0])))

    if c_axes is not None:
        a_row_ndim = len(a_axes[0])
        b_col_ndim = len(b_axes[1])
        c_row_ndim = len(c_axes[0])
        c_col_ndim = len(c_axes[1])
        if a_row_ndim != c_row_ndim:
            raise ValueError('axes count mismatch')
        if b_col_ndim != c_col_ndim:
            raise ValueError('axes count mismatch')

        trans = [None for i in six.moves.range(y.ndim)]
        table_a = [1 if i in a_axes[0] else 0 for i in six.moves.range(a.ndim)]
        table_a = numpy.cumsum(table_a) - 1
        for i, c_axis in enumerate(c_axes[0]):
            trans[c_axis] = table_a[a_axes[0][i]]
        table_b = [1 if i in b_axes[1] else 0 for i in six.moves.range(b.ndim)]
        table_b = numpy.cumsum(table_b) - 1
        for i, c_axis in enumerate(c_axes[1]):
            trans[c_axis] = table_b[b_axes[1][i]] + len(a_axes[0])
        for i, c_axis in enumerate(trans):
            if i != c_axis:
                y = xp.transpose(y, trans)
                break

    return y


class TensorDot(function_node.FunctionNode):

    def __init__(self, axes=2, a_axes=None, b_axes=None, c_axes=None,
                 dtype=None):
        self.axes = axes
        self.a_axes = a_axes
        self.b_axes = b_axes
        self.c_axes = c_axes
        self.dtype = dtype

        if isinstance(axes, collections_abc.Sequence):
            if len(axes) != 2:
                raise ValueError('axes must be a pair of sequence of integers '
                                 'when it is a list or tuple.')
        elif isinstance(axes, int):
            pass
        else:
            raise TypeError('axes must be a pair of sequence of integers or '
                            'an integer')

    def check_type_forward(self, in_types):
        type_check.argname(in_types, ('a', 'b'))

        a_type, b_type = in_types
        type_check.expect(
            a_type.dtype.kind == 'f',
            b_type.dtype.kind == 'f',
        )

    def forward(self, inputs):
        self.retain_inputs((0, 1))
        a, b = inputs

        if self.a_axes is None or self.b_axes is None:
            a_axes = [[], []]  # 0:row axes, 1:col axes
            b_axes = [[], []]  # 0:row axes, 1:col axes
            axes = self.axes
            if isinstance(axes, collections_abc.Sequence):
                a_axes[1], b_axes[0] = axes
                if numpy.isscalar(a_axes[1]):
                    a_axes[1] = a_axes[1],
                if numpy.isscalar(b_axes[0]):
                    b_axes[0] = b_axes[0],
            else:
                a_axes[1] = six.moves.range(a.ndim - axes, a.ndim)
                b_axes[0] = six.moves.range(axes)
            a_range = six.moves.range(a.ndim)
            a_axes[0] = [i for i in a_range if i not in a_axes[1]]
            b_range = six.moves.range(b.ndim)
            b_axes[1] = [i for i in b_range if i not in b_axes[0]]
            self.a_axes = a_axes
            self.b_axes = b_axes

        c = _tensordot(a, b, self.a_axes, self.b_axes, self.c_axes)

        if self.c_axes is None:
            c_axes = [[], []]  # 0:row axes, 1:col axes
            c_row_ndim = len(self.a_axes[0])
            c_col_ndim = len(self.b_axes[1])
            c_axes[0] = six.moves.range(c_row_ndim)
            c_axes[1] = six.moves.range(c_row_ndim, c_row_ndim + c_col_ndim)
            self.c_axes = c_axes

        return utils.force_array(c, self.dtype),

    def backward(self, indexes, grad_outputs):
        a, b = self.get_retained_inputs()
        gc, = grad_outputs

        ga = None
        if 0 in indexes:
            ga, = TensorDot(a_axes=self.c_axes,
                            b_axes=[self.b_axes[1], self.b_axes[0]],
                            c_axes=self.a_axes,
                            dtype=a.dtype).apply((gc, b))

        gb = None
        if 1 in indexes:
            gb, = TensorDot(a_axes=[self.a_axes[1], self.a_axes[0]],
                            b_axes=self.c_axes,
                            c_axes=self.b_axes,
                            dtype=b.dtype).apply((a, gc))

        return ga, gb


def tensordot(a, b, axes=2):
    """Returns the tensor dot product of two arrays along specified axes.

    This is equivalent to computing the dot product along the specified axes,
    which are treated as one axis by reshaping.

    Args:
        a (Variable): The first argument.
        b (Variable): The second argument.
        axes:
            - If it is an integer, then ``axes`` axes at the last of ``a`` and
              the first of ``b`` are used.
            - If it is a pair of sequences of integers, then these two
              sequences specify the list of axes for ``a`` and ``b``. The
              corresponding axes are paired for sum-product.

    Returns:
        ~chainer.Variable: The tensor dot product of ``a`` and ``b`` along the
        axes specified by ``axes``.

    .. admonition:: Example

        >>> a = np.random.rand(5, 3, 2)
        >>> b = np.random.rand(3, 2, 4)
        >>> c = F.tensordot(a, b, axes=2)
        >>> c.shape
        (5, 4)

    .. seealso:: :func:`numpy.tensordot`

    """
    return TensorDot(axes=axes).apply((a, b))[0]
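# A short supplement to the docstring example above, using the pair-of-sequences
# form of ``axes`` (this mirrors numpy.tensordot semantics, as implemented by
# _tensordot above): axes=([1, 2], [0, 1]) pairs a's axes 1, 2 with b's axes
# 0, 1, and is equivalent to axes=2 for these shapes:
#
#     >>> a = np.random.rand(5, 3, 2)
#     >>> b = np.random.rand(3, 2, 4)
#     >>> c = F.tensordot(a, b, axes=([1, 2], [0, 1]))
#     >>> c.shape
#     (5, 4)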
import pytest

from di.core.element import Element
from di.core.module import (
    Module,
    ModuleElementConsistencyCheck,
    ModuleElementConsistencyError,
    ModuleImportSolver,
    ModuleImportSolverError,
)


def test_module_cycle():
    modules = [Module(name=f"{index}") for index in range(3)]
    modules[0].imports = {modules[1]}
    modules[1].imports = {modules[2]}
    modules[2].imports = {modules[0]}
    solver = ModuleImportSolver()
    with pytest.raises(ModuleImportSolverError):
        solver.solve(modules)


def test_module_simple():
    modules = [Module(name=f"{index}") for index in range(4)]
    modules[1].imports = {modules[0]}
    modules[2].imports = {modules[0]}
    modules[3].imports = {modules[1], modules[2]}
    solver = ModuleImportSolver()
    plan = solver.solve(modules)
    order = plan.steps
    assert len(order[0]) == 1 and modules[0] in order[0]
    assert len(order[1]) == 2 and modules[1] in order[1] and modules[2] in order[1]
    assert len(order[2]) == 1 and modules[3] in order[2]


def test_module_consistency_check_internals():
    check = ModuleElementConsistencyCheck()
    elements = [Element(injector=..., strategy=...) for _ in range(4)]

    check.check([Module(elements={*elements}, exports={*elements[:2]})])

    with pytest.raises(ModuleElementConsistencyError):
        check.check([Module(elements={*elements[:2]}, exports={*elements[1:]})])

    a = Module(elements={*elements[:2]}, exports={*elements[:2]})
    b = Module(elements={*elements[2:]}, exports={*elements}, imports={a})
    check.check([a, b])

    a = Module(elements={*elements[:2]}, exports={*elements[:1]})
    b = Module(elements={*elements[2:]}, exports={*elements}, imports={a})
    with pytest.raises(ModuleElementConsistencyError):
        check.check([a, b])

    a = Module(elements={*elements}, bootstrap={*elements})
    check.check([a])

    a = Module(elements={*elements[:2]}, bootstrap={*elements})
    with pytest.raises(ModuleElementConsistencyError):
        check.check([a])

    a = Module(elements={*elements[:2]}, exports={*elements[:1]})
    b = Module(elements={*elements[2:]}, bootstrap={*elements})
    with pytest.raises(ModuleElementConsistencyError):
        check.check([a, b])


def test_module_consistency_check_duplicates():
    check = ModuleElementConsistencyCheck()
    elements = [Element(injector=..., strategy=...) for _ in range(8)]

    check.check(
        [
            Module(elements={*elements[:4]}, exports={*elements[:2]}),
            Module(elements={*elements[4:]}, exports={*elements[6:]}),
        ]
    )

    with pytest.raises(ModuleElementConsistencyError):
        check.check(
            [
                Module(elements={*elements}, exports={*elements[:2]}),
                Module(elements={*elements[4:]}, exports={*elements[6:]}),
            ]
        )
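# A minimal sketch (not the library's implementation) of the layered topological
# order that test_module_simple asserts: each step holds the modules whose
# imports are all satisfied by earlier steps, and a cycle leaves modules
# unplaceable, which corresponds to the ModuleImportSolverError case above.
def layered_order(modules):
    placed = set()
    steps = []
    pending = list(modules)
    while pending:
        step = [m for m in pending if m.imports <= placed]  # all imports already placed
        if not step:
            raise ValueError("import cycle")  # the solver raises ModuleImportSolverError here
        placed.update(step)
        pending = [m for m in pending if m not in step]
        steps.append(step)
    return steps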
import acipdt
import xlrd
import xlwt
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from xlutils.copy import copy
from orderedset import OrderedSet
import sys
import time
import ipaddress
import getpass
import os

# Log levels 0 = None, 1 = Class only, 2 = Line
log_level = 2

# Define the name of the configuration file you will be using.
# This doesn't alter the folder name.
ACI_DEPLOY_FILE = 'aci_deploy.xls'

# Adding these values is NOT secure. Use for testing only.
APICIP = None
APICUSER = None
APICPASS = None


def stdout_log(sheet, line):
    if log_level == 0:
        return
    elif ((log_level == (1) or log_level == (2)) and
            (sheet) and (line is None)):
        print('*' * 80)
        print('Starting work on {} section'.format(sheet))
        print('*' * 80)
    elif log_level == (2) and (sheet) and (line is not None):
        print('Deploying line {} from section {}...'.format(line, sheet))
    else:
        return


def read_in(usr_path):
    try:
        wb_path = os.path.join(usr_path, ACI_DEPLOY_FILE)
        wb = xlrd.open_workbook(wb_path)
        print("Workbook Loaded.")
    except Exception as e:
        print("Something went wrong opening the workbook - ABORT!")
        sys.exit(e)
    return wb


def findKeys(ws, rows):
    func_list = OrderedSet()
    for i in range(2, rows):
        if (ws.cell(i, 0)).value:
            func_list.add((ws.cell(i, 0)).value)
        else:
            i += 1
    return func_list


def countKeys(ws, rows, func):
    count = 0
    for i in range(2, rows):
        if (ws.cell(i, 0)).value == func:
            count += 1
        else:
            i += 1
    return count


def findVars(ws, rows, func, count):
    var_list = []
    var_dict = {}
    for i in range(2, rows):
        if (ws.cell(i, 0)).value == func:
            try:
                for x in range(4, 17):
                    if (ws.cell(i - 1, x)).value:
                        var_list.append((ws.cell(i - 1, x)).value)
                    else:
                        x += 1
            except Exception as e:
                e = e
                pass
            break
    while count > 0:
        var_dict[count] = {}
        var_count = 0
        for z in var_list:
            var_dict[count][z] = ws.cell(i + count - 1, 4 + var_count).value
            var_count += 1
        var_dict[count]['row'] = i + count - 1
        count -= 1
    return var_dict


def wb_update(wr_ws, status, i):
    # build green, red, and yellow style sheets for excel
    green_st = xlwt.easyxf('pattern: pattern solid;')
    green_st.pattern.pattern_fore_colour = 3
    red_st = xlwt.easyxf('pattern: pattern solid;')
    red_st.pattern.pattern_fore_colour = 2
    yellow_st = xlwt.easyxf('pattern: pattern solid;')
    yellow_st.pattern.pattern_fore_colour = 5
    # if stanzas to catch the status code from the request and then write the
    # appropriate information in the workbook; this then writes the changes
    # to the doc
    if status == 200:
        wr_ws.write(i, 1, 'Success (200)', green_st)
    if status == 400:
        print("Error 400 - Bad Request - ABORT!")
        print("Probably have a bad URL or payload")
        wr_ws.write(i, 1, 'Bad Request (400)', red_st)
        pass
    if status == 401:
        print("Error 401 - Unauthorized - ABORT!")
        print("Probably have incorrect credentials")
        wr_ws.write(i, 1, 'Unauthorized (401)', red_st)
        pass
    if status == 403:
        print("Error 403 - Forbidden - ABORT!")
        print("Server refuses to handle your request")
        wr_ws.write(i, 1, 'Forbidden (403)', red_st)
        pass
    if status == 404:
        print("Error 404 - Not Found - ABORT!")
        print("Seems like you're trying to POST to a page that doesn't"
              " exist.")
        wr_ws.write(i, 1, 'Not Found (404)', red_st)
        pass
    if status == 666:
        print("Error - Something failed!")
        print("The POST failed, see stdout for the exception.")
        wr_ws.write(i, 1, 'Unknown Failure', yellow_st)
        pass
    if status == 667:
        print("Error - Invalid Input!")
        print("Invalid integer or other input.")
        wr_ws.write(i, 1, 'Invalid Input', yellow_st)
        pass


def pod_policies(apic, cookies, wb, wr_wb):
    ws = wb.sheet_by_name('Fabric Pod Policies')
    wr_ws = wr_wb.get_sheet(0)
    rows = ws.nrows
    func_list = findKeys(ws, rows)
    podpol = acipdt.FabPodPol(apic, cookies)
    stdout_log(wr_ws.name, None)
    for func in func_list:
        count = countKeys(ws, rows, func)
        var_dict = findVars(ws, rows, func, count)
        for pos in var_dict:
            row_num = var_dict[pos]['row']
            del var_dict[pos]['row']
            for x in list(var_dict[pos].keys()):
                if var_dict[pos][x] == '':
                    del var_dict[pos][x]
            stdout_log(wr_ws.name, row_num)
            status = eval("podpol.%s(**var_dict[pos])" % func)
            wb_update(wr_ws, status, row_num)
            time.sleep(.025)


def access_policies(apic, cookies, wb, wr_wb):
    ws = wb.sheet_by_name('Fabric Access Policies')
    wr_ws = wr_wb.get_sheet(1)
    rows = ws.nrows
    func_list = findKeys(ws, rows)
    accpol = acipdt.FabAccPol(apic, cookies)
    stdout_log(wr_ws.name, None)
    for func in func_list:
        count = countKeys(ws, rows, func)
        var_dict = findVars(ws, rows, func, count)
        for pos in var_dict:
            row_num = var_dict[pos]['row']
            del var_dict[pos]['row']
            for x in list(var_dict[pos].keys()):
                if var_dict[pos][x] == '':
                    del var_dict[pos][x]
            stdout_log(wr_ws.name, row_num)
            status = eval("accpol.%s(**var_dict[pos])" % func)
            wb_update(wr_ws, status, row_num)
            time.sleep(.025)


def tn_policies(apic, cookies, wb, wr_wb):
    ws = wb.sheet_by_name('Tenant Configuration')
    wr_ws = wr_wb.get_sheet(2)
    rows = ws.nrows
    func_list = findKeys(ws, rows)
    tnpol = acipdt.FabTnPol(apic, cookies)
    stdout_log(wr_ws.name, None)
    for func in func_list:
        count = countKeys(ws, rows, func)
        var_dict = findVars(ws, rows, func, count)
        for pos in var_dict:
            row_num = var_dict[pos]['row']
            del var_dict[pos]['row']
            for x in list(var_dict[pos].keys()):
                if var_dict[pos][x] == '':
                    del var_dict[pos][x]
            stdout_log(wr_ws.name, row_num)
            status = eval("tnpol.%s(**var_dict[pos])" % func)
            wb_update(wr_ws, status, row_num)
            time.sleep(.025)


def l3_policies(apic, cookies, wb, wr_wb):
    ws = wb.sheet_by_name('L3 Out')
    wr_ws = wr_wb.get_sheet(3)
    rows = ws.nrows
    func_list = findKeys(ws, rows)
    l3pol = acipdt.FabL3Pol(apic, cookies)
    stdout_log(wr_ws.name, None)
    for func in func_list:
        count = countKeys(ws, rows, func)
        var_dict = findVars(ws, rows, func, count)
        for pos in var_dict:
            row_num = var_dict[pos]['row']
            del var_dict[pos]['row']
            for x in list(var_dict[pos].keys()):
                if var_dict[pos][x] == '':
                    del var_dict[pos][x]
            stdout_log(wr_ws.name, row_num)
            status = eval("l3pol.%s(**var_dict[pos])" % func)
            wb_update(wr_ws, status, row_num)
            time.sleep(.025)


def vmm_policies(apic, cookies, wb, wr_wb):
    ws = wb.sheet_by_name('VMM')
    wr_ws = wr_wb.get_sheet(4)
    rows = ws.nrows
    func_list = findKeys(ws, rows)
    vmm = acipdt.FabVMM(apic, cookies)
    stdout_log(wr_ws.name, None)
    for func in func_list:
        count = countKeys(ws, rows, func)
        var_dict = findVars(ws, rows, func, count)
        for pos in var_dict:
            row_num = var_dict[pos]['row']
            del var_dict[pos]['row']
            for x in list(var_dict[pos].keys()):
                if var_dict[pos][x] == '':
                    del var_dict[pos][x]
            stdout_log(wr_ws.name, row_num)
            status = eval("vmm.%s(**var_dict[pos])" % func)
            wb_update(wr_ws, status, row_num)
            time.sleep(.025)


def fab_admin_policies(apic, cookies, wb, wr_wb):
    ws = wb.sheet_by_name('Fabric Admin')
    wr_ws = wr_wb.get_sheet(5)
    rows = ws.nrows
    func_list = findKeys(ws, rows)
    fabadmin = acipdt.FabAdminMgmt(apic, cookies)
    stdout_log(wr_ws.name, None)
    for func in func_list:
        count = countKeys(ws, rows, func)
        var_dict = findVars(ws, rows, func, count)
        for pos in var_dict:
            row_num = var_dict[pos]['row']
            del var_dict[pos]['row']
            for x in list(var_dict[pos].keys()):
                if var_dict[pos][x] == '':
                    del var_dict[pos][x]
            stdout_log(wr_ws.name, row_num)
            status = eval("fabadmin.%s(**var_dict[pos])" % func)
            wb_update(wr_ws, status, row_num)
            time.sleep(.025)


def mpod_policies(apic, cookies, wb, wr_wb):
    ws = wb.sheet_by_name('Multipod')
    wr_ws = wr_wb.get_sheet(6)
    rows = ws.nrows
    func_list = findKeys(ws, rows)
    mpod = acipdt.Mpod(apic, cookies)
    stdout_log(wr_ws.name, None)
    for func in func_list:
        count = countKeys(ws, rows, func)
        var_dict = findVars(ws, rows, func, count)
        for pos in var_dict:
            row_num = var_dict[pos]['row']
            del var_dict[pos]['row']
            for x in list(var_dict[pos].keys()):
                if var_dict[pos][x] == '':
                    del var_dict[pos][x]
            stdout_log(wr_ws.name, row_num)
            status = eval("mpod.%s(**var_dict[pos])" % func)
            wb_update(wr_ws, status, row_num)
            time.sleep(.025)


def take_snapshot(apic, cookies, snapshot_name):
    query = acipdt.Query(apic, cookies)
    query_string = 'configSnapshot'
    query_payload = query.query_class(query_string)
    payload_len = len(query_payload[1]['imdata'])
    snap_count = 0
    for x in range(0, payload_len):
        try:
            if (query_payload[1]['imdata'][x]['configSnapshot']['attributes']
                    ['fileName'])[4:17] == snapshot_name:
                snap_count += 1
        except Exception as e:
            e = e
            print("It seems the APIC does not support snapshots, moving on.")
            return None
    if snap_count > 0:
        print("A snapshot including 'acipdt_backup' already exists. Would you "
              "like to delete this snapshot or exit?")
        user_input = input("Delete 'd' or Exit 'q' [q]: ")
        selection = user_input or 'q'
        if selection.lower() == 'd':
            del_snap_pol(apic, cookies, snapshot_name)
        elif selection.lower() == 'q':
            sys.exit()
    snapshot = 'true'
    status = 'created,modified'
    snapshot_args = {}
    snapshot_args['name'] = snapshot_name
    snapshot_args['snapshot'] = snapshot
    snapshot_args['status'] = status
    cfgmgmt = acipdt.FabCfgMgmt(apic, cookies)
    status = cfgmgmt.backup(**snapshot_args)
    if status == 200:
        print("Snapshot taken successfully, continuing.")
        time.sleep(1)
        snap = True
        return snap
    else:
        print("Snapshot failed for some reason, do you want to continue?")
        while True:
            user_input = input("Continue 'y' or 'n' [n]: ")
            selection = user_input or 'n'
            if selection.lower() == 'y':
                snap = None
                return snap
            elif selection.lower() == 'n':
                del_snap_pol(apic, cookies, snapshot_name)
                sys.exit()


def revert_snapshot(apic, cookies, snapshot_name):
    print('Deployment completed, please verify status in workbook.')
    while True:
        user_input = input("Rollback to previous snapshot 'y' or 'n' [n]: ")
        selection = user_input or 'n'
        if selection.lower() == 'n':
            return
        elif selection.lower() == 'y':
            query = acipdt.Query(apic, cookies)
            query_string = 'configSnapshot'
            query_payload = query.query_class(query_string)
            payload_len = len(query_payload[1]['imdata'])
            for x in range(0, payload_len):
                if (query_payload[1]['imdata'][x]['configSnapshot']
                        ['attributes']['fileName'])[4:17] == snapshot_name:
                    snapshot_name = (query_payload[1]['imdata'][x]
                                     ['configSnapshot']['attributes']
                                     ['fileName'])
                    break
            cfgmgmt = acipdt.FabCfgMgmt(apic, cookies)
            snapshot_args = {}
            snapshot_args['name'] = snapshot_name
            cfgmgmt.snapback(**snapshot_args)
            return


def del_snap_pol(apic, cookies, snapshot_name):
    status = 'deleted'
    snapshot = 'true'
    snapshot_args = {}
    snapshot_args['name'] = snapshot_name
    snapshot_args['snapshot'] = snapshot
    snapshot_args['status'] = status
    cfgmgmt = acipdt.FabCfgMgmt(apic, cookies)
    status = cfgmgmt.backup(**snapshot_args)


def main():
    # Disable urllib3 warnings
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

    # Ask user for path to the ACI_DEPLOY_FILE
    while True:
        print('Please enter the path to {0}, note that this is '
              'also where the workbook will be saved upon completion.'
              .format(ACI_DEPLOY_FILE))
        usr_path = input('Path: ')
        if os.path.exists(usr_path):
            break
        else:
            print('Enter a valid path.')

    # Static snapshot name
    snapshot_name = 'acipdt_backup'

    # Prompt for APIC IP if the constant is None
    if APICIP is not None:
        apic = APICIP
    else:
        while True:
            apic = input('Enter the APIC IP: ')
            try:
                ipaddress.ip_address(apic)
                break
            except Exception as e:
                print('Enter a valid IP address. Error received: {}'.format(e))

    # Prompt for APIC Username if the constant is None
    if APICUSER is not None:
        user = APICUSER
    else:
        user = input('Enter APIC username: ')

    # Prompt for APIC Password if the constant is None
    if APICPASS is not None:
        pword = APICPASS
    else:
        while True:
            try:
                pword = getpass.getpass(prompt='Enter APIC password: ')
                break
            except Exception as e:
                print('Something went wrong. Error received: {}'.format(e))

    # Initialize the fabric login method, passing appropriate variables
    fablogin = acipdt.FabLogin(apic, user, pword)
    # Run the login and load the cookies var
    cookies = fablogin.login()
    # Load workbook
    wb = read_in(usr_path)
    # Copy workbook to a RW version
    wr_wb = copy(wb)

    snap = take_snapshot(apic, cookies, snapshot_name)

    pod_policies(apic, cookies, wb, wr_wb)
    access_policies(apic, cookies, wb, wr_wb)
    tn_policies(apic, cookies, wb, wr_wb)
    l3_policies(apic, cookies, wb, wr_wb)
    vmm_policies(apic, cookies, wb, wr_wb)
    fab_admin_policies(apic, cookies, wb, wr_wb)
    mpod_policies(apic, cookies, wb, wr_wb)

    # Save workbook to user path
    wr_wb.save('{0}/{1}'.format(usr_path, ACI_DEPLOY_FILE))

    if snap is not None:
        revert_snapshot(apic, cookies, snapshot_name)
    del_snap_pol(apic, cookies, snapshot_name)


if __name__ == '__main__':
    main()
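# The deploy functions above dispatch worksheet-named methods with
# eval("podpol.%s(**var_dict[pos])" % func). A safer equivalent, avoiding eval
# on spreadsheet-supplied strings, is plain attribute lookup; a minimal,
# self-contained sketch of the pattern (the _Demo class is hypothetical):
class _Demo:
    def hello(self, name):
        return 'hi ' + name

func = 'hello'
assert getattr(_Demo(), func)(name='x') == 'hi x'  # same effect as eval("obj.%s(...)" % func)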
from distutils.version import StrictVersion
from unittest.mock import patch

from keepassxc_pwned.keepass_wrapper import KeepassWrapper

from .common import *

# use keepassxc.cli wrapper script to test changing location of keepassxc-cli
keepass_different_binary = os.path.abspath(
    os.path.join(this_dir, "keepassxc.cli"))


class OldKeepassWrapper(KeepassWrapper):
    def version(self) -> StrictVersion:
        return StrictVersion("2.4.9")


class VersionError(KeepassWrapper):
    def version(self):
        return StrictVersion("2.5.a")  # raises ValueError


def test_is_strict_version():
    assert isinstance(KeepassWrapper().version(), StrictVersion)


def test_subcommand_old():
    assert OldKeepassWrapper().backwards_compatible_export() == "extract"


def test_subcommand_new():
    assert KeepassWrapper().backwards_compatible_export() == "export"


def test_issue_parsing_version_string():
    # should return "export" by default (newer syntax)
    assert VersionError().backwards_compatible_export() == "export"


@patch("shutil.which", return_value=None)
def test_no_keepass_cli(mock_shutil_which, caplog):
    # with default keepassxc-cli as --keepassxc-cli flag (binary)
    with pytest.raises(SystemExit):
        assert KeepassWrapper().verify_binary_exists()
    assert ("Could not find a binary called keepassxc-cli on your $PATH."
            in caplog.text)


def test_use_different_binary():
    k = KeepassWrapper(keepass_different_binary)
    assert k.keepassxc_cli_location == keepass_different_binary
    assert k.backwards_compatible_export() == "export"
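# A minimal, self-contained sketch of the patching mechanics used in
# test_no_keepass_cli above: within the patched scope, shutil.which returns
# None for any binary name, simulating a missing keepassxc-cli on $PATH.
import shutil
from unittest.mock import patch

with patch("shutil.which", return_value=None):
    assert shutil.which("keepassxc-cli") is None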
# -*- coding: utf-8 -*-
'''
Support for the softwareupdate command on MacOS.
'''
from __future__ import absolute_import

# Import python libs
import re
import os

# Import salt libs
import salt.utils
import salt.utils.mac_utils
from salt.exceptions import CommandExecutionError, SaltInvocationError

__virtualname__ = 'softwareupdate'


def __virtual__():
    '''
    Only for MacOS
    '''
    if not salt.utils.is_darwin():
        return (False, 'The softwareupdate module could not be loaded: '
                       'module only works on MacOS systems.')

    return __virtualname__


def _get_available(recommended=False, restart=False):
    '''
    Utility function to get all available update packages.

    Sample return data:

    .. code-block:: python

        {'updatename': '1.2.3-45', ...}
    '''
    cmd = ['softwareupdate', '--list']
    out = salt.utils.mac_utils.execute_return_result(cmd)

    # rexp parses lines that look like the following:
    #    * Safari6.1.2MountainLion-6.1.2
    #       Safari (6.1.2), 51679K [recommended]
    #    - iCal-1.0.2
    #       iCal, 1.0.2, 6520K
    rexp = re.compile('(?m)^   [*|-] '
                      r'([^ ].*)[\r\n].*\(([^\)]+)')

    if salt.utils.is_true(recommended):
        # rexp parses lines that look like the following:
        #    * Safari6.1.2MountainLion-6.1.2
        #       Safari (6.1.2), 51679K [recommended]
        rexp = re.compile('(?m)^   [*] '
                          r'([^ ].*)[\r\n].*\(([^\)]+)')

    keys = ['name', 'version']
    _get = lambda l, k: l[keys.index(k)]

    updates = rexp.findall(out)

    ret = {}
    for line in updates:
        name = _get(line, 'name')
        version_num = _get(line, 'version')
        ret[name] = version_num

    if not salt.utils.is_true(restart):
        return ret

    # rexp1 parses lines that look like the following:
    #    * Safari6.1.2MountainLion-6.1.2
    #       Safari (6.1.2), 51679K [recommended] [restart]
    rexp1 = re.compile('(?m)^   [*|-] '
                       r'([^ ].*)[\r\n].*restart*')

    restart_updates = rexp1.findall(out)
    ret_restart = {}
    for update in ret:
        if update in restart_updates:
            ret_restart[update] = ret[update]

    return ret_restart


def list_available(recommended=False, restart=False):
    '''
    List all available updates.

    :param bool recommended: Show only recommended updates.

    :param bool restart: Show only updates that require a restart.

    :return: Returns a dictionary containing the updates
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.list_available
    '''
    return _get_available(recommended, restart)


def ignore(name):
    '''
    Ignore a specific program update. When an update is ignored, the '-' and
    version number at the end will be omitted, so "SecUpd2014-001-1.0" becomes
    "SecUpd2014-001". It will be removed automatically if present. An update
    is successfully ignored when it no longer shows up after list_updates.

    :param name: The name of the update to add to the ignore list.
    :ptype: str

    :return: True if successful, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.ignore <update-name>
    '''
    # remove everything after and including the '-' in the update's name.
    to_ignore = name.rsplit('-', 1)[0]

    cmd = ['softwareupdate', '--ignore', to_ignore]
    salt.utils.mac_utils.execute_return_success(cmd)

    return to_ignore in list_ignored()


def list_ignored():
    '''
    List all updates that have been ignored. Ignored updates are shown
    without the '-' and version number at the end; this is how the
    softwareupdate command works.

    :return: The list of ignored updates
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.list_ignored
    '''
    cmd = ['softwareupdate', '--list', '--ignore']
    out = salt.utils.mac_utils.execute_return_result(cmd)

    # rexp parses lines that look like the following:
    #     "Safari6.1.2MountainLion-6.1.2",
    # or:
    #     Safari6.1.2MountainLion-6.1.2
    rexp = re.compile('(?m)^    ["]?'
                      r'([^,|\s].*[^"|\n|,])[,|"]?')

    return rexp.findall(out)


def reset_ignored():
    '''
    Make sure the ignored updates are not ignored anymore; returns a list of
    the updates that are no longer ignored.

    :return: True if the list was reset, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.reset_ignored
    '''
    cmd = ['softwareupdate', '--reset-ignored']

    salt.utils.mac_utils.execute_return_success(cmd)

    return list_ignored() == []


def schedule_enabled():
    '''
    Check the status of automatic update scheduling.

    :return: True if scheduling is enabled, False if disabled

    - ``True``: Automatic checking is on
    - ``False``: Automatic checking is off

    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.schedule_enabled
    '''
    cmd = ['softwareupdate', '--schedule']
    ret = salt.utils.mac_utils.execute_return_result(cmd)

    enabled = ret.split()[-1]

    return salt.utils.mac_utils.validate_enabled(enabled) == 'on'


def schedule_enable(enable):
    '''
    Enable/disable automatic update scheduling.

    :param enable: True/On/Yes/1 to turn on automatic updates. False/No/Off/0
        to turn off automatic updates. If this value is empty, the current
        status will be returned.
    :type: bool str

    :return: True if scheduling is enabled, False if disabled
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.schedule_enable on|off
    '''
    status = salt.utils.mac_utils.validate_enabled(enable)

    cmd = ['softwareupdate',
           '--schedule',
           salt.utils.mac_utils.validate_enabled(status)]
    salt.utils.mac_utils.execute_return_success(cmd)

    return salt.utils.mac_utils.validate_enabled(schedule_enabled()) == status


def update_all(recommended=False, restart=True):
    '''
    Install all available updates. Returns a dictionary containing the name
    of the update and the status of its installation.

    :param bool recommended: If set to True, only install the recommended
        updates. If set to False (default) all updates are installed.

    :param bool restart: Set this to False if you do not want to install
        updates that require a restart. Default is True.

    :return: A dictionary containing the updates that were installed and the
        status of each installation. If no updates were installed an empty
        dictionary is returned.
    :rtype: dict

    - ``True``: The update was installed.
    - ``False``: The update was not installed.

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.update_all
    '''
    to_update = _get_available(recommended, restart)

    if not to_update:
        return {}

    for _update in to_update:
        cmd = ['softwareupdate', '--install', _update]
        salt.utils.mac_utils.execute_return_success(cmd)

    ret = {}
    updates_left = _get_available()

    for _update in to_update:
        ret[_update] = True if _update not in updates_left else False

    return ret


def update(name):
    '''
    Install a named update.

    :param str name: The name of the update to install.

    :return: True if successfully updated, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.update <update-name>
    '''
    if not update_available(name):
        raise SaltInvocationError('Update not available: {0}'.format(name))

    cmd = ['softwareupdate', '--install', name]
    salt.utils.mac_utils.execute_return_success(cmd)

    return not update_available(name)


def update_available(name):
    '''
    Check whether or not an update is available with a given name.

    :param str name: The name of the update to look for

    :return: True if available, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.update_available <update-name>
        salt '*' softwareupdate.update_available "<update with whitespace>"
    '''
    return name in _get_available()


def list_downloads():
    '''
    Return a list of all updates that have been downloaded locally.

    :return: A list of updates that have been downloaded
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.list_downloads
    '''
    outfiles = []
    for root, subFolder, files in os.walk('/Library/Updates'):
        for f in files:
            outfiles.append(os.path.join(root, f))

    dist_files = []
    for f in outfiles:
        if f.endswith('.dist'):
            dist_files.append(f)

    ret = []
    for update in _get_available():
        for f in dist_files:
            with salt.utils.fopen(f) as fhr:
                if update.rsplit('-', 1)[0] in fhr.read():
                    ret.append(update)

    return ret


def download(name):
    '''
    Download a named update so that it can be installed later with the
    ``update`` or ``update_all`` functions.

    :param str name: The update to download.

    :return: True if successful, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.download <update name>
    '''
    if not update_available(name):
        raise SaltInvocationError('Update not available: {0}'.format(name))

    if name in list_downloads():
        return True

    cmd = ['softwareupdate', '--download', name]
    salt.utils.mac_utils.execute_return_success(cmd)

    return name in list_downloads()


def download_all(recommended=False, restart=True):
    '''
    Download all available updates so that they can be installed later with
    the ``update`` or ``update_all`` functions. Returns a list of updates
    that are now downloaded.

    :param bool recommended: If set to True, only download the recommended
        updates. If set to False (default) all updates are downloaded.

    :param bool restart: Set this to False if you do not want to download
        updates that require a restart. Default is True.

    :return: A list containing all downloaded updates on the system.
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.download_all
    '''
    to_download = _get_available(recommended, restart)

    for name in to_download:
        download(name)

    return list_downloads()


def get_catalog():
    '''
    .. versionadded:: 2016.3.0

    Get the current catalog being used for update lookups. Will return a url
    if a custom catalog has been specified. Otherwise the word 'Default' will
    be returned.

    :return: The catalog being used for update lookups
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdates.get_catalog
    '''
    cmd = ['defaults',
           'read',
           '/Library/Preferences/com.apple.SoftwareUpdate.plist']
    out = salt.utils.mac_utils.execute_return_result(cmd)

    if 'AppleCatalogURL' in out:
        cmd.append('AppleCatalogURL')
        out = salt.utils.mac_utils.execute_return_result(cmd)
        return out
    elif 'CatalogURL' in out:
        cmd.append('CatalogURL')
        out = salt.utils.mac_utils.execute_return_result(cmd)
        return out
    else:
        return 'Default'


def set_catalog(url):
    '''
    .. versionadded:: 2016.3.0

    Set the Software Update Catalog to the URL specified.

    :param str url: The url to the update catalog

    :return: True if successful, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdates.set_catalog http://swupd.local:8888/index.sucatalog
    '''
    # This command always returns an error code, though it completes
    # successfully. Success will be determined by making sure get_catalog
    # returns the passed url
    cmd = ['softwareupdate', '--set-catalog', url]

    try:
        salt.utils.mac_utils.execute_return_success(cmd)
    except CommandExecutionError as exc:
        pass

    return get_catalog() == url


def reset_catalog():
    '''
    .. versionadded:: 2016.3.0

    Reset the Software Update Catalog to the default.

    :return: True if successful, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdates.reset_catalog
    '''
    # This command always returns an error code, though it completes
    # successfully. Success will be determined by making sure get_catalog
    # returns 'Default'
    cmd = ['softwareupdate', '--clear-catalog']

    try:
        salt.utils.mac_utils.execute_return_success(cmd)
    except CommandExecutionError as exc:
        pass

    return get_catalog() == 'Default'
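# A self-contained sketch of what the _get_available regex extracts, run
# against sample `softwareupdate --list` output in the format shown in the
# comments above (the sample text is an assumption based on those comments):
import re

sample = ('   * Safari6.1.2MountainLion-6.1.2\n'
          '\tSafari (6.1.2), 51679K [recommended]\n')
rexp = re.compile('(?m)^   [*|-] ' r'([^ ].*)[\r\n].*\(([^\)]+)')
assert rexp.findall(sample) == [('Safari6.1.2MountainLion-6.1.2', '6.1.2')]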
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 13:01:46 2020

@author: dnb3k
"""
# use the unique Full Tokens code to get the dataframe
import ossPyFuncs
import pandas as pd
import wordcloud
import re
import matplotlib.pyplot as plt
import os
import numpy as np

# perform sql query to get company column
postgreSql_selectQuery = "SELECT company FROM gh.ctrs_raw ;"
inputRaw = ossPyFuncs.queryToPDTable(postgreSql_selectQuery)

# obtain the erase list
currentDir = os.path.dirname('ossPyFuncs.py')
eraseList = pd.read_csv(os.path.join(currentDir, 'keyFiles/eraseStrings_v6.csv'),
                        quotechar="'")

# apply the erase list
semiCleanedOutput = pd.DataFrame(ossPyFuncs.eraseFromColumn(inputRaw['company'], eraseList))

# get the counts for the unique values
tableUniqueFullNameCounts = semiCleanedOutput.iloc[:, 0].value_counts()
# convert that output to a proper table
tableUniqueFullNameCounts = tableUniqueFullNameCounts.reset_index()
# rename the columns
tableUniqueFullNameCounts.rename(columns={"company": "count", "index": "company"}, inplace=True)

selfEmployedKeys = re.compile(
    '(?i)self|Me, myself and I|personal|^home$|private|individual|myself|^me$|'
    '\\bindependent\\b|independent contractor|consultant|freelancer|freelance|'
    'self-employed| my ')
dataTest2 = tableUniqueFullNameCounts[tableUniqueFullNameCounts['company'].str.contains(selfEmployedKeys)]
dataTest2 = tableUniqueFullNameCounts[tableUniqueFullNameCounts['company'].str.contains('(?i)S\.R\.L\.')]

freelanceSum = np.sum(dataTest2['count'])
allSum = np.sum(tableUniqueFullNameCounts['count'])


def addBooleanColumnFromCriteria(inputDataToAssess, assessItems, newColumnName):
    """iteratively determine if input column contains a member of another column

    Keyword arguments:
    inputDataToAssess -- a pandas dataframe whose first column holds the set
        of target words/entries to be assessed
    assessItems -- a series or dataframe containing strings (regex
        expressions) which will be searched for (as substrings) in
        inputDataToAssess. This is done iteratively, and a boolean vector is
        created and appended to the output, indicating which entries in
        inputDataToAssess contained a substring from assessItems.
    newColumnName -- name of the new column (i.e. 'government', 'academic', etc.)
    """
    import pandas as pd
    import re

    inputDataToAssess[newColumnName] = False
    # necessary, due to escape nonsense: normalize backslashes in the target column
    inputColumn = inputDataToAssess.iloc[:, 0].replace(regex=True, to_replace='\\\\', value='/')

    for index, row in assessItems.iterrows():  # iterate over the criteria strings
        curReplaceVal = row[0]
        currentRegexExpression = re.compile(curReplaceVal)
        CurrentBoolVec = inputColumn.str.contains(currentRegexExpression, na=False)
        inputDataToAssess[newColumnName].loc[CurrentBoolVec] = True

    return inputDataToAssess
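# A minimal sketch of the Series.str.contains usage above: pandas accepts a
# pre-compiled regex, and the embedded (?i) flag makes the match
# case-insensitive (the sample strings here are illustrative only):
import re
import pandas as pd

s = pd.Series(['Freelance dev', 'Acme Corp', 'self-employed'])
keys = re.compile('(?i)freelance|self-employed')
assert list(s.str.contains(keys)) == [True, False, True]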
# Write the benchmarking functions here.
# See "Writing benchmarks" in the asv docs for more information.
from math import radians

import numpy as np

from passpredict import _rotations


class Rotations:
    """
    Example from Vallado, Eg 11-6, p. 912
    """
    def setup(self, *args):
        self.lat = radians(42.38)
        self.lon = radians(-71.13)
        self.location_ecef = np.array([1526.122, -4465.064, 4276.894])
        self.satellite_ecef = np.array([885.7296, -4389.3856, 5070.1765])

    def time_ecef_to_razel(self):
        _rotations.razel(self.lat, self.lon, self.location_ecef, self.satellite_ecef)

    def time_elevation_at(self):
        _rotations.elevation_at(self.lat, self.lon, self.location_ecef, self.satellite_ecef)

    def time_range_at(self):
        _rotations.range_at(self.lat, self.lon, self.location_ecef, self.satellite_ecef)

    def time_ecef_to_llh(self):
        _rotations.ecef_to_llh(self.satellite_ecef)


class SolarRotations:
    """
    Example from Vallado, Eg.5-1, p.280, April 2, 2006, 00:00 UTC
    """
    def setup(self, *args):
        self.jd = 2453827.5
        self.rmod = np.array([146186212.0, 28788976.0, 12481064.0])
        self.rpef = np.empty(3, dtype=np.double)

    def time_mod2ecef(self):
        _rotations.mod2ecef(self.jd, self.rmod, self.rpef)
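# asv discovers benchmarks by method prefix: `time_*` methods are timed and
# `setup` runs before each. A hypothetical extra benchmark (not part of the
# original suite) reusing the same fixture via inheritance; `peakmem_*`
# measures peak memory in asv:
class RotationsPeakMem(Rotations):

    def peakmem_razel(self):
        _rotations.razel(self.lat, self.lon, self.location_ecef, self.satellite_ecef)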
nilq/baby-python
python
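For orientation, the quantities these benchmarks time can be cross-checked in plain numpy from the same Vallado test vectors. This sketch only assumes the standard ECEF to topocentric SEZ rotation; it is independent of passpredict's compiled _rotations module:

from math import radians, sin, cos, asin, degrees
import numpy as np

lat, lon = radians(42.38), radians(-71.13)
location = np.array([1526.122, -4465.064, 4276.894])      # km, ECEF
satellite = np.array([885.7296, -4389.3856, 5070.1765])   # km, ECEF

rho = satellite - location            # range vector in ECEF
slant_range = np.linalg.norm(rho)     # km
# Rotate the range vector into the SEZ (south-east-zenith) frame.
sez = np.array([
    sin(lat) * cos(lon) * rho[0] + sin(lat) * sin(lon) * rho[1] - cos(lat) * rho[2],
    -sin(lon) * rho[0] + cos(lon) * rho[1],
    cos(lat) * cos(lon) * rho[0] + cos(lat) * sin(lon) * rho[1] + sin(lat) * rho[2],
])
elevation = degrees(asin(sez[2] / slant_range))
print(slant_range, elevation)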
""" Main file! """ import argparse import logging import pickle import string import sys from typing import Optional, TextIO, BinaryIO import ujson from vk_dumper import utils, vk log = logging.getLogger('vk-dumper') def main() -> None: log.info('Starting vk-dumper!') args = parse_args() # Output variants: json_file: Optional[TextIO] = args.json_file[0] if args.json_file else None pickle_file: Optional[BinaryIO] = args.pickle_file[0] if args.pickle_file else None text_file: Optional[TextIO] = args.text_file[0] if args.text_file else None if not (json_file or pickle_file or text_file): log.critical('No dump save method selected, use -j/-p/-x parameters') sys.exit(1) if args.verbose: utils.init_logging(debug=True) log.debug('Logger reinitialized, debug logs enabled') result = vk.dump_messages( args.login, args.password, args.message_count[0] if args.message_count else None, args.vk_ids[0] if args.vk_ids else None ) log.info('Got %d messages', len(result)) if json_file: log.info('Saving results into JSON file "%s"...', json_file.name) ujson.dump(result, json_file, ensure_ascii=False, escape_forward_slashes=False) log.info('...done') if pickle_file: log.info('Saving results into Pickle file "%s"...', pickle_file.name) pickle.dump( result, pickle_file, protocol=pickle.HIGHEST_PROTOCOL, fix_imports=False ) log.info('...done') if text_file: log.info('Saving results into plain text file (with sanitizing) "%s"...', text_file.name) sanitized = '' for entry in result: words = entry['text'] \ .lower() \ .split() bad_chars = string.punctuation + string.ascii_lowercase + string.digits translator = str.maketrans(bad_chars, ' ' * len(bad_chars)) stripped = [word.translate(translator) for word in words] sanitized += ' '.join(stripped) + ' ' if args.markovify: sanitized += '\n' if args.markovify: text_file.write(sanitized) else: text_file.write( ' '.join(sanitized.strip().split()) ) log.info('...done') def parse_args() -> argparse.Namespace: arg_parser = argparse.ArgumentParser( prog='python -m vk_dumper', description='Utility for dumping VK messages.', epilog='desu~' ) arg_parser.add_argument( '--verbose', '-v', action='store_true', help='show additional debug logs', dest='verbose' ) arg_parser.add_argument( '--message-count', '-c', action='store', nargs=1, type=int, help='maximum messages count to extract', metavar='count', dest='message_count' ) arg_parser.add_argument( '--vk-id', '-d', action='append', nargs=1, type=str, help='select usernames to extract, without @', metavar='username', dest='vk_ids' ) arg_parser.add_argument( '--out-json', '-j', action='store', nargs=1, type=argparse.FileType( 'xt', encoding='utf-8', errors='surrogateescape' ), help='choose a path to dump results as JSON', metavar='/path/to/dump.json', dest='json_file' ) arg_parser.add_argument( '--out-pickle', '-p', action='store', nargs=1, type=argparse.FileType('xb'), help='choose a path to dump results as Pickle', metavar='/path/to/dump.pkl', dest='pickle_file' ) arg_parser.add_argument( '--out-txt', '-x', action='store', nargs=1, type=argparse.FileType( 'xt', encoding='utf-8', errors='surrogateescape' ), help='choose a path to dump results as plain text', metavar='/path/to/dump.txt', dest='text_file' ) arg_parser.add_argument( '--markov', '-m', action='store_true', help='store plaintext with line break after each message for markovify', dest='markovify' ) arg_parser.add_argument( 'login', action='store', type=str, help='VK.com phone number/email', metavar='login', ) arg_parser.add_argument( 'password', action='store', type=str, 
help='VK.com password', metavar='password', ) return arg_parser.parse_args() if __name__ == '__main__': utils.init_logging(debug=False) main()
nilq/baby-python
python
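The plain-text sanitizing in vk_dumper above relies on str.maketrans to blank out ASCII letters, digits and punctuation, which in practice keeps only non-Latin (e.g. Cyrillic) words. A small standalone illustration, with a made-up message:

import string

bad_chars = string.punctuation + string.ascii_lowercase + string.digits
translator = str.maketrans(bad_chars, ' ' * len(bad_chars))

message = 'привет, world 123'   # hypothetical VK message text
stripped = [w.translate(translator) for w in message.lower().split()]
print(' '.join(stripped).split())   # -> ['привет']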
from .iostream import cprint, cin, cout, cerr, endl from .cmath import * from . import cmath, iostream
nilq/baby-python
python
from __future__ import unicode_literals from django.core.urlresolvers import reverse from tracpro.test import factories from tracpro.test.cases import TracProTest from .. import charts from .. import models class PollChartTest(TracProTest): def setUp(self): super(PollChartTest, self).setUp() self.org = factories.Org() self.poll = factories.Poll(org=self.org) self.region1 = factories.Region(org=self.org, name="Beta") self.region2 = factories.Region(org=self.org, name="Acme") self.question1 = factories.Question( poll=self.poll, question_type=models.Question.TYPE_MULTIPLE_CHOICE) self.question2 = factories.Question( poll=self.poll, question_type=models.Question.TYPE_OPEN) self.question3 = factories.Question( poll=self.poll, question_type=models.Question.TYPE_NUMERIC) self.pollrun = factories.UniversalPollRun(poll=self.poll) self.contact1 = factories.Contact(org=self.org, region=self.region1) self.response1 = factories.Response( contact=self.contact1, pollrun=self.pollrun, status=models.Response.STATUS_COMPLETE) factories.Answer( response=self.response1, question=self.question1, value="4.00000", category="1 - 5") factories.Answer( response=self.response1, question=self.question2, value="It's very rainy", category="All Responses") factories.Answer( response=self.response1, question=self.question3, value="4.00000", category="1 - 5") self.contact2 = factories.Contact(org=self.org, region=self.region1) self.response2 = factories.Response( contact=self.contact2, pollrun=self.pollrun, status=models.Response.STATUS_COMPLETE) factories.Answer( response=self.response2, question=self.question1, value="3.00000", category="1 - 5") factories.Answer( response=self.response2, question=self.question2, value="rainy and rainy", category="All Responses") factories.Answer( response=self.response2, question=self.question3, value="3.00000", category="1 - 5") self.contact3 = factories.Contact(org=self.org, region=self.region2) self.response3 = factories.Response( contact=self.contact3, pollrun=self.pollrun, status=models.Response.STATUS_COMPLETE) factories.Answer( response=self.response3, question=self.question1, value="8.00000", category="6 - 10") factories.Answer( response=self.response3, question=self.question2, value="Sunny sunny", category="All Responses") factories.Answer( response=self.response3, question=self.question3, value="8.00000", category="6 - 10") self.pollruns = models.PollRun.objects.filter(pk=self.pollrun.pk) self.responses = models.Response.objects.filter(pollrun=self.pollrun) def test_multiple_pollruns_multiple_choice(self): answers = models.Answer.objects.filter(question=self.question1) data, summary_table = charts.multiple_pollruns_multiple_choice( self.pollruns, answers, self.responses, contact_filters={}) self.assertEqual( data['dates'], [self.pollrun.conducted_on.strftime('%Y-%m-%d')]) self.assertEqual(data['series'], [ {'name': '1 - 5', 'data': [{'y': 2, 'url': reverse('polls.pollrun_read', args=[self.pollrun.pk])}]}, {'name': '6 - 10', 'data': [{'y': 1, 'url': reverse('polls.pollrun_read', args=[self.pollrun.pk])}]}, ]) def test_word_cloud_data(self): answers = models.Answer.objects.filter(question=self.question2) data = charts.word_cloud_data(answers) self.assertEqual(data, [ {"text": "rainy", "weight": 3}, {"text": "sunny", "weight": 2}, ]) def test_multiple_pollruns_numeric(self): chart_type, data, summary_table = charts.multiple_pollruns( self.pollruns, self.responses, self.question3, split_regions=False, contact_filters={}) summary_data = dict(summary_table) 
self.assertEqual(chart_type, "numeric") self.assertEqual(data['pollrun-urls'], [ reverse('polls.pollrun_read', args=[self.pollrun.pk]), ]) self.assertEqual(data['participation-urls'], [ reverse('polls.pollrun_participation', args=[self.pollrun.pk]), ]) # Answers are 4, 3 and 8 for a single date # Single item for single date: sum = 4 + 3 + 8 = 15 # URL points to pollrun detail page for this date self.assertEqual(data['sum'], [{ 'name': self.question3.name, 'data': [15.0], }]) # Single item for single date: average = (4 + 3 + 8)/3 = 5 # URL points to pollrun detail page for this date self.assertEqual(data['average'], [{ 'name': self.question3.name, 'data': [5.0], }]) # Set all responses to complete in setUp() # Response rate = 100% # URL points to participation tab self.assertEqual(data['response-rate'], [{ 'name': self.question3.name, 'data': [100.0], }]) # Today's date self.assertEqual( data['dates'], [self.pollrun.conducted_on.strftime('%Y-%m-%d')]) # Mean, Standard Dev, response rate avg, pollrun list self.assertEqual(summary_data['Mean'], 5.0) self.assertEqual(summary_data['Standard deviation'], 0.0) self.assertEqual(summary_data['Response rate average (%)'], 100.0) # Remove an answer, thus changing the response rate. self.response1.answers.get(question=self.question3).delete() chart_type, data, summary_table = charts.multiple_pollruns( self.pollruns, self.responses, self.question3, split_regions=False, contact_filters={}) summary_data = dict(summary_table) self.assertEqual(chart_type, "numeric") # 2 answers of 3 expected - response rate should be 66.7% self.assertEqual(data['response-rate'], [{ 'name': self.question3.name, 'data': [66.7], }]) self.assertEqual(summary_data['Response rate average (%)'], 66.7) def test_multiple_pollruns_numeric_split(self): chart_type, data, summary_table = charts.multiple_pollruns( self.pollruns, self.responses, self.question3, split_regions=True, contact_filters={}) summary_data = dict(summary_table) self.assertEqual(chart_type, "numeric") self.assertEqual(data['pollrun-urls'], [ reverse('polls.pollrun_read', args=[self.pollrun.pk]), ]) self.assertEqual(data['participation-urls'], [ reverse('polls.pollrun_participation', args=[self.pollrun.pk]), ]) self.assertEqual(data['sum'], [ { 'name': "Acme", 'data': [8.0], }, { 'name': "Beta", 'data': [7.0], }, ]) # Single item for single date: average = (4 + 3 + 8)/3 = 5 # URL points to pollrun detail page for this date self.assertEqual(data['average'], [ { 'name': "Acme", 'data': [8.0], }, { 'name': "Beta", 'data': [3.5], }, ]) # Set all responses to complete in setUp() # Response rate = 100% # URL points to participation tab self.assertEqual(data['response-rate'], [ { 'name': "Acme", 'data': [100.0], }, { 'name': "Beta", 'data': [100.0], }, ]) # Today's date self.assertEqual( data['dates'], [self.pollrun.conducted_on.strftime('%Y-%m-%d')]) self.assertEqual(summary_data['Mean'], 5.0) self.assertEqual(summary_data['Standard deviation'], 0.0) self.assertEqual(summary_data['Response rate average (%)'], 100.0) def test_single_pollrun_multiple_choice(self): answers = models.Answer.objects.filter(question=self.question1) data = charts.single_pollrun_multiple_choice(answers, self.pollrun) self.assertEqual( data['data'], [2, 1]) self.assertEqual( data['categories'], ['1 - 5', '6 - 10']) def test_single_pollrun_open(self): chart_type, chart_data, summary_table = charts.single_pollrun( self.pollrun, self.responses, self.question2) self.assertEqual(chart_type, 'open-ended') self.assertEqual(chart_data[0], {'text': 'rainy', 
'weight': 3}) self.assertEqual(len(chart_data), 2) self.assertEqual(summary_table, None) def test_single_pollrun_numeric(self): # Make answers numeric self.question3.question_type = models.Question.TYPE_NUMERIC self.question3.save() # Answers for question 3 = 8, 3 and 4 # Average = 5, Response Rate = 100%, STDEV = 2.2 chart_type, chart_data, summary_table = charts.single_pollrun( self.pollrun, self.responses, self.question3) summary_data = dict(summary_table) self.assertEqual(chart_type, 'bar') self.assertEqual(summary_data['Mean'], 5) self.assertEqual(summary_data['Response rate average (%)'], 100) self.assertEqual(summary_data['Standard deviation'], 2.2) # Results are autocategorized self.assertEqual([2, 1], chart_data['data']) self.assertEqual(2, len(chart_data['categories']))
nilq/baby-python
python
# Copyright 2021 The Distla Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Function to compute the square root and its inverse for a positive definite
matrix. This file covers the distributed case. This function is the interface,
with the work functions defined in `distla_core.linalg.invsqrt_utils`.
"""
from jax import lax

from distla_core.linalg.backends import distributed_backend
from distla_core.linalg.invsqrt import invsqrt_utils


def invsqrt(A, eps=None, maxiter=200, s_min=None, s_thresh=0.1, p_sz=128,
            precision=lax.Precision.HIGHEST):
  """
  Computes the square root and inverse square root of the positive definite
  matrix `A`.

  The method is an iterative one. As explained in Higham's "Stable iterations
  for the matrix square root", 1997, the matrix sign function of the block
  matrix `[[0, A], [I, 0]]` is `[[0, sqrt(A)], [inv(sqrt(A)), 0]]`, and hence
  the same Newton-Schultz iteration that is used for computing the matrix sign
  function (see `polar.py`) can be applied to simultaneously compute `sqrt(A)`
  and `inv(sqrt(A))`.

  The iteration proceeds in two stages. First we repeatedly apply the so
  called "rogue" polynomial
  ```
  Y_{k+1} = a_m * Y_k - 4 * (a_m/3)**3 * Y_k @ Z_k @ Y_k
  Z_{k+1} = a_m * Z_k - 4 * (a_m/3)**3 * Z_k @ Y_k @ Z_k
  ```
  where `a_m = (3 / 2) * sqrt(3) - s_thresh`, and `Y_0 = A` and `Z_0 = I`, to
  bring the eigenvalues of `[[0, Y], [Z, 0]]` to within the range
  `[s_thresh, 1]`. Then we switch to the Newton-Schultz iteration
  ```
  Y_{k+1} = (3 / 2) * Y_k - (1 / 2) * Y_k @ Z_k @ Y_k
  Z_{k+1} = (3 / 2) * Z_k - (1 / 2) * Z_k @ Y_k @ Z_k
  ```
  until convergence.

  Args:
    `A`: The input matrix. Assumed to be positive definite.
    `eps`: The final result will satisfy `|I - Y @ Z| <= eps * |I|`, where `Y`
      and `Z` are the returned approximations to `sqrt(A)` and `inv(sqrt(A))`
      respectively. Machine epsilon by default.
    `maxiter`: Iterations will terminate after this many steps even if the
      above is unsatisfied. 200 by default.
    `s_min`: An underestimate of the smallest eigenvalue of
      `[[0, A], [I, 0]]`. Machine epsilon by default.
    `s_thresh`: The iteration switches from the `rogue` polynomial to the
      Newton-Schultz iterations after `s_min` is estimated to have reached
      this value. 0.1 by default.
    `p_sz`: Panel size for the SUMMA matmuls. 128 by default.
    `precision`: Precision of the matrix multiplications.
  Returns:
    `Y`: approximation to `sqrt(A)`.
    `Z`: approximation to `inv(sqrt(A))`.
    `jr`: The number of 'rogue' iterations.
    `jt`: The total number of iterations.
  """
  # TODO The above description for `s_min` isn't very helpful. How do we
  # understand the connection between eigenvalues of the block matrix, and
  # eigenvalues of A?
  backend = distributed_backend.DistributedBackend(p_sz, precision=precision)
  return invsqrt_utils._invsqrt(A, eps, maxiter, s_min, s_thresh, backend)
nilq/baby-python
python
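A minimal single-device sketch of the coupled Newton-Schultz iteration that the docstring above describes, in plain numpy rather than distla_core's distributed backend. Scaling A by its Frobenius norm is an assumption of this sketch (not necessarily what invsqrt_utils does); it pulls the spectrum into the iteration's basin of convergence:

import numpy as np

def invsqrt_dense(A, maxiter=100, tol=1e-12):
    """Coupled Newton-Schultz iteration: returns (sqrt(A), inv(sqrt(A)))."""
    n = A.shape[0]
    scale = np.linalg.norm(A)   # Frobenius norm; brings eigenvalues of A/scale into (0, 1]
    Y = A / scale
    Z = np.eye(n)
    I = np.eye(n)
    for _ in range(maxiter):
        T = 1.5 * I - 0.5 * (Z @ Y)
        Y, Z = Y @ T, T @ Z
        if np.linalg.norm(I - Z @ Y) <= tol * np.linalg.norm(I):
            break
    # Undo the scaling: sqrt(A) = sqrt(scale) * Y, inv(sqrt(A)) = Z / sqrt(scale).
    return np.sqrt(scale) * Y, Z / np.sqrt(scale)

rng = np.random.default_rng(0)
M = rng.normal(size=(5, 5))
A = M @ M.T + 5 * np.eye(5)     # positive definite test matrix
Y, Z = invsqrt_dense(A)
print(np.allclose(Y @ Y, A), np.allclose(Z @ A @ Z, np.eye(5)))   # True True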
""" Train a DeeProtein-model. """ import argparse import json from DeeProtein import DeeProtein import helpers import os def main(): with open(FLAGS.config_json) as config_fobj: config_dict = json.load(config_fobj) # set the gpu context if not FLAGS.gpu: if config_dict["gpu"] == 'True': config_dict["gpu"] = "False" optionhandler = helpers.OptionHandler(config_dict) model = DeeProtein(optionhandler) model.train(restore_whole=FLAGS.restore_whole, binary=True) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--config_json', type=str, required=True, help='Path to the config.JSON') parser.add_argument( '--restore_whole', type=str, default=True, help='Wheter to restore the whole model including the outlayer ' '(optional). Defaults to True.') parser.add_argument( '--gpu', type=str, default=True, help='Wheter to train in gpu context or not ' '(optional). Defaults to True.') FLAGS, unparsed = parser.parse_known_args() if unparsed: print('Error, unrecognized flags:', unparsed) exit(-1) main()
nilq/baby-python
python
# -*- coding: utf-8 -*-
__author__ = 'Lara Olmos Camarena'

import json
import re


""" utils """

def preprocess_text(text_str):
    regular_expr = re.compile('\n|\r|\t|\(|\)|\[|\]|:|\,|\;|"|\?|\-|\%')
    text_str = re.sub(regular_expr, ' ', text_str)
    token_list = text_str.split(' ')
    token_list = [element for element in token_list if element]
    return ' '.join(token_list)

def sublist(lst1, lst2):
    if len(lst1) > 1:
        res1 = set(lst1)
    else:
        res1 = set(list(lst1))
    res2 = set(lst2)
    return res1 <= res2


""" NER """

NER_TYPES = ['ORGANIZATION', 'PERSON', 'TITLE', 'IDEOLOGY', 'CITY', 'COUNTRY', 'LOCATION', 'NATIONALITY', 'STATE_OR_PROVINCE', 'DATE', 'DURATION', 'TIME', 'PERCENT', 'NUMBER', 'ORDINAL', 'MONEY', 'CAUSE_OF_DEATH', 'CRIMINAL_CHARGE', 'RELIGION']

def load_data(string_element):
    data = {}
    if string_element == '{}' or string_element == '[]':
        return data
    try:
        raw_data = str(string_element).replace("',", '",').replace("['", '["').replace("']", '"]').replace("':", '":').replace("{'", '{"').replace(", '", ', "')
        data = json.loads(raw_data)
    except Exception as e:
        print(e)
        print(raw_data)
    return data

def get_ner_values(ner_dict, specific_ner):
    if ner_dict and specific_ner in ner_dict.keys():
        return list(ner_dict[specific_ner])
    return []

def get_ner_tags(ner_dict):
    if ner_dict:
        return list(ner_dict.keys())
    return []

def get_ner_count(ner_dict, specific_ner):
    if ner_dict and specific_ner in ner_dict.keys():
        return len(ner_dict[specific_ner])
    return 0

def ner_type_answer(element):
    if element in ['CAUSE_OF_DEATH', 'CITY', 'COUNTRY', 'CRIMINAL_CHARGE', 'DATE', 'DURATION', 'IDEOLOGY', 'LOCATION', 'MISC', 'MONEY', 'MULTI', 'NATIONALITY', 'NONE', 'NUMBER', 'ORDINAL', 'ORGANIZATION', 'PERCENT', 'PERSON', 'RELIGION', 'SET', 'STATE_OR_PROVINCE', 'TIME', 'TITLE']:
        return element
    return 'MISC'


""" POS TAGGING """

# ALL TYPES: ['CC', 'CD', 'DT', 'EX', 'FW', 'IN', 'IN', 'JJ', 'JJR', 'JJS', 'MD', 'NN', 'NNS', 'NP', 'NPS', 'PDT', 'POS', 'PP', 'PP', 'RB', 'RBR', 'RBS', 'RP', 'SENT', 'SYM', 'TO', 'UH', 'VB', 'VBD', 'VBG', 'VBN', 'VBZ', 'VBP', 'VD', 'VDD', 'VDG', 'VDN', 'VDZ', 'VDP', 'VH', 'VHD', 'VHG', 'VHN', 'VHZ', 'VHP', 'VV', 'VVD', 'VVG', 'VVN', 'VVP', 'VVZ', 'WDT', 'WP', 'WP', 'WRB', ':', '$']

pattern_pos = r"pos=['\"]('?\w+|``)?[\?,.`\[\]]?['\"]"
reg_expr_pos = re.compile(pattern_pos)

def get_pos(text):
    return [item.replace('pos=', '').replace("'", '') for item in re.findall(reg_expr_pos, text)]

pattern_word = r"word=['\"]([\"'`]?(\w|\.|,|\-)+)?[\?,.\[\]]?['\"]"
reg_expr_word = re.compile(pattern_word)

def get_word_pos(text):
    return [item[0].replace('word=', '').replace("'", '') for item in re.findall(reg_expr_word, text)]

def get_pos_count(pos_list, specific_pos):
    if pos_list and specific_pos in pos_list:
        return pos_list.count(specific_pos)
    return 0

def pral_pos(pos_str):
    if 'NP' in pos_str:
        return 'NP'
    if 'JJ' in pos_str:
        return 'JJ'
    if 'V' in pos_str:
        return 'V'
    if 'R' in pos_str:
        return 'R'
    if 'CD' in pos_str:
        return 'CD'
    if 'NN' in pos_str:
        return 'NN'
    return ''

import treetaggerwrapper
tagger = treetaggerwrapper.TreeTagger(TAGLANG='en', TAGDIR='C:\\Users\\larao_000\\Documents\\nlp\\tree-tagger-windows-3.2.3\\TreeTagger\\')

def pos_tagging(text, max_length=1000):
    results = []
    for i in range(0, len(text), max_length):
        partial_text = text[i:i+max_length]
        tags = tagger.tag_text(partial_text)
        results += treetaggerwrapper.make_tags(tags)
    return results


"""
First noun of the question or, in the case of a Wh-particle, it is automatic:
Where -> place, Who -> person. It does not necessarily appear explicitly in the text.
WDT wh-determiner which; WP wh-pronoun who, what; WP$ possessive wh-pronoun whose; WRB wh-adverb where, when
"""

def wh_query(foco, common_wh=['what', 'who', 'where', 'when', 'why', 'which', 'how', 'in', 'the']):
    if foco in common_wh:
        return foco
    return 'other'

def obtener_foco(query, query_pos):
    candidate_focus = []
    minor_index = []
    if len(query) > 0:
        if (query[0].lower() == 'who') and 'WP' in query_pos:
            candidate_focus.append('person')
        if 'WP$' in query_pos[0]:
            candidate_focus.append('person')
        if (query[0].lower() == 'where') and 'WRB' in query_pos:
            candidate_focus.append('place')
        if (query[0].lower() == 'when') and 'WRB' in query_pos:
            candidate_focus.append('time')
        if sublist(['NN'], query_pos):
            minor_index.append(query_pos.index('NN'))
        if sublist(['NNS'], query_pos):
            minor_index.append(query_pos.index('NNS'))
        if sublist(['NPS'], query_pos):
            minor_index.append(query_pos.index('NPS'))
        if sublist(['NP'], query_pos):
            minor_index.append(query_pos.index('NP'))
        if sublist(['WP'], query_pos) and not sublist(['NN'], query_pos) and not sublist(['NNS'], query_pos) and not sublist(['NP'], query_pos) and not sublist(['NPS'], query_pos):
            if sublist(['VVG'], query_pos):
                minor_index.append(query_pos.index('VVG'))
            if sublist(['JJ'], query_pos):
                minor_index.append(query_pos.index('JJ'))
            if sublist(['VVN'], query_pos):
                minor_index.append(query_pos.index('VVN'))
            if sublist(['VVD'], query_pos):
                minor_index.append(query_pos.index('VVD'))
            if sublist(['RB'], query_pos):
                minor_index.append(query_pos.index('RB'))
        if len(minor_index) > 0 and min(minor_index) < len(query) and min(minor_index) >= 0:
            candidate_focus.append(query[min(minor_index)])
        if ('how much' in ' '.join(query).lower()) or ('how many' in ' '.join(query).lower()):
            candidate_focus.append('quantity')
    if candidate_focus:
        return candidate_focus[0]
    else:
        return ''

def transform_foco(foco, common_focos=['type', 'kind', 'percentage', 'term', 'group', 'language', 'part', 'date', 'word', 'example', 'period', 'event', 'product', 'title', 'ideology', 'religion', 'money']):
    # who, what
    if foco in ['person', 'name', 'people', 'names', 'nationalities', 'nationality']:
        return 'person'
    if foco in ['organization', 'company', 'companies', 'organizations']:
        return 'organization'
    # where
    if foco in ['place', 'country', 'city', 'state', 'province', 'location', 'area', 'region', 'areas', 'locations', 'states', 'cities', 'countries']:
        return 'location'
    # when
    if foco in ['time', 'duration', 'age', 'year', 'month', 'day', 'week', 'hour', 'decade', 'century', 'days', 'years', 'hours', 'ages', 'weeks', 'decades', 'months', 'centuries']:
        return 'time'
    if foco in ['number', 'numbers', 'quantity']:
        return 'number'
    if foco in common_focos:
        return foco
    if foco[-1] == 's' and len(foco) > 2 and foco[:-1] in common_focos:
        return foco[:-1]
    if foco == 'nan' or foco == '':
        return 'other'
    # NN, what, ...
foco_pos = pos_tagging(str(foco)) if foco_pos[0].pos == 'NN' or foco_pos[0].pos == 'NNS': return 'NN' if foco_pos[0].pos == 'NP' or foco_pos[0].pos == 'NPS': return 'NP' return 'other' def validate_foco_ner(foco, ner_query, answer): result = 'KO' foco_pos = get_pos(str(pos_tagging(foco))) if foco_pos: foco_pos = foco_pos[0] if not isinstance(ner_query, list): ner_query = str(ner_query).replace('[','').replace(']','').replace("'", '').split(', ') if ner_query == '[]': result = 'NA' elif ner_query == []: result = 'NA' elif not foco or foco == 'NaN': result = 'NA' elif str(answer)!='' and str(answer)!='NaN' and str(answer)!='[NaN]': if (foco.lower() in ['person','name','people', 'names','nationalities', 'nationality'] or foco_pos in ['NP','NPS']) and sublist(ner_query,['PERSON', 'ORGANIZATION', 'TITLE', 'NATIONALITY']): result = 'OK-PERSON-ORG' if (foco.lower() in ['place', 'country', 'city', 'state', 'province', 'location', 'area', 'region', 'areas','locations','states', 'cities', 'countries'] or foco_pos in ['NP','NPS']) and sublist(ner_query,['CITY', 'COUNTRY', 'LOCATION', 'STATE_OR_PROVINCE']): result = 'OK-LOC' if (foco.lower() in ['time', 'duration', 'age', 'year', 'month', 'day', 'week', 'hour', 'decade', 'century', 'days', 'years', 'hours', 'ages', 'weeks', 'decades', 'months', 'centuries']) and sublist(ner_query,['DATE', 'TIME', 'DURATION', 'NUMBER']): result = 'OK-TIME' if (foco.lower() in ['titles','title','role','roles']) and sublist(ner_query,['TITLE']): result = 'OK-TITLE' if (foco.lower() in ['percentage']) and sublist(ner_query,['PERCENT']): result = 'OK-PERCENT' if (foco.lower() in ['number','numbers','quantity', 'money', 'age', 'percentage'] or foco_pos in ['CD','LS', 'NNS']) and sublist(ner_query,['NUMBER', 'PERCENT', 'MONEY', 'ORDINAL', 'CARDINAL']): result = 'OK-NUMBER' if foco and sublist([foco.upper()], ner_query): result = 'OK-' + foco.upper() elif foco and foco[-1]=='s' and len(foco) > 2 and sublist([foco[:-1].upper()], ner_query): result = 'OK-' + foco[:-1].upper() else: result='NA' return result """ QA """ def load_answer_data(string_element): data = {} if string_element == '{}' or string_element == '[]': return data try: raw_data = str(string_element).replace("',", '",').replace("['", '["').replace("']", '"]').replace("':", '":').replace("{'", '{"').replace(", '", ', "').replace(": '", ': "').replace("'}", '"}').replace(': \\"', ': "').replace('\\"}', '"}') raw_data = raw_data.replace('\\""','"').replace("\\'","'").replace('""', '"\"').replace('""','"') answer_data = re.search(r'"answer": ".*"}', raw_data).group(0).replace('"answer": ', '').replace('"}', '').replace('"', '').replace("'", '').replace("\\",'') raw_data = raw_data[:raw_data.index('answer": ')+len('answer": ')] + '"'+ answer_data + '"}' data = json.loads(raw_data) except Exception as e: print(e) print(raw_data) return data def correct(answer, model_answer, plausible): answer = str(answer).replace("'", '').replace('"', '').replace(',','') model_answer = str(model_answer).replace("'", '').replace('"', '').replace(',','') plausible = str(plausible).replace("'", '').replace('"', '').replace(',','').replace('.','') if answer and model_answer: if answer == model_answer: return True if str(answer).lower().replace('the ', '') == str(model_answer).lower().replace('the ', ''): return True if str(answer).lower() in str(model_answer).lower() or str(model_answer).lower() in str(answer).lower(): return True elif plausible and model_answer: if plausible == model_answer: return True if 
str(plausible).lower().replace('the ', '') == model_answer.lower().replace('the ', ''): return True if str(plausible).lower() in str(model_answer).lower() or str(model_answer).lower() in str(plausible).lower(): return True elif answer == '' and model_answer == '': return True return False def correct_medium(answer, model_answer, plausible): answer = answer.replace("'", '').replace('"', '').replace(',','') model_answer = model_answer.replace("'", '').replace('"', '').replace(',','') plausible = plausible.replace("'", '').replace('"', '').replace(',','') if answer and model_answer: if answer == model_answer: return 'FACIL' if str(answer).lower().replace('the ', '') == str(model_answer).lower().replace('the ', ''): return 'FACIL' if str(answer).lower() in str(model_answer).lower() or str(model_answer).lower() in str(answer).lower(): return 'MEDIA' elif plausible and model_answer: if plausible == model_answer: return 'FACIL' if str(plausible).lower().replace('the ', '') == model_answer.lower().replace('the ', ''): return 'FACIL' if str(plausible).lower() in str(model_answer).lower() or str(model_answer).lower() in str(plausible).lower(): return 'MEDIA' return 'DIFICIL'
nilq/baby-python
python
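A small illustration of the focus-extraction helpers above, with a hand-tagged POS sequence. The TreeTagger tags are assumed here rather than computed, to sidestep the TAGDIR dependency; the functions obtener_foco and transform_foco from the file above are assumed to be in scope:

query = ['who', 'wrote', 'the', 'book']
query_pos = ['WP', 'VVD', 'DT', 'NN']

foco = obtener_foco(query, query_pos)   # 'person': wh-word 'who' carries a WP tag
print(foco, transform_foco(foco))       # -> person person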
import importlib
from time import perf_counter   # time.clock() was removed in Python 3.8

for i in range(2):
    name = input("Enter module to import")
    # bind the imported module so its functions can be called
    module = importlib.import_module(name)

    start = perf_counter()
    print(module.iterativefact(27))
    end = perf_counter()
    elapsed = end - start
    print(elapsed)

    start = perf_counter()
    print(module.recursivefactorial(27))
    end = perf_counter()
    elapsed = end - start
    print(elapsed)
nilq/baby-python
python
# -*- encoding: utf-8 -*-
"""
Boolean type comparator used to match booleans

Comparators are used by the Audit module to compare module output
with the expected result
In FDG-connector, comparators might also be used with FDG

Boolean comparator exposes various commands:

- "match"
    Use Cases:
    - To check a boolean value against boolean true/false
    - To check whether we got anything (boolean_cast)
      i.e. True for anything, False for None or empty string

    example:
        comparator:
            type: boolean
            match: True
            boolean_cast: False # Optional param
"""
import logging

log = logging.getLogger(__name__)


def match(audit_id, result_to_compare, args):
    """
    Match against a boolean

    match: True

    :param audit_id:
        The id of the check being run.
    :param result_to_compare:
        The value to compare.
    :param args:
        Comparator boolean as mentioned in the check.
    """
    log.debug('Running boolean::match for check: {0}'.format(audit_id))

    # if result_to_compare is not of boolean type, but we want a type-cast
    boolean_cast = args.get('boolean_cast', False)
    value_to_compare = result_to_compare
    if boolean_cast:
        value_to_compare = bool(value_to_compare)

    if value_to_compare == args['match']:
        return True, "Check Passed"
    return False, "boolean::match failure. Expected={0} Got={1}".format(str(args['match']), result_to_compare)
nilq/baby-python
python
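A direct call of the match command above; the audit id strings are arbitrary:

status, message = match('test-check-1', True, {'match': True})
print(status, message)   # -> True Check Passed

# With boolean_cast, any non-empty value passes a match: True check.
status, message = match('test-check-2', 'some output', {'match': True, 'boolean_cast': True})
print(status, message)   # -> True Check Passed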
'''MobileNet in PyTorch. See the paper "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications" for more details. ''' import math import torch import torch.nn as nn import torch.nn.functional as F import os, sys project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, project_root) from butterfly import Butterfly from cnn.models.low_rank_conv import LowRankConv2d def _make_divisible(v, divisor, min_value=None): """ This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: :return: """ if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_v < 0.9 * v: new_v += divisor return new_v class Butterfly1x1Conv(Butterfly): """Product of log N butterfly factors, each is a block 2x2 of diagonal matrices. """ def forward(self, input): """ Parameters: input: (batch, c, h, w) if real or (batch, c, h, w, 2) if complex Return: output: (batch, nstack * c, h, w) if real or (batch, nstack * c, h, w, 2) if complex """ batch, c, h, w = input.shape input_reshape = input.view(batch, c, h * w).transpose(1, 2).reshape(-1, c) output = super().forward(input_reshape) return output.view(batch, h * w, self.nstack * c).transpose(1, 2).view(batch, self.nstack * c, h, w) class Block(nn.Module): '''Depthwise conv + Pointwise conv''' def __init__(self, in_planes, out_planes, stride=1, structure='D'): super(Block, self).__init__() self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False) self.conv1.weight._no_wd = True self.bn1 = nn.BatchNorm2d(in_planes) self.bn1.weight._no_wd = True self.bn1.bias._no_wd = True if structure == 'D': self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False) elif structure.startswith('LR'): odo_nblocks = int(structure.split('_')[1]) rank = int(odo_nblocks * math.log2(out_planes) / 2) self.conv2 = LowRankConv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False, rank=rank) else: param = structure.split('_')[0] nblocks = 0 if len(structure.split('_')) <= 1 else int(structure.split('_')[1]) self.residual = False if len(structure.split('_')) <= 2 else (structure.split('_')[2] == 'res') # self.residual = self.residual and in_planes == out_planes self.conv2 = Butterfly1x1Conv(in_planes, out_planes, bias=False, tied_weight=False, ortho_init=True, param=param, nblocks=nblocks) self.bn2 = nn.BatchNorm2d(out_planes) self.bn2.weight._no_wd = True self.bn2.bias._no_wd = True def forward(self, x): out = F.relu(self.bn1(self.conv1(x)), inplace=True) if not getattr(self, 'residual', False): out = F.relu(self.bn2(self.conv2(out)), inplace=True) else: prev = out out = self.conv2(out) if out.shape[1] == 2 * prev.shape[1]: b, c, h, w = prev.shape out = (out.reshape(b, 2, c, h, w) + prev.reshape(b, 1, c, h, w)).reshape(b, 2 * c, h, w) else: out = out + prev out = F.relu(self.bn2(out), inplace=True) return out class MobileNet(nn.Module): # (128,2) means conv planes=128, conv stride=2, by default conv stride=1 cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024] def __init__(self, num_classes=1000, width_mult=1.0, round_nearest=8, structure=None, 
softmax_structure='D', sm_pooling=1): """ structure: list of string """ super(MobileNet, self).__init__() self.width_mult = width_mult self.round_nearest = round_nearest self.structure = [] if structure is None else structure self.n_structure_layer = len(self.structure) self.structure = ['D'] * (len(self.cfg) - self.n_structure_layer) + self.structure self.sm_pooling = sm_pooling input_channel = _make_divisible(32 * width_mult, round_nearest) self.conv1 = nn.Conv2d(3, input_channel, kernel_size=3, stride=2, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(input_channel) self.bn1.weight._no_wd = True self.bn1.bias._no_wd = True self.layers = self._make_layers(in_planes=input_channel) self.last_channel = _make_divisible(1024 * width_mult // sm_pooling, round_nearest) if softmax_structure == 'D': self.linear = nn.Linear(self.last_channel, num_classes) else: param = softmax_structure.split('_')[0] nblocks = 0 if len(softmax_structure.split('_')) <= 1 else int(softmax_structure.split('_')[1]) self.linear = Butterfly(self.last_channel, num_classes, tied_weight=False, ortho_init=True, param=param, nblocks=nblocks) def _make_layers(self, in_planes): layers = [] for x, struct in zip(self.cfg, self.structure): out_planes = _make_divisible((x if isinstance(x, int) else x[0]) * self.width_mult, self.round_nearest) stride = 1 if isinstance(x, int) else x[1] layers.append(Block(in_planes, out_planes, stride, structure=struct)) in_planes = out_planes return nn.Sequential(*layers) def forward(self, x): out = F.relu(self.bn1(self.conv1(x)), inplace=True) out = self.layers(out) out = out.mean([2, 3]) if self.sm_pooling != 1: b, n = out.shape out = out.reshape(b, self.sm_pooling, n // self.sm_pooling).mean(1) out = self.linear(out) return out def mixed_model_state_dict(self, full_model_path, distilled_param_path): current_state_dict_keys = self.state_dict().keys() full_model_state_dict = torch.load(full_model_path, map_location='cpu')['state_dict'] full_model_state_dict = {name.replace('module.', ''): param for name, param in full_model_state_dict.items()} distilled_params = torch.load(distilled_param_path, map_location='cpu') state_dict = {name: param for name, param in full_model_state_dict.items() if name in current_state_dict_keys} for i, struct in enumerate(self.structure): # Only support butterfly for now if struct.startswith('odo') or struct.startswith('regular'): layer = f'layers.{i}.conv2' nblocks = int(struct.split('_')[1]) structured_param = distilled_params[layer, nblocks] state_dict.update({layer + '.' + name: param for name, param in structured_param.items()}) return state_dict def test(): net = MobileNet() x = torch.randn(1,3,32,32) y = net(x) print(y.size()) # test()
nilq/baby-python
python
import numpy as np import tensorflow as tf import scipy.signal def add_histogram(writer, tag, values, step, bins=1000): """ Logs the histogram of a list/vector of values. From: https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514 """ # Create histogram using numpy counts, bin_edges = np.histogram(values, bins=bins) # Fill fields of histogram proto hist = tf.HistogramProto() hist.min = float(np.min(values)) hist.max = float(np.max(values)) hist.num = int(np.prod(values.shape)) hist.sum = float(np.sum(values)) hist.sum_squares = float(np.sum(values ** 2)) # Requires equal number as bins, where the first goes from -DBL_MAX to bin_edges[1] # See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30 # Therefore we drop the start of the first bin bin_edges = bin_edges[1:] # Add bin edges and counts for edge in bin_edges: hist.bucket_limit.append(edge) for c in counts: hist.bucket.append(c) # Create and write Summary summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)]) writer.add_summary(summary, step) def discount(x, gamma, terminal_array=None): if terminal_array is None: return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1] else: y, adv = 0, [] terminals_reversed = terminal_array[1:][::-1] for step, dt in enumerate(reversed(x)): y = dt + gamma * y * (1 - terminals_reversed[step]) adv.append(y) return np.array(adv)[::-1] class RunningStats(object): # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm # https://github.com/openai/baselines/blob/master/baselines/common/running_mean_std.py def __init__(self, epsilon=1e-4, shape=()): self.mean = np.zeros(shape, 'float64') self.var = np.ones(shape, 'float64') self.std = np.ones(shape, 'float64') self.count = epsilon def update(self, x): batch_mean = np.mean(x, axis=0) batch_var = np.var(x, axis=0) batch_count = x.shape[0] self.update_from_moments(batch_mean, batch_var, batch_count) def update_from_moments(self, batch_mean, batch_var, batch_count): delta = batch_mean - self.mean new_mean = self.mean + delta * batch_count / (self.count + batch_count) m_a = self.var * self.count m_b = batch_var * batch_count M2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count) new_var = M2 / (self.count + batch_count) self.mean = new_mean self.var = new_var self.std = np.maximum(np.sqrt(self.var), 1e-6) self.count = batch_count + self.count def lstm_state_combine(state): return np.reshape([s[0] for s in state], (len(state), -1)), \ np.reshape([s[1] for s in state], (len(state), -1))
nilq/baby-python
python
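A quick check of discount() above; with a terminal flag the running return resets at episode boundaries. The reward values here are made up, and note that terminal_array needs one more entry than the rewards because the function slices terminal_array[1:]:

import numpy as np

rewards = np.array([1.0, 1.0, 1.0, 1.0])
terminals = np.array([0, 0, 1, 0, 0])

print(discount(rewards, 0.9))              # -> [3.439 2.71  1.9   1.   ]
print(discount(rewards, 0.9, terminals))   # -> [1.9 1.  1.9 1. ], reset across the terminal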
# See documentation in: # http://doc.scrapy.org/topics/items.html from scrapy.item import Item, Field class Author(Item): name = Field() profile_url = Field() avatar_url = Field() class BlogAuthor(Author): pass class CommentAuthor(Author): pass class Post(Item): author = Field() title = Field(default="") content = Field() posted = Field() origin_url = Field() class BlogPost(Post): tags = Field(default=[]) comments = Field(default=[]) class CommentPost(Post): pass
nilq/baby-python
python
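Instantiating the items above works like any scrapy Item: keyword construction plus dict-style field access. The values here are placeholders:

post = BlogPost(title='Hello', content='...', posted='2020-01-01')
post['author'] = BlogAuthor(name='Jane Doe', profile_url='https://example.com/jane')
post['tags'] = ['python', 'scrapy']
print(post['title'], post['author']['name'])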
import xml.etree.ElementTree as ET import fnmatch import matplotlib.pyplot as plt #rootDir = '/Users/yanzhexu/Desktop/Research/GBM/aCGH_whole_tumor_maps_for_Neuro-Onc_dataset/CEFSL_slices_only/slice22/ROI for +C_3D_AXIAL_IRSPGR_Fast_IM-0005-0022.xml' # draw ROI from coordinates in XML file def ParseXMLDrawROI(rootDir): tree = ET.parse(rootDir) root = tree.getroot() childnum = 0 xcoordlist = list() ycoordlist = list() xycoordlist = list() for child in root.iter('string'): if not fnmatch.fnmatch(child.text,'*{*}*'): continue childnum+=1 #print child.text #xycoord = list() xcoords = str(child.text).split(',')[0] ycoords = str(child.text).split(',')[1] xc = float(xcoords.split('{')[1]) yc = float(ycoords.split('}')[0].replace(' ','')) # xycoord.append(xc) # xycoord.append(yc) # xycoordlist.append(xycoord) xcoordlist.append(xc) ycoordlist.append(yc) xcoordlist.append(xcoordlist[0]) ycoordlist.append(ycoordlist[0]) # print childnum # print xcoordlist # print ycoordlist plt.plot(xcoordlist,ycoordlist,'b') #plt.show() # print xycoordlist
nilq/baby-python
python
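Typical use of ParseXMLDrawROI: point it at one of the ROI XML exports and then show the accumulated plot (the path below is a placeholder):

from matplotlib import pyplot as plt

ParseXMLDrawROI('/path/to/ROI_slice22.xml')   # hypothetical ROI XML file
plt.show()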
word = 'Bye' phrase = word * 3 + '!' print(phrase) name = input() print('I love', name)
nilq/baby-python
python
# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>

from PoPs import IDs as IDsPoPsModule
from PoPs.groups import misc as chemicalElementMiscPoPsModule

from fudge.channelData import Q as QModule

from brownies.legacy.endl import misc as miscENDLModule

endfMATBases = {
     0 : 1,    1 : 1,    2 : 3,    3 : 6,    4 : 9,    5 : 10,   6 : 12,   7 : 14,   8 : 16,   9 : 19,
    10 : 20,  11 : 23,  12 : 24,  13 : 27,  14 : 28,  15 : 31,  16 : 32,  17 : 35,  18 : 36,  19 : 39,
    20 : 40,  21 : 45,  22 : 46,  23 : 50,  24 : 50,  25 : 55,  26 : 54,  27 : 59,  28 : 58,  29 : 63,
    30 : 64,  31 : 69,  32 : 70,  33 : 75,  34 : 74,  35 : 79,  36 : 78,  37 : 85,  38 : 84,  39 : 89,
    40 : 90,  41 : 93,  42 : 92,  43 : 99,  44 : 96,  45 : 103, 46 : 102, 47 : 107, 48 : 106, 49 : 113,
    50 : 112, 51 : 121, 52 : 120, 53 : 127, 54 : 124, 55 : 133, 56 : 130, 57 : 138, 58 : 136, 59 : 141,
    60 : 142, 61 : 139, 62 : 144, 63 : 151, 64 : 152, 65 : 159, 66 : 156, 67 : 165, 68 : 162, 69 : 169,
    70 : 168, 71 : 175, 72 : 174, 73 : 180, 74 : 180, 75 : 185, 76 : 184, 77 : 191, 78 : 190, 79 : 197,
    80 : 196, 81 : 203, 82 : 204, 83 : 209, 84 : 206, 85 : -1,  86 : -1,  87 : -1,  88 : 223, 89 : 225,
    90 : 227, 91 : 229, 92 : 234, 93 : 230, 94 : 235, 95 : 235, 96 : 240, 97 : 240, 98 : 240, 99 : 265,
    100 : 244 }

def endfZAPFromMT( MT ) :
    """This function identifies the outgoing particle (i.e., ZAP) from the MT number. The outgoing particle is
    almost always a neutron, except in a few cases."""

    if( ( ( MT >= 600 ) and ( MT <= 649 ) ) or ( MT == 103 ) ) :        # proton
        return( "H1" )
    elif( ( ( MT >= 650 ) and ( MT <= 699 ) ) or ( MT == 104 ) ) :      # deuteron
        return( "H2" )
    elif( ( ( MT >= 700 ) and ( MT <= 749 ) ) or ( MT == 105 ) ) :      # triton
        return( "H3" )
    elif( ( ( MT >= 750 ) and ( MT <= 799 ) ) or ( MT == 106 ) ) :      # helium-3
        return( "He3" )
    elif( ( ( MT >= 800 ) and ( MT <= 849 ) ) or ( MT == 107 ) ) :      # helium-4
        return( "He4" )
    return( "n" )                                                       # neutron

def ZAAndMATFromParticleName( particleName ) :

    Z, A, suffix, ZA = miscENDLModule.getZ_A_suffix_andZAFromName( particleName )
    m = 0
    if( len( suffix ) ) :
        if( suffix[0] == 'm' ) : m = int( suffix[1:] )
    if( m > 2 ) : raise Exception( 'Unsupported ENDF MAT for particle = %s' % particleName )
    if( A == 0 ) :
        MAT = 100 * Z
        if( Z == 100 ) : MAT = 9920                 # Special case for 100_Fm_000.
else : Zp, AJumps = Z, 3 if( Z >= 99 ) : Zp, AJumps = 99, 1 MATBases = endfMATBases[Z] if( MATBases < 0 ) : MATBases = { 85 : 210, 86 : 211, 87 : 212 }[Z] MAT = 100 * Zp + 25 + AJumps * ( A - MATBases ) + m # Kludge for Es254_m1 (MAT logic doesn't allow for isomers above Z=98, so Es254_m1 takes what should # be the Es255 MAT): if Z==99 and A>=255: MAT += 1 return( ZA, MAT ) def getParticleNameFromMAT( MAT ): Z, MATstuff = divmod( MAT, 100 ) nStable, nIsomer = divmod( (MATstuff-25), 3 ) A = endfMATBases[Z] + nStable name = chemicalElementMiscPoPsModule.idFromZAndA( Z, A ) if( nIsomer ) : name += '_m%d' % nIsomer return( name ) class endfMTtoC_ProductList : def __init__( self, C, reactionLabel, isFission = 0, ns = 0, H1s = 0, H2s = 0, H3s = 0, He3s = 0, He4s = 0, gammas = 0, residualLevel = None ) : self.C = C self.residualLevel = residualLevel self.reactionLabel = reactionLabel self.isFission = isFission self.productCounts = { 'n' : ns, 'H1' : H1s, 'H2' : H2s, 'H3' : H3s, 'He3' : He3s, 'He4' : He4s, IDsPoPsModule.photon : gammas } def __getitem__( self, product ) : return( self.productCounts[product] ) def __repr__( self ) : s = '' for p in [ 'n', 'H1', 'H2', 'H3', 'He3', 'He4', IDsPoPsModule.photon ] : if( self.productCounts[p] != 0 ) : s += " %5s = %d:" % ( p, self.productCounts[p] ) s = "C = %s: isFission = %5s:%s --- %s" % ( self.C, self.isFission != 0, s, self.reactionLabel ) return( s ) def endfMTtoC_ProductList_excitedStateInitializer( list, MTGround, MTContinuum, C, label, ns = 0, H1s = 0, H2s = 0, H3s = 0, He3s = 0, He4s = 0, gammas = 0 ) : levelSuffixes = [ "", "st", "nd", "rd" ] list[MTGround] = endfMTtoC_ProductList( C, "(z,%s[0]) -- to ground state" % label, 0, ns, H1s, H2s, H3s, He3s, He4s, gammas, 0 ) for idx in range( MTGround + 1, MTContinuum ) : level = idx - MTGround try : levelSuffix = levelSuffixes[level] except : levelSuffix = "th" list[idx] = endfMTtoC_ProductList( C, "(z,%s[%d]) -- to %d%s excited state" % ( label, level, level, levelSuffix ), 0, ns, H1s, H2s, H3s, He3s, He4s, gammas, level ) list[MTContinuum] = endfMTtoC_ProductList( C, "(z,%s[c]) -- excitation to continuum" % label, 0, ns, H1s, H2s, H3s, He3s, He4s, gammas, 'c' ) endfMTtoC_ProductLists = {} endfMTtoC_ProductLists[1] = endfMTtoC_ProductList( 1, "(n,total)" ) endfMTtoC_ProductLists[2] = endfMTtoC_ProductList( 10, "(z,elastic)" ) endfMTtoC_ProductLists[3] = endfMTtoC_ProductList( 55, "(z,non-elastic)" ) endfMTtoC_ProductLists[4] = endfMTtoC_ProductList( 11, "(z,n)", 0, 1, 0, 0, 0, 0, 0, -1 ) endfMTtoC_ProductLists[5] = endfMTtoC_ProductList( 5, "(z,anything)" ) endfMTtoC_ProductLists[10] = endfMTtoC_ProductList( -1, "(z,continuum)" ) endfMTtoC_ProductLists[11] = endfMTtoC_ProductList( 32, "(n,2nd)", 0, 2, 0, 1, 0, 0, 0, 0 ) endfMTtoC_ProductLists[16] = endfMTtoC_ProductList( 12, "(z,2n)", 0, 2, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[17] = endfMTtoC_ProductList( 13, "(z,3n)", 0, 3, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[18] = endfMTtoC_ProductList( 15, "(z,f)", -1, 0, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[19] = endfMTtoC_ProductList( -1, "(n,f) -- 1st chance fission.", -1, 0, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[20] = endfMTtoC_ProductList( -1, "(n,nf) -- 2nd chance fission.", -1, 1, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[21] = endfMTtoC_ProductList( -1, "(n,2nf) -- 3rd chance fission.", -1, 2, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[22] = endfMTtoC_ProductList( 26, "(z,na)", 0, 1, 0, 0, 0, 0, 1, 0 ) endfMTtoC_ProductLists[23] = endfMTtoC_ProductList( 36, "(z,n3a)", 0, 1, 0, 0, 0, 
0, 3, 0 )
endfMTtoC_ProductLists[24] = endfMTtoC_ProductList( 33, "(z,2na)", 0, 2, 0, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[25] = endfMTtoC_ProductList( 16, "(z,3na)", 0, 3, 0, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[27] = endfMTtoC_ProductList( -1, "(n,abs)" )
endfMTtoC_ProductLists[28] = endfMTtoC_ProductList( 20, "(z,np)", 0, 1, 1, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[29] = endfMTtoC_ProductList( 27, "(z,n2a)", 0, 1, 0, 0, 0, 0, 2, 0 )
endfMTtoC_ProductLists[30] = endfMTtoC_ProductList( -1, "(z,2n2a)", 0, 2, 0, 0, 0, 0, 2, 0 )
endfMTtoC_ProductLists[32] = endfMTtoC_ProductList( 22, "(z,nd)", 0, 1, 0, 1, 0, 0, 0, 0 )
endfMTtoC_ProductLists[33] = endfMTtoC_ProductList( 24, "(z,nt)", 0, 1, 0, 0, 1, 0, 0, 0 )
endfMTtoC_ProductLists[34] = endfMTtoC_ProductList( 25, "(z,nH)", 0, 1, 0, 0, 0, 1, 0, 0 )
endfMTtoC_ProductLists[35] = endfMTtoC_ProductList( -1, "(z,nd2a)", 0, 1, 0, 1, 0, 0, 2, 0 )
endfMTtoC_ProductLists[36] = endfMTtoC_ProductList( -1, "(z,nt2a)", 0, 1, 0, 0, 1, 0, 2, 0 )
endfMTtoC_ProductLists[37] = endfMTtoC_ProductList( 14, "(z,4n)", 0, 4, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[38] = endfMTtoC_ProductList( -1, "(n,3nf) -- 4th chance fission.", -1, 3, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[41] = endfMTtoC_ProductList( 29, "(z,2np)", 0, 2, 1, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[42] = endfMTtoC_ProductList( 16, "(z,3np)", 0, 3, 1, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[44] = endfMTtoC_ProductList( 17, "(n,n2p)", 0, 1, 2, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[45] = endfMTtoC_ProductList( 34, "(n,npa)", 0, 1, 1, 0, 0, 0, 1, 0 )
endfMTtoC_ProductList_excitedStateInitializer( endfMTtoC_ProductLists, 50, 91, 11, "n", ns = 1 )
endfMTtoC_ProductLists[101] = endfMTtoC_ProductList( -1, "(n,disappearance)" )
endfMTtoC_ProductLists[102] = endfMTtoC_ProductList( 46, "(z,g)", 0, 0, 0, 0, 0, 0, 0, 1 )
endfMTtoC_ProductLists[103] = endfMTtoC_ProductList( 40, "(z,p)", 0, 0, 1, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[104] = endfMTtoC_ProductList( 41, "(z,d)", 0, 0, 0, 1, 0, 0, 0, 0 )
endfMTtoC_ProductLists[105] = endfMTtoC_ProductList( 42, "(z,t)", 0, 0, 0, 0, 1, 0, 0, 0 )
endfMTtoC_ProductLists[106] = endfMTtoC_ProductList( 44, "(z,H)", 0, 0, 0, 0, 0, 1, 0, 0 )
endfMTtoC_ProductLists[107] = endfMTtoC_ProductList( 45, "(z,a)", 0, 0, 0, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[108] = endfMTtoC_ProductList( 37, "(z,2a)", 0, 0, 0, 0, 0, 0, 2, 0 )
endfMTtoC_ProductLists[109] = endfMTtoC_ProductList( -1, "(z,3a)", 0, 0, 0, 0, 0, 0, 3, 0 )
endfMTtoC_ProductLists[111] = endfMTtoC_ProductList( 18, "(z,2p)", 0, 0, 2, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[112] = endfMTtoC_ProductList( 48, "(z,pa)", 0, 0, 1, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[113] = endfMTtoC_ProductList( 42, "(z,t2a)", 0, 0, 0, 0, 1, 0, 2, 0 )
endfMTtoC_ProductLists[114] = endfMTtoC_ProductList( -1, "(z,d2a)", 0, 0, 0, 1, 0, 0, 2, 0 )
endfMTtoC_ProductLists[115] = endfMTtoC_ProductList( 19, "(z,pd)", 0, 0, 1, 1, 0, 0, 0, 0 )
endfMTtoC_ProductLists[116] = endfMTtoC_ProductList( 39, "(z,pt)", 0, 0, 1, 0, 1, 0, 0, 0 )
endfMTtoC_ProductLists[117] = endfMTtoC_ProductList( 47, "(z,da)", 0, 0, 0, 1, 0, 0, 1, 0 )
endfMTtoC_ProductLists[151] = endfMTtoC_ProductList( -1, "(n,resonance)" )
# cmattoon September 2011, additional MT #s defined by CSEWG in 2010:
endfMTtoC_ProductLists[152] = endfMTtoC_ProductList( -1, "(z,5n)", 0, 5, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[153] = endfMTtoC_ProductList( -1, "(z,6n)", 0, 6, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[154] = endfMTtoC_ProductList( -1, "(z,2nt)", 0, 2, 0, 0, 1, 0, 0, 0 )
endfMTtoC_ProductLists[155] = endfMTtoC_ProductList( 43, "(z,ta)", 0, 0, 0, 0, 1, 0, 1, 0 ) endfMTtoC_ProductLists[156] = endfMTtoC_ProductList( -1, "(z,4np)", 0, 4, 1, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[157] = endfMTtoC_ProductList( -1, "(z,3nd)", 0, 3, 0, 1, 0, 0, 0, 0 ) endfMTtoC_ProductLists[158] = endfMTtoC_ProductList( 23, "(z,nda)", 0, 1, 0, 1, 0, 0, 1, 0 ) endfMTtoC_ProductLists[159] = endfMTtoC_ProductList( 31, "(z,2npa)", 0, 2, 1, 0, 0, 0, 1, 0 ) endfMTtoC_ProductLists[160] = endfMTtoC_ProductList( -1, "(z,7n)", 0, 7, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[161] = endfMTtoC_ProductList( -1, "(z,8n)", 0, 8, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[162] = endfMTtoC_ProductList( -1, "(z,5np)", 0, 5, 1, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[163] = endfMTtoC_ProductList( -1, "(z,6np)", 0, 6, 1, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[164] = endfMTtoC_ProductList( -1, "(z,7np)", 0, 7, 1, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[165] = endfMTtoC_ProductList( -1, "(z,4na)", 0, 4, 0, 0, 0, 0, 1, 0 ) endfMTtoC_ProductLists[166] = endfMTtoC_ProductList( -1, "(z,5na)", 0, 5, 0, 0, 0, 0, 1, 0 ) endfMTtoC_ProductLists[167] = endfMTtoC_ProductList( -1, "(z,6na)", 0, 6, 0, 0, 0, 0, 1, 0 ) endfMTtoC_ProductLists[168] = endfMTtoC_ProductList( -1, "(z,7na)", 0, 7, 0, 0, 0, 0, 1, 0 ) endfMTtoC_ProductLists[169] = endfMTtoC_ProductList( -1, "(z,4nd)", 0, 4, 0, 1, 0, 0, 0, 0 ) endfMTtoC_ProductLists[170] = endfMTtoC_ProductList( -1, "(z,5nd)", 0, 5, 0, 1, 0, 0, 0, 0 ) endfMTtoC_ProductLists[171] = endfMTtoC_ProductList( -1, "(z,6nd)", 0, 6, 0, 1, 0, 0, 0, 0 ) endfMTtoC_ProductLists[172] = endfMTtoC_ProductList( -1, "(z,3nt)", 0, 3, 0, 0, 1, 0, 0, 0 ) endfMTtoC_ProductLists[173] = endfMTtoC_ProductList( -1, "(z,4nt)", 0, 4, 0, 0, 1, 0, 0, 0 ) endfMTtoC_ProductLists[174] = endfMTtoC_ProductList( -1, "(z,5nt)", 0, 5, 0, 0, 1, 0, 0, 0 ) endfMTtoC_ProductLists[175] = endfMTtoC_ProductList( -1, "(z,6nt)", 0, 6, 0, 0, 1, 0, 0, 0 ) endfMTtoC_ProductLists[176] = endfMTtoC_ProductList( -1, "(z,2nH)", 0, 2, 0, 0, 0, 1, 0, 0 ) endfMTtoC_ProductLists[177] = endfMTtoC_ProductList( -1, "(z,3nH)", 0, 3, 0, 0, 0, 1, 0, 0 ) endfMTtoC_ProductLists[178] = endfMTtoC_ProductList( -1, "(z,4nH)", 0, 4, 0, 0, 0, 1, 0, 0 ) endfMTtoC_ProductLists[179] = endfMTtoC_ProductList( -1, "(z,3n2p)", 0, 3, 2, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[180] = endfMTtoC_ProductList( -1, "(z,3n2a)", 0, 3, 0, 0, 0, 0, 2, 0 ) endfMTtoC_ProductLists[181] = endfMTtoC_ProductList( -1, "(z,3npa)", 0, 3, 1, 0, 0, 0, 1, 0 ) endfMTtoC_ProductLists[182] = endfMTtoC_ProductList( -1, "(z,dt)", 0, 0, 0, 1, 1, 0, 0, 0 ) endfMTtoC_ProductLists[183] = endfMTtoC_ProductList( -1, "(z,npd)", 0, 1, 1, 1, 0, 0, 0, 0 ) endfMTtoC_ProductLists[184] = endfMTtoC_ProductList( -1, "(z,npt)", 0, 1, 1, 0, 1, 0, 0, 0 ) endfMTtoC_ProductLists[185] = endfMTtoC_ProductList( -1, "(z,ndt)", 0, 1, 0, 1, 1, 0, 0, 0 ) endfMTtoC_ProductLists[186] = endfMTtoC_ProductList( -1, "(z,npH)", 0, 1, 1, 0, 0, 1, 0, 0 ) endfMTtoC_ProductLists[187] = endfMTtoC_ProductList( -1, "(z,ndH)", 0, 1, 0, 1, 0, 1, 0, 0 ) endfMTtoC_ProductLists[188] = endfMTtoC_ProductList( -1, "(z,ntH)", 0, 1, 0, 0, 1, 1, 0, 0 ) endfMTtoC_ProductLists[189] = endfMTtoC_ProductList( 28, "(z,nta)", 0, 1, 0, 0, 1, 0, 1, 0 ) endfMTtoC_ProductLists[190] = endfMTtoC_ProductList( -1, "(z,2n2p)", 0, 2, 2, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[191] = endfMTtoC_ProductList( -1, "(z,pH)", 0, 0, 1, 0, 0, 1, 0, 0 ) endfMTtoC_ProductLists[192] = endfMTtoC_ProductList( -1, "(z,dH)", 0, 0, 0, 1, 0, 1, 0, 0 ) 
endfMTtoC_ProductLists[193] = endfMTtoC_ProductList( 38, "(z,Ha)", 0, 0, 0, 0, 0, 1, 1, 0 )
endfMTtoC_ProductLists[194] = endfMTtoC_ProductList( -1, "(z,4n2p)", 0, 4, 2, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[195] = endfMTtoC_ProductList( -1, "(z,4n2a)", 0, 4, 0, 0, 0, 0, 2, 0 )
endfMTtoC_ProductLists[196] = endfMTtoC_ProductList( -1, "(z,4npa)", 0, 4, 1, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[197] = endfMTtoC_ProductList( -1, "(z,3p)", 0, 0, 3, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[198] = endfMTtoC_ProductList( -1, "(z,n3p)", 0, 1, 3, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[199] = endfMTtoC_ProductList( -1, "(z,3n2pa)", 0, 3, 2, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[200] = endfMTtoC_ProductList( -1, "(z,5n2p)", 0, 5, 2, 0, 0, 0, 0, 0 )
# end of new MT #s
endfMTtoC_ProductLists[201] = endfMTtoC_ProductList( -1, "(z,Xn)", 0, -1, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[202] = endfMTtoC_ProductList( -1, "(z,Xg)", 0, 0, 0, 0, 0, 0, 0, -1 )
endfMTtoC_ProductLists[203] = endfMTtoC_ProductList( 50, "(z,Xp)", 0, 0, -1, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[204] = endfMTtoC_ProductList( 51, "(z,Xd)", 0, 0, 0, -1, 0, 0, 0, 0 )
endfMTtoC_ProductLists[205] = endfMTtoC_ProductList( 52, "(z,Xt)", 0, 0, 0, 0, -1, 0, 0, 0 )
endfMTtoC_ProductLists[206] = endfMTtoC_ProductList( 53, "(z,XH)", 0, 0, 0, 0, 0, -1, 0, 0 )
endfMTtoC_ProductLists[207] = endfMTtoC_ProductList( 54, "(z,Xa)", 0, 0, 0, 0, 0, 0, -1, 0 )
endfMTtoC_ProductLists[208] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[209] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[210] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[211] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[212] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[213] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[214] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[215] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[216] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[217] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[251] = endfMTtoC_ProductList( -1, "Various elastic neutron scattering parameters." )
endfMTtoC_ProductLists[252] = endfMTtoC_ProductList( -1, "Various elastic neutron scattering parameters." )
endfMTtoC_ProductLists[253] = endfMTtoC_ProductList( -1, "Various elastic neutron scattering parameters." )
endfMTtoC_ProductLists[301] = endfMTtoC_ProductList( -1, "Energy release for total and partial $sigma$'s." )
endfMTtoC_ProductLists[451] = endfMTtoC_ProductList( -1, "Heading or title information, MF=1 only." )
endfMTtoC_ProductLists[452] = endfMTtoC_ProductList( 15, "(z,f) $bar{nu}$ total, i.e.
prompt plus delayed, fission.", -1, 0, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[454] = endfMTtoC_ProductList( 15, "(z,f) Independent fission product yields.", -1, 0, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[455] = endfMTtoC_ProductList( 15, "(z,f) $bar{nu}$ for delayed fission.", -1, 0, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[456] = endfMTtoC_ProductList( 15, "(z,f) $bar{nu}$ for prompt fission.", -1, 0, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[457] = endfMTtoC_ProductList( -1, "(z,f) Radioactive decay data.", -1, 0, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[458] = endfMTtoC_ProductList( 15, "(z,f) Energy release in fission for incident $n$'s.", -1, 0, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[459] = endfMTtoC_ProductList( 15, "(z,f) Cumulative fission product yields.", -1, 0, 0, 0, 0, 0, 0, 0 ) endfMTtoC_ProductLists[500] = endfMTtoC_ProductList( -1, "Total charged particle stopping power." ) endfMTtoC_ProductLists[501] = endfMTtoC_ProductList( 70, "Total photon interaction $sigma$." ) endfMTtoC_ProductLists[502] = endfMTtoC_ProductList( 71, "Photon coherent scattering." ) endfMTtoC_ProductLists[504] = endfMTtoC_ProductList( 72, "Photon incoherent scattering." ) endfMTtoC_ProductLists[505] = endfMTtoC_ProductList( -1, "Imaginary scattering factor." ) endfMTtoC_ProductLists[506] = endfMTtoC_ProductList( -1, "Real scattering factor." ) endfMTtoC_ProductLists[515] = endfMTtoC_ProductList( -1, "Pair production, electron field." ) endfMTtoC_ProductLists[516] = endfMTtoC_ProductList( 74, "Pair production." ) endfMTtoC_ProductLists[517] = endfMTtoC_ProductList( -1, "Pair production, nuclear field." ) endfMTtoC_ProductLists[522] = endfMTtoC_ProductList( 73, "Photoelectric absorption." ) endfMTtoC_ProductLists[534] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[535] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[536] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[537] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[538] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[539] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[540] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[541] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[542] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[543] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[544] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[545] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[546] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[547] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[548] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[549] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[550] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." 
) endfMTtoC_ProductLists[551] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[552] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[553] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[554] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[555] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[556] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[557] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[558] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[559] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[560] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[561] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[562] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[563] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[564] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[565] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[566] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[567] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[568] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[569] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[570] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductLists[571] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." ) endfMTtoC_ProductList_excitedStateInitializer( endfMTtoC_ProductLists, 600, 649, 40, "p", H1s = 1 ) endfMTtoC_ProductList_excitedStateInitializer( endfMTtoC_ProductLists, 650, 699, 41, "d", H2s = 1 ) endfMTtoC_ProductList_excitedStateInitializer( endfMTtoC_ProductLists, 700, 749, 42, "t", H3s = 1 ) endfMTtoC_ProductList_excitedStateInitializer( endfMTtoC_ProductLists, 750, 799, 44, "He3", He3s = 1 ) endfMTtoC_ProductList_excitedStateInitializer( endfMTtoC_ProductLists, 800, 849, 45, "a", He4s = 1 ) endfMTtoC_ProductLists[851] = endfMTtoC_ProductList( -1, "Lumped reaction covariances." 
) endfMTtoC_ProductList_excitedStateInitializer( endfMTtoC_ProductLists, 875, 891, 12, "2n", ns = 2 ) def getCSFromMT( MT ) : if( MT == 1 ) : return( 1, 0 ) # (z,total) if( MT == 2 ) : return( 10, 0 ) # (z,elas) if( MT in [ 3, 4, 5, 10, 25, 27, 30, 35, 36, 101, 109, 113, 114, 152, 153, 154, 156, 157, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191, 192, 194, 195, 196, 197, 198, 199, 200] ) : return( -MT, 0 ) if( MT == 11 ) : return( 32, 0 ) if( MT == 16 ) : return( 12, 0 ) if( MT == 17 ) : return( 13, 0 ) if( MT == 18 ) : return( 15, 0 ) if( MT == 19 ) : return( 15, 0 ) if( MT == 20 ) : return( 15, 0 ) if( MT == 21 ) : return( 15, 0 ) if( MT == 22 ) : return( 26, 0 ) if( MT == 23 ) : return( 36, 0 ) if( MT == 24 ) : return( 33, 0 ) if( MT == 28 ) : return( 20, 0 ) if( MT == 29 ) : return( 27, 0 ) if( MT == 32 ) : return( 22, 0 ) if( MT == 33 ) : return( 24, 0 ) if( MT == 34 ) : return( 25, 0 ) if( MT == 37 ) : return( 14, 0 ) if( MT == 38 ) : return( 15, 0 ) if( MT == 41 ) : return( 29, 0 ) if( MT == 42 ) : return( 16, 0 ) if( MT == 44 ) : return( 17, 0 ) if( MT == 45 ) : return( 34, 0 ) if( 50 <= MT < 91 ) : return( 11, 1 ) if( MT == 91 ) : return( 11, 0 ) if( MT == 102 ) : return( 46, 0 ) if( MT == 103 ) : return( 40, 0 ) if( MT == 104 ) : return( 41, 0 ) if( MT == 105 ) : return( 42, 0 ) if( MT == 106 ) : return( 44, 0 ) if( MT == 107 ) : return( 45, 0 ) if( MT == 108 ) : return( 37, 0 ) if( MT == 111 ) : return( 18, 0 ) if( MT == 112 ) : return( 48, 0 ) if( MT == 115 ) : return( 19, 0 ) if( MT == 116 ) : return( 39, 0 ) if( MT == 117 ) : return( 47, 0 ) if( MT == 155 ) : return( 43, 0 ) if( MT == 158 ) : return( 23, 0 ) if( MT == 159 ) : return( 31, 0 ) if( MT == 189 ) : return( 28, 0 ) if( MT == 193 ) : return( 38, 0 ) if( MT == 452 ) : return( 15, 0 ) # prompt plus delayed fission neutrons if( MT == 455 ) : return( 15, 7 ) # delayed fission neutrons if( MT == 456 ) : return( 15, 0 ) # prompt fission neutrons if( MT == 458 ) : return( 15, 0 ) # prompt fission neutron energy if( 600 <= MT < 649 ) : return( 40, 1 ) if( MT == 649 ) : return( 40, 0 ) if( 650 <= MT < 699 ) : return( 41, 1 ) if( MT == 699 ) : return( 41, 0 ) if( 700 <= MT < 749 ) : return( 42, 1 ) if( MT == 749 ) : return( 42, 0 ) if( 750 <= MT < 799 ) : return( 44, 1 ) if( MT == 799 ) : return( 44, 0 ) if( 800 <= MT < 849 ) : return( 45, 1 ) if( MT == 849 ) : return( 45, 0 ) if( 875 <= MT < 891 ) : return( 12, 1 ) if( MT == 891 ) : return( 12, 0 ) if( MT == 502 ) : return( 71, 0 ) # photo-atomic coherent if( MT == 504 ) : return( 72, 0 ) # photo-atomic incoherent if( MT == 516 ) : return( 74, 0 ) # photo-atomic pair production if( MT == 522 ) : return( 73, 0 ) # photo-atomic photo-electric raise Exception( 'MT = %d is not supported for conversion to C, S' % MT ) def getMTFromC( C ) : if( C == 1 ) : return( 1 ) # (z,total) if( C == 5 ) : return( -5 ) # (z,prod) if( C == 8 ) : return( -8 ) # (z,lacs) if( C == 9 ) : return( -9 ) # (z,n+i) if( C == 10 ) : return( 2 ) # (z,elas) if( C == 11 ) : return( 50 ) # (z,n) if( C == 12 ) : return( 16 ) # (z,2n) if( C == 13 ) : return( 17 ) # (z,3n) if( C == 14 ) : return( 37 ) # (z,4n) if( C == 15 ) : return( 18 ) # (z,f) if( C == 16 ) : return( 42 ) # (z,3np) if( C == 17 ) : return( 44 ) # (z,n2p) if( C == 18 ) : return( 111 ) # (z,2p) if( C == 19 ) : return( 115 ) # (z,pd) if( C == 20 ) : return( 28 ) # (z,np) if( C == 21 ) : return( -20 ) # (z,pn) if( C == 22 ) : return( 32 ) # 
(z,nd) if( C == 23 ) : return( 158 ) # (z,nda) if( C == 24 ) : return( 33 ) # (z,nt) if( C == 25 ) : return( 34 ) # (z,nHe3) if( C == 26 ) : return( 22 ) # (z,na) if( C == 27 ) : return( 29 ) # (z,n2a) if( C == 28 ) : return( 189 ) # (z,nta) if( C == 29 ) : return( 41 ) # (z,2np) if( C == 30 ) : return( -30 ) # (z,gna) if( C == 31 ) : return( 159 ) # (z,2npa) if( C == 32 ) : return( 11 ) # (z,2nd) if( C == 33 ) : return( 24 ) # (z,2na) if( C == 34 ) : return( 45 ) # (z,npa) if( C == 35 ) : return( 32 ) # (z,dn), ENDF does not have an (z,dn) reaction only an (z,nd) reaction and ENDL's only (z,dn) reaction is not two-body so order does not matter. if( C == 36 ) : return( 23 ) # (z,n3a) if( C == 37 ) : return( 108 ) # (z,2a) if( C == 38 ) : return( 193 ) # (z,He3 a) if( C == 39 ) : return( 116 ) # (z,pt) if( C == 40 ) : return( 600 ) # (z,p) if( C == 41 ) : return( 650 ) # (z,d) if( C == 42 ) : return( 700 ) # (z,t) if( C == 43 ) : return( 155 ) # (z,ta) if( C == 44 ) : return( 750 ) # (z,He3) if( C == 45 ) : return( 800 ) # (z,a) if( C == 46 ) : return( 102 ) # (z,g) if( C == 47 ) : return( 117 ) # (z,da) if( C == 48 ) : return( 112 ) # (z,pa) if( C == 49 ) : return( -49 ) # (z,2pa) if( C == 50 ) : return( -50 ) # (z,Xp) if( C == 51 ) : return( -51 ) # (z,Xd) if( C == 52 ) : return( -52 ) # (z,Xt) if( C == 53 ) : return( -53 ) # (z,XHe3) if( C == 54 ) : return( -54 ) # (z,Xa) if( C == 55 ) : return( -55 ) # (z,Xg) if( C == 56 ) : return( -56 ) # (z,Xn) if( C == 57 ) : return( -57 ) # (z,Xe) if( C == 70 ) : return( 501 ) # (z,totp) if( C == 71 ) : return( 502 ) # (z,coh) if( C == 72 ) : return( 504 ) # (z,incoh) if( C == 73 ) : return( 522 ) # (z,photo) if( C == 74 ) : return( 516 ) # (z,pair) if( C == 75 ) : return( -75 ) # (z,triplet) if( C == 78 ) : return( -78 ) # (z,ic) if( C == 81 ) : return( -81 ) # (z,ion) if( C == 82 ) : return( -82 ) # (z,brem) if( C == 83 ) : return( -83 ) # (z,excit) if( C == 84 ) : return( -84 ) # (z,coll) if( C == 91 ) : return( -91 ) # (z,shell) if( C == 92 ) : return( -92 ) # (z,trans) if( C == 93 ) : return( -93 ) # (z,whole) raise Exception( 'C = %d is not supported for conversion to MT' % C ) class ENDLCS_To_ENDFMT : def __init__( self, projectile ) : self.projectile = projectile self.MTPrimes = {} def getMTFromCS( self, C, S, CCounts = 0 ) : def MTPrimes( self, MT, S, projectile ) : if( S == 0 ) : if( MT == 50 ) : MT = 91 else : MT += 49 else : if( MT not in self.MTPrimes ) : self.MTPrimes[MT] = -1 if( self.projectile == projectile ) : self.MTPrimes[MT] += 1 self.MTPrimes[MT] += 1 MT += self.MTPrimes[MT] return( MT ) MT = getMTFromC( C ) if( MT == 50 ) : MT = MTPrimes( self, MT, S, 'n' ) elif( MT == 600 ) : MT = MTPrimes( self, MT, S, 'H1' ) if( ( S == 0 ) and ( CCounts == 1 ) ) : MT = 103 elif( MT == 650 ) : MT = MTPrimes( self, MT, S, 'H2' ) if( ( S == 0 ) and ( CCounts == 1 ) ) : MT = 104 elif( MT == 700 ) : MT = MTPrimes( self, MT, S, 'H3' ) if( ( S == 0 ) and ( CCounts == 1 ) ) : MT = 105 elif( MT == 750 ) : MT = MTPrimes( self, MT, S, 'He3' ) if( ( S == 0 ) and ( CCounts == 1 ) ) : MT = 106 elif( MT == 800 ) : MT = MTPrimes( self, MT, S, 'He4' ) if( ( S == 0 ) and ( CCounts == 1 ) ) : MT = 107 return( MT ) def ENDF_MTZAEquation( projectileZA, targetZA, MT ) : """ This function returns a python list of length 2. The first element is a list of all outgoing particle ZA's (including the residual) for the reaction of projectileZA + targetZA with ENDF's reaction identifier MT. 
The second element is a reaction equation for this projectileZA, targetZA and MT. For example ENDF_MTZAEquation( 1, 95242, 22 ) returns ([1, 2004, 93238], 'n + Am242 -> n + He4 + Np238') That is, for a neutron ( projectileZA = 1 ) hitting Am242 ( targetZA = 95242 ) with MT = 22 - ENDF (z,na) reaction - the outgoing particle ZA's are [1, 2004, 93238] and the reaction equation is 'n + Am242 -> n + He4 + Np238'. """ if( ( MT < 0 ) or ( MT > 999 ) or ( MT in [ 1, 3, 5, 10, 18, 19, 20, 21, 27, 38, 101, 151 ] or ( 200 < MT < 600 ) or ( 850 < MT < 875 ) ) ) : raise Exception( 'MT = %s is no supported' % MT ) elif( MT == 2 ) : productCounts = { chemicalElementMiscPoPsModule.idFromZA( projectileZA ) : 1 } level = None elif( MT == 4 ) : productCounts = { chemicalElementMiscPoPsModule.idFromZA( projectileZA ) : 1 } level = None else : productCounts = endfMTtoC_ProductLists[MT].productCounts level = endfMTtoC_ProductLists[MT].residualLevel compoundZA = projectileZA + targetZA residualZA = compoundZA productCountList = [] adder, equationZA, equation = '', [], '%s + %s ->' % \ ( chemicalElementMiscPoPsModule.idFromZA( projectileZA ), chemicalElementMiscPoPsModule.idFromZA( targetZA ) ) for product in productCounts : if( product == IDsPoPsModule.photon ) : productZA = 0 else : productZA = miscENDLModule.getZ_A_suffix_andZAFromName( product )[-1] if( productCounts[product] > 0 ) : productCountList.append( [ productZA, product, productCounts[product] ] ) productCountList.sort( ) for productZA, token, count in productCountList : residualZA -= count * productZA for idx in range( count ) : equation += ' %s%s' % ( adder, token ) equationZA.append( productZA ) adder = '+ ' levelStr = '' if( not( level is None ) ) : if( isinstance( level, int ) ) : if( level < 0 ) : ValueError( 'level = %s must be >= 0' % level ) if( level > 0 ) : levelStr = "_e%s" % level else : raise Exception( 'Unknown level specifier = %s' % level ) equation += ' %s%s%s' % ( adder, chemicalElementMiscPoPsModule.idFromZA( residualZA ), levelStr ) equationZA.append( residualZA ) return( equationZA, equation ) def setReactionsOutputChannelFromOutputChannel( info, reaction, outputChannel ) : for conversion in info.ENDFconversionFlags.flags : if( isinstance( conversion.link, QModule.component ) ) : if( conversion.link == outputChannel.Q ) : conversion.link = reaction.outputChannel.Q reaction.outputChannel.process = outputChannel.process reaction.outputChannel.genre = outputChannel.genre for Q in outputChannel.Q : reaction.outputChannel.Q.add( Q ) for product in outputChannel.products : reaction.outputChannel.products.add( product ) for delayedNeutron in outputChannel.fissionFragmentData.delayedNeutrons : reaction.outputChannel.fissionFragmentData.delayedNeutrons.add( delayedNeutron ) for fissionEnergyRelease in outputChannel.fissionFragmentData.fissionEnergyReleases : reaction.outputChannel.fissionFragmentData.fissionEnergyReleases.add( fissionEnergyRelease )
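

# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor addition, not part of the original module).
# It assumes this module is importable together with its FUDGE-style
# dependencies (chemicalElementMiscPoPsModule, miscENDLModule, etc.); the
# expected values below are taken verbatim from the ENDF_MTZAEquation docstring
# and from the getCSFromMT/getMTFromC tables defined above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # MT 22 is the ENDF (z,na) reaction; the docstring above gives the result
    # for a neutron (ZA=1) on Am242 (ZA=95242):
    #   ([1, 2004, 93238], 'n + Am242 -> n + He4 + Np238')
    outgoingZAs, equation = ENDF_MTZAEquation(1, 95242, 22)
    print(outgoingZAs, equation)

    # Round-trip between an ENDF MT number and the ENDL (C, S) designators.
    C, S = getCSFromMT(22)          # (26, 0) per the table above
    print(C, S, getMTFromC(C))      # getMTFromC(26) returns 22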
import numpy as np from scipy.linalg import expm from tqdm import tqdm import time from utils import visualize_trajectory_2d,load_data ############## All Utility Functions ############### def round_dot(vec): ''' :param vec: A point in homogeneous co-ordinate :return: 0 representation for the vector ''' assert vec.shape == (4,1) vec_homog = to_homog(vec) vec_hat = hat(vec_homog[0:3,0]) vec_round_dot = np.hstack((np.eye(3),-vec_hat)) vec_round_dot = np.vstack((vec_round_dot,np.zeros((1,6)))) return vec_round_dot def to_homog(vec): ''' :param vec: A vector :return: A vector scaled to make sure the last component is one ''' assert vec.shape == (4,1) return vec / vec[3,0] def pi(point): ''' :param : point in 3D :return: projected point in 2D ''' point = point.reshape(4) return point / point[2] def inv_pi(point): ''' :param point: In 2D :return: in 3D - Gain depth info ''' assert point.shape == (4,1) return point * point[3] def deri_pi(point): ''' :param point: derivative of the projection this point is taken :return: derivative ''' point = point.reshape(4) return np.array([[1,0,-point[0]/point[2],0], [0,1,-point[1]/point[2],0], [0,0,0,0], [0,0,-point[3]/point[2],1]]) / point[2] def hat(vec): ''' This function computes the hat function ''' assert vec.ndim == 1 return np.array([[0,-vec[2],vec[1]], [vec[2],0,vec[1]], [-vec[1],vec[0],0]]) def curly_hat(omega_hat,v): ''' This function Computes the curly hat operations ''' v_hat = hat(v) curly_u = np.hstack((omega_hat,v_hat)) curly_u = np.vstack((curly_u,np.hstack((np.zeros((3,3)),omega_hat)))) return curly_u ############## All Utility Functions Ends ############### def imu_ekf(data_set): ''' This function performs the update for the data received from the Binocular Camera. This method is used to solve the predict only step in the problem :param data_set-> The data set which is to be read for processing :return -> The mean poses for the IMU is returned ''' time_stamp,features,v,omega,K,b,cam_T_imu = load_data(data_set) start_time = time.time() z = features opt_T_imu = cam_T_imu #### Initializations #### prev_pose = np.eye(4) prev_cov = np.eye(6) pose_mean = np.zeros((4,4,time_stamp.shape[1])) for t in tqdm(range(time_stamp.shape[1]-1)): tau = time_stamp[0,t+1] - time_stamp[0,t] omega_hat = hat(omega[:,t]) u_hat = np.hstack((omega_hat,v[:,t].reshape(3,1))) u_hat = np.vstack((u_hat,np.zeros((1,4)))) #### Predict IMU Pose #### #### Mean #### pose_mean[:,:,t] = expm(-tau * u_hat) @ prev_pose prev_pose = pose_mean[:,:,t] #### Co-variance #### W = np.diag(np.random.randn(6)) pose_cov = expm(-tau * curly_hat(omega_hat,v[:,t])) @ prev_cov \ @ expm(-tau * curly_hat(omega_hat,v[:,t])).T + W #visualize_trajectory_2d(pose_mean) print("Done IMU Predict and time taken is ", time.time()-start_time) return pose_mean def slam_imu_predict(time_stamp,features,v,omega,K,b,cam_T_imu,t,prev_pose,prev_cov): ''' This function performs the predict step for the Slam problem, This is called once for every time stamp Inputs: Along with the previous pose and covariance matrix Output: Predicted covaraince and mean for the IMU pose ''' start_time = time.time() z = features opt_T_imu = cam_T_imu tau = time_stamp[0,t+1] - time_stamp[0,t] omega_hat = hat(omega[:,t]) u_hat = np.hstack((omega_hat,v[:,t].reshape(3,1))) u_hat = np.vstack((u_hat,np.zeros((1,4)))) #### Predict IMU Pose #### #### Mean #### pose_mean = expm(-tau * u_hat) @ prev_pose #### Co-variance #### W = np.diag(np.random.randn(6)) pose_cov = expm(-tau * curly_hat(omega_hat,v[:,t])) @ prev_cov \ @ expm(-tau * 
curly_hat(omega_hat,v[:,t])).T + W #visualize_trajectory_2d(pose_mean) #print("Done IMU Predict and time taken is ", time.time()-start_time) return pose_mean, pose_cov def slam(data_set): ''' This performs slam for the visual odometry data Step 1: Performs predict for IMU pose Step 2: Performs update for IMU pose and landmark points Substep: Compute Jacobian for H_l and H_u Substep: Concatenate both of them Substep: Perform overall update for Covraince and Kalman Gain Substep: Perform individual update for the means of IMU pose and Landmark Locations :return: Plot of the localization and Mapping for the Particle ''' time_stamp,z,v,omega,k,b,cam_T_imu = load_data(data_set) #Choosing Points in Map chose_landmark_option = 1 if(chose_landmark_option == 1): chosen_landmarks = [i for i in range(z.shape[1]) if i%10 == 0] elif(chose_landmark_option == 2): chosen_landmarks = np.random.randint(0,z.shape[1],500) last_landmark = max(chosen_landmarks) #Temprory variables landmark_mean_cam = np.zeros(3) first_observation = np.zeros(3) #Projection Constants P_T = np.hstack((np.eye(3),np.zeros((3,1)))).T M = np.hstack((k[0:2,0:3],np.zeros((2,1)))) M = np.vstack((M,M)) landmark_mean = np.zeros((3 * len(chosen_landmarks))) # Total LandMarks are 3M state_cov = 2 * np.eye(3*len(chosen_landmarks)+6) #New State Variable with Size 3M+6 imu_prev_pose, imu_prev_cov = np.eye(4), np.eye(6) # To predict module Initialization pose_mean = np.zeros((4,4,features.shape[2])) #For plotting purpose size is 4x4xT for t in tqdm(range(features.shape[2]-1)): #### IMU Predict pos and covariance #### imu_pred_pos,imu_pred_cov = slam_imu_predict(time_stamp,z,v,omega,K,b,cam_T_imu,t,imu_prev_pose,imu_prev_cov) z_tik = np.zeros((4 * len(chosen_landmarks),1)) #Observation Model Readings z_observed = np.zeros((4 * len(chosen_landmarks),1)) #Sensor readings ### Find the legal Readings and Choose the one's in the Points of Interest ### z_sum = np.sum(z[:,0:last_landmark,t],axis=0) valid_scans = np.where(z_sum != -4) valid_and_relevant_scans = [scan for scan in valid_scans[0] if scan in chosen_landmarks] H_l = np.zeros((4*len(chosen_landmarks),3*len(chosen_landmarks))) H_u = np.zeros((4*len(chosen_landmarks),6)) for scan in valid_and_relevant_scans: ###### Jacobian for Mapping Calculation ##### scan_loc = chosen_landmarks.index(scan) # The location of the current scan in the original array str_4x,end_4x = scan_loc*4, scan_loc*4+4 str_3x,end_3x = scan_loc*3, scan_loc*3+3 ##### Initialization for scans seen for the first time ###### if (np.all(landmark_mean[str_3x:end_3x] == first_observation)): ## Convert Z into Camera Cordinates landmark_mean_cam[2] = -M[2, 3] / (z[0, scan, t] - z[2, scan, t]) landmark_mean_cam[1] = (z[1, scan, t] - M[1, 2]) * landmark_mean_cam[2] / M[1, 1] landmark_mean_cam[0] = (z[0, scan, t] - M[0, 2]) * landmark_mean_cam[2] / M[0, 0] landmark_mean_cam_homog = np.vstack((landmark_mean_cam.reshape(3, 1), 1)) landmark_mean_homog = np.linalg.inv(cam_T_imu @ imu_pred_pos) @ landmark_mean_cam_homog landmark_mean[str_3x:end_3x] = landmark_mean_homog[0:3, 0] ##### Perform Update related Operations ###### else: landmark_mean_homo = np.vstack((landmark_mean[str_3x:end_3x].reshape(3, 1), 1)) landmark_camera = cam_T_imu @ imu_pred_pos @ landmark_mean_homo dpi_dq = deri_pi(landmark_camera) H_l[str_4x:end_4x,str_3x:end_3x] = M @ dpi_dq @ cam_T_imu @ imu_pred_pos @ P_T ###### Jacobian for IMU Calculation ##### H_u[str_4x:end_4x,:] = M @ dpi_dq @ cam_T_imu @ round_dot(to_homog(imu_pred_pos @ landmark_mean_homo)) ###### Observed vs 
Expected ###### z_observed[str_4x:end_4x,0] = z[:,scan,t] z_tik[str_4x:end_4x,0] = M @ pi(landmark_camera) #### Update Combined Covariance#### H = np.hstack((H_l,H_u)) #Main Jacobian N = np.diag(5 * np.random.rand(H.shape[0])) ###### If the inverse leads to Singularity Compute Another Noise ###### try: Kalman_gain = state_cov @ H.T @ np.linalg.inv(H @ state_cov @ H.T + N) except: N = np.diag(6 * np.random.rand(H.shape[0])) Kalman_gain = state_cov @ H.T @ np.linalg.inv(H @ state_cov @ H.T + N) #### Update the Stat_covariance Matrix #### state_cov = (np.eye(3*len(chosen_landmarks)+6) - Kalman_gain @ H) @ state_cov ##IMU Mean Update## perturb_pos = Kalman_gain[-6:,:] @ (z_observed-z_tik) #Pick last few rows to get IMU details perturb_pos_hat = np.hstack((hat(perturb_pos[3:6,0]),perturb_pos[0:3,0].reshape(3,1))) perturb_pos_hat = np.vstack((perturb_pos_hat,np.zeros((1,4)))) imu_update_pose = expm(perturb_pos_hat) @ imu_pred_pos pose_mean[:,:,t] = imu_update_pose ##LandMark Mean Update ## perturb_landmark = Kalman_gain[0:-6,:] @ (z_observed - z_tik) #Pick first 3M rows landmark_mean = landmark_mean + perturb_landmark.reshape(-1) #update imu pos with the updated value of these varaibles imu_prev_pose = imu_update_pose visualize_trajectory_2d(pose_mean, landmark_mean.reshape(-1, 3).T) def visual_ekf(pose_mean,z,k,b,cam_T_imu): ''' :param pose_mean: The estimated pose for the IMU Data set along with the Estimated pose of IMU Computes the Landmark update based on the assumption of IMU poses being golden Uses the Stereo Camera Model to get the output :return: Plot of the localization of the body along with the maps for the sourrounding ''' print("Starting Mapping Update") start_time = time.time() num_landmark = z.shape[1] landmark_mean = np.zeros((3*num_landmark)) # 3M landmark_cov = np.diag(1e-2*np.random.randn(3*num_landmark)) landmark_mean_cam = np.zeros(3) landmark_mean_cam_homog = np.zeros((4,1)) P_T = np.hstack((np.eye(3),np.zeros((3,1)))).T M = np.hstack((k[0:2,0:3],np.zeros((2,1)))) M = np.vstack((M,M)) M[2,3] = -k[0,0] * b #Disparity total_time = z.shape[2] no_observation = np.array([-1,-1,-1,-1]) first_observation = np.zeros(3) for t in tqdm(range(total_time)): jacobian = np.zeros((4*num_landmark, 3*num_landmark)) z_tik = np.zeros((4 * num_landmark)) z_sum = np.sum(z[:,0:num_landmark,t],axis=0) valid_scans = np.where(z_sum != -4) #for landmark in range(num_landmark-1): for landmark in valid_scans[0]: lnd_mrk_strt, lnd_mrk_end = landmark * 3, landmark * 3 + 3 if(np.all(landmark_mean[lnd_mrk_strt:lnd_mrk_end] == first_observation)): landmark_mean_cam[2] = -M[2,3] / (z[0,landmark,t] - z[2,landmark,t]) landmark_mean_cam[1] = (z[1,landmark,t] - M[1,2]) * landmark_mean_cam[2] / M[1,1] landmark_mean_cam[0] = (z[0,landmark,t] - M[0,2]) * landmark_mean_cam[2] / M[0,0] landmark_mean_cam_homog = np.vstack((landmark_mean_cam.reshape(3,1),1)) landmark_mean_homog = np.linalg.inv(cam_T_imu @ pose_mean[:,:,t]) @ landmark_mean_cam_homog landmark_mean[lnd_mrk_strt:lnd_mrk_end] = landmark_mean_homog[0:3,0] #initialize else: landmark_mean_homo = np.vstack((landmark_mean[lnd_mrk_strt:lnd_mrk_end].reshape(3,1),1)) landmark_camera = cam_T_imu @ pose_mean[:, :, t] @ landmark_mean_homo dpi_dq = deri_pi(landmark_camera) strt,end = landmark*3,landmark*3 + 3 #Address z_tik = (M @ pi(landmark_camera)).flatten() jacobian = M @ dpi_dq @ cam_T_imu @ pose_mean[:,:,t] @ P_T k_gain = landmark_cov[strt:end,strt:end] @ jacobian.T @ \ np.linalg.inv(jacobian @ landmark_cov[strt:end,strt:end] @ jacobian.T \ + np.diag(30 * 
np.random.randn(4))) #np.diag(1e2) also worked landmark_mean[strt:end] = landmark_mean[strt:end] + k_gain @ (z[:,landmark,t] - z_tik) landmark_cov[strt:end,strt:end] = (np.eye(3) - k_gain @ jacobian) @ landmark_cov[strt:end,strt:end] print("Done Mapping update and time taken is ", time.time()-start_time) visualize_trajectory_2d(pose_mean,landmark_mean.reshape(-1,3).T) if __name__ == '__main__': dataset_list = ['data/0022.npz','data/0027.npz','data/0034.npz'] for data_set in dataset_list: t,features,linear_velocity,rotational_velocity,K,b,cam_T_imu = load_data(data_set) ### Run Part a and Part b ### visual_ekf(imu_ekf(data_set),features,K,b,cam_T_imu) ### Run Part c ### slam(data_set)
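

# ---------------------------------------------------------------------------
# Minimal self-contained sketch (editor addition) of the pose-prediction step
# used in imu_ekf()/slam_imu_predict() above: the inverse IMU pose is
# propagated with the SE(3) matrix exponential, T_{t+1} = expm(-tau * u_hat) @ T_t.
# The velocity values below are made up for illustration; no dataset is needed.
# ---------------------------------------------------------------------------
def _demo_pose_prediction():
    def skew(w):
        # Standard 3x3 skew-symmetric (hat) matrix of a 3-vector.
        return np.array([[0.0, -w[2], w[1]],
                         [w[2], 0.0, -w[0]],
                         [-w[1], w[0], 0.0]])

    T_prev = np.eye(4)
    v = np.array([1.0, 0.0, 0.0])        # linear velocity, illustrative
    omega = np.array([0.0, 0.0, 0.1])    # angular velocity, illustrative
    tau = 0.1                             # time step, illustrative

    u_hat = np.zeros((4, 4))
    u_hat[0:3, 0:3] = skew(omega)
    u_hat[0:3, 3] = v
    return expm(-tau * u_hat) @ T_prev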
from rule import *
from operator import methodcaller


def chi_toi_checker(cards):
    return 0


def ko_ku_shi_checker(cards):
    return 0


def ron_dfs(handcards):
    if (len(handcards) == 0):
        return True
    return False  # no matter whether have yaku


def can_ron(cards):
    if (ron_dfs(cards.handcards)):
        return True
    return (chi_toi_checker(cards) > 0) or (ko_ku_shi_checker(cards) > 0)


def chin_i_so_checker(cards):
    ok = True
    return 1


def ri_chi_checker(cards):
    return 1


yaku_list = ["chin_i_so", "ri_chi"]


def get_all_yaku(cards):
    ret = []
    for pattern in yaku_list:
        check_ret = eval(pattern + "_checker")(cards)
        if (check_ret > 0):
            ret.append((check_ret, pattern))
    return ret


print(get_all_yaku('fuck'))
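

# ---------------------------------------------------------------------------
# Editor addition: a hedged alternative to the eval()-based dispatch in
# get_all_yaku(), using an explicit dict of callables. The checker names
# mirror yaku_list above; behaviour is otherwise intended to be the same.
# ---------------------------------------------------------------------------
_YAKU_CHECKERS = {
    "chin_i_so": chin_i_so_checker,
    "ri_chi": ri_chi_checker,
}


def get_all_yaku_no_eval(cards):
    ret = []
    for pattern, checker in _YAKU_CHECKERS.items():
        check_ret = checker(cards)
        if check_ret > 0:
            ret.append((check_ret, pattern))
    return ret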
import pytest
import torch
import time
from ynot.datasets import FPADataset
from ynot.echelle import Echellogram
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.nn as nn


@pytest.mark.parametrize(
    "device", ["cuda", "cpu"],
)
def test_forward_backward(device):
    """Do the scene models have the right shape"""
    echellogram = Echellogram(device=device)
    t0 = time.time()
    scene_model = echellogram.forward(1)
    t1 = time.time()
    scalar = scene_model.sum()
    t2 = time.time()
    scalar.backward()
    t3 = time.time()
    net_time = t1 - t0
    net_time2 = t3 - t2
    print(f"\n\t{echellogram.device}: forward {net_time:0.5f} seconds", end="\t")
    print(f"\n\t{echellogram.device}: backward {net_time2:0.5f} seconds", end="\t")

    assert scene_model.shape == echellogram.xx.shape
    assert scene_model.dtype == echellogram.xx.dtype


@pytest.mark.parametrize(
    "device", ["cuda", "cpu"]
)
@pytest.mark.slow
def test_training_loop(device):
    """The end-to-end training should operate"""
    model = Echellogram(device=device)
    dataset = FPADataset()
    n_frames_per_batch = 1
    train_loader = DataLoader(
        dataset=dataset,
        batch_size=n_frames_per_batch,
        pin_memory=True,
        shuffle=True,
    )

    loss_fn = nn.MSELoss(reduction='mean')
    optimizer = optim.Adam(model.parameters(), 0.01)
    n_epochs = 10
    losses = []
    initial_params = model.parameters()

    t0 = time.time()
    for epoch in range(n_epochs):
        for data in train_loader:
            ind, y_batch = data[0].to(device, non_blocking=True), data[1].to(device, non_blocking=True)
            model.train()
            yhat = model.forward(ind).unsqueeze(0)
            loss = loss_fn(yhat, y_batch)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            losses.append(loss.item())
    t1 = time.time()

    net_time = t1 - t0
    print(f"\n\t {n_epochs} epochs on {device}: {net_time:0.1f} seconds", end="\t")

    for loss in losses:
        assert loss == loss
    for parameter in model.parameters():
        assert parameter.isfinite().all()
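

# ---------------------------------------------------------------------------
# Editor addition: a self-contained sketch of the forward/backward timing
# pattern exercised in test_forward_backward, using a tiny stand-in module
# instead of Echellogram (which needs project data). Shapes are arbitrary.
# ---------------------------------------------------------------------------
def _time_forward_backward(device="cpu"):
    model = nn.Linear(16, 1).to(device)
    x = torch.randn(8, 16, device=device)
    t0 = time.time()
    scalar = model(x).sum()
    t1 = time.time()
    scalar.backward()
    t2 = time.time()
    return t1 - t0, t2 - t1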
import json import discord from discord.ext import commands from utils import get_color import datetime class Modlogs(commands.Cog): def __init__(self, bot): self.bot = bot with open("./bot_config/logging/modlogs_channels.json", "r") as modlogsFile: self.modlogsFile = json.load(modlogsFile) @commands.command(name="messagelogschannel", aliases=["seteditedlogschannel", "setdeletedlogschannel", "setlogschannel", "setlogchannel"], description="Sets the channel in which edited/deleted message logs are sent.") async def set_modlogs_channel(self, ctx, channel: discord.TextChannel): channel_id = channel.id self.modlogsFile[str(ctx.guild.id)] = int(channel_id) with open("./bot_config/logging/modlogs_channels.json", "w") as modlogsFile: json.dump(self.modlogsFile, modlogsFile, indent=4) await ctx.send(f"Edited/Deleted logs channel set as {channel.mention} succesfully.") @commands.Cog.listener() async def on_message_edit(self, before, after): message_channel_id = self.modlogsFile.get(str(before.guild.id)) if message_channel_id is None: return message_channel = self.bot.get_channel(id=int(message_channel_id)) if message_channel is None: return message_link = f"https://discord.com/channels/{before.guild.id}/{before.channel.id}/{before.id}" embed = discord.Embed(title=f"Message edited in {before.channel.name}", color=get_color.get_color(before.author), timestamp=after.created_at) embed.add_field(name="Before", value=before.content, inline=False) embed.add_field(name="After", value=after.content, inline=False) embed.add_field( name="Link", value=f"__[Message]({message_link})__", inline=False) embed.set_footer(text=f"Author • {before.author} | Edited") embed.set_thumbnail(url=before.author.avatar_url) # the edited timestamp would come in the right, so we dont need to specify it in the footer try: await message_channel.send(embed=embed) except: # embeds dont have a message.content, so it gives us an error pass # from mahasvan#0001 ape botman.py @commands.Cog.listener() async def on_message_delete(self, message): embed = discord.Embed(title=f"Message deleted in {message.channel.name}", color=get_color.get_color(message.author), timestamp=message.created_at) embed.add_field(name="Content", value=message.content, inline=False) embed.set_footer(text=f"Author • {message.author} | Created", icon_url=message.author.avatar_url) # the edited timestamp would come in the right, so we dont need to specify it in the footer message_channel = self.bot.get_channel(id=int(self.modlogsFile.get(str(message.guild.id)))) if message_channel is None: return await message_channel.send(embed=embed) @commands.Cog.listener() async def on_bulk_message_delete(self, messages): if self.modlogsFile.get(str(messages[0].guild.id)) is None: return with open(f"./bot_config/tempText/{messages[0].guild.id}.txt", "w") as temp_textfile: for x in messages: line1 = f"From: {x.author} | in: {x.channel.name} | Created at: {x.created_at}\n" temp_textfile.write(line1) temp_textfile.write(f"{x.content}\n\n") file = discord.File(f"./bot_config/tempText/{messages[0].guild.id}.txt") message_channel = self.bot.get_channel(id=int(self.modlogsFile.get(str(messages[0].guild.id)))) if message_channel is None: return await message_channel.send(file=file, content=f"{len(messages)} messages deleted. 
" f"Sending information as text file.") # member update event @commands.Cog.listener() async def on_member_update(self, before, after): message_channel_id = self.modlogsFile.get(str(before.guild.id)) if message_channel_id is None: return message_channel = self.bot.get_channel(id=int(message_channel_id)) if message_channel is None: return # nickname change if not before.nick == after.nick: embed = discord.Embed(title=f"{before}'s nickname has been updated", description=f"ID: {before.id}", color=get_color.get_color(after), timestamp=before.created_at) embed.add_field( name="Before", value=before.display_name, inline=False) embed.add_field( name="After", value=after.display_name, inline=False) embed.set_thumbnail(url=after.avatar_url) embed.set_footer(text="Account created at") await message_channel.send(embed=embed) # role change if not before.roles == after.roles: embed = discord.Embed(title=f"{before}'s roles have been updated", description=f"ID: {before.id}", color=after.color, timestamp=before.created_at) before_roles_str, after_roles_str = "", "" for x in before.roles[::-1]: before_roles_str += f"{x.mention} " for x in after.roles[::-1]: after_roles_str += f"{x.mention} " embed.add_field( name="Before", value=before_roles_str, inline=False) embed.add_field(name="After", value=after_roles_str, inline=False) embed.set_thumbnail(url=after.avatar_url) embed.set_footer(text="Account created at") await message_channel.send(embed=embed) # from mahasvan#0001 ape botman.py # ban event @commands.Cog.listener() async def on_member_ban(self, guild, member:discord.Member): message = discord.Message message_channel = self.bot.get_channel(id=int(self.modlogsFile.get(str(guild.id)))) if message_channel is None: return embed = discord.Embed(title="**Member Banned**", color=member.color, timestamp=datetime.datetime.utcnow()) embed.set_thumbnail(url=f"{member.avatar_url}") embed.add_field(name=f"{member} was banned from the server", value=f"**Moderator**: {message.author}") embed.set_footer(text=f"UUID: {member.id}") await message_channel.send(embed=embed) # unban event @commands.Cog.listener() async def on_member_unban(self, guild, member: discord.Member): message_channel = self.bot.get_channel(id=int(self.modlogsFile.get(str(guild.id)))) if message_channel is None: return embed = discord.Embed(title=f"{member} has been unbanned", description=f"ID: {member.id}", color=get_color.get_color(discord.Color.random())) embed.set_thumbnail(url=member.avatar_url) await message_channel.send(embed=embed) # join event @commands.Cog.listener() async def on_member_join(self, member): message_channel_id = self.modlogsFile.get(str(member.guild.id)) if message_channel_id is None: return message_channel = self.bot.get_channel(id=int(message_channel_id)) if message_channel is None: return embed = discord.Embed(title=f"Member {member} joined the the server.", color=member.color, timestamp=datetime.datetime.utcnow(), description=f"**Their account was created at:** {member.created_at}") embed.set_thumbnail(url=member.avatar_url) embed.set_footer(text=f"UUID: {member.id}") await message_channel.send(embed=embed) # leave event @commands.Cog.listener() async def on_member_remove(self, member): message_channel_id = self.modlogsFile.get(str(member.guild.id)) if message_channel_id is None: return message_channel = self.bot.get_channel(id=int(message_channel_id)) if message_channel is None: return roles = [role for role in member.roles] embed = discord.Embed(title=f"Member {member} left from the server.", color=member.color, 
timestamp=datetime.datetime.utcnow(), description=f"**Their account was created at:** {member.created_at}") embed.add_field(name="Their roles:", value=" ".join( [role.mention for role in roles])) embed.set_footer(text=f"UUID: {member.id}") embed.set_thumbnail(url=member.avatar_url) await message_channel.send(embed=embed) def setup(bot): bot.add_cog(Modlogs(bot))
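

# ---------------------------------------------------------------------------
# Editor addition: an illustrative sketch of how this cog might be attached to
# a bot (discord.py 1.x style, matching the non-async setup() above). The
# module path "cogs.modlogs" and the command prefix are assumptions.
# ---------------------------------------------------------------------------
def _example_bot_setup():
    intents = discord.Intents.default()
    intents.members = True  # the member join/leave/update listeners need this
    bot = commands.Bot(command_prefix="!", intents=intents)
    bot.load_extension("cogs.modlogs")
    return bot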
import pytest
from django.contrib.auth import get_user_model
from django.urls import reverse

from tahoe_idp.tests.magiclink_fixtures import user  # NOQA: F401

User = get_user_model()


@pytest.mark.django_db
def test_studio_login_must_be_authenticated(client, settings):  # NOQA: F811
    url = reverse('studio_login')
    response = client.get(url)
    assert response.status_code == 302
    assert response.url.startswith(settings.LOGIN_URL)


@pytest.mark.django_db
def test_studio_login(settings, client, user):  # NOQA: F811
    url = reverse('studio_login')
    client.login(username=user.username, password='password')
    response = client.get(url)
    assert response.status_code == 302
    assert response.url.startswith('http://{studio_domain}'.format(studio_domain=settings.MAGICLINK_STUDIO_DOMAIN))
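

@pytest.mark.django_db
def test_studio_login_redirect_domain(settings, client, user):  # NOQA: F811
    """Editor addition: a hedged companion test that pins the studio domain
    explicitly via the pytest-django ``settings`` fixture, mirroring the
    assertion in test_studio_login above. The domain value is a placeholder,
    not a real project setting."""
    settings.MAGICLINK_STUDIO_DOMAIN = 'studio.example.com'
    client.login(username=user.username, password='password')
    response = client.get(reverse('studio_login'))
    assert response.status_code == 302
    assert response.url.startswith('http://studio.example.com')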
UI_INTERACTIONS = { 'learn-more': { 'interaction_type': 'click', 'element_location': 'ct_menu_tree', 'element_name': 'ct_learn_more_btn', 'icon_name': 'document', 'color': 'yellow', 'cta_text': 'Learn More' }, 'advanced-metrics': { 'interaction_type': 'click', 'element_location': 'ct_menu_tree', 'element_name': 'ct_web_metrics_btn', 'icon_name': 'paw', 'color': 'blue', 'cta_text': 'See advanced metrics' }, 'open-dashboard': { 'interaction_type': 'click', 'element_location': 'ct_menu_tree', 'element_name': 'ct_summary_btn', 'icon_name': 'guage', 'color': 'purple', 'cta_text': 'View summary' }, 'toggle-status-metrics': { 'interaction_type': 'click', 'element_location': 'ct_menu_tree', 'element_name': 'ct_toggle_status_bar_metrics_btn', 'icon_name': 'slash-eye', 'color': 'blue', 'cta_text': 'Hide status bar metrics' }, 'submit-feedback': { 'interaction_type': 'click', 'element_location': 'ct_menu_tree', 'element_name': 'ct_submit_feedback_btn', 'icon_name': 'text-bubble', 'color': 'green', 'cta_text': 'Submit feedback' }, 'google-signup': { 'interaction_type': 'click', 'element_location': 'ct_menu_tree', 'element_name': 'ct_sign_up_google_btn', 'icon_name': 'google', 'color': '', 'cta_text': 'Sign up with Google' }, 'github-signup': { 'interaction_type': 'click', 'element_location': 'ct_menu_tree', 'element_name': 'ct_sign_up_github_btn', 'icon_name': 'github', 'color': 'white', 'cta_text': 'Sign up with Github' }, 'email-signup': { 'interaction_type': 'click', 'element_location': 'ct_menu_tree', 'element_name': 'ct_sign_up_email_btn', 'icon_name': 'envelope', 'color': 'gray', 'cta_text': 'Sign up with email' }, 'code-time': { 'interaction_type': 'click', 'element_location': 'ct_metrics_tree', 'element_name': 'ct_codetime_toggle_node', 'icon_name': '', 'color': 'blue', 'cta_text': 'Code time' }, 'active-code-time': { 'interaction_type': 'click', 'element_location': 'ct_metrics_tree', 'element_name': 'ct_active_codetime_toggle_node', 'icon_name': '', 'color': 'blue', 'cta_text': 'Active code time' }, 'lines-added': { 'interaction_type': 'click', 'element_location': 'ct_metrics_tree', 'element_name': 'ct_lines_added_toggle_node', 'icon_name': '', 'color': 'blue', 'cta_text': 'Lines added' }, 'lines-removed': { 'interaction_type': 'click', 'element_location': 'ct_metrics_tree', 'element_name': 'ct_lines_removed_toggle_node', 'icon_name': '', 'color': 'blue', 'cta_text': 'Lines removed' }, 'keystrokes': { 'interaction_type': 'click', 'element_location': 'ct_metrics_tree', 'element_name': 'ct_keystrokes_toggle_node', 'icon_name': '', 'color': 'blue', 'cta_text': 'Keystrokes' }, 'files-changed': { 'interaction_type': 'click', 'element_location': 'ct_metrics_tree', 'element_name': 'ct_files_changed_toggle_node', 'icon_name': '', 'color': 'blue', 'cta_text': 'Files changed today' }, 'top-kpm-files': { 'interaction_type': 'click', 'element_location': 'ct_metrics_tree', 'element_name': 'ct_top_files_by_kpm_toggle_node', 'icon_name': '', 'color': 'blue', 'cta_text': 'Top files by KPM' }, 'top-keystrokes-files': { 'interaction_type': 'click', 'element_location': 'ct_metrics_tree', 'element_name': 'ct_top_files_by_keystrokes_toggle_node', 'icon_name': '', 'color': 'blue', 'cta_text': 'Top files by keystrokes' }, 'top-codetime-files': { 'interaction_type': 'click', 'element_location': 'ct_metrics_tree', 'element_name': 'ct_top_files_by_codetime_toggle_node', 'icon_name': '', 'color': 'blue', 'cta_text': 'Top files by code time' }, 'open-changes': { 'interaction_type': 'click', 'element_location': 
'ct_metrics_tree', 'element_name': 'ct_open_changes_toggle_node', 'icon_name': '', 'color': 'blue', 'cta_text': 'Open changes' }, 'committed-today': { 'interaction_type': 'click', 'element_location': 'ct_metrics_tree', 'element_name': 'ct_committed_today_toggle_node', 'icon_name': '', 'color': 'blue', 'cta_text': 'Committed today' }, 'contributors-repo-link': { 'interaction_type': 'click', 'element_location': 'ct_contributors_tree', 'element_name': 'ct_contributor_repo_identifier_btn', 'icon_name': '', 'color': 'blue', 'cta_text': 'redacted' }, 'view-dashboard': { 'interaction_type': 'keyboard', 'element_location': 'ct_command_palette', 'element_name': 'ct_summary_cmd', 'icon_name': '', 'color': '', 'cta_text': 'View Dashboard' }, 'toggle-status-bar-metrics': { 'interaction_type': 'keyboard', 'element_location': 'ct_command_palette', 'element_name': 'ct_toggle_status_bar_metrics_cmd', 'icon_name': '', 'color': '', 'cta_text': 'Show/Hide Status Bar Metrics' }, 'view-web-dashboard': { 'interaction_type': 'keyboard', 'element_location': 'ct_command_palette', 'element_name': 'ct_web_metrics_cmd', 'icon_name': '', 'color': '', 'cta_text': '' }, 'show-tree-view': { 'interaction_type': 'keyboard', 'element_location': 'ct_command_palette', 'element_name': 'ct_show_tree_view_cmd', 'icon_name': '', 'color': '', 'cta_text': 'Code Time: Show Tree View' }, 'pause-telemetry': { 'interaction_type': 'keyboard', 'element_location': 'ct_command_palette', 'element_name': 'ct_pause_telemetry_cmd', 'icon_name': '', 'color': '', 'cta_text': 'Code Time: Pause' }, 'enable-telemetry': { 'interaction_type': 'keyboard', 'element_location': 'ct_command_palette', 'element_name': 'ct_enable_telemetry_cmd', 'icon_name': '', 'color': '', 'cta_text': 'Code Time: Enable' } }
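

# ---------------------------------------------------------------------------
# Editor addition: a small illustrative accessor for the table above. The
# helper name is an assumption; the example key and value come from the dict.
# ---------------------------------------------------------------------------
def get_ui_interaction(name):
    """Return the tracking payload registered for ``name``, or {} if unknown."""
    return UI_INTERACTIONS.get(name, {})


# Example: get_ui_interaction('open-dashboard')['element_name'] == 'ct_summary_btn'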
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for topic bridging table."""

from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

import argparse

from googlecloudsdk.api_lib.functions import transforms
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties


@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class TopicBridge(base.Group):
  """Manage topic bridging table for the Edge device.

  Manage topic bridging table for the Edge device. A topic bridging table is
  similar to routing table. Each row of the table defines a rule. A rule will
  route topic from one domain (Edge or cloud) to another domain, and it can
  also route messages from a topic to another topic.
  """
from django.views.generic import (
    ListView,
    CreateView,
    UpdateView,
    DeleteView,
    DetailView,
)
from django.urls import reverse_lazy

from .models import Todo


class ListTodosView(ListView):
    model = Todo


class DetailTodoView(DetailView):
    model = Todo


class CreateTodoView(CreateView):
    model = Todo
    fields = ["title", "description", "priority"]

    def get_success_url(self):
        return reverse_lazy("todos:list")


class UpdateTodoView(UpdateView):
    model = Todo
    fields = ["title", "description", "priority"]

    def get_success_url(self):
        return reverse_lazy("todos:list")


class DeleteTodoView(DeleteView):
    model = Todo

    def get_success_url(self):
        return reverse_lazy("todos:list")
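

# ---------------------------------------------------------------------------
# Editor addition: an illustrative URLconf sketch for these views. Only the
# "todos:list" name is taken from the reverse_lazy() calls above; the app
# module path, the route paths, and the other route names are assumptions,
# so the sketch is kept commented out rather than executed here.
# ---------------------------------------------------------------------------
# # todos/urls.py
# from django.urls import path
# from . import views
#
# app_name = "todos"
# urlpatterns = [
#     path("", views.ListTodosView.as_view(), name="list"),
#     path("<int:pk>/", views.DetailTodoView.as_view(), name="detail"),
#     path("create/", views.CreateTodoView.as_view(), name="create"),
#     path("<int:pk>/update/", views.UpdateTodoView.as_view(), name="update"),
#     path("<int:pk>/delete/", views.DeleteTodoView.as_view(), name="delete"),
# ]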
import sys import bpy import threading from .signal import Signal from .utils import find_first_view3d class AnimationController: '''Provides an interface to Blender's animation system with fine-grained callbacks. To play nice with Blender, blendtorch provides a callback based class for interacting with the Blender's animation and rendering system. The usual way to interact with this class is through an object of AnimationController. Depending on the required callbacks, one or more signals are connected to Python functions. `AnimationController.play` starts the animation loop. By default `AnimationController.play` is non-blocking and therefore requires a non background instance of Blender. In case `--background` is required, `AnimationController.play` also supports blocking animation loop variant. In blocking execution, offscreen rendering works but may crash Blender once the loop is exited (2.83.2), and is therefore not recommended when image data is required. `AnimationController` exposes the following signals - pre_play() invoked before playing starts - pre_animation() invoked before first frame of animation range is processed - pre_frame() invoked before a frame begins - post_frame() invoked after a frame is finished - post_animation() invoked after the last animation frame has completed - post_play() invoked after playing ends ''' def __init__(self): '''Create a new instance.''' self.pre_animation = Signal() self.pre_frame = Signal() self.post_frame = Signal() self.post_animation = Signal() self.pre_play = Signal() self.post_play = Signal() self._plyctx = None class _PlayContext: '''Internal bookkeeping of animation veriables.''' def __init__(self, frame_range, num_episodes, use_animation, use_offline_render): self.frame_range = frame_range self.use_animation = use_animation self.use_offline_render = use_offline_render self.episode = 0 self.num_episodes = num_episodes self.pending_post_frame = False self.draw_handler = None self.draw_space = None self.last_post_frame = 0 def skip_post_frame(self, current_frame): return ( not self.pending_post_frame or self.last_post_frame == current_frame or ( self.use_animation and self.use_offline_render and bpy.context.space_data != self.draw_space ) ) @property def frameid(self): '''Returns the current frame id.''' return bpy.context.scene.frame_current def play(self, frame_range=None, num_episodes=-1, use_animation=True, use_offline_render=True, use_physics=True): '''Start the animation loop. Params ------ frame_range: tuple Start and end of frame range to play. Note that start and end are inclusive. num_episodes: int The number of loops to play. -1 loops forever. use_animation: bool Whether to use Blender's non-blocking animation system or use a blocking variant. By default True. When True, allows BlenderUI to refresh and be responsive. The animation will be run in target FPS. When false, does not allow Blender UI to refresh. The animation runs as fast as it can. use_offline_render: bool Whether offline rendering should be supported. By default True. When True, calls to `OffscreenRenderer` are safe inside the `post_frame` callback. use_physics: bool Whether physics should be enabled. Default is True. When True, sets the simulation range to match the animation frame range. 
''' assert self._plyctx is None, 'Animation already running' self._plyctx = AnimationController._PlayContext( frame_range=AnimationController.setup_frame_range(frame_range, physics=use_physics), num_episodes=(num_episodes if num_episodes >= 0 else sys.maxsize), use_animation=use_animation, use_offline_render=use_offline_render ) if use_animation: self._play_animation() else: self._play_manual() @staticmethod def setup_frame_range(frame_range, physics=True): '''Setup the animation and physics frame range. Params ------ frame_range: tuple Start and end (inclusive) frame range to be animated. Can be None, in which case the scenes frame range is used. physics: bool Whether or not to apply the frame range settings to the rigid body simulation. Returns ------- frame_range: tuple the updated frame range. ''' if frame_range is None: frame_range = (bpy.context.scene.frame_start, bpy.context.scene.frame_end) bpy.context.scene.frame_start = frame_range[0] bpy.context.scene.frame_end = frame_range[1] if physics and bpy.context.scene.rigidbody_world: bpy.context.scene.rigidbody_world.point_cache.frame_start = frame_range[0] bpy.context.scene.rigidbody_world.point_cache.frame_end = frame_range[1] return frame_range def _play_animation(self): '''Setup and start Blender animation loop.''' self.pre_play.invoke() bpy.app.handlers.frame_change_pre.append(self._on_pre_frame) if self._plyctx.use_offline_render: # To be save, we need to draw from `POST_PIXEL` not `frame_change_post`. # However `POST_PIXEL` might be called more than once per frame. We therefore # set and release `pending_post_pixel` to match things up. _, self._plyctx.draw_space, _ = find_first_view3d() self._plyctx.draw_handler = bpy.types.SpaceView3D.draw_handler_add(self._on_post_frame, (), 'WINDOW', 'POST_PIXEL') else: bpy.app.handlers.frame_change_post.append(self._on_post_frame) # Set to first frame. bpy.context.scene.frame_set(self._plyctx.frame_range[0]) # The following does not block. Note, in --offscreen this does nothing. 
bpy.ops.screen.animation_play() def _play_manual(self): '''Setup and start blocking animation loop.''' self.pre_play.invoke() bpy.app.handlers.frame_change_pre.append(self._on_pre_frame) bpy.app.handlers.frame_change_post.append(self._on_post_frame) while self._plyctx.episode < self._plyctx.num_episodes: bpy.context.scene.frame_set(self._plyctx.frame_range[0]) while self.frameid < self._plyctx.frame_range[1]: bpy.context.scene.frame_set(self.frameid+1) if self._plyctx == None: # The above frame_set might have called _cancel, return # which in turn deletes _plyctx def rewind(self): '''Request resetting the animation to first frame.''' if self._plyctx is not None: self._set_frame(self._plyctx.frame_range[0]) def _set_frame(self, frame_index): '''Step to a specific frame.''' bpy.context.scene.frame_set(frame_index) def _on_pre_frame(self, scene, *args): '''Handle pre-frame events internally.''' pre_first = (self.frameid == self._plyctx.frame_range[0]) if pre_first: self.pre_animation.invoke() self.pre_frame.invoke() # The following guards us from multiple calls to `_on_post_frame` # when we hooked into `POST_PIXEL` self._plyctx.pending_post_frame = True def _on_post_frame(self, *args): '''Handle post-frame events internally.''' if self._plyctx.skip_post_frame(self.frameid): return self._plyctx.pending_post_frame = False self._plyctx.last_post_frame = self.frameid self.post_frame.invoke() post_last = (self.frameid == self._plyctx.frame_range[1]) if post_last: self.post_animation.invoke() self._plyctx.episode += 1 if self._plyctx.episode == self._plyctx.num_episodes: self._cancel() def _cancel(self): '''Stop the animation.''' bpy.app.handlers.frame_change_pre.remove(self._on_pre_frame) if self._plyctx.draw_handler != None: bpy.types.SpaceView3D.draw_handler_remove(self._plyctx.draw_handler, 'WINDOW') self._plyctx.draw_handler = None else: bpy.app.handlers.frame_change_post.remove(self._on_post_frame) bpy.ops.screen.animation_cancel(restore_frame=False) self.post_play.invoke() del self._plyctx self._plyctx = None
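

# ---------------------------------------------------------------------------
# Editor addition: an illustrative usage sketch based on the class docstring
# above. It must run inside Blender's Python (where bpy and a scene are
# available); the frame range is a placeholder, and the callback-connection
# API of `Signal` lives in .signal and is not shown in this file, so it is
# only referenced in a comment rather than called.
# ---------------------------------------------------------------------------
def _example_blocking_play():
    ctrl = AnimationController()
    # Callbacks would be connected here through the Signal objects
    # (ctrl.pre_frame, ctrl.post_frame, ...) using whatever connect API
    # blendtorch's Signal class provides.
    ctrl.play(frame_range=(1, 50), num_episodes=1,
              use_animation=False, use_offline_render=False)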
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon May 17 11:01:58 2021 @author: root """ import sklearn from sklearn import datasets import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap import seaborn as sns import torch.nn.functional as F import torch.nn as nn from torch.utils.data import DataLoader from torch.utils.data import Dataset import torch from utils_two_moons import evaluate_model, brier_score, expectation_calibration_error from utils_two_moons import NeuralNet, MCDropout, EnsembleNeuralNet from utils_two_moons import mixup_log_loss from training_loops import train_model_dropout from utils_two_moons import MyData from sklearn.neighbors import KernelDensity from sklearn.decomposition import PCA from sklearn.model_selection import GridSearchCV def get_device(): if torch.cuda.is_available(): device = 'cuda:0' else: device = 'cpu' return device device = get_device() ################# 1.CREATE THE DATASETS ################# cm_bright = ListedColormap(['#FF0000', '#0000FF']) batch_sample = 1000 X,Y = datasets.make_moons(n_samples=batch_sample, shuffle=True, noise=.1, random_state=None) X_test,Y_test = datasets.make_moons(n_samples=batch_sample, shuffle=True, noise=.1, random_state=None) plt.scatter(X[:, 0], X[:, 1], c=Y) # Scale in x and y directions aug_x = (1.5 - 0.5) * np.random.rand() + 0.5 aug_y = (2.5 - 1.5) * np.random.rand() + 1.5 aug = np.array([aug_x, aug_y]) X_scale = X * aug plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=cm_bright) plt.scatter(X_scale[:, 0], X_scale[:, 1], marker='+',c=Y, cmap=cm_bright, alpha=0.4) ## rotation of 45 degrees theta = (np.pi/180)* -35 rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]]) X_rot = np.dot(X,rotation_matrix) plt.scatter(X[:, 0], X[:, 1], c=Y,cmap=cm_bright) plt.scatter(X_rot[:, 0], X_rot[:, 1], marker='+', c=Y, cmap=cm_bright, alpha=0.4) # We create the same dataset with more noise X_noise,Y_noise = datasets.make_moons(n_samples=batch_sample, shuffle=True, noise=.3, random_state=None) plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=cm_bright) plt.scatter(X_noise[:, 0], X_noise[:, 1], marker='+', c=Y_noise, cmap=cm_bright, alpha=0.4) train_dataset = MyData(data=X,labels=Y) test_dataset = MyData(data=X_test,labels=Y_test) scale_dataset = MyData(X_scale, Y) rot_dataset = MyData(X_rot, Y) noise_dataset = MyData(X_noise, Y_noise) trainLoader = DataLoader(train_dataset, batch_size=batch_sample) testLoader = DataLoader(test_dataset, batch_size=batch_sample) scaleLoader = DataLoader(scale_dataset, batch_size=batch_sample) rotLoader = DataLoader(rot_dataset, batch_size=batch_sample) noiseLoader = DataLoader(noise_dataset, batch_size=batch_sample) ################# 2.TRAINING ################# # Simple Neural Network base_nn = NeuralNet(input_dim=2, hidden_dim=10, output_dim=2).double() optimizer = torch.optim.Adam(base_nn.parameters(), lr=0.01) MC_sample=1 crit = nn.CrossEntropyLoss() n_epochs = 500 _, training_loss = train_model_dropout(base_nn, None, MC_sample, trainLoader, n_epochs, crit, optimizer, no_classes=2) # Neural Network with MC Dropout vi_nn = MCDropout(input_dim=2, hidden_dim=10, output_dim=2).double() optimizer = torch.optim.Adam(vi_nn.parameters(), lr=0.01) MC_sample=50 crit = nn.CrossEntropyLoss() n_epochs = 500 _, training_loss = train_model_dropout(vi_nn, None, MC_sample, trainLoader, n_epochs, crit, optimizer, no_classes=2) # project the 64-dimensional data to a lower dimension def estimate_input_density(data): # project the 
64-dimensional data to a lower dimension pca = PCA(n_components=2, whiten=False) data = pca.fit_transform(data) # use grid search cross-validation to optimize the bandwidth params = {'bandwidth': np.logspace(-1, 1, 20)} grid = GridSearchCV(KernelDensity(), params) grid.fit(data) print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth)) # use the best estimator to compute the kernel density estimate kde = grid.best_estimator_ return kde, pca kde, pca = estimate_input_density(X) ## Train an ensemble of NN def train_ensemble(N, n_epochs, trainLoader): ensembles = [] for i in range(N): base_nn = NeuralNet(input_dim=2, hidden_dim=10, output_dim=2).double() optimizer = torch.optim.Adam(base_nn.parameters(), lr=0.01) MC_sample=1 crit = nn.CrossEntropyLoss() _, training_loss = train_model_dropout(base_nn, None, MC_sample, trainLoader, n_epochs, crit, optimizer, no_classes=2) ensembles.append(base_nn) return ensembles ensemble = train_ensemble(5, 500, trainLoader) ensemble_nn = EnsembleNeuralNet(ensemble) ## Train with mixup # Simple Neural Network mu_nn = NeuralNet(input_dim=2, hidden_dim=10, output_dim=2).double() optimizer = torch.optim.Adam(mu_nn.parameters(), lr=0.01) MC_sample=1 crit = mixup_log_loss n_epochs = 500 _, training_loss = train_model_dropout(mu_nn, None, MC_sample, trainLoader, n_epochs, crit, optimizer, no_classes=2, mixup=True) ## Train Fast Gradient Sign Method # Simple Neural Network fgsm_nn = NeuralNet(input_dim=2, hidden_dim=10, output_dim=2).double() optimizer = torch.optim.Adam(fgsm_nn.parameters(), lr=0.01) MC_sample=1 crit = nn.CrossEntropyLoss() n_epochs = 500 _, training_loss = train_model_dropout(fgsm_nn, None, MC_sample, trainLoader, n_epochs, crit, optimizer, no_classes=2, mixup=False, fgsm=True) #plt.plot(training_loss) # Train using the density #base_density_nn = NeuralNet(input_dim=2, hidden_dim=10, output_dim=2).double() #optimizer = torch.optim.Adam(base_density_nn.parameters(), lr=0.01) #MC_sample=1 #crit = nn.CrossEntropyLoss() #n_epochs = 500 #_, training_loss = train_model_dropout(base_density_nn, None, MC_sample, trainLoader, n_epochs, crit, optimizer, no_classes=2, kde=kde, pca=pca) ################# 3.EVALUATION BASED ON ACCURCAY ################# from uncertainty import sample_lowest_entropy, sample_highest_density, sample_lowest_entropy_highest_density retained = [50, 60, 70, 80, 90, 100] def model_accuracy_over_low_entropy_high_density_data_retained(model,kde, pca, data, label, MC_sample, no_classes): """ This function will retain the data with the highest density at 6 different levels and put them in loaders. Furthermore, the accuracy at each level will be computed The accuracies can be used to plot how the accuracy drops while we increase data the loader allow to have access to the data sampled with the high density criterion. 
""" loader50 = sample_lowest_entropy_highest_density(.5, model, kde, pca, data, label, MC_sample, no_classes) loader60 = sample_lowest_entropy_highest_density(.6, model, kde, pca, data, label, MC_sample, no_classes) loader70 = sample_lowest_entropy_highest_density(.7, model, kde, pca, data, label, MC_sample, no_classes) loader80 = sample_lowest_entropy_highest_density(.8, model, kde, pca, data, label, MC_sample, no_classes) loader90 = sample_lowest_entropy_highest_density(.9, model, kde, pca, data, label, MC_sample, no_classes) loader100 = sample_lowest_entropy_highest_density(1., model, kde, pca, data, label, MC_sample, no_classes) acc_50 = evaluate_model(model, loader50, MC_sample, no_classes=2) acc_60 = evaluate_model(model, loader60, MC_sample, no_classes=2) acc_70 = evaluate_model(model, loader70, MC_sample, no_classes=2) acc_80 = evaluate_model(model, loader80, MC_sample, no_classes=2) acc_90 = evaluate_model(model, loader90, MC_sample, no_classes=2) acc_100 = evaluate_model(model, loader100, MC_sample, no_classes=2) acc = [acc_50, acc_60, acc_70, acc_80, acc_90, acc_100] loaders = [loader50, loader60, loader70, loader80, loader90, loader100] return acc, loaders def model_accuracy_over_high_density_data_retained(model,kde, pca, data, label, MC_sample, no_classes): """ This function will retain the data with the highest density at 6 different levels and put them in loaders. Furthermore, the accuracy at each level will be computed The accuracies can be used to plot how the accuracy drops while we increase data the loader allow to have access to the data sampled with the high density criterion. """ loader50 = sample_highest_density(0.5, kde, pca, data, label) loader60 = sample_highest_density(0.6, kde, pca, data, label) loader70 = sample_highest_density(0.7, kde, pca, data, label) loader80 = sample_highest_density(0.8, kde, pca, data, label) loader90 = sample_highest_density(0.9, kde, pca, data, label) loader100 = sample_lowest_entropy(1., model, data, label, MC_sample, no_classes) acc_50 = evaluate_model(model, loader50, MC_sample, no_classes=2) acc_60 = evaluate_model(model, loader60, MC_sample, no_classes=2) acc_70 = evaluate_model(model, loader70, MC_sample, no_classes=2) acc_80 = evaluate_model(model, loader80, MC_sample, no_classes=2) acc_90 = evaluate_model(model, loader90, MC_sample, no_classes=2) acc_100 = evaluate_model(model, loader100, MC_sample, no_classes=2) acc = [acc_50, acc_60, acc_70, acc_80, acc_90, acc_100] loaders = [loader50, loader60, loader70, loader80, loader90, loader100] return acc, loaders def model_accuracy_over_low_entropy_data_retained(model, data, label, MC_sample, no_classes): """ This function will retain the data with the lowest entropy at 6 different levels and put them in loaders. Furthermore, the accuracy at each level will be computed and returned along with the associated loaders. The accuracies can be used to plot how the accuracy drops while we increase data the loader allow to have access to the data sampled with the low entropy criterion. 
""" loader50 = sample_lowest_entropy(0.5, model, data, label, MC_sample, no_classes) loader60 = sample_lowest_entropy(0.6, model, data, label, MC_sample, no_classes) loader70 = sample_lowest_entropy(0.7, model, data, label, MC_sample, no_classes) loader80 = sample_lowest_entropy(0.8, model, data, label, MC_sample, no_classes) loader90 = sample_lowest_entropy(0.9, model, data, label, MC_sample, no_classes) loader100 = sample_lowest_entropy(1., model, data, label, MC_sample, no_classes) acc_50 = evaluate_model(model, loader50, MC_sample, no_classes=2) acc_60 = evaluate_model(model, loader60, MC_sample, no_classes=2) acc_70 = evaluate_model(model, loader70, MC_sample, no_classes=2) acc_80 = evaluate_model(model, loader80, MC_sample, no_classes=2) acc_90 = evaluate_model(model, loader90, MC_sample, no_classes=2) acc_100 = evaluate_model(model, loader100, MC_sample, no_classes=2) acc = [acc_50, acc_60, acc_70, acc_80, acc_90, acc_100] loaders = [loader50, loader60, loader70, loader80, loader90, loader100] return acc, loaders ### Comparing sampling methods against each others def aggregate_accuracy_perturbation_retained_data(model, kde, pca, datasets, labels, MC_sample, no_classes): X_test, X_scale, X_rot, X_noise = datasets Y_test, Y, Y_noise = labels test_ende_acc, test_ende_loaders = model_accuracy_over_low_entropy_high_density_data_retained(model,kde, pca, X_test, Y_test, MC_sample=1, no_classes=2) test_en_acc, test_en_loaders = model_accuracy_over_low_entropy_data_retained(model, X_test, Y_test, MC_sample=1, no_classes=2) test_de_acc, test_de_loaders = model_accuracy_over_high_density_data_retained(model,kde, pca, X_test, Y_test, MC_sample=1, no_classes=2) scale_ende_acc, scale_ende_loaders = model_accuracy_over_low_entropy_high_density_data_retained(model,kde, pca, X_scale, Y, MC_sample=1, no_classes=2) scale_en_acc, scale_en_loaders = model_accuracy_over_low_entropy_data_retained(model, X_scale, Y, MC_sample=1, no_classes=2) scale_de_acc, scale_de_loaders = model_accuracy_over_high_density_data_retained(model,kde, pca, X_scale, Y, MC_sample=1, no_classes=2) noise_ende_acc, noise_ende_loaders = model_accuracy_over_low_entropy_high_density_data_retained(model,kde, pca, X_noise, Y_noise, MC_sample=1, no_classes=2) noise_en_acc, noise_en_loaders = model_accuracy_over_low_entropy_data_retained(model, X_noise, Y_noise, MC_sample=1, no_classes=2) noise_de_acc, noise_de_loaders = model_accuracy_over_high_density_data_retained(model,kde, pca, X_noise, Y_noise, MC_sample=1, no_classes=2) rot_ende_acc, rot_ende_loaders = model_accuracy_over_low_entropy_high_density_data_retained(model,kde, pca, X_rot, Y, MC_sample=1, no_classes=2) rot_en_acc, rot_en_loaders = model_accuracy_over_low_entropy_data_retained(model, X_rot, Y, MC_sample=1, no_classes=2) rot_de_acc, rot_de_loaders = model_accuracy_over_high_density_data_retained(model,kde, pca, X_rot, Y, MC_sample=1, no_classes=2) aggregate_ende = np.concatenate([test_ende_acc, scale_ende_acc, noise_ende_acc, rot_ende_acc], 1) aggregate_en = np.concatenate([test_en_acc, scale_en_acc, noise_en_acc, rot_en_acc], 1) aggregate_de = np.concatenate([test_de_acc, scale_de_acc, noise_de_acc, rot_de_acc], 1) loaders_ende = [test_ende_loaders, scale_ende_loaders, noise_ende_loaders, rot_ende_loaders] loaders_en = [test_en_loaders, scale_en_loaders, noise_en_loaders, rot_en_loaders] loaders_de = [test_de_loaders, scale_de_loaders, noise_de_loaders, rot_de_loaders] return (aggregate_ende, aggregate_en, aggregate_de), (loaders_ende, loaders_en, loaders_de) datasets = 
[X_test, X_scale, X_rot, X_noise] labels = [Y_test, Y, Y_noise] (base_ende, base_en, base_de), base_loaders = aggregate_accuracy_perturbation_retained_data(base_nn, kde, pca, datasets, labels, 1, 2) vi_ende, vi_en, vi_de = aggregate_accuracy_perturbation_retained_data(vi_nn, kde, pca, datasets, labels, 50, 2) en_ende, en_en, en_de = aggregate_accuracy_perturbation_retained_data(ensemble_nn, kde, pca, datasets, labels, 1, 2) mu_ende, mu_en, mu_de = aggregate_accuracy_perturbation_retained_data(mu_nn, kde, pca, datasets, labels, 1, 2) ad_ende, ad_en, ad_de = aggregate_accuracy_perturbation_retained_data(fgsm_nn, kde, pca, datasets, labels, 1, 2) fig, ax = plt.subplots(1,5, figsize=(22,4)) ax[0].set_ylabel("Aggregate over perturbations") ax[0].plot(base_ende.mean(1), label="Entropy-Density") ax[0].plot(base_en.mean(1), label="Entropy") ax[0].plot(base_de.mean(1), label="Density") ax[0].legend() ax[0].set_title("Softmax") ax[1].plot(vi_ende.mean(1), label="Entropy-Density") ax[1].plot(vi_en.mean(1), label="Entropy") ax[1].plot(vi_de.mean(1), label="Density") ax[1].legend() ax[1].set_title("Dropout") ax[2].plot(en_ende.mean(1), label="Entropy-Density") ax[2].plot(en_en.mean(1), label="Entropy") ax[2].plot(en_de.mean(1), label="Density") ax[2].legend() ax[2].set_title("Ensemble") ax[3].plot(mu_ende.mean(1), label="Entropy-Density") ax[3].plot(mu_en.mean(1), label="Entropy") ax[3].plot(mu_de.mean(1), label="Density") ax[3].legend() ax[3].set_title("Mixup") ax[4].plot(ad_ende.mean(1), label="Entropy-Density") ax[4].plot(ad_en.mean(1), label="Entropy") ax[4].plot(ad_de.mean(1), label="Density") ax[4].legend() ax[4].set_title("FGSM") plt.savefig("retained_aggregate_over_perturbation") # Plot the aggregate accuracy with data retained fig, ax = plt.subplots(1,4, figsize=(22,4)) ax[0].plot(base_en[0], label="Entropy") ax[0].plot(base_de[0], label="Density") #ax[0].plot(base_test_de2_acc, label="Density relaxed 2") #ax[0].plot(base_test_de1_1_acc, label="Density relaxed 1.1") ax[0].plot(base_ende[0], label="Entropy-Density") ax[0].legend() ax[0].set_title("Test data") ax[1].plot(base_en[1], label="Entropy") ax[1].plot(base_de[1], label="Density") #ax[1].plot(base_scale_de2_acc, label="Density relaxed 2") #ax[1].plot(base_scale_de1_1_acc, label="Density relaxed 1.1") ax[1].plot(base_ende[1], label="Entropy-Density") ax[1].legend() ax[1].set_title("Scale data") ax[2].plot(base_en[2], label="Entropy") ax[2].plot(base_de[2], label="Density") #ax[2].plot(base_noise_de2_acc, label="Density relaxed 2") #ax[2].plot(base_noise_de1_1_acc, label="Density relaxed 1.1") ax[2].plot(base_ende[2], label="Entropy-Density") ax[2].legend() ax[2].set_title("Noise data") ax[3].plot(base_en[3], label="Entropy") ax[3].plot(base_de[3], label="Density") #ax[3].plot(base_rot_de2_acc, label="Density relaxed 2") #ax[3].plot(base_rot_de1_1_acc, label="Density relaxed 1.1") ax[3].plot(base_ende[3], label="Entropy-Density") ax[3].legend() ax[3].set_title("Rotation data") plt.savefig("retained_lowestEntropy_highestDensity") ### Comparing methods agains each others # Accuracies for data retained on the test set base_test_acc, base_test_loaders = model_accuracy_over_low_entropy_data_retained(base_nn, X_test, Y_test, MC_sample=1, no_classes=2) vi_test_acc, vi_test_loaders = model_accuracy_over_low_entropy_data_retained(vi_nn, X_test, Y_test, MC_sample=50, no_classes=2) en_test_acc, en_test_loaders = model_accuracy_over_low_entropy_data_retained(ensemble_nn, X_test, Y_test, MC_sample=1, no_classes=2) mu_test_acc, mu_test_loaders = 
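# As with entropy above, the density criterion (again, implementation not
# shown) can be pictured as keeping the fraction `p` of points with the
# highest estimated log-density under the KDE fitted earlier. This sketch is
# an illustration only, not the actual `sample_highest_density` helper:
def retain_highest_density_sketch(p, kde, pca, data, label):
    log_dens = kde.score_samples(pca.transform(data))
    keep = np.argsort(log_dens)[::-1][:int(p * len(data))]  # highest density first
    return data[keep], label[keep]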
### Comparing methods against each other
# Accuracies for data retained on the test set
base_test_acc, base_test_loaders = model_accuracy_over_low_entropy_data_retained(base_nn, X_test, Y_test, MC_sample=1, no_classes=2)
vi_test_acc, vi_test_loaders = model_accuracy_over_low_entropy_data_retained(vi_nn, X_test, Y_test, MC_sample=50, no_classes=2)
en_test_acc, en_test_loaders = model_accuracy_over_low_entropy_data_retained(ensemble_nn, X_test, Y_test, MC_sample=1, no_classes=2)
mu_test_acc, mu_test_loaders = model_accuracy_over_low_entropy_data_retained(mu_nn, X_test, Y_test, MC_sample=1, no_classes=2)
ad_test_acc, ad_test_loaders = model_accuracy_over_low_entropy_data_retained(fgsm_nn, X_test, Y_test, MC_sample=1, no_classes=2)
pde_test_acc, pde_test_loaders = model_accuracy_over_high_density_data_retained(base_nn, kde, pca, X_test, Y_test, MC_sample=1, no_classes=2)

# Accuracies for data retained on the scale perturbation set
base_scale_acc, base_scale_loaders = model_accuracy_over_low_entropy_data_retained(base_nn, X_scale, Y, MC_sample=1, no_classes=2)
vi_scale_acc, vi_scale_loaders = model_accuracy_over_low_entropy_data_retained(vi_nn, X_scale, Y, MC_sample=50, no_classes=2)
en_scale_acc, en_scale_loaders = model_accuracy_over_low_entropy_data_retained(ensemble_nn, X_scale, Y, MC_sample=1, no_classes=2)
mu_scale_acc, mu_scale_loaders = model_accuracy_over_low_entropy_data_retained(mu_nn, X_scale, Y, MC_sample=1, no_classes=2)
ad_scale_acc, ad_scale_loaders = model_accuracy_over_low_entropy_data_retained(fgsm_nn, X_scale, Y, MC_sample=1, no_classes=2)
pde_scale_acc, pde_scale_loaders = model_accuracy_over_high_density_data_retained(base_nn, kde, pca, X_scale, Y, MC_sample=1, no_classes=2)

# Accuracies for data retained on the rotation perturbation set
base_rot_acc, base_rot_loaders = model_accuracy_over_low_entropy_data_retained(base_nn, X_rot, Y, MC_sample=1, no_classes=2)
vi_rot_acc, vi_rot_loaders = model_accuracy_over_low_entropy_data_retained(vi_nn, X_rot, Y, MC_sample=50, no_classes=2)
en_rot_acc, en_rot_loaders = model_accuracy_over_low_entropy_data_retained(ensemble_nn, X_rot, Y, MC_sample=1, no_classes=2)
mu_rot_acc, mu_rot_loaders = model_accuracy_over_low_entropy_data_retained(mu_nn, X_rot, Y, MC_sample=1, no_classes=2)
ad_rot_acc, ad_rot_loaders = model_accuracy_over_low_entropy_data_retained(fgsm_nn, X_rot, Y, MC_sample=1, no_classes=2)
pde_rot_acc, pde_rot_loaders = model_accuracy_over_high_density_data_retained(base_nn, kde, pca, X_rot, Y, MC_sample=1, no_classes=2)

# Accuracies for data retained on the noise perturbation set
base_noise_acc, base_noise_loaders = model_accuracy_over_low_entropy_data_retained(base_nn, X_noise, Y_noise, MC_sample=1, no_classes=2)
vi_noise_acc, vi_noise_loaders = model_accuracy_over_low_entropy_data_retained(vi_nn, X_noise, Y_noise, MC_sample=50, no_classes=2)
en_noise_acc, en_noise_loaders = model_accuracy_over_low_entropy_data_retained(ensemble_nn, X_noise, Y_noise, MC_sample=1, no_classes=2)
mu_noise_acc, mu_noise_loaders = model_accuracy_over_low_entropy_data_retained(mu_nn, X_noise, Y_noise, MC_sample=1, no_classes=2)
ad_noise_acc, ad_noise_loaders = model_accuracy_over_low_entropy_data_retained(fgsm_nn, X_noise, Y_noise, MC_sample=1, no_classes=2)
pde_noise_acc, pde_noise_loaders = model_accuracy_over_high_density_data_retained(base_nn, kde, pca, X_noise, Y_noise, MC_sample=1, no_classes=2)

# Plot the aggregate accuracy with data retained
fig, ax = plt.subplots(1, 4, figsize=(22, 4))
ax[0].plot(retained, base_test_acc, label="Base")
ax[0].plot(retained, vi_test_acc, label="Dropout")
ax[0].plot(retained, en_test_acc, label="Ensemble")
ax[0].plot(retained, mu_test_acc, label="Mixup")
ax[0].plot(retained, ad_test_acc, label="FGSM")
ax[0].plot(retained, pde_test_acc, label="PDE")
ax[0].set_title("Test Set")
ax[1].plot(retained, base_scale_acc, label="Base")
ax[1].plot(retained, vi_scale_acc, label="Dropout")
ax[1].plot(retained, en_scale_acc, label="Ensemble")
ax[1].plot(retained, mu_scale_acc, label="Mixup")
ax[1].plot(retained, ad_scale_acc, label="FGSM")
ax[1].plot(retained, pde_scale_acc, label="PDE")
ax[1].set_title("Scale Perturbation")
ax[2].plot(retained, base_rot_acc, label="Base")
ax[2].plot(retained, vi_rot_acc, label="Dropout")
ax[2].plot(retained, en_rot_acc, label="Ensemble")
ax[2].plot(retained, mu_rot_acc, label="Mixup")
ax[2].plot(retained, ad_rot_acc, label="FGSM")
ax[2].plot(retained, pde_rot_acc, label="PDE")
ax[2].set_title("Rotation Perturbation")
ax[3].plot(retained, base_noise_acc, label="Base")
ax[3].plot(retained, vi_noise_acc, label="Dropout")
ax[3].plot(retained, en_noise_acc, label="Ensemble")
ax[3].plot(retained, mu_noise_acc, label="Mixup")
ax[3].plot(retained, ad_noise_acc, label="FGSM")
ax[3].plot(retained, pde_noise_acc, label="PDE")
ax[3].set_title("Noise Perturbation")
ax[3].legend(loc="upper left", bbox_to_anchor=(1, 1))
plt.savefig("retained_aggregate_accuracy", dpi=300)

################ 4. EVALUATION BASED ON AUC ################
import sklearn.metrics

def compute_auc_models(model, loaders, vi=False):
    loader50, loader60, loader70, loader80, loader90, loader100 = loaders
    if vi:
        # average the positive-class probability over 50 stochastic forward passes
        Y_pred50 = torch.cat([torch.sigmoid(model(torch.tensor(loader50.dataset.data)))[:, 1:] for i in range(50)], 1).mean(1).detach().numpy()
        Y_pred60 = torch.cat([torch.sigmoid(model(torch.tensor(loader60.dataset.data)))[:, 1:] for i in range(50)], 1).mean(1).detach().numpy()
        Y_pred70 = torch.cat([torch.sigmoid(model(torch.tensor(loader70.dataset.data)))[:, 1:] for i in range(50)], 1).mean(1).detach().numpy()
        Y_pred80 = torch.cat([torch.sigmoid(model(torch.tensor(loader80.dataset.data)))[:, 1:] for i in range(50)], 1).mean(1).detach().numpy()
        Y_pred90 = torch.cat([torch.sigmoid(model(torch.tensor(loader90.dataset.data)))[:, 1:] for i in range(50)], 1).mean(1).detach().numpy()
        Y_pred100 = torch.cat([torch.sigmoid(model(torch.tensor(loader100.dataset.data)))[:, 1:] for i in range(50)], 1).mean(1).detach().numpy()
    else:
        Y_pred50 = torch.sigmoid(model(torch.tensor(loader50.dataset.data)))[:, 1].detach().numpy()
        Y_pred60 = torch.sigmoid(model(torch.tensor(loader60.dataset.data)))[:, 1].detach().numpy()
        Y_pred70 = torch.sigmoid(model(torch.tensor(loader70.dataset.data)))[:, 1].detach().numpy()
        Y_pred80 = torch.sigmoid(model(torch.tensor(loader80.dataset.data)))[:, 1].detach().numpy()
        Y_pred90 = torch.sigmoid(model(torch.tensor(loader90.dataset.data)))[:, 1].detach().numpy()
        Y_pred100 = torch.sigmoid(model(torch.tensor(loader100.dataset.data)))[:, 1].detach().numpy()

    auc50 = sklearn.metrics.roc_auc_score(loader50.dataset.labels, Y_pred50)
    auc60 = sklearn.metrics.roc_auc_score(loader60.dataset.labels, Y_pred60)
    auc70 = sklearn.metrics.roc_auc_score(loader70.dataset.labels, Y_pred70)
    auc80 = sklearn.metrics.roc_auc_score(loader80.dataset.labels, Y_pred80)
    auc90 = sklearn.metrics.roc_auc_score(loader90.dataset.labels, Y_pred90)
    auc100 = sklearn.metrics.roc_auc_score(loader100.dataset.labels, Y_pred100)

    return [auc50, auc60, auc70, auc80, auc90, auc100]

# AUC for data retained on the test set
base_auc_test = compute_auc_models(base_nn, base_test_loaders, vi=False)
vi_auc_test = compute_auc_models(vi_nn, vi_test_loaders, vi=True)
en_auc_test = compute_auc_models(ensemble_nn, en_test_loaders, vi=False)
mu_auc_test = compute_auc_models(mu_nn, mu_test_loaders, vi=False)
ad_auc_test = compute_auc_models(fgsm_nn, ad_test_loaders, vi=False)
pde_auc_test = compute_auc_models(base_nn, pde_test_loaders, vi=False)

# AUC for data retained on the scale perturbation set
base_auc_scale = compute_auc_models(base_nn, base_scale_loaders, vi=False)
vi_auc_scale = compute_auc_models(vi_nn, vi_scale_loaders, vi=True)
en_auc_scale = compute_auc_models(ensemble_nn, en_scale_loaders, vi=False)
mu_auc_scale = compute_auc_models(mu_nn, mu_scale_loaders, vi=False)
ad_auc_scale = compute_auc_models(fgsm_nn, ad_scale_loaders, vi=False)
pde_auc_scale = compute_auc_models(base_nn, pde_scale_loaders, vi=False)

# AUC for data retained on the rotation perturbation set
base_auc_rot = compute_auc_models(base_nn, base_rot_loaders, vi=False)
vi_auc_rot = compute_auc_models(vi_nn, vi_rot_loaders, vi=True)
en_auc_rot = compute_auc_models(ensemble_nn, en_rot_loaders, vi=False)
mu_auc_rot = compute_auc_models(mu_nn, mu_rot_loaders, vi=False)
ad_auc_rot = compute_auc_models(fgsm_nn, ad_rot_loaders, vi=False)
pde_auc_rot = compute_auc_models(base_nn, pde_rot_loaders, vi=False)

# AUC for data retained on the noise perturbation set
base_auc_noise = compute_auc_models(base_nn, base_noise_loaders, vi=False)
vi_auc_noise = compute_auc_models(vi_nn, vi_noise_loaders, vi=True)
en_auc_noise = compute_auc_models(ensemble_nn, en_noise_loaders, vi=False)
mu_auc_noise = compute_auc_models(mu_nn, mu_noise_loaders, vi=False)
ad_auc_noise = compute_auc_models(fgsm_nn, ad_noise_loaders, vi=False)
pde_auc_noise = compute_auc_models(base_nn, pde_noise_loaders, vi=False)

# Plot the aggregate AUC with data retained
fig, ax = plt.subplots(1, 4, figsize=(22, 4))
ax[0].plot(retained, base_auc_test, label="Base")
ax[0].plot(retained, vi_auc_test, label="Dropout")
ax[0].plot(retained, en_auc_test, label="Ensemble")
ax[0].plot(retained, mu_auc_test, label="Mixup")
ax[0].plot(retained, ad_auc_test, label="FGSM")
ax[0].plot(retained, pde_auc_test, label="PDE")
ax[0].set_title("Test Set")
ax[1].plot(retained, base_auc_scale, label="Base")
ax[1].plot(retained, vi_auc_scale, label="Dropout")
ax[1].plot(retained, en_auc_scale, label="Ensemble")
ax[1].plot(retained, mu_auc_scale, label="Mixup")
ax[1].plot(retained, ad_auc_scale, label="FGSM")
ax[1].plot(retained, pde_auc_scale, label="PDE")
ax[1].set_title("Scale Perturbation")
ax[2].plot(retained, base_auc_rot, label="Base")
ax[2].plot(retained, vi_auc_rot, label="Dropout")
ax[2].plot(retained, en_auc_rot, label="Ensemble")
ax[2].plot(retained, mu_auc_rot, label="Mixup")
ax[2].plot(retained, ad_auc_rot, label="FGSM")
ax[2].plot(retained, pde_auc_rot, label="PDE")
ax[2].set_title("Rotation Perturbation")
ax[3].plot(retained, base_auc_noise, label="Base")
ax[3].plot(retained, vi_auc_noise, label="Dropout")
ax[3].plot(retained, en_auc_noise, label="Ensemble")
ax[3].plot(retained, mu_auc_noise, label="Mixup")
ax[3].plot(retained, ad_auc_noise, label="FGSM")
ax[3].plot(retained, pde_auc_noise, label="PDE")
ax[3].set_title("Noise Perturbation")
ax[3].legend(loc="upper left", bbox_to_anchor=(1, 1))
plt.savefig("retained_aggregate_auc", dpi=300)

################# 5. DRAW DECISION BOUNDARIES #################
def negatify(X):
    # map class-1 probabilities below 0.5 into [-1, -0.5) so that the two
    # classes sit on opposite sides of zero when weighted by the density
    X = np.copy(X)
    neg = X < 0.5
    X[neg] = X[neg] - 1
    return X

# Create a mesh
h = .02  # step size in the mesh
x_min = np.concatenate([X[:, 0], X_rot[:, 0], X_scale[:, 0], X_noise[:, 0]]).min()
x_max = np.concatenate([X[:, 0], X_rot[:, 0], X_scale[:, 0], X_noise[:, 0]]).max()
y_min = np.concatenate([X[:, 1], X_rot[:, 1], X_scale[:, 1], X_noise[:, 1]]).min()
y_max = np.concatenate([X[:, 1], X_rot[:, 1], X_scale[:, 1], X_noise[:, 1]]).max()
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

# Predict for each point of the mesh
base_Z = torch.sigmoid(base_nn(torch.tensor(np.c_[xx.ravel(), yy.ravel()]))[:, 1])
# For dropout we build a list of stochastic forward passes, concatenate them,
# and average the result
vi_Z = torch.cat([torch.sigmoid(vi_nn(torch.tensor(np.c_[xx.ravel(), yy.ravel()]))[:, 1:]) for i in range(50)], 1).mean(1)
en_Z = torch.sigmoid(ensemble_nn(torch.tensor(np.c_[xx.ravel(), yy.ravel()]))[:, 1])
mu_Z = torch.sigmoid(mu_nn(torch.tensor(np.c_[xx.ravel(), yy.ravel()]))[:, 1])
ad_Z = torch.sigmoid(fgsm_nn(torch.tensor(np.c_[xx.ravel(), yy.ravel()]))[:, 1])

base_Z = base_Z.reshape(xx.shape).detach().numpy()
base_Z_ = negatify(base_Z)
vi_Z = vi_Z.reshape(xx.shape).detach().numpy()
vi_Z_ = negatify(vi_Z)
en_Z = en_Z.reshape(xx.shape).detach().numpy()
en_Z_ = negatify(en_Z)
mu_Z = mu_Z.reshape(xx.shape).detach().numpy()
mu_Z_ = negatify(mu_Z)
ad_Z = ad_Z.reshape(xx.shape).detach().numpy()
ad_Z_ = negatify(ad_Z)

p_x = kde.score_samples(pca.transform(np.c_[xx.ravel(), yy.ravel()]))
p_x = p_x.reshape(xx.shape)
p_x_e = np.power(np.exp(1), p_x)
p_x_2 = np.power(2, p_x)
p_x_1_5 = np.power(1.5, p_x)

cm = plt.cm.RdBu
plt.rcParams.update({'font.size': 14})

##### 5.1 Plot on the test dataset
fig, ax = plt.subplots(6, 6, figsize=(24, 22))
ax[0, 0].set_title("50 % retained")
ax[0, 1].set_title("60 % retained")
ax[0, 2].set_title("70 % retained")
ax[0, 3].set_title("80 % retained")
ax[0, 4].set_title("90 % retained")
ax[0, 5].set_title("100 % retained")
ax[0, 0].set_ylabel("Softmax")
ax[1, 0].set_ylabel("Dropout")
ax[2, 0].set_ylabel("Ensemble")
ax[3, 0].set_ylabel("Mixup")
ax[4, 0].set_ylabel("FGSM")
ax[5, 0].set_ylabel("PDE")
for i in range(0, 6):
    if i == 0:
        loaders = base_test_loaders
        Z = base_Z
    elif i == 1:
        loaders = vi_test_loaders
        Z = vi_Z
    elif i == 2:
        loaders = en_test_loaders
        Z = en_Z
    elif i == 3:
        loaders = mu_test_loaders
        Z = mu_Z
    elif i == 4:
        loaders = ad_test_loaders
        Z = ad_Z
    else:
        loaders = pde_test_loaders
        Z = base_Z_ * p_x_e
    for j in range(0, 6):
        base_x, base_y = next(iter(loaders[j]))
        im = ax[i, j].contourf(xx, yy, Z, cmap=cm, alpha=.8)
        ax[i, j].scatter(base_x[:, 0], base_x[:, 1], c=base_y, cmap=cm_bright)
        ax[i, j].scatter(X_test[:, 0], X_test[:, 1], c=Y_test, cmap=cm_bright, alpha=0.1)
plt.savefig("retained_test", dpi=300)

##### 5.2 Plot on the scale dataset
fig, ax = plt.subplots(6, 6, figsize=(24, 22))
ax[0, 0].set_title("50 % retained")
ax[0, 1].set_title("60 % retained")
ax[0, 2].set_title("70 % retained")
ax[0, 3].set_title("80 % retained")
ax[0, 4].set_title("90 % retained")
ax[0, 5].set_title("100 % retained")
ax[0, 0].set_ylabel("Softmax")
ax[1, 0].set_ylabel("Dropout")
ax[2, 0].set_ylabel("Ensemble")
ax[3, 0].set_ylabel("Mixup")
ax[4, 0].set_ylabel("FGSM")
ax[5, 0].set_ylabel("PDE")
for i in range(0, 6):
    if i == 0:
        loaders = base_scale_loaders
        Z = base_Z
    elif i == 1:
        loaders = vi_scale_loaders
        Z = vi_Z
    elif i == 2:
        loaders = en_scale_loaders
        Z = en_Z
    elif i == 3:
        loaders = mu_scale_loaders
        Z = mu_Z
    elif i == 4:
        loaders = ad_scale_loaders
        Z = ad_Z
    else:
        loaders = pde_scale_loaders
        Z = base_Z_ * p_x_e
    for j in range(0, 6):
        base_x, base_y = next(iter(loaders[j]))
        im = ax[i, j].contourf(xx, yy, Z, cmap=cm, alpha=.8)
        ax[i, j].scatter(base_x[:, 0], base_x[:, 1], c=base_y, cmap=cm_bright)
        ax[i, j].scatter(X_scale[:, 0], X_scale[:, 1], c=Y, cmap=cm_bright, alpha=0.1)
plt.savefig("retained_scale", dpi=300)

##### 5.3 Plot on the rotation dataset
fig, ax = plt.subplots(6, 6, figsize=(24, 22))
ax[0, 0].set_title("50 % retained")
ax[0, 1].set_title("60 % retained")
ax[0, 2].set_title("70 % retained")
ax[0, 3].set_title("80 % retained")
ax[0, 4].set_title("90 % retained")
ax[0, 5].set_title("100 % retained")
ax[0, 0].set_ylabel("Softmax")
ax[1, 0].set_ylabel("Dropout")
ax[2, 0].set_ylabel("Ensemble")
ax[3, 0].set_ylabel("Mixup")
ax[4, 0].set_ylabel("FGSM")
ax[5, 0].set_ylabel("PDE")
for i in range(0, 6):
    if i == 0:
        loaders = base_rot_loaders
        Z = base_Z
    elif i == 1:
        loaders = vi_rot_loaders
        Z = vi_Z
    elif i == 2:
        loaders = en_rot_loaders
        Z = en_Z
    elif i == 3:
        loaders = mu_rot_loaders
        Z = mu_Z
    elif i == 4:
        loaders = ad_rot_loaders
        Z = ad_Z
    else:
        loaders = pde_rot_loaders
        Z = base_Z_ * p_x_e
    for j in range(0, 6):
        base_x, base_y = next(iter(loaders[j]))
        im = ax[i, j].contourf(xx, yy, Z, cmap=cm, alpha=.8)
        ax[i, j].scatter(base_x[:, 0], base_x[:, 1], c=base_y, cmap=cm_bright)
        ax[i, j].scatter(X_rot[:, 0], X_rot[:, 1], c=Y, cmap=cm_bright, alpha=0.1)
plt.savefig("retained_rot", dpi=300)

##### 5.4 Plot on the noise dataset
fig, ax = plt.subplots(6, 6, figsize=(24, 22))
ax[0, 0].set_title("50 % retained")
ax[0, 1].set_title("60 % retained")
ax[0, 2].set_title("70 % retained")
ax[0, 3].set_title("80 % retained")
ax[0, 4].set_title("90 % retained")
ax[0, 5].set_title("100 % retained")
ax[0, 0].set_ylabel("Softmax")
ax[1, 0].set_ylabel("Dropout")
ax[2, 0].set_ylabel("Ensemble")
ax[3, 0].set_ylabel("Mixup")
ax[4, 0].set_ylabel("FGSM")
ax[5, 0].set_ylabel("PDE")
for i in range(0, 6):
    if i == 0:
        loaders = base_noise_loaders
        Z = base_Z
    elif i == 1:
        loaders = vi_noise_loaders
        Z = vi_Z
    elif i == 2:
        loaders = en_noise_loaders
        Z = en_Z
    elif i == 3:
        loaders = mu_noise_loaders
        Z = mu_Z
    elif i == 4:
        loaders = ad_noise_loaders
        Z = ad_Z
    else:
        loaders = pde_noise_loaders
        Z = base_Z_ * p_x_e
    for j in range(0, 6):
        base_x, base_y = next(iter(loaders[j]))
        im = ax[i, j].contourf(xx, yy, Z, cmap=cm, alpha=.8)
        ax[i, j].scatter(base_x[:, 0], base_x[:, 1], c=base_y, cmap=cm_bright)
        ax[i, j].scatter(X_noise[:, 0], X_noise[:, 1], c=Y_noise, cmap=cm_bright, alpha=0.1)
plt.savefig("retained_noise", dpi=300)

##### 5.5 Compare ENDE, DE, EN with base_nn
# on test dataset
fig, ax = plt.subplots(3, 6, figsize=(24, 18))
ax[0, 0].set_title("50 % retained")
ax[0, 1].set_title("60 % retained")
ax[0, 2].set_title("70 % retained")
ax[0, 3].set_title("80 % retained")
ax[0, 4].set_title("90 % retained")
ax[0, 5].set_title("100 % retained")
ax[0, 0].set_ylabel("Entropy-Density")
ax[1, 0].set_ylabel("Entropy")
ax[2, 0].set_ylabel("Density")
for i in range(0, 3):
    if i == 0:
        loaders = base_loaders[0][0]
        Z = base_Z
    elif i == 1:
        loaders = base_loaders[1][0]
        Z = base_Z
    else:
        loaders = base_loaders[2][0]
        Z = base_Z_ * p_x_e
    for j in range(0, 6):
        base_x, base_y = next(iter(loaders[j]))
        im = ax[i, j].contourf(xx, yy, Z, cmap=cm, alpha=.8)
        ax[i, j].scatter(base_x[:, 0], base_x[:, 1], c=base_y, cmap=cm_bright)
        ax[i, j].scatter(X_test[:, 0], X_test[:, 1], c=Y_test, cmap=cm_bright, alpha=0.1)
plt.savefig("retained_test_ende_en_de", dpi=300)

# on scale dataset
fig, ax = plt.subplots(3, 6, figsize=(24, 18))
ax[0, 0].set_title("50 % retained")
ax[0, 1].set_title("60 % retained")
ax[0, 2].set_title("70 % retained")
ax[0, 3].set_title("80 % retained")
ax[0, 4].set_title("90 % retained")
ax[0, 5].set_title("100 % retained")
ax[0, 0].set_ylabel("Entropy-Density")
ax[1, 0].set_ylabel("Entropy")
ax[2, 0].set_ylabel("Density")
for i in range(0, 3):
    if i == 0:
        loaders = base_loaders[0][1]
        Z = base_Z
    elif i == 1:
        loaders = base_loaders[1][1]
        Z = base_Z
    else:
        loaders = base_loaders[2][1]
        Z = base_Z_ * p_x_e
    for j in range(0, 6):
        base_x, base_y = next(iter(loaders[j]))
        im = ax[i, j].contourf(xx, yy, Z, cmap=cm, alpha=.8)
        ax[i, j].scatter(base_x[:, 0], base_x[:, 1], c=base_y, cmap=cm_bright)
        ax[i, j].scatter(X_scale[:, 0], X_scale[:, 1], c=Y, cmap=cm_bright, alpha=0.1)
plt.savefig("retained_scale_ende_en_de", dpi=300)
from django.apps import AppConfig


class MetricConfig(AppConfig):
    label = "metric"
    name = "edd.metric"
    verbose_name = "Metric"

    def ready(self):
        # make sure to load/register all the signal handlers
        from . import signals  # noqa: F401
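# For context: the `signals` module imported in ready() is not shown here. A
# Django signals module wired up this way might look like the sketch below
# (hypothetical handler and model names, not the actual edd.metric code):
#
#     from django.db.models.signals import post_save
#     from django.dispatch import receiver
#
#     @receiver(post_save, sender="metric.Measurement")
#     def on_measurement_saved(sender, instance, created, **kwargs):
#         if created:
#             ...  # e.g. invalidate caches or enqueue follow-up work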
from scipy.misc.common import logsumexp  # NOTE: lives at scipy.special.logsumexp on modern SciPy

from kameleon_rks.densities.gaussian import sample_gaussian, \
    log_gaussian_pdf_multiple
from kameleon_rks.proposals.ProposalBase import ProposalBase
import kameleon_rks.samplers.tools
from kameleon_rks.tools.covariance_updates import log_weights_to_lmbdas, \
    update_mean_cov_L_lmbda
from kameleon_rks.tools.log import Log
import numpy as np

logger = Log.get_logger()


class StaticMetropolis(ProposalBase):
    """
    Implements the classic (isotropic) MH.
    Allows for tuning the scaling from the acceptance rate.
    """

    def __init__(self, D, target_log_pdf, step_size, schedule=None, acc_star=None):
        ProposalBase.__init__(self, D, target_log_pdf, step_size, schedule, acc_star)

        self.L_C = np.linalg.cholesky(np.eye(D))

    def proposal_log_pdf(self, current, proposals):
        log_probs = log_gaussian_pdf_multiple(proposals, mu=current, Sigma=self.L_C,
                                              is_cholesky=True, cov_scaling=self.step_size)
        return log_probs

    def proposal(self, current, current_log_pdf, **kwargs):
        if current_log_pdf is None:
            current_log_pdf = self.target_log_pdf(current)

        proposal = sample_gaussian(N=1, mu=current, Sigma=self.L_C,
                                   is_cholesky=True, cov_scaling=self.step_size)[0]
        forw_backw_log_prob = self.proposal_log_pdf(current, proposal[np.newaxis, :])[0]
        proposal_log_pdf = self.target_log_pdf(proposal)

        results_kwargs = {}

        # the probability of proposing current while sitting at proposal is symmetric
        return proposal, proposal_log_pdf, current_log_pdf, forw_backw_log_prob, forw_backw_log_prob, results_kwargs


class AdaptiveMetropolis(StaticMetropolis):
    """
    Implements adaptive MH.
    Performs efficient low-rank updates of the Cholesky factor of the covariance.
    The covariance itself is not stored/updated, only its Cholesky factor.
    """

    def __init__(self, D, target_log_pdf, step_size, gamma2, schedule=None, acc_star=None):
        StaticMetropolis.__init__(self, D, target_log_pdf, step_size, schedule, acc_star)

        self.gamma2 = gamma2

        # assume that we have observed fake samples (makes system well-posed)
        # these have covariance gamma2*I, which is a regulariser
        # the mean and log_sum_weights, however, are taken from the first set of samples in update
        self.mu = None
        self.L_C = None
        self.log_sum_weights = None

    def set_batch(self, Z):
        # override streaming solution
        self.mu = np.mean(Z, axis=0)
        cov = np.cov(Z.T)
        self.L_C = np.linalg.cholesky(cov + np.eye(self.D) * self.gamma2)
        self.log_sum_weights = np.log(len(Z))

    def update(self, Z, num_new=1, log_weights=None):
        assert(len(Z) >= num_new)

        # don't do anything if no data observed
        if num_new == 0:
            return

        if log_weights is not None:
            assert len(log_weights) == len(Z)
        else:
            log_weights = np.zeros(len(Z))

        Z_new = Z[-num_new:]
        log_weights_new = log_weights[-num_new:]

        # first update: use first of X and log_weights, and then discard
        if self.log_sum_weights is None:
            # assume have observed fake terms, which is needed for making the system well-posed
            # the L_C says that the fake terms had covariance self.lmbda, which is a regulariser
            self.L_C = np.eye(self.D) * np.sqrt(self.gamma2)
            self.log_sum_weights = log_weights_new[0]
            self.mu = Z_new[0]

            Z_new = Z_new[1:]
            log_weights_new = log_weights_new[1:]
            num_new -= 1

        # don't do anything if no data observed
        if len(Z_new) == 0:
            return

        # generate lmbdas that correspond to weighted averages
        lmbdas = log_weights_to_lmbdas(self.log_sum_weights, log_weights_new)

        # low-rank update of Cholesky, costs O(d^2) only
        old_L_C = np.array(self.L_C, copy=True)
        self.mu, self.L_C = update_mean_cov_L_lmbda(Z_new, self.mu, self.L_C, lmbdas)

        if np.any(np.isnan(self.L_C)) or np.any(np.isinf(self.L_C)):
            logger.warning("Numerical error while updating Cholesky factor of C.\n"
                           "Before update:\n%s\n"
                           "After update:\n%s\n"
                           "Updating data:\n%s\n"
                           "Updating log weights:\n%s\n"
                           "Updating lmbdas:\n%s\n"
                           % (str(old_L_C), str(self.L_C), str(Z_new), str(log_weights_new), str(lmbdas))
                           )
            raise RuntimeError("Numerical error while updating Cholesky factor of C.")

        # update terms and weights
        self.log_sum_weights = logsumexp(list(log_weights) + [self.log_sum_weights])


class AdaptiveIndependentMetropolis(AdaptiveMetropolis):
    """
    Implements an independent Gaussian proposal with given parameters.

    However, stores mean and covariance in the same fashion as AdaptiveMetropolis
    for debugging purposes, and debug-outputs them.

    Schedule and acc_star are ignored.
    """

    def __init__(self, D, target_log_pdf, step_size, gamma2, proposal_mu, proposal_L_C):
        AdaptiveMetropolis.__init__(self, D, target_log_pdf, step_size, gamma2)

        self.proposal_mu = proposal_mu
        self.proposal_L_C = proposal_L_C

        # store all log_weights of all proposals
        self.log_weights = []

    def proposal_log_pdf(self, current, proposals):
        log_probs = log_gaussian_pdf_multiple(proposals, mu=self.proposal_mu,
                                              Sigma=self.proposal_L_C,
                                              is_cholesky=True, cov_scaling=self.step_size)
        return log_probs

    def proposal(self, current, current_log_pdf, **kwargs):
        if current_log_pdf is None:
            current_log_pdf = self.target_log_pdf(current)

        proposal = sample_gaussian(N=1, mu=self.proposal_mu, Sigma=self.proposal_L_C,
                                   is_cholesky=True, cov_scaling=self.step_size)[0]
        forw_backw_log_prob = self.proposal_log_pdf(None, proposal[np.newaxis, :])[0]
        backw_backw_log_prob = self.proposal_log_pdf(None, current[np.newaxis, :])[0]

        proposal_log_pdf = self.target_log_pdf(proposal)

        results_kwargs = {}

        self.log_weights.append(proposal_log_pdf - forw_backw_log_prob)

        # the probability of proposing current while sitting at proposal is symmetric
        return proposal, proposal_log_pdf, current_log_pdf, forw_backw_log_prob, backw_backw_log_prob, results_kwargs

    def get_current_ess(self):
        return kameleon_rks.samplers.tools.compute_ess(self.log_weights, normalize=True)

    def update(self, Z, num_new, log_weights):
        AdaptiveMetropolis.update(self, Z, num_new, log_weights)

        cov = np.dot(self.L_C, self.L_C.T)
        var = np.diag(cov)

        logger.debug("mu: %s" % str(self.mu))
        logger.debug("var: %s" % str(var))
        logger.debug("cov: %s" % str(cov))
        logger.debug("norm(mu): %.3f" % np.linalg.norm(self.mu))
        logger.debug("np.mean(var): %.3f" % np.mean(var))
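# Usage sketch (not part of the library): one Metropolis-Hastings step with
# StaticMetropolis on a standard-normal target. This assumes only the
# constructor and proposal() signatures defined above; ProposalBase's
# internals are not shown here.
if __name__ == "__main__":
    target = lambda x: -0.5 * np.dot(x, x)  # log-pdf of N(0, I), up to a constant
    mh = StaticMetropolis(D=2, target_log_pdf=target, step_size=1.0)
    current, current_lp = np.zeros(2), None
    prop, prop_lp, current_lp, forw_lp, backw_lp, _ = mh.proposal(current, current_lp)
    log_alpha = prop_lp - current_lp + backw_lp - forw_lp  # MH acceptance ratio
    if np.log(np.random.rand()) < log_alpha:
        current, current_lp = prop, prop_lp
    print(current)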
__copyright__ = 'Copyright(c) Gordon Elliott 2017'

""" """

from enum import IntEnum

from a_tuin.metadata import (
    ObjectFieldGroupBase,
    StringField,
    ObjectReferenceField,
    Collection,
    DescriptionField,
    IntField,
    IntEnumField,
)


class PPSStatus(IntEnum):
    Requested = 1
    Provided = 2
    NotIncomeTaxPayer = 3  # parishioner is not an Irish income tax payer
    NotProvided = 4        # parishioner responded but refused to provide PPS
    ExcludedByAdmin = 5    # parishioner excluded through admin discretion


class PPSStatusField(IntEnumField):
    def __init__(self, name, is_mutable=True, required=False, default=None, description=None, validation=None):
        super().__init__(name, PPSStatus, is_mutable, required, default, description, validation)


class PPS(ObjectFieldGroupBase):
    # Data usage
    #
    # Records the PPS number for an individual in order that a tax rebate may
    # be claimed on funds donated to the parish.

    public_interface = (
        ObjectReferenceField('person', required=True),
        PPSStatusField(
            'status',
            required=True,
            default=PPSStatus.Requested,
            description='Has the parishioner responded to a request for a PPS?'
        ),
        StringField('pps'),
        StringField('name_override'),
        IntField('chy3_valid_year', description='The first financial year the most recent CHY3 form is valid from'),
        DescriptionField('notes')
    )


class PPSCollection(Collection):
    pass
""" Tools for segmenting positional AIS messages into continuous tracks. Includes a CLI plugin for `gpsdio` to run the algorithm. """ from gpsdio_segment.segment import BadSegment from gpsdio_segment.segment import Segment from gpsdio_segment.core import Segmentizer __version__ = '0.20.2' __author__ = 'Paul Woods' __email__ = 'paul@skytruth.org' __source__ = 'https://github.com/SkyTruth/gpsdio-segment' __license__ = """ Copyright 2015-2017 SkyTruth Authors: Kevin Wurster <kevin@skytruth.org> Paul Woods <paul@skytruth.org> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """
# -*- coding: utf-8 -*-
"""
A utility library for interfacing with the SRF-08 and SRF-10 ultrasonic
rangefinders.

http://www.robot-electronics.co.uk/htm/srf08tech.shtml
http://www.robot-electronics.co.uk/htm/srf10tech.htm

Utilizes the I2C library for reads and writes.

The MIT License (MIT)
Copyright (c) 2015 Martin Clemons

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pyb import I2C


class SRF_RANGE_UNITS:
    """
    SRF-XX rangefinder constants.
    """
    IN = 0x50
    CM = 0x51
    US = 0x52


class SRFBase(object):
    """
    A base class for SRF08 and SRF10 rangefinders.

    Essentially a SRF-xx rangefinder emulates a 24xx series EEPROM and
    implements a number of user readable and writable registers. These
    registers map to the specific hardware functions and readings from the
    rangefinder. Since the '08 and '10 are very similar in their functionality,
    this class serves as a base implementation which can be overridden to form
    a class for a specific sensor.
    """

    def __init__(self, *args, **kwargs):
        """
        If any arguments are present self.init() is called.
        """
        super(SRFBase, self).__init__()
        self.i2c = None
        self.bus_addr = None
        self.rxb = bytearray(4)
        if len(args) > 0:
            self.init(*args, **kwargs)

    def init(self, *args, **kwargs):
        """
        Initialize a SRF sensor instance.
        There are two options for parameters passed when calling this function:

        1. Pass the initialization parameters for a pyb.I2C object.
           The initialization parameters will be used to initialize a new I2C
           instance which will be used to communicate with the sensor.
           If bus_address has not been set, a bus scan will be performed and
           the first available address will be used.
        2. Pass an already initialized pyb.I2C object.
           The instance passed in will be used to communicate with the sensor.
           The I2C instance should be initialized before any methods which
           require communication are called.
        """
        if len(args) < 1:
            raise TypeError('Please supply an I2C object or bus number.')
        if type(args[0]) is int:
            # assume first argument is bus number for I2C constructor
            self.i2c = I2C(*args, **kwargs)
            if self.bus_addr is None:
                try:
                    # assign address of first device found
                    self.bus_addr = self.i2c.scan()[0]
                except IndexError:
                    raise Exception('Sensor not found on I2C bus.')
        else:
            # first argument is initialized I2C bus object
            self.i2c = args[0]

    def deinit(self):
        """
        De-init sensor instance.
        Calls deinit() on the I2C instance associated with the sensor, and
        also resets the sensor bus address.
        """
        try:
            self.i2c.deinit()
        except AttributeError:
            pass
        self.i2c = None
        self.bus_addr = None

    def bus_address(self, *args):
        """
        Sets the rangefinder I2C bus address if provided, otherwise returns
        the current rangefinder bus address.
        """
        if len(args) > 0:
            self.bus_addr = args[0]
        else:
            return self.bus_addr

    def scan_bus(self):
        """
        Scans the I2C bus and returns a list of addresses found.
        """
        return self.i2c.scan()

    def sw_rev(self):
        """
        Returns the software revision of the sensor.
        """
        # register 0 holds the software revision; read a single byte
        # (the original buffer size of (256,) was invalid for a bytearray)
        rev = bytearray(1)
        self.i2c.mem_read(rev, self.bus_addr, 0)
        return rev[0]

    def set_max_range(self, range_mm):
        """
        Sets the maximum range of the sensor.
        :param range_mm: Integer range in mm, min. 43mm, max. 11008mm.
        :return:
        """
        if range_mm < 43:
            raise ValueError('Minimum range is 43mm.')
        if range_mm > 11008:
            raise ValueError('Maximum range is 11008mm.')
        c = int(range_mm) // 43 - 1
        self.i2c.mem_write(c, self.bus_addr, 2)

    def set_analog_gain(self, gain):
        """
        Sets the analog gain of the sensor.
        :param gain: Sensor gain register value.
        :return:
        """
        if gain < 0:
            raise ValueError('Gain register must be greater than 0.')
        self.i2c.mem_write(int(gain), self.bus_addr, 1)

    def measure_range(self, units=SRF_RANGE_UNITS.CM):
        """
        Initiate rangefinder ranging.
        :param units: SRF_RANGE_UNITS, either IN, CM, or US for µ-seconds.
        :return:
        """
        self.i2c.mem_write(units, self.bus_addr, 0)

    def read_range(self):
        """
        Read the range registers after ranging has completed.
        :param:
        :return: A list of integer range values in the units specified by
            measure_range(). In the case of sensors which report multiple
            echoes, the first item in the list represents the first echo and
            the nth item represents the nth echo. If no echoes were returned
            the list will be empty.
        """
        self.i2c.mem_read(self.rxb, self.bus_addr, 0)
        values = []
        # skip first 2 bytes, then unpack high and low bytes from buffer data
        # data is packed in big-endian form
        for i in range(2, len(self.rxb), 2):
            range_val = (self.rxb[i] << 8) + self.rxb[i + 1]
            if range_val > 0:
                values.append(range_val)
        return values


class SRF08(SRFBase):
    """
    A SRF08 rangefinder.
    Supports up to 17 echo range values. Maximum analog gain of 31.
    TODO: Add ability to read light meter.
    """

    def __init__(self, *args, **kwargs):
        super(SRF08, self).__init__(*args, **kwargs)
        self.rxb = bytearray(36)

    def __str__(self):
        return '<SRF08 address {} on {}>'.format(self.bus_addr, self.i2c)

    def set_analog_gain(self, gain):
        if gain > 31:
            raise ValueError('Gain register must be less than or equal to 31.')
        super(SRF08, self).set_analog_gain(gain)


class SRF10(SRFBase):
    """
    A SRF10 rangefinder.
    Supports a single echo range value. Maximum analog gain of 16.
    """

    def __str__(self):
        return '<SRF10 address {} on {}>'.format(self.bus_addr, self.i2c)

    def set_analog_gain(self, gain):
        if gain > 16:
            raise ValueError('Gain register must be less than or equal to 16.')
        super(SRF10, self).set_analog_gain(gain)
import sys
import time
from datetime import timedelta, datetime as dt
from monthdelta import monthdelta
import holidays
import re
import threading
import inspect
from contextlib import contextmanager
import traceback
import logging

# default logging configuration
# logging.captureWarnings(True)
LOG_FORMATTER = logging.Formatter('%(message)s')
LOGGER_NAME = 'flask_production'
LOGGER = logging.getLogger(LOGGER_NAME)
LOGGER.setLevel(logging.INFO)

from ._capture import print_capture

USHolidays = holidays.US()


class _JobRunLogger(object):
    '''
    logging class to capture any print statements within a job
    also captures start time, end time and error traceback
    '''

    def __init__(self):
        self._lock = threading.Lock()
        self._reset()

    @property
    def log(self):
        with self._lock:
            return self._run_log

    @property
    def error(self):
        with self._lock:
            return self._err_log

    @property
    def started_at(self):
        with self._lock:
            return self._started_at

    @property
    def ended_at(self):
        with self._lock:
            return self._ended_at

    def _reset(self):
        '''clear previous run info'''
        with self._lock:
            self._run_log = ''
            self._err_log = ''
            self._started_at = None
            self._ended_at = None

    def _log_callback(self, msg: str):
        '''
        write to stderr, since stdout is being redirected here and print()
        would be circular
        also log to file through the logging library if a LOGGER handler was
        set by TaskScheduler
        '''
        if msg.strip() == '':
            return
        msg = msg.replace('\r\n', '\n')  # normalize line endings to work correctly
        sys.stderr.write(msg)
        if len(LOGGER.handlers) > 0:
            LOGGER.info(msg.strip())
        with self._lock:
            self._run_log += msg

    @contextmanager
    def start_capture(self):
        '''begin recording print statements'''
        self._reset()  # clear previous run info
        with self._lock:
            self._started_at = dt.now()
        with print_capture(callback=self._log_callback):
            yield
        with self._lock:
            self._ended_at = dt.now()

    def set_error(self):
        '''called when job throws error'''
        with self._lock:
            self._err_log = traceback.format_exc()

    def to_dict(self):
        with self._lock:
            return dict(
                log=self._run_log,
                err=self._err_log,
                start=self._started_at,
                end=self._ended_at,
            )


class Job(object):
    '''standard job class'''

    RUNABLE_DAYS = {
        'day': lambda d, hols: True,
        'weekday': lambda d, hols: d.isoweekday() < 6,
        'weekend': lambda d, hols: d.isoweekday() > 5,
        'businessday': lambda d, hols: d not in hols and d.isoweekday() < 6,
        'holiday': lambda d, hols: d in hols or d.isoweekday() > 5,
        'trading-holiday': lambda d, hols: d in hols,
        # days of the week
        'monday': lambda d, hols: d.isoweekday() == 1,
        'tuesday': lambda d, hols: d.isoweekday() == 2,
        'wednesday': lambda d, hols: d.isoweekday() == 3,
        'thursday': lambda d, hols: d.isoweekday() == 4,
        'friday': lambda d, hols: d.isoweekday() == 5,
        'saturday': lambda d, hols: d.isoweekday() == 6,
        'sunday': lambda d, hols: d.isoweekday() == 7,
    }

    @classmethod
    def is_valid_interval(cls, interval):
        return interval in cls.RUNABLE_DAYS

    def __init__(self, every, at, func, kwargs):
        if str(every) == 'holiday':
            print("!!", "=" * 20, "!!")
            print("'holiday' interval is deprecated and will be removed. \r\nUse 'weekend' and 'trading-holiday' instead")
            print("!!", "=" * 20, "!!")
        self.interval = every
        self.time_string = at
        self.func = func
        self.kwargs = kwargs
        self.is_running = False
        self._run_silently = False
        self._generic_err_handler = None
        self._err_handler = None
        self._func_src_code = inspect.getsource(self.func)

    def init(self, calendar, generic_err_handler=None, startup_offset=300):
        '''initialize extra attributes of job'''
        self.calendar = calendar
        self._generic_err_handler = generic_err_handler
        self._startup_offset = startup_offset
        self._run_info = _JobRunLogger()
        self.schedule_next_run()
        print(self)
        return self

    def silently(self):
        self._run_silently = True
        return self

    def catch(self, err_handler):
        '''register job specific error handler'''
        self._err_handler = err_handler
        return self

    @staticmethod
    def to_timestamp(d):
        return time.mktime(d.timetuple()) + d.microsecond / 1000000.0

    def schedule_next_run(self, just_ran=False):
        '''compute timestamp of the next run'''
        h, m = self.time_string.split(':')
        n = dt.now()
        n = dt(n.year, n.month, n.day, int(h), int(m), 0)
        ts = self.to_timestamp(n)
        if self._job_must_run_today() and not just_ran and time.time() < ts + self._startup_offset:
            self.next_timestamp = ts
        else:
            next_day = n + timedelta(days=1)
            while not self._job_must_run_today(next_day):
                next_day += timedelta(days=1)
            self.next_timestamp = self.to_timestamp(next_day)  # next_day.timestamp()

    def _job_must_run_today(self, date=None):
        return self.RUNABLE_DAYS[self.interval](date or dt.now(), self.calendar)

    def is_due(self):
        '''test if job should run now'''
        return (time.time() >= self.next_timestamp) and not self.is_running

    def did_fail(self):
        '''test if job failed'''
        return self._run_info.error != ''

    def run(self, is_rerun=False):
        '''
        begin job run
        redirect all print statements to _JobRunLogger
        call error handlers if provided
        '''
        with self._run_info.start_capture():  # captures all writes to stdout
            self.is_running = True
            try:
                if not self._run_silently:  # add print statements
                    print("========== Job {} [{}] =========".format(
                        "Rerun Start" if is_rerun else "Start",
                        dt.now().strftime("%Y-%m-%d %H:%M:%S")
                    ))
                    print("Executing {}".format(self))
                    print("*")  # job log separator
                start_time = time.time()
                return self.func(**self.kwargs)
            except Exception:
                print("Job", self.func.__name__, "failed!")
                err_msg = "Error in <{}>\n\n\n{}".format(self.func.__name__, traceback.format_exc())
                self._run_info.set_error()
                try:
                    if self._err_handler is not None:
                        self._err_handler(err_msg)  # job specific error callback registered through .catch()
                    elif self._generic_err_handler is not None:
                        self._generic_err_handler(err_msg)  # generic error callback from scheduler
                except:
                    traceback.print_exc()
            finally:
                # if the job was forced to rerun, we should not schedule the next run
                if not is_rerun:
                    self.schedule_next_run(just_ran=True)
                if not self._run_silently:  # add print statements
                    print("*")  # job log separator
                    print("Finished in {:.2f} minutes".format((time.time() - start_time) / 60))
                    print(self)
                    print("========== Job {} [{}] =========".format(
                        "Rerun End" if is_rerun else "End",
                        dt.now().strftime("%Y-%m-%d %H:%M:%S")
                    ))
                self.is_running = False

    def _next_run_dt(self):
        return dt.fromtimestamp(self.next_timestamp) if self.next_timestamp != 0 else None

    def to_dict(self):
        '''property to access job info dict'''
        return dict(
            func=self.func.__name__,
            src=self._func_src_code,
            doc=self.func.__doc__,
            type=self.__class__.__name__,
            every=self.interval,
            at=self.time_string,
            is_running=self.is_running,
            next_run=self._next_run_dt(),
            logs=self._run_info.to_dict() if hasattr(self, '_run_info') else {}
        )

    def __repr__(self):
        d = self._next_run_dt()
        return "{} {}. Next run = {}".format(
            self.__class__.__name__,
            self.func.__name__,
            d.strftime("%Y-%m-%d %H:%M:%S") if isinstance(d, dt) else 'Never'
        )


class OneTimeJob(Job):
    '''type of job that runs only once'''

    @classmethod
    def is_valid_interval(cls, interval):
        try:
            dt.strptime(interval, "%Y-%m-%d")
            return True
        except:
            return False

    def schedule_next_run(self, just_ran=False):
        H, M = self.time_string.split(':')
        Y, m, d = self.interval.split('-')
        n = dt(int(Y), int(m), int(d), int(H), int(M), 0)
        if just_ran or dt.now() > n + timedelta(minutes=3):
            self.next_timestamp = 0
        else:
            self.next_timestamp = self.to_timestamp(n)

    def is_due(self):
        if self.next_timestamp == 0:
            raise JobExpired('remove me!')
        return super().is_due()


class RepeatJob(Job):
    '''type of job that runs every n seconds'''

    @classmethod
    def is_valid_interval(cls, interval):
        return isinstance(interval, (int, float))

    def schedule_next_run(self, just_ran=False):
        if not isinstance(self.interval, (int, float)) or self.interval <= 0:
            raise BadScheduleError("Illegal interval for repeating job. Expected number of seconds")
        if just_ran:
            self.next_timestamp += self.interval
        else:
            self.next_timestamp = time.time() + self.interval


class MonthlyJob(Job):
    '''
    type of job that can be scheduled to run once per month
    example intervals: 1st, 22nd, 30th
    limitation: we cannot intuitively handle dates >= 29 for all months
    - ex: 29th will fail for non-leap February, 31st will fail for months having less than 31 days
    - use '_strict_date' when handling dates >= 29:
        if self._strict_date == True:
            job is scheduled only on months which have the date (ex: 31st)
        elif self._strict_date == False:
            run on the last day of the month if the date exceeds the current month
    '''
    PATTERN = re.compile(r"^(\d{1,2})(st|nd|rd|th)$", re.IGNORECASE)

    def __init__(self, every, at, func, kwargs, strict_date):
        if not isinstance(strict_date, bool):
            raise BadScheduleError("call to .strict_date() required for monthly schedule. ex: .every('31st').strict_date(True)..")
        self._strict_date = strict_date
        super().__init__(every, at, func, kwargs)

    @classmethod
    def is_valid_interval(cls, interval):
        # example intervals - 1st, 22nd, 30th
        match = cls.PATTERN.match(str(interval))
        return match is not None and int(match.groups()[0]) <= 31

    def __last_day_of_month(self, d):
        return ((d + monthdelta(1)).replace(day=1) - timedelta(days=1)).day

    def schedule_next_run(self, just_ran=False):
        interval = int(self.PATTERN.match(self.interval).groups()[0])
        H, M = self.time_string.split(':')
        sched_day = dt.now()
        # switch to next month if
        # - task just ran, or
        # - day has already passed, or
        # - day is today, but time has already passed
        day_passed = interval < sched_day.day  # True if day already passed this month
        time_passed = interval == sched_day.day and (int(H) < sched_day.hour or (int(H) == sched_day.hour and (int(M) + 3) < sched_day.minute))  # 3 min look-back on tasks
        if just_ran or day_passed or time_passed:
            sched_day += monthdelta(1)  # switch to next month
        # handle cases where the interval day doesn't occur in all months (ex: 31st)
        if interval > self.__last_day_of_month(sched_day):
            if self._strict_date == False:
                interval = self.__last_day_of_month(sched_day)  # if strict is false, run on whatever is the last day of the month
            else:  # strict
                while interval > self.__last_day_of_month(sched_day):  # run only on months which have the date
                    sched_day += monthdelta(1)
        n = sched_day.replace(day=interval, hour=int(H), minute=int(M), second=0, microsecond=0)
        self.next_timestamp = self.to_timestamp(n)

    def __repr__(self):
        return "{}[ strict={} ] {}. Next run = {}".format(
            self.__class__.__name__,
            self._strict_date,
            self.func.__name__,
            self._next_run_dt().strftime("%Y-%m-%d %H:%M:%S")
        )


class AsyncJobWrapper(object):
    '''wrapper to run the job on a parallel thread'''

    def __init__(self, job):
        self.job = job
        self.proc = None

    def __getattr__(self, name):
        return self.job.__getattribute__(name)

    def is_due(self):
        return self.job.is_due()

    def run(self, *args, **kwargs):
        self.proc = threading.Thread(target=self.job.run, args=args, kwargs=kwargs)
        self.proc.daemon = True
        self.proc.start()


class JobExpired(Exception):
    pass


class BadScheduleError(Exception):
    pass


class TaskScheduler(object):
    '''task scheduler class to manage and run jobs'''

    def __init__(self, check_interval=5, holidays_calendar=None, on_job_error=None, log_filepath=None):
        self.jobs = []
        self.on = self.every
        self._check_interval = check_interval
        self.interval = None
        self.temp_time = None
        if holidays_calendar is not None:
            self.holidays_calendar = holidays_calendar
        else:
            self.holidays_calendar = USHolidays
        self.on_job_error = on_job_error
        self.log_filepath = log_filepath
        if self.log_filepath is not None:
            fh = logging.FileHandler(self.log_filepath)
            fh.setFormatter(LOG_FORMATTER)
            LOGGER.addHandler(fh)
        self._strict_monthly = None

    def __current_timestring(self):
        return dt.now().strftime("%H:%M")

    def every(self, interval):
        '''
        interval is either one of the keys of Job.RUNABLE_DAYS
        or an integer denoting the number of seconds for a RepeatJob
        '''
        self.interval = interval
        return self

    def strict_date(self, strict):
        '''
        required to be called when scheduling a MonthlyJob - see MonthlyJob docstring
        '''
        if not MonthlyJob.is_valid_interval(self.interval) or not isinstance(strict, bool):
            raise BadScheduleError(".strict_date(bool) only used for monthly schedule. ex: .every('31st').strict_date(True)..")
        self._strict_monthly = strict
        return self

    def at(self, time_string):
        '''
        24-hour time string of when to run the job
        example: '15:00' for 3PM
        '''
        if self.interval is None:
            self.interval = 'day'
        self.temp_time = time_string
        return self

    def do(self, func, do_parallel=False, **kwargs):
        '''
        register 'func' for the job
        run in a parallel thread if do_parallel is True
        pass kwargs into 'func' at execution
        '''
        if self.interval is None:
            raise Exception('Run .at()/.every().at() before .do()')
        if self.temp_time is None:
            self.temp_time = self.__current_timestring()
        if RepeatJob.is_valid_interval(self.interval):
            j = RepeatJob(self.interval, None, func, kwargs)
        elif OneTimeJob.is_valid_interval(self.interval):
            j = OneTimeJob(self.interval, self.temp_time, func, kwargs)
        elif MonthlyJob.is_valid_interval(self.interval):
            j = MonthlyJob(self.interval, self.temp_time, func, kwargs, strict_date=self._strict_monthly)
        elif Job.is_valid_interval(self.interval):
            j = Job(self.interval, self.temp_time, func, kwargs)
        else:
            raise BadScheduleError("{} is not valid\n".format(self.interval))
        j.init(
            calendar=self.holidays_calendar,
            generic_err_handler=self.on_job_error
        )
        if do_parallel:
            j = AsyncJobWrapper(j)
        self.jobs.append(j)
        self.temp_time = None
        self.interval = None
        self._strict_monthly = None
        return j

    def check(self):
        '''check if a job is due'''
        for j in self.jobs.copy():  # work on a copy of this list - safer in case the list changes
            try:
                if j.is_due():
                    j.run()
            except JobExpired:
                self.jobs.remove(j)

    def start(self):
        '''blocking function that checks for jobs every 'check_interval' seconds'''
        self._running_auto = True
        try:
            while self._running_auto:
                try:
                    self.check()
                    time.sleep(self._check_interval)
                except KeyboardInterrupt:
                    print("KeyboardInterrupt")
                    self.stop()
        finally:
            print("Stopping. Please wait, checking active async jobs ..")
            self.join()
            print(self, "Done!")

    def join(self):
        '''wait for any async jobs to complete'''
        for j in self.jobs:
            if isinstance(j, AsyncJobWrapper) and j.is_running:  # wait on any running parallel tasks
                j.proc.join()
                print(j, "exited")

    def stop(self):
        '''stop job polling started with the .start() method'''
        self._running_auto = False

    def rerun(self, job_index):
        if job_index < 0 or job_index >= len(self.jobs):
            raise IndexError("Invalid job index")
        j = self.jobs[job_index]
        if j.is_running:
            raise RuntimeError("Cannot rerun a running task")
        if not isinstance(j, AsyncJobWrapper):
            j = AsyncJobWrapper(j)
        j.run(is_rerun=True)
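# Usage sketch (relies only on the API defined above): register two jobs and
# poll once by hand; start() would block and poll every `check_interval` seconds.
if __name__ == "__main__":
    def heartbeat():
        print("still alive")

    sched = TaskScheduler(check_interval=1)
    sched.every(30).do(heartbeat)                                      # every 30 seconds
    sched.every('31st').strict_date(False).at('09:00').do(heartbeat)   # month-end job
    sched.check()  # one manual poll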
token = '1271828065:AAFCFSuz_vX71bxzZSdhLSLhUnUgwWc0t-k'
###########################
#
# #764 Asymmetric Diophantine Equation - Project Euler
# https://projecteuler.net/problem=764
#
# Code by Kevin Marciniak
#
###########################
## What is a lambda: an anonymous function, i.e. a function without a name.
## Use case: you can pass one as an argument wherever a quick, throwaway function is needed.

# def double(num):
#     x = num + num
#     return x
# print(double(6))

# lambda num: num + num

# x = lambda a: a + 10
# print(x(5))

# Example 2
# my_list = [1, 5, 4, 6, 8, 11, 3, 12]
# new_list = list(filter(lambda x: (x % 2 == 0), my_list))
# print(new_list)

# Example 3
# my_list = [1, 5, 4, 6, 8, 11, 3, 12]
# new_list = list(map(lambda x: x * 2, my_list))
# print(new_list)

# Example 4
import pandas as pd

df = pd.DataFrame({
    'Name': ['Luke', 'Gina', 'Sam', 'Emma'],
    'Status': ['Father', 'Mother', 'Son', 'Daughter'],
    'Birthyear': [1976, 1984, 2013, 2016],
})
df['age'] = df['Birthyear'].apply(lambda x: 2021 - x)
print(df)

# listA = [4, "string1", lambda num: num * num]
# print(listA[2](8))

# array = [3, 6, 7]
# def double(num):
#     return num + num
# print(list(map(double, array)))
# print(list(map(lambda num: num + num, array)))
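# Example 5 (added illustration, in the same spirit as the ones above):
# lambdas are also handy as sort keys.
people = [('Luke', 1976), ('Gina', 1984), ('Sam', 2013)]
print(sorted(people, key=lambda person: person[1], reverse=True))
# [('Sam', 2013), ('Gina', 1984), ('Luke', 1976)]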
""" Utility functions for dealing with NER tagging. """ import logging logger = logging.getLogger('stanza') def is_basic_scheme(all_tags): """ Check if a basic tagging scheme is used. Return True if so. Args: all_tags: a list of NER tags Returns: True if the tagging scheme does not use B-, I-, etc, otherwise False """ for tag in all_tags: if len(tag) > 2 and tag[:2] in ('B-', 'I-', 'S-', 'E-'): return False return True def is_bio_scheme(all_tags): """ Check if BIO tagging scheme is used. Return True if so. Args: all_tags: a list of NER tags Returns: True if the tagging scheme is BIO, otherwise False """ for tag in all_tags: if tag == 'O': continue elif len(tag) > 2 and tag[:2] in ('B-', 'I-'): continue else: return False return True def to_bio2(tags): """ Convert the original tag sequence to BIO2 format. If the input is already in BIO2 format, the original input is returned. Args: tags: a list of tags in either BIO or BIO2 format Returns: new_tags: a list of tags in BIO2 format """ new_tags = [] for i, tag in enumerate(tags): if tag == 'O': new_tags.append(tag) elif tag[0] == 'I': if i == 0 or tags[i-1] == 'O' or tags[i-1][1:] != tag[1:]: new_tags.append('B' + tag[1:]) else: new_tags.append(tag) else: new_tags.append(tag) return new_tags def basic_to_bio(tags): """ Convert a basic tag sequence into a BIO sequence. You can compose this with bio2_to_bioes to convert to bioes Args: tags: a list of tags in basic (no B-, I-, etc) format Returns: new_tags: a list of tags in BIO format """ new_tags = [] for i, tag in enumerate(tags): if tag == 'O': new_tags.append(tag) elif i == 0 or tags[i-1] == 'O' or tags[i-1] != tag: new_tags.append('B-' + tag) else: new_tags.append('I-' + tag) return new_tags def bio2_to_bioes(tags): """ Convert the BIO2 tag sequence into a BIOES sequence. 
Args: tags: a list of tags in BIO2 format Returns: new_tags: a list of tags in BIOES format """ new_tags = [] for i, tag in enumerate(tags): if tag == 'O': new_tags.append(tag) else: if len(tag) < 2: raise Exception(f"Invalid BIO2 tag found: {tag}") else: if tag[:2] == 'I-': # convert to E- if next tag is not I- if i+1 < len(tags) and tags[i+1][:2] == 'I-': new_tags.append(tag) else: new_tags.append('E-' + tag[2:]) elif tag[:2] == 'B-': # convert to S- if next tag is not I- if i+1 < len(tags) and tags[i+1][:2] == 'I-': new_tags.append(tag) else: new_tags.append('S-' + tag[2:]) else: raise Exception(f"Invalid IOB tag found: {tag}") return new_tags def process_tags(sentences, scheme): res = [] # check if tag conversion is needed convert_bio_to_bioes = False convert_basic_to_bioes = False is_bio = is_bio_scheme([x[1] for sent in sentences for x in sent]) is_basic = not is_bio and is_basic_scheme([x[1] for sent in sentences for x in sent]) if is_bio and scheme.lower() == 'bioes': convert_bio_to_bioes = True logger.debug("BIO tagging scheme found in input; converting into BIOES scheme...") elif is_basic and scheme.lower() == 'bioes': convert_basic_to_bioes = True logger.debug("Basic tagging scheme found in input; converting into BIOES scheme...") # process tags for sent in sentences: words, tags = zip(*sent) # NER field sanity checking if any([x is None or x == '_' for x in tags]): raise ValueError("NER tag not found for some input data.") if convert_basic_to_bioes: # if basic, convert tags -> bio -> bioes tags = bio2_to_bioes(basic_to_bio(tags)) else: # first ensure BIO2 scheme tags = to_bio2(tags) # then convert to BIOES if convert_bio_to_bioes: tags = bio2_to_bioes(tags) res.append([(w,t) for w,t in zip(words, tags)]) return res def decode_from_bioes(tags): """ Decode from a sequence of BIOES tags, assuming default tag is 'O'. Args: tags: a list of BIOES tags Returns: A list of dict with start_idx, end_idx, and type values. """ res = [] ent_idxs = [] cur_type = None def flush(): if len(ent_idxs) > 0: res.append({ 'start': ent_idxs[0], 'end': ent_idxs[-1], 'type': cur_type}) for idx, tag in enumerate(tags): if tag is None: tag = 'O' if tag == 'O': flush() ent_idxs = [] elif tag.startswith('B-'): # start of new ent flush() ent_idxs = [idx] cur_type = tag[2:] elif tag.startswith('I-'): # continue last ent ent_idxs.append(idx) cur_type = tag[2:] elif tag.startswith('E-'): # end last ent ent_idxs.append(idx) cur_type = tag[2:] flush() ent_idxs = [] elif tag.startswith('S-'): # start single word ent flush() ent_idxs = [idx] cur_type = tag[2:] flush() ent_idxs = [] # flush after whole sentence flush() return res
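if __name__ == '__main__':
    # A small worked example of the conversion pipeline above; the expected
    # values follow directly from the function definitions in this module.
    bioes = bio2_to_bioes(basic_to_bio(['O', 'PER', 'PER', 'LOC']))
    assert bioes == ['O', 'B-PER', 'E-PER', 'S-LOC']
    assert decode_from_bioes(bioes) == [
        {'start': 1, 'end': 2, 'type': 'PER'},
        {'start': 3, 'end': 3, 'type': 'LOC'},
    ]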
from dataclasses import dataclass
from typing import Callable

import torch

Logits = torch.FloatTensor


@dataclass
class Sample:
    logits: Logits
    tokens: torch.LongTensor


Sampler = Callable[[Logits], Sample]


def standard(temperature: float = 1.0) -> Sampler:
    def sample(logits: Logits) -> Sample:
        logits = logits / (temperature + 1e-7)

        # There was a regression in torch that made Categorical only work with
        # fp32. We can track the issue on github and remove this cast once the
        # fix makes it into a pytorch release or nightly:
        #
        #   https://github.com/pytorch/pytorch/issues/29211
        #
        logits_fp32 = logits.float()
        return Sample(
            logits=logits,
            tokens=torch.distributions.Categorical(logits=logits_fp32).sample()
        )

    return sample


def argmax() -> Sampler:
    def sample(logits: Logits) -> Sample:
        return Sample(logits=logits, tokens=torch.argmax(logits, dim=-1))

    return sample


def nucleus_sampler(top_p: float = 0.9, temperature=1.0) -> Sampler:
    """
    Return a sampler that decides diversity via nucleus sampling. p=0.9 means
    that the top 90% of likelihood-weighted options are considered. p=0.0 is
    equivalent to argmax; p=1.0 has no effect.

    When a logit is on the boundary of being included or not being included,
    default to including it.
    """
    if top_p == 0.0:
        return argmax()
    if top_p == 1.0:
        return standard(temperature=temperature)

    def sample(logits: Logits) -> Sample:
        """
        Remove logits that do not represent the top_p proportion of
        likelihoods. When a logit is on the boundary of being included or not
        being included, default to including it.
        """
        logits = logits.clone()
        sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
        cumulative_probs = torch.cumsum(
            torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1)

        # Remove tokens with cumulative probability above the threshold.
        sorted_indices_to_remove = cumulative_probs > top_p
        # Shift the mask to the right to keep the first token above the threshold.
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0

        indices_to_remove = torch.zeros_like(logits, dtype=torch.bool).scatter_(
            dim=-1, index=sorted_indices, src=sorted_indices_to_remove
        )
        logits[indices_to_remove] = -float("Inf")

        return standard(temperature=temperature)(logits)

    return sample
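if __name__ == '__main__':
    # A minimal usage sketch of the samplers above; the batch size (2) and
    # vocabulary size (50) are arbitrary illustration values.
    logits = torch.randn(2, 50)                       # [batch, vocab]
    picked = nucleus_sampler(top_p=0.9, temperature=0.8)(logits)
    print(picked.tokens.shape)                        # torch.Size([2])
    greedy = argmax()(logits)
    assert torch.equal(greedy.tokens, logits.argmax(dim=-1))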
import pandas as pd
import requests
from bs4 import BeautifulSoup


class Propertypro:
    """
    Web-scraper tool for scraping rental listings on propertypro.ng.

    scrape_data returns a dataframe with the following columns:
    title, location, price, number of bedrooms, toilets, bathrooms, and
    flags for whether the property is furnished, serviced and newly built.
    """

    def __init__(self) -> None:
        self.no_samples = 0

    def process_data(self, dataframe: pd.DataFrame) -> pd.DataFrame:
        """
        Cleans scraped data from the provided dataframe.

        :param dataframe: scraped data
        :return: cleaned pandas dataframe
        """
        data = dataframe
        data = data.dropna()
        data['rooms'] = data['rooms'].str.split('\n')
        data[['nothing', 'bedroom', 'bathroom', 'toilet', 'remove']] = pd.DataFrame(
            data['rooms'].tolist(), index=data.index)
        data['bedroom'] = data['bedroom'].str.strip('beds')
        data['bathroom'] = data['bathroom'].str.strip('baths')
        data['toilet'] = data['toilet'].str.strip('Toilets')
        # regex=True is explicit: pandas >= 2.0 defaults str.replace to literal matching
        data['price'] = data['price'].str.replace(r'[^0-9]+', '', regex=True)
        data['furnishing'] = data['furnishing'].str.split('\n')
        data['newly_built'] = data['furnishing'].apply(
            lambda x: '1' if "Newly Built" in x else '0')
        data['furnished'] = data['furnishing'].apply(
            lambda x: '1' if "Furnished" in x else '0')
        data['serviced'] = data['furnishing'].apply(
            lambda x: '1' if "Serviced" in x else '0')
        data = data.drop(columns=['rooms', 'nothing', 'remove', 'furnishing'])
        return data

    def scrape_data(self, no_samples, keywords):
        """
        Scrapes listing data for each location keyword.

        :param no_samples: number of samples to scrape (about 22 per page)
        :param keywords: list of location keywords to scrape
        :return: pandas dataframe
        """
        data = {"title": [], "location": [], "furnishing": [], "rooms": [], "price": []}
        for keyword in keywords:
            page_url = []
            for i in range(0, round(no_samples / 22)):
                page_url.append(
                    'https://www.propertypro.ng/property-for-rent/in/' + keyword
                    + '?search=&type=&bedroom=&min_price=&max_price=&page=' + str(i))
            for links in page_url:
                response = requests.get(links)
                soup = BeautifulSoup(response.content, 'html.parser')
                for title in soup.find_all('h2', {'class': "listings-property-title"}):
                    data["title"].append(title.text)
                    data["location"].append(keyword)
                for furnishing in soup.find_all('div', {'class': "furnished-btn"}):
                    data["furnishing"].append(furnishing.text)
                for rooms in soup.find_all('div', {'class': "fur-areea"}):
                    data["rooms"].append(rooms.text)
                for price in soup.find_all('h3', {'class': 'listings-price'}):
                    data["price"].append(price.text)
            page_url.clear()
        # build from a dict of unequal-length lists, then transpose
        df = pd.DataFrame.from_dict(data, orient='index')
        df = df.transpose()
        pd.set_option("display.max_rows", None, "display.max_columns", None)
        df = self.process_data(df)
        return df
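if __name__ == '__main__':
    # A hedged usage sketch: this performs live HTTP requests against
    # propertypro.ng, so it depends on the site being reachable and on its
    # markup staying unchanged. The location keyword 'lekki' is illustrative.
    scraper = Propertypro()
    listings = scraper.scrape_data(no_samples=44, keywords=['lekki'])
    print(listings.head())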
from oslo_log import log as logging from oslo_messaging import RemoteError from nca47.api.controllers.v1 import base from nca47.api.controllers.v1 import tools from nca47.common.exception import NonExistParam from nca47.common.exception import ParamFormatError from nca47.common.exception import ParamNull from nca47.common.exception import ParamValueError from nca47.common.exception import Nca47Exception from nca47.common.exception import BadRequest from nca47.common.i18n import _ from nca47.common.i18n import _LE from nca47.manager import central from oslo_serialization import jsonutils as json from nca47.api.controllers.v1.tools import check_areaname from nca47.api.controllers.v1.tools import check_ttl from nca47.api.controllers.v1.tools import check_renewal from nca47.api.controllers.v1.tools import is_not_list LOG = logging.getLogger(__name__) class DnsZonesController(base.BaseRestController): """ nca47 dnsZones class, using for add/delete/update/query the zones info, validate parameters whether is legal, handling DB operations and calling rpc client's corresponding method to send messaging to agent endpoints """ def __init__(self): self.manager = central.CentralManager.get_instance() super(DnsZonesController, self).__init__() def create(self, req, *args, **kwargs): """create the dns zones""" # get the context context = req.context try: # get the body values = json.loads(req.body) # get the url url = req.url # if len(args) != 1: # raise BadRequest(resource="zone create", msg=url) if 'default_ttl' not in values.keys(): values['default_ttl'] = "3600" if 'renewal' not in values.keys(): raise NonExistParam(param_name='renewal') if values['renewal'] == 'no': # check the in values valid_attributes = ['name', 'owners', 'default_ttl', 'renewal', 'tenant_id'] elif values['renewal'] == 'yes': # check the in values valid_attributes = ['name', 'owners', 'default_ttl', 'renewal', 'zone_content', 'slaves', 'tenant_id'] else: raise ParamValueError(param_name='renewal') # check the in values recom_msg = self.validat_parms(values, valid_attributes) LOG.info(_("the in value body is %(body)s"), {"body": values}) # from rpc server create the zones in db and device zones = self.manager.create_zone(context, recom_msg) except Nca47Exception as e: self.response.status = e.code LOG.error(_LE('Error exception! 
error info: %' + e.message)) LOG.exception(e) return tools.ret_info(e.code, e.message) except RemoteError as e: self.response.status = 500 message = e.value return tools.ret_info(self.response.status, message) except Exception as exception: LOG.exception(exception) self.response.status = 500 return tools.ret_info(self.response.status, exception.message) return zones def update(self, req, id, *args, **kwargs): """update the dns zones by currentUser/owners""" # get the context context = req.context try: # get the url url = req.url # if len(args) > 2: # raise BadRequest(resource="zone update", msg=url) # get the body values = json.loads(req.body) values['id'] = id LOG.info(_("the in value body is %(body)s"), {"body": values}) LOG.info(_("the id is %(id)s"), {"id": id}) if kwargs.get('owners'): # check the in values valid_attributes = ['id', 'tenant_id', 'owners'] recom_msg = self.validat_parms(values, valid_attributes) # from rpc server update the zones in db and device zones = self.manager.update_zone_owners(context, recom_msg, recom_msg['id']) else: # check the in values valid_attributes = ['id', 'tenant_id', 'default_ttl'] recom_msg = self.validat_parms(values, valid_attributes) # from rpc server update the zones in db and device zones = self.manager.update_zone(context, recom_msg, recom_msg['id']) except Nca47Exception as e: self.response.status = e.code LOG.error(_LE('Error exception! error info: %' + e.message)) LOG.exception(e) return tools.ret_info(e.code, e.message) except RemoteError as exception: self.response.status = 500 message = exception.value return tools.ret_info(self.response.status, message) except Exception as exception: LOG.exception(exception) self.response.status = 500 return tools.ret_info(self.response.status, exception.message) return zones def remove(self, req, id, *args, **kwargs): """delete the dns zones""" # get the context context = req.context try: # get the url url = req.url # if len(args) != 1: # raise BadRequest(resource="zone delete", msg=url) # get the body values = {} values.update(kwargs) values['id'] = id LOG.info(_("the in value body is %(body)s"), {"body": values}) # check the in values valid_attributes = ['tenant_id', 'id'] recom_msg = self.validat_parms(values, valid_attributes) # from rpc server delete the zones in db and device zones = self.manager.delete_zone(context, recom_msg['id']) except Nca47Exception as e: LOG.error(_LE('Error exception! 
error info: %' + e.message)) LOG.exception(e) self.response.status = e.code return tools.ret_info(e.code, e.message) except RemoteError as exception: self.response.status = 500 message = exception.value return tools.ret_info(self.response.status, message) except Exception as exception: LOG.exception(exception) self.response.status = 500 return tools.ret_info(self.response.status, exception.message) return zones def list(self, req, *args, **kwargs): """get the list of the dns zones""" # get the context context = req.context try: if kwargs.get('device'): LOG.info(_(" args is %(args)s, kwargs is %(kwargs)s"), {"args": args, "kwargs": kwargs}) # from rpc server get the zones in device zones = self.manager.get_zones(context) else: # get the body values = {} values.update(kwargs) LOG.info(_(" args is %(args)s, kwargs is %(kwargs)s"), {"args": args, "kwargs": kwargs}) # check the in values valid_attributes = ['tenant_id'] recom_msg = self.validat_parms(values, valid_attributes) # from rpc server get the zones in db zones = self.manager.get_db_zones(context, recom_msg) LOG.info(_("Return of get_all_db_zone JSON is %(zones)s !"), {"zones": zones}) except Nca47Exception as e: self.response.status = e.code LOG.error(_LE('Error exception! error info: %' + e.message)) LOG.exception(e) return tools.ret_info(e.code, e.message) except RemoteError as exception: self.response.status = 500 message = exception.value return tools.ret_info(self.response.status, message) except Exception as exception: LOG.exception(exception) self.response.status = 500 return tools.ret_info(self.response.status, exception.message) return zones def show(self, req, id, *args, **kwargs): """get one dns zone info""" # get the context context = req.context try: if kwargs.get('device'): LOG.info(_(" args is %(args)s"), {"args": args}) # from rpc server get the zone in device zones = self.manager.get_zones(context) else: LOG.info(_(" args is %(args)s"), {"args": args}) # from rpc server get the zone in db zones = self.manager.get_zone_db_details(context, id) except Nca47Exception as e: self.response.status = e.code LOG.error(_LE('Error exception! error info: %' + e.message)) LOG.exception(e) return tools.ret_info(e.code, e.message) except RemoteError as exception: self.response.status = 500 message = exception.value return tools.ret_info(self.response.status, message) except Exception as exception: LOG.exception(exception) self.response.status = 500 return tools.ret_info(self.response.status, exception.message) return zones def validat_parms(self, values, valid_keys): """check the in value is null and nums""" recom_msg = tools.validat_values(values, valid_keys) for value in recom_msg: if value == "name": try: spe_char = '.' char = values[value][-1] if not cmp(spe_char, char): recom_msg[value] = values[value][:-1] if not check_areaname(recom_msg[value]): raise ParamFormatError(param_name=value) except Exception: raise ParamFormatError(param_name=value) elif value == "default_ttl": if not check_ttl(values['default_ttl']): raise ParamFormatError(param_name=value) elif value == "renewal": if not check_renewal(values['renewal']): raise ParamValueError(param_name=value) elif value == "owners": flag = is_not_list(values['owners']) if flag == "0": raise ParamFormatError(param_name=value) elif flag == "1": raise ParamNull(param_name=value) elif value == "slaves": flag = is_not_list(values['slaves']) if flag == "0": raise ParamFormatError(param_name=value) elif flag == "1": raise ParamNull(param_name=value) return recom_msg
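# A hedged sketch of the JSON body that create() above validates when
# renewal is 'no' (field names taken from valid_attributes; the values are
# illustrative only):
#
#   {
#       "name": "example.com.",
#       "owners": ["admin"],
#       "default_ttl": "3600",
#       "renewal": "no",
#       "tenant_id": "tenant-01"
#   }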
import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable from layers import * from layers.modules.l2norm import L2Norm from data import * import os import math #from vis_features import plot_features class ResNet(nn.Module): def __init__(self, block, layers, num_classes=1000): self.inplanes = 64 super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) # self.avgpool = nn.AvgPool2d(7) # self.fc = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) # x = self.avgpool(x) # x = x.view(x.size(0), -1) # x = self.fc(x) return x def conv3x3(in_planes, out_planes, stride=1): "3x3 convolution with padding" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out def resnet18(pretrained=False, **kwargs): """Constructs a ResNet-18 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) return model class SSD(nn.Module): """Single Shot Multibox Architecture The network is composed of a base VGG network followed by the added multibox conv layers. Each multibox layer branches into 1) conv2d for class conf scores 2) conv2d for localization predictions 3) associated priorbox layer to produce default bounding boxes specific to the layer's feature map size. See: https://arxiv.org/pdf/1512.02325.pdf for more details. 
Args: phase: (string) Can be "test" or "train" size: input image size base: VGG16 layers for input, size of either 300 or 500 extras: extra layers that feed to multibox loc and conf layers head: "multibox head" consists of loc and conf conv layers """ def __init__(self, phase, size, base, extras, head, num_classes, resnet18): super(SSD, self).__init__() self.phase = phase self.num_classes = num_classes self.cfg = VOC_512_3 self.priorbox = PriorBox(self.cfg) self.priors = Variable(self.priorbox.forward(), volatile=True) self.size = size self.conv1 = resnet18.conv1 self.bn1 = resnet18.bn1 self.relu = resnet18.relu self.maxpool = resnet18.maxpool self.layer1 = resnet18.layer1 self.layer2 = resnet18.layer2 self.layer3 = resnet18.layer3 self.layer4 = resnet18.layer4 # self.vgg = nn.ModuleList(base) # Layer learns to scale the l2 normalized features from conv4_3 self.L2Norm = L2Norm(256, 20) self.L2Norm2 = L2Norm(512, 20) self.vgg1 = nn.ModuleList(base[0]) self.vgg2 = nn.ModuleList(base[1]) # self.vgg3 = nn.ModuleList(base[2]) # self.vgg4 = nn.ModuleList(base[3]) self.vgg5 = nn.ModuleList(base[4]) self.vgg6 = nn.ModuleList(base[5]) self.vgg7 = nn.ModuleList(base[6]) self.vgg8 = nn.ModuleList(base[7]) self.de1 = nn.ModuleList(base[8]) self.de2 = nn.ModuleList(base[9]) self.de3 = nn.ModuleList(base[10]) self.de4 = nn.ModuleList(base[11]) self.d19sample1 = nn.Sequential( nn.Conv2d(1024, 64, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(64), nn.ReLU(inplace=True)) self.d19sample2 = nn.Sequential( nn.Conv2d(1024, 64, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(64), nn.ReLU(inplace=True)) self.d19sample3 = nn.Sequential( nn.Conv2d(1024, 64, kernel_size=2, stride=4, bias=False), nn.BatchNorm2d(64), nn.ReLU(inplace=True)) self.ds38_19 = nn.Sequential( nn.Conv2d(512, 128, kernel_size=(1, 1), stride=2), nn.BatchNorm2d(128), nn.ReLU(inplace=True)) self.ds19_10 = nn.Sequential( nn.Conv2d(1024, 128, kernel_size=(1, 1), stride=2), nn.BatchNorm2d(128), nn.ReLU(inplace=True)) self.ds10_5 = nn.Sequential( nn.Conv2d(512, 128, kernel_size=(1, 1), stride=2), nn.BatchNorm2d(128), nn.ReLU(inplace=True)) self.ds5_3 = nn.Sequential( nn.Conv2d(512, 128, kernel_size=(1, 1), stride=2), nn.BatchNorm2d(128), nn.ReLU(inplace=True)) ''' self.de5_19 = nn.Sequential( nn.ConvTranspose2d(512, 512, kernel_size=3, stride=4, padding=0, output_padding=0), nn.BatchNorm2d(512), nn.Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1)), nn.BatchNorm2d(1024), nn.ReLU(inplace=True)) self.de10_38 = nn.Sequential( nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, output_padding=0), nn.BatchNorm2d(256), nn.ConvTranspose2d(256, 256, kernel_size=3, stride=2, padding=1, output_padding=1), nn.BatchNorm2d(256), nn.Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1)), nn.BatchNorm2d(128), nn.ReLU(inplace=True)) ''' self.extras = nn.ModuleList(extras) self.loc = nn.ModuleList(head[0]) self.conf = nn.ModuleList(head[1]) self.con_press38 = nn.Sequential(nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1)), nn.BatchNorm2d(128)) ''' self.con_press19 = nn.Sequential(nn.Conv2d(1024, 128, kernel_size=(1, 1), stride=(1, 1)), nn.BatchNorm2d(128)) self.con_press10 = nn.Sequential(nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1)), nn.BatchNorm2d(128)) ''' if phase == 'test': self.softmax = nn.Softmax() # self.detect = Detect(num_classes, 0, 200, 0.01, 0.45) def forward(self, x): """Applies network layers and ops on input image(s) x. Args: x: input image or batch of images. Shape: [batch,3,300,300]. 
Return: Depending on phase: test: Variable(tensor) of output class label predictions, confidence score, and corresponding location predictions for each object detected. Shape: [batch,topk,7] train: list of concat outputs from: 1: confidence layers, Shape: [batch*num_priors,num_classes] 2: localization layers, Shape: [batch,num_priors*4] 3: priorbox layers, Shape: [2,num_priors*4] """ sources = list() sources1=list() loc = list() conf = list() x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) ''' res=x x = self.layer3[0].conv1(x) x = self.layer3[0].bn1(x) x = self.layer3[0].relu(x) x = self.layer3[0].conv2(x) x = self.layer3[0].bn2(x) res= self.layer3[0].downsample(res) x=x+res x=self.layer3[0].relu(x) x = self.layer3[1](x) ''' x = self.layer3(x) res38 = x s = self.L2Norm(res38) #x1=F.interpolate(s,size=[38,38]) #print(x1.size()) #sources1.append(x1) s2 = s for k in range(len(self.vgg2)): s2 = self.vgg2[k](s2) # s4 = s # for k in range(len(self.vgg3)): # s4 = self.vgg3[k](s4) # s6 = s # for k in range(len(self.vgg4)): # s6 = self.vgg4[k](s6) s8 = s for k in range(len(self.vgg5)): s8 = self.vgg5[k](s8) for k in range(len(self.vgg6)): s = self.vgg6[k](s) s = torch.cat((s, s2, s8), 1) for k in range(len(self.vgg7)): s = self.vgg7[k](s) s38 = self.L2Norm2(s) # sources.append(s) ds19 = self.ds38_19(s38) x = self.layer4(x) # apply vgg up to fc7 for k in range(len(self.vgg1)): x = self.vgg1[k](x) # if (k == 2): # x=x*0.5+res19*0.5 ds10 = self.ds19_10(x) xde38 = x for k in range(len(self.de4)): xde38 = self.de4[k](xde38) s38_1 = self.con_press38(s38) # sources.append(s38) x19 = self.extras[21](x) s19 = self.extras[22](x19) # sources.append(x19) res10 = self.d19sample1(x) res5 = self.d19sample2(x) res3 = self.d19sample3(x) feamp = [res10, res5, res3] # apply extra layers and cache source layer outputs for k in range(len(self.extras)): if (k == 21): break x = self.extras[k](x) if (k == 6): # s38_2 = self.de10_38(x) # s38_2=s38_1+s38_2 s38 = torch.cat((s38, s38_1, xde38), 1) for k in range(len(self.vgg8)): s38 = self.vgg8[k](s38) sources.append(s38) ds5 = self.ds10_5(x) xde19 = x for k in range(len(self.de3)): xde19 = self.de3[k](xde19) xde19 = ds19 + xde19 s19 = torch.cat((s19, ds19, xde19), 1) s19 = self.extras[23](s19) s19 = self.extras[24](s19) s19 = self.extras[25](s19) sources.append(s19) s10 = x # sources.append(x10) elif (k == 13): # s19_2 = self.de5_19(x) # s19 = s19 + s19_2 s5 = x ds3 = self.ds5_3(x) xde10 = x for k in range(len(self.de2)): xde10 = self.de2[k](xde10) xde10 = xde10 + ds10 s10 = torch.cat((s10, ds10, xde10), 1) x10 = self.extras[26](s10) s10 = self.extras[27](x10) s10 = self.extras[28](s10) # s10 = s10 + xde10 sources.append(s10) # sources.append(x5) elif (k == 20): xde5 = x for k in range(len(self.de1)): xde5 = self.de1[k](xde5) xde5 = xde5 + ds5 s5 = torch.cat((s5, ds5, xde5), 1) x5 = self.extras[29](s5) s5 = self.extras[30](x5) s5 = self.extras[31](s5) sources.append(s5) s3 = torch.cat((x, ds3), 1) x3 = self.extras[32](s3) s3 = self.extras[33](x3) s3 = self.extras[34](s3) sources.append(s3) if (k == 0): x = torch.cat((x, res10), 1) elif (k == 7): x = torch.cat((x, res5), 1) elif (k == 14): x = torch.cat((x, res3), 1) # plot_features(sources1[0], 64, 1, "figure2/", (38, 38), "liye") # for i in range(10000): # print("ok") # apply multibox head to source layers for (x, l, c) in zip(sources, self.loc, self.conf): loc.append(l(x).permute(0, 2, 3, 1).contiguous()) conf.append(c(x).permute(0, 2, 3, 1).contiguous()) loc = 
torch.cat([o.view(o.size(0), -1) for o in loc], 1) conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1) if self.phase == "test": output = ( loc.view(loc.size(0), -1, 4), # loc preds self.softmax(conf.view(-1, self.num_classes)), # conf preds ) else: output = ( loc.view(loc.size(0), -1, 4), conf.view(conf.size(0), -1, self.num_classes), ) return output def load_weights(self, base_file): other, ext = os.path.splitext(base_file) if ext == '.pkl' or '.pth': print('Loading weights into state dict...') self.load_state_dict(torch.load(base_file, map_location=lambda storage, loc: storage)) print('Finished!') else: print('Sorry only .pth and .pkl files supported.') # This function is derived from torchvision VGG make_layers() # https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py def vgg(cfg, i, batch_norm=False): layers = [] in_channels = i for v in cfg: if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] elif v == 'C': layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) conv7 = nn.Conv2d(1024, 1024, kernel_size=1) conv8_2 = nn.Conv2d(256, 512, kernel_size=3, padding=2, dilation=2) conv8_4 = nn.Conv2d(256, 512, kernel_size=3, padding=4, dilation=4) conv8_6 = nn.Conv2d(256, 512, kernel_size=3, padding=6, dilation=6) conv8_8 = nn.Conv2d(256, 512, kernel_size=3, padding=8, dilation=8) conv9_2 = nn.Conv2d(512, 512, kernel_size=1) conv9_4 = nn.Conv2d(512, 512, kernel_size=1) conv9_6 = nn.Conv2d(512, 512, kernel_size=1) conv9_8 = nn.Conv2d(512, 512, kernel_size=1) conv9_2_ = nn.Conv2d(512, 512, kernel_size=3, padding=1, groups=2) conv9_4_ = nn.Conv2d(512, 512, kernel_size=3, padding=1, groups=2) conv9_6_ = nn.Conv2d(512, 512, kernel_size=3, padding=1, groups=2) conv9_8_ = nn.Conv2d(512, 512, kernel_size=3, padding=1, groups=2) conv10_2 = nn.Conv2d(512, 128, kernel_size=1) conv10_4 = nn.Conv2d(512, 128, kernel_size=1) conv10_6 = nn.Conv2d(512, 128, kernel_size=1) conv10_8 = nn.Conv2d(512, 128, kernel_size=1) conv11 = nn.Conv2d(256, 1024, kernel_size=1) conv12 = nn.Conv2d(1280, 512, kernel_size=1) conv13 = nn.Conv2d(768, 512, kernel_size=1) de3_5 = torch.nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, output_padding=1) de3_5_0 = nn.BatchNorm2d(512) de3_5_1 = torch.nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1)) de3_5_2 = nn.BatchNorm2d(128) de3_5_3 = nn.ReLU(inplace=True) de5_10 = torch.nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, output_padding=1) de5_10_0 = nn.BatchNorm2d(512) de5_10_1 = torch.nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1)) de5_10_2 = nn.BatchNorm2d(128) de5_10_3 = nn.ReLU(inplace=True) de10_19 = torch.nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, output_padding=1) de10_19_0 = nn.BatchNorm2d(512) de10_19_1 = torch.nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1)) de10_19_2 = nn.BatchNorm2d(128) de10_19_3 = nn.ReLU(inplace=True) de19_38 = torch.nn.ConvTranspose2d(1024, 512, kernel_size=3, stride=2, padding=1, output_padding=1) de19_38_0 = nn.BatchNorm2d(512) de19_38_1 = torch.nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1)) de19_38_2 = nn.BatchNorm2d(128) de19_38_3 = nn.ReLU(inplace=True) layers += [pool5, conv6, nn.BatchNorm2d(1024), 
nn.ReLU(inplace=True), conv7, nn.BatchNorm2d(1024), nn.ReLU(inplace=True)] layer1 = layers layer21 = [conv8_2, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv9_2, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv9_2_, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv10_2, nn.BatchNorm2d(128), nn.ReLU(inplace=True)] layer22 = [conv8_4, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv9_4, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv9_4_, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv10_4, nn.BatchNorm2d(128), nn.ReLU(inplace=True)] layer23 = [conv8_6, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv9_6, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv9_6_, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv10_6, nn.BatchNorm2d(128), nn.ReLU(inplace=True)] layer24 = [conv8_8, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv9_8, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv9_8_, nn.BatchNorm2d(512), nn.ReLU(inplace=True), conv10_8, nn.BatchNorm2d(128), nn.ReLU(inplace=True)] layer25 = [conv11, nn.BatchNorm2d(1024), nn.ReLU(inplace=True)] layer26 = [conv12, nn.BatchNorm2d(512), nn.ReLU(inplace=True)] layer27 = [conv13, nn.BatchNorm2d(512), nn.ReLU(inplace=True)] layer3 = [de3_5, de3_5_0, de3_5_1, de3_5_2, de3_5_3] layer4 = [de5_10, de5_10_0, de5_10_1, de5_10_2, de5_10_3] layer5 = [de10_19, de10_19_0, de10_19_1, de10_19_2, de10_19_3] layer6 = [de19_38, de19_38_0, de19_38_1, de19_38_2, de19_38_3] # layer3 = [conv13, nn.BatchNorm2d(128), nn.ReLU(inplace=True)] # layer4 = [conv14, nn.BatchNorm2d(128), nn.ReLU(inplace=True)] layers = [layer1, layer21, layer22, layer23, layer24, layer25, layer26, layer27, layer3, layer4, layer5, layer6] return layers def add_extras(cfg, i, batch_norm=False): # Extra layers added to VGG for feature scaling layers = [] cc0 = torch.nn.Conv2d(1024, 192, kernel_size=(1, 1), stride=(1, 1)) cc0_1 = nn.BatchNorm2d(256) cc0_2 = nn.ReLU(inplace=True) cc1 = torch.nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) cc1_0 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) cc1_1 = nn.BatchNorm2d(512) cc1_2 = nn.ReLU(inplace=True) cc2 = torch.nn.Conv2d(512, 192, kernel_size=(1, 1), stride=(1, 1)) cc2_1 = nn.BatchNorm2d(256) cc2_2 = nn.ReLU(inplace=True) cc3 = torch.nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) cc3_0 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) cc3_1 = nn.BatchNorm2d(512) cc3_2 = nn.ReLU(inplace=True) cc4 = torch.nn.Conv2d(512, 192, kernel_size=(1, 1), stride=(1, 1)) cc4_1 = nn.BatchNorm2d(256) cc4_2 = nn.ReLU(inplace=True) cc5 = torch.nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2),padding=1) cc5_0 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) cc5_1 = nn.BatchNorm2d(512) cc5_2 = nn.ReLU(inplace=True) cc6 = torch.nn.Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1)) cc6_1 = nn.BatchNorm2d(256) cc6_2 = nn.ReLU(inplace=True) #cc7 = torch.nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1)) cc7 = torch.nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) cc7_1 = nn.BatchNorm2d(512) cc7_2 = nn.ReLU(inplace=True) cc8 = torch.nn.Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1)) cc8_1 = nn.BatchNorm2d(1024) cc8_2 = torch.nn.Conv2d(1280, 1024, kernel_size=(1, 1), stride=(1, 1)) cc8_3 = nn.BatchNorm2d(1024) cc9 = torch.nn.Conv2d(768, 512, kernel_size=(1, 1), stride=(1, 1)) cc9_1 = nn.BatchNorm2d(512) cc10 = torch.nn.Conv2d(768, 512, kernel_size=(1, 1), stride=(1, 1)) cc10_1 = nn.BatchNorm2d(512) cc11 = 
torch.nn.Conv2d(640, 512, kernel_size=(1, 1), stride=(1, 1)) cc11_1 = nn.BatchNorm2d(512) ''' cc12 = torch.nn.Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1)) cc12_1 = nn.BatchNorm2d(512) ''' layers = [cc0, cc0_1, cc0_2, cc1, cc1_0, cc1_1, cc1_2, cc2, cc2_1, cc2_2, cc3, cc3_0, cc3_1, cc3_2, cc4, cc4_1, cc4_2, cc5, cc5_0, cc5_1, cc5_2, cc8, cc8_1, cc8_2, cc8_3, nn.ReLU(inplace=True), cc9, cc9_1, nn.ReLU(inplace=True), cc10, cc10_1, nn.ReLU(inplace=True), cc11, cc11_1, nn.ReLU(inplace=True), cc6, cc6_1, cc6_2, cc7, cc7_1, cc7_2, ] return layers def multibox(vgg, extra_layers, cfg, num_classes): loc_layers = [ torch.nn.Conv2d(512, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), # torch.nn.Conv2d(512, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), # torch.nn.Conv2d(512, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), torch.nn.Conv2d(1024, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), torch.nn.Conv2d(512, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), torch.nn.Conv2d(512, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), # , torch.nn.Conv2d(512, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), #torch.nn.Conv2d(512, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) ] conf_layers = [ torch.nn.Conv2d(512, 6 * 21, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), # torch.nn.Conv2d(512, 126, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), # torch.nn.Conv2d(512, 84, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), torch.nn.Conv2d(1024, 6 * 21, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), torch.nn.Conv2d(512, 6 * 21, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), torch.nn.Conv2d(512, 6 * 21, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), # , torch.nn.Conv2d(512, 6 * 21, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), #torch.nn.Conv2d(512, 6*21, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) ] return vgg, extra_layers, (loc_layers, conf_layers) base = { '320': [], '300': [], '512': [], } extras = { '320': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256], '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256], '512': [], } mbox = { '320': [4, 6, 6, 6, 4, 4], '300': [4, 6, 6, 6, 4, 4], # number of boxes per feature map location '512': [], } def build_net(phase, size=300, num_classes=21): if phase != "test" and phase != "train": print("ERROR: Phase: " + phase + " not recognized") return # if size != 300: # print("ERROR: You specified size " + repr(size) + ". However, " + # "currently only SSD300 (size=300) is supported!") # return base_, extras_, head_ = multibox(vgg(base[str(size)], 3), add_extras(extras[str(size)], 1024), mbox[str(size)], num_classes) return SSD(phase, size, base_, extras_, head_, num_classes, resnet18())
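# A hedged smoke-test sketch for build_net above (commented out because it
# needs the project's `layers` and `data` modules -- PriorBox, L2Norm,
# VOC_512_3 -- on the path, and an input resolution that matches the prior
# configuration; 512 here is an assumption based on the VOC_512_3 config):
#
#   net = build_net('train', size=512, num_classes=21)
#   loc, conf = net(torch.randn(1, 3, 512, 512))
#   print(loc.shape, conf.shape)   # [1, num_priors, 4], [1, num_priors, 21]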
""" This module defines views used in CRUD operations on articles. """ from rest_framework import generics, status from rest_framework.response import Response from rest_framework.permissions import ( AllowAny, IsAuthenticatedOrReadOnly, IsAuthenticated ) from rest_framework.serializers import ValidationError from datetime import datetime from rest_framework.views import APIView from django.db.models import Avg from django.db.models.signals import post_save from django.dispatch import receiver from django.core.exceptions import ObjectDoesNotExist from django.views.generic import ListView from rest_framework.views import APIView from rest_framework.renderers import JSONRenderer from rest_framework import authentication from .serializers import CommentSerializer, ArticleSerializer, ArticleRatingSerializer, LikesSerializer, TagsSerializer # Add pagination from rest_framework.pagination import PageNumberPagination # Add search package from rest_framework.filters import SearchFilter from django_filters.rest_framework import DjangoFilterBackend from .renderers import ArticleJSONRenderer, BookmarkJSONRenderer from .serializers import ( ArticleSerializer, ArticleRatingSerializer, LikesSerializer, TagsSerializer, ArticleReportSerializer, ArticleReportRetrieveSerializer, BookmarkSerializer ) from .models import ( Article, ArticleRating, Likes, ArticleTags, ArticleReport, Bookmark) from authors.apps.notifications.models import notify_follower def create_tag(tags, article): """ This method checks whether a tag with tag provided exists in the database and creates it if it does not exist. :params str tag: name of the new tag or tag to query from the database :returns cls object ArticleTags: the tag as retrieved from the database """ # retrieve all tag names and create new ones if they do not exist # also, add them to the articles and save the article instance for tag in tags.split(','): article_tag = ArticleTags.objects.filter(tag__icontains=tag.strip()) if not article_tag: data = {'tag': tag.strip()} serializer = TagsSerializer(data=data) serializer.is_valid(raise_exception=True) article_tag = serializer.save() article.article_tags.add(article_tag) else: article.article_tags.add(article_tag.first()) article.save() return None from .models import Article, ArticleRating, Likes, Comment class ArticleAPIView(generics.ListCreateAPIView): """ get: Retrieve all articles post: Create a new article """ queryset = Article.objects.all() serializer_class = ArticleSerializer renderer_classes = (ArticleJSONRenderer,) permission_classes = (IsAuthenticatedOrReadOnly,) # Apply pagination to view pagination_class = PageNumberPagination # Add search class and fields filter_backends = (SearchFilter, DjangoFilterBackend, ) # Define search and filter fields with the field names mapped to a list of lookups fields = { 'author__username': ['icontains'], 'title': ['icontains'], 'article_tags__tag': ['icontains'], } search_fields = fields filter_fields = fields def post(self, request): """ Creates an article :params HttpRequest: a post request with article data sent by clients to create a new article. 
:return aricleObject:returns a successfully created article """ # Retrieve article data from the request object and convert it # to a kwargs object # get user data at this point article = { 'title': request.data.get('title', None), 'body': request.data.get('body', None), 'description': request.data.get('description', None), 'author': request.user.username } # pass article data to the serializer class, check whether the data is # valid and if valid, save it. serializer = self.serializer_class(data=article) serializer.is_valid(raise_exception=True) article = serializer.save() # retrieve the tags as passed on in the article data tags = request.data.get('tags', None) if tags: create_tag(tags, article) return Response(serializer.data, status.HTTP_201_CREATED) @receiver(post_save, sender=Article) def notify_follower_reciever(sender, instance, created, **kwargs): """ Send a notification after the article being created is saved. """ if created: message = (instance.author.username + " has created an article. Title: " + instance.title) notify_follower(instance.author, message, instance) class ArticleDetailsView(generics.RetrieveUpdateDestroyAPIView): """ get: put: delete: """ serializer_class = ArticleSerializer renderer_classes = (ArticleJSONRenderer,) permission_classes = (IsAuthenticatedOrReadOnly,) def get_object(self, slug): try: return Article.objects.get(slug=slug) except ObjectDoesNotExist: return None def get(self, request, slug): """ Retrieve a specific article from the database given it's article id. :params str slug: a slug of an article you want to retrieve :returns article: a json data for the requested article """ article = self.get_object(slug) if article: serializer = self.serializer_class( article, context={'request': request}) return Response(serializer.data, status.HTTP_200_OK) else: # return error message indicating article requested is not found. return Response({ 'error': 'Article with given id does not exist' }, status.HTTP_404_NOT_FOUND) def delete(self, request, slug): """ Delete a given article. :params slug: a slug of the article to be deleted request: a request object with authenticated user credentials :returns json message: a json object containing message to indicate that the article has been deleted """ article = self.get_object(slug) if not article: # return error message for non-existing article return Response({ 'error': 'Article with given id does not exist' }, status.HTTP_404_NOT_FOUND) # check whether user owns this article before attempting to delete it if article.author.id == request.user.id: article.delete() return Response( { 'message': "Article deleted successfully" }, status.HTTP_200_OK) else: # prevent a user from deleting an article s/he does not own return Response({ 'error': 'You cannot delete articles belonging to other users.' 
}, status.HTTP_403_FORBIDDEN) def put(self, request, slug): """ Update a single article :params str slug: a slug for the article to be updated request: a request object with new data for the article :returns article: An updated article in json format """ article = self.get_object(slug) if not article: # Tell client we have not found the requested article return Response({ 'error': 'Article requested does not exist' }, status.HTTP_404_NOT_FOUND) # check whether user owns this article and proceed if they do if article.author.id == request.user.id: request.data['author'] = request.user.username serializer = self.serializer_class(article, data=request.data) serializer.is_valid(raise_exception=True) serializer.save() tags = request.data.get('tags', None) # clear all tags in the article before adding new ones article.article_tags.clear() if tags: # add tags to the article create_tag(tags, article) return Response(serializer.data, status.HTTP_200_OK) else: # prevent a user from updating an article s/he does not own return Response( { 'error': 'You cannot edit an article you do not own.' }, status.HTTP_403_FORBIDDEN) class FavoriteArticle(generics.CreateAPIView): """ A user is able to favourite an article if they had not favourited it. If they had favourited it, the article becomes unfavourited. """ permission_classes = (IsAuthenticated,) queryset = Article.objects.all() serializer_class = ArticleSerializer def post(self, request, slug): """ This method handles favouriting and unfavouriting of articles. Checks whether the article exists. Checks whether the user has favourited the article in order to favourite it or unfavourite it if the user had already favourited it. """ try: article = Article.objects.get(slug=slug) except ObjectDoesNotExist: response = { "message": "The article does not exist", } return Response(response, status=status.HTTP_404_NOT_FOUND) user = request.user if user in article.favourited.all(): # User has already favourited it, unfavourites the article article.favourited.remove(user.id) article.save() serializer = self.get_serializer(article) message = "You have successfully unfavourited this article" response = {"message": message, "article": serializer.data} return Response(response, status=status.HTTP_200_OK) else: # Favourites the article article.favourited.add(user.id) article.save() serializer = self.get_serializer(article) message = "You have successfully favourited this article" response = {"message": message, "article": serializer.data} return Response(response, status=status.HTTP_200_OK) class ArticleRatingAPIView(generics.ListCreateAPIView): """ get: Retrieve all article ratings post: Create a new article rating """ permission_classes = (IsAuthenticated,) queryset = ArticleRating.objects.all() serializer_class = ArticleRatingSerializer renderer_classes = (ArticleJSONRenderer,) def post(self, request, slug): """ Creates an article rating :params HttpRequest: A post request with article rating data sent by clients to create a new article rating. :return: Returns a successfully created article rating """ # Retrieve article rating data from the request object and convert it # to a kwargs object # get user data at this point try: article = Article.objects.get(slug=slug) except Exception: response = {'message': 'That article does not exist'} return Response(response, status=status.HTTP_404_NOT_FOUND) if article.author.id == request.user.id: wink_emoji = u"\U0001F609" data = { 'message': 'We see what you did there {}. 
Sorry, but you cannot rate your ' 'own article.'.format(wink_emoji) } return Response(data, status.HTTP_403_FORBIDDEN) article_rating = { 'article': article.id, 'user': request.user.id, 'rating': request.data.get('rating', None), } # pass article data to the serializer class, check whether the data is # valid and if valid, save it. serializer = self.serializer_class(data=article_rating) serializer.is_valid(raise_exception=True) serializer.save() # Save the average article rating to the Article model q = ArticleRating.objects.filter(article_id=article.id).aggregate( Avg('rating')) article.rating_average = q['rating__avg'] article.save(update_fields=['rating_average']) data = {"message": "Thank you for taking time to rate this article."} data = { "message": "Thank you for taking time to rate this article." } return Response(data, status.HTTP_201_CREATED) class ArticleLikes(generics.ListCreateAPIView): """ post: like or dislike an article """ serializer_class = LikesSerializer def get_object(self, slug): try: return Article.objects.get(slug=slug) except ObjectDoesNotExist: return None def post(self, request, slug): """ creates an article like or a dislike :params HttpRequest: this request contains a user authorization token and a json payload in the form{ "like": True/False }. True is a like while False is a dislike slug: a slug for the article user wants to like or dislike :returns str:message thanking user for taking time to give their opinion on this article status code 201: Indicates the a new record has been created for a lik or dislike """ # Let's check whether we have the correct payload before doing any # database transaction since they are very expensive to us. # This variable, `like`, holds user intention which can be a # like or dislike like = request.data.get('like', None) if like is None or type(like) != type(True): return Response( {'message': 'You must indicate whether you like or dislike this article' }, status.HTTP_400_BAD_REQUEST) # we continue now since we are sure we have a valid payload # Check whether user has already like or dislike this article likes = None # Let's check whether the article requested exists in our # database and retrieve it article = self.get_object(slug) try: likes = Likes.objects.get(user=request.user.id, article=article) except ObjectDoesNotExist: # let's do nothing here since we are only checking whether user has # liked or disliked this article pass # Alert user if article does not exist if not article: return Response( { 'message': 'Article requested does not exist' }, status.HTTP_404_NOT_FOUND ) new_like = { 'article': article.id, 'user': request.user.id, 'like': like } # If there is a record for this article and the current user in the # system, we modify it instead of creating a new one. 
if likes: # user had liked the article but now wants to dislike it if likes.like and not like: article.userLikes.remove(request.user) article.userDisLikes.add(request.user) # user had disliked this article but now wants to like it elif not likes.like and like: article.userLikes.add(request.user) article.userDisLikes.remove(request.user) elif like: # User can only like an article once or dislike an article once msg = '{}, you already liked this article.'.format( request.user.username) return Response( { 'message': msg }, status.HTTP_403_FORBIDDEN ) else: msg = '{}, you already disliked this article.'.format( request.user.username) return Response( { 'message': msg }, status.HTTP_403_FORBIDDEN ) # save the new value/state of the article article.save() # There is no need to create a new record; edit the existing one likes.like = like likes.save() else: # We don't need to do any more operations here # because this is user's first time to see this article serializer = self.serializer_class(data=new_like) serializer.is_valid(raise_exception=True) serializer.save() # update likes count or dislikes count for the article if like: article.userLikes.add(request.user) else: article.userDisLikes.add(request.user) # save the new state of our article article.save() # Tell user we are successful return Response( { 'message': ( 'Thank you {} for giving your opinion on this '.format( request.user.username) + 'article.' ) }, status.HTTP_201_CREATED ) class ArticleReportAPIView(generics.ListCreateAPIView): """ get: Retrieve all article reports post: Create a new article report """ permission_classes = (IsAuthenticated,) queryset = ArticleReport.objects.all() serializer_class = ArticleReportSerializer renderer_classes = (ArticleJSONRenderer,) def list(self, request, slug): """Method for listing all reports.""" try: article = Article.objects.get(slug=slug) except Exception: response = { 'message': 'That article does not exist.' } return Response(response, status=status.HTTP_404_NOT_FOUND) if request.user.is_staff: queryset = self.get_queryset() else: queryset = ArticleReport.objects.filter(user_id=request.user.id) if not queryset.exists(): response = { 'message': 'No concerns have been raised on this article.' } return Response(data=response, status=status.HTTP_404_NOT_FOUND) serializer = ArticleReportRetrieveSerializer(queryset, many=True) return Response(data=serializer.data, status=status.HTTP_200_OK) def post(self, request, slug): """Method for reporting an article.""" try: article = Article.objects.get(slug=slug) except Exception: response = { 'message': 'That article does not exist.' } return Response(response, status=status.HTTP_404_NOT_FOUND) user_report_count = ArticleReport.objects.filter( article_id=article.id, user_id=request.user.id).count() if user_report_count > 4: response = { 'message': 'You are not allowed to report an article more than five times.' } return Response(response, status=status.HTTP_200_OK) article_report = { 'article': article.id, 'user': request.user.id, 'text': request.data.get('text', None), } # pass article data to the serializer class, check whether the data is # valid and if valid, save it. serializer = self.serializer_class(data=article_report) serializer.is_valid(raise_exception=True) serializer.save() # Save the total number of reports flagged on this article. 
total_report_count = ArticleReport.objects.filter( article_id=article.id).count() article.report_count = total_report_count article.save(update_fields=['report_count']) data = { "message": "Your feedback has been recorded. Authors' " "Haven thanks you for your service." } return Response(data, status.HTTP_201_CREATED) class ArticleReportRUDAPIView(generics.RetrieveUpdateDestroyAPIView): """ get: Retrieve an article report delete: Delete an article report put: Update an article report """ permission_classes = (IsAuthenticated,) serializer_class = ArticleReportSerializer renderer_classes = (ArticleJSONRenderer,) def get_article_object(self, pk): """ Getter method for an ArticleReport using pk (primary key).""" try: return ArticleReport.objects.get(pk=pk) except ObjectDoesNotExist: return None def get(self, request, slug, pk): """The method for retrievieng a sinlge Article Report.""" article_report = self.get_article_object(pk) """ Attempt to get an article using the slug. If article doesn't exist the user will receive a message telling them so """ try: article = Article.objects.get(slug=slug) except Exception: response = { 'message': 'That article does not exist.' } return Response(response, status=status.HTTP_404_NOT_FOUND) if article_report: if request.user.is_staff or request.user == article_report.user: serializer = ArticleReportRetrieveSerializer(article_report) return Response(data=serializer.data, status=status.HTTP_200_OK) else: return Response(data={ 'message': 'You are not allowed to view this report.' }, status=status.HTTP_403_FORBIDDEN) else: # return error message indicating article report is not found. return Response(data={ 'message': 'That article report does not exist.' }, status=status.HTTP_404_NOT_FOUND) def put(self, request, slug, pk): article_report = self.get_article_object(pk) """ Attempt to get an article using the slug. If article doesn't exist the user will receive a message telling them so """ try: article = Article.objects.get(slug=slug) except Exception: response = { 'message': 'That article does not exist.' } return Response(response, status=status.HTTP_404_NOT_FOUND) if article_report: if request.user.is_staff or request.user == article_report.user: article_data = { 'article': article_report.article.id, 'user': request.user.id, 'text': request.data.get('text', None), } serializer = self.serializer_class( article_report, data=article_data) serializer.is_valid(raise_exception=True) serializer.save() return Response(data=serializer.data, status=status.HTTP_200_OK) else: return Response(data={ 'message': 'You are not allowed to update this report.' }, status=status.HTTP_403_FORBIDDEN) else: # return error message indicating article report is not found. return Response(data={ 'message': 'That article report does not exist.' }, status=status.HTTP_404_NOT_FOUND) def delete(self, request, slug, pk): article_report = self.get_article_object(pk) """ Attempt to get an article using the slug. If article doesn't exist the user will receive a message telling them so """ try: article = Article.objects.get(slug=slug) except Exception: response = { 'message': 'That article does not exist.' } return Response(response, status=status.HTTP_404_NOT_FOUND) if article_report: if request.user.is_staff or request.user == article_report.user: article_report.delete() # Save the total number of reports flagged on this article. 
                total_report_count = ArticleReport.objects.filter(
                    article_id=article.id).count()
                article.report_count = total_report_count
                article.save(update_fields=['report_count'])
                return Response(data={
                    'message': "Report was deleted successfully"
                }, status=status.HTTP_200_OK)
            else:
                return Response(data={
                    'message': 'You are not allowed to delete this report.'
                }, status=status.HTTP_403_FORBIDDEN)
        else:
            # Return an error message indicating the article report was not found.
            return Response(data={
                'message': 'That article report does not exist.'
            }, status=status.HTTP_404_NOT_FOUND)


class ListCreateCommentAPIView(generics.ListCreateAPIView):
    """Get and post comments."""
    permission_classes = (IsAuthenticated, )
    queryset = Comment.objects.all()
    serializer_class = CommentSerializer

    def create(self, request, *args, **kwargs):
        """Post a comment on an article."""
        try:
            article = Article.objects.get(slug=kwargs["slug"])
        except Article.DoesNotExist:
            raise ValidationError("An article with this slug does not exist")
        comment_data = {
            'article': article,
            'commented_by': request.user.username,
            'comment_body': request.data.get('comment_body', None)
        }
        serializer = self.serializer_class(data=comment_data)
        serializer.is_valid(raise_exception=True)
        serializer.save(article=article)
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    def get(self, request, slug, *args, **kwargs):
        """Get all comments for a particular article."""
        try:
            article = Article.objects.get(slug=slug)
        except Article.DoesNotExist:
            raise ValidationError("An article with this slug does not exist")
        comments = Comment.objects.filter(article=article)
        # Serialize existing model instances directly; `data=` plus
        # `is_valid()` is only for deserializing incoming payloads.
        serializer = self.serializer_class(comments, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)


class RetrieveCommentAPIView(generics.RetrieveDestroyAPIView,
                             generics.CreateAPIView):
    """Retrieve, reply to, and delete a single comment."""
    permission_classes = (IsAuthenticated, )
    queryset = Comment.objects.all()
    serializer_class = CommentSerializer
    renderer_classes = (ArticleJSONRenderer, )

    def create(self, request, pk, *args, **kwargs):
        """Create a child comment (a threaded reply to the parent comment)."""
        try:
            parent = Comment.objects.get(pk=pk)
            article = parent.article
        except ObjectDoesNotExist:
            raise ValidationError("A comment with this ID doesn't exist")
        comment_data = {
            'article': article.slug,
            'commented_by': request.user.username,
            'comment_body': request.data.get('comment_body', None)
        }
        serializer = self.serializer_class(data=comment_data)
        serializer.is_valid(raise_exception=True)
        serializer.save(
            parent=parent, article=article, commented_by=request.user)
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    def get(self, request, pk, *args, **kwargs):
        """Get a single comment instance."""
        try:
            comment = Comment.objects.get(pk=pk)
        except Comment.DoesNotExist:
            raise ValidationError("The comment you entered does not exist")
        comment_data = {
            "comment": comment.comment_body,
            "commented_by": comment.commented_by.username,
            "created_at": str(comment.created_at),
            # Return the parent's id; a model instance is not JSON-serializable.
            "parent": comment.parent.id if comment.parent else None,
            "id": comment.id
        }
        return Response(comment_data, status=status.HTTP_200_OK)

    def delete(self, request, pk, *args, **kwargs):
        """Delete a comment instance."""
        try:
            comment = Comment.objects.get(pk=pk)
        except Comment.DoesNotExist:
            raise ValidationError(
                "The comment you are trying to delete does not exist")
        comment.delete()
        return Response(
            {"msg": "You have deleted the comment"}, status=status.HTTP_200_OK)


class RetrieveCommentsofAPIView(generics.ListAPIView):
    """Retrieve all child comments (replies) of a comment."""
    permission_classes = (IsAuthenticated, )
    queryset = Comment.objects.all()
    serializer_class = CommentSerializer
    renderer_classes = (ArticleJSONRenderer, )

    def list(self, request, pk, slug):
        """List all child comments of a comment."""
        try:
            comment = self.queryset.get(pk=pk)
        except Comment.DoesNotExist:
            raise ValidationError("The comment does not exist")
        comments = Comment.objects.filter(parent=comment)
        serializer = self.serializer_class(comments, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)


class ArticleBookmarkAPIView(generics.CreateAPIView):
    """
    post:
    Bookmark an article for future reading.
    get:
    This endpoint is not supported.
    """
    renderer_classes = (BookmarkJSONRenderer, )
    permission_classes = (IsAuthenticatedOrReadOnly, )
    serializer_class = BookmarkSerializer
    queryset = Bookmark.objects.all()

    def get(self, request, slug=None):
        return Response(
            {'message': 'Sorry {}, this '.format(request.user.username) +
             'request on this endpoint is not allowed.'},
            status.HTTP_403_FORBIDDEN)

    def post(self, request, slug):
        try:
            article = Article.objects.get(slug=slug)
            data = {
                'article': article.id,
                'user': request.user.id
            }
            serializer = self.serializer_class(data=data)
            serializer.is_valid(raise_exception=True)
            serializer.save()
            bookmark = {
                "id": serializer.data['id'],
                "article": serializer.data['article']
            }
            return Response(bookmark, status.HTTP_201_CREATED)
        except ObjectDoesNotExist:
            return Response(
                {'message': 'Sorry {}, '.format(request.user.username) +
                 'the article you want to bookmark does not exist'},
                status.HTTP_404_NOT_FOUND)


class ArticleBookmarkDetailAPIView(generics.RetrieveDestroyAPIView):
    """
    get:
    Retrieve a single bookmark, or all bookmarks, for the logged-in user.
    delete:
    Delete a single bookmark, or all bookmarks, for the logged-in user.
    """
    permission_classes = (IsAuthenticated, )
    serializer_class = BookmarkSerializer
    queryset = Bookmark.objects.all()

    def get(self, request, pk=None):
        if pk:
            # Narrow to the requested bookmark; filtering by user also
            # prevents reading other users' bookmarks.
            bookmarks = Bookmark.objects.filter(user=request.user, pk=pk)
        else:
            bookmarks = Bookmark.objects.filter(user=request.user)
        serializer = self.serializer_class(bookmarks, many=True)
        return Response(serializer.data)

    def delete(self, request, pk=None):
        try:
            if pk:
                bookmark = Bookmark.objects.get(pk=pk)
                if bookmark.user.username == request.user.username:
                    bookmark.delete()
                    return Response(
                        {'message': "Bookmark deleted successfully"},
                        status.HTTP_200_OK)
                else:
                    # Prevent a user from deleting a bookmark they do not own.
                    return Response({
                        'error': 'Sorry {}, '.format(request.user.username) +
                        'you cannot delete bookmarks belonging to other users.'
                    }, status.HTTP_403_FORBIDDEN)
            else:
                bookmarks = Bookmark.objects.filter(user=request.user)
                bookmarks.delete()
                return Response(
                    {'message': "All bookmarks deleted successfully"},
                    status.HTTP_200_OK)
        except ObjectDoesNotExist:
            return Response({
                'message': 'Sorry {}, '.format(request.user.username) +
                'the bookmark you want to delete does not exist'
            }, status.HTTP_404_NOT_FOUND)
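
# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal urls.py wiring for the comment and bookmark views above, to show
# how the slug/pk keyword arguments reach the handlers. The module path,
# route prefixes, and route names here are assumptions, not taken from the
# project; only the view classes come from this module.
from django.urls import path

from .views import (  # assumed module path
    ListCreateCommentAPIView, RetrieveCommentAPIView,
    RetrieveCommentsofAPIView, ArticleBookmarkAPIView,
    ArticleBookmarkDetailAPIView)

urlpatterns = [
    # GET lists comments, POST creates one; the view reads kwargs["slug"].
    path('articles/<slug:slug>/comments/',
         ListCreateCommentAPIView.as_view(), name='comments'),
    # GET/DELETE one comment, POST a threaded reply; the view reads pk.
    path('comments/<int:pk>/',
         RetrieveCommentAPIView.as_view(), name='comment-detail'),
    # list() takes both pk and slug, so the route must supply both.
    path('articles/<slug:slug>/comments/<int:pk>/replies/',
         RetrieveCommentsofAPIView.as_view(), name='comment-replies'),
    path('articles/<slug:slug>/bookmark/',
         ArticleBookmarkAPIView.as_view(), name='bookmark'),
    # pk is optional in get()/delete(), so expose both route variants.
    path('bookmarks/',
         ArticleBookmarkDetailAPIView.as_view(), name='bookmarks'),
    path('bookmarks/<int:pk>/',
         ArticleBookmarkDetailAPIView.as_view(), name='bookmark-detail'),
]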
from .dynamo import DynamoClient
from .s3 import S3Client
#! python3
# -*- encoding: utf-8 -*-
'''
Current module: rman.app.rm_task.models

Rough version history:
v1.0    Original version to use

********************************************************************
    @AUTHOR:  Administrator-Bruce Luo(罗科峰)
    MAIL:     luokefeng@163.com
    RCS:      rman.app.rm_task.models, v1.0 2019-12-05
    FROM:     2019-12-05
********************************************************************
======================================================================

Provide a function for the automation test

'''

from rman.app import db
from sqlalchemy import Column, Integer, String, DateTime


class Rmtask(db.Model):
    ''' Test project task. '''
    __tablename__ = 't_rtsf_task'

    id = Column(Integer, primary_key=True)
    case = Column(String(64), nullable=False, comment=u'test suite name')
    desc = Column(String(64), nullable=True, comment=u'task description')
    tid = Column(String(128), nullable=True, comment=u'task ID')
    status = Column(Integer, nullable=True, default=0,
                    comment=u'0 - not run, 1 - running, 2 - succeeded, '
                            u'3 - failed, 4 - invalid script, '
                            u'5 - Redis service error')
    report_url = Column(String(128), nullable=True, comment=u'report URL')
    report_path = Column(String(128), nullable=True, comment=u'report path')
    create_time = Column(DateTime, nullable=False)
    update_time = Column(DateTime, nullable=False)

    def __init__(self, **kwargs):
        # Accept arbitrary column values as keyword arguments.
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __repr__(self):
        return '<Rmtask %r>' % (self.id)
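
# --- Usage sketch (added for illustration; not part of the original module) ---
# Creating and querying a Rmtask row. This assumes a Flask application
# context is active so the Flask-SQLAlchemy `db` session is usable; the
# field values below are made up.
from datetime import datetime


def example_create_task():
    now = datetime.now()
    # __init__ forwards arbitrary keyword arguments to setattr, so columns
    # can be filled directly at construction time.
    task = Rmtask(case='smoke-suite', desc='nightly regression run',
                  tid='T-0001', status=0, create_time=now, update_time=now)
    db.session.add(task)
    db.session.commit()
    # status=0 means "not run" (see the column comment above).
    return Rmtask.query.filter_by(status=0).all()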
#!/usr/bin/python3.3
# -*- coding: utf-8 -*-
# core.py
# Functions:
# [X] loading servers details
# [X] loading servers config (in which is found username, etc.)
# [ ] logging to disk commands and status
# [ ] loading and providing configuration e.g. is_enabled()
# [ ] provides ini reading interface

import irc.bot
import configparser
import sys


class ServerSpec(irc.bot.ServerSpec):
    def __init__(self, host, port, password, nickname, username, realname,
                 channels, modes):
        if password == '':
            password = None
        super().__init__(host, port, password)
        self.nickname = nickname
        self.realname = realname
        self.username = username
        self.channels = channels
        self.modes = modes


# LOADING SERVERS CONFIG
#sys.argv[1] = 'quakenet'
# TODO: ^spoofing the cmdline for testing purposes, TO REMOVE
print('booting up...')
_serversparser = configparser.ConfigParser()
_serversparser.read('config/servers.ini')
print('available servers:', ', '.join(_serversparser.sections()))
assert len(sys.argv) > 1, 'you must provide a server to connect to'
assert sys.argv[1] in _serversparser.sections(), \
    '{0} server does not exist'.format(sys.argv[1])
print('will connect to {0} ({1}:{2})'.format(
    sys.argv[1],
    _serversparser[sys.argv[1]]['host'],
    _serversparser[sys.argv[1]]['port']))

# loading server details
server_config = configparser.ConfigParser()
server_config.read('config/{0}.ini'.format(sys.argv[1]))
details = server_config['details']


def write_config():
    with open('config/{0}.ini'.format(sys.argv[1]), mode='w') as f:
        server_config.write(f)


# creating the ServerSpec object
chosen_server = ServerSpec(
    _serversparser[sys.argv[1]]['host'],
    int(_serversparser[sys.argv[1]]['port']),
    _serversparser[sys.argv[1]]['password'],
    details['nickname'],
    details['username'],
    details['realname'],
    details['channels'].split(','),
    details['modes'])

with open('VERSION') as file:
    version = file.read()


def split(txt, target):
    # Split according to \n in the text, then split each line into chunks
    # that keep "PRIVMSG <target> :<message>\r\n" under 512 bytes, taking
    # care not to cut in the middle of a multi-byte UTF-8 sequence.
    final_text = []
    for i in txt.split('\n'):
        if len(i.encode()) + len(target.encode()) >= 500:
            s = i.encode()
            splitted = []
            while s:
                cursor = 500 - len(target)
                # Back off until the cut no longer lands inside a
                # multi-byte UTF-8 sequence.
                while True:
                    try:
                        s[:cursor].decode()
                        break
                    except UnicodeDecodeError:
                        cursor -= 1
                splitted.append(s[:cursor])
                s = s[cursor:]
            final_text += [k.decode() for k in splitted]
        else:
            final_text.append(i)
    return final_text


reloadable_modules = ('functions', 'weather', 'google', 'soundcloud',
                      'parse_links', 'admin')


def stop():
    raise StopIteration()


triggersparser = configparser.ConfigParser()
triggersparser.read('strings/triggers.ini')
triggers = triggersparser['triggers']


def savetriggers():
    with open('strings/triggers.ini', mode='w') as file:
        triggersparser.write(file)
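
# --- Usage sketch (added for illustration; not part of the original module) ---
# Demonstrates the contract of split(): every returned chunk, combined with
# the target, fits the IRC limit ("PRIVMSG <target> :<message>\r\n" must not
# exceed 512 bytes), and no chunk ends mid-codepoint.
def example_split():
    target = '#channel'
    long_message = 'é' * 600  # 1200 bytes of UTF-8, forcing several cuts
    for chunk in split(long_message, target):
        assert len(chunk.encode()) + len(target.encode()) < 512
        chunk.encode().decode()  # round-trips, so no codepoint was cut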
##############################################################
# Customer Issue Prediction Model
#-------------------------------------------------------------
# Author : Alisa Ai
#
##############################################################
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly
import plotly.graph_objs as go
from plotly import tools
# from chart_studio import plotly  # removed: rebinds the plotly module imported above
import csv
import numpy as np
import pandas as pd
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from bs4 import BeautifulSoup
import re
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import FunctionTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
import dill

##############################################################
app = dash.Dash()

#app.layout = html.Div(children=[
    #html.H1(children='Predict Customer Issues', style={'textAlign': 'center'}),
    #html.Div(children=[
        #html.Label('Enter your complaints: '),
        #dcc.Input(id='complaints-text', placeholder='Complaints', type='text'),
        #html.Div(id='result')
    #], style={'textAlign': 'center'}),

app.css.append_css({'external_url': 'https://codepen.io/amyoshino/pen/jzXypZ.css'})

layout = dict(
    autosize=True,
    height=450,
    font=dict(color="#191A1A"),
    titlefont=dict(color="#191A1A", size='14'),
    margin=dict(
        l=45,
        r=15,
        b=45,
        t=35
    )
)

app.layout = html.Div([
    # Title - Row
    html.Div(
        [
            html.Div(
                [
                    html.H1(
                        'Customer Issue Prediction App',
                        style={'font-family': 'Helvetica',
                               "margin-left": "20",
                               "margin-bottom": "0"},
                        className='eight columns',
                    )
                ],
                className='row'
            ),
            html.Div(
                [
                    html.H4(
                        '--Developed by Alisa Ai--',
                        style={'font-family': 'Helvetica',
                               "margin-left": "10",
                               "margin-bottom": "0"},
                        className='three columns',
                    )
                ],
                className='row'
            )
        ]),
    # block 2
    html.Div([
        dcc.Store(id='memory'),
        html.Div(
            [
                html.Div(
                    [
                        html.Label('Enter your complaints here: '),
                        dcc.Input(id='complaints-text',
                                  placeholder='Complaints',
                                  type='text',
                                  style=dict(width='1000px',
                                             height='100px',
                                             display='inline-block',
                                             verticalAlign="middle"))],
                    className='eight columns',
                    style={"height": "auto",
                           "width": "2000px",
                           "margin-bottom": "auto",
                           'whiteSpace': 'pre-line'}
                ),
                html.Div(
                    [
                        html.P('Select Your Product:'),
                        dcc.Dropdown(
                            id='product',
                            options=[
                                {'label': 'Checking or savings account', 'value': 1},
                                {'label': 'Consumer Loan', 'value': 2},
                                {'label': 'Credit card or prepaid card', 'value': 3},
                                {'label': 'Credit reporting, credit repair services, or other personal consumer reports', 'value': 4},
                                {'label': 'Debt collection', 'value': 5},
                                {'label': 'Money transfer, virtual currency, or money service', 'value': 6},
                                {'label': 'Mortgage', 'value': 7},
                                {'label': 'Other financial service', 'value': 8},
                                {'label': 'Payday loan, title loan, or personal loan', 'value': 9},
                                {'label': 'Student loan', 'value': 10},
                                {'label': 'Vehicle loan or lease', 'value': 11}],
                            placeholder="Select Your Product",
                            style=dict(width='300px',
                                       height='40px',
                                       display='inline-block',
                                       verticalAlign="middle"))],
                    className='three columns',
                    style={"height": "auto",
                           "width": "2000px",
                           "margin-bottom": "auto"}
                ),
                html.Div(
                    [
                        html.P('Select Your State:'),
                        dcc.Dropdown(
                            id='state',
                            options=[
                                # NOTE: values are aligned with the State_*
                                # entries in index_dict below (12-21); the
                                # original values (22-31) never matched a key,
                                # so the state dummies were always zero.
                                {'label': 'FL', 'value': 12},
                                {'label': 'GA', 'value': 13},
                                {'label': 'IL', 'value': 14},
                                {'label': 'NC', 'value': 15},
                                {'label': 'NJ', 'value': 16},
                                {'label': 'NY', 'value': 17},
                                {'label': 'OH', 'value': 18},
                                {'label': 'PA', 'value': 19},
                                {'label': 'TX', 'value': 20},
                                {'label': 'Other', 'value': 21}],
                            placeholder="Select Your State",
                            style=dict(width='300px',
                                       height='40px',
                                       display='inline-block',
                                       verticalAlign="middle"))],
                    className='three columns',
                    style={"height": "auto",
                           "width": "2000px",
                           "margin-bottom": "auto"}
                ),
                html.Div(
                    [
                        html.Button('Submit', id='button_1')
                    ],
                    className='one columns',
                    style={'margin-bottom': 'auto'}
                ),
                html.Div(id='result')],
            style={'textAlign': 'center'})
    ])
])


@app.callback(
    Output(component_id='result', component_property='children'),
    [Input(component_id='complaints-text', component_property='value'),
     Input(component_id='product', component_property='value'),
     Input(component_id='state', component_property='value'),
     Input('button_1', 'n_clicks')]
)
def update_issue(complaints, pro, stat, n_clicks):
    if n_clicks is not None:
        if complaints is not None and complaints != '':
            try:
                # ---- VADER sentiment scores
                text = re.sub("[XX$]", " ", complaints)
                text = re.sub(r'\s+', ' ', text)
                analyser = SentimentIntensityAnalyzer()
                pos = analyser.polarity_scores(text)['pos']
                neg = analyser.polarity_scores(text)['neg']
                # ---- Clean the text
                text2 = re.sub("[^a-zA-Z]", " ", text)
                stopword = set(stopwords.words('english'))
                text2 = ' '.join(
                    [word for word in text2.split() if word not in stopword])
                porter_stemmer = PorterStemmer()
                text2 = porter_stemmer.stem(text2)
                # ---- Organize the model input
                index_dict = {
                    'Product_Checking or savings account': 1,
                    'Product_Consumer Loan': 2,
                    'Product_Credit card or prepaid card': 3,
                    'Product_Credit reporting, credit repair services, or other personal consumer reports': 4,
                    'Product_Debt collection': 5,
                    'Product_Money transfer, virtual currency, or money service': 6,
                    'Product_Mortgage': 7,
                    'Product_Other financial service': 8,
                    'Product_Payday loan, title loan, or personal loan': 9,
                    'Product_Student loan': 10,
                    'Product_Vehicle loan or lease': 11,
                    'State_FL': 12,
                    'State_GA': 13,
                    'State_IL': 14,
                    'State_NC': 15,
                    'State_NJ': 16,
                    'State_NY': 17,
                    'State_OH': 18,
                    'State_PA': 19,
                    'State_TX': 20,
                    'State_Other': 21}

                def dummy(index_dict, pro, stat):
                    # Flag the selected product and state, then convert the
                    # whole dict to one-hot (0/1) indicators.
                    for key, value in index_dict.items():
                        if pro == value:
                            index_dict[key] = 100
                        if stat == value:
                            index_dict[key] = 100
                    for key, value in index_dict.items():
                        index_dict[key] = 1 if value == 100 else 0
                    return index_dict

                attribute_index = dummy(index_dict=index_dict, pro=pro, stat=stat)
                attribute_index['positive_score'] = pos
                attribute_index['negative_score'] = neg
                # Pass the cleaned text itself, not the literal string 'text2'.
                attribute_index['clean_sentences'] = text2
                input_data = pd.DataFrame(attribute_index, index=[0])
                issue = model.predict(input_data)[0]
                return ('It looks like you are facing this issue: {}. '
                        'Our customer service manager will contact you '
                        'very soon.'.format(str(issue)))
            except ValueError:
                return 'Unable to predict issue'


if __name__ == '__main__':
    with open('/Users/hengyuai/Documents/QMSS_1/PD/Customer-Issue_prediction/pipeline.pkl', 'rb') as file:
        model = dill.load(file)
    app.run_server(debug=True)
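
# --- Worked example (added for illustration; not executed by the app) ---
# What dummy() produces for pro=7 ('Mortgage') and stat=20 ('TX'): the two
# matching keys are flagged, every other Product_*/State_* key becomes 0,
# yielding the one-hot row that is combined with the sentiment scores and
# the cleaned text before prediction.
#
#   dummy(index_dict, pro=7, stat=20)
#   # => {'Product_Mortgage': 1, 'State_TX': 1, <all other 19 keys>: 0}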