text
string
size
int64
token_count
int64
##
# This module provides a powerful 'switch'-like dispatcher system.
# Values for switch cases can be anything comparable via '==', a string
# for use on the left-hand side of the 'in' operator, or a regular
# expression.  Iterables of these types can also be used.

__author__ = 'Mike Kent'

import re


class SwitchError(Exception):
    """Raised for duplicate case registrations and unmatched switch values."""
    pass


# Kept under their original names for backward compatibility; they are now
# direct references to the builtin types instead of type(...) round-trips.
CPAT_TYPE = type(re.compile('.'))  # compiled-pattern type (re.Pattern)
STR_TYPE = str
LIST_TYPE = list
TUPLE_TYPE = tuple


def _as_case_values(caseValue):
    """Return caseValue as a list of individual case values.

    Strings are deliberately treated as scalars here: under Python 3 a str
    has __iter__, so the previous plain hasattr() test made case('Guido')
    register the five one-character cases 'G', 'u', 'i', 'd', 'o' instead
    of the single case 'Guido' (same bug for caseIn and caseRegEx).
    """
    if isinstance(caseValue, str) or not hasattr(caseValue, '__iter__'):
        return [caseValue]
    return list(caseValue)


class Switch(object):
    """A 'switch'-like dispatcher mapping case values to handler callables."""

    def __init__(self):
        self.exactCases = {}        # value -> handler, for '==' matches
        self.inCases = []           # (lhs, handler) pairs for 'in' matches
        self.patternCases = []      # (compiled regex, handler) pairs
        self.defaultHandler = None  # fallback when nothing matches

    ##
    # Try each 'in' case, in the order they were specified, stopping on the
    # first match.  Return (lhs string, handler) or (None, None).
    def _findInCase(self, switchValue):
        for inStr, aHandler in self.inCases:
            if inStr in switchValue:
                return (inStr, aHandler)
        return (None, None)

    ##
    # Try each regex pattern (using re.search), in the order they were
    # specified, stopping on the first match.  Return (match object,
    # handler) or (None, None).
    def _findRegExCase(self, switchValue):
        for cpat, aHandler in self.patternCases:
            matchObj = cpat.search(switchValue)
            if matchObj is not None:
                return (matchObj, aHandler)
        return (None, None)

    ##
    # Switch on a switch value.  Match order: exact cases first; then, for
    # strings/lists/tuples, the 'in' cases in registration order; then, for
    # strings, the regex cases in registration order; finally the default
    # handler.  Raises SwitchError if nothing matches and no default is
    # registered.  The matched case value (or re match object) is passed to
    # the handler along with any extra positional/keyword arguments, and
    # the handler's return value is returned.
    def switch(self, switchValue, *args, **kwargs):
        caseHandler = None
        caseValue = switchValue
        try:
            # Exact match: the case value passed on is the switch value.
            caseHandler = self.exactCases.get(switchValue)
        except TypeError:
            # Unhashable switch value -- it cannot have an exact case.
            pass
        # 'in' match: the case value passed on is the left-hand side of
        # the 'in' operator.
        if not caseHandler and isinstance(switchValue, (str, list, tuple)) \
                and self.inCases:
            caseValue, caseHandler = self._findInCase(switchValue)
        # Regex match: the case value passed on is the re match object.
        if not caseHandler and isinstance(switchValue, str) \
                and self.patternCases:
            caseValue, caseHandler = self._findRegExCase(switchValue)
        if not caseHandler:
            caseHandler = self.defaultHandler
            caseValue = switchValue
        if not caseHandler:
            raise SwitchError("Unknown case value %r" % switchValue)
        return caseHandler(caseValue, *args, **kwargs)

    ##
    # Decorator: register a case handler and the case value it should
    # handle (any dict-key-able object, or an iterable of them).  The
    # decorated handler is returned unmodified.
    def case(self, caseValue):
        def wrap(caseHandler):
            for aCaseValue in _as_case_values(caseValue):
                if aCaseValue in self.exactCases:
                    raise SwitchError("Duplicate exact case value '%s'" %
                                      aCaseValue)
                self.exactCases[aCaseValue] = caseHandler
            return caseHandler
        return wrap

    ##
    # Decorator: register a case handler for a regular expression (a
    # pattern string, a compiled pattern, or an iterable of either).
    def caseRegEx(self, caseValue):
        def wrap(caseHandler):
            for aCaseValue in _as_case_values(caseValue):
                # Compile plain pattern strings.
                if not isinstance(aCaseValue, CPAT_TYPE):
                    aCaseValue = re.compile(aCaseValue)
                for thisCaseValue, _ in self.patternCases:
                    if aCaseValue.pattern == thisCaseValue.pattern:
                        raise SwitchError(
                            "Duplicate regex case value '%s'" %
                            aCaseValue.pattern)
                self.patternCases.append((aCaseValue, caseHandler))
            return caseHandler
        return wrap

    ##
    # Decorator: register a case handler for an 'in' operation (a string,
    # or an iterable of strings).
    def caseIn(self, caseValue):
        def wrap(caseHandler):
            for aCaseValue in _as_case_values(caseValue):
                for thisCaseValue, _ in self.inCases:
                    if aCaseValue == thisCaseValue:
                        raise SwitchError(
                            "Duplicate 'in' case value '%s'" % aCaseValue)
                self.inCases.append((aCaseValue, caseHandler))
            return caseHandler
        return wrap

    ##
    # Decorator: register the default case handler.
    def default(self, caseHandler):
        self.defaultHandler = caseHandler
        return caseHandler


if __name__ == '__main__':  # pragma: no cover
    # Example uses.
    mySwitch = Switch()

    # A default handler.
    @mySwitch.default
    def gotDefault(value, *args, **kwargs):
        print("Default handler: I got unregistered value %r, "
              "with args: %r and kwargs: %r" % (value, args, kwargs))
        return value

    # A single numeric case value.
    @mySwitch.case(0)
    def gotZero(value, *args, **kwargs):
        print("gotZero: I got a %d, with args: %r and kwargs: %r" %
              (value, args, kwargs))
        return value

    # A range of numeric case values.
    @mySwitch.case(list(range(5, 10)))
    def gotFiveThruNine(value, *args, **kwargs):
        print("gotFiveThruNine: I got a %d, with args: %r and kwargs: %r" %
              (value, args, kwargs))
        return value

    # A string case value, for an exact match.
    @mySwitch.case('Guido')
    def gotGuido(value, *args, **kwargs):
        print("gotGuido: I got '%s', with args: %r and kwargs: %r" %
              (value, args, kwargs))
        return value

    # A string value for use with the 'in' operator.
    @mySwitch.caseIn('lo')
    def gotLo(value, *args, **kwargs):
        print("gotLo: I got '%s', with args: %r and kwargs: %r" %
              (value, args, kwargs))
        return value

    # A regular expression pattern match in a string.  A pre-compiled
    # regular expression may also be passed.
    @mySwitch.caseRegEx(r'\b([Pp]y\w*)\b')
    def gotPyword(matchObj, *args, **kwargs):
        print("gotPyword: I got a matchObject where group(1) is '%s', "
              "with args: %r and kwargs: %r" %
              (matchObj.group(1), args, kwargs))
        return matchObj

    # Lastly, an iterable may be passed to case, caseIn and caseRegEx.
    @mySwitch.case([ 99, 'yo', 200 ])
    def gotStuffInSeq(value, *args, **kwargs):
        print("gotStuffInSeq: I got %r, with args: %r and kwargs: %r" %
              (value, args, kwargs))
        return value

    # Now show what we can do.
    got = mySwitch.switch(0)
    # Returns 0 via gotZero.
    got = mySwitch.switch(6, flag='boring')
    # Returns 6 via gotFiveThruNine.
    got = mySwitch.switch(10, 42)
    # Returns 10 via the default handler.
    got = mySwitch.switch('Guido', BDFL=True)
    # Returns 'Guido' via gotGuido.
    got = mySwitch.switch('Anyone seen Guido around?')
    # Falls to the default handler: 'case', not 'caseIn', was used.
    got = mySwitch.switch('Yep, and he said "hello".', 99, yes='no')
    # Returns 'lo' via gotLo: 'lo' was found in 'hello'.
    got = mySwitch.switch('Bird is the Python word of the day.')
    # Returns a matchObject via gotPyword, group(1) is 'Python'.
    got = mySwitch.switch('yo')
    # Returns 'yo' via gotStuffInSeq.
11,509
3,200
from typing import Optional, List from requests import Session from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry from ...config.requests import ( DEFAULT_MAX_RETRIES, DEFAULT_BACKOFF_FACTOR, DEFAULT_STATUS_FORCE_LIST) class SessionWithRetry(Session): def __init__(self, auth=None, num_retries=DEFAULT_MAX_RETRIES, backoff_factor=DEFAULT_BACKOFF_FACTOR, additional_status_force_list: Optional[List[int]] = None): super().__init__() self.auth = auth status_force_list = DEFAULT_STATUS_FORCE_LIST # Retry on additional status codes (ex. HTTP 400) if needed if additional_status_force_list: status_force_list.extend(additional_status_force_list) retry_strategy = Retry( read=0, total=num_retries, status_forcelist=status_force_list, method_whitelist=["HEAD", "GET", "PUT", "POST", "DELETE", "OPTIONS", "TRACE"], backoff_factor=backoff_factor ) adapter = HTTPAdapter(max_retries=retry_strategy) self.mount("https://", adapter) self.mount("http://", adapter)
1,221
368
""" 问题描述:假设链表中每个节点的值都在[0,9]之间,那么链表整体就可以代表一个整数, 例如:9->3->7,可以代表整数937.给定两个这种链表的头结点head1和head2,请生 成代表两个整数相加值的结果链表。例如:链表1为9->3->7,链表2为6->3,最后生成 新的结果链表为1->0->0->0. 思路: 1)如果将链表先转化为整数相加,再转成链表,可能会出现溢出 2)可以使用逆序栈将链表节点压入栈,再进行操作 3)利用链表的逆序求解,这样不会占用额外空间复杂度 """ from linkedlist.toolcls import Node, PrintMixin class ListAddTool(PrintMixin): @staticmethod def add_list(head1, head2): if head1 is None: return head2 if head2 is None: return head1 reversed_list1 = ListAddTool.revert_linked_list(head1) reversed_list2 = ListAddTool.revert_linked_list(head2) new_head = None new_list = None flag = 0 while reversed_list1 is not None or reversed_list2 is not None: if reversed_list1 is None: value1 = 0 else: value1 = reversed_list1.value if reversed_list2 is None: value2 = 0 else: value2 = reversed_list2.value temp = value1 + value2 + flag if temp/10 >= 1: flag = 1 if new_list is None: new_head = Node(temp % 10) new_list = new_head else: new_list.next = Node(temp % 10) new_list = new_list.next else: flag = 0 if new_list is None: new_head = Node(temp) new_list = new_head else: new_list.next = Node(temp) new_list = new_list.next if reversed_list1 is not None: reversed_list1 = reversed_list1.next if reversed_list2 is not None: reversed_list2 = reversed_list2.next if flag == 1: new_list.next = Node(1) reversed_new_head = ListAddTool.revert_linked_list(new_head) return reversed_new_head @staticmethod def revert_linked_list(head): pre = None while head is not None: next = head.next head.next = pre pre = head head = next return pre if __name__ == '__main__': node1 = Node(9) node1.next = Node(9) node1.next.next = Node(9) node2 = Node(1) ListAddTool.print_list(ListAddTool.add_list(node1, node2))
2,397
936
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
#============================================================================
# Copyright (C) 2007 International Business Machines Corp.
# Author: Stefan Berger <stefanb@us.ibm.com>
#============================================================================

# NOTE: this is Python 2 code (print statements, `except Exception, e`).

"""Get the managed policy of the system.
"""

import os
import sys
import base64
import struct
import xen.util.xsm.xsm as security
from xen.util import xsconstants
from xen.util.xsm.acm.acm import install_policy_dir_prefix
from xen.util.acmpolicy import ACMPolicy, \
     ACM_EVTCHN_SHARING_VIOLATION,\
     ACM_GNTTAB_SHARING_VIOLATION, \
     ACM_DOMAIN_LOOKUP, \
     ACM_CHWALL_CONFLICT, \
     ACM_SSIDREF_IN_USE
from xen.xm.opts import OptionError
from xen.xm import main as xm_main
from xen.xm.getpolicy import getpolicy
from xen.xm.main import server


def help():
    # Returns (does not print) the usage text for this subcommand.
    return """
    Usage: xm setpolicy <policytype> <policyname>

    Set the policy managed by xend.

    Only 'ACM' and 'FLASK' are supported as valid policytype
    parameters.

    ACM: The filename of the policy is the policy name plus the suffic
    '-security_policy.xml'. The location of the policy file is either
    the the current directory or '/etc/xen/acm-security/policies'.
    """


def build_hv_error_message(errors):
    """ Build a message from the error codes return by the hypervisor. """
    txt = "Hypervisor reported errors:"
    i = 0
    # Each error record is a pair of 32-bit big-endian ints: (code, data).
    while i + 7 < len(errors):
        code, data = struct.unpack("!ii", errors[i:i+8])
        # Maps error code -> [message, number of domain ids packed in data].
        # NOTE(review): this dict is rebuilt on every loop iteration; it
        # could be hoisted out of the loop.
        err_msgs = {
            ACM_EVTCHN_SHARING_VIOLATION : \
                ["event channel sharing violation between domains",2],
            ACM_GNTTAB_SHARING_VIOLATION : \
                ["grant table sharing violation between domains",2],
            ACM_DOMAIN_LOOKUP : \
                ["domain lookup",1],
            ACM_CHWALL_CONFLICT : \
                ["Chinese Wall conflict between domains",2],
            ACM_SSIDREF_IN_USE : \
                ["A domain used SSIDREF",1],
        }
        num = err_msgs[code][1]
        if num == 1:
            txt += "%s %d" % (err_msgs[code][0], data)
        else:
            # Two domain ids are packed into the high and low 16 bits.
            txt += "%s %d and %d" % (err_msgs[code][0],
                                     data >> 16, data & 0xffff)
        i += 8
    return txt


def setpolicy(policytype, policy_name, flags, overwrite):
    """Load the named ACM or FLASK policy into xend.

    Raises OptionError for bad arguments / unreadable policy files and
    security.XSMError when xend rejects the policy.
    """
    if policytype.upper() == xsconstants.ACM_POLICY_ID:
        xs_type = xsconstants.XS_POLICY_ACM
        # Look for '<name>-security_policy.xml' in the current directory
        # first, then in the installed policies directory.  Dots in the
        # policy name become path separators.
        for prefix in [ './', install_policy_dir_prefix+"/" ]:
            policy_file = prefix + "/".join(policy_name.split(".")) + \
                          "-security_policy.xml"
            if os.path.exists(policy_file):
                break
    elif policytype.upper() == xsconstants.FLASK_POLICY_ID:
        xs_type = xsconstants.XS_POLICY_FLASK
        # FLASK policies are passed as a plain file path.
        policy_file = policy_name
    else:
        raise OptionError("Unsupported policytype '%s'." % policytype)

    try:
        f = open(policy_file,"r")
        policy = f.read()
        f.close()
    # NOTE(review): bare except hides the real error (e.g. permissions).
    except:
        raise OptionError("Could not read policy file: %s" % policy_file)

    # FLASK policies are binary and must be transported base64-encoded.
    if xs_type == xsconstants.XS_POLICY_FLASK:
        policy = base64.b64encode(policy)

    if xm_main.serverType == xm_main.SERVER_XEN_API:
        # Xen-API code path.
        if xs_type != int(server.xenapi.XSPolicy.get_xstype()):
            raise security.XSMError("Policy type not supported.")
        try:
            policystate = server.xenapi.XSPolicy.set_xspolicy(xs_type,
                                                              policy,
                                                              flags,
                                                              overwrite)
        except Exception, e:
            raise security.XSMError("An error occurred setting the "
                                    "policy: %s" % str(e))
        xserr = int(policystate['xserr'])
        if xserr != xsconstants.XSERR_SUCCESS:
            txt = "An error occurred trying to set the policy: %s." % \
                  xsconstants.xserr2string(abs(xserr))
            errors = policystate['errors']
            if len(errors) > 0:
                txt += " " + build_hv_error_message(
                                 base64.b64decode(errors))
            raise security.XSMError(txt)
        else:
            print "Successfully set the new policy."
            # Show the now-active policy.
            if xs_type == xsconstants.XS_POLICY_ACM:
                getpolicy(False)
    else:
        # Non-Xen-API call.
        if xs_type != server.xend.security.on():
            raise security.XSMError("Policy type not supported.")
        rc, errors = server.xend.security.set_policy(xs_type,
                                                     policy,
                                                     flags,
                                                     overwrite)
        if rc != xsconstants.XSERR_SUCCESS:
            txt = "An error occurred trying to set the policy: %s." % \
                  xsconstants.xserr2string(abs(rc))
            if len(errors) > 0:
                txt += " " + build_hv_error_message(
                                 base64.b64decode(errors))
            raise security.XSMError(txt)
        else:
            print "Successfully set the new policy."
            # Show the now-active policy.
            if xs_type == xsconstants.XS_POLICY_ACM:
                getpolicy(False)


def main(argv):
    """Entry point: xm setpolicy <policytype> <policyname>."""
    if len(argv) < 3:
        raise OptionError("Need at least 3 arguments.")

    if "-?" in argv:
        # NOTE(review): help() returns its text; the return value is
        # discarded here, so nothing is printed -- confirm intent.
        help()
        return

    policytype  = argv[1]
    policy_name = argv[2]
    flags = xsconstants.XS_INST_LOAD | xsconstants.XS_INST_BOOT
    overwrite = True
    setpolicy(policytype, policy_name, flags, overwrite)


if __name__ == '__main__':
    try:
        main(sys.argv)
    except Exception, e:
        sys.stderr.write('Error: %s\n' % str(e))
        sys.exit(-1)
6,530
2,004
COLLECTION = "notes"
21
9
import numpy as np def mixup_extend_data(x,y,n): """ MIXUP_EXTEND_DATA will use the mixup technique to append n inter-class representations to the given data. y must be in a one hot representation. """ # copy data x_extend = [] y_extend = [] # create new data for i in range(n): # draw two indices first = int(x.shape[0] * np.random.rand()) second = int(x.shape[0] * np.random.rand()) while second is first: second = int(np.round(x.shape[0] * np.random.rand(), 0)) # draw mixup ratio from [0.2,0.4] mix_ratio = 0.2 * (np.random.rand() + 1) # mix up (x_, y_) = mixup(x[first],x[second],y[first],y[second],mix_ratio) # append to extended data set x_extend.append(x_) y_extend.append(y_) # join datasets x_extend = np.stack(x_extend,axis=0) x_extend = np.concatenate([x,x_extend],axis=0) y_extend = np.stack(y_extend, axis=0) y_extend = np.concatenate([y, y_extend], axis=0) # return modified dataset return x_extend, y_extend def mixup(x1,x2,y1,y2,mix_ratio): """ MIXUP creates a inter-class datapoint using mix_ratio """ x = mix_ratio * x1 + (1-mix_ratio) * x2 y = mix_ratio * y1 + (1-mix_ratio) * y2 return (x,y)
1,310
488
from ..base import WriterBase
from ..core.object import AccessSpecifier
from .serializer import Serializer


class Writer(WriterBase):
    """Code generator that emits one PHP class file per model class."""

    def __init__(self, out_directory):
        WriterBase.__init__(self, out_directory)

    def write_class(self, cls):
        """Render cls into PHP source; returns [(filename, file_text)]."""
        self.set_initial_values(cls)
        declaration_list = ''
        initialization_list = ''
        # Member declarations and (where needed) constructor initializers.
        for member in cls.members:
            declare, init = self.write_object(member)
            declaration_list += declare + '\n'
            if init:
                initialization_list += init + '\n'
        # Method bodies.
        functions = ''
        for method in cls.functions:
            text = self.write_function(method)
            functions += text

        imports = ''
        name = cls.name
        extend = ''
        include_patter = '\nrequire_once "{}.php";'
        # Single inheritance only: the first superclass becomes `extends`.
        if cls.superclasses:
            extend = ' extends ' + cls.superclasses[0].name
            imports += include_patter.format(cls.superclasses[0].name)
        # require_once for every model type referenced by members (directly
        # or as list/map template arguments), skipping self-references.
        for obj in cls.members:
            if self.model.has_class(obj.type):
                if obj.type != cls.name:
                    imports += include_patter.format(obj.type)
            elif obj.type in ['list', 'map']:
                for arg in obj.template_args:
                    if self.model.has_class(arg.type) and arg.type != cls.name:
                        imports += include_patter.format(arg.type)
        imports += include_patter.format('Factory')
        # DataStorage is only required when some method body refers to it.
        if 'DataStorage' in functions:
            imports += include_patter.format('DataStorage')
        constructor_args, constructor_body = self.get_constructor_data(cls)
        out = PATTERN_FILE.format(name=name,
                                  extend=extend,
                                  declarations=declaration_list,
                                  initialize_list=initialization_list,
                                  functions=functions,
                                  imports=imports,
                                  constructor_args=constructor_args,
                                  constructor_body=constructor_body)
        return [ ('%s.php' % cls.name, self.prepare_file(out)) ]

    def write_object(self, obj):
        """Return (declaration, constructor_init) PHP snippets for a member.

        constructor_init is '' when the default value can be expressed
        directly in the declaration.
        """
        out_init = ''
        value = obj.initial_value
        cls_type = self.model.get_class(obj.type) if self.model.has_class(obj.type) else None
        # Pick a PHP default when none was given (or for enum members,
        # whose defaults must be assigned in the constructor).
        if (value in [None, '"NONE"'] and not obj.is_pointer) or \
                (cls_type and cls_type.type == 'enum'):
            if obj.type == "string":
                value = '""'
            elif obj.type == "int":
                value = "0"
            elif obj.type == "float":
                value = "0"
            elif obj.type == "uint":
                value = "0"
            elif obj.type == "bool":
                value = "false"
            elif obj.type == "list":
                value = "array()"
            elif obj.type == "map":
                value = "array()"
            else:
                if cls_type is not None and cls_type.type == 'enum':
                    # Enum: declared as null, initialized in the
                    # constructor to the given value or the first member.
                    value = None
                    if obj.initial_value:
                        initial_value = obj.initial_value.replace('::', '::$')
                    else:
                        initial_value = '{}::${}'.format(cls_type.name, cls_type.members[0].name)
                    out_init = '$this->{} = {};'.format(obj.name, initial_value)
                elif cls_type:
                    # Model class: instantiated in the constructor.
                    out_init = '$this->{} = new {}();'.format(obj.name, obj.type)
        if obj.is_static:
            out_declaration = AccessSpecifier.to_string(obj.access) + ' static ${0} = {1};'
        else:
            out_declaration = AccessSpecifier.to_string(obj.access) + ' ${0} = {1};'
        out_declaration = out_declaration.format(obj.name, Serializer().convert_initialize_value(value))
        return out_declaration, out_init

    def prepare_file(self, text):
        """Apply PHP-specific fix-ups to the generated source text."""
        text = self.prepare_file_codestype_php(text)
        # Static member access needs a '$' in PHP.
        text = text.replace('::TYPE', '::$TYPE')
        text = text.replace('nullptr', 'null')
        # Normalize keyword spacing.
        text = text.replace('foreach(', 'foreach (')
        text = text.replace('for(', 'for (')
        text = text.replace('if(', 'if (')
        text = text.replace('  extends', ' extends')
        text = text.strip()
        return text

    def get_method_arg_pattern(self, obj):
        # Arguments with an initial value become PHP default parameters.
        return '{type} ${name}={value}' if obj.initial_value is not None else '{type} ${name}'

    def get_method_pattern(self, method):
        return PATTERN_METHOD

    def get_required_args_to_function(self, method):
        # PHP methods need no implicit extra arguments.
        return None

    def add_static_modifier_to_method(self, text):
        return 'static ' + text


# Template for a whole generated PHP class file.
PATTERN_FILE = '''<?php
{imports}

class {name} {extend}
{{
    //members:
{declarations}
    public function __construct({constructor_args})
    {{
{initialize_list}
{constructor_body}
    }}

    //functions
{functions}
}};
?>
'''

# Template for a single generated PHP method.
PATTERN_METHOD = '''{access} function {name}({args})
{{
{body}
}}
'''
5,014
1,433
from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from local_time import LocalTime class Logistic_Regression: @staticmethod def get_best_hyperparameter(X_train, y_train, y_val, X_val): # This gets the best hyperparameter for Regularisation best_accuracy = 0.0 best_c = 0.0 for c in [0.01, 0.05, 0.25, 0.5, 1]: lr = LogisticRegression(C=c) lr.fit(X_train, y_train) accuracy_ = accuracy_score(y_val, lr.predict(X_val)) if accuracy_ > best_accuracy: best_accuracy = accuracy_ best_c = c print ("---Accuracy for C=%s: %s" % (c, accuracy_)) print(LocalTime.get(), "best hyperparameter for regularisation: c = ", best_c) return best_c
823
272
class Persona: def __init__(self, nombre) -> None: self.nombre = nombre def avanza(self): print('Ando caminando')
140
47
from os import nice random_string = """Vermont is famous for its verdant summer landscapes and postcard-worthy fall colors. But it’s the Green Mountain State’s winter landscape that truly sparks my photographic eye. New snow transforms the dull shades of stick season into a fresh palette of photographic possibility. Like a life-size Etch A Sketch, the landscape is continually transformed by recurring snowfall.Fresh snow allows lone trees, hay bales and empty swimming pools to cast more accentuated shadows. It isolates and elevates mundane objects and presents them as if on display.""" nice_list = random_string.split() # spaces are the delimiter by default # print(nice_list) # notice that punctuation is included # print(nice_list[:10]) # new list with only the words that start with 'f' from nice_list # create a new list f_words = [] # run a for loop for word in nice_list: # run condition: if word starts with a 'f' if word.lower().startswith('f'): # lower is non-destructive method # append word that match condition to new list f_words.append(word) print(f_words) # out of the loop # next time, create a function that takes two arguments, a list, and what letter # you want to filter by for starting letter
1,269
365
import tensorflow as tf def resettable_metric(metric_fn, metric_params, scope=None): with tf.variable_scope(scope, 'resettable_metric') as sc: metric_returns = metric_fn(**metric_params) reset_op = tf.variables_initializer(tf.local_variables(sc.name)) return metric_returns + (reset_op,) def make_resettable(metric_fn, scope=None): def resettable_metric_fn(*args, **kwargs): with tf.variable_scope(scope, 'resettable_metric') as sc: metric_returns = metric_fn(*args, **kwargs) reset_op = tf.variables_initializer(tf.local_variables(sc.name)) return metric_returns + (reset_op,) return resettable_metric_fn
682
216
class Solution: def simplifyPath(self, path: str) -> str: markMap = {'/': '/', '.': '.'} stack = [] for s in path: if s in markMap: topEle = stack.pop() if stack else None if topEle != markMap[s]: if topEle: stack.append(topEle) stack.append(s) elif not stack and topEle == '.': pass elif s == '.' and stack: pass else: stack.append(s) if len(stack) > 0 and stack[-1] == '/': stack.pop() return ''.join(stack) class SolutionB(object): def simplifyPath(self, path): """ :type path: str :rtype: str """ path = [i for i in path.split('/') if i] path2 = [] for i in path: if i in ['.']: continue elif i == '..': if len(path2) > 0: path2.pop() else: path2.append(i) return '/' + '/'.join(path2) if __name__ == '__main__': sol = SolutionB() print(sol.simplifyPath('/a//b////c/d//././/..'))
1,224
362
import torch from .lconv import LConvBlock class ResidualEncoder(torch.nn.Module): def __init__(self, dim): super().__init__() self.stack1 = torch.nn.Sequential( LConvBlock(dim, 17, 0.1), LConvBlock(dim, 17, 0.1), LConvBlock(dim, 17, 0.1), torch.nn.LayerNorm(dim) ) self.stack2 = torch.nn.Sequential( LConvBlock(dim, 3, 0.1), LConvBlock(dim, 3, 0.1), LConvBlock(dim, 3, 0.1), LConvBlock(dim, 3, 0.1), LConvBlock(dim, 3, 0.1), LConvBlock(dim, 3, 0.1), torch.nn.LayerNorm(dim) ) self.projection = torch.nn.Linear(dim, dim*2) def forward(self, x): x = self.stack1(x) x = self.stack2(x) mean, std = torch.split(self.projection(x), 2, dim=-1) return std, mean
844
347
import dataclasses
from alarm.use_cases.data import Detection
import uuid
from decimal import Decimal

from django.forms import model_to_dict
from django.test import TestCase
from django.utils import timezone
from freezegun import freeze_time

from alarm.business.in_motion import save_motion
from alarm.factories import AlarmStatusFactory
from camera.factories import CameraROIFactory, CameraRectangleROIFactory
from alarm.models import AlarmStatus
from camera.models import CameraMotionDetectedBoundingBox, CameraMotionDetected, CameraRectangleROI
from devices.models import Device


class SaveMotionTestCase(TestCase):
    """Tests for alarm.business.in_motion.save_motion: recording the start
    and end of a motion event and persisting detection bounding boxes."""

    def setUp(self) -> None:
        # Fresh alarm status + device per test; event_ref ties the start
        # and end halves of one motion event together.
        self.alarm_status: AlarmStatus = AlarmStatusFactory()
        self.device: Device = self.alarm_status.device
        self.event_ref = str(uuid.uuid4())

    def test_save_motion(self):
        """save_motion(..., True) opens a motion record with only a start
        timestamp; save_motion(..., False) with the same event_ref closes
        it with an end timestamp."""
        start_motion_time = timezone.now()
        with freeze_time(start_motion_time):
            save_motion(self.device, [], self.event_ref, True)

        motion = CameraMotionDetected.objects.filter(device__device_id=self.device.device_id)
        self.assertTrue(motion.exists())
        motion = motion[0]

        self.assertEqual(motion.motion_started_at, start_motion_time)
        self.assertEqual(str(motion.event_ref), self.event_ref)
        # Only the "motion started" half has been recorded so far.
        self.assertIsNone(motion.motion_ended_at)

        end_motion_time = timezone.now()
        with freeze_time(end_motion_time):
            save_motion(self.device, [], self.event_ref, False)

        motion = CameraMotionDetected.objects.get(device__device_id=self.device.device_id)
        self.assertEqual(motion.motion_started_at, start_motion_time)
        self.assertEqual(motion.motion_ended_at, end_motion_time)
        self.assertEqual(str(motion.event_ref), self.event_ref)

    def test_save_motion_rectangles(self):
        """Each Detection's point-and-size bounding box (plus score) is
        persisted as a CameraMotionDetectedBoundingBox row."""
        detections = (
            Detection(
                bounding_box=[],
                bounding_box_point_and_size={'x': 10, 'y': 15, 'w': 200, 'h': 150},
                class_id='people',
                score=0.8
            ),
        )

        save_motion(self.device, detections, self.event_ref, True)

        motions = CameraMotionDetected.objects.filter(device__device_id=self.device.device_id)
        # NOTE(review): assertTrue(len(motions), 1) only checks the
        # truthiness of len(motions); assertEqual(len(motions), 1) was
        # probably intended (same on the bounding_boxes check below).
        self.assertTrue(len(motions), 1)
        motion = motions[0]

        bounding_boxes = CameraMotionDetectedBoundingBox.objects.filter(camera_motion_detected=motion)
        self.assertTrue(len(bounding_boxes), len(detections))

        # NOTE(review): this assignment is immediately shadowed by the loop
        # variable below.
        bounding_box = bounding_boxes[0]
        for bounding_box, detection in zip(bounding_boxes, detections):
            detection_plain = dataclasses.asdict(detection)
            # Expected row: the point-and-size dict plus the score field.
            expected_bounding_box = detection_plain['bounding_box_point_and_size']
            expected_bounding_box['score'] = detection.score
            self.assertEqual(
                model_to_dict(bounding_box, exclude=('camera_motion_detected', 'id')),
                expected_bounding_box
            )
3,017
926
#http://pymysql.readthedocs.io/en/latest/user/examples.html import sys import utils def perpareToJSON(dic1, dic2, blackList, ignoredList): string = '[\n' for key in dic1: if key not in ignoredList: if key in dic2: string += "{ 'label':'" + str(key).replace("'",'"') + "', 'alert':'" + str((key in blackList) and (dic1[key] != dic2[key])) + "', 'value1':'" + str(dic1[key]).replace("'",'"') + "', 'value2':'" + str(dic2[key]).replace("'",'"') + "'},\n" else: string += "{ 'label':'" + str(key).replace("'",'"') + "', 'alert':'" + str(key in blackList) + "', 'value1':'" + str(dic1[key]).replace("'",'"') + "', 'value2':' ABSENT'},\n" for key in dic2: if key not in ignoredList: if key not in dic1 : string += "{ 'label':'" + str(key).replace("'",'"') + "', 'alert':'" + str(key in blackList) + "', 'value1':'ABSENT', 'value2':'" + str(dic2[key]).replace("'",'"') + "'},\n" return string+"]" ##_______________________________________________script starts here #read input from command line if len(sys.argv) > 1 and len(sys.argv) < 4: file1 = sys.argv[1] file2 = sys.argv[2] blackList = [] ignoredList = [] elif len(sys.argv) > 3: file1 = sys.argv[1] file2 = sys.argv[2] with open( sys.argv[3] ) as file: blackList = file.read().split("\n") with open( sys.argv[4] ) as file: ignoredList = file.read().split("\n") else: print "not enought arguments" matchResult = {} #e queryElement = utils.extractRow(file1) galleryElement = utils.extractRow(file2) qResult = {} for qKey in queryElement: if (qKey not in galleryElement) or (galleryElement[qKey] != queryElement[qKey]): qResult[qKey] = queryElement[qKey] gResult = {} for gKey in galleryElement: if (gKey not in queryElement) or (galleryElement[gKey] != queryElement[gKey]): gResult[gKey] = galleryElement[gKey] #add lenght informations gResult['LUNGHEZZA'] = len(galleryElement) qResult['LUNGHEZZA'] = len(queryElement) print ("var data = " + perpareToJSON(qResult, gResult, blackList, ignoredList) + ";")
2,015
792
from __future__ import unicode_literals import errno import os import sys import tempfile import concurrent.futures as futures import json import re from argparse import ArgumentParser from flask import Flask, request, abort from linebot import ( LineBotApi, WebhookHandler ) from linebot.exceptions import ( LineBotApiError, InvalidSignatureError ) from linebot.models import ( MessageEvent, TextMessage, TextSendMessage, SourceUser, SourceGroup, SourceRoom, TemplateSendMessage, ConfirmTemplate, MessageAction, ButtonsTemplate, ImageCarouselTemplate, ImageCarouselColumn, URIAction, PostbackAction, DatetimePickerAction, CameraAction, CameraRollAction, LocationAction, CarouselTemplate, CarouselColumn, PostbackEvent, StickerMessage, StickerSendMessage, LocationMessage, LocationSendMessage, ImageMessage, VideoMessage, AudioMessage, FileMessage, UnfollowEvent, FollowEvent, JoinEvent, LeaveEvent, BeaconEvent, FlexSendMessage, BubbleContainer, ImageComponent, BoxComponent, TextComponent, SpacerComponent, IconComponent, ButtonComponent, SeparatorComponent, QuickReply, QuickReplyButton ) app = Flask(__name__) # get channel_secret and channel_access_token from your environment variable channel_secret = os.getenv('LINE_CHANNEL_SECRET', None) channel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None) if channel_secret is None: print('Specify LINE_CHANNEL_SECRET as environment variable.') sys.exit(1) if channel_access_token is None: print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.') sys.exit(1) print(channel_secret, file=sys.stderr) print(channel_access_token, file=sys.stderr) line_bot_api = LineBotApi(channel_access_token) handler = WebhookHandler(channel_secret) static_tmp_path = os.path.join(os.path.dirname(__file__), 'static', 'tmp') # ========================= whisper独自のフィールド ======================== from UserData import UserData from PlantAnimator import PlantAnimator from beaconWhisperEvent import BeaconWhisperEvent # ここでimport出来ないときは、pip install 
clova-cek-sdk をたたくこと import cek from flask import jsonify user_data = UserData() plant_animator = PlantAnimator(user_data, line_bot_api) beacon_whisper_event = BeaconWhisperEvent(line_bot_api,user_data) # user_idでエラーをはく場合は、下のidベタ打ちを採用してください # user_id = "U70418518785e805318db128d8014710e" user_id = user_data.json_data["user_id"] # ========================================================================= # =========================Clova用のフィールド============================== # application_id : lineのClovaアプリ?でスキルを登録した際のExtension_IDを入れる clova = cek.Clova( application_id = "com.clovatalk.whisper", default_language = "ja", debug_mode = False ) # ========================================================================= # function for create tmp dir for download content def make_static_tmp_dir(): try: os.makedirs(static_tmp_path) except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(static_tmp_path): pass else: raise @app.route("/callback", methods=['POST']) def callback(): # get X-Line-Signature header value signature = request.headers['X-Line-Signature'] # get request body as text body = request.get_data(as_text=True) app.logger.info("Request body: " + body) # handle webhook body try: handler.handle(body, signature) except LineBotApiError as e: print("Got exception from LINE Messaging API: %s\n" % e.message) for m in e.error.details: print(" %s: %s" % (m.property, m.message)) print("\n") except InvalidSignatureError: abort(400) return 'OK' # /clova に対してのPOSTリクエストを受け付けるサーバーを立てる @app.route('/clova', methods=['POST']) def my_service(): body_dict = clova.route(body=request.data, header=request.headers) response = jsonify(body_dict) response.headers['Content-Type'] = 'application/json;charset-UTF-8' return response # 以下はcallback用のhandler # ユーザにフォローされた時のイベント @handler.add(FollowEvent) def follow_event(event): global user_id user_id = event.source.user_id user_data.set_user_id(user_id) line_bot_api.reply_message( event.reply_token, 
TextSendMessage(text="初めまして。whisperです!\nよろしくね(^^♪")) @handler.add(MessageEvent, message=TextMessage) def handle_text_message(event): print("text message") text = event.message.text split_msg = re.split('[\ | ]', text) reply_texts = create_reply(split_msg, event, source="text") if reply_texts is not None: reply_texts = (reply_texts,) if isinstance(reply_texts, str) else reply_texts msgs = [TextSendMessage(text=s) for s in reply_texts] line_bot_api.reply_message(event.reply_token, msgs) @handler.add(MessageEvent, message=LocationMessage) def handle_location_message(event): line_bot_api.reply_message( event.reply_token, LocationSendMessage( title=event.message.title, address=event.message.address, latitude=event.message.latitude, longitude=event.message.longitude ) ) @handler.add(MessageEvent, message=StickerMessage) def handle_sticker_message(event): line_bot_api.reply_message( event.reply_token, StickerSendMessage( package_id=event.message.package_id, sticker_id=event.message.sticker_id) ) # Other Message Type @handler.add(MessageEvent, message=(ImageMessage, VideoMessage, AudioMessage)) def handle_content_message(event): if isinstance(event.message, ImageMessage): ext = 'jpg' elif isinstance(event.message, VideoMessage): ext = 'mp4' elif isinstance(event.message, AudioMessage): ext = 'm4a' else: return message_content = line_bot_api.get_message_content(event.message.id) with tempfile.NamedTemporaryFile(dir=static_tmp_path, prefix=ext + '-', delete=False) as tf: for chunk in message_content.iter_content(): tf.write(chunk) tempfile_path = tf.name dist_path = tempfile_path + '.' 
+ ext dist_name = os.path.basename(dist_path) os.rename(tempfile_path, dist_path) line_bot_api.reply_message( event.reply_token, [ TextSendMessage(text='Save content.'), TextSendMessage(text=request.host_url + os.path.join('static', 'tmp', dist_name)) ]) @handler.add(MessageEvent, message=FileMessage) def handle_file_message(event): message_content = line_bot_api.get_message_content(event.message.id) with tempfile.NamedTemporaryFile(dir=static_tmp_path, prefix='file-', delete=False) as tf: for chunk in message_content.iter_content(): tf.write(chunk) tempfile_path = tf.name dist_path = tempfile_path + '-' + event.message.file_name dist_name = os.path.basename(dist_path) os.rename(tempfile_path, dist_path) line_bot_api.reply_message( event.reply_token, [ TextSendMessage(text='Save file.'), TextSendMessage(text=request.host_url + os.path.join('static', 'tmp', dist_name)) ]) @handler.add(UnfollowEvent) def handle_unfollow(): app.logger.info("Got Unfollow event") @handler.add(JoinEvent) def handle_join(event): line_bot_api.reply_message( event.reply_token, TextSendMessage(text='Joined this ' + event.source.type)) @handler.add(LeaveEvent) def handle_leave(): app.logger.info("Got leave event") @handler.add(PostbackEvent) def handle_postback(event): if event.postback.data == 'ping': line_bot_api.reply_message( event.reply_token, TextSendMessage(text='pong')) elif event.postback.data == 'datetime_postback': line_bot_api.reply_message( event.reply_token, TextSendMessage(text=event.postback.params['datetime'])) elif event.postback.data == 'date_postback': line_bot_api.reply_message( event.reply_token, TextSendMessage(text=event.postback.params['date'])) elif event.postback.data in ('set_beacon_on', 'set_beacon_off'): # ビーコンを使うかどうかを設定するときの"YES", "No"を押したときの挙動を設定 beacon_whisper_event.set_beacon(event) else: # 植物の名前を消すときにはワンクッション挟んであげる data = event.postback.data.split() if data[0] == 'delete_plant': plant_animator.delete_plant(data[1]) elif data[0] == 'delete_plant_cancel': 
line_bot_api.reply_message( event.reply_token, TextSendMessage( text='ありがとう^^' ) ) # ビーコンがかざされたときに呼ばれる処理 @handler.add(BeaconEvent) def handle_beacon(event): if plant_animator.listen_beacon_span(): beacon_whisper_event.activation_msg(event) if user_data.json_data['use_line_beacon'] is 1: # ビーコンがエコモード中ならずっと家にいたと判断して挨拶はしない if plant_animator.check_beacon_eco_time() == False: line_bot_api.reply_message( event.reply_token, TextSendMessage( text='おかえりなさい!' )) plant_animator.listen_beacon(user_data.json_data['use_line_beacon']) #-------------------------------------- # メッセージを生成するメソッドへのディスパッチャ #-------------------------------------- lines = ( "植物の呼び出し", " ハロー `植物の名前`", "植物の登録:", " 登録 `植物の名前`", "植物の削除", " 削除 `植物の名前`", "会話の終了", ' またね') help_msg = os.linesep.join(lines) def create_reply(split_text, event=None, source=None): """ テキストとして受け取ったメッセージとclovaから受け取ったメッセージを同列に扱うために 応答メッセージ生成へのディスパッチ部分を抜き出す input: string[] output: None or iterable<string> """ def decorate_text(plant, text): return plant.display_name + ": " + text text = split_text[0] if text == 'bye': if isinstance(event.source, SourceGroup): line_bot_api.reply_message( event.reply_token, TextSendMessage(text='またね、今までありがとう')) line_bot_api.leave_group(event.source.group_id) elif isinstance(event.source, SourceRoom): line_bot_api.reply_message( event.reply_token, TextSendMessage(text='またね、今までありがとう')) line_bot_api.leave_room(event.source.room_id) else: line_bot_api.reply_message( event.reply_token, TextSendMessage(text="この会話から退出させることはできません")) # ユーザからビーコンの設定を行う elif text in {'beacon', 'ビーコン'}: return beacon_whisper_event.config_beacon_msg(event) elif text in {"help", "ヘルプ"}: return help_msg elif text in {'またね', 'じゃあね', 'バイバイ'}: plant = plant_animator.plant text = plant_animator.disconnect() if source == "text": text = decorate_text(plant, text) return text # 植物の生成を行う elif text in {'登録', 'ようこそ'}: if len(split_text) == 2: name = split_text[1] return plant_animator.register_plant(name) elif len(split_text) == 1: return 
"名前が設定されていません" else: return "メッセージが不正です", "例:登録 `植物の名前`" # ランダムに呼び出す elif text == "誰かを呼んで": reply = plant_animator.clova_random_connect() if source == "text": reply = decorate_text(plant_animator.plant, reply) return reply # 植物との接続命令 elif split_text[0] in {'ハロー', 'hello', 'こんにちは', 'こんばんは', 'おはよう', 'ごきげんよう'}: if len(split_text) == 2: reply = plant_animator.connect(split_text[1]) if source == "text": reply = decorate_text(plant_animator.plant, reply) return reply elif len(split_text) == 1: return "植物が選択されていません" else: return "メッセージが不正です:", "例:ハロー `植物の名前`" # 植物を削除するときの命令 elif split_text[0] == {'削除'}: if len(split_text) == 2: return plant_animator.delete_plant(split_text[1]) elif len(split_text) == 1: return "植物が選択されていません" else: return "メッセージが不正です:" , "例:削除 `植物の名前`" # 植物を削除するときの命令 # if split_msg[1] is not None: # confirm_template = ConfirmTemplate(text= split_msg[1] +"の情報を削除します\n本当によろしいですか?\n", actions=[ # PostbackAction(label='Yes', data='delete_plant '+ split_msg[1], displayText='はい'), # PostbackAction(label='No', data='delete_plant_cancel '+ split_msg[1], displayText='いいえ'), # ]) # template_message = TemplateSendMessage( # alt_text='Confirm alt text', template=confirm_template) # line_bot_api.reply_message(event.reply_token, template_message) # else: # line_bot_api.reply_message( # event.reply_token, # TextSendMessage( # text='植物が選択されていません' # ) # ) else: text = plant_animator.communicate(text) if source == "text": if plant_animator.connecting(): text = decorate_text(plant_animator.plant, text) else: text = [text, help_msg] return text # line_bot_api.reply_message( # event.reply_token, TextSendMessage(text=event.message.text)) #-------------------------------------- # メッセージを生成するメソッドへのディスパッチャ end #-------------------------------------- # 以下にClova用のイベントを書き込む # 起動時の処理 @clova.handle.launch def launch_request_handler(clova_request): welcome_japanese = cek.Message(message="おかえりなさい!", language="ja") response = clova.response([welcome_japanese]) return response 
@clova.handle.default def no_response(clova_request): text = plant_animator.communicate("hogehoge") if plant_animator.connecting(): text = "%s: よくわかんないや" % plant_animator.plant.display_name return clova.response(text) # Communicateの発火箇所 # debugのために、defaultにしているが本来は # @clova.handle.intent("Communication") と書いて、Clova アプリの方でインテントを設定しておく必要がある # ToDo: Connect処理を設定してあげないと不親切、LINE Clavaアプリで予冷応答を細かく設定(今回は時間が足りないかも) # @clova.handle.default # @clova.handle.intent("AskStatus") # def communication(clova_request): # msg = plant_animator.communicate("調子はどう?", None) # if msg is None: # msg = "誰ともお話ししていません" # message_japanese = cek.Message(message=msg, language="ja") # response = clova.response([message_japanese]) # return response # @clova.handle.intent("AskWater") # def ask_water(clova_request): # msg = plant_animator.communicate("水はいる?", None) # if msg is None: # msg = "誰ともお話ししていません" # message_japanese = cek.Message(message=msg, language="ja") # response = clova.response([message_japanese]) # return response # @clova.handle.intent("AskLuminous") # def ask_luminous(clova_request): # msg = plant_animator.communicate("日当たりはどう?", None) # if msg is None: # msg = "誰ともお話ししていません" # message_japanese = cek.Message(message=msg, language="ja") # response = clova.response([message_japanese]) # return response #-------------------------- # start Clova setting #-------------------------- def define_clova_handler(intent, text): @clova.handle.intent(intent) def handler(clova_request): # バグがあるかもしれない # textの形式次第で print("clova intent = %s" % intent) msg = create_reply([text], source="clova") # msg = plant_animator.communicate(text, None) if msg is None: msg = "誰ともお話ししていません" message_japanese = cek.Message(message=msg, language="ja") response = clova.response([message_japanese]) return response return handler with open("data/clova_setting.json") as f: js = json.load(f) intent_text_dict = js["intent_text_dict"] # Clovaに対するイベントハンドラを設定 for k ,v in intent_text_dict.items(): define_clova_handler(k, v) 
#------------------------------- # end Clova setting #------------------------------- import time # should be modified when required def update(): plant_animator.update() def main_loop(clock_span): while 1: time.sleep(clock_span) update() if __name__ == "__main__": arg_parser = ArgumentParser( usage='Usage: python ' + __file__ + ' [--port <port>] [--help]' ) arg_parser.add_argument('-p', '--port', type=int, default=8000, help='port') arg_parser.add_argument('-d', '--debug', default=False, help='debug') options = arg_parser.parse_args() # create tmp dir for download content make_static_tmp_dir() def push_message(msg): line_bot_api.push_message(user_id, TextSendMessage(text=msg)) plant_animator.push_message = push_message with futures.ThreadPoolExecutor(2) as exec: exec.submit(app.run, debug=options.debug, port=options.port) exec.submit(main_loop, 0.9)
16,881
6,141
# Nested Lists and Dictionaries def run(): my_list = [1, "Hello", True, 4.5] my_dict = { "firstname": "Mauricio", "lastname": "Valadez" } super_list = [ {"firstname": "Mauricio", "lastname": "Valadez"}, {"firstname": "Carlos", "lastname": "García"}, {"firstname": "Francisco", "lastname": "Hernández"}, {"firstname": "Laura", "lastname": "Pérez"}, {"firstname": "Gabriela", "lastname": "Rojas"} ] super_dict = { "natural_nums": [1,2,3,4,5], "integer_nums": [-1, -2, 0, 1, 2], "float_nums": [1.2, 3.7, 9.86] } for key, value in super_dict.items(): print(key, "-", value) for item in super_list: print(item["firstname"], "-", item["lastname"]) if __name__ == '__main__': run()
815
317
from bat import Bat from ubermesch import Ubermesch class Batman(Bat, Ubermesch): def __init__(self, *args, **kwargs): Ubermesch.__init__(self, 'anonymous', movie=True, superpowers=['Wealthy'], *args, **kwargs) Bat.__init__(self, *args, can_fly=False, **kwargs) self.name = "neo" def sing(self): return "tototototoototot" if __name__ == '__main__': sup = Batman print(Batman.__mro__) print(sup.get_species()) print(sup.sing())
460
176
import os import subprocess import sys import logging import time from os.path import join, dirname from flask import Flask, jsonify, request, send_file from flask.logging import default_handler from datafunctions.log.log import startLog, getLogFile, tailLogFile SCRAPER_NAME = './run_scrapers.py' SCRAPER_NAME_PS = SCRAPER_NAME[2:] MODEL_NAME = './run_models.py' MODEL_NAME_PS = MODEL_NAME[2:] LDA17_NN_PATH = join(dirname(__file__), 'datafunctions/model/models/lda17_files/nearest_neighbors') LDA17_M_PATH = join(dirname(__file__), 'datafunctions/model/models/lda17_files/model') LDA17_ME_PATH = join(dirname(__file__), 'datafunctions/model/models/lda17_files/model.expElogbeta.npy') LDA17_MI_PATH = join(dirname(__file__), 'datafunctions/model/models/lda17_files/model.id2word') LDA17_MS_PATH = join(dirname(__file__), 'datafunctions/model/models/lda17_files/model.state') LDA17_ID_PATH = join(dirname(__file__), 'datafunctions/model/models/lda17_files/id2word') startLog(getLogFile(__file__)) APP_LOG = logging.getLogger(__name__) APP_LOG.info('Creating app...') application = Flask(__name__) werkzeug_logger = logging.getLogger('werkzeug') for handler in APP_LOG.handlers: werkzeug_logger.addHandler(handler) application.logger.addHandler(handler) @application.route('/') def index(): return ''' <html><head></head><body> Health check: <a href="/health">/health</a> <br> Start scrapers: <a href="/start">/start</a> <br> Kill scrapers: <a href="/kill">/kill</a> <br> Start models: <a href="/start-models">/start-models</a> <br> Kill models: <a href="/kill-models">/kill-models</a> <br> Application logs: <a href="/logs?file=application.py&amp;lines=50">/logs?file=application.py</a> <br> Scraper logs: <a href="/logs?file=run_scrapers.py&amp;lines=100">/logs?file=run_scrapers.py</a> <br> Model logs: <a href="/logs?file=run_models.py&amp;lines=100">/logs?file=run_models.py</a> </body></html> ''' @application.route('/logs', methods=['GET']) def logs(): """ Gets the last n lines of a given log 
""" APP_LOG.info(f'/logs called with args {request.args}') logfile = request.args.get('file', None) lines = request.args.get('lines', 1000) if logfile is None: return(''' <pre> Parameters: file: The file to get logs for Required Usually one of either application.py or run_scrapers.py lines: Number of lines to get Defaults to 1000 </pre> ''') try: res = tailLogFile(logfile, n_lines=lines) return (f'<pre>{res}</pre>') except Exception as e: return(f'Exception {type(e)} getting logs: {e}') @application.route('/health', methods=['GET']) def health(): """ Prints various health info about the machine. """ APP_LOG.info('/health called') outputs = {} outputs['scrapers running'] = check_running(SCRAPER_NAME) outputs['models running'] = check_running(MODEL_NAME) outputs['free'] = os.popen('free -h').read() outputs['dstat'] = os.popen('dstat -cdlimnsty 1 0').read() outputs['top'] = os.popen('top -bn1').read() outputs['ps'] = os.popen('ps -Afly --forest').read() APP_LOG.info(f'Health results: {outputs}') r = '' for key, val in outputs.items(): r += f''' <hr /> <h4>{key}</h4> <pre style="white-space: pre-wrap; overflow-wrap: break-word;">{val}</pre> ''' return r @application.route('/kill', methods=['GET', 'POST']) def kill(): """ Kills the web scrapers. 
""" initial_state = check_running(SCRAPER_NAME) running = initial_state try: APP_LOG.info('/kill called') tries = 0 max_tries = 5 while running and tries < max_tries: APP_LOG.info(f'Scraper running, attempting to kill it (try {tries + 1} of {max_tries})') r = os.system( f'kill $(ps -Af | grep {SCRAPER_NAME_PS} | grep -v grep | grep -oP "^[a-zA-Z\s]+[0-9]+" | grep -oP "[0-9]+")' ) APP_LOG.info(f'Kill call exited with code: {r}') tries += 1 running = check_running(SCRAPER_NAME) if running: wait_time = 2 APP_LOG.info(f'Waiting {wait_time} seconds...') time.sleep(wait_time) except Exception as e: APP_LOG.warn(f'Exception while killing scrapers: {e}') APP_LOG.warn(e, exc_info=True) return f''' <html><body> <h4>initially running</h4> <pre>{initial_state}</pre> <hr /> <h4>scrapers running</h4> <pre>{running}</pre> </html></body> ''' @application.route('/start', methods=['GET', 'POST']) def start(): """ Starts the web scrapers. """ tries = 0 result = { 'running': False, 'tries': 0, 'message': 'Unknown failure.' } try: APP_LOG.info('/start called') max_tries = 5 while not check_running(SCRAPER_NAME) and tries < max_tries: APP_LOG.info(f'Scraper not running, attempting to start it (try {tries + 1} of {max_tries})') start_and_disown(SCRAPER_NAME) wait_time = 0 APP_LOG.info(f'Waiting {wait_time} seconds...') time.sleep(wait_time) tries += 1 if check_running(SCRAPER_NAME): APP_LOG.info(f'Scraper running.') if tries == 0: result = { 'running': True, 'tries': tries, 'message': f'{SCRAPER_NAME} already running.' } else: result = { 'running': True, 'tries': tries, 'message': f'{SCRAPER_NAME} started after {tries} tries.' } else: result = { 'running': False, 'tries': tries, 'message': f'Failed to start {SCRAPER_NAME} after {tries} tries.' 
} # APP_LOG.info(f'run_scrapers stdout: {p.stdout.read()}') # APP_LOG.info(f'run_scrapers stderr: {p.stderr.read()}') APP_LOG.info(f'result: {result}') except Exception as e: result = { 'running': False, 'tries': tries, 'message': f'Aborting after {type(e)} exception on try {tries}: {e}' } APP_LOG.warn(f'result: {result}') APP_LOG.warn(e, exc_info=True) return jsonify(result) @application.route('/kill-models', methods=['GET', 'POST']) def kill_models(): """ Kills the topic models. """ initial_state = check_running(MODEL_NAME) running = initial_state try: APP_LOG.info('/kill-models called') tries = 0 max_tries = 5 while running and tries < max_tries: APP_LOG.info(f'Models running, attempting to kill it (try {tries + 1} of {max_tries})') r = os.system( f'kill $(ps -Af | grep {MODEL_NAME_PS} | grep -v grep | grep -oP "^[a-zA-Z\s]+[0-9]+" | grep -oP "[0-9]+")' ) APP_LOG.info(f'Kill call exited with code: {r}') tries += 1 running = check_running(MODEL_NAME) if running: wait_time = 2 APP_LOG.info(f'Waiting {wait_time} seconds...') time.sleep(wait_time) except Exception as e: APP_LOG.warn(f'Exception while killing models: {e}') APP_LOG.warn(e, exc_info=True) return f''' <html><body> <h4>initially running</h4> <pre>{initial_state}</pre> <hr /> <h4>models running</h4> <pre>{running}</pre> </html></body> ''' @application.route('/start-models', methods=['GET', 'POST']) def start_models(): """ Starts the topic models. """ tries = 0 result = { 'running': False, 'tries': 0, 'message': 'Unknown failure.' 
} try: APP_LOG.info('/start-models called') max_tries = 5 while not check_running(MODEL_NAME) and tries < max_tries: APP_LOG.info(f'Models not running, attempting to start it (try {tries + 1} of {max_tries})') start_and_disown(MODEL_NAME) wait_time = 0 APP_LOG.info(f'Waiting {wait_time} seconds...') time.sleep(wait_time) tries += 1 if check_running(MODEL_NAME): APP_LOG.info(f'Models running.') if tries == 0: result = { 'running': True, 'tries': tries, 'message': f'{MODEL_NAME} already running.' } else: result = { 'running': True, 'tries': tries, 'message': f'{MODEL_NAME} started after {tries} tries.' } else: result = { 'running': False, 'tries': tries, 'message': f'Failed to start {MODEL_NAME} after {tries} tries.' } APP_LOG.info(f'result: {result}') except Exception as e: result = { 'running': False, 'tries': tries, 'message': f'Aborting after {type(e)} exception on try {tries}: {e}' } APP_LOG.warn(f'result: {result}') APP_LOG.warn(e, exc_info=True) return jsonify(result) @application.route('/models/lda17-nn') def models_lda17_nn(): ''' Returns the pickled NearestNeighbors model for the LDA17 model. 
''' # At some point, this should be replaced with an autogenerated route or a static route return send_file(LDA17_NN_PATH) @application.route('/models/lda17-m') def models_lda17_m(): return send_file(LDA17_M_PATH) @application.route('/models/lda17-m.expElogbeta.npy') def models_lda17_me(): return send_file(LDA17_ME_PATH) @application.route('/models/lda17-m.id2word') def models_lda17_mi(): return send_file(LDA17_MI_PATH) @application.route('/models/lda17-m.state') def models_lda17_ms(): return send_file(LDA17_MS_PATH) @application.route('/models/lda17-id') def models_lda17_id(): return send_file(LDA17_ID_PATH) def check_running(pname): APP_LOG.info(f'check_running called, pname: {pname}') result = os.system(f'ps -Af | grep -v grep | grep -v log | grep {pname}') APP_LOG.info(f'exit code: {result}') return result == 0 def start_and_disown(pname): with open(os.devnull, 'r+b', 0) as DEVNULL: subprocess.Popen(['nohup', sys.executable, pname], stdin=DEVNULL, stdout=DEVNULL, stderr=DEVNULL, close_fds=True, preexec_fn=os.setpgrp) if __name__ == '__main__': APP_LOG.info('Starting Flask dev server...') application.run()
9,400
4,067
# -*- coding: utf-8 -*- # Authors: Kambiz Tavabi <ktavabi@gmail.com> # # simplified bsd-3 license """Script for infant basic auditory testing using infant directed speech (IDS)""" import numpy as np from os import path as op from expyfun import ExperimentController from expyfun.stimuli import read_wav from expyfun._trigger_controllers import decimals_to_binary from expyfun import assert_version assert_version('8511a4d') fs = 24414 stim_dir = op.join(op.dirname(__file__), 'stimuli', 'ids') sound_files = ['inForest_part-1-rms.wav', 'inForest_part-2-rms.wav', 'inForest_part-3-rms.wav', 'inForest_part-4-rms.wav', 'inForest_part-5-rms.wav'] sound_files = {j: op.join(stim_dir, k) for j, k in enumerate(sound_files)} wavs = [np.ascontiguousarray(read_wav(v)) for _, v in sorted(sound_files.items())] # convert length of wave files into number of bits n_bits = int(np.floor(np.log2(len(wavs)))) + 1 with ExperimentController('IDS', stim_db=75, stim_fs=fs, stim_rms=0.01, check_rms=None, suppress_resamp=True) as ec: for ii, wav in enumerate(wavs): # stamp trigger line prior to stimulus onset ec.clear_buffer() ec.load_buffer(wav[0]) ec.identify_trial(ec_id=str(ii), ttl_id=decimals_to_binary([ii], [n_bits])) # our next start time is our last start time, plus # the stimulus duration stim_len = 1./fs * len(wav[0][0]) # in seconds ec.start_stimulus() # stamps stimulus onset ec.wait_secs(stim_len) # wait through stimulus duration to stop the playback ec.stop() ec.trial_ok() ec.check_force_quit() # make sure we're not trying to quit
1,757
631
N = int(input()) grid = [["*"] * N for _ in range(N)] grid[0][0] = "W" for i in range(N): for j in range(N): if grid[i][j] == "*": if j == 0: if grid[i - 1][j] == "W": grid[i][j] = "B" else: grid[i][j] = "W" else: if grid[i][j - 1] == "W": grid[i][j] = "B" else: grid[i][j] = "W" if grid[i][j] == "W": if 0 <= i - 2 < N and 0 <= j + 1 < N: if grid[i - 2][j + 1] == "*": grid[i - 2][j + 1] = "B" if 0 <= i - 1 < N and 0 <= j + 2 < N: if grid[i - 1][j + 2] == "*": grid[i - 1][j + 2] = "B" if 0 <= i + 1 < N and 0 <= j + 2 < N: if grid[i + 1][j + 2] == "*": grid[i + 1][j + 2] = "B" if 0 <= i + 2 < N and 0 <= j + 1 < N: if grid[i + 2][j + 1] == "*": grid[i + 2][j + 1] = "B" if 0 <= i + 2 < N and 0 <= j - 1 < N: if grid[i + 2][j - 1] == "*": grid[i + 2][j - 1] = "B" if 0 <= i + 1 < N and 0 <= j - 2 < N: if grid[i + 1][j - 2] == "*": grid[i + 1][j - 2] = "B" if 0 <= i - 1 < N and 0 <= j + 2 < N: if grid[i - 1][j + 2] == "*": grid[i - 1][j + 2] = "B" if 0 <= i - 2 < N and 0 <= j - 1 < N: if grid[i - 2][j - 1] == "*": grid[i - 2][j - 1] = "B" else: if 0 <= i - 2 < N and 0 <= j + 1 < N: if grid[i - 2][j + 1] == "*": grid[i - 2][j + 1] = "W" if 0 <= i - 1 < N and 0 <= j + 2 < N: if grid[i - 1][j + 2] == "*": grid[i - 1][j + 2] = "W" if 0 <= i + 1 < N and 0 <= j + 2 < N: if grid[i + 1][j + 2] == "*": grid[i + 1][j + 2] = "W" if 0 <= i + 2 < N and 0 <= j + 1 < N: if grid[i + 2][j + 1] == "*": grid[i + 2][j + 1] = "W" if 0 <= i + 2 < N and 0 <= j - 1 < N: if grid[i + 2][j - 1] == "*": grid[i + 2][j - 1] = "W" if 0 <= i + 1 < N and 0 <= j - 2 < N: if grid[i + 1][j - 2] == "*": grid[i + 1][j - 2] = "W" if 0 <= i - 1 < N and 0 <= j + 2 < N: if grid[i - 1][j + 2] == "*": grid[i - 1][j + 2] = "W" if 0 <= i - 2 < N and 0 <= j - 1 < N: if grid[i - 2][j - 1] == "*": grid[i - 2][j - 1] = "W" for i in range(N): for j in range(N): print(grid[i][j], end="") print()
2,851
1,182
# # Copyright (c) 2016 Alexandre Joannou # Copyright (c) 2013 Michael Roe # All rights reserved. # # This software was developed by SRI International and the University of # Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237 # ("CTSRD"), as part of the DARPA CRASH research programme. # # @BERI_LICENSE_HEADER_START@ # # Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor # license agreements. See the NOTICE file distributed with this work for # additional information regarding copyright ownership. BERI licenses this # file to you under the BERI Hardware-Software License, Version 1.0 (the # "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at: # # http://www.beri-open-systems.org/legal/license-1-0.txt # # Unless required by applicable law or agreed to in writing, Work distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
# # @BERI_LICENSE_HEADER_END@ # from beritest_tools import BaseBERITestCase, attr # # Test a ccall_fast # @attr('capabilities') class test_cp2_ccall_fast(BaseBERITestCase): def test_cp2_ccall_fast_1(self): '''Test that ccall_fast called the sandbox and returned''' self.assertRegisterEqual(self.MIPS.a1, 0x900d, "ccall did not call the sandbox and come back") def test_cp2_ccall_fast_2(self): '''Test that the sandbox inverted the memory array''' self.assertRegisterEqual(self.MIPS.a2, 0x08, "the sandbox did not invert the memory array") def test_cp2_ccall_fast_3(self): '''Test that the sandbox zeroed the second memory array''' self.assertRegisterEqual(self.MIPS.a3, 0x00, "the sandbox did not zero the second memory array") def test_cp2_ccall_fast_4(self): '''Test that returning from the sandbox cleared $a4''' self.assertRegisterEqual(self.MIPS.a4, 0x00, "returning from the sandbox did not clear $a4") def test_cp2_ccall_fast_5(self): '''Test that the sandbox zeroed the second memory array from $a4''' self.assertRegisterEqual(self.MIPS.a5, 0x00, "the sandbox did not zero the second memory array from $a4")
2,419
784
""" Equals attribute condition """ from marshmallow import post_load from .base import AttributeCondition, AttributeConditionSchema class EqualsAttribute(AttributeCondition): """ Condition for attribute value equals that of another """ def _is_satisfied(self, what) -> bool: return what == self.value class EqualsAttributeSchema(AttributeConditionSchema): """ JSON schema for equals attribute condition """ @post_load def post_load(self, data, **_): # pylint: disable=missing-docstring,no-self-use return EqualsAttribute(**data)
602
165
#!/bin/env python # coding: utf8 # # WhatIsMySchema # # Copyright (c) 2018 Tim Gubner # # import unittest from whatismyschema import * class WhatIsMySchemaTestCase(unittest.TestCase): def fix_type(self, t): return t.lower().replace(" ", "").replace("\n", "") def check_type(self, col, expect): types = col.determine_type() self.assertTrue(len(types) > 0) expect = self.fix_type(expect) data = self.fix_type(types[0]) self.assertEqual(data, expect) def check_types(self, cols, types): self.assertEqual(len(cols), len(types)) for (col, tpe) in zip(cols, types): self.check_type(col, tpe) def check_null(self, cols, isnull): self.assertEqual(len(cols), len(isnull)) for (col, null) in zip(cols, isnull): if null: self.assertTrue(col.num_nulls > 0) else: self.assertEqual(col.num_nulls, 0) def check_all_null_val(self, cols, val): isnull = [] for r in range(0, len(cols)): isnull.append(val) self.check_null(cols, isnull) def check_all_null(self, cols): self.check_all_null_val(cols, True) def check_none_null(self, cols): self.check_all_null_val(cols, False) class TableTests(WhatIsMySchemaTestCase): def testDates1(self): table = Table() table.seperator = "," table.push("2013-08-29,2013-08-05 15:23:13.716532") self.check_types(table.columns, ["date", "datetime"]) self.check_none_null(table.columns) table.check() def testSep1(self): table = Table() table.seperator = "seperator" table.push("Hallo|seperator|Welt") self.check_types(table.columns, ["varchar(6)", "varchar(5)"]) self.check_none_null(table.columns) table.check() def testInt1(self): table = Table() table.seperator = "|" table.push("0") table.push("-127") table.push("127") self.check_types(table.columns, ["tinyint"]) self.check_none_null(table.columns) table.check() def testDec1(self): table = Table() table.seperator = "|" table.push("42") table.push("42.44") table.push("42.424") table.push("4.424") self.check_types(table.columns, ["decimal(5,3)"]) self.check_none_null(table.columns) table.check() def test1(self): table = 
Table() table.seperator = "|" table.push("Str1|Str2|42|42|13") table.push("Ha|Str3333|42.42|Test|34543534543543") self.check_types(table.columns, ["varchar(4)", "varchar(7)", "decimal(4,2)", "varchar(4)", "bigint"]) self.check_none_null(table.columns) table.check() def testColMismatch1(self): table = Table() table.seperator = "," table.push("1") table.push("1,2") table.push("1") self.check_types(table.columns, ["tinyint", "tinyint"]) self.check_null(table.columns, [False, True]) table.check() def testIssue4(self): table = Table() table.seperator = "," table.push("0.0390625") table.push("0.04296875") self.check_types(table.columns, ["decimal(8,8)"]) self.check_null(table.columns, [False]) table.check() def testDecZeros(self): table = Table() table.seperator = "|" table.push(".1000|000.0|.4") table.push(".123|1.1|.423") self.check_types(table.columns, [ "decimal(3, 3)", "decimal(2, 1)", "decimal(3, 3)"]) self.check_null(table.columns, [False, False, False]) table.check() def testIssue7a(self): table = Table() table.seperator = "|" table.push("123|.1|1.23") table.push("1|.123|12.3") self.check_types(table.columns, ["tinyint", "decimal(3,3)", "decimal(4,2)"]) self.check_null(table.columns, [False, False, False]) table.check() def testIssue7b(self): table = Table() table.seperator = "|" table.push("123|1|1.23|12.3") table.push("0.123|.1|.123|.123") self.check_types(table.columns, ["decimal(6,3)", "decimal(2,1)", "decimal(4,3)", "decimal(5,3)"]) self.check_null(table.columns, [False, False, False, False]) table.check() def testIssue5a(self): table = Table() table.seperator = "|" table.push("1||a") table.push("2||b") table.push("3||c") self.check_types(table.columns, ["tinyint", "boolean", "varchar(1)"]) self.check_null(table.columns, [False, True, False]) table.check() def testIssue5b(self): table = Table() table.seperator = "|" table.parent_null_value = "=" table.push("1|=|a") table.push("2|=|b") table.push("3|=|c") self.check_types(table.columns, ["tinyint", "boolean", 
"varchar(1)"]) self.check_null(table.columns, [False, True, False]) table.check() class CliTests(WhatIsMySchemaTestCase): def run_process(self, cmd, file): path = os.path.dirname(os.path.abspath(__file__)) p = subprocess.Popen("python {path}/whatismyschema.py{sep}{cmd}{sep}{path}/{file}".format( path=path, cmd=cmd, file=file, sep=" " if len(cmd) > 0 else ""), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if p.returncode: raise Exception(err) else: # Print stdout from cmd call if err is None: err = "" if out is None: out = "" self.assertEqual(0, len(err.decode('utf8').strip())) return self.fix_type(out.decode('utf8').strip()) def testParallel1(self): for num_process in [1, 2, 4, 8]: for chunk_size in [1, 10, 100]: for begin in [0, 1]: flags = "--parallel-chunk-size {chunk_size} --parallelism {parallel} --begin {begin}".format( chunk_size=chunk_size, parallel=num_process, begin=begin) out = self.run_process(flags, "test1.txt") if begin == 0: expect = self.fix_type("col0varchar(5)notnullcol1varchar(2)notnullcol2varchar(3)notnull") self.assertEqual(out, expect) elif begin == 1: expect = self.fix_type("col0decimal(4,2)notnullcol1tinyintnotnullcol2smallintnotnull") self.assertEqual(out, expect) else: assert(False) if __name__ == '__main__': unittest.main()
5,760
2,627
from django.conf.urls import url

from . import views

# URL routes for this app. Django matches these regexes with re.search
# semantics against the path, so each pattern must be anchored with '^'
# to match only at the start of the path.
urlpatterns = [
    url(r'^problem/', views.problem, name='problem'),
    url(r'^blog/', views.blog, name='blog'),
    # Bug fix: this pattern was missing the '^' anchor, so it matched any
    # URL merely *containing* "cpp/" instead of paths starting with it.
    url(r'^cpp/', views.cpp, name='cpp'),
    url(r'^$', views.temple, name='temple'),
]
266
101
"""Shared pytest fixtures for the semigenre.audio test suite."""
import os
import pytest
from unittest.mock import MagicMock

from semigenre.audio.library import Library


@pytest.fixture(scope='session')
def small_library():
    """Session-scoped Library built from the bundled test XML file."""
    # Resolve the data file relative to this module so the fixture does
    # not depend on pytest's working directory.
    # TODO: Make this path work from any directory
    xml_path = os.path.join(os.path.dirname(__file__), 'data', 'library.xml')
    return Library(xml_path)


@pytest.fixture(scope='session')
def playable_track():
    """Session-scoped stand-in for a Track, pointing at a real mp3 file."""
    mock_track = MagicMock()
    mock_track.location = './tests/test_play/data/track.mp3'
    return mock_track
614
185
# Generated by Django 4.0.3 on 2022-03-20 02:34 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Pickle', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=256)), ('created_date', models.DateTimeField(verbose_name='created date')), ], ), migrations.CreateModel( name='Tag', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tag', models.CharField(max_length=64)), ], ), migrations.CreateModel( name='Rating', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('rating', models.FloatField(default=1.0)), ('user', models.BigIntegerField()), ('pickle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pickles.pickle')), ], ), migrations.CreateModel( name='PickleTag', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('pickle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pickles.pickle')), ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pickles.tag')), ], ), ]
1,782
524
from django.contrib import admin

# Guard the whole admin registration: if the views app's models cannot be
# imported, skip registration entirely rather than crash at import time.
try:
    from views.models import *
except ImportError:
    pass
else:
    from images_admin import ImageInline
    from ..mixins import *

    class ViewLinkageInline(admin.StackedInline):
        # Inline editor for the generic links attaching a View to any object.
        fields = (
            'order',
            'content_type',
            'object_id',
            'enabled'
        )
        model = ViewLinkage
        extra = 0
        # Generic-relation lookup config for the (content_type, object_id)
        # pair — presumably consumed by Grappelli; verify against the theme.
        related_lookup_fields = {
            'generic': [['content_type', 'object_id'], ],
        }

    class ViewAdmin(TemplatesAdminMixin, PublicaModelAdminMixin, admin.ModelAdmin):
        # Admin for View objects, with linkages and images edited inline.
        fields = (
            'title',
            'slug',
            'short_title',
            'text',
            'template',
            'enabled'
        )
        inlines = [
            ViewLinkageInline,
            ImageInline,
        ]
        # Auto-fill the slug from the title while typing.
        prepopulated_fields = {
            'slug': ('title', )
        }

        class Media:
            # Reuse the rich-text editor's JS assets on this admin page.
            js = TinyMCETextMixin.Media.js

    admin.site.register(View, ViewAdmin)
1,018
293
from setuptools import find_packages, setup

# Packaging metadata for the fenced daemon. Installs a single console
# entry point ('fenced') that dispatches to fenced.main:main.
setup(
    name='fenced',
    version='0.0.1',
    description='TrueNAS SCALE Fencing Daemon',
    packages=find_packages(),
    classifiers=[
        'Programming Language :: Python :: 3',
    ],
    # No third-party runtime dependencies.
    install_requires=[],
    entry_points={
        'console_scripts': [
            'fenced = fenced.main:main',
        ],
    },
)
377
121
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest_lib.common.utils import data_utils

from functional.tests.identity.v3 import test_identity


class UserTests(test_identity.IdentityTests):
    """Functional tests for the 'openstack user' CLI commands (identity v3).

    Each test shells out via self.openstack() and asserts on the parsed
    CLI output; dummy users/projects come from the IdentityTests base.
    """

    def test_user_create(self):
        # Creation success is asserted inside the helper.
        self._create_dummy_user()

    def test_user_delete(self):
        # add_clean_up=False: this test deletes the user itself.
        username = self._create_dummy_user(add_clean_up=False)
        raw_output = self.openstack(
            'user delete '
            '--domain %(domain)s '
            '%(name)s' % {'domain': self.domain_name,
                          'name': username})
        # A successful delete prints nothing.
        self.assertEqual(0, len(raw_output))

    def test_user_list(self):
        raw_output = self.openstack('user list')
        items = self.parse_listing(raw_output)
        self.assert_table_structure(items, test_identity.BASIC_LIST_HEADERS)

    def test_user_set(self):
        username = self._create_dummy_user()
        raw_output = self.openstack(
            'user show '
            '--domain %(domain)s '
            '%(name)s' % {'domain': self.domain_name,
                          'name': username})
        user = self.parse_show_as_object(raw_output)
        # Change both the name and the email, then re-fetch and verify.
        new_username = data_utils.rand_name('NewTestUser')
        new_email = data_utils.rand_name() + '@example.com'
        raw_output = self.openstack(
            'user set '
            '--email %(email)s '
            '--name %(new_name)s '
            '%(id)s' % {'email': new_email,
                        'new_name': new_username,
                        'id': user['id']})
        self.assertEqual(0, len(raw_output))
        raw_output = self.openstack(
            'user show '
            '--domain %(domain)s '
            '%(name)s' % {'domain': self.domain_name,
                          'name': new_username})
        updated_user = self.parse_show_as_object(raw_output)
        # Same identity, updated attributes.
        self.assertEqual(user['id'], updated_user['id'])
        self.assertEqual(new_email, updated_user['email'])

    def test_user_set_default_project_id(self):
        username = self._create_dummy_user()
        project_name = self._create_dummy_project()
        # get original user details
        raw_output = self.openstack(
            'user show '
            '--domain %(domain)s '
            '%(name)s' % {'domain': self.domain_name,
                          'name': username})
        user = self.parse_show_as_object(raw_output)
        # update user
        raw_output = self.openstack(
            'user set '
            '--project %(project)s '
            '--project-domain %(project_domain)s '
            '%(id)s' % {'project': project_name,
                        'project_domain': self.domain_name,
                        'id': user['id']})
        self.assertEqual(0, len(raw_output))
        # get updated user details
        raw_output = self.openstack(
            'user show '
            '--domain %(domain)s '
            '%(name)s' % {'domain': self.domain_name,
                          'name': username})
        updated_user = self.parse_show_as_object(raw_output)
        # get project details
        raw_output = self.openstack(
            'project show '
            '--domain %(domain)s '
            '%(name)s' % {'domain': self.domain_name,
                          'name': project_name})
        project = self.parse_show_as_object(raw_output)
        # check updated user details
        self.assertEqual(user['id'], updated_user['id'])
        self.assertEqual(project['id'], updated_user['default_project_id'])

    def test_user_show(self):
        username = self._create_dummy_user()
        raw_output = self.openstack(
            'user show '
            '--domain %(domain)s '
            '%(name)s' % {'domain': self.domain_name,
                          'name': username})
        items = self.parse_show(raw_output)
        self.assert_show_fields(items, self.USER_FIELDS)
5,068
1,321
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

from store import store
from create_new_notepad import CreateNewNotepad
from notepad_editor import NotepadEditor
from unlock_notepad import UnlockNotepad
from welcome import Welcome

# Global app metadata published through the shared store.
store['FORMAT_VERSION'] = '2'
store['APPLICATION_VERSION'] = '1.0.0'


class Encpad(Gtk.Window):
    """Main application window: a fixed-size Gtk.Stack of four screens
    (welcome, unlock, create, editor) navigated via the shared store."""

    def __init__(self):
        Gtk.Window.__init__(self, title='Encpad')
        # Shared mutable state consumed by the child screens.
        store['password'] = None
        store['notepad'] = None
        self.set_border_width(20)
        self.set_default_size(360, 560)
        self.set_resizable(False)
        stack = Gtk.Stack()
        stack.set_transition_type(Gtk.StackTransitionType.SLIDE_LEFT_RIGHT)
        stack.set_transition_duration(200)
        # Screens switch pages by mutating store['stack'].
        store['stack'] = stack
        stack.add_named(Welcome(), 'welcome')
        stack.add_named(UnlockNotepad(), 'unlock_notepad')
        stack.add_named(CreateNewNotepad(), 'create_new_notepad')
        stack.add_named(NotepadEditor(), 'notepad_editor')
        stack.set_visible_child_name('welcome')
        self.connect('delete-event', self.window_delete)
        self.add(stack)

    def window_delete(self, _widget, _event):
        """Ask for confirmation before closing with unsaved changes.

        Returns True to cancel the close, False to allow it (GTK
        delete-event convention).
        """
        # NOTE(review): assumes store['confirm_close'] is set elsewhere
        # (likely the editor) before a close can happen — confirm, else
        # this raises KeyError.
        if store['confirm_close']:
            dialog = Gtk.MessageDialog(
                message_type=Gtk.MessageType.QUESTION,
                buttons=Gtk.ButtonsType.YES_NO,
                text='You have unsaved changes.'
            )
            dialog.format_secondary_text('Are you sure you want to exit without saving them?')
            response = dialog.run()
            dialog.destroy()
            if response != Gtk.ResponseType.YES:
                return True
        return False


if __name__ == '__main__':
    window = Encpad()
    window.connect('destroy', Gtk.main_quit)
    window.show_all()
    Gtk.main()
1,825
568
from apis import IEXAPI


class RequestExecutor:
    """Fetches the latest price for the configured tickers and renders it
    as a single comma-separated message line."""

    # Output template: ticker,epoch-timestamp,price
    message = """{ticker},{epoch_timestamp},{price}"""
    tickers = ['BTCUSDT']

    def _reformat_response(self, deserialized_response):
        """Flatten the {ticker: {'quote': {...}}} payload into the message
        template, using the first entry of the mapping."""
        symbol = next(item for item in deserialized_response)
        quote_entries = list(deserialized_response.values())
        latest_price = next(entry['quote']['latestPrice'] for entry in quote_entries)
        latest_update = next(entry['quote']['latestUpdate'] for entry in quote_entries)
        return self.message.format(
            ticker=symbol,
            epoch_timestamp=latest_update,
            price=latest_price
        )

    def retrieve_price_data(self):
        """Query the IEX API for the configured tickers and return the
        formatted message line."""
        client = IEXAPI()
        response = client.get_latest_price(self.tickers)
        payload = response.json()
        print(payload)
        return self._reformat_response(payload)
904
262
import torch
from torch.utils.cpp_extension import load

# JIT-compile and load the custom CUDA extension from its C++ binding and
# kernel sources; 'norm' is the resulting module.
norm = load(name="two_norm", sources=["two_norm/two_norm_bind.cpp", "two_norm/two_norm_kernel.cu"], verbose=True)

# Small demo: two random (n, m) matrices and a 1-element output buffer.
n,m = 8,3
a = torch.randn(n,m)
b = torch.randn(n,m)
c = torch.zeros(1)
print("a:\n",a)
print("\nb:\n",b)
a = a.cuda()
b = b.cuda()
c = c.cuda()
# Run the kernel, then synchronize before reading c back.
# NOTE(review): the final comparison against torch.norm(a - b) implies the
# kernel writes the Euclidean norm of (a - b) into c — confirm in the .cu.
norm.two_norm(a,b,c,n,m)
torch.cuda.synchronize()
print("\nresult by two_norm:",c)
print("\nresult by torch.norm:",torch.norm(a-b))
492
224
def identity(x=None):
    """Return *x* unchanged; defaults to None when called with no args."""
    return x


# Backward-compatible alias for identity.
call = identity
52
18
from .img_upload import * from .parse_md import *
49
16
import math
import random
import canvas

# Canvas side length in pixels; h counts dots inside the unit quarter-circle.
f = 200
h = 0
canvas.set_size(f, f)
canvas.set_fill_color(1, 1, 1)
canvas.fill_rect(0,0,f,f)
k = int(input('Number of dots: '))
for i in range(0,k):
    # Random point in [0, 1] x [0, 1] (quantized to the pixel grid).
    x = (random.randint(0,f)/f)
    y = (random.randint(0,f)/f)
    if math.sqrt(math.pow(x,2)+math.pow(y,2))<=1:
        # Inside the quarter circle of radius 1: draw green.
        h+=1
        canvas.set_fill_color(0,1,0)
        canvas.fill_pixel(x*f,y*f)
    else:
        # Outside: draw red.
        canvas.set_fill_color(1,0,0)
        canvas.fill_pixel(x*f,y*f)
    # Progress indicator every 5000 dots.
    if i%5000 == 0:
        print(i)
# NOTE(review): h/k estimates the quarter-circle area, i.e. pi/4 — if pi
# itself is the intended output, multiply by 4. Confirm intent.
print(h/k)
510
262
from collections import deque

from twisted.protocols.policies import TimeoutMixin
from twisted.internet.protocol import Protocol
from twisted.internet.defer import Deferred, TimeoutError, maybeDeferred

import umsgpack

from tint.log import Logger


class NoSuchCommand(Exception):
    """
    Exception raised when a non existent command is called.
    """


class Command(object):
    """A single RPC call: a command name, its args, and a Deferred that
    fires with the remote result (or error)."""

    def __init__(self, command, args):
        self.command = command
        self.args = args
        # Fired via success()/fail() when the matching response arrives.
        self._deferred = Deferred()

    def encode(self):
        # Wire format: msgpack-packed list [command, arg1, arg2, ...].
        c = [self.command] + list(self.args)
        return umsgpack.packb(c)

    @classmethod
    def decode(self, data):
        # NOTE(review): 'self' is actually the class here (classmethod);
        # conventionally this parameter would be named 'cls'.
        parts = umsgpack.unpackb(data)
        return Command(parts[0], parts[1:])

    def success(self, value):
        self._deferred.callback(value)

    def fail(self, error):
        self._deferred.errback(error)

    def __str__(self):
        args = ", ".join([str(a) for a in self.args])
        return "<Command %s(%s)>" % (self.command, args)


class MsgPackProtocol(Protocol, TimeoutMixin):
    """Framed msgpack RPC over a stream transport.

    Frame format: "<length> <tag><payload>", where <tag> is one character
    — '>' command, '<' result, 'e' error — and <length> counts the tag
    plus the payload (hence the `len(...) + 1` when sending).
    """

    _disconnected = False
    # NOTE(review): str-typed buffer and string comparisons suggest this was
    # written for Python 2, where network data arrives as str — confirm
    # before running under Python 3 (dataReceived gets bytes there).
    _buffer = ''
    # Declared length of the frame currently being read; None between frames.
    _expectedLength = None

    def __init__(self, timeOut=10):
        # Outstanding outgoing commands, in send order; responses are
        # matched strictly FIFO in responseReceived/errorReceived.
        self._current = deque()
        self.persistentTimeOut = self.timeOut = timeOut
        self.log = Logger(system=self)

    def _cancelCommands(self, reason):
        # Errback every outstanding command with the given reason.
        while self._current:
            cmd = self._current.popleft()
            cmd.fail(reason)

    def timeoutConnection(self):
        self._cancelCommands(TimeoutError("Connection timeout"))
        self.transport.loseConnection()

    def connectionLost(self, reason):
        self._disconnected = True
        self._cancelCommands(reason)
        Protocol.connectionLost(self, reason)

    def dataReceived(self, data):
        self.resetTimeout()
        self._buffer += data
        if self._expectedLength is None:
            # New frame: the decimal length prefix ends at the first space.
            # NOTE(review): assumes the prefix and its trailing space arrive
            # together — parts[1] raises IndexError otherwise; confirm.
            parts = self._buffer.split(' ', 1)
            self._expectedLength = int(parts[0])
            self._buffer = parts[1]
        if len(self._buffer) >= self._expectedLength:
            # Payload is everything after the one-character tag.
            data = self._buffer[1:self._expectedLength]
            if self._buffer[0] == '>':
                self.commandReceived(data)
            elif self._buffer[0] == '<':
                self.responseReceived(data)
            elif self._buffer[0] == 'e':
                self.errorReceived(data)
            # Drop the consumed frame; recurse to drain pipelined frames.
            self._buffer = self._buffer[self._expectedLength:]
            self._expectedLength = None
            if len(self._buffer) > 0:
                self.dataReceived('')

    def commandReceived(self, data):
        # Dispatch an inbound command to the matching cmd_<name> method.
        cmdObj = Command.decode(data)
        cmd = getattr(self, "cmd_%s" % cmdObj.command, None)
        if cmd is None:
            raise NoSuchCommand("%s is not a valid command" % cmdObj.command)
        self.log.debug("RPC command received: %s" % cmdObj)
        # maybeDeferred lets handlers return either a value or a Deferred.
        d = maybeDeferred(cmd, *cmdObj.args)
        d.addCallback(self.sendResult)
        d.addErrback(self.sendError)

    def sendError(self, error):
        # 'e' frame: msgpack-packed string form of the failure.
        result = umsgpack.packb(str(error))
        self.transport.write("%i e%s" % (len(result) + 1, result))

    def sendResult(self, result):
        # '<' frame: msgpack-packed handler return value.
        result = umsgpack.packb(result)
        self.transport.write("%i <%s" % (len(result) + 1, result))

    def sendCommand(self, cmd, args):
        # Arm the idle timeout only when this is the first pending command.
        if not self._current:
            self.setTimeout(self.persistentTimeOut)
        cmdObj = Command(cmd, args)
        self._current.append(cmdObj)
        data = cmdObj.encode()
        # '+ 1' accounts for the '>' tag character in the frame length.
        self.transport.write("%i >%s" % (len(data) + 1, data))
        return cmdObj._deferred

    def responseReceived(self, data):
        # Resolve the oldest pending command with the unpacked result.
        unpacked = umsgpack.unpackb(data)
        self.log.debug("result received: %s" % data)
        self._current.popleft().success(unpacked)

    def errorReceived(self, data):
        # Fail the oldest pending command with the unpacked error message.
        unpacked = umsgpack.unpackb(data)
        self.log.debug("error received: %s" % data)
        self._current.popleft().fail(Exception(unpacked))
3,931
1,185
from .config import DOMAIN
from .errors import InternalServerError, BadRequest, InvalidMethod, WebTimeoutError
from aiohttp import ClientSession
from asyncio import TimeoutError


async def api_request(session: ClientSession, method: str, url_path: str, headers: dict=None, json: dict=None):
    '''
    Perform a request against the Discoin API and return the raw response.

    *`session` = the aiohttp session
    *`method` = `GET`, `POST`, or `PATCH`
    *`url_path` = The api endpoint
    `headers` = headers for the api request
    `json` = json for the api request

    Raises InvalidMethod for unsupported verbs, WebTimeoutError on timeout,
    InternalServerError for 5xx and BadRequest for 4xx responses.
    '''
    url = DOMAIN + url_path
    verb = method.upper()
    # Dispatch table replaces the if/elif chain over HTTP verbs.
    requesters = {
        "GET": session.get,
        "POST": session.post,
        "PATCH": session.patch,
    }
    try:
        requester = requesters.get(verb)
        if requester is None:
            raise InvalidMethod("Invalid method provided. Must be `GET`, `POST`, or `PATCH`")
        api_response = await requester(url, headers=headers, json=json)
    except TimeoutError:
        raise WebTimeoutError("Your request has timed out, most likely due to the discoin API being down.")

    status = api_response.status
    if status >= 500:
        raise InternalServerError(f"The Discoin API returned the status code {status}")
    elif status >= 400:
        raise BadRequest(f"The Discoin API returned the status code {status}")
    return api_response
1,435
410
class Solution(object):
    def gcd(self, x, y):
        """Greatest common divisor via the Euclidean algorithm."""
        while y:
            x, y = y, x % y
        return x

    def canMeasureWater(self, x, y, z):
        """
        :type x: int
        :type y: int
        :type z: int
        :rtype: bool

        z litres are measurable with jugs of size x and y iff z == 0, or
        z fits in both jugs combined and is a multiple of gcd(x, y)
        (Bezout's identity).
        """
        if not z:
            return True
        if x + y < z:
            return False
        return not z % self.gcd(x, y)
325
117
"""Declare package constants."""
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from datetime import timedelta

from .__version__ import __version__

# Default per-request timeout, in seconds.
DEFAULT_TIMEOUT = 30

# User-Agent sent with every API request, tagged with the package version.
USER_AGENT = "sprinkl-async/" + __version__

# How long an auth token is considered valid before re-authentication
# (kept just under a 24h expiry). The redundant int(23) cast was removed.
TOKEN_LIFETIME = timedelta(hours=23)

# Sprinkl REST API endpoints.
SPRINKL_ENDPOINT = "https://api.sprinkl.com/v1"
SPRINKL_AUTH_ENDPOINT = SPRINKL_ENDPOINT + "/authenticate"
896
303
import torch

from ..bbox import (PseudoSampler, assign_and_sample, bbox2delta,
                    build_assigner, delta2hbboxrec5, hbbox2rbboxRec_v2,
                    rbboxPoly2Rectangle, rec2target)
from ..utils import multi_apply


def orient_anchor_target(bbox_pred_list,
                         anchor_list,
                         valid_flag_list,
                         gt_bboxes_list,
                         gt_rbboxes_poly_list,
                         img_metas,
                         target_means_hbb,
                         target_stds_hbb,
                         target_means_obb,
                         target_stds_obb,
                         cfg,
                         gt_bboxes_ignore_list=None,
                         gt_labels_list=None,
                         label_channels=1,
                         sampling=True,
                         unmap_outputs=True):
    """Compute regression and classification targets for anchors.

    Args:
        anchor_list (list[list]): Multi level anchors of each image.
        valid_flag_list (list[list]): Multi level valid flags of each image.
        gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
        img_metas (list[dict]): Meta info of each image.
        target_means (Iterable): Mean value of regression targets.
        target_stds (Iterable): Std value of regression targets.
        cfg (dict): RPN train configs.

    Returns:
        tuple
    """
    num_imgs = len(img_metas)
    assert len(anchor_list) == len(valid_flag_list) == num_imgs

    # anchor number of multi levels
    num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
    # concat all level anchors and flags to a single tensor
    bbox_pred_new_list = []
    for i in range(num_imgs):
        assert len(anchor_list[i]) == len(valid_flag_list[i])
        anchor_list[i] = torch.cat(anchor_list[i])
        valid_flag_list[i] = torch.cat(valid_flag_list[i])
        # Flatten each level's predictions from (C, H, W) to (-1, 4) and
        # concatenate across levels for this image.
        bbox_preds = []
        for j in range(len(bbox_pred_list)):
            bbox_preds.append(bbox_pred_list[j][i].permute(1, 2, 0).reshape(-1, 4))
        bbox_preds = torch.cat(bbox_preds)
        bbox_pred_new_list.append(bbox_preds)

    # compute targets for each image
    if gt_bboxes_ignore_list is None:
        gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
    if gt_labels_list is None:
        gt_labels_list = [None for _ in range(num_imgs)]
    (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
     all_obb_targets, all_obb_weights, pos_inds_list,
     neg_inds_list) = multi_apply(
         orient_anchor_target_single,
         bbox_pred_new_list,
         anchor_list,
         valid_flag_list,
         gt_bboxes_list,
         gt_rbboxes_poly_list,
         gt_bboxes_ignore_list,
         gt_labels_list,
         img_metas,
         target_means_hbb=target_means_hbb,
         target_stds_hbb=target_stds_hbb,
         target_means_obb=target_means_obb,
         target_stds_obb=target_stds_obb,
         cfg=cfg,
         label_channels=label_channels,
         sampling=sampling,
         unmap_outputs=unmap_outputs)
    # no valid anchors
    if any([labels is None for labels in all_labels]):
        return None
    # sampled anchors of all images
    num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
    num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
    # split targets to a list w.r.t. multiple levels
    labels_list = images_to_levels(all_labels, num_level_anchors)
    label_weights_list = images_to_levels(all_label_weights,
                                          num_level_anchors)
    bbox_targets_list = images_to_levels(all_bbox_targets,
                                         num_level_anchors)
    bbox_weights_list = images_to_levels(all_bbox_weights,
                                         num_level_anchors)
    obb_targets_list = images_to_levels(all_obb_targets, num_level_anchors)
    obb_weights_list = images_to_levels(all_obb_weights, num_level_anchors)
    return (labels_list, label_weights_list, bbox_targets_list,
            bbox_weights_list, obb_targets_list, obb_weights_list,
            num_total_pos, num_total_neg)


def images_to_levels(target, num_level_anchors):
    """Convert targets by image to targets by feature level.

    [target_img0, target_img1] -> [target_level0, target_level1, ...]
    """
    target = torch.stack(target, 0)
    level_targets = []
    start = 0
    for n in num_level_anchors:
        end = start + n
        level_targets.append(target[:, start:end].squeeze(0))
        start = end
    return level_targets


def orient_anchor_target_single(bbox_pred,
                                flat_anchors,
                                valid_flags,
                                gt_bboxes,
                                gt_rbboxes_poly,
                                gt_bboxes_ignore,
                                gt_labels,
                                img_meta,
                                target_means_hbb,
                                target_stds_hbb,
                                target_means_obb,
                                target_stds_obb,
                                cfg,
                                label_channels=1,
                                sampling=True,
                                unmap_outputs=True):
    """Compute the per-image anchor targets (single-image worker used via
    multi_apply in orient_anchor_target)."""
    inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
                                       img_meta['img_shape'][:2],
                                       cfg.allowed_border)
    # inside_flags: flags for the anchors that lie inside the image
    if not inside_flags.any():
        # NOTE(review): callers unpack 8 values; (None, ) * 6 looks
        # inconsistent — confirm.
        return (None, ) * 6
    # assign gt and sample anchors
    anchors = flat_anchors[inside_flags, :]
    bbox_pred = bbox_pred[inside_flags, :]  # keep only in-image anchors

    # Match anchors against gt bboxes to get positive/negative samples,
    # and wrap the result in a sampler object for convenient access later.
    if sampling:
        assign_result, sampling_result = assign_and_sample(
            anchors, gt_bboxes, gt_bboxes_ignore, None, cfg)
    else:
        bbox_assigner = build_assigner(cfg.assigner)
        assign_result = bbox_assigner.assign(anchors, gt_bboxes,
                                             gt_bboxes_ignore, gt_labels)
        bbox_sampler = PseudoSampler()
        sampling_result = bbox_sampler.sample(assign_result, anchors,
                                              gt_bboxes)

    num_valid_anchors = anchors.shape[0]
    bbox_targets = torch.zeros_like(anchors)
    bbox_weights = torch.zeros_like(anchors)
    labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long)
    label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
    obb_targets = torch.zeros_like(anchors)
    obb_weights = torch.zeros_like(anchors)

    pos_inds = sampling_result.pos_inds  # indices of positive samples
    neg_inds = sampling_result.neg_inds  # indices of negative samples
    pos_bbox_pred = bbox_pred[pos_inds, :]
    if len(pos_inds) > 0:
        pos_bbox_targets = bbox2delta(sampling_result.pos_bboxes,
                                      sampling_result.pos_gt_bboxes,
                                      target_means_hbb, target_stds_hbb)
        # convert bboxes to deltas, normalized with target_means/target_stds
        bbox_targets[pos_inds, :] = pos_bbox_targets
        bbox_weights[pos_inds, :] = 1.0  # positives weigh 1, negatives 0
        pos_bbox_rec = delta2hbboxrec5(sampling_result.pos_bboxes,
                                       pos_bbox_pred, target_means_hbb,
                                       target_stds_hbb)
        pos_gt_rbboxes_poly = gt_rbboxes_poly[
            sampling_result.pos_assigned_gt_inds, :]
        pos_gt_rbboxes_rec = rbboxPoly2Rectangle(pos_gt_rbboxes_poly)
        # Oriented-box regression targets from the decoded horizontal boxes
        # and the matched rotated gt rectangles.
        pos_obb_targets = rec2target(pos_bbox_rec, pos_gt_rbboxes_rec,
                                     target_means_obb, target_stds_obb)
        obb_targets[pos_inds, :] = pos_obb_targets
        obb_weights[pos_inds, :] = 1.0
        if gt_labels is None:
            labels[pos_inds] = 1
        else:
            labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
        if cfg.pos_weight <= 0:
            label_weights[pos_inds] = 1.0
        else:
            label_weights[pos_inds] = cfg.pos_weight
    if len(neg_inds) > 0:
        label_weights[neg_inds] = 1.0

    # map up to original set of anchors
    if unmap_outputs:
        num_total_anchors = flat_anchors.size(0)
        labels = unmap(labels, num_total_anchors, inside_flags)
        label_weights = unmap(label_weights, num_total_anchors, inside_flags)
        bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
        bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
        obb_targets = unmap(obb_targets, num_total_anchors, inside_flags)
        obb_weights = unmap(obb_weights, num_total_anchors, inside_flags)

    # labels: the label assigned to each anchor
    # label_weights: cls-loss weight per anchor; negatives get 1, positives
    #   get 1 or cfg.pos_weight
    # bbox_targets: delta between each anchor and its matched gt bbox
    # bbox_weights: bbox-reg weight per anchor; positives 1, negatives 0
    # pos_inds: indices of positive anchors
    # neg_inds: indices of negative anchors
    return (labels, label_weights, bbox_targets, bbox_weights, obb_targets,
            obb_weights, pos_inds, neg_inds)


# Check whether anchors fall outside the image border.
def anchor_inside_flags(flat_anchors, valid_flags, img_shape,
                        allowed_border=0):
    img_h, img_w = img_shape[:2]
    if allowed_border >= 0:
        inside_flags = valid_flags & \
            (flat_anchors[:, 0] >= -allowed_border).type(torch.uint8) & \
            (flat_anchors[:, 1] >= -allowed_border).type(torch.uint8) & \
            (flat_anchors[:, 2] < img_w + allowed_border).type(torch.uint8) & \
            (flat_anchors[:, 3] < img_h + allowed_border).type(torch.uint8)
    else:
        inside_flags = valid_flags
    return inside_flags


def unmap(data, count, inds, fill=0):
    """
    Unmap a subset of item (data) back to the original set of items (of
    size count)
    """
    if data.dim() == 1:
        ret = data.new_full((count, ), fill)
        ret[inds] = data
    else:
        new_size = (count, ) + data.size()[1:]
        ret = data.new_full(new_size, fill)
        ret[inds, :] = data
    return ret
10,338
3,664
def distinct_prime_factors(x: int) -> list:
    """Return the distinct prime factors of the number x, in ascending order."""
    factors = []
    divisor = 2
    while divisor <= x:
        if x % divisor == 0:
            # Strip every power of this divisor so it is recorded exactly
            # once. (Integer modulo replaces the previous `x//d == x/d`
            # float-equality test, which loses precision for large x; the
            # append also belongs after the stripping loop, not inside it,
            # so each prime appears a single time.)
            while x % divisor == 0:
                x //= divisor
            factors.append(divisor)
        else:
            divisor += 1
    return factors


def test_distinct_prime_factors() -> None:
    """
    >>> distinct_prime_factors(1)
    []
    >>> distinct_prime_factors(40)
    [2, 5]
    >>> distinct_prime_factors(165)
    [3, 5, 11]
    >>> distinct_prime_factors(701)
    [701]
    >>> distinct_prime_factors(2187)
    [3]
    """
    pass


if __name__ == "__main__":
    from doctest import testmod

    testmod()
728
261
from eufySecurityApi.const import (
    TWO_FACTOR_AUTH_METHODS, API_BASE_URL, API_HEADERS, RESPONSE_ERROR_CODE,
    ENDPOINT_LOGIN, ENDPOINT_DEVICE_LIST, DEVICE_TYPE, ENDPOINT_STATION_LIST,
    ENDPOINT_REQUEST_VERIFY_CODE, ENDPOINT_TRUST_DEVICE_LIST, ENDPOINT_TRUST_DEVICE_ADD
)
import logging
import json
import copy
import functools
import requests
import asyncio
from datetime import datetime, timedelta

from eufySecurityApi.model import Device

_LOGGER = logging.getLogger(__name__)


class Api():
    """Asynchronous client for the Eufy Security cloud HTTP API.

    Handles login (including e-mail/SMS two-factor authentication),
    token bookkeeping, and retrieval of the account's devices and
    stations.  Blocking `requests` calls are pushed onto an executor so
    the event loop is never blocked.
    """

    def __init__(self, username=None, password=None, token=None, domain=API_BASE_URL,
                 token_expire_at=None, preferred2FAMethod=TWO_FACTOR_AUTH_METHODS.EMAIL):
        self._username = username
        self._password = password
        self._preferred2FAMethod = preferred2FAMethod
        self._token = token
        # token_expire_at is a POSIX timestamp, or None when unknown.
        self._tokenExpiration = None if token_expire_at is None else datetime.fromtimestamp(token_expire_at)
        self._refreshToken = None
        self._domain = domain
        self.headers = API_HEADERS
        self._LOGGER = logging.getLogger(__name__)
        self.devices = {}    # device_sn -> Device
        self.stations = {}   # station_sn -> Device
        self._userId = None

    @property
    def userId(self):
        """User id returned by the cloud after a successful login."""
        return self._userId

    async def authenticate(self):
        """Log in if no valid token is held.

        Returns 'OK' on success, or 'send_verify_code' when the account
        requires two-factor authentication (a code has then been sent).
        Raises LoginException on any unexpected response.
        """
        # BUG FIX: the original condition was `self._tokenExpiration > datetime.now()`,
        # i.e. it re-authenticated while the token was still VALID and skipped
        # login once it had expired.  It also crashed when a token was supplied
        # without an expiration.  Log in only when no usable token is held.
        if (self._token is None or self._tokenExpiration is None
                or self._tokenExpiration <= datetime.now()):
            response = await self._request('POST', ENDPOINT_LOGIN, {
                'email': self._username,
                'password': self._password
            }, self.headers)
            if response.status_code != 200:
                # BUG FIX: the %-format arguments were not parenthesized as a
                # tuple, which raised TypeError instead of the intended message.
                message = 'Unexpected response code: %s, on url: %s' % (
                    response.status_code, response.request.url)
                self._LOGGER.error(message)
                raise LoginException(message)
            dataresult = response.json()
            self._LOGGER.debug('login response: %s', dataresult)
            if RESPONSE_ERROR_CODE(dataresult['code']) == RESPONSE_ERROR_CODE.WHATEVER_ERROR:
                self._token = dataresult['data']['auth_token']
                self._tokenExpiration = datetime.fromtimestamp(dataresult['data']['token_expires_at'])
                if ('domain' in dataresult['data']
                        and dataresult['data']['domain'] != ''
                        and dataresult['data']['domain'] != self._domain):
                    # The account lives on another regional API server:
                    # drop the fresh token and retry against that domain.
                    self._token = None
                    self._tokenExpiration = None
                    self._domain = dataresult['data']['domain']
                    self._LOGGER.info('Switching to new domain: %s', self._domain)
                    return await self.authenticate()
                self._LOGGER.debug('Token: %s', self._token)
                self._LOGGER.debug('Token expire at: %s', self._tokenExpiration)
                self._userId = dataresult['data']['user_id']
                return 'OK'
            elif RESPONSE_ERROR_CODE(dataresult['code']) == RESPONSE_ERROR_CODE.NEED_VERIFY_CODE:
                self._LOGGER.info('need two factor authentication. Send verification code...')
                # The pre-2FA token is still needed to request/confirm the code.
                self._token = dataresult['data']['auth_token']
                self._tokenExpiration = datetime.fromtimestamp(dataresult['data']['token_expires_at'])
                self._userId = dataresult['data']['user_id']
                self._LOGGER.debug('Token: %s', self._token)
                self._LOGGER.debug('Token expire at: %s', self._tokenExpiration)
                await self.requestVerifyCode()
                return "send_verify_code"
            else:
                message = 'Unexpected API response code %s: %s (%s)' % (
                    dataresult['code'], dataresult['msg'], response.request.url)
                self._LOGGER.error(message)
                raise LoginException(message)
        else:
            # Token still valid: nothing to do.
            return 'OK'

    async def update(self, device_sn=None):
        """Refresh stations and devices (or one device when device_sn given)."""
        if device_sn is None:
            await self.get_stations()
            await self.get_devices()
        else:
            await self.get_devices(device_sn)

    async def get_devices(self, device_sn=None):
        """Fetch the device list (optionally a single device) and cache it.

        Returns the `self.devices` mapping (device_sn -> Device).
        """
        data = {}
        if device_sn is not None:
            data['device_sn'] = device_sn
        response = await self._request('POST', ENDPOINT_DEVICE_LIST, data, self.headers)
        if response.status_code != 200:
            message = 'Unexpected response code: %s, on url: %s' % (
                response.status_code, response.request.url)
            self._LOGGER.error(message)
            raise LoginException(message)
        dataresult = response.json()
        self._LOGGER.debug('get_devices response: %s', dataresult)
        if RESPONSE_ERROR_CODE(dataresult['code']) != RESPONSE_ERROR_CODE.WHATEVER_ERROR:
            message = 'Unexpected API response code %s: %s' % (dataresult['code'], dataresult['msg'])
            self._LOGGER.error(message)
            raise ApiException(message)
        for device in dataresult['data']:
            try:
                deviceType = DEVICE_TYPE(device['device_type'])
                if device['device_sn'] not in self.devices:
                    self.devices[device['device_sn']] = Device.fromType(self, deviceType)
                    self.devices[device['device_sn']].init(device)
                else:
                    self.devices[device['device_sn']].update(device)
            except Exception as e:
                # One malformed/unknown device must not abort the whole sync.
                self._LOGGER.exception(e)
        return self.devices

    async def get_stations(self):
        """Fetch the station list and cache it; returns `self.stations`."""
        response = await self._request('POST', ENDPOINT_STATION_LIST, {}, self.headers)
        if response.status_code != 200:
            message = 'Unexpected response code: %s, on url: %s' % (
                response.status_code, response.request.url)
            self._LOGGER.error(message)
            raise LoginException(message)
        dataresult = response.json()
        self._LOGGER.debug('get_stations response: %s', dataresult)
        if RESPONSE_ERROR_CODE(dataresult['code']) != RESPONSE_ERROR_CODE.WHATEVER_ERROR:
            message = 'Unexpected API response code %s: %s' % (dataresult['code'], dataresult['msg'])
            self._LOGGER.error(message)
            raise ApiException(message)
        for device in dataresult['data']:
            try:
                deviceType = DEVICE_TYPE(device['device_type'])
                self.stations[device['station_sn']] = Device.fromType(self, deviceType)
                self.stations[device['station_sn']].init(device)
            except Exception as e:
                self._LOGGER.exception(e)
        return self.stations

    async def get_device(self, deviceId):
        """Not implemented yet."""
        pass

    async def refresh_token(self):
        """Not implemented yet."""
        pass

    async def invalidate_token(self):
        """Forget every credential so the next call forces a fresh login."""
        self._token = None
        self._refreshToken = None
        self._tokenExpiration = None

    async def requestVerifyCode(self):
        """Ask the cloud to send a 2FA code via the preferred method."""
        response = await self._request('POST', ENDPOINT_REQUEST_VERIFY_CODE, {
            'message_type': self._preferred2FAMethod.value
        }, self.headers)
        if response.status_code != 200:
            message = 'Unexpected response code: %s, on url: %s' % (
                response.status_code, response.request.url)
            self._LOGGER.error(message)
            raise ApiException(message)
        dataresult = response.json()
        self._LOGGER.debug('request verify code response: %s', dataresult)
        if RESPONSE_ERROR_CODE(dataresult['code']) != RESPONSE_ERROR_CODE.WHATEVER_ERROR:
            message = 'Unexpected API response code %s: %s' % (dataresult['code'], dataresult['msg'])
            self._LOGGER.error(message)
            raise ApiException(message)
        return 'OK'

    async def sendVerifyCode(self, verifyCode):
        """Confirm a 2FA code, register this client as a trusted device,
        and verify the registration.  Returns 'OK' or 'KO'.
        """
        # Step 1: check the verify code against the login endpoint.
        response = await self._request('POST', ENDPOINT_LOGIN, {
            'verify_code': verifyCode,
            'transaction': datetime.now().timestamp()
        }, self.headers)
        if response.status_code != 200:
            # BUG FIX: missing tuple parentheses around the %-format arguments.
            message = 'Unexpected response code: %s, on url: %s' % (
                response.status_code, response.request.url)
            self._LOGGER.error(message)
            raise ApiException(message)
        dataresult = response.json()
        self._LOGGER.debug('send verify code response: %s', dataresult)
        if RESPONSE_ERROR_CODE(dataresult['code']) != RESPONSE_ERROR_CODE.WHATEVER_ERROR:
            message = 'Unexpected API response code %s: %s' % (dataresult['code'], dataresult['msg'])
            self._LOGGER.error(message)
            raise ApiException(message)
        # Step 2: add this client to the trusted-device list.
        response = await self._request('POST', ENDPOINT_TRUST_DEVICE_ADD, {
            'verify_code': verifyCode,
            'transaction': datetime.now().timestamp()
        }, self.headers)
        if response.status_code != 200:
            message = 'Unexpected response code: %s, on url: %s' % (
                response.status_code, response.request.url)
            self._LOGGER.error(message)
            raise ApiException(message)
        dataresult = response.json()
        self._LOGGER.debug('add trust device response: %s', dataresult)
        if RESPONSE_ERROR_CODE(dataresult['code']) != RESPONSE_ERROR_CODE.WHATEVER_ERROR:
            message = 'Unexpected API response code %s: %s' % (dataresult['code'], dataresult['msg'])
            self._LOGGER.error(message)
            raise ApiException(message)
        # Step 3: read back the trusted-device list to confirm.
        response = await self._request('GET', ENDPOINT_TRUST_DEVICE_LIST, None, self.headers)
        if response.status_code != 200:
            message = 'Unexpected response code: %s, on url: %s' % (
                response.status_code, response.request.url)
            self._LOGGER.error(message)
            raise ApiException(message)
        dataresult = response.json()
        self._LOGGER.debug('add trust device response: %s', dataresult)
        if RESPONSE_ERROR_CODE(dataresult['code']) != RESPONSE_ERROR_CODE.WHATEVER_ERROR:
            message = 'Unexpected API response code %s: %s' % (dataresult['code'], dataresult['msg'])
            self._LOGGER.error(message)
            raise ApiException(message)
        isTrusted = False
        for trusted in dataresult['data']['list']:
            if trusted['is_current_device'] == 1:
                # Trusted devices keep their token for a very long time.
                self._tokenExpiration = datetime.now() + timedelta(days=365 * 10)
                isTrusted = True
        return 'OK' if isTrusted else 'KO'

    @property
    def connected(self):
        """True when a token is held and has not yet expired."""
        return (self._token is not None and self._tokenExpiration is not None
                and self._tokenExpiration > datetime.now())

    @property
    def base_url(self):
        return 'https://%s/v1' % self._domain

    @property
    def token(self):
        return self._token

    @property
    def token_expire_at(self):
        # Guard against a missing expiration instead of raising AttributeError.
        return None if self._tokenExpiration is None else self._tokenExpiration.timestamp()

    @property
    def domain(self):
        return self._domain

    async def _request(self, method, url, data, headers=None) -> requests.Response:
        """Execute a blocking `requests` call on an executor.

        `url` is an endpoint path which gets prefixed with `base_url`.
        Only GET and POST are supported; anything else raises ApiException.
        """
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        if method == 'GET':
            call = requests.get
        elif method == 'POST':
            call = requests.post
        else:
            raise ApiException('Unsupported operation: %s' % method)
        endpoint = url
        url = self.base_url + url
        # BUG FIX: `headers={}` was a mutable default argument; copy a fresh
        # dict instead so callers' headers are never mutated or shared.
        newHeaders = copy.copy(headers) if headers is not None else {}
        # BUG FIX: the original compared the already-prefixed full URL against
        # the bare endpoint, so the (possibly None) auth token was attached to
        # EVERY request, including the initial login.
        if endpoint != ENDPOINT_LOGIN or (data is not None and 'verify_code' in data):
            newHeaders['X-Auth-Token'] = self._token
        self._LOGGER.debug('method: %s', method)
        self._LOGGER.debug('url: %s', url)
        self._LOGGER.debug('data: %s', data)
        self._LOGGER.debug('headers: %s', newHeaders)
        response = await loop.run_in_executor(
            None, functools.partial(call, url, json=data, headers=newHeaders))
        return response


class ApiException(Exception):
    """Base error for unexpected API responses."""
    pass


class LoginException(ApiException):
    """Raised when authentication against the cloud fails."""
    pass
12,588
3,741
# Copyright 2017 Sidewalk Labs | https://www.apache.org/licenses/LICENSE-2.0 from __future__ import ( absolute_import, division, print_function, unicode_literals ) import mock from mock import Mock import unittest import pandas as pd import numpy as np from doppelganger import Accuracy from doppelganger.accuracy import ErrorStat class TestAccuracy(unittest.TestCase): def _mock_variable_bins(self): return [ ('num_people', '1'), ('num_people', '3'), ('num_people', '2'), ('num_people', '4+'), ('num_vehicles', '1'), ('num_vehicles', '0'), ('num_vehicles', '2'), ('num_vehicles', '3+'), ('age', '0-17'), ('age', '18-34'), ('age', '65+'), ('age', '35-64'), ] def _mock_state_puma(self): return [('20', '00500'), ('20', '00602'), ('20', '00604'), ('29', '00901'), ('29', '00902')] def _mock_comparison_dataframe(self): # Just the top 10 lines of a sample PUMS file, counts will NOT line up with marginals. return pd.DataFrame( data=[ [0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2], ], columns=['pums', 'marginal', 'gen'], index=self._mock_variable_bins()) @mock.patch('doppelganger.Accuracy._comparison_dataframe') def test_error_metrics(self, mock_comparison_dataframe): accuracy = Accuracy(Mock(), Mock(), Mock(), Mock(), Mock(), Mock(), Mock()) accuracy.comparison_dataframe = self._mock_comparison_dataframe() self.assertEqual(accuracy.root_mean_squared_error(), (1.0, 1.0)) self.assertListEqual(accuracy.root_squared_error().mean().tolist(), [1.0, 1.0]) self.assertListEqual(accuracy.absolute_pct_error().mean().tolist(), [2.0, 0.66666666666666663]) @mock.patch('doppelganger.Accuracy.from_data_dir') @mock.patch('doppelganger.Accuracy._comparison_dataframe') def test_error_report(self, mock_comparison_datframe, mock_from_data_dir): accuracy = Accuracy(Mock(), Mock(), Mock(), Mock(), Mock(), Mock(), Mock()) accuracy.comparison_dataframe = self._mock_comparison_dataframe() 
accuracy.from_data_dir.return_value = accuracy state_puma = dict() state_puma['20'] = ['00500', '00602', '00604'] state_puma['29'] = ['00901', '00902'] expected_columns = ['marginal-pums', 'marginal-doppelganger'] df_puma, df_variable, df_total =\ accuracy.error_report( state_puma, 'fake_dir', marginal_variables=['num_people', 'num_vehicles', 'age'], statistic=ErrorStat.ABSOLUTE_PCT_ERROR ) # Test df_total df_total_expected = pd.Series( [2.00000, 0.666667], index=expected_columns ) self.assertTrue(all((df_total - df_total_expected) < 1)) # Test df_puma expected_puma_data = np.reshape([2.0, 2/3.0]*5, (5, 2)) df_expected_puma = pd.DataFrame( data=expected_puma_data, index=self._mock_state_puma(), columns=expected_columns ) self.assertTrue((df_expected_puma == df_puma).all().all()) # Test df_variable expected_variable_data = np.reshape([2.0, 2/3.0]*12, (12, 2)) df_expected_variable = pd.DataFrame( data=expected_variable_data, index=self._mock_variable_bins(), columns=expected_columns ) self.assertTrue((df_expected_variable == df_variable).all().all()) # Test unimplemented statistic name try: self.assertRaises( Exception, Accuracy.error_report( state_puma, 'fake_dir', marginal_variables=['num_people', 'num_vehicles', 'age'], statistic='wrong-statistic-name' ) ) except Exception: pass
4,595
1,498
""" Visitor Method is a Behavioral Design Pattern which allows us to separate the algorithm from an object structure on which it operates. It helps us to add new features to an existing class hierarchy dynamically without changing it. All the behavioral patterns proved as the best methods to handle the communication between the objects. Similarly, it is used when we have to perform an operation on a group of similar kinds of objects. A Visitor Method consists of two parts: • method named as Visit() implemented by the visitor and used and called for every element of the data structure. • Visitable classes providing Accept() methods that accept a visitor Design Components • Client: The Client class acts as the consumer of the classes of the visitor design pattern. It can access the data structure objects and can instruct them to accept a visitor for the future processing. • Visitor: An Abstract class which is used to declare visit operations for all visitable classes. • Concrete Visitor: Each Visitor will be responsible for different operations. For each type of visitor all the visit methods, declared in abstract visitor, must be implemented. • Visitable: Accept operations is declared by this class. It also act as the entry point which enables an object to be visited by visitor. • Concrete Visitable: These classes implement the Visitable class and defines the accept operation. The visitor object is passed to this object using the accept operation. Problem without using Visitor Method Imagine you are handling the Software management of GeeksforGeeks and they have started certain courses such as DSA, SDE, and STL which are definitely useful for students who are preparing for the product based companies. But how will you handle all the data of Courses, Instructors, students, classes, IDs in your database? If you go with a simple direct approach to handle such a situation, you will definitely end up with a mess only. 
Visitor-problem-diagram Solution using Visitor Method Let’s look at the solution to the above-described problem. The Visitor method suggests adding a new behavior in a separate class called Visitor class instead of mixing it with the already existing classes. We will pass the original object to the visitor’s method as parameters such that the method will access all the necessary information. • Python3 The Courses hierarchy cannot be changed to add new functionality dynamically. Abstract Crop class for Concrete Courses_At_GFG classes: methods defined in this class will be inherited by all Concrete Courses_At_GFG classes.""" class Courses_At_GFG: def accept(self, visitor): visitor.visit(self) def teaching(self, visitor): print(self, "Taught by ", visitor) def studying(self, visitor): print(self, "studied by ", visitor) def __str__(self): return self.__class__.__name__ """Concrete Courses_At_GFG class: Classes being visited.""" class SDE(Courses_At_GFG): pass class STL(Courses_At_GFG): pass class DSA(Courses_At_GFG): pass """ Abstract Visitor class for Concrete Visitor classes: method defined in this class will be inherited by all Concrete Visitor classes.""" class Visitor: def __str__(self): return self.__class__.__name__ """ Concrete Visitors: Classes visiting Concrete Course objects. 
These classes have a visit() method which is called by the accept() method of the Concrete Course_At_GFG classes.""" class Instructor(Visitor): def visit(self, crop): crop.teaching(self) class Student(Visitor): def visit(self, crop): crop.studying(self) """creating objects for concrete classes""" sde = SDE() stl = STL() dsa = DSA() """Creating Visitors""" instructor = Instructor() student = Student() """Visitors visiting courses""" sde.accept(instructor) sde.accept(student) stl.accept(instructor) stl.accept(student) dsa.accept(instructor) dsa.accept(student) '''Output SDE Taught by Instructor SDE studied by Student STL Taught by Instructor STL studied by Student DSA Taught by Instructor DSA studied by Student UML Diagram Following is the UML diagram for Visitor Method UML-diagram-visitor-method Advantages • Open/Closed principle: Introducing new behavior in class is easy which can work with objects of different classes without making changes in these classes. • Single Responibility Principle: Multiple versions of same behavior can be operated into the same class. • Addition of entities: Adding an entity in Visitor Method is easy as we have to make changes in visitor class only and it will not affect the existing item. • Updating Logic: If the logic of operation is updated, then we need to make change only in the visitor implementation rather than doing it in all the item classes. Disadvantages • Lots of Updates: We have to update each and every vistor whenever a class get added or removed form the primary hierarchy • Hard to Extend: If there are too many visitor classes then it becomes really hard to extend the whole interface of the class. • Lack of Access: Sometimes visitors might not have the access to private field of certain classes that they are supposed to work with. Applicability • Recursive structures: Visitor Method works really well with recursive structures like directory trees or XML structures. 
The Visitor object can visit each node in the recursive structure. • Performing Operations: We can use the visitor method when we have to perform operations on all the elements of a complex object, such as a tree. '''
5,578
1,423
from __future__ import absolute_import import re, os, copy from datetime import datetime from . import const from . import utils __all__ = ['Orgfile', 'orgFromFile'] #=============================================================================== # Main class definition for entire org file #=============================================================================== class Orgfile: """Class definition for an org file. Args: orgfile (str): full pathname of the org file todostates (dict): dictionary containing the 'in_progress' and 'completed' TODO keywords **kwargs: dictionary containing the command-line arguments Attributes: active (list): list of lists of the active (incomplete) tasks basename (str): basename of the org file data (str): string containing all text in the org file orgfile (str) parsed (list): list of lists of each line, parsed pattern_line (:obj:`re`): regular expression object to parse each line todostates (dict) tags (list, optional): tags to filter by title (str, optional): title string, if present in the file """ def __init__(self, orgfile, todostates, **kwargs): """Create a new Orgfile instance.""" self.orgfile = orgfile self.basename = os.path.split(self.orgfile)[1] self.todostates = todostates self.tags = kwargs['tags'] self.states = kwargs['states'] self.pattern_line = utils.get_parse_string(self.todostates) f = open(orgfile, 'r') self.data = f.read() self.parse() # File-wide properties, if they exist if const.catpattern.search(self.data): self.apply_category() has_title = const.titlepattern.search(self.data) if has_title: self.title = has_title.group().split(':')[1].strip() self.get_active_todos() self.get_days_to_duedate() if kwargs['agenda']: self.subset_agenda() if kwargs['states']: self.subset_states() if kwargs['tags']: self.subset_tags() if kwargs['colors']: self.colorize() def __add__(self, other): """Concatenate the active TODOS of two 'Orgfile' objects.""" return self.get_active_todos() + other.get_active_todos() def 
__len__(self): return len(self.active) #------------------------------------------------------- # Class methods #------------------------------------------------------- def parse(self): """Parse the given org file. Returns a list of lists, each with 6 elements: 1. Level (i.e., # of asterisks, or spaces if it is a bare list) 2. State (i.e., the "TODO" keyword for that line) 3. Text (the actual text of the task/note) 4. Number/percent of tasks completed (if the child list has checkboxes) 5. Date string (active or inactive) 6. Tag string (if tags are present) """ matches = self.pattern_line.findall(self.data) ll = [list(x) for x in matches] self.parsed = ll def get_active_todos(self): """Return the lines with an active due date.""" date_lines = [] for _,x in enumerate(self.parsed): if x[4] != '': date_lines.append(x) in_prog = [x for x in date_lines if self.todostates['in_progress'].search(x[1])] self.active = in_prog def get_days_to_duedate(self): """Return a list of integers of the days left until the duedate.""" days = [] for i,x in enumerate(self.active): days.append(utils.days_until_due(x[4])) self.days = days def apply_category(self): """Add a 'tag' if there is a file-wide '#+CATEGORY' string.""" category = const.catpattern.search(self.data).group().split(':')[1].strip() for i,x in enumerate(self.parsed): if x[5].strip() == '': self.parsed[i][5] = ':' + category + ':' else: self.parsed[i][5] = self.parsed[i][5] + category + ':' #------------------------------------------------------- # Methods to subset the tasks to be printed #------------------------------------------------------- def subset_agenda(self): """Subset the active todos if the 'agenda' option is specified.""" todos = []; days = [] for i,x in enumerate(self.days): if x < 7: todos.append(self.active[i]) days.append(x) self.active = todos self.days = days def subset_tags(self): """Subset the active todos if the 'tags' option is specified.""" todos = []; days = [] for i,x in enumerate(self.active): if 
re.search(self.tags, x[5], re.IGNORECASE): todos.append(self.active[i]) days.append(self.days[i]) self.active = todos self.days = days def subset_states(self): """Subset the active todos if the 'states' option is specified.""" todos = []; days = [] for i,x in enumerate(self.active): if re.search(self.states, x[1], re.IGNORECASE): todos.append(self.active[i]) days.append(self.days[i]) self.active = todos self.days = days def colorize(self): """Colorize dates, tags, todo states, and inline text.""" todos = copy.deepcopy(self.active) for i,x in enumerate(todos): if self.todostates['in_progress'].search(x[1]): if re.search('TODO', x[1]): todos[i][1] = const.styles['todo'] + x[1].strip() else: todos[i][1] = const.styles['started'] + x[1].strip() todos[i][1] += const.styles['normal'] # Apply different styles depending on due date if self.days[i] > 0: todos[i][3] = const.styles['checkbox'] + x[3] + const.styles['normal'] todos[i][4] = const.styles['date'] + x[4].strip() + const.styles['normal'] + '\n' todos[i][5] = const.styles['tag'] + x[5].strip().lstrip(':').title() + const.styles['normal'] todos[i][2] = utils.format_inline(todos[i][2]) + const.styles['normal'] else: if self.days[i] < 0: todos[i][4] = const.styles['late'] + x[4].strip() + '\n' todos[i][5] = const.styles['late'] + x[5].strip().lstrip(':').title() todos[i][2] = const.styles['late'] + x[2] elif self.days[i] == 0: todos[i][4] = const.styles['today'] + x[4].strip() + '\n' todos[i][5] = const.styles['today'] + x[5].strip().lstrip(':').title() todos[i][2] = const.styles['today'] + x[2] self.colored = todos #----------------------------------------------------------- # Function that will loop through all 'org' files listed in 'vimrc' #----------------------------------------------------------- def orgFromFile(**kwargs): """Get list of org files and TODO states from vimrc, read them, and print.""" if kwargs['file']: orgfiles = kwargs['file'].split() else: orgfiles = utils.get_org_files(kwargs['rcfile']) 
todostates = utils.get_todo_states(kwargs['rcfile']) # Loop through the org files todolist = [] for f in orgfiles: org = Orgfile(f, todostates, **kwargs) if kwargs['colors']: todolist += org.colored else: todolist += org.active # Add dates for the next week even if there are no tasks if kwargs['agenda']: for d in const.dates_agenda: if not any(re.search(d, item) for item in [x[4] for x in todolist]): todolist.append(['', '', '', '', const.styles['bright'] + d, '', '\n']) todolist = sorted(todolist, key=lambda x: const.ansicolors.sub('', x[4])) if kwargs['agenda']: for i,x in enumerate(todolist): # match = const.datepattern.search(x[4]).group() # repl = datetime.strptime(match, '<%Y-%m-%d %a>').strftime('%A %d %b %Y') # tmp = re.sub(match, repl, x[4]) # todolist[i][4] = tmp todolist[i][4] = utils.day_names(x[4]) # Remove repeating dates repeats = [] for i,x in enumerate(todolist): if i > 0 and todolist[i][4] == todolist[i-1][4]: repeats.append(i) for i in repeats: todolist[i][4] = '' # Print if not todolist: print "No tasks!"; return else: utils.print_all(todolist, **kwargs)
8,683
2,598
#Makes embeds easy to use and with none of the clutter. #--------- Libaries ---------# import discord, sys sys.path.append("../") from settings import * #--------- Code ---------# def baseEmbed(Title, ImageURL, EmbedColor=Default_Color, FooterText=Name, FooterIconURL=BotImage): """ A basic embed that has a name, profile picture, title, and image content """ embedVar = discord.Embed(title=str(Title), color=EmbedColor) embedVar.set_image(url=str(ImageURL)) embedVar.set_footer(text=FooterText, icon_url=FooterIconURL) return embedVar def TwoFieldEmbed(Title, FieldOne, FieldTwo, EmbedColor=Default_Color, FooterText=Name, FooterIconURL=BotImage, i1=True, i2=True): """ A basic embed that has a name, title, and two fields for text Takes Field values in a double, 0 being the name and 1 being the value. """ embedVar = discord.Embed(title=str(Title), color=EmbedColor) embedVar.add_field(name=str(FieldOne[0]), value=str(FieldOne[1]), inline=i1) embedVar.add_field(name=str(FieldTwo[0]), value=str(FieldTwo[1]), inline=i2) embedVar.set_footer(text=FooterText, icon_url=FooterIconURL) return embedVar def ThreeFieldEmbed(Title, FieldOne, FieldTwo, FieldThree, EmbedColor=Default_Color, FooterText=Name, FooterIconURL=BotImage, i1=True, i2=True, i3=True): """ A basic embed that has a name, title, and three fields for text Takes Field values in a double, 0 being the name and 1 being the value. """ embedVar = discord.Embed(title=str(Title), color=EmbedColor) embedVar.add_field(name=str(FieldOne[0]), value=str(FieldOne[1]), inline=i1) embedVar.add_field(name=str(FieldTwo[0]), value=str(FieldTwo[1]), inline=i2) embedVar.add_field(name=str(FieldThree[0]), value=str(FieldThree[1]), inline=i3) embedVar.set_footer(text=FooterText, icon_url=FooterIconURL) return embedVar
1,868
635
from mock.mock import MagicMock, patch
import ca_test_common
import ceph_ec_profile
import pytest


class TestCephEcProfile(object):
    """Unit tests for the ceph_ec_profile Ansible module.

    Command-building helpers are checked against literal expected command
    lists; run_module() is exercised with exec_command mocked out, so no
    real `ceph` binary is needed.
    """

    def setup_method(self):
        # Common fixture values reused by the command-builder tests.
        self.fake_params = []
        self.fake_binary = 'ceph'
        self.fake_cluster = 'ceph'
        self.fake_name = 'foo'
        self.fake_k = 2
        self.fake_m = 4
        self.fake_module = MagicMock()
        self.fake_module.params = self.fake_params

    def test_get_profile(self):
        # get_profile must build the exact `osd erasure-code-profile get` command.
        expected_cmd = [
            self.fake_binary,
            '-n', 'client.admin',
            '-k', '/etc/ceph/ceph.client.admin.keyring',
            '--cluster', self.fake_cluster,
            'osd', 'erasure-code-profile', 'get',
            self.fake_name,
            '--format=json'
        ]

        assert ceph_ec_profile.get_profile(self.fake_module, self.fake_name) == expected_cmd

    @pytest.mark.parametrize("stripe_unit,force", [(False, False), (32, True), (False, True), (32, False)])
    def test_create_profile(self, stripe_unit, force):
        # stripe_unit and --force are optional and appended only when set.
        expected_cmd = [
            self.fake_binary,
            '-n', 'client.admin',
            '-k', '/etc/ceph/ceph.client.admin.keyring',
            '--cluster', self.fake_cluster,
            'osd', 'erasure-code-profile', 'set',
            self.fake_name,
            'k={}'.format(self.fake_k),
            'm={}'.format(self.fake_m),
        ]
        if stripe_unit:
            expected_cmd.append('stripe_unit={}'.format(stripe_unit))
        if force:
            expected_cmd.append('--force')

        assert ceph_ec_profile.create_profile(self.fake_module,
                                              self.fake_name,
                                              self.fake_k,
                                              self.fake_m,
                                              stripe_unit,
                                              self.fake_cluster,
                                              force) == expected_cmd

    def test_delete_profile(self):
        expected_cmd = [
            self.fake_binary,
            '-n', 'client.admin',
            '-k', '/etc/ceph/ceph.client.admin.keyring',
            '--cluster', self.fake_cluster,
            'osd', 'erasure-code-profile', 'rm',
            self.fake_name
        ]

        assert ceph_ec_profile.delete_profile(self.fake_module, self.fake_name, self.fake_cluster) == expected_cmd

    @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ceph_ec_profile.exec_command')
    def test_state_present_nothing_to_update(self, m_exec_command, m_exit_json, m_fail_json):
        # Existing profile already matches the requested k/m/stripe_unit:
        # the module must report changed=False and only run the `get`.
        ca_test_common.set_module_args({"state": "present",
                                        "name": "foo",
                                        "k": 2,
                                        "m": 4,
                                        "stripe_unit": 32,
                                        })
        m_exit_json.side_effect = ca_test_common.exit_json
        m_fail_json.side_effect = ca_test_common.fail_json
        m_exec_command.return_value = (0,
                                       ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'],
                                       '{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}',  # noqa: E501
                                       '')

        with pytest.raises(ca_test_common.AnsibleExitJson) as r:
            ceph_ec_profile.run_module()

        result = r.value.args[0]
        assert not result['changed']
        assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json']
        assert result['stdout'] == '{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}'  # noqa: E501
        assert not result['stderr']
        assert result['rc'] == 0

    @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ceph_ec_profile.exec_command')
    def test_state_present_profile_to_update(self, m_exec_command, m_exit_json, m_fail_json):
        # Profile exists with m=4 but m=6 is requested: a forced `set` must follow the `get`.
        ca_test_common.set_module_args({"state": "present",
                                        "name": "foo",
                                        "k": 2,
                                        "m": 6,
                                        "stripe_unit": 32
                                        })
        m_exit_json.side_effect = ca_test_common.exit_json
        m_fail_json.side_effect = ca_test_common.fail_json
        m_exec_command.side_effect = [
            (0,
             ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'],
             '{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}',  # noqa: E501
             ''),
            (0,
             ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=6', 'stripe_unit=32', '--force'],
             '',
             ''
             )
        ]

        with pytest.raises(ca_test_common.AnsibleExitJson) as r:
            ceph_ec_profile.run_module()

        result = r.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=6', 'stripe_unit=32', '--force']
        assert not result['stdout']
        assert not result['stderr']
        assert result['rc'] == 0

    @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ceph_ec_profile.exec_command')
    def test_state_present_profile_doesnt_exist(self, m_exec_command, m_exit_json, m_fail_json):
        # `get` fails with ENOENT (rc=2): the module must create the profile.
        ca_test_common.set_module_args({"state": "present",
                                        "name": "foo",
                                        "k": 2,
                                        "m": 4,
                                        "stripe_unit": 32
                                        })
        m_exit_json.side_effect = ca_test_common.exit_json
        m_fail_json.side_effect = ca_test_common.fail_json
        m_exec_command.side_effect = [
            (2,
             ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'],
             '',
             "Error ENOENT: unknown erasure code profile 'foo'"),
            (0,
             ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=4', 'stripe_unit=32', '--force'],
             '',
             ''
             )
        ]

        with pytest.raises(ca_test_common.AnsibleExitJson) as r:
            ceph_ec_profile.run_module()

        result = r.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=4', 'stripe_unit=32', '--force']
        assert not result['stdout']
        assert not result['stderr']
        assert result['rc'] == 0

    @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ceph_ec_profile.exec_command')
    def test_state_absent_on_existing_profile(self, m_exec_command, m_exit_json, m_fail_json):
        # Removing an existing profile reports changed=True.
        ca_test_common.set_module_args({"state": "absent",
                                        "name": "foo"
                                        })
        m_exit_json.side_effect = ca_test_common.exit_json
        m_fail_json.side_effect = ca_test_common.fail_json
        m_exec_command.return_value = (0,
                                       ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo'],
                                       '',
                                       '')

        with pytest.raises(ca_test_common.AnsibleExitJson) as r:
            ceph_ec_profile.run_module()

        result = r.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo']
        assert result['stdout'] == 'Profile foo removed.'
        assert not result['stderr']
        assert result['rc'] == 0

    @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ceph_ec_profile.exec_command')
    def test_state_absent_on_nonexisting_profile(self, m_exec_command, m_exit_json, m_fail_json):
        # Removing a missing profile is idempotent: changed=False, rc still 0.
        ca_test_common.set_module_args({"state": "absent",
                                        "name": "foo"
                                        })
        m_exit_json.side_effect = ca_test_common.exit_json
        m_fail_json.side_effect = ca_test_common.fail_json
        m_exec_command.return_value = (0,
                                       ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo'],
                                       '',
                                       'erasure-code-profile foo does not exist')

        with pytest.raises(ca_test_common.AnsibleExitJson) as r:
            ceph_ec_profile.run_module()

        result = r.value.args[0]
        assert not result['changed']
        assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo']
        assert result['stdout'] == "Skipping, the profile foo doesn't exist"
        assert result['stderr'] == 'erasure-code-profile foo does not exist'
        assert result['rc'] == 0

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    def test_check_mode(self, m_exit_json):
        # In Ansible check mode no command runs and nothing changes.
        ca_test_common.set_module_args({
            'name': 'foo',
            'k': 2,
            'm': 4,
            '_ansible_check_mode': True
        })
        m_exit_json.side_effect = ca_test_common.exit_json

        with pytest.raises(ca_test_common.AnsibleExitJson) as result:
            ceph_ec_profile.run_module()

        result = result.value.args[0]
        assert not result['changed']
        assert result['rc'] == 0
        assert not result['stdout']
        assert not result['stderr']
11,195
3,325
import argparse
import logging
import warnings
from concurrent.futures import ThreadPoolExecutor

import numpy as np
from lithops import Storage
from lithops.storage.utils import CloudObject

from sm.engine.annotation.diagnostics import (
    DiagnosticType,
    extract_dataset_diagnostics,
    add_diagnostics,
    del_diagnostics,
)
from sm.engine.annotation.imzml_reader import LithopsImzMLReader, FSImzMLReader
from sm.engine.db import DB
from sm.engine.storage import get_s3_client
from sm.engine.util import GlobalInit, split_cos_path, split_s3_path

logger = logging.getLogger('engine')


def parse_input_path_for_lithops(sm_config, input_path):
    """Resolve *input_path* (s3:// or cos://) to Lithops cloud objects.

    Returns (storage, imzml_cobject, ibd_cobject). Raises AssertionError if
    the path does not contain exactly one .imzML and one .ibd key.
    """
    if input_path.startswith('s3://') or input_path.startswith('s3a://'):
        backend = 'aws_s3'
        bucket, prefix = split_s3_path(input_path)
    else:
        backend = 'ibm_cos'
        bucket, prefix = split_cos_path(input_path)

    storage = Storage(sm_config['lithops'], backend)
    if backend == 'aws_s3' and sm_config['lithops']['aws_s3']['endpoint'].startswith('http://'):
        # WORKAROUND for local Minio access
        # Lithops forces the url to HTTPS, so overwrite the S3 client with a fixed client
        # https://github.com/lithops-cloud/lithops/issues/708
        storage.storage_handler.s3_client = get_s3_client()

    keys_in_path = storage.list_keys(bucket, prefix)
    imzml_keys = [key for key in keys_in_path if key.lower().endswith('.imzml')]
    ibd_keys = [key for key in keys_in_path if key.lower().endswith('.ibd')]

    debug_info = f'Path {input_path} had keys: {keys_in_path}'
    assert len(imzml_keys) == 1, f'Couldn\'t determine imzML file. {debug_info}'
    assert len(ibd_keys) == 1, f'Couldn\'t determine ibd file. {debug_info}'

    imzml_cobject = CloudObject(storage.backend, bucket, imzml_keys[0])
    ibd_cobject = CloudObject(storage.backend, bucket, ibd_keys[0])
    return storage, imzml_cobject, ibd_cobject


def process_dataset(sm_config, del_first, ds_id):
    """Regenerate diagnostics for one dataset.

    Picks the filesystem or Lithops imzML reader depending on whether the
    dataset's input_path is local. Returns (ds_id, success_flag) and never
    raises — failures are logged and reported via the flag.
    """
    logger.info(f'Processing {ds_id}')
    try:
        if del_first:
            del_diagnostics(ds_id)

        ds = DB().select_one_with_fields('SELECT * FROM dataset WHERE id = %s', (ds_id,))
        input_path = ds['input_path']
        if input_path.startswith('/'):
            imzml_reader = FSImzMLReader(input_path)
            if not imzml_reader.is_mz_from_metadata or not imzml_reader.is_tic_from_metadata:
                logger.info(f'{ds_id} missing metadata, reading spectra...')
                for _ in imzml_reader.iter_spectra(np.arange(imzml_reader.n_spectra)):
                    # Read all spectra so that mz/tic data is populated
                    pass
        else:
            storage, imzml_cobject, ibd_cobject = parse_input_path_for_lithops(
                sm_config, input_path
            )
            imzml_reader = LithopsImzMLReader(
                storage,
                imzml_cobject=imzml_cobject,
                ibd_cobject=ibd_cobject,
            )
            if not imzml_reader.is_mz_from_metadata or not imzml_reader.is_tic_from_metadata:
                logger.info(f'{ds_id} missing metadata, reading spectra...')
                # Chunked to bound memory when streaming spectra through Lithops
                chunk_size = 1000
                for chunk_start in range(0, imzml_reader.n_spectra, chunk_size):
                    chunk_end = min(imzml_reader.n_spectra, chunk_start + chunk_size)
                    chunk = np.arange(chunk_start, chunk_end)
                    for _ in imzml_reader.iter_spectra(storage, chunk):
                        # Read all spectra so that mz/tic data is populated
                        pass

        diagnostics = extract_dataset_diagnostics(ds_id, imzml_reader)
        add_diagnostics(diagnostics)
        return ds_id, True
    except Exception:
        logger.error(f'Failed to process {ds_id}', exc_info=True)
        return ds_id, False


def find_dataset_ids(ds_ids_param, sql_where, missing, failed, succeeded):
    """Build the list of dataset ids to process.

    ds_ids_param (comma-separated) or sql_where select candidates explicitly;
    missing/failed/succeeded filter by diagnostic status. When nothing is
    specified, defaults to all FINISHED datasets missing diagnostics.
    Raises AssertionError when the selection is empty.
    """
    db = DB()
    if ds_ids_param:
        specified_ds_ids = ds_ids_param.split(',')
    elif sql_where:
        # NOTE(review): sql_where is interpolated directly into SQL. This is an
        # operator-run admin CLI, but the clause must come from a trusted user.
        specified_ds_ids = db.select_onecol(f"SELECT id FROM dataset WHERE {sql_where}")
    else:
        specified_ds_ids = None
    if not missing:
        # Default to processing all datasets missing diagnostics
        missing = specified_ds_ids is None and not failed and not succeeded

    ds_type_counts = db.select(
        'SELECT d.id, COUNT(DISTINCT dd.type), COUNT(dd.error) '
        'FROM dataset d LEFT JOIN dataset_diagnostic dd on d.id = dd.ds_id '
        'WHERE d.status = \'FINISHED\' '
        'GROUP BY d.id'
    )
    if missing or failed or succeeded:
        # Get ds_ids based on status (or filter specified ds_ids on status)
        status_ds_ids = set()
        for ds_id, n_diagnostics, n_errors in ds_type_counts:
            if missing and (n_diagnostics or 0) < len(DiagnosticType):
                status_ds_ids.add(ds_id)
            elif failed and n_errors > 0:
                status_ds_ids.add(ds_id)
            elif succeeded and n_diagnostics == len(DiagnosticType) and n_errors == 0:
                status_ds_ids.add(ds_id)

        if specified_ds_ids is not None:
            # Keep order, if directly specified
            ds_ids = [ds_id for ds_id in specified_ds_ids if ds_id in status_ds_ids]
        else:
            # Order by ID descending, so that newer DSs are updated first
            ds_ids = sorted(status_ds_ids, reverse=True)
    else:
        ds_ids = specified_ds_ids

    assert ds_ids, 'No datasets found'
    return ds_ids


def run_diagnostics(sm_config, ds_ids, del_first, jobs):
    """Process *ds_ids*, fanning out over *jobs* threads (sequential if 1)."""
    failed_ds_ids = []
    with ThreadPoolExecutor(jobs or None) as executor:
        map_func = executor.map if jobs != 1 else map
        for i, (ds_id, success) in enumerate(
            map_func(lambda ds_id: process_dataset(sm_config, del_first, ds_id), ds_ids)
        ):
            # i + 1 so progress reads 1/N..N/N (was 0-based: "0/N" on the first item)
            logger.info(f'Completed {ds_id} ({i + 1}/{len(ds_ids)})')
            if not success:
                failed_ds_ids.append(ds_id)
    if failed_ds_ids:
        logger.error(f'Failed datasets ({len(failed_ds_ids)}): {failed_ds_ids}')


def main():
    """CLI entry point: parse args, select datasets, regenerate diagnostics."""
    parser = argparse.ArgumentParser(
        description='Reindex or update dataset results. NOTE: FDR diagnostics are unsupported as '
        'they require the dataset to be completely reprocessed.'
    )
    parser.add_argument('--config', default='conf/config.json', help='SM config path')
    parser.add_argument('--ds-id', help='DS id (or comma-separated list of ids)')
    parser.add_argument('--sql-where', help='SQL WHERE clause for datasets table')
    parser.add_argument(
        '--missing',
        action='store_true',
        help='(Default if ds-id/failed/succeeded not specified) '
        'Process datasets that are missing diagnostics',
    )
    parser.add_argument(
        '--failed',
        action='store_true',
        help='Process datasets that have errors in their diagnostics',
    )
    parser.add_argument(
        '--succeeded', action='store_true', help='Process datasets even if they have diagnostics'
    )
    parser.add_argument(
        '--del-first', action='store_true', help='Delete existing diagnostics before regenerating'
    )
    parser.add_argument('--jobs', '-j', type=int, default=1, help='Number of parallel jobs to run')
    parser.add_argument('--verbose', '-v', action='store_true')
    args = parser.parse_args()

    with GlobalInit(config_path=args.config) as sm_config:
        if not args.verbose:
            logging.getLogger('lithops.storage.backends').setLevel(logging.WARNING)
            warnings.filterwarnings('ignore', module='pyimzml')

        ds_ids = find_dataset_ids(
            ds_ids_param=args.ds_id,
            sql_where=args.sql_where,
            missing=args.missing,
            failed=args.failed,
            succeeded=args.succeeded,
        )
        run_diagnostics(
            sm_config=sm_config,
            ds_ids=ds_ids,
            del_first=args.del_first,
            jobs=args.jobs,
        )


if __name__ == '__main__':
    main()
8,038
2,529
""" ABM: Agent Based Modelling The base class Cell provides one method update() that will be called on every tick. Ticks are generated by Grids - a collection of Cell object """ import abc import copy from . import utils class SimpleCell(abc.ABC): """ The simple cell only exposes an .update method which should be called upon every tick. This method is a function only of its direct neighbours. """ def __init__(self, initial_conditions=None): pass @abc.abstractmethod def update(self, neighbours): pass class SimpleGrid(utils.list2): """ A simple grid is an uniform 2D grid of SimpleCells Args: width (int): The number of cells along the x-dimension height (int): The number of cells along the y-dimension cell (patraffic.SimpleCell): The cell class to use real_time (boolean): When True, the grid tick method will be realtime """ def __init__(self, row, col, cell, real_time=True, initial_conditions=None): self._dim = (row, col) self._cell_kls = cell if initial_conditions is None: cell_grid = utils.list2(row, col) for _, (r,c) in cell_grid.items(): cell_grid[r][c] = self._cell_kls() else: cell_grid = utils.list2(row, col) for _, (r,c) in cell_grid.items(): init_val = initial_conditions[r][c] cell_grid[r][c] = self._cell_kls(initial_conditions=init_val) self._array = cell_grid._array if real_time: self.tick_type = 'realtime' else: self.tick_type = 'staged' @property def tick_type(self): return self._tick_type @tick_type.setter def tick_type(self, val): if val == 'realtime': self._tick_method = self.tick_realtime self.set_sweep('dr') elif val == 'staged': self._tick_method = self.tick_staged self._stage = copy.deepcopy(self._array) def tick(self, *args, **kwargs): self._tick_method(*args, **kwargs) def get_neighbours(self, r, c): """ Returns the subset that surrounds point (r,c). Note that the start position of this subset is always up one, left one. 
But if (r,c) is on a boundary then: - 'up one, left one' will give a negative index, hence start uses max() - the nrow's or ncol's will be less hence the ternary operators """ start = (max(0, r-1), max(0, c-1)) maxr, maxc = self.dim nrow = 2 if (r == 0 or r == maxr-1) else 3 ncol = 2 if (c == 0 or c == maxc-1) else 3 neighbours = self.subset(start, nrow, ncol) this_r = 0 if (r == 0) else 1 this_c = 0 if (c == 0) else 1 neighbours[this_r][this_c] = None return neighbours def tick_staged(self, *args, **kwargs): for _, (r, c) in self.items(): cell = self._stage[r][c] cell.update(self.get_neighbours(r, c), *args, **kwargs) self._array = copy.deepcopy(self._stage) def tick_realtime(self, *args, **kwargs): for cell, (x, y) in self.sweep(): cell.update(self.get_neighbours(r, c), *args, **kwargs)
3,279
1,053
from abstract.instruccion import *
from tools.tabla_tipos import *


class expresion_validacion(instruccion):
    """AST/instruction node for a validation expression.

    NOTE(review): presumably part of an interpreter's instruction tree —
    confirm against the `instruccion` base class.
    """

    def __init__(self, comando, dato, line, column, num_nodo):
        super().__init__(line, column)
        # The original constructor discarded these arguments; keep them so
        # later compilation/visit phases can read them.
        self.comando = comando
        self.dato = dato
        self.num_nodo = num_nodo
207
67
from aioconsul.api import API
from aioconsul.common import cached_property
from .acl_endpoint import ACLEndpoint
from .agent_endpoint import AgentEndpoint
from .catalog_endpoint import CatalogEndpoint
from .checks_endpoint import ChecksEndpoint
from .coordinate_endpoint import CoordinateEndpoint
from .event_endpoint import EventEndpoint
from .health_endpoint import HealthEndpoint
from .kv_endpoint import KVEndpoint
from .members_endpoint import MembersEndpoint
from .operator_endpoint import OperatorEndpoint
from .query_endpoint import QueryEndpoint
from .services_endpoint import ServicesEndpoint
from .session_endpoint import SessionEndpoint
from .status_endpoint import StatusEndpoint

__all__ = ["Consul"]


class Consul:
    """Facade over the Consul HTTP API.

    Wraps a single :class:`API` client and exposes one lazily-created
    endpoint object per Consul subsystem (acl, agent, kv, ...) via
    ``cached_property``, so each endpoint is instantiated at most once.
    """

    def __init__(self, address, *, token=None, consistency=None, loop=None):
        # All endpoint objects share this one API client.
        self.api = API(address,
                       token=token,
                       consistency=consistency,
                       loop=loop)

    def close(self):
        """Close the underlying API client, if any."""
        if self.api:
            self.api.close()

    # Close the client when the facade is garbage-collected.
    __del__ = close

    @property
    def address(self):
        """Address of the Consul agent (delegates to the API client)."""
        return self.api.address

    @property
    def token(self):
        """ACL token forwarded with requests; settable and deletable."""
        return self.api.token

    @token.setter
    def token(self, token):
        self.api.token = token

    @token.deleter
    def token(self):
        del self.api.token

    @property
    def consistency(self):
        """Default consistency mode used by the API client."""
        return self.api.consistency

    @cached_property
    def acl(self):
        return ACLEndpoint(self.api)

    @cached_property
    def agent(self):
        return AgentEndpoint(self.api)

    @cached_property
    def catalog(self):
        return CatalogEndpoint(self.api)

    @cached_property
    def checks(self):
        return ChecksEndpoint(self.api)

    @cached_property
    def coordinate(self):
        return CoordinateEndpoint(self.api)

    @cached_property
    def event(self):
        return EventEndpoint(self.api)

    @cached_property
    def health(self):
        return HealthEndpoint(self.api)

    @cached_property
    def kv(self):
        return KVEndpoint(self.api)

    @cached_property
    def members(self):
        return MembersEndpoint(self.api)

    @cached_property
    def operator(self):
        return OperatorEndpoint(self.api)

    @cached_property
    def query(self):
        return QueryEndpoint(self.api)

    @cached_property
    def services(self):
        return ServicesEndpoint(self.api)

    @cached_property
    def session(self):
        return SessionEndpoint(self.api)

    @cached_property
    def status(self):
        return StatusEndpoint(self.api)

    def __repr__(self):
        return "<%s(%r)>" % (self.__class__.__name__, str(self.address))
2,688
774
import colorama

colorama.init()


def print_static_progress_bar(title, percentage, text, color="white"):
    """Print a one-shot, 50-cell progress bar line.

    Args:
        title: label printed before the bar.
        percentage: 0-100 integer/float; controls how many cells are filled.
        text: status text printed after the bar.
        color: 'red', 'green', 'blue' or anything else for white.
    """
    bar_width = 50
    empty_bar = "—" * bar_width

    # Resolve the fill color; unknown names fall back to white (same as before).
    color_map = {
        'red': colorama.Fore.RED,
        'green': colorama.Fore.GREEN,
        'blue': colorama.Fore.BLUE,
    }
    fore = color_map.get(color, colorama.Fore.WHITE)
    fill_char = fore + "█" + colorama.Fore.WHITE

    # Convert percentage to a cell count.
    position = round((percentage * bar_width) / 100)

    # BUGFIX: previously filled position+1 cells, so 0% showed one filled cell
    # and 100% produced a 51-cell bar. Fill exactly `position` cells.
    filled_bar = (fill_char * position) + empty_bar[position:]

    final_bar = "{}: |{}| {} ({}%)".format(title, filled_bar, text, percentage)
    print(final_bar)
733
254
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python 2 stream-socket control server with rotating-file logging.
"""
from subprocess import PIPE, Popen
import platform
import os
import sys
import re
import zlib

from socket import socket
from socket import AF_INET, SOCK_STREAM, SHUT_RDWR
from socket import SOL_SOCKET, SO_REUSEADDR

# Convenience bind addresses.
localhost = '127.0.0.1'
allhosts = '0.0.0.0'

import logging
import logging.config

# dictConfig schema: root logger writes everything to a rotating file;
# 'console' and 'tcp' handlers are defined but not attached to root.
LOG_SETTINGS = {
    # --------- GENERAL OPTIONS ---------#
    'version': 1,
    'disable_existing_loggers': False,
    'root': {
        'level': 'NOTSET',
        'handlers': ['file'],
    },
    #---------- HANDLERS ---------------#
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'NOTSET',
            'formatter': 'detailed',
            'stream': 'ext://sys.stdout',
        },
        'file': {
            'class': 'logging.handlers.RotatingFileHandler',
            'level': 'NOTSET',
            'formatter': 'detailed',
            'filename': 'server.log',
            'mode': 'a',
            'maxBytes': 10485760,  # rotate at 10 MiB
            'backupCount': 5,
        },
        'tcp' : {
            'class' : 'logging.handlers.SocketHandler',
            'level' : 'INFO',
            'host' : '192.168.1.2',
            'port' : 9020,
            'formatter': 'detailed',
        },
    },
    # ----- FORMATTERS -----------------#
    'formatters': {
        'detailed': {
            'format': '%(asctime)s %(module)-17s line:%(lineno)-4d %(funcName)s() ' \
                      '%(levelname)-8s %(message)s',
        },
        'verbose': {
            'format': '%(levelname)-8s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
            'datefmt': '%a, %d %b %Y %H:%M:%S'
        },
        'email': {
            'format': 'Timestamp: %(asctime)s\nModule: %(module)s\n' \
                      'Line: %(lineno)d\nMessage: %(message)s',
        },
    },
}

logging.config.dictConfig(LOG_SETTINGS)
logger = logging.getLogger('root')


class Server(object):
    """ Stream socket server class -- no definitive name """

    def __init__(self, host, port, maxconn=10, buffersize=1024):
        # host/port: bind address; maxconn: listen backlog;
        # buffersize: bytes per recv() call.
        self.host = host
        self.port = port
        self.maxconn = maxconn
        self.buffersize = buffersize
        # Clients IP's connected to this server
        self.clients = []
        # Client Sockets List
        self.conns = []
        # In the beginning accept only one client
        # @TODO: Handle multiple clients
        self.addr = ""
        self.conn = None
        # create socket handler
        s = socket(AF_INET, SOCK_STREAM)
        # Reuse address
        s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        s.bind((host, port))
        s.listen(maxconn)
        self.sock = s

    def __str__(self):
        txt = "addr %s" % str(self.addr)
        txt += "\n%s" % str(self.conns)
        return txt

    def connect(self):
        """ Accept connection from client """
        # Blocks until one client connects, then reads its greeting message.
        print "Waiting Clients"
        logger.debug("Waiting client connection")
        conn, addr = self.sock.accept()
        self.clients.append(addr)
        self.conns.append(conn)
        self.conn = conn
        self.addr = addr
        client = self.recevc()
        logger.debug("%s", {'conn':conn, 'addr':addr, 'client': client})
        print "Received connection from", addr[0]
        print client

    def sendc(self, msg):
        """ Send flow control message to client """
        self.conn.sendall(msg)

    def recevc(self):
        """ Receive control message from client module """
        # Returns the first non-empty chunk (up to buffersize bytes);
        # raises if the peer closed the connection.
        conn = self.conn
        while True:
            data = conn.recv(self.buffersize)
            #print data
            if not data:
                raise Exception("Error connection closed")
            else:
                #logger.debug("len(data) =%s" % len(data))
                #data2 = zlib.decompress(data)
                #logger.debug("len(data2) =%s" % len(data2))
                return data

    def getclients(self):
        """Accept up to 8 client connections, collecting their greetings."""
        logger.debug("Waiting client connection")
        self.sock.settimeout(None)
        for i in range(8):
            logger.info("i = %s" % i)
            try:
                logger.debug("accepting connection")
                conn, addr = self.sock.accept()
                logger.debug("connection accepted")
                self.clients.append(addr[0]) # Clients hostnames or ip's
                self.conns.append(conn) # Clients sockets
                # NOTE(review): settimeout(0.1) is applied to the *previous*
                # self.conn before it is replaced by the new one — and on the
                # first pass self.conn is None, so this raises and the bare
                # except below swallows it. Verify the intended ordering.
                self.conn.settimeout(0.1)
                self.conn = conn
                client = self.recevc()
                print "Get client from %s - %s" % (addr[0], client)
            except:
                # NOTE(review): bare except hides all errors (including the
                # AttributeError above); consider catching socket.error only.
                pass
        logger.info("%s" % {'clients': self.clients, 'conn': self.conn,
                            'addr': self.addr } )
        self.sock.settimeout(None)

    def handling_connections(self):
        pass

    def send(self):
        pass


class CommandServer(Server):
    # Server bound to all interfaces that immediately gathers clients.
    def __init__(self, host='0.0.0.0', port=9090, buffersize=1024):
        super(CommandServer, self).__init__(host=host, port=port,
                                            buffersize=buffersize)
        self.getclients()


# Module-level side effect: starts listening on import/run and blocks.
s = CommandServer('0.0.0.0', 9090, buffersize=1024)
print s.recevc()
5,227
1,695
from os import environ

from maga.maga_plugin_service import MagaPluginService
from common.plugin_model_api import api, PluginModelAPI, PluginModelListAPI, PluginModelTrainAPI, \
    PluginModelInferenceAPI, app, PluginModelParameterAPI

# Single service instance shared by all REST resources registered below.
# NOTE(review): "multivarite" looks like a misspelling of "multivariate",
# but it is baked into the public URL paths — renaming would break clients.
multivarite = MagaPluginService()

# Route table: model listing/CRUD, train, inference, and parameters.
api.add_resource(PluginModelListAPI(multivarite), '/multivarite/models')
api.add_resource(PluginModelAPI(multivarite), '/multivarite/model',
                 '/multivarite/model/<model_key>')
api.add_resource(PluginModelTrainAPI(multivarite), '/multivarite/<model_key>/train')
api.add_resource(PluginModelInferenceAPI(multivarite), '/multivarite/<model_key>/inference')
api.add_resource(PluginModelParameterAPI(multivarite), '/multivarite/parameters')

if __name__ == '__main__':
    # Host/port come from the environment, with development defaults.
    HOST = environ.get('SERVER_HOST', '0.0.0.0')
    PORT = environ.get('SERVER_PORT', 56789)
    app.run(HOST, PORT, threaded=True, use_reloader=False)
888
310
#!/usr/bin/python3
# -*- mode: python; coding: utf-8 -*-

from typing import List, Dict, Optional, Generator, Tuple

import collections

from jf import command
from jf import git
from jf import schema


class Error(Exception):
    '''Base for errors in the module.'''


SEPARATOR = schema.SEPARATOR


class JfTemplateCfg(schema.SectionCfg):
    '''Jflow template config.'''

    # Declarative list of config keys this section recognises.
    KEYS = [
        'version',
        'upstream', 'fork',
        'lreview_prefix', 'lreview_suffix',
        'review_prefix', 'review_suffix',
        'ldebug_prefix', 'ldebug_suffix',
        'debug_prefix', 'debug_suffix',
    ]

    version = schema.Value(schema.IntType, ['version'], default=0)
    upstream = schema.MaybeValue(schema.StrType, ['upstream'])
    fork = schema.MaybeValue(schema.StrType, ['fork'])

    # Branch-name prefix/suffix templates; 'l' variants are local-side names.
    ldebug_prefix = schema.MaybeValue(schema.StrType, ['debug-local-prefix'])
    ldebug_suffix = schema.MaybeValue(schema.StrType, ['debug-local-suffix'])
    debug_prefix = schema.MaybeValue(schema.StrType, ['debug-prefix'])
    debug_suffix = schema.MaybeValue(schema.StrType, ['debug-suffix'])
    lreview_prefix = schema.MaybeValue(schema.StrType, ['public-prefix'])
    lreview_suffix = schema.MaybeValue(schema.StrType, ['public-suffix'])
    review_prefix = schema.MaybeValue(schema.StrType, ['remote-prefix'])
    review_suffix = schema.MaybeValue(schema.StrType, ['remote-suffix'])


class JfCfg(schema.SectionCfg):
    '''Section of global Jflow config.'''

    remote = schema.Value(schema.StrType, ['remote'], default='origin')
    # Named templates, keyed by template name.
    template = schema.Map(JfTemplateCfg, ['template'])
    default_green = schema.ListValue(schema.StrType, ['default-green'])
    autosync = schema.Value(schema.BoolType, ['autosync'], default=False)


class JfBranchCfg(schema.SectionCfg):
    '''Jflow configuration for a branch.'''

    KEYS = [
        'version',
        'remote',
        'upstream', 'fork',
        'lreview', 'review',
        'ldebug', 'debug',
        'hidden', 'protected', 'tested', 'sync',
        'debug_prefix', 'debug_suffix',
    ]

    version = schema.Value(schema.IntType, ['version'], default=0)
    remote = schema.MaybeValue(schema.StrType, ['remote-name'])

    upstream = schema.Value(schema.BranchType, ['upstream'], git.ZeroBranchName)
    upstream_shortcut = schema.MaybeValue(schema.StrType, ['upstream-shortcut'])
    fork = schema.Value(schema.BranchType, ['fork'], git.ZeroBranchName)
    fork_shortcut = schema.MaybeValue(schema.StrType, ['fork-shortcut'])

    ldebug = schema.MaybeValue(schema.BranchType, ['ldebug'])
    debug = schema.MaybeValue(schema.BranchType, ['debug'])
    lreview = schema.MaybeValue(schema.BranchType, ['public'])
    review = schema.MaybeValue(schema.BranchType, ['remote'])

    debug_prefix = schema.MaybeValue(schema.StrType, ['debug-prefix'])
    debug_suffix = schema.MaybeValue(schema.StrType, ['debug-suffix'])

    # Properties below are not only for jflow-controlled branches
    hidden = schema.Value(schema.BoolType, ['hidden'], default=False)
    protected = schema.Value(schema.BoolType, ['protected'], default=False)
    sync = schema.Value(schema.BoolType, ['sync'], default=False)
    fork_from = schema.MaybeValue(schema.BranchType, ['fork-from'])
    tested = schema.MaybeValue(schema.BranchType, ['tested'])


class StgitBranchCfg(schema.SectionCfg):
    '''Stgit configuration for a branch.'''

    version = schema.Value(schema.IntType, ['stackformatversion'], default=0)
    parentbranch = schema.MaybeValue(schema.StrType, ['parentbranch'])


class GitRemoteCfg(schema.SectionCfg):
    '''Remote configuration.'''

    url = schema.MaybeValue(schema.StrType, ['url'])


class GitBranchCfg(schema.SectionCfg):
    '''Branches configuration.'''

    # Nested sections: branch.<name>.jflow.* and branch.<name>.stgit.*
    jf = schema.Section(JfBranchCfg, ['jflow'])
    stgit = schema.Section(StgitBranchCfg, ['stgit'])

    remote = schema.Value(schema.StrType, ['remote'], default='')
    merge = schema.Value(schema.StrType, ['merge'], default='')
    description = schema.Value(schema.StrType, ['description'], default='')


class Root(schema.Root):
    '''Root of the git-config schema, backed by `git config --list` output.'''

    def __init__(self) -> None:
        schema.Root.__init__(self, GitConfigHolder())

    branch = schema.Map(GitBranchCfg, ['branch'])
    remote = schema.Map(GitRemoteCfg, ['remote'])
    jf = schema.Section(JfCfg, ['jflow'])


class GitConfigHolder:
    '''Lazy cache over `git config --list`, with write-through mutators.

    Multi-valued keys are kept as lists; mutators shell out to `git config`
    and then update the in-memory cache (when it has been populated).
    '''

    def __init__(self) -> None:
        # None until first access; then a name -> [values] mapping.
        self._config: Optional[Dict[str, List[str]]] = None

    @property
    def config(self) -> Dict[str, List[str]]:
        # Populate the cache on first access.
        if self._config is not None:
            return self._config
        self._config = collections.defaultdict(list)
        for name, value in self._gen_config():
            self._config[name].append(value)
        return self._config

    @staticmethod
    def _gen_config() -> Generator[Tuple[str, str], None, None]:
        # Parse `git config --list` lines of the form name=value.
        for line in command.read(['git', 'config', '--list']):
            name, value = line.split('=', 1)
            yield name, value

    def set(self, name: str, value: str) -> None:
        '''Set a single-valued key (replaces the cached value list).'''
        command.run(['git', 'config', '--local', name, value])
        if self._config is None:
            return
        self._config[name] = [value]

    def reset(self, name: str, value: str) -> None:
        '''Replace all values of a key with one value.'''
        command.run(['git', 'config', '--local', '--replace-all', name, value])
        if self._config is None:
            return
        self._config[name] = [value]

    def append(self, name: str, value: str) -> None:
        '''Add an additional value to a multi-valued key.'''
        command.run(['git', 'config', '--local', '--add', name, value])
        if self._config is None:
            return
        self._config[name].append(value)

    def unset(self, name: str) -> None:
        '''Remove all values of a key.'''
        command.run(['git', 'config', '--local', '--unset-all', name])
        if self._config is None:
            return
        del self._config[name]
5,766
1,823
#!/usr/bin/env python3

from Utils import *


def isOkay(ps, p1, p2):
    """True if both concatenations of primes p1 and p2 are themselves prime.

    ps is a set of known primes used as a fast pre-check before isPrime.
    """
    newP1 = int(str(p1) + str(p2))
    newP2 = int(str(p2) + str(p1))
    return (newP1 in ps or isPrime(newP1)) and (newP2 in ps or isPrime(newP2))


def isCombination(indices):
    """True when all indices are distinct (i.e. a combination, not a multiset)."""
    return len(indices) == len(set(indices))


def isFinished(indices, indexLimit):
    """True once the most significant index has run past the prime list."""
    return indices[-1] >= indexLimit


def nextIndicesWithShort(indices, indexLimit, pl, foundMinimum):
    """Advance *indices* like a counter in base indexLimit, in place.

    Carries overflow into the next position, and additionally short-circuits
    (skips ahead) whenever the primes at the current indices already sum to at
    least *foundMinimum*, since such tuples cannot improve the answer.
    """
    indices[0] += 1
    for i in range(len(indices[:-1])):
        v = indices[i]
        if v >= indexLimit:
            # Carry into the next digit.
            indices[i] -= indexLimit
            indices[i + 1] += 1
        elif pl[v] >= foundMinimum:
            # This prime alone meets the bound — skip the whole sub-range.
            indices[i + 1] += 1
            for j in range(i + 1):
                indices[j] = 0
        elif sum([pl[pi] for pi in indices[i:]]) >= foundMinimum:
            # Partial sum already meets the bound — skip as well.
            indices[i + 1] += 1
            for j in range(i + 1):
                indices[j] = 0


def isCool(ps, pl, indices):
    """True if every pair of primes selected by *indices* satisfies isOkay."""
    for i in range(len(indices)):
        for j in range(i + 1, len(indices)):
            if not isOkay(ps, pl[indices[i]], pl[indices[j]]):
                return False
    return True


if __name__ == "__main__":
    limit = 674
    digits = 5
    nj = NumberJuggler(limit)
    pl = nj.primeList
    ps = set(pl)
    indexLimit = len(pl)
    foundMinimum = digits * limit
    foundGroup = None
    indices = list(range(digits))
    i = 0
    while not isFinished(indices, indexLimit):
        # BUGFIX: was `if not isCombination:` — testing the function object
        # itself, which is always truthy, so duplicate index tuples were
        # never skipped. Call it with the current indices.
        if not isCombination(indices):
            nextIndicesWithShort(indices, indexLimit, pl, foundMinimum)
            continue
        # Check if the higher indices are compatible with each other
        mustContinue = False
        for j in range(len(indices) - 2, 0, -1):
            if indices[j] == 0:
                if not isCool(ps, pl, indices[j:]):
                    # Skip all the indices below (and including) position j
                    for k in range(j + 1):
                        indices[k] = indexLimit - 1
                    nextIndicesWithShort(indices, indexLimit, pl, foundMinimum)
                    mustContinue = True
                    break
        if mustContinue:
            continue
        i += 1
        if i % 100000 == 0:
            # Periodic progress output (the old unconditional per-iteration
            # print was leftover debug noise and has been removed).
            print(indices)
        if isCool(ps, pl, indices):
            total = sum([pl[i] for i in indices])
            if total < foundMinimum:
                foundMinimum = total
                foundGroup = [pl[i] for i in indices]
                print("New foundMinimum:", foundMinimum)
                print("Group:", foundGroup)
        nextIndicesWithShort(indices, indexLimit, pl, foundMinimum)
    print("New foundMinimum:", foundMinimum)
    print("Group:", foundGroup)
3,005
977
class PassEnvironmentException(Exception): """A custom exception which passes the current environment and the current mode""" def __init__(self, environment=None, exit_interactive_mode=False, exit_single_command_mode=False): self.passed_environment = environment self.exit_interactive_mode = exit_interactive_mode self.exit_single_command_mode = exit_single_command_mode
404
104
#!/usr/bin/env python3 from collections import OrderedDict import glob import json import os import os.path import subprocess import lib argv = lib.cli_config() if len(argv) > 1: extension = argv[1] else: extension = None def update(package_json): os.chdir(os.path.dirname(package_json)) print(package_json.split('/')[-2]) updating = [] out = subprocess.check_output(['git', 'diff', '--name-only']).decode() if 'package.json' in out: print('WARNING: package.json has local changes') return with open(package_json, 'r') as f: j = json.load(f, object_pairs_hook=OrderedDict) for package, version in j['devDependencies'].items(): if lib.get_npm_version(package) != version: i = (package, version, lib.get_npm_version(package)) print('Updating %s: %s --> %s' % i) updating.append(i) j['devDependencies'][package] = lib.get_npm_version(package) if not updating: print('Nothing to update') return with open(package_json, 'w') as f: out = json.dumps(j, indent=' ') f.write(out + '\n') subprocess.call(['npm', 'install']) try: subprocess.check_call(['npm', 'test']) except subprocess.CalledProcessError: print('Error updating %s' % package_json) return msg = 'build: Updating development dependencies\n\n' for tup in updating: msg += '* %s: %s → %s\n' % tup print(msg) lib.commit_and_push(files=['package.json'], msg=msg, branch='master', topic='bump-dev-deps') if extension == 'mediawiki': packages = [os.path.join(lib.MEDIAWIKI_DIR, 'package.json')] else: packages = glob.glob(os.path.join(lib.EXTENSIONS_DIR, '*/package.json')) for package in sorted(packages): ext_name = package.split('/')[-2] if extension and extension != ext_name: continue update(package)
1,954
625
#!/path/to/interpreter
"""
PUT Call Integration Test
"""

# Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/

import base64
import allure
import unittest
from flask import json
from api.cars_app import app


@allure.epic('Simple Flask App')
@allure.parent_suite('REST API')
@allure.suite("Integration Tests")
@allure.sub_suite("Positive Tests")
@allure.feature("PUT")
@allure.story('Car Update')
class PutCarsCallTestCase(unittest.TestCase):
    """
    Testing a JSON API implemented in Flask.
    PUT Call Integration Test.

    PUT method requests for the enclosed entity be stored under the
    supplied Request-URI. If the Request-URI refers to an already
    existing resource – an update operation will happen, otherwise
    create operation should happen if Request-URI is a valid resource
    URI (assuming client is allowed to determine resource identifier).

    PUT method is idempotent. So if you send retry a request multiple
    times, that should be equivalent to single request modification.

    Use PUT when you want to modify a singular resource which is
    already a part of resources collection. PUT replaces the resource
    in its entirety.
    """

    def setUp(self) -> None:
        with allure.step("Prepare test data"):
            self.car_original = {"name": "Creta",
                                 "brand": "Hyundai",
                                 "price_range": "8-14 lacs",
                                 "car_type": "hatchback"}
            self.car_updated = {"name": "Creta",
                                "brand": "Hyundai",
                                "price_range": "6-9 lacs",
                                "car_type": "hatchback"}
            self.non_admin_user = {"name": "eric",
                                   "password": "testqxf2",
                                   "perm": "non_admin"}
            self.admin_user = {"name": "qxf2",
                               "password": "qxf2",
                               "perm": "admin"}

    def _put_update(self, user):
        """Send PUT /cars/update/<name> with `car_updated` as payload,
        basic-authenticated as `user`. Returns the Flask test response."""
        credentials = user['name'] + ":" + user['password']
        # Testing Flask application with basic authentication
        # Source: https://gist.github.com/jarus/1160696
        token = base64.b64encode(bytes(credentials, 'ascii')).decode('ascii')
        return app.test_client().put(
            '{}{}'.format('/cars/update/', self.car_original['name']),
            data=json.dumps(self.car_updated),
            content_type='application/json',
            headers={'Authorization': 'Basic ' + token})

    def _verify_update_response(self, response):
        """Shared assertions for a successful car-update response."""
        with allure.step("Verify status code"):
            self.assertEqual(200, response.status_code)
        with allure.step("Convert response into JSON data"):
            data = json.loads(response.get_data(as_text=True))
        with allure.step("Verify 'successful' flag"):
            self.assertTrue(data['response']['successful'])
        with allure.step("Verify updated car data"):
            self.assertDictEqual(self.car_updated, data['response']['car'])

    def test_put_cars_update_non_admin(self):
        """
        Test PUT call using non admin user credentials.
        :return:
        """
        allure.dynamic.title("Update car properties using "
                             "PUT call and non admin credentials")
        allure.dynamic.severity(allure.severity_level.NORMAL)

        with allure.step("Send PUT request"):
            response = self._put_update(self.non_admin_user)
        self._verify_update_response(response)

    def test_put_cars_update_admin(self):
        """
        Test PUT call using admin user credentials.
        :return:
        """
        allure.dynamic.title("Update car properties using "
                             "PUT call and admin credentials")
        allure.dynamic.severity(allure.severity_level.NORMAL)

        with allure.step("Send PUT request"):
            response = self._put_update(self.admin_user)
        self._verify_update_response(response)
4,821
1,678
import os, sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from robots import getAllowedAgents def test_uas(): # valid as of 2020/05/25 uas = getAllowedAgents() assert set(uas) == {'Googlebot', 'Applebot', 'Bingbot', 'DuckDuckBot', 'Naverbot', 'Twitterbot', 'Yandex'} if __name__ == '__main__': test_uas()
356
146
from JumpScale import j from .WatchdogFactory import * j.base.loader.makeAvailable(j, 'tools.watchdog') j.tools.watchdog.manager=WatchdogFactory()
150
51
from warren.api.statements import BalanceSheet
from warren.api.statements import IncomeStatement
from warren.api.statements import CashFlow
from warren.api.statements import FinancialPerformance

# Two identical dummy statement dicts: with equal figures every ratio
# under test (ROA, ROE, profit margin) comes out to exactly 1.0.
data = {
    'ebit': 100,
    'total_assets': 100,
    'net_income': 100,
    'total_stockholder_equity': 100,
    'gross_profit': 100,
    'total_revenue': 100,
    'year': 2016
}

previous_data = {
    'ebit': 100,
    'total_assets': 100,
    'net_income': 100,
    'total_stockholder_equity': 100,
    'gross_profit': 100,
    'total_revenue': 100,
    'year': 2016
}


class TestApi(object):
    """Smoke tests for the warren statements API."""

    def _single_year_fs(self):
        """FinancialPerformance built from one year of dummy statements.

        NOTE(review): statements are passed (income, balance, cash) here
        but (balance, income, cash) in _two_year_fs below, exactly as in
        the original tests -- one ordering is probably wrong; confirm
        against FinancialPerformance.__init__.
        """
        return FinancialPerformance(
            'TEST',
            [IncomeStatement(data)],
            [BalanceSheet(data)],
            [CashFlow(data)])

    def _two_year_fs(self):
        """FinancialPerformance built from two years of dummy statements."""
        bss = [BalanceSheet(data), BalanceSheet(previous_data)]
        ics = [IncomeStatement(data), IncomeStatement(previous_data)]
        cfs = [CashFlow(data), CashFlow(previous_data)]
        return FinancialPerformance('TEST', bss, ics, cfs)

    def test_init_balance_sheet(self):
        assert isinstance(BalanceSheet(data), BalanceSheet)

    def test_init_income_statement(self):
        assert isinstance(IncomeStatement(data), IncomeStatement)

    def test_init_cash_flow(self):
        assert isinstance(CashFlow(data), CashFlow)

    def test_init_financial_statements(self):
        assert isinstance(self._single_year_fs(), FinancialPerformance)

    def test_get_balance_sheet_by_year(self):
        fs = self._single_year_fs()
        assert isinstance(fs.get_balance_sheet_by_year(2016), BalanceSheet)

    def test_get_income_statement_by_year(self):
        fs = self._single_year_fs()
        assert isinstance(fs.get_income_statement_by_year(2016), IncomeStatement)

    def test_get_cash_flow_by_year(self):
        fs = self._single_year_fs()
        assert isinstance(fs.get_cash_flow_by_year(2016), CashFlow)

    def test_return_on_assets(self):
        assert self._two_year_fs().return_on_asset(2016) == 1.0

    def test_return_on_equity(self):
        assert self._two_year_fs().return_on_equity(2016) == 1.0

    def test_profit_margin(self):
        assert self._two_year_fs().profit_margin(2016) == 1.0

    def test_balance_sheet_compare(self):
        # Equality is value-based: identical dicts compare equal.
        assert BalanceSheet(data) == BalanceSheet(previous_data)

    def test_income_statement_compare(self):
        assert IncomeStatement(data) == IncomeStatement(previous_data)

    def test_cash_flow_compare(self):
        assert CashFlow(data) == CashFlow(previous_data)
4,411
1,368
# coding=utf-8
from unittest import TestCase

from descriptor_tools.instance_properties import (
    InstanceProperty)


class MockDelegatedProperty:
    """Minimal delegated-property stand-in that records whether its
    get()/set() hooks were invoked, so tests can assert the descriptor
    actually routed through the delegate."""

    def __init__(self, value):
        self.value = value
        # Flipped to True by get()/set() respectively.
        self.get_called = False
        self.set_called = False

    def set_meta(self, *args):
        # Metadata hook required by the InstanceProperty protocol;
        # irrelevant for these tests, so it ignores everything.
        pass

    def get(self):
        self.get_called = True
        return self.value

    def set(self, value):
        self.set_called = True
        self.value = value


class InstancePropertyTest(TestCase):
    """Behavioral tests for InstanceProperty's delegate protocol.

    The tests below imply the contract: reading before a delegate is
    assigned raises AttributeError; the first assignment must be a
    delegated property object; later assignments are forwarded to the
    delegate's set(); the delegate itself is apparently reachable as
    `instance._attr` (underscore-prefixed backing name) -- confirm
    against the InstanceProperty implementation.
    """

    class Class:
        # A class attribute backed by the descriptor under test.
        attr = InstanceProperty()

    def test_when_uninitialized_instance_when_calling_get_then_fails(self):
        instance = self.Class()
        with self.assertRaises(AttributeError):
            _ = instance.attr

    def test_given_initialized_instance_when_calling_get_then_gets_value(self):
        instance = self.Class()
        instance.attr = MockDelegatedProperty(5)

        result = instance.attr

        self.assertEqual(5, result)

    def test_given_initialized_instance_when_calling_get_then_gets_value_from_delegated_property(self):
        instance = self.Class()
        instance.attr = MockDelegatedProperty(5)

        _ = instance.attr

        # The delegate is stored under the mangled backing name `_attr`.
        delegprop = instance._attr
        self.assertTrue(delegprop.get_called)

    def test_given_uninitialized_instance_when_setting_a_non_delegate_then_raise_exception(self):
        instance = self.Class()
        # First assignment must be a delegated property, not a raw value.
        with self.assertRaises(AttributeError):
            instance.attr = 5

    def test_given_initialized_instance_when_calling__set__then_set_on_delegate(self):
        instance = self.Class()
        instance.attr = MockDelegatedProperty(1)

        instance.attr = 5

        self.assertTrue(
            instance._attr.set_called,
            "set not called on delegated property")

    def test_given_initialized_instance_when_calling__set__then_set_value(self):
        instance = self.Class()
        instance.attr = MockDelegatedProperty(1)

        instance.attr = 5

        self.assertEqual(5, instance.attr)

    def test_given_nondeletable_instance_property_when_delete_then_raise_error(self):
        instance = self.Class()
        instance.attr = MockDelegatedProperty(5)

        with self.assertRaises(AttributeError):
            del instance.attr

    def test_given_initialized_readonly_delegate_when_calling__set__then_raise_exception(self):
        class ReadOnlyAttrClass:
            attr = InstanceProperty(readonly=True)

            def __init__(self):
                # Initial delegate assignment is allowed even when readonly.
                self.attr = MockDelegatedProperty(5)

        instance = ReadOnlyAttrClass()

        # Subsequent writes through a readonly property must fail.
        with self.assertRaises(AttributeError):
            instance.attr = 1
2,718
791
import frappe def execute(): pass
36
13
#This code has been taken from https://github.com/mozilla/DeepSpeech/blob/master/data/lm/generate_lm.py
import argparse
import os
import subprocess
from tqdm import tqdm
import shutil


def convert_and_filter_topk(args):
    """Prepare the corpus copy and the lexicon for LM building.

    Copies ``args.input_txt`` into ``args.lm_dir`` as ``lower.txt.gz`` and
    converts ``lexicon.txt`` (word TAB pronunciation per line) into a
    ``lexicon.lst`` file of the form ``word TAB l e t t e r s |``.

    Returns (path of the copied corpus, newline-joined vocabulary string).

    NOTE(review): despite the upstream name, no lowercasing or top-k
    filtering happens here — the input corpus is copied verbatim.
    """
    lexicon_in_path = os.path.join(args.lm_dir, 'lexicon.txt')
    lexicon_out_path = os.path.join(args.lm_dir, 'lexicon.lst')

    data_lower = shutil.copyfile(
        args.input_txt, os.path.join(args.lm_dir, "lower.txt.gz"))

    with open(lexicon_in_path) as f:
        content_list = [line.replace("\n", "") for line in f.readlines()]

    vocab_lst = []
    with open(lexicon_out_path, "w+") as f:
        for line in tqdm(content_list):
            word = line.split('\t')[0]
            vocab_lst.append(word)
            # NOTE(review): replace("/n", "") is almost certainly a typo
            # for "\n", but newlines were already stripped above so both
            # are no-ops; kept verbatim to preserve output byte-for-byte.
            spelled = " ".join(
                list(word.replace("/n", "").replace(" ", "|").strip()))
            print(str(word + "\t" + spelled + " |"), file=f)

    vocab_str = "\n".join(vocab_lst)
    return data_lower, vocab_str


def build_lm(args, data_lower, vocab_str):
    """Run KenLM: lmplz -> filter (by vocab) -> build_binary."""
    print("\nCreating ARPA file ...")
    lm_path = os.path.join(args.lm_dir, "lm.arpa")
    subargs = [
        os.path.join(args.kenlm_bins, "lmplz"),
        "--order",
        str(args.arpa_order),
        "--temp_prefix",
        args.lm_dir,
        "--memory",
        args.max_arpa_memory,
        "--text",
        data_lower,
        "--arpa",
        lm_path,
        "--prune",
        *args.arpa_prune.split("|"),
    ]
    # BUG FIX: the original put "--discount_fallback" unconditionally in
    # the base argument list AND appended it again when the CLI flag was
    # set, so the --discount_fallback option had no effect (and was
    # duplicated when enabled).  It is now passed only when requested.
    if args.discount_fallback:
        subargs += ["--discount_fallback"]
    subprocess.check_call(subargs)

    # Filter LM using vocabulary of top-k words
    print("\nFiltering ARPA file using vocabulary of top-k words ...")
    filtered_path = os.path.join(args.lm_dir, "lm_filtered.arpa")
    subprocess.run(
        [
            os.path.join(args.kenlm_bins, "filter"),
            "single",
            "model:{}".format(lm_path),
            filtered_path,
        ],
        input=vocab_str.encode("utf-8"),
        check=True,
    )

    # Quantize and produce trie binary.
    print("\nBuilding lm.binary ...")
    binary_path = os.path.join(args.lm_dir, "lm.binary")
    subprocess.check_call(
        [
            os.path.join(args.kenlm_bins, "build_binary"),
            # "-a",
            # str(args.binary_a_bits),
            # "-q",
            # str(args.binary_q_bits),
            # "-v",
            # args.binary_type,
            filtered_path,
            binary_path,
        ]
    )


def main():
    """Parse CLI arguments, build the LM, and clean up intermediates."""
    parser = argparse.ArgumentParser(
        description="Generate lm.binary and lexicon.lst."
    )
    parser.add_argument(
        "--input_txt",
        help="Path to a file.txt or file.txt.gz with sample sentences",
        type=str,
        required=True,
    )
    parser.add_argument(
        "--lm_dir", help="Directory path for the lm", type=str, required=True
    )
    parser.add_argument(
        "--top_k",
        help="Use top_k most frequent words for the vocab.txt file. These will be used to filter the ARPA file.",
        type=int,
        required=True,
    )
    parser.add_argument(
        "--lexicon",
        help="Use top_k most frequent words for the lexicon.txt file. These will be used to create lexicon.lst file.",
        type=int,
        required=True,
    )
    parser.add_argument(
        "--kenlm_bins",
        help="File path to the KENLM binaries lmplz, filter and build_binary",
        type=str,
        required=True,
    )
    parser.add_argument(
        "--arpa_order",
        help="Order of k-grams in ARPA-file generation",
        type=int,
        required=True,
    )
    parser.add_argument(
        "--max_arpa_memory",
        help="Maximum allowed memory usage for ARPA-file generation",
        type=str,
        required=True,
    )
    parser.add_argument(
        "--arpa_prune",
        help="ARPA pruning parameters. Separate values with '|'",
        type=str,
        required=True,
    )
    parser.add_argument(
        "--binary_a_bits",
        help="Build binary quantization value a in bits",
        type=int,
        required=True,
    )
    parser.add_argument(
        "--binary_q_bits",
        help="Build binary quantization value q in bits",
        type=int,
        required=True,
    )
    parser.add_argument(
        "--binary_type",
        help="Build binary data structure type",
        type=str,
        required=True,
    )
    parser.add_argument(
        "--discount_fallback",
        help="To try when such message is returned by kenlm: 'Could not calculate Kneser-Ney discounts [...] rerun with --discount_fallback'",
        action="store_true",
    )

    args = parser.parse_args()

    data_lower, vocab_str = convert_and_filter_topk(args)
    build_lm(args, data_lower, vocab_str)

    # Delete intermediate files
    os.remove(os.path.join(args.lm_dir, "lower.txt.gz"))
    os.remove(os.path.join(args.lm_dir, "lm.arpa"))
    os.remove(os.path.join(args.lm_dir, "lm_filtered.arpa"))


if __name__ == "__main__":
    main()
5,252
1,726
import numpy as np
import prody as pr
from prody.measure.transform import calcRMSD
from scipy.spatial.distance import cdist
import itertools
from sklearn.neighbors import NearestNeighbors
from .vdmer import pair_wise_geometry_matrix


class Search_filter:
    """Bag of filter settings for a vdM search, plus static helpers for
    post-search geometry checks and steric-clash detection.

    The __init__ flags are consumed elsewhere (by the search code); only
    the static methods below contain logic.
    """

    def __init__(self, filter_abple = False, filter_phipsi = True, max_phipsi_val = 15,
        filter_vdm_score = False, min_vdm_score = 0, filter_vdm_count = False, min_vdm_clu_num = 20,
        after_search_filter_geometry = False, filter_based_geometry_structure = False,
        angle_tol = 5, aa_aa_tol = 0.5, aa_metal_tol = 0.2,
        pair_angle_range = None, pair_aa_aa_dist_range = None, pair_metal_aa_dist_range = None,
        after_search_filter_qt_clash = False, vdm_vdm_clash_dist = 2.7, vdm_bb_clash_dist = 2.2,
        after_search_open_site_clash = True, open_site_dist = 3.0,
        write_filtered_result = False, selfcenter_filter_member_phipsi = True):
        # Backbone-conformation filters (ABPLE classes / phi-psi window).
        self.filter_abple = filter_abple
        self.filter_phipsi = filter_phipsi
        self.max_phipsi_val = max_phipsi_val

        # vdM score / cluster-size filters.
        self.filter_vdm_score = filter_vdm_score
        self.min_vdm_score = min_vdm_score
        self.filter_vdm_count = filter_vdm_count
        self.min_vdm_clu_num = min_vdm_clu_num

        # Post-search geometry filtering and its tolerances/ranges.
        self.after_search_filter_geometry = after_search_filter_geometry
        self.filter_based_geometry_structure = filter_based_geometry_structure
        self.angle_tol = angle_tol
        self.aa_aa_tol = aa_aa_tol
        self.aa_metal_tol = aa_metal_tol
        self.pair_angle_range = pair_angle_range  # [90, 110]
        self.pair_aa_aa_dist_range = pair_aa_aa_dist_range  # [2.8, 3.4]
        self.pair_metal_aa_dist_range = pair_metal_aa_dist_range  # [2.0, 2.3]

        # Clash-check distances (vdM-vdM, vdM-backbone, open metal site).
        self.after_search_filter_qt_clash = after_search_filter_qt_clash
        self.vdm_vdm_clash_dist = vdm_vdm_clash_dist
        self.vdm_bb_clash_dist = vdm_bb_clash_dist
        self.after_search_open_site_clash = after_search_open_site_clash
        self.open_site_dist = open_site_dist

        self.write_filtered_result = write_filtered_result
        self.selfcenter_filter_member_phipsi = selfcenter_filter_member_phipsi

    def para2string(self):
        """Return a human-readable multi-line dump of the filter settings.

        NOTE(review): angle_tol / aa_aa_tol / aa_metal_tol are not included
        in the dump — possibly an oversight; confirm before relying on it.
        """
        parameters = "Filter parameters: \n"
        parameters += 'filter_abple: ' + str(self.filter_abple) + ' \n'
        parameters += 'filter_phipsi: ' + str(self.filter_phipsi) + ' \n'
        parameters += 'max_phipsi_val: ' + str(self.max_phipsi_val) + ' \n'
        parameters += 'filter_vdm_score: ' + str(self.filter_vdm_score) + ' \n'
        parameters += 'min_vdm_score: ' + str(self.min_vdm_score) + ' \n'
        parameters += 'filter_vdm_count: ' + str(self.filter_vdm_count) + ' \n'
        parameters += 'min_vdm_clu_num: ' + str(self.min_vdm_clu_num) + ' \n'
        parameters += 'after_search_filter_geometry: ' + str(self.after_search_filter_geometry) + ' \n'
        parameters += 'filter_based_geometry_structure: ' + str(self.filter_based_geometry_structure) + ' \n'
        parameters += 'pair_angle_range: ' + str(self.pair_angle_range) + ' \n'
        parameters += 'pair_aa_aa_dist_range: ' + str(self.pair_aa_aa_dist_range) + ' \n'
        parameters += 'pair_metal_aa_dist_range: ' + str(self.pair_metal_aa_dist_range) + ' \n'
        parameters += 'filter_qt_clash: ' + str(self.after_search_filter_qt_clash) + ' \n'
        parameters += 'vdm_vdm_clash_dist: ' + str(self.vdm_vdm_clash_dist) + ' \n'
        parameters += 'vdm_bb_clash_dist: ' + str(self.vdm_bb_clash_dist) + ' \n'
        parameters += 'after_search_open_site_clash: ' + str(self.after_search_open_site_clash) + ' \n'
        parameters += 'open_site_dist: ' + str(self.open_site_dist) + ' \n'
        parameters += 'write_filtered_result: ' + str(self.write_filtered_result) + ' \n'
        parameters += 'selfcenter_filter_member_phipsi: ' + str(self.selfcenter_filter_member_phipsi) + ' \n'
        return parameters

    @staticmethod
    def after_search_geo_pairwise_satisfied(combinfo, pair_angle_range = None, pair_aa_aa_dist_range = None, pair_metal_aa_dist_range = None):
        '''
        Check combinfo's pairwise angles/distances against the given ranges.
        range = (75, 125) for Zn. if all pairwise angle is between the range.
        The geometry is satisfied.

        Sets combinfo.pair_*_ok flags to -1 on failure (1 is preset only
        for the metal-aa check; the other two flags are only written on
        failure — NOTE(review): asymmetry kept as-is, confirm intent).
        Returns True when every requested range check passes.
        '''
        satisfied = True
        if pair_angle_range:
            for an in combinfo.angle_pair:
                if an < pair_angle_range[0] or an > pair_angle_range[1]:
                    combinfo.pair_angle_ok = -1
                    satisfied = False
                    break
        if pair_aa_aa_dist_range:
            for ad in combinfo.aa_aa_pair:
                if ad < pair_aa_aa_dist_range[0] or ad > pair_aa_aa_dist_range[1]:
                    combinfo.pair_aa_aa_dist_ok = -1
                    satisfied = False
                    break
        if pair_metal_aa_dist_range:
            combinfo.pair_aa_metal_dist_ok = 1
            for amd in combinfo.metal_aa_pair:
                if amd < pair_metal_aa_dist_range[0] or amd > pair_metal_aa_dist_range[1]:
                    combinfo.pair_aa_metal_dist_ok = -1
                    satisfied = False
                    break
        return satisfied

    @staticmethod
    def get_min_geo(geometry, geo_struct, metal_sel = 'name NI MN ZN CO CU MG FE' ):
        '''
        Find the atom-order permutation of geo_struct that best matches
        `geometry` after superposition (oxygens excluded from the fit).

        Metal must be the last atom in the prody object.
        Returns (best-permuted structure, its RMSD to `geometry`).
        '''
        aa_coords = geo_struct.select('not ' + metal_sel).getCoords()
        metal_coord = geo_struct.select(metal_sel).getCoords()[0]
        ct_len = len(aa_coords)

        min_rmsd = 0
        min_geo_struct = None
        # Brute-force every ordering of the non-metal coordinating atoms.
        for xs in itertools.permutations(range(ct_len), ct_len):
            _geo_struct = geo_struct.copy()
            coords = []
            for x in xs:
                coords.append(aa_coords[x])
            coords.append(metal_coord)
            _geo_struct.setCoords(np.array(coords))
            pr.calcTransformation(_geo_struct.select('not oxygen'), geometry).apply(_geo_struct)
            rmsd = pr.calcRMSD(_geo_struct.select('not oxygen'), geometry)
            if not min_geo_struct:
                min_geo_struct = _geo_struct
                min_rmsd = rmsd
            elif rmsd < min_rmsd:
                min_geo_struct = _geo_struct
                min_rmsd = rmsd
        return min_geo_struct, min_rmsd

    @staticmethod
    def after_search_geo_strcut_satisfied(comb_info, min_geo_struct, angle_tol, aa_aa_tol, aa_metal_tol):
        # Compare comb_info.geometry's pairwise distances/angles against
        # those of the ideal (min_geo_struct) within the given tolerances,
        # setting comb_info.pair_*_ok flags to 1/-1 as it goes.
        aa_aa_pair, metal_aa_pair, angle_pair = pair_wise_geometry_matrix(min_geo_struct)
        info_aa_aa_pair, info_metal_aa_pair, info_angle_pair = pair_wise_geometry_matrix(comb_info.geometry)

        satisfied = True

        comb_info.pair_aa_metal_dist_ok = 1
        for i in range(len(metal_aa_pair)):
            if info_metal_aa_pair[i] < metal_aa_pair[i] - aa_metal_tol or info_metal_aa_pair[i] > metal_aa_pair[i] + aa_metal_tol:
                comb_info.pair_aa_metal_dist_ok = -1
                satisfied = False
                break

        comb_info.pair_aa_aa_dist_ok = 1
        for i, j in itertools.combinations(range(aa_aa_pair.shape[0]), 2):
            if info_aa_aa_pair[i, j] < aa_aa_pair[i, j] - aa_aa_tol or info_aa_aa_pair[i, j] > aa_aa_pair[i, j] + aa_aa_tol:
                comb_info.pair_aa_aa_dist_ok = -1
                satisfied = False
                break

        comb_info.pair_angle_ok = 1
        for i, j in itertools.combinations(range(aa_aa_pair.shape[0]), 2):
            if info_angle_pair[i, j] < angle_pair[i, j] - angle_tol or info_angle_pair[i, j] > angle_pair[i, j] + angle_tol:
                comb_info.pair_angle_ok = -1
                satisfied = False
                break

        return satisfied

    @staticmethod
    def vdm_clash(vdms, target, vdm_vdm_clash_dist = 2.7, vdm_bb_clash_dist = 2.2, unsupperimposed = True, wins = None, align_sel = 'name N CA C'):
        '''
        clashing with sklearn.neighbors NearestNeighbors method.
        All sc except CB atom of vdm are used for clashing checking.
        All bb of target are used for clashing chekcing.
        If clash detected, return True.

        When `unsupperimposed` is True each vdM is first superimposed onto
        the target backbone at its window residue (wins[i]) using align_sel
        atoms; vdMs whose selections mismatch the target are skipped.
        '''
        coords = []
        for i in range(len(vdms)):
            vdm = vdms[i]
            if unsupperimposed:
                win = wins[i]
                target_sel = 'resindex ' + str(win) + ' and ' + align_sel
                query_sel = 'resindex ' + str(vdm.contact_resind) + ' and '+ align_sel
                if len(vdm.query.select(query_sel)) != len(target.select(target_sel)):
                    print('supperimpose_target_bb not happening')
                    continue
                # In-place transform: vdm.query is mutated by the alignment.
                transform = pr.calcTransformation(vdm.query.select(query_sel), target.select(target_sel))
                transform.apply(vdm.query)
            # Side-chain heavy atoms minus CB are the clash probes.
            vdm_sel = 'protein and heavy and sc and not name CB'
            coord = vdm.query.select(vdm_sel).getCoords()
            coords.append(coord)

        # vdM-vs-vdM clash: any neighbor within vdm_vdm_clash_dist.
        for i, j in itertools.combinations(range(len(coords)), 2):
            neigh_y = NearestNeighbors(radius= vdm_vdm_clash_dist)
            neigh_y.fit(coords[i])
            x_in_y = neigh_y.radius_neighbors(coords[j])
            x_has_y = any([True if len(a) >0 else False for a in x_in_y[1]])
            if x_has_y:
                return True

        # vdM-vs-target-backbone clash.
        bb_coord = target.select('protein and heavy and bb').getCoords()
        for i in range(len(coords)):
            neigh_y = NearestNeighbors(radius= vdm_bb_clash_dist)
            neigh_y.fit(bb_coord)
            x_in_y = neigh_y.radius_neighbors(coords[i])
            x_has_y = any([True if len(a) >0 else False for a in x_in_y[1]])
            if x_has_y:
                return True
        return False

    @staticmethod
    def open_site_clashing(vdms, target, ideal_geo, open_site_dist = 3.0):
        '''
        The open site of ideal_geo must be Oxygen, the other atom could not be Oxygen.
        If clash detected, return True.

        Checks whether any vdM side-chain atom or target backbone atom
        sits within open_site_dist of the open-site oxygen.
        '''
        ideal_geo_coord = [ideal_geo.select('oxygen')[0].getCoords()]

        coords = []
        for i in range(len(vdms)):
            vdm = vdms[i]
            vdm_sel = 'protein and heavy and sc and not name CB'
            coord = vdm.query.select(vdm_sel).getCoords()
            coords.extend(coord)
        bb_coord = target.select('protein and heavy and bb').getCoords()
        coords.extend(bb_coord)

        neigh_y = NearestNeighbors(radius= open_site_dist)
        neigh_y.fit(coords)
        x_in_y = neigh_y.radius_neighbors(ideal_geo_coord)
        x_has_y = any([True if len(a) >0 else False for a in x_in_y[1]])
        if x_has_y:
            return True
        return False
10,712
3,723
import copy import logging from sklearn.base import ClassifierMixin from sklearn.metrics import adjusted_rand_score as ARI from sklearn.mixture import GaussianMixture from ucsl.base import * from ucsl.utils import * class UCSL_C(BaseEM, ClassifierMixin): """ucsl classifier. Implementation of Mike Tipping"s Relevance Vector Machine for classification using the scikit-learn API. Parameters ---------- clustering : string or object, optional (default="gaussian_mixture") Clustering method for the Expectation step, If not specified, "gaussian_mixture" (spherical by default) will be used. It must be one of "k_means", "gaussian_mixture" It can also be a sklearn-like object with fit, predict and fit_predict methods. maximization ; string or object, optional (default="lr") Classification method for the maximization step, If not specified, "lr" (Logistic Regression) will be used. It must be one of "k_means", "gaussian_mixture" It can also be a sklearn-like object with fit and predict methods; coef_ and intercept_ attributes. negative_weighting : string, optional (default="soft") negative samples weighting applied during the Maximization step, If not specified, UCSL original "soft" will be used. It must be one of "uniform", "soft", "hard". ie : the importance weight of non-clustered samples in the sub-classifiers estimation positive_weighting : string, optional (default="hard") positive samples weighting applied during the Maximization step, If not specified, UCSL original "hard" will be used. It must be one of "uniform", "soft", "hard". ie : the importance weight of clustered samples in the sub-classifiers estimation n_clusters : int, optional (default=2) numbers of subtypes we are assuming (equal to K in UCSL original paper) If not specified, the value of 2 will be used. Must be > 1. label_to_cluster : int, optional (default=1) which label we are clustering into subgroups If not specified, the value of 1 will be used. 
ie : label_to_cluster is similar to "positive class" in UCSL original paper Must be 0 or 1. n_iterations : int, optional (default=10) numbers of Expectation-Maximization step performed per consensus run If not specified, the value of 10 will be used. Must be > 1. n_consensus : int, optional (default=10) numbers of Expectation-Maximization loops performed before ensembling of all the clusterings If not specified, the value of 10 will be used. Must be > 1. stability_threshold : float, optional (default=0.9) Adjusted rand index threshold between 2 successive iterations clustering If not specified, the value of 0.9 will be used. Must be between 0 and 1. noise_tolerance_threshold : float, optional (default=10) Threshold tolerance in graam-schmidt algorithm Given an orthogonalized vector, if its norm is inferior to 1 / noise_tolerance_threshold, we do not add it to the orthonormalized basis. Must be > 0. """ def __init__(self, stability_threshold=0.9, noise_tolerance_threshold=10, n_consensus=10, n_iterations=10, n_clusters=2, label_to_cluster=1, clustering='gaussian_mixture', maximization='logistic', negative_weighting='soft', positive_weighting='hard', training_label_mapping=None): super().__init__(clustering=clustering, maximization=maximization, stability_threshold=stability_threshold, noise_tolerance_threshold=noise_tolerance_threshold, n_consensus=n_consensus, n_iterations=n_iterations) # define the number of clusters needed self.n_clusters = n_clusters # define which label we want to cluster self.label_to_cluster = label_to_cluster # define the mapping of labels before fitting the algorithm # for example, one may want to merge 2 labels together before fitting to check if clustering separate them well if training_label_mapping is None: self.training_label_mapping = {label: label for label in range(2)} else: self.training_label_mapping = training_label_mapping # define what are the weightings we want for each label assert (negative_weighting in ['hard', 'soft', 
'uniform']), \ "negative_weighting must be one of 'hard', 'soft'" assert (positive_weighting in ['hard', 'soft', 'uniform']), \ "positive_weighting must be one of 'hard', 'soft'" self.negative_weighting = negative_weighting self.positive_weighting = positive_weighting # store directions from the Maximization method and store intercepts self.coefficients = {cluster_i: [] for cluster_i in range(self.n_clusters)} self.intercepts = {cluster_i: [] for cluster_i in range(self.n_clusters)} # store intermediate and consensus results in dictionaries self.cluster_labels_ = None self.clustering_assignments = None # define barycenters saving dictionaries self.barycenters = None # define orthonormal directions basis and clustering methods at each consensus step self.orthonormal_basis = {c: {} for c in range(n_consensus)} self.clustering_method = {c: {} for c in range(n_consensus)} def fit(self, X_train, y_train): """Fit the ucsl model according to the given training data. Parameters ---------- X_train : array-like, shape (n_samples, n_features) Training vectors. y_train : array-like, shape (n_samples,) Target values. Returns ------- self """ # apply label mapping (in our case we merged "BIPOLAR" and "SCHIZOPHRENIA" into "MENTAL DISEASE" for our xp) y_train_copy = y_train.copy() for original_label, new_label in self.training_label_mapping.items(): y_train_copy[y_train == original_label] = new_label # run the algorithm self.run(X_train, y_train_copy, idx_outside_polytope=self.label_to_cluster) return self def predict(self, X, y_true=None): """Predict classification and clustering using the UCSL model. Parameters ---------- X : array-like, shape (n_samples, n_features) Query points to be evaluate. y_true : array-like, shape (n_samples, n_features) Ground truth classification labels. Returns ------- y_pred_clsf // y_true : array, shape (n_samples,) Predictions of the classification binary task of the query points if y_true is None. 
Returns y_true if y_true is not None y_pred : array, shape (n_samples,) Predictions of the clustering task of the query points. BEWARE : if y_true is not None, clustering prediction of samples considered "negative" (with classification ground truth label different than label_to_cluster) are set to -1. BEWARE : if y_true is None, clustering predictions of samples considered "negative" (when classification prediction different than label_to_cluster) are set to -1. """ y_pred_proba_clsf = self.predict_classif_proba(X) y_pred_clsf = np.argmax(y_pred_proba_clsf, 1) y_pred_proba_clusters = self.predict_clusters(X) y_pred_clusters = np.argmax(y_pred_proba_clusters, 1) if y_true is None : y_pred_clusters[y_pred_clsf == (1 - self.label_to_cluster)] = -1 return y_pred_clsf, y_pred_clusters else : y_pred_clusters[y_true == (1 - self.label_to_cluster)] = -1 return y_true, y_pred_clusters def predict_proba(self, X, y_true=None): """Predict using the ucsl model. Parameters ---------- X : array-like, shape (n_samples, n_features) Query points to be evaluate. Returns ------- y_pred_clsf : array, shape (n_samples,) Probabailistic predictions of the classification binary task of the query points. y_pred : array, shape (n_samples,) Probabilistic predictions of the clustering task of the query points. BEWARE : if y_true is not None, clustering prediction of samples considered "negative" (with classification ground truth label different than label_to_cluster) are set to -1. BEWARE : if y_true is None, clustering predictions of samples considered "negative" (when classification prediction different than label_to_cluster) are set to -1. 
""" y_pred_proba_clsf = self.predict_classif_proba(X) y_pred_clsf = np.argmax(y_pred_proba_clsf, 1) y_pred_proba_clusters = self.predict_clusters(X) if y_true is None : y_pred_proba_clusters[y_pred_clsf == (1 - self.label_to_cluster)] = -1 return y_pred_clsf, y_pred_proba_clusters else : y_pred_proba_clusters[y_true == (1 - self.label_to_cluster)] = -1 return y_true, y_pred_proba_clusters def predict_classif_proba(self, X): """Predict using the ucsl model. Parameters ---------- X : array-like, shape (n_samples, n_features) Query points to be evaluate. Returns ------- y_pred : array, shape (n_samples, n_labels) Predictions of the probabilities of the query points belonging to labels. """ y_pred = np.zeros((len(X), 2)) distances_to_hyperplanes = self.compute_distances_to_hyperplanes(X) # compute the predictions \w.r.t cluster previously found cluster_predictions = self.predict_clusters(X) y_pred[:, self.label_to_cluster] = sum( [cluster_predictions[:, cluster] * distances_to_hyperplanes[:, cluster] for cluster in range(self.n_clusters)]) # compute probabilities \w sigmoid y_pred[:, self.label_to_cluster] = sigmoid( y_pred[:, self.label_to_cluster] / np.max(y_pred[:, self.label_to_cluster])) y_pred[:, 1 - self.label_to_cluster] = 1 - y_pred[:, self.label_to_cluster] return y_pred def compute_distances_to_hyperplanes(self, X): """Predict using the ucsl model. Parameters ---------- X : array-like, shape (n_samples, n_features) Query points to be evaluate. Returns ------- SVM_distances : dict of array, length (n_labels) , shape of element (n_samples, n_clusters[label]) Predictions of the point/hyperplane margin for each cluster of each label. 
""" # first compute points distances to hyperplane distances_to_hyperplanes = np.zeros((len(X), self.n_clusters)) for cluster_i in range(self.n_clusters): coefficient = self.coefficients[cluster_i] intercept = self.intercepts[cluster_i] distances_to_hyperplanes[:, cluster_i] = X @ coefficient[0] + intercept[0] return distances_to_hyperplanes def predict_clusters(self, X): """Predict clustering for each label in a hierarchical manner. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors. Returns ------- cluster_predictions : dict of arrays, length (n_labels) , shape per key:(n_samples, n_clusters[key]) Dict containing clustering predictions for each label, the dictionary keys are the labels """ X_proj = X @ self.orthonormal_basis[-1].T Q_distances = np.zeros((len(X_proj), self.n_clusters)) for cluster in range(self.n_clusters): if X_proj.shape[1] > 1: Q_distances[:, cluster] = np.sum(np.abs(X_proj - self.barycenters[cluster]), 1) else: Q_distances[:, cluster] = (X_proj - self.barycenters[cluster][None, :])[:, 0] y_pred_proba_clusters = Q_distances / np.sum(Q_distances, 1)[:, None] return y_pred_proba_clusters def run(self, X, y, idx_outside_polytope): # set label idx_outside_polytope outside of the polytope by setting it to positive labels y_polytope = np.copy(y) # if label is inside of the polytope, the distance is negative and the label is not divided into y_polytope[y_polytope != idx_outside_polytope] = -1 # if label is outside of the polytope, the distance is positive and the label is clustered y_polytope[y_polytope == idx_outside_polytope] = 1 index_positives = np.where(y_polytope == 1)[0] # index for Positive labels (outside polytope) index_negatives = np.where(y_polytope == -1)[0] # index for Negative labels (inside polytope) n_consensus = self.n_consensus # define the clustering assignment matrix (each column correspond to one consensus run) self.clustering_assignments = np.zeros((len(index_positives), n_consensus)) for 
consensus in range(n_consensus): # first we initialize the clustering matrix S, with the initialization strategy set in self.initialization S, cluster_index = self.initialize_clustering(X, y_polytope, index_positives) if self.negative_weighting in ['uniform']: S[index_negatives] = 1 / self.n_clusters elif self.negative_weighting in ['hard']: S[index_negatives] = np.rint(S[index_negatives]) if self.positive_weighting in ['hard']: S[index_positives] = np.rint(S[index_positives]) cluster_index = self.run_EM(X, y_polytope, S, cluster_index, index_positives, index_negatives, consensus) # update the cluster index for the consensus clustering self.clustering_assignments[:, consensus] = cluster_index if n_consensus > 1: self.clustering_ensembling(X, y_polytope, index_positives, index_negatives) def initialize_clustering(self, X, y_polytope, index_positives): """Perform a bagging of the previously obtained clusterings and compute new hyperplanes. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors. y_polytope : array-like, shape (n_samples,) Target values. index_positives : array-like, shape (n_positives_samples,) indexes of the positive labels being clustered Returns ------- S : array-like, shape (n_samples, n_samples) Cluster prediction matrix. 
""" S = np.ones((len(y_polytope), self.n_clusters)) / self.n_clusters if self.clustering in ["k_means"]: KM = KMeans(n_clusters=self.n_clusters, init="random", n_init=1).fit(X[index_positives]) S = one_hot_encode(KM.predict(X)) if self.clustering in ["gaussian_mixture"]: GMM = GaussianMixture(n_components=self.n_clusters, init_params="random", n_init=1, covariance_type="spherical").fit(X[index_positives]) S = GMM.predict_proba(X) else: custom_clustering_method_ = copy.deepcopy(self.clustering) S_positives = custom_clustering_method_.fit_predict(X[index_positives]) S_distances = np.zeros((len(X), np.max(S_positives) + 1)) for cluster in range(np.max(S_positives) + 1): S_distances[:, cluster] = np.sum( np.abs(X - np.mean(X[index_positives][S_positives == cluster], 0)[None, :]), 1) S_distances /= np.sum(S_distances, 1)[:, None] S = 1 - S cluster_index = np.argmax(S[index_positives], axis=1) return S, cluster_index def maximization_step(self, X, y_polytope, S): if self.maximization == "svc": for cluster in range(self.n_clusters): cluster_assignment = np.ascontiguousarray(S[:, cluster]) SVM_coefficient, SVM_intercept = launch_svc(X, y_polytope, cluster_assignment) self.coefficients[cluster] = SVM_coefficient self.intercepts[cluster] = SVM_intercept elif self.maximization == "lr": for cluster in range(self.n_clusters): cluster_assignment = np.ascontiguousarray(S[:, cluster]) logistic_coefficient, logistic_intercept = launch_logistic(X, y_polytope, cluster_assignment) self.coefficients[cluster] = logistic_coefficient self.intercepts[cluster] = logistic_intercept else: for cluster in range(self.n_clusters): cluster_assignment = np.ascontiguousarray(S[:, cluster]) self.maximization.fit(X, y_polytope, sample_weight=cluster_assignment) self.coefficients[cluster] = self.maximization.coef_ self.intercepts[cluster] = self.maximization.intercept_ def expectation_step(self, X, S, index_positives, consensus): """Update clustering method (update clustering distribution matrix S). 
Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors. S : array-like, shape (n_samples, n_samples) Cluster prediction matrix. index_positives : array-like, shape (n_positives_samples,) indexes of the positive labels being clustered consensus : int which consensus is being run ? Returns ------- S : array-like, shape (n_samples, n_samples) Cluster prediction matrix. cluster_index : array-like, shape (n_positives_samples, ) clusters predictions argmax for positive samples. """ # get directions basis directions_basis = [] for cluster in range(self.n_clusters): directions_basis.extend(self.coefficients[cluster]) norm_directions = [np.linalg.norm(direction) for direction in directions_basis] directions_basis = np.array(directions_basis) / np.array(norm_directions)[:, None] # apply graam-schmidt algorithm orthonormalized_basis = self.graam_schmidt(directions_basis) self.orthonormal_basis[consensus] = orthonormalized_basis self.orthonormal_basis[-1] = np.array(orthonormalized_basis).copy() X_proj = X @ self.orthonormal_basis[consensus].T # get centroids or barycenters centroids = [np.mean(S[index_positives, cluster][:, None] * X_proj[index_positives, :], 0) for cluster in range(self.n_clusters)] if self.clustering == 'k_means': self.clustering_method[consensus] = KMeans(n_clusters=self.n_clusters, init=np.array(centroids), n_init=1).fit(X_proj[index_positives]) Q_positives = self.clustering_method[consensus].fit_predict(X_proj[index_positives]) Q_distances = np.zeros((len(X_proj), np.max(Q_positives) + 1)) for cluster in range(np.max(Q_positives) + 1): Q_distances[:, cluster] = np.sum( np.abs(X_proj - self.clustering_method[consensus].cluster_centers_[cluster]), 1) Q_distances = Q_distances / np.sum(Q_distances, 1)[:, None] Q = 1 - Q_distances elif self.clustering == 'gaussian_mixture': self.clustering_method[consensus] = GaussianMixture(n_components=self.n_clusters, covariance_type="spherical", means_init=np.array(centroids)).fit( 
X_proj[index_positives]) Q = self.clustering_method[consensus].predict_proba(X_proj) self.clustering_method[-1] = copy.deepcopy(self.clustering_method[consensus]) else: self.clustering_method[consensus] = copy.deepcopy(self.clustering) Q_positives = self.clustering_method[consensus].fit_predict(X_proj[index_positives]) Q_distances = np.zeros((len(X_proj), np.max(Q_positives) + 1)) for cluster in range(np.max(Q_positives) + 1): Q_distances[:, cluster] = np.sum( np.abs(X_proj - np.mean(X_proj[index_positives][Q_positives == cluster], 0)[None, :]), 1) Q_distances = Q_distances / np.sum(Q_distances, 1)[:, None] Q = 1 - Q_distances # define matrix clustering S = Q.copy() cluster_index = np.argmax(Q[index_positives], axis=1) return S, cluster_index def graam_schmidt(self, directions_basis): # compute the most important vectors because Graam-Schmidt is not invariant by permutation when the matrix is not square scores = [] for i, direction_i in enumerate(directions_basis): scores_i = [] for j, direction_j in enumerate(directions_basis): if i != j: scores_i.append(np.linalg.norm(direction_i - (np.dot(direction_i, direction_j) * direction_j))) scores.append(np.mean(scores_i)) directions = directions_basis[np.array(scores).argsort()[::-1], :] # orthonormalize coefficient/direction basis basis = [] for v in directions: w = v - np.sum(np.dot(v, b) * b for b in basis) if len(basis) >= 2: if np.linalg.norm(w) * self.noise_tolerance_threshold > 1: basis.append(w / np.linalg.norm(w)) elif np.linalg.norm(w) > 1e-2: basis.append(w / np.linalg.norm(w)) return np.array(basis) def run_EM(self, X, y_polytope, S, cluster_index, index_positives, index_negatives, consensus): """Perform a bagging of the previously obtained clustering and compute new hyperplanes. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors. y_polytope : array-like, shape (n_samples,) Target values. S : array-like, shape (n_samples, n_samples) Cluster prediction matrix. 
cluster_index : array-like, shape (n_positives_samples, ) clusters predictions argmax for positive samples. index_positives : array-like, shape (n_positives_samples,) indexes of the positive labels being clustered index_negatives : array-like, shape (n_positives_samples,) indexes of the positive labels being clustered consensus : int index of consensus Returns ------- S : array-like, shape (n_samples, n_samples) Cluster prediction matrix. """ best_cluster_consistency = 1 if consensus == -1: save_stabler_coefficients = True consensus = self.n_consensus + 1 best_cluster_consistency = 0 for iteration in range(self.n_iterations): # check for degenerate clustering for positive labels (warning) and negatives (might be normal) for cluster in range(self.n_clusters): if np.count_nonzero(S[index_positives, cluster]) == 0: logging.debug("Cluster dropped, one cluster have no positive points anymore, in iteration : %d" % ( iteration - 1)) logging.debug("Re-initialization of the clustering...") S, cluster_index = self.initialize_clustering(X, y_polytope, index_positives) if np.max(S[index_negatives, cluster]) < 0.5: logging.debug( "Cluster too far, one cluster have no negative points anymore, in consensus : %d" % ( iteration - 1)) logging.debug("Re-distribution of this cluster negative weight to 'all'...") S[index_negatives, cluster] = 1 / self.n_clusters # re-init directions for each clusters self.coefficients = {cluster_i: [] for cluster_i in range(self.n_clusters)} self.intercepts = {cluster_i: [] for cluster_i in range(self.n_clusters)} # run maximization step self.maximization_step(X, y_polytope, S) # decide the convergence based on the clustering stability S_hold = S.copy() S, cluster_index = self.expectation_step(X, S, index_positives, consensus) # applying the negative weighting set as input if self.negative_weighting in ['uniform']: S[index_negatives] = 1 / self.n_clusters elif self.negative_weighting in ['hard']: S[index_negatives] = np.rint(S[index_negatives]) if 
self.positive_weighting in ['hard']: S[index_positives] = np.rint(S[index_positives]) # check the Clustering Stability \w Adjusted Rand Index for stopping criteria cluster_consistency = ARI(np.argmax(S[index_positives], 1), np.argmax(S_hold[index_positives], 1)) if cluster_consistency > best_cluster_consistency: best_cluster_consistency = cluster_consistency self.coefficients[-1] = copy.deepcopy(self.coefficients) self.intercepts[-1] = copy.deepcopy(self.intercepts) self.orthonormal_basis[-1] = copy.deepcopy(self.orthonormal_basis[consensus]) self.clustering_method[-1] = copy.deepcopy(self.clustering_method[consensus]) if cluster_consistency > self.stability_threshold: break return cluster_index def predict_clusters_proba_from_cluster_labels(self, X): """Predict positive and negative points clustering probabilities. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors. Returns ------- S : array-like, shape (n_samples, n_samples) Cluster prediction matrix. """ X_clustering_assignments = np.zeros((len(X), self.n_consensus)) for consensus in range(self.n_consensus): X_proj = X @ self.orthonormal_basis[consensus].T if self.clustering in ['k_means', 'gaussian_mixture']: X_clustering_assignments[:, consensus] = self.clustering_method[consensus].predict(X_proj) else: X_clustering_assignments[:, consensus] = self.clustering_method[consensus].fit_predict(X_proj) similarity_matrix = compute_similarity_matrix(self.clustering_assignments, clustering_assignments_to_pred=X_clustering_assignments) Q = np.zeros((len(X), self.n_clusters)) y_clusters_train_ = self.cluster_labels_ for cluster in range(self.n_clusters): Q[:, cluster] = np.mean(similarity_matrix[y_clusters_train_ == cluster], 0) Q /= np.sum(Q, 1)[:, None] return Q def clustering_ensembling(self, X, y_polytope, index_positives, index_negatives): """Perform a bagging of the previously obtained clustering and compute new hyperplanes. 
Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors. y_polytope : array-like, shape (n_samples,) Modified target values. index_positives : array-like, shape (n_positives_samples,) indexes of the positive labels being clustered index_negatives : array-like, shape (n_positives_samples,) indexes of the positive labels being clustered Returns ------- None """ # perform consensus clustering consensus_cluster_index = compute_spectral_clustering_consensus(self.clustering_assignments, self.n_clusters) # save clustering predictions computed by bagging step self.cluster_labels_ = consensus_cluster_index # update clustering matrix S S = self.predict_clusters_proba_from_cluster_labels(X) if self.negative_weighting in ['uniform']: S[index_negatives] = 1 / self.n_clusters elif self.negative_weighting in ['hard']: S[index_negatives] = np.rint(S[index_negatives]) if self.positive_weighting in ['hard']: S[index_positives] = np.rint(S[index_positives]) cluster_index = self.run_EM(X, y_polytope, S, consensus_cluster_index, index_positives, index_negatives, -1) # save barycenters and final predictions self.cluster_labels_ = cluster_index X_proj = X @ self.orthonormal_basis[-1].T self.barycenters = [ np.mean(X_proj[index_positives][cluster_index == cluster], 0)[None, :] for cluster in range(np.max(cluster_index) + 1)]
29,199
8,684
"""
Question 0005: Given a directory full of photos, resize them all so that
none exceeds the iPhone 5 screen resolution (1136 x 640).
"""
from PIL import Image
import os.path


def Size(dirPath, size_x, size_y):
    """Shrink every .jpg in *dirPath* in place to fit within size_x x size_y.

    Parameters
    ----------
    dirPath : str
        Directory to scan (non-recursive).
    size_x, size_y : int
        Maximum width / height in pixels.
    """
    for name in os.listdir(dirPath):
        # Only JPEG files are processed; everything else is skipped.
        # .lower() also accepts '.JPG' etc.
        if os.path.splitext(name)[1].lower() != '.jpg':
            continue
        # os.listdir returns bare names; the original opened `name` directly,
        # which only worked when cwd == dirPath. Join with the directory.
        path = os.path.join(dirPath, name)
        img = Image.open(path)
        # thumbnail() resizes in place, preserves aspect ratio, and never
        # enlarges an image that already fits.
        img.thumbnail((size_x, size_y))
        img.save(path)
        print(name)


Size('D:\PyCharm 2017.1.3\projects', 1136, 640)
400
206
import tkinter as tk

# Demo of a single-line Entry widget feeding a multi-line Text widget.
root = tk.Tk()
root.title("输入框、文本框")
root.geometry("500x300")  # window size

# show=None displays characters as typed; show="*" would mask them
# (password style).
entry = tk.Entry(root, show=None)
entry.pack()


def insert_point():
    # Insert the entry's contents at the text widget's current cursor.
    textbox.insert("insert", entry.get())


def insert_end():
    # Append the entry's contents at the end of the text widget.
    textbox.insert("end", entry.get())


btn_point = tk.Button(root, text="insert point", width=15, height=2,
                      command=insert_point)
btn_point.pack()

btn_end = tk.Button(root, text="insert end", width=15, height=2,
                    command=insert_end)
btn_end.pack()

# Text area the buttons write into; packed last so it sits below the buttons.
textbox = tk.Text(root, height=2)
textbox.pack()

root.mainloop()

# print(len("我我我"))
672
349
import lexical as lx
import codecs
from random import shuffle

# Output csv header shared by both the 'good' and 'bad' feature files.
HEADER = 'tld,dot_num,avg_host,max_host,avg_path,max_path,class\n'

l = lx.lexical()


def _extract_rows(in_path, label):
    """Read one parsed-URL csv and return its lexical-feature rows.

    in_path : path to a csv whose columns are hostname, tld, <unused>, path
    label   : class label appended to every row ('good' or 'bad')
    """
    with codecs.open(in_path, encoding='utf-8') as f:
        arr = f.read().split('\n')
    del arr[0]    # drop the input header line
    del arr[-1]   # drop the empty string after the trailing newline
    rows = []
    for line in arr:
        comp = line.split(',')
        hostname, tld, path = comp[0], comp[1], comp[3]
        dot_num, avg_host, max_host, avg_path, max_path = l.lexical(hostname, path)
        rows.append(','.join([tld, str(dot_num), str(avg_host), str(max_host),
                              str(avg_path), str(max_path), label]))
    return rows


def _write_features(out_path, rows):
    """Write HEADER plus newline-joined rows to *out_path* (utf-8)."""
    with codecs.open(out_path, mode='w', encoding='utf-8') as f:
        f.write(HEADER + '\n'.join(rows))


# The original duplicated this whole pipeline for good.csv and bad.csv;
# the helpers above produce byte-identical output files.
_write_features('lexical_good.csv', _extract_rows('../parse/good.csv', 'good'))
_write_features('lexical_bad.csv', _extract_rows('../parse/bad.csv', 'bad'))
1,399
611
#!/usr/bin/env python # coding: utf-8 # Copyright (C) USC Information Sciences Institute # Author: Vladimir M. Zaytsev <zaytsev@usc.edu> # URL: <http://nlg.isi.edu/> # For more information, see README.md # For license information, see LICENSE import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mokujin.web.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv)
477
173
"""~google <search term> will return three results from the google search for <search term>""" import re import requests from random import shuffle from googleapiclient.discovery import build import logging my_api_key = "Your API Key(Link: https://console.developers.google.com/apis/dashboard)" my_cse_id = "Your Custom Search Engine ID(Link: https://cse.google.co.in/cse/)" """fuction to fetch data from Google Custom Search Engine API""" def google(searchterm, api_key, cse_id, **kwargs): service = build("customsearch", "v1", developerKey=api_key, cache_discovery=False) res = service.cse().list(q=searchterm, cx=cse_id, **kwargs).execute() return res['items'] """fuction to return first three search results""" def google_search(searchterm): results = google(searchterm, my_api_key, my_cse_id, num=10) length = len(results) retval = "" if length < 3: for index in range(length): retval += results[index]['link'] + "\n" else: for index in range(3): retval += results[index]['link'] + "\n" return retval def on_message(msg, server): text = msg.get("text", "") match = re.findall(r"~google (.*)", text) if not match: return searchterm = match[0] return google_search(searchterm) on_bot_message = on_message
1,321
423
# Virtual Adversarial Training (VAT) helper ops for a TF1-style graph
# codebase. All functions build graph nodes; nothing executes eagerly.
import tensorflow as tf
import tensorbayes as tb
import numpy as np
from codebase.args import args
from tensorbayes.tfutils import softmax_cross_entropy_with_two_logits as softmax_xent_two
from tensorflow.contrib.framework import add_arg_scope


@add_arg_scope
def normalize_perturbation(d, scope=None):
    # L2-normalize each sample of `d` over all non-batch axes, so the
    # perturbation direction has unit norm per example.
    with tf.name_scope(scope, 'norm_pert'):
        output = tf.nn.l2_normalize(d, axis=list(range(1, len(d.shape))))
    return output


@add_arg_scope
def scale_gradient(x, scale, scope=None, reuse=None):
    # Forward pass is the identity (value == x); the backward gradient is
    # multiplied by `scale` because only the `scale * x` term is
    # differentiable — tf.stop_gradient blocks the rest.
    # NOTE(review): the `scope` and `reuse` parameters are accepted but
    # unused here (the name_scope is hard-coded) — confirm intent.
    with tf.name_scope('scale_grad'):
        output = (1 - scale) * tf.stop_gradient(x) + scale * x
    return output


@add_arg_scope
def noise(x, std, phase, scope=None, reuse=None):
    # Add Gaussian noise (stddev `std`) when `phase` is True (training);
    # pass `x` through unchanged otherwise.
    # NOTE(review): `reuse` is accepted but unused — confirm intent.
    with tf.name_scope(scope, 'noise'):
        eps = tf.random_normal(tf.shape(x), 0.0, std)
        output = tf.where(phase, x + eps, x)
    return output


@add_arg_scope
def leaky_relu(x, a=0.2, name=None):
    # max(x, a*x): identity for x >= 0, slope `a` for x < 0.
    with tf.name_scope(name, 'leaky_relu'):
        return tf.maximum(x, a * x)


@add_arg_scope
def basic_accuracy(a, b, scope=None):
    # Mean agreement of the argmax classes of two (batch, n_classes)
    # score/label tensors.
    with tf.name_scope(scope, 'basic_acc'):
        a = tf.argmax(a, 1)
        b = tf.argmax(b, 1)
        eq = tf.cast(tf.equal(a, b), 'float32')
        output = tf.reduce_mean(eq)
    return output


@add_arg_scope
def perturb_image(x, p, classifier, pert='vat', scope=None):
    # Build the VAT adversarial example for `x`: find the per-sample
    # direction that most increases the classifier's divergence from its
    # own prediction `p`, and step `args.radius` along it.
    # NOTE(review): `pert` is accepted but unused in this body — presumably
    # a selector for alternative perturbation schemes; confirm.
    with tf.name_scope(scope, 'perturb_image'):
        eps = 1e-6 * normalize_perturbation(tf.random_normal(shape=tf.shape(x)))

        # Predict on randomly perturbed image
        eps_p = classifier(x + eps, phase=True, reuse=True)
        loss = softmax_xent_two(labels=p, logits=eps_p)

        # Based on perturbed image, get direction of greatest error
        eps_adv = tf.gradients(loss, [eps], aggregation_method=2)[0]

        # Use that direction as adversarial perturbation; stop_gradient so
        # the adversarial input is treated as a constant by later gradients.
        eps_adv = normalize_perturbation(eps_adv)
        x_adv = tf.stop_gradient(x + args.radius * eps_adv)
    return x_adv


@add_arg_scope
def vat_loss(x, p, classifier, scope=None):
    # VAT smoothing loss: cross-entropy between the (frozen) prediction on
    # x and the prediction on its adversarial neighbour x_adv.
    with tf.name_scope(scope, 'smoothing_loss'):
        x_adv = perturb_image(x, p, classifier)
        p_adv = classifier(x_adv, phase=True, reuse=True)
        loss = tf.reduce_mean(softmax_xent_two(labels=tf.stop_gradient(p),
                                               logits=p_adv))
    return loss
2,255
819
#!/usr/bin/env python 3
# Removes all consecutive duplicate characters from a given string.
# Author: Happi Yvan <ivensteinpoker@gmail.com> | Created: November 5, 2019

import itertools


def obtain_data_from_user(input_mess: str) -> str:
    """Prompt with *input_mess* until the user supplies a non-empty string."""
    while True:
        try:
            user_data = input(input_mess)
            if not user_data:
                raise ValueError('Please enter some string to work with')
            return user_data
        except ValueError as ve:
            print(f'[ERROR]: {ve}')


def do_processing(main_str: str) -> str:
    """Collapse each run of repeated characters in *main_str* to a single one."""
    collapsed = []
    # groupby yields one (char, run) pair per maximal run of equal chars;
    # keeping only the key collapses the run.
    for ch, _run in itertools.groupby(main_str):
        collapsed.append(ch)
    return ''.join(collapsed)


if __name__ == "__main__":
    main_data = obtain_data_from_user(input_mess='Enter some string data: ')
    print(f'New string: {do_processing(main_str=main_data)}')
1,366
333
from django.contrib import admin

from .models import Haat, Stall, Rating, Product

# Register each model with the default admin site so it gets the
# auto-generated CRUD interface.
for model in (Haat, Stall, Rating, Product):
    admin.site.register(model)
223
71
from .ukbb import *
from .augmentations import *
from .multi_series import *
from torchvision.transforms import Compose


class RandomTransforms(object):
    """Base class for a list of transformations with randomness

    Args:
        transforms (list or tuple): list of transformations
    """

    def __init__(self, transforms, out_range=(0.0, 1.0)):
        assert isinstance(transforms, (list, tuple))
        self.transforms = transforms
        # Intensity range applied after the transforms run (see RandomOrder).
        self.out_range = out_range

    def __call__(self, *args, **kwargs):
        raise NotImplementedError()

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        for t in self.transforms:
            format_string += '\n'
            format_string += '    {0}'.format(t)
        format_string += '\n)'
        return format_string


class RandomOrder(RandomTransforms):
    """Apply a list of transformations in a random order,
    then rescale the result into self.out_range.

    NOTE(review): relies on `random` and `RescaleIntensity` being
    provided by the wildcard imports above — confirm.
    """

    def __call__(self, img):
        order = list(range(len(self.transforms)))
        random.shuffle(order)
        for i in order:
            img = self.transforms[i](img)
        rescale = RescaleIntensity(out_range=self.out_range)
        img = rescale(img)
        return img


class ComposeMultiChannel(object):
    """Composes several transforms together for multi channel operations.

    Each composed transform must accept and return three images.

    Args:
        transforms (list of ``Transform`` objects): list of transforms to compose.

    Example:
        >>> transforms.Compose([
        >>>     transforms.CenterCrop(10),
        >>>     transforms.ToTensor(),
        >>> ])
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img1, img2, img3):
        for t in self.transforms:
            img1, img2, img3 = t(img1, img2, img3)
        return img1, img2, img3

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        for t in self.transforms:
            format_string += '\n'
            format_string += '    {0}'.format(t)
        format_string += '\n)'
        return format_string


##############################################################################
# SINGLE Series Transforms (to be used on flow_250_*_MAG)
##############################################################################

############################
#  Preprocessing Transforms
############################
def compose_preprocessing(preprocessing):
    """ Compose a preprocessing transform to be performed.

    Params
    ------
    preprocessing   :   dict
        - dictionary defining all preprocessing steps to be taken with their values
        e.g. {"FrameSelector" : "var", "Rescale_Intensity" : [0, 255], "Gamma_Correction" : 2.0}

    Return
    ------
    torchvision.transforms.Compose
    """
    # Frame Selector: variance-based by name, otherwise std-based.
    if (preprocessing["FrameSelector"]["name"] == "FrameSelectionVar"):
        frame_selector = FrameSelectionVar(n_frames=preprocessing["n_frames"])
    else:
        frame_selector = FrameSelectionStd(n_frames=preprocessing["n_frames"],
                                           channel=preprocessing["FrameSelector"]["channel"],
                                           epsilon=preprocessing["FrameSelector"]["epsilon"])

    # Rescale Intensity (identity transform when key absent)
    if ("Rescale_Intensity" in preprocessing):
        intensity_rescale = RescaleIntensity(out_range=tuple(preprocessing["Rescale_Intensity"]))
    else:
        intensity_rescale = NullTransform()

    # Gamma Correction (identity transform when key absent)
    if ("Gamma_Correction" in preprocessing):
        gamma_correct = GammaCorrection(gamma=preprocessing["Gamma_Correction"]["gamma"],
                                        intensity=preprocessing["Gamma_Correction"]["intensity"])
    else:
        gamma_correct = NullTransform()

    return Compose([frame_selector, intensity_rescale, gamma_correct])


###########################
#  Augmentation Transforms
###########################
def compose_augmentation(augmentations, seed=1234):
    """ Compose an augmentation transform to be performed.

    Params
    ------
    augmentations   :   dict
        - dictionary defining all augmentation steps to be taken with their values
        e.g. {
            "RandomCrop" : { "size" : 28, "padding" : 12 },
            "RandomRotation" : { "degrees" : 25 },
            "RandomTranslation" : { "translate" : (0.2, 0.8) },
            "RandomShear" : { "shear" : 12.5 },
            "RandomAffine" : { "degrees" : 5, "translate" : (0.5, 0.5), "scale" : 0.8, "shear" : 15.0 },
            "Randomize" : 0
        }
    seed            :   int
        - forwarded to every random augmentation for reproducibility

    Return
    ------
    torchvision.transforms.Compose (ordered transforms)
    OR
    torchvision.transforms.RandomOrder (randomly ordered transforms)
    """
    # Every step falls back to NullAugmentation (identity) when its key
    # is absent from `augmentations`.

    # Padding
    if ("Pad" in augmentations):
        if ("padding" in augmentations["Pad"]):
            padding = augmentations["Pad"]["padding"]
        else:
            padding = 0
        if ("fill" in augmentations["Pad"]):
            fill = augmentations["Pad"]["fill"]
        else:
            fill = 0
        if ("padding_mode" in augmentations["Pad"]):
            padding_mode = augmentations["Pad"]["padding_mode"]
        else:
            padding_mode = 'constant'
        pad = Pad(
            padding=padding,
            fill=fill,
            padding_mode=padding_mode)
    else:
        pad = NullAugmentation()

    # Random Horizontal Flip
    if ("RandomHorizontalFlip" in augmentations):
        if ("probability" in augmentations["RandomHorizontalFlip"]):
            probability = augmentations["RandomHorizontalFlip"]["probability"]
        else:
            probability = 0.5
        random_horizontal = RandomHorizontalFlip(p=probability, seed=seed)
    else:
        random_horizontal = NullAugmentation()

    # Random Vertical Flip
    if ("RandomVerticalFlip" in augmentations):
        if ("probability" in augmentations["RandomVerticalFlip"]):
            probability = augmentations["RandomVerticalFlip"]["probability"]
        else:
            probability = 0.5
        random_vertical = RandomVerticalFlip(p=probability, seed=seed)
    else:
        random_vertical = NullAugmentation()

    # Random Cropping
    if ("RandomCrop" in augmentations):
        if ("padding" in augmentations["RandomCrop"]):
            padding = augmentations["RandomCrop"]["padding"]
        else:
            padding = 0
        random_crop = RandomCrop(
            augmentations["RandomCrop"]["size"],
            padding=padding,
            seed=seed)
    else:
        random_crop = NullAugmentation()

    # Random Rotation
    if ("RandomRotation" in augmentations):
        if ("resample" in augmentations["RandomRotation"]):
            resample = augmentations["RandomRotation"]["resample"]
        else:
            resample = False
        if ("center" in augmentations["RandomRotation"]):
            center = augmentations["RandomRotation"]["center"]
        else:
            center = None
        random_rotation = RandomRotation(
            augmentations["RandomRotation"]["degrees"],
            resample=resample,
            center=center,
            seed=seed)
    else:
        random_rotation = NullAugmentation()

    # Random Translation
    if ("RandomTranslation" in augmentations):
        if ("resample" in augmentations["RandomTranslation"]):
            resample = augmentations["RandomTranslation"]["resample"]
        else:
            resample = False
        random_translation = RandomTranslation(
            augmentations["RandomTranslation"]["translate"],
            resample=resample,
            seed=seed)
    else:
        random_translation = NullAugmentation()

    # Random Shear
    if ("RandomShear" in augmentations):
        if ("resample" in augmentations["RandomShear"]):
            resample = augmentations["RandomShear"]["resample"]
        else:
            resample = False
        random_shear = RandomShear(
            augmentations["RandomShear"]["shear"],
            resample=resample,
            seed=seed)
    else:
        random_shear = NullAugmentation()

    # Random Affine
    if ("RandomAffine" in augmentations):
        if ("translate" in augmentations["RandomAffine"]):
            translate = augmentations["RandomAffine"]["translate"]
        else:
            translate = None
        if ("scale" in augmentations["RandomAffine"]):
            scale = augmentations["RandomAffine"]["scale"]
        else:
            scale = None
        if ("shear" in augmentations["RandomAffine"]):
            shear = augmentations["RandomAffine"]["shear"]
        else:
            shear = None
        if ("resample" in augmentations["RandomAffine"]):
            resample = augmentations["RandomAffine"]["resample"]
        else:
            resample = False
        if ("fillcolor" in augmentations["RandomAffine"]):
            fillcolor = augmentations["RandomAffine"]["fillcolor"]
        else:
            fillcolor = 0
        random_affine = RandomAffine(
            augmentations["RandomAffine"]["degrees"],
            translate=translate,
            scale=scale,
            shear=shear,
            resample=resample,
            fillcolor=fillcolor,
            seed=seed)
    else:
        random_affine = NullAugmentation()

    try:
        if (augmentations["Randomize"]):
            # NOTE(review): both branches build the identical RandomOrder
            # list (the "PixelRange" check is a no-op here), and the pad
            # and flip steps are excluded from randomized mode — confirm
            # that this is intentional.
            if ("PixelRange" in augmentations):
                return RandomOrder(
                    [random_crop,
                     random_rotation,
                     random_translation,
                     random_shear,
                     random_affine])
            else:
                return RandomOrder(
                    [random_crop,
                     random_rotation,
                     random_translation,
                     random_shear,
                     random_affine])
    except:
        # This will fail when "Randomize" is not defined in augmentations
        pass

    return Compose([pad,
                    random_horizontal,
                    random_vertical,
                    random_crop,
                    random_rotation,
                    random_translation,
                    random_shear,
                    random_affine])


##############################################################################
# Postprocessing Transforms
##############################################################################
def compose_postprocessing(postprocessing):
    """ Compose a postprocessing transform to be performed.

    Params
    ------
    postprocessing  :   dict
        - dictionary defining all preprocessing steps to be taken with their values
        e.g. {"Name" : "RescaleIntensity"} OR {"Name" : "StdNormalize"}

    Return
    ------
    torchvision.transforms.Compose
    """
    if (postprocessing["Name"] == "StdNormalize"):
        postprocess = StdNormalize()
    else:
        # Default: rescale intensities into [0, 1].
        postprocess = RescaleIntensity(out_range=(0.0, 1.0))

    return Compose([postprocess])


##############################################################################
# MULTIPLE Series Transforms (to be used on ALL flow_250_* series)
##############################################################################

############################
#  Preprocessing Transforms
############################
def compose_preprocessing_multi(preprocessing):
    """ Compose a preprocessing transform to be performed on MULTI series.

    Params
    ------
    preprocessing   :   dict
        - dictionary defining all preprocessing steps to be taken with their values
        e.g. {"FrameSelector" : "var", "Rescale_Intensity" : [0, 255], "Gamma_Correction" : 2.0}

    Return
    ------
    torchvision.transforms.Compose
    """
    # Frame Selector
    # NOTE(review): frame_selector is only bound when the configured name
    # is "FrameSelectionVarMulti"; any other name raises NameError below
    # — confirm whether other selectors were meant to be supported.
    if (preprocessing["FrameSelector"]["name"] == "FrameSelectionVarMulti"):
        frame_selector = FrameSelectionVarMulti(n_frames=preprocessing["n_frames"])

    # Rescale Intensity (identity when key absent)
    if ("RescaleIntensityMulti" in preprocessing):
        intensity_rescale = RescaleIntensityMulti(out_range=tuple(preprocessing["RescaleIntensityMulti"]))
    else:
        intensity_rescale = NullTransformMulti()

    return ComposeMultiChannel([frame_selector, intensity_rescale])


#############################
#  Postprocessing Transforms
#############################
def compose_postprocessing_multi(postprocessing):
    """ Compose a postprocessing transform to be performed on MULTI series.

    Params
    ------
    postprocessing  :   dict
        - dictionary defining all preprocessing steps to be taken with their values
        e.g. {"Name" : "RescaleIntensity"} OR {"Name" : "StdNormalize"}

    Return
    ------
    torchvision.transforms.Compose
    """
    if (postprocessing["Name"] == "StdNormalizeMulti"):
        postprocess = StdNormalizeMulti()
    else:
        postprocess = RescaleIntensityMulti(out_range=(0.0, 1.0))

    return ComposeMultiChannel([postprocess])
13,235
3,562
# Python > Sets > Check Strict Superset
# Check if A is a strict superset of the other given sets.
#
# https://www.hackerrank.com/challenges/py-check-strict-superset/problem
#

# A strict superset is exactly what Python's `>` operator tests on sets:
# every element of the candidate is in `a`, and `a` holds at least one
# extra element.
a = set(map(int, input().split()))
strict_superset = True
for _ in range(int(input())):
    candidate = set(map(int, input().split()))
    if not a > candidate:
        strict_superset = False
        break
print(strict_superset)
424
153
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end

"""@package src.clm.views.admin_cm.iso_image

CLM admin views that proxy ISO-image requests to a Cluster Manager (CM).
All views except get_list are transparent pass-throughs via @cm_request.

@alldecoratedby{src.clm.utils.decorators.admin_cm_log}
"""

from clm.utils.decorators import admin_cm_log, cm_request
from clm.utils.cm import CM
from clm.utils.exception import CLMException
from clm.models.user import User


@admin_cm_log(log=False, pack=True)
def get_list(cm_id, caller_id, cm_password):
    """
    @cm_request{iso_image.get_list()}
    @clmview_admin_cm

    Fetches the ISO image list from the CM and annotates each image dict
    with an 'owner' display name; user-name lookups are cached per
    user_id in `names`.
    """
    names = {}
    resp = CM(cm_id).send_request("admin_cm/iso_image/get_list/", caller_id=caller_id, cm_password=cm_password)
    for img in resp['data']:
        if str(img['user_id']) not in names:
            try:
                user = User.objects.get(pk=img['user_id'])
                names[str(img['user_id'])] = user.first + " " + user.last
            except:
                # Any lookup failure (missing user, DB error) is surfaced
                # as a CLM 'user_get' exception.
                raise CLMException('user_get')

        img['owner'] = names[str(img['user_id'])]
    return resp['data']


@admin_cm_log(log=False, pack=False)
@cm_request
def get_by_id(cm_response, **data):
    """
    @clmview_admin_cm
    @cm_request_transparent{iso_image.get_by_id()}
    """
    return cm_response


@admin_cm_log(log=True, pack=False)
@cm_request
def delete(cm_response, **data):
    """
    @clmview_admin_cm
    @cm_request_transparent{iso_image.delete()}
    """
    return cm_response


@admin_cm_log(log=True, pack=False)
@cm_request
def edit(cm_response, **data):
    """
    @clmview_admin_cm
    @cm_request_transparent{iso_image.edit()}
    """
    return cm_response


@admin_cm_log(log=True, pack=False)
@cm_request
def download(cm_response, **data):
    """
    @clmview_admin_cm
    @cm_request_transparent{iso_image.download()}
    """
    return cm_response


@admin_cm_log(log=True, pack=False)
@cm_request
def copy(cm_response, **data):
    """
    @clmview_admin_cm
    @cm_request_transparent{iso_image.copy()}
    """
    return cm_response


@admin_cm_log(log=True, pack=False)
@cm_request
def set_public(cm_response, **data):
    """
    @clmview_admin_cm
    @cm_request_transparent{iso_image.set_public()}
    """
    return cm_response


@admin_cm_log(log=True, pack=False)
@cm_request
def set_private(cm_response, **data):
    """
    @clmview_admin_cm
    @cm_request_transparent{iso_image.set_private()}
    """
    return cm_response
2,985
1,073
# Scrapes an Instagram account's follower/following lists through the
# GraphQL web endpoint and dumps one JSON record per account to disk.
import json
import os
from datetime import datetime
from time import sleep

import requests

from getters_maker import get_request_hash, get_insta_query, get_cookies


def __get_follow_requests(variables, res_json, personal_path, json_piece):
    """Append one JSON record per edge of a paginated GraphQL response
    to '<json_piece>_info.txt' inside `personal_path`.

    `json_piece` is the GraphQL connection name: 'edge_followed_by' for
    followers, 'edge_follow' for accounts being followed.

    Returns True (after updating variables['after'] in place with the
    next-page cursor) when another page exists, False otherwise.
    """
    # json_formatted_str = json.dumps(res_json, indent=1)
    # print(json_formatted_str)
    full_info_dict = dict()
    file_name = json_piece + "_info.txt"
    for node in res_json["data"]["user"][str(json_piece)]["edges"]:
        node = node["node"]
        full_info_dict["id"] = node["id"]
        full_info_dict["username"] = node["username"]
        full_info_dict["full_name"] = node["full_name"]
        full_info_dict["is_private"] = node["is_private"]
        full_info_dict["is_verified"] = node["is_verified"]
        full_info_dict["followed_by_viewer"] = node["followed_by_viewer"]
        full_info_dict["requested_by_viewer"] = node["requested_by_viewer"]
        # NOTE(review): the file accumulates concatenated JSON objects
        # (not a JSON array) across appends — confirm downstream readers
        # expect this format.
        with open(os.path.join(personal_path, file_name), 'a+') as file:
            json.dump(full_info_dict, file, indent=1)
    if res_json["data"]["user"][str(json_piece)]["page_info"]["has_next_page"]:
        variables["after"] = res_json["data"]["user"][str(json_piece)]["page_info"]["end_cursor"]
        return True
    return False


def __get_user_followers(id_user, username, dir_name, session_id, param, action):
    """Page through one connection ('followers' or 'following') of
    `id_user`, dumping each page via __get_follow_requests.

    Retries up to `reintentos_maximos` consecutive times on non-200
    responses; any parsing error aborts. Returns True when the full list
    was fetched, False otherwise.
    """
    reintentos_maximos = 3   # max consecutive retries
    sleep_time = 10          # seconds between pages/retries
    variables = {
        'id': id_user,
        'first': 50          # page size
    }
    params = {
        "query_hash": param,
        "variables": json.dumps(variables)
    }
    has_next_page = True
    error = False
    reintentos_actuales = 0
    print("------------------------------------------------------")
    while has_next_page and reintentos_actuales < reintentos_maximos:
        res = requests.get(get_insta_query(), params=params,
                           cookies=get_cookies(session_id, JSON_codification=False))
        if res.status_code == 200:
            print("Obteniendo " + action + " de " + username + "...")
            # Successful page resets the retry counter.
            reintentos_actuales = 0
            try:
                if action == "followers":
                    has_next_page = __get_follow_requests(variables, res.json(), dir_name, 'edge_followed_by')
                else:
                    has_next_page = __get_follow_requests(variables, res.json(), dir_name, 'edge_follow')
                if has_next_page:
                    # Re-serialize `variables` now that 'after' holds the
                    # next cursor, and throttle before the next request.
                    params["variables"] = json.dumps(variables)
                    sleep(sleep_time)
            except Exception as err:
                print("Se produjo un error en get_user_followers: ", err)
                reintentos_actuales = reintentos_maximos
                error = True
        else:
            reintentos_actuales += 1
            sleep(sleep_time)
    if not error:
        print("Todos los " + action + " de " + username + " han sido obtenidos")
    return reintentos_actuales < reintentos_maximos


def get_user_followers_ing(id_user, username, dir_name, session_id, boolean_followes, boolean_following):
    """Fetch the followers and/or following lists selected by the two
    boolean flags.

    NOTE(review): a flag left False keeps its result False, so the return
    value is only True when BOTH lists are requested and both succeed —
    confirm this is the intended contract.
    """
    a = False
    b = False
    if boolean_followes:
        a = __get_user_followers(id_user, username, dir_name, session_id,
                                 get_request_hash()['followers'], 'followers')
    if boolean_following:
        b = __get_user_followers(id_user, username, dir_name, session_id,
                                 get_request_hash()['following'], 'following')
    return a and b
3,424
1,127
#!/usr/bin/python
# Input file: EWS.enriched.bins.txt
"""Sum the EWS1 and EWS2 count columns (5 and 6, tab-separated) of each
data row read from stdin and print one total per row.

'#'-prefixed comment lines and rows whose 5th column still contains the
literal header text 'count' are skipped.

Fixes over the original: Python 2 `print` statement replaced with the
py3 function form; blank lines no longer raise IndexError; the unused
`counter` dict was removed.
"""
import sys


def ews_total(line):
    """Return int(col5) + int(col6) for a data row, or None when the
    row should be skipped (blank, comment, or header row)."""
    if not line or line.startswith('#'):
        return None
    cols = line.strip().split('\t')
    if 'count' in cols[4]:
        return None
    # just print EWS1 + EWS2
    return int(cols[4]) + int(cols[5])


if __name__ == '__main__':
    for line in sys.stdin:
        total = ews_total(line)
        if total is not None:
            print(total)
289
108
# SPDX-FileCopyrightText: 2022 Mark Komus
#
# SPDX-License-Identifier: MIT

# Scrolls team-spirit messages across Adafruit IS31FL3741 LED Glasses
# and animates the eye rings; the top button triggers a "SCORE!" mode.

import random
import time
import board
import busio
import digitalio
import displayio
import framebufferio
import is31fl3741
from adafruit_is31fl3741.is31fl3741_PixelBuf import IS31FL3741_PixelBuf
from adafruit_is31fl3741.led_glasses_map import (
    glassesmatrix_ledmap_no_ring,
    left_ring_map_no_inner,
    right_ring_map_no_inner,
)
from adafruit_display_text import label
from adafruit_bitmap_font import bitmap_font
from adafruit_led_animation.animation.chase import Chase
from adafruit_debouncer import Debouncer

# List of possible messages to display. Randomly chosen
MESSAGES = (
    "GO TEAM GO",
    "WE ARE NUMBER 1",
    "I LIKE THE HALFTIME SHOW",
)

# Colors used for the text and ring lights (R, G, B)
BLUE_TEXT = (0, 20, 255)
BLUE_RING = (0, 10, 120)
YELLOW_TEXT = (220, 210, 0)
YELLOW_RING = (150, 140, 0)


def ScrollMessage(text, color, repeat):
    """Scroll a message across the eyeglasses a set number of times.

    Returns early if the top button is pressed mid-scroll."""
    text_area.text = text
    text_area.color = color

    # Start the message just off the side of the glasses
    x = display.width
    text_area.x = x

    # Determine the width of the message to scroll
    width = text_area.bounding_box[2]

    for _ in range(repeat):
        while x != -width:
            x = x - 1
            text_area.x = x

            # Update the switch and if it has been pressed abort scrolling this message
            switch.update()
            if not switch.value:
                return

            time.sleep(0.025)  # adjust to change scrolling speed
        x = display.width


def Score(text, color, ring_color, repeat):
    """Show a scrolling text message and animated effects on the eye rings.

    The messages scrolls left to right, then right to left while the eye
    rings are animated using the adafruit_led_animation library."""
    # Set up a led animation chase sequence for both eyelights
    chase_left = Chase(left_eye, speed=0.11, color=ring_color, size=8, spacing=4)
    chase_right = Chase(right_eye, speed=0.07, color=ring_color, size=8, spacing=4)

    text_area.text = text
    text_area.color = color

    x = display.width
    text_area.x = x
    width = text_area.bounding_box[2]

    for _ in range(repeat):
        # Scroll the text left and animate the eyes
        while x != -width:
            x = x - 1
            text_area.x = x
            chase_left.animate()
            chase_right.animate()
            time.sleep(0.008)  # adjust to change scrolling speed

        # Scroll the text right and animate the eyes
        while x != display.width:
            x = x + 1
            text_area.x = x
            chase_left.animate()
            chase_right.animate()
            time.sleep(0.008)  # adjust to change scrolling speed


# Remove any existing displays
displayio.release_displays()

# Set up the top button used to trigger a special message when pressed
switch_pin = digitalio.DigitalInOut(board.SWITCH)
switch_pin.direction = digitalio.Direction.INPUT
switch_pin.pull = digitalio.Pull.UP
switch = Debouncer(switch_pin)

# Initialize the LED Glasses
#
# In this example scale is set to True. When True the logical display is
# three times the physical display size and scaled down to allow text to
# look more natural for small display sizes. Hence the display is created
# as 54x15 when the physical display is 18x5.
#
i2c = busio.I2C(board.SCL, board.SDA, frequency=1000000)
is31 = is31fl3741.IS31FL3741(i2c=i2c)
is31_framebuffer = is31fl3741.IS31FL3741_FrameBuffer(
    is31, 54, 15, glassesmatrix_ledmap_no_ring, scale=True, gamma=True
)
display = framebufferio.FramebufferDisplay(is31_framebuffer, auto_refresh=True)

# Set up the left and right eyelight rings
# init is set to False as the IS31FL3741_FrameBuffer has already initialized the IS31FL3741 driver
left_eye = IS31FL3741_PixelBuf(
    is31, left_ring_map_no_inner, init=False, auto_write=False
)
right_eye = IS31FL3741_PixelBuf(
    is31, right_ring_map_no_inner, init=False, auto_write=False
)

# Dim the display. Full brightness is BRIGHT
is31_framebuffer.brightness = 0.2

# Load the font to be used - scrolly only has upper case letters
font = bitmap_font.load_font("/fonts/scrolly.bdf")

# Set up the display elements
text_area = label.Label(font, text="", color=(0, 0, 0))
text_area.y = 8
group = displayio.Group()
group.append(text_area)
display.show(group)

while True:
    # Run the debouncer code to get the updated switch value
    switch.update()

    # If the switch has been pressed interrupt start a special message
    if not switch.value:
        Score("SCORE!", YELLOW_TEXT, BLUE_RING, 2)

    # If the switch is not pressed pick a random message and scroll it
    left_eye.fill(BLUE_RING)
    right_eye.fill(BLUE_RING)
    left_eye.show()
    right_eye.show()
    ScrollMessage(random.choice(MESSAGES), YELLOW_TEXT, 2)
4,895
1,705
"""Tally duplicate 'pre;post' lines from model.txt and write each unique
line with its occurrence count appended ('pre;post;N') to counts.txt,
preserving first-seen order.

Fixes over the original: lines without a ';' no longer crash the script
(split(';')[1] raised IndexError), and counts are now correct for lines
containing extra ';' fields (the old index-based increment silently
failed for those via `except: pass`).
"""
from collections import Counter


def count_occurrences(lines):
    """Return ['<line>;<count>', ...] for the given iterable of text
    lines, in first-appearance order.

    Trailing newlines are removed; lines with no ';' separator are
    skipped.
    """
    tally = Counter()
    for raw in lines:
        line = raw.replace('\n', '')
        if ';' not in line:
            continue
        tally[line] += 1
    # Counter preserves insertion (first-seen) order on Python 3.7+.
    return ['{0};{1}'.format(line, count) for line, count in tally.items()]


if __name__ == '__main__':
    with open('model.txt') as f:
        model_lines = f.readlines()

    # Per-line progress output, kept for parity with the original script.
    for processed in range(1, len(model_lines) + 1):
        print(processed)

    counts = count_occurrences(model_lines)
    print(counts)

    with open('counts.txt', 'w') as f:
        for entry in counts:
            f.write(entry + '\n')
1,014
325
# By Nick Erickson
# Contains functions related to metrics

import sys
import numpy as np
from utils import graph_helper
from utils.data_utils import save_class


class Metrics:
    """Collects training/evaluation metrics: per-run survival stats,
    Q values and losses per replay batch, value estimates, and (when
    metric_type == 'a3c') an A3C loss breakdown."""
    # TODO: Save this to a pickle file?

    def __init__(self, metric_type='a3c'):
        self.type = metric_type
        # self.survival_times = []
        # self.survival_times_last_10 = []
        # self.survival_times_full_mean = []
        self.runs = MetricInfoSurvival(name='survival')  # per-run survival stats
        self.Q = []     # Q-value history (per replay batch)
        self.loss = []  # loss history (per replay batch)
        # self.total_size = 0
        # self.size = 0
        # self.max_survival = -1
        self.V = []          # value estimate per run
        self.V_episode = []  # value estimate per frame within an episode
        if metric_type == 'a3c':
            self.a3c = MetricsA3C()
        else:
            self.a3c = None

    def save(self, save_location, name=None):
        """Serialize this Metrics object to '<save_location><name>.dat'."""
        if name is None:
            name = ''
        save_class(self, save_location + name + '.dat')

    """
    def update(self, survival_time):
        self.size += 1
        self.total_size += 1
        if survival_time > self.max_survival:
            self.max_survival = survival_time
        #self.survival_times.append(survival_time)
        #self.survival_times_last_10.append(np.mean(self.survival_times[-10:]))
        #self.survival_times_full_mean.append(np.mean(self.survival_times)) # make this better...
    """

    def display_metrics(self, frame, use_rate, total_saved=0, epsilon=0):
        """Print a one-line summary of the most recent run to stdout."""
        if np.sum(use_rate) != 0:
            # Normalize action-usage counts into frequencies.
            use_rate = use_rate / np.sum(use_rate)
        framerate = frame/self.runs.times[-1]
        print('R' + str(self.runs.total_size) + ': ' + "%.2f" % self.runs.times[-1] + 's' + ', %.2f fps' % framerate + ', key: ', ['%.2f' % k for k in use_rate], end='')
        print(' Mean: %.2f' % self.runs.mean[-1], 'Last 10: %.2f' % self.runs.last10[-1], 'Max: %.2f' % self.runs.max, "TS", total_saved, "E %.2f" % epsilon)
        sys.stdout.flush()

    def save_metrics_training(self, save_location):
        """Graph Q-value and loss histories to '<save_location>Q'/'loss'."""
        graph_helper.graph_simple([np.arange(len(self.Q))], [self.Q], ['Q Value'], 'Q Value', 'Q Value', 'Replay (10^2)', savefig_name=save_location + 'Q')
        graph_helper.graph_simple([np.arange(len(self.loss))], [self.loss], ['Loss'], 'Loss', 'Loss', 'Replay (10^2)', savefig_name=save_location + 'loss')

    def save_metrics_v(self, save_location):
        """Graph value-estimate histories (per run and per frame)."""
        graph_helper.graph_simple([np.arange(len(self.V))], [self.V], ['Value'], 'Value', 'Value', 'Run', savefig_name=save_location + 'V')
        graph_helper.graph_simple([np.arange(len(self.V_episode))], [self.V_episode], ['Value'], 'Value', 'Value', 'Frame', savefig_name=save_location + 'V_episode')

    # def save_metrics(self, save_location):
    #     # TODO: Remove these logs, just export the data directly
    #     graphHelper.graphSimple([np.arange(self.size),np.arange(self.size),np.arange(self.size)], [self.survival_times, self.survival_times_last_10, self.survival_times_full_mean], ['Survival Time', 'Survival Rolling 10 Mean', 'Survival Mean'], 'Survival Times', 'Time(s)', 'Run', savefigName=save_location + 'survival')


class MetricInfoSurvival:
    """Running statistics over per-run survival times: raw times,
    rolling-10 mean, cumulative mean, and max."""

    def __init__(self, total_size=0, name='survival'):
        self.name = name
        self.total_size = total_size  # runs seen across all sessions
        self.size = 0                 # runs seen this session
        self.max = -1
        self.mean = []    # cumulative mean after each run
        self.last10 = []  # rolling mean of the last 10 runs
        self.times = []   # raw survival time per run

    def update(self, time):
        """Record one run's survival time and refresh derived stats."""
        if self.max < time:
            self.max = time
        self.times.append(time)
        self.last10.append(np.mean(self.times[-10:]))
        if self.size == 0:
            self.mean.append(time)
        else:
            # Incremental cumulative mean (avoids re-summing all times).
            self.mean.append((self.mean[-1] * self.size + time) / (self.size+1))
        self.size += 1
        self.total_size += 1

    def graph(self, save_location, name=None):
        """Graph times / rolling-10 mean / cumulative mean vs. run index."""
        if name is None:
            name = self.name
        # TODO: Remove these logs, just export the data directly
        graphRange = np.arange(self.total_size - self.size, self.total_size)
        graph_helper.graph_simple([graphRange, graphRange, graphRange], [self.times, self.last10, self.mean], ['Time', 'Rolling 10 Mean', 'Mean'], 'Survival Times', 'Time(s)', 'Run', savefig_name=save_location + name)


class MetricInfo:
    """Per-batch mean/max/min summary for one named quantity."""

    def __init__(self, name):
        self.name = name
        self.mean = []
        self.max = []
        self.min = []
        self.size = 0  # number of batches recorded

    def update(self, data):
        """Append the mean/max/min of one batch of values."""
        self.mean.append(np.mean(data))
        self.max.append(np.max(data))
        self.min.append(np.min(data))
        self.size += 1

    def graph_mean(self, save_location):
        """Graph the per-batch mean of this quantity."""
        graph_helper.graph_simple([np.arange(self.size)], [self.mean], [self.name], self.name, self.name, 'Batch', savefig_name=save_location + self.name)


class MetricsA3C:
    """Per-batch breakdown of the A3C loss components."""

    def __init__(self):
        self.L = MetricInfo('Loss Total')        # Loss Total
        self.Pr = MetricInfo('Log Probability')  # Log Prob
        self.Po = MetricInfo('Loss Probability') # Loss Prob
        self.V = MetricInfo('Loss Value')        # Loss Value
        self.E = MetricInfo('Loss Entropy')      # Loss Entropy
        self.metrics = []
        self.metrics.extend([self.L, self.Pr, self.Po, self.V, self.E])

    def update(self, l, pr, po, v, e):
        """Record one batch of each loss component."""
        self.L.update(l)
        self.Pr.update(pr)
        self.Po.update(po)
        self.V.update(v)
        self.E.update(e)

    def graph_all(self, save_location):
        """Graph every component; a failed graph only prints its name.

        NOTE(review): the bare except hides the actual error — consider
        logging it."""
        for metric in self.metrics:
            try:
                metric.graph_mean(save_location)
            except:
                print(metric.name)
5,763
1,975
import RootPath


def create_submission_file(tweets, users, predictions, output_file):
    """Write one '<tweet>,<user>,<prediction>' line per example to
    `output_file`, resolved relative to the project root via RootPath.

    Predictions are rounded to 4 decimal places. Raises AssertionError
    when the three sequences differ in length.

    Fix: the file is now opened with a context manager, so the handle is
    closed even if a write raises (the original leaked it on error).
    """
    # Preliminary checks
    assert len(tweets) == len(users), f"length are different tweets -> {len(tweets)}, and users -> {len(users)} "
    assert len(users) == len(predictions), f"length are different users -> {len(users)}, and predictions -> {len(predictions)} "
    assert len(tweets) == len(predictions), f"length are different tweets -> {len(tweets)}, and predictions -> {len(predictions)} "

    with open(RootPath.get_root().joinpath(output_file), "w") as file:
        for tweet, user, prediction in zip(tweets, users, predictions):
            file.write(f"{tweet},{user},{round(prediction, 4)}\n")
680
227
from utils import chunk_list

"""
Database utilities (future middleware layer if we decide to use DuckDB
by default.)
"""

# SQLite caps the number of rows in a single batched INSERT, so inserts
# against it are split into chunks of this size.
_SQLITE_CHUNK_SIZE = 500


def insert_many(settings, table, rows):
    """
    Entry function on insert_many.
    """
    if settings.db_type != 'sqlite':
        with settings.db.atomic():
            table.insert_many(rows).execute()
        return
    __insert_many_sqlite(settings, table, rows)


def __insert_many_sqlite(settings, table, rows):
    """
    SQLite has a limit on number of rows. Chunk the rows and batch insert.
    """
    batches = chunk_list(rows, _SQLITE_CHUNK_SIZE)
    with settings.db.atomic():
        for batch in batches:
            table.insert_many(batch).execute()


def insert_many_on_conflict_ignore(settings, table, rows):
    """
    Entry function on insert_many, ignoring conflicts on key issues.
    """
    if settings.db_type != 'sqlite':
        with settings.db.atomic():
            table.insert_many(rows).on_conflict_ignore().execute()
        return
    __insert_many_on_conflict_ignore_sqlite(settings, table, rows)


def __insert_many_on_conflict_ignore_sqlite(settings, table, rows):
    """
    SQLite has a limit on number of rows. Chunk the rows and batch insert.
    """
    batches = chunk_list(rows, _SQLITE_CHUNK_SIZE)
    with settings.db.atomic():
        for batch in batches:
            table.insert_many(batch).on_conflict_ignore().execute()
1,388
426
import time
import picamera
import apriltag
import cv2
import numpy as np
import math
import threading
from parameters import Parameters

# Create a pool of image processors
# Module-level shared state: worker threads return themselves to `pool`
# under `lock`; `done` signals the capture generator to stop.
done = False
lock = threading.Lock()
pool = []

np.set_printoptions(suppress=True)

##########################################################################

class ImageProcessor(threading.Thread):
    """Worker thread that detects AprilTags in one camera frame at a time.

    The camera writes raw BGR bytes into `self.img`; setting `self.event`
    wakes the thread to process that frame, after which the worker puts
    itself back into the shared `pool`.
    """
    def __init__(self, width, height, parameters):
        super(ImageProcessor, self).__init__()
        self.height = height
        self.width = width
        self.detector = apriltag.Detector()
        # Physical tag edge length; units must match whatever the pose
        # consumer expects — NOTE(review): assumed inches/cm, confirm.
        self.tag_size = 3.0
        self.parameters = (0,0,0,0) #x,y,z,r
        self.paramstruct = parameters;
        # self.paramstruct = Parameters();
        # Pi camera v2 horizontal/vertical field of view, degrees -> radians.
        fov_x = 62.2*math.pi/180
        fov_y = 48.8*math.pi/180
        # Pinhole focal lengths derived from FOV; principal point at center.
        fx = self.width/(2*math.tan(fov_x/2))
        fy = self.height/(2*math.tan(fov_y/2))
        self.camera_params = (fx, fy, width/2, height/2)
        # Flat buffer the camera capture writes raw BGR bytes into.
        self.img = np.empty((self.width * self.height * 3,),dtype=np.uint8)
        self.event = threading.Event()
        self.terminated = False
        self.start()

    def run(self):
        # This method runs in a separate thread
        global done
        while not self.terminated:
            # Wait for an image to be written to the stream
            if self.event.wait(1):
                try:
                    t = time.time()
                    # Reinterpret the flat byte buffer as an H x W x 3 frame,
                    # then convert to grayscale for the tag detector.
                    self.img = self.img.reshape((self.height,self.width,3))
                    self.img = cv2.cvtColor(self.img,cv2.COLOR_BGR2GRAY)
                    results = self.detector.detect(self.img)
                    for i, detection in enumerate(results):
                        pose, e0, e1 = self.detector.detection_pose(detection,self.camera_params,self.tag_size)
                        mat = np.array(pose)
                        # Translation column of the 4x4 homogeneous pose.
                        T = mat[0:3,3]
                        # print("MAT:", mat)
                        # Yaw about the camera axis from the rotation block.
                        rz = -math.atan2(mat[1,0],mat[0,0])
                        lock.acquire()
                        self.paramstruct.add(np.array(mat[0:3,3]), rz, t)
                        lock.release()
                    if results == []:
                        # No tags seen this frame: let the consumer decay state.
                        lock.acquire()
                        self.paramstruct.softReset()
                        lock.release()
                finally:
                    # Reset the stream and event
                    self.img = np.empty((self.width * self.height * 3,),dtype=np.uint8)
                    self.event.clear()
                    # Return ourselves to the pool
                    with lock:
                        pool.append(self)

class PiCam(object):
    """Drives the Pi camera and fans frames out to the ImageProcessor pool."""
    def __init__(self, multi, parameters):
        # Low resolution keeps per-frame detection fast enough for 30 fps.
        self.width = 160 #640
        self.height = 128 #480
        self.params = parameters
        self.multi = multi
        global pool
        # 8 workers when multithreaded, otherwise a single processor.
        if (multi):
            pool = [ImageProcessor(self.width,self.height,self.params) for i in range(8)]
        else:
            pool = [ImageProcessor(self.width,self.height,self.params) for i in range(1)]

    def streams(self):
        """Generator handed to capture_sequence: yields a free worker's
        buffer for the camera to fill, then wakes that worker."""
        global done
        global lock
        while not done:
            with lock:
                if pool:
                    processor = pool.pop()
                else:
                    processor = None
            if processor:
                # The camera writes the next frame into this buffer before
                # the generator resumes; then we signal the worker.
                yield processor.img
                processor.event.set()
            else:
                # When the pool is starved, wait a while for it to refill
                time.sleep(0.1)

    def start(self):
        """Open the camera and stream frames until `done` is set."""
        with picamera.PiCamera() as camera:
            width = self.width
            height = self.height
            camera.sensor_mode = 4
            camera.framerate=30
            camera.exposure_mode = 'sports'
            camera.resolution = (self.width, self.height)
            # Let auto-exposure settle before capturing.
            time.sleep(2)
            camera.capture_sequence(self.streams(), 'bgr', use_video_port=True)
        # Shut down the processors in an orderly fashion
        while pool:
            with lock:
                processor = pool.pop()
                processor.terminated = True
                processor.join()

#######################

if __name__ == "__main__":
    paramstruct = Parameters()
    cam = PiCam(True, paramstruct)
    cam.start()
4,327
1,243
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 14:28:27 2020

@author: Mingcong Li
"""

import difflib  # used to score string similarity between industry names
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import copy  # for deep copies
import matplotlib.ticker as mtick  # for changing axis tick formats
plt.rcParams['font.sans-serif'] = ['FangSong']  # default CJK-capable font
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs from rendering as boxes

# Cross-tab helper: count listings per (listing year, industry) for the
# given region list and export the table to Excel.
def pivot1(listn, version):
    # csv_data[csv_data['area'].isna()]
    subset = csv_data[csv_data['area'].isin(listn)]
    # First 4 characters of list_date = listing year.
    # NOTE(review): this assigns into a filtered view and will emit a
    # SettingWithCopyWarning — confirm it still writes as intended.
    subset['list_date_short'] = subset['list_date'].apply(str).str[0:4]
    global result  # later sections keep reading/overwriting this global
    result = pd.crosstab(subset.list_date_short, subset.industry, margins = True)
    result.to_excel(r'D:\桌面的文件夹\实习\睿丛\output_%s.xls' %version)
    return

# The three geographic scopes: Yangtze-delta provinces+cities, the
# narrower city list, and Shanghai alone.
list1 = ['南京', '苏州', '无锡', '常州', '镇江', '扬州', '泰州', '南通', '淮安', '连云港', '盐城', '徐州', '宿迁', '杭州', '宁波', '温州', '绍兴', '湖州', '嘉兴', '金华', '衢州', '台州', '丽水', '舟山', '合肥 ', '马鞍山', '淮北', '宿州', '阜阳', '蚌埠', '淮南', '滁州', '六安', '巢湖', '芜湖', '亳州', '安庆', '池州', '铜陵', '宣城', '黄山', '上海', '江苏', '安徽', '浙江']
list2 = ['南京', '苏州', '无锡', '常州', '镇江', '扬州', '泰州', '南通', '淮安', '连云港', '盐城', '徐州', '宿迁', '杭州', '宁波', '温州', '绍兴', '湖州', '嘉兴', '金华', '衢州', '台州', '丽水', '舟山', '上海', '江苏', '浙江']
list3 = ['上海']

# Load the stock master data.
csv_file = r'D:\桌面的文件夹\实习\睿丛\分年份、分行业统计长三角地区当年上市数量\df_stock.csv'
csv_data = pd.read_csv(csv_file, low_memory = False)  # avoid dtype warning
print(csv_data)
csv_data.info()
csv_data.head()
csv_data.describe()
csv_data.head(50)

# Run the cross-tab for the three scopes (each overwrites `result`).
pivot1(list1,'list1')
pivot1(list2,'list2')
pivot1(list3,'list3')
result  # inspect the last cross-tab

# --- Normalize industry names against the Shenwan (SWS) classification ---
# Load the SWS industry standard (sheet '处理', no header row).
Tpye=pd.read_excel(r'D:\桌面的文件夹\实习\睿丛\分年份、分行业统计长三角地区当年上市数量\申银万国行业分类标准 .xlsx',sheet_name='处理', header=None)
type1 = Tpye.sort_values(1, axis=0)  # sort by industry code ascending
# Drop duplicate names; when a parent and child category share a name,
# keep the parent (first occurrence after the sort).
type1=type1.drop_duplicates(subset=0, keep='first', inplace=False, ignore_index=False)
type1=type1.rename(columns={0:'industry'})  # column 0 = industry name
type1=type1.rename(columns={1:'code'})  # column 1 = industry code
type1 = type1.set_index("industry")  # index by name for later joins
print(type1.index.is_unique)  # sanity check: index has no duplicates
type1

# Insert an empty first column to hold the fuzzy-match result.
test=result.T.iloc[0:79,:]  # drop the trailing 'All' industry row
col_name=test.columns.tolist()  # all existing column names
col_name.insert(0,'new')  # new column 'new' at position 0 (filled with NaN)
test=test.reindex(columns=col_name)  # rebuild with the extra column
test

# Fuzzy-match each raw industry name to the closest SWS name.
test.iloc[:,0] = test.index.map(lambda x: difflib.get_close_matches(x, type1.index, cutoff=0.3,n=1)[0])  # best match per name
test.head(60)  # inspect the matches
test.iloc[61:81,:]  # inspect the tail of the matches
# Export so mismatches can be fixed by hand in Excel (11 needed fixing).
test.to_excel(r'D:\桌面的文件夹\实习\睿丛\行业分类匹配结果.xls')

# --- Re-import the hand-corrected mapping and switch to SWS naming ---
data=pd.read_excel(r'D:\桌面的文件夹\实习\睿丛\行业分类匹配结果_修改后.xls', index_col = 'industry')
# Sum duplicate industries: concat below requires a unique index.
data = data.groupby(data.index).sum()
# Inner join on industry name (data's index is a subset of type1's).
outcome = pd.concat([data, type1], axis=1, join='inner', ignore_index=False)
# Collapse codes to the top-level industry: keep 2 leading digits + '0000'.
outcome['code'] = outcome['code'].apply(str).str[0:2].map(lambda x: x+'0000')
outcome['code'] = outcome['code'].astype('int64')
# Re-aggregate by top-level code.
outcome1 = outcome.set_index('code')
outcome1 = outcome1.groupby(outcome1.index).sum()
type2 = type1.reset_index().set_index('code')  # name lookup keyed by code
# Attach the Chinese top-level industry names; index dtypes must match.
outcome2 = pd.concat([outcome1, type2], axis=1, join='inner', ignore_index=False)
result = outcome2.set_index('industry').T
row_name=result.index.tolist()  # year labels as strings
type(row_name[1])  # confirm they are strings
# Years with zero IPOs were dropped by the cross-tab; reinsert them.
row_name.insert(1,'1991')
row_name.insert(15,'2005')
row_name.insert(-8,'2013')
result=result.reindex(index=row_name)  # rebuild with the missing years
result.iloc[[1, 15, -9],:]=0.0  # fill the reinserted years with zeros
result  # `result` is now the cleaned full dataset
# Data preparation ends here.

# --- Analysis ---
nameDF = pd.DataFrame()  # collects (chart title, industry list) rows

# Per-industry listing totals, used by sections 1 and 2.
industry = result[31:32]  # the 'All' totals row
# NOTE(review): position 31 is data-dependent — verify it is the last row.

# 1. Ten industries with the most listings.
temp1 = industry.T.sort_values('All',ascending=False,inplace=False)[0:11]  # top names + counts
temp1
title='过去30年上市数量最多的10个行业'  # set separately so it can be stored in nameDF
fig1 = temp1.plot(kind='bar', fontsize=16, figsize=(14,14*0.618), title=title, rot=0, legend='')  # chart formatting
fig1.axes.title.set_size(20)  # title font size
fig1.figure.savefig(r'D:\桌面的文件夹\实习\睿丛\过去30年上市数量最多的10个行业.png')  # save chart
type(temp1)  # inspect temp1's type
stri=','  # separator
seq=temp1.index.tolist()  # industry names
industryName = stri.join(seq)  # join names into one string
s = pd.Series([title,industryName])  # title + names
nameDF = nameDF.append(s, ignore_index=True)  # record in nameDF

# 2. Ten industries with the fewest listings (more reusable than 1).
# Take the bottom 10 then re-sort descending for display.
temp2 = industry.T.sort_values('All',ascending=True,inplace=False)[0:11].sort_values('All',ascending=False,inplace=False)
title='过去30年上市数量最少的10个行业'
fig2 = temp2.plot(kind='bar', fontsize=16, figsize=(14,14*0.618), title=title, rot=0, legend='')
fig2.axes.title.set_size(20)
fmt='%.0f'
yticks = mtick.FormatStrFormatter(fmt)
fig2.yaxis.set_major_formatter(yticks)  # integer ticks (values are floats)
fig2.figure.savefig(r'D:\桌面的文件夹\实习\睿丛\%s.png' %title)
seq=temp2.index.tolist()
industryName = stri.join(seq)
s = pd.Series([title,industryName])
nameDF = nameDF.append(s, ignore_index=True)

# 3. Listings per year (row totals).
result['All'] = result.apply(lambda x: x.sum(),axis=1)  # per-year totals column
title='上海地区过去30年每年的上市数量变化'
temp3= result.iloc[:,-1].drop(['All'])
fig3 = temp3.plot(kind='line', fontsize=16, figsize=(14,14*0.618),use_index=True, title=title, rot=0)
fig3.axes.title.set_size(20)
fig3.figure.savefig(r'D:\桌面的文件夹\实习\睿丛\%s.png' %title)

# Merge years into 5-year buckets to smooth volatility.
result4 = result.iloc[:-1,:]

# 4. Five-year buckets, absolute counts.
i = 0
data_new = pd.DataFrame()
while i < (result.shape[0]-1):
    try:
        # Sum a 5-year window into one bucket row.
        data_new = data_new.append(result4.iloc[i,:]+result4.iloc[i+1,:]+result4.iloc[i+2,:]+result4.iloc[i+3,:]+result4.iloc[i+4,:], ignore_index=True)
    except:
        # Incomplete trailing window; skip ahead.
        i +=5
    i +=5
s=data_new.sum(axis=0)
data_new = data_new.append(s, ignore_index=True)  # grand-total row
data_new

title='上市总数最多的12个行业的上市数量'
# Top 12 industries by the total row (index 6), buckets as rows.
temp4 = data_new.T.sort_values(by=[6],ascending=False,inplace=False).iloc[0:12,:-1].T
fig4 = temp4.plot(kind='line', subplots=True,sharex=True, sharey=True, fontsize=16, layout=(3,4),figsize=(18,18*0.618),use_index=True, title=title, legend=True, rot=90)
labels = ['1990-1994', '1995-1999', '2000-2004', '2005-2009', '2010-2014','2015-2019']  # bucket labels
x = np.arange(len(labels))  # the label locations
fig4[1,1].set_xticks(x)  # tick positions
fig4[1,1].set_xticklabels(labels)  # tick labels
fmt='%.0f'
yticks = mtick.FormatStrFormatter(fmt)
fig4[1,1].yaxis.set_major_formatter(yticks)  # integer y ticks
# fig4 is an ndarray of AxesSubplot; saving through any cell saves the
# whole figure — this uses the cell at row 1, col 1.
fig4[1,1].figure.savefig(r'D:\桌面的文件夹\实习\睿丛\%s.png' %title)
fig4[0,0].figure.show()
seq=temp4.T.index.tolist()
industryName = stri.join(seq)
s = pd.Series([title,industryName])
nameDF = nameDF.append(s, ignore_index=True)

# 5. Five-year buckets, shares (relative numbers).
# Deep copy so repeated runs don't mutate data_new's shape.
data_reg = copy.deepcopy(data_new)
data_reg['All']=data_reg.sum(axis=1)  # per-bucket totals in the last column
# Divide each row by its total, then drop the totals column.
data_reg=data_reg.div(data_reg.iloc[:,-1],axis=0).iloc[:,:-1]
title='上市总数最多的12个行业的上市占比'
temp5 = data_reg.T.sort_values(by=[6],ascending=False,inplace=False).iloc[0:12,:-1].T
fig5 = temp5.plot(kind='line', subplots=True,sharex=True, sharey=True, fontsize=16, layout=(3,4),figsize=(18,18*0.618),use_index=True, title=title, legend=True, rot=90)
labels = ['1990-1994', '1995-1999', '2000-2004', '2005-2009', '2010-2014','2015-2019']
x = np.arange(len(labels))  # the label locations
fig5[1,1].set_xticks(x)  # x tick positions
fig5[1,1].set_xticklabels(labels)  # x tick labels
# Percent format: first arg = value mapped to 100%, second = decimals.
fig5[1,1].yaxis.set_major_formatter(mtick.PercentFormatter(1,0))
fig5[1,1].figure.savefig(r'D:\桌面的文件夹\实习\睿丛\%s.png' %title)  # one cell saves the whole subplot grid
fig5[0,0].figure.show()
seq=temp5.T.index.tolist()
industryName = stri.join(seq)
s = pd.Series([title,industryName])
nameDF = nameDF.append(s, ignore_index=True)

# --- Classify industries by regressing share on time ---
# X = bucket index 0..5, Y = one industry's share series per row.
Y_train=data_reg.iloc[:-1,:].T
X_train = pd.DataFrame(np.arange(6).reshape((-1, 1)))
from sklearn.linear_model import LinearRegression
linreg = LinearRegression()

# Fit one regression per industry; collect the slopes.
i=0
box=np.array([])
while i < (Y_train.shape[0]):
    print(i)
    linreg.fit(X_train, Y_train.iloc[i,:])
    i +=1
    box = np.hstack((box, linreg.coef_))

# Fitted slopes
print(box)
Y_train[6] = box  # column 6 = slope per industry

# Charts
# 15 fastest-growing industries (largest positive slope).
temp11 = Y_train.sort_values(by=[6],ascending=False,inplace=False).iloc[:15,:-1].T
fig11 = temp11.plot(kind='line', ax=None, subplots=True,sharex=True, sharey=True, fontsize=16, layout=(3,5),figsize=(18,18*0.618),use_index=True, title='# 增长最快的15个行业', grid=None, legend=True,style= None, logx=False, logy=False, loglog=False, xticks=None, yticks=None, xlim=None, ylim=None, rot=0, xerr=None,secondary_y=False, sort_columns=False)

# 15 fastest-declining industries (most negative slope).
temp12 = Y_train.sort_values(by=[6],ascending=True,inplace=False).iloc[:15,:-1].T
fig12 = temp12.plot(kind='line', ax=None, subplots=True,sharex=True, sharey=True, fontsize=16, layout=(3,5),figsize=(18,18*0.618),use_index=True, title='增长前15的行业', grid=None, legend=True,style= None, logx=False, logy=False, loglog=False, xticks=None, yticks=None, xlim=None, ylim=None, rot=0, xerr=None,secondary_y=False, sort_columns=False)
9,913
6,231
from rest_framework import serializers
from django.db.models import fields
from django.contrib.auth.models import User
from .models import Clients, Products, Bills, BillsProducts


class UserSerializer(serializers.Serializer):
    """Manual serializer for creating Django auth users.

    The password is run through `User.set_password` so it is stored
    hashed, never as plain text.
    """
    id = serializers.ReadOnlyField()
    first_name = serializers.CharField()
    last_name = serializers.CharField()
    username = serializers.CharField()
    email = serializers.EmailField()
    password = serializers.CharField()

    def create(self, validate_data):
        """Create and persist a new User from the validated payload."""
        instance = User()
        instance.first_name = validate_data.get('first_name')
        instance.last_name = validate_data.get('last_name')
        instance.username = validate_data.get('username')
        instance.email = validate_data.get('email')
        # Hash the password instead of assigning it directly.
        instance.set_password(validate_data.get('password'))
        instance.save()
        return instance

    def validate_username(self, data):
        """Reject usernames that are already taken."""
        # Perf fix: the original materialized the whole queryset and called
        # len() on it; .exists() issues a cheap EXISTS query instead.
        if User.objects.filter(username=data).exists():
            raise serializers.ValidationError("Este usuario ya existe!")
        return data


class ClientSerializer(serializers.ModelSerializer):
    """Full client record including audit timestamps."""

    class Meta:
        model = Clients
        fields = ('id', 'document', 'first_name', 'last_name', 'email', 'created_on', 'update_at')


class ProductSerializer(serializers.ModelSerializer):
    """Full product record including stock and audit timestamps."""

    class Meta:
        model = Products
        fields = ('id', 'name', 'description', 'price', 'stock', 'created_on', 'update_at')


class BillSerializer(serializers.ModelSerializer):
    """Bill header: client, company data and identifying code."""

    class Meta:
        model = Bills
        fields = ('id', 'client_id', 'company_name', 'nit', 'code', 'created_on', 'update_at')


class BillsProductSerializer(serializers.ModelSerializer):
    """Join row linking one product to one bill."""

    class Meta:
        model = BillsProducts
        fields = ('id', 'bill_id', 'product_id', 'created_on', 'update_at')
1,846
512
#!/usr/bin/env python3


def mergesort(unsorted_list):
    """Sort `unsorted_list` in place using recursive merge sort."""
    if len(unsorted_list) <= 1:
        return  # zero or one element: already sorted
    mid = len(unsorted_list) // 2
    left_half = unsorted_list[:mid]
    right_half = unsorted_list[mid:]
    mergesort(left_half)
    mergesort(right_half)
    merge(unsorted_list, left_half, right_half)


def merge(original, left, right):
    """Merge the sorted lists `left` and `right` back into `original`."""
    i = j = k = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            original[k] = left[i]
            i += 1
        else:
            original[k] = right[j]
            j += 1
        k += 1
    # At most one of the halves still has elements; append the remainder.
    original[k:] = left[i:] + right[j:]


if __name__ == '__main__':
    demo = [-1, 1, 1, 0, -2, 199, 204, 1000, -400, 6]
    print(demo)
    mergesort(demo)
    print(demo)
904
353
import csv
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# --- Correlation of oil prices with Venezuelan GDP per capita ---

# Oil prices: one row per year, USD per barrel.
dfOil = pd.read_excel('./data/Oil Prices.xls', dtype={'Date': int, 'Value': float})
dfOil = dfOil.rename(columns={'Date': 'Year', 'Value': 'Oil Price per Barrel (USD)'})

# GDP per capita: keep only Venezuela, in chronological order.
dfGdp = pd.read_csv('./data/Per capita GDP at current prices - US Dollars.csv', header=0)
dfGdp = dfGdp.sort_values('Year', ascending=True)
dfGdp = dfGdp[dfGdp['Country or Area']=='Venezuela (Bolivarian Republic of)']
dfGdp = dfGdp[['Year', 'Value']]
dfGdp = dfGdp.rename(columns={'Value': 'GDP per Capita (USD)'})

# Index both frames by year so they join on it.
dfOil = dfOil.set_index('Year')
dfGdp = dfGdp.set_index('Year')
# print(dfGdp)
dfJoin = dfOil.join(dfGdp, on='Year', how='inner', lsuffix=' - Oil Price', rsuffix='- GDP per Capita')

print('------------------------------------------------------------------------------------------------')
print(dfJoin.corr(method='pearson'))  # Pearson correlation matrix
print('')
# Scatter with fitted regression line, saved to disk.
sns.lmplot(x='Oil Price per Barrel (USD)',y='GDP per Capita (USD)', data=dfJoin, fit_reg=True)
plt.savefig('./Oil Price to GDP per Capita')

# --- Correlation of GDP per capita with inflation ---
dfGdp = pd.read_csv('./data/Per capita GDP at current prices - US Dollars.csv', header=0)
dfGdp = dfGdp.sort_values('Year', ascending=True)
dfGdp = dfGdp[dfGdp['Country or Area']=='Venezuela (Bolivarian Republic of)']
dfGdp = dfGdp[['Year', 'Value']]
dfGdp = dfGdp.rename(columns={'Value': 'GDP per Capita (USD)'})
dfGdp = dfGdp.set_index('Year')

# Inflation file is wide (one column per year); reshape to long.
dfInflation = pd.read_csv('./data/Inflation.csv', header=0)
dfInflation = dfInflation[dfInflation['Country Name']=='Venezuela, RB']
dfInflation = dfInflation.set_index('Country Name')
obj = {'year': [], 'rate': []}
for index, row in dfInflation.iterrows():
    # Only 2009-2016 have usable inflation data here.
    for year in range(2009, 2017):
        obj['year'].append(year)
        obj['rate'].append(row[str(year)])
dfInflation = pd.DataFrame(data={'Inflation rate %': obj['rate']}, index=obj['year'])

dfJoin = dfGdp.join(dfInflation, on='Year', how='inner')
print('------------------------------------------------------------------------------------------------')
print(dfJoin.corr(method='pearson'))
print('')
sns.lmplot(x='Inflation rate %', y='GDP per Capita (USD)', data=dfJoin, fit_reg=True)
plt.savefig('./Inflation rate % to GDP per Capita')

# --- Correlation of GDP per capita with infant mortality ---
dfGdp = pd.read_csv('./data/Per capita GDP at current prices - US Dollars.csv', header=0)
dfGdp = dfGdp.sort_values('Year', ascending=True)
dfGdp = dfGdp[dfGdp['Country or Area']=='Venezuela (Bolivarian Republic of)']
dfGdp = dfGdp[['Year', 'Value']]
dfGdp = dfGdp.rename(columns={'Value': 'GDP per Capita (USD)'})
dfGdp = dfGdp.set_index('Year')

# Mortality file is also wide; reshape 1960-2016 to long.
dfMortality = pd.read_csv('./data/Infant Mortaility.csv', header=0)
dfMortality = dfMortality[dfMortality['Country Name']=='Venezuela, RB']
obj = {'year': [], 'deaths': []}
for index, row in dfMortality.iterrows():
    for year in range(1960, 2017):
        obj['year'].append(year)
        obj['deaths'].append(row[str(year)])
dfMortality = pd.DataFrame(data={'Infant deaths per 1,000 live births': obj['deaths']}, index=obj['year'])

dfJoin = dfGdp.join(dfMortality, on='Year', how='inner')
print('------------------------------------------------------------------------------------------------')
print(dfJoin.corr(method='pearson'))
print('')
sns.lmplot(x='Infant deaths per 1,000 live births',y='GDP per Capita (USD)', data=dfJoin, fit_reg=True)
plt.savefig('./Infant deaths per 1,000 live births rate % to GDP per Capita')
3,538
1,299
def to_string(bytes_or_string):
    """Return the argument as str, decoding UTF-8 bytes if necessary."""
    if isinstance(bytes_or_string, bytes):
        return bytes_or_string.decode('utf-8')
    return bytes_or_string


def to_bytes(bytes_or_string):
    """Return the argument as bytes, encoding str as UTF-8 if necessary."""
    if isinstance(bytes_or_string, str):
        return bytes_or_string.encode('utf-8')
    return bytes_or_string
362
121
# -*- coding: utf-8 -*-

from libs.cloud_formation_stack_resource_checker import CloudFormationStackResourceChecker

# Print the CloudFormation stack and resource counts reported by the checker.
checker = CloudFormationStackResourceChecker()
print(checker.get_stack_and_resource_count())
274
85
#!/bin/python
import re
import sys
import os

class Config:
    """Command-line configuration for the unity-build file generator."""
    def __init__(self):
        options = self.__parseOptions()
        self.projectFileName = options.project        # .vcxproj to read sources from
        self.sourcesPerFile = options.number          # sources merged per unity file
        self.mkFileName = options.makefile            # makefile to write
        self.mkProjectFileName = options.mkproject    # makefile template to read
        # Current directory name, used as the prefix for generated paths.
        self.destinationFolder = os.path.basename(os.getcwd())
        # Sources that must never be folded into a unity file.
        self.excludeFileNames = ['OrganicMotion/OrganicMotionClient.cpp']

    def __parseOptions(self):
        # Parse the command line (optparse is legacy but matches this script's era).
        from optparse import OptionParser
        optionParser = OptionParser()
        optionParser.add_option('-p', '--project', help='Project file name', default='../GameDllSDK.vcxproj', type='string')
        optionParser.add_option('-k', '--mkproject', help='Project file name', default='../Project.mk', type='string')
        optionParser.add_option('-n', '--number', help='Number of sources per file', default='25', type='int')
        optionParser.add_option('-m', '--makefile', help='Makefile name', default='Project.mk', type='string')
        (options, args) = optionParser.parse_args()
        return options

    def __str__(self):
        return 'projectFileName="%s" destinationFolder="%s" sourcesPerFile="%d" mkFileName="%s"' % (self.projectFileName, self.destinationFolder, self.sourcesPerFile, self.mkFileName)

class Parser:
    """Extracts C/C++ source names from a MSVC project file and groups them."""
    def __init__(self, config):
        self.config = config
        # Matches <ClCompile Include="..."/> entries in the .vcxproj XML.
        self.reFileName = re.compile(r'<ClCompile\s*Include\s*=\s*\"([^\"]*)\"\s*/?>', re.DOTALL)

    def parseFileNames(self):
        """Return the de-duplicated, non-excluded .c/.cpp files of the project."""
        fileNames = []
        projectFileContent = open(self.config.projectFileName).read()
        for match in self.reFileName.findall(projectFileContent):
            # Normalize Windows separators and leading './'.
            fileName = match.replace('\\', '/').strip('./')
            isSourceFile = fileName.endswith('.cpp') or fileName.endswith('.c')
            if isSourceFile and not fileName in fileNames and not fileName in self.config.excludeFileNames:
                fileNames.append(fileName)
        return fileNames

    def createUnityFileName(self, unityFileID):
        # Generated unity files are numbered GameSDK_<n>_uber.cpp.
        return 'GameSDK_%d_uber.cpp' % unityFileID

    def splitFileNames(self, list):
        """Group the source list into dict {unity file name: [sources]},
        at most `sourcesPerFile` sources per group."""
        splittedFilesDict = {}
        subList = []
        for elem in list:
            if len(subList) == self.config.sourcesPerFile:
                unityFileName = self.createUnityFileName(len(splittedFilesDict))
                splittedFilesDict[unityFileName] = subList
                subList = []
            subList.append(elem)
        # Flush the final, possibly short, group.
        if len(subList) > 0:
            unityFileName = self.createUnityFileName(len(splittedFilesDict))
            splittedFilesDict[unityFileName] = subList
        return splittedFilesDict

class Generator:
    """Writes the unity .cpp files and patches the makefile's unity section."""
    def __init__(self, config):
        self.config = config
        # Markers delimiting the unity-build section in the makefile template.
        self.unityBuildFirstLine = 'ifeq ($(MKOPTION_UNITYBUILD),1)'
        self.unityBuildLastLine = 'endif'

    def __writeRemovedSourceFiles(self, splittedFileNames, mkFile):
        # Emit the list of original sources to exclude from the normal build.
        print (self.unityBuildFirstLine, file=mkFile)
        print ('PROJECT_SOURCES_CPP_REMOVE += \\', file=mkFile)
        removedCounter = 0
        for (unityFileName, codeFileNames) in splittedFileNames.items():
            for codeFileName in codeFileNames:
                removedCounter = removedCounter + 1
                print ('\t%s \\' % codeFileName, file=mkFile)
        print ('', file=mkFile)
        print ('Writing removed sources in "%s" - %d' % (self.config.mkFileName, removedCounter))

    def __writeUnityFileNames(self, splittedFileNames, mkFile):
        # Emit the generated unity files to add to the build.
        print ('PROJECT_SOURCES_CPP_ADD += \\', file=mkFile)
        for (unityFileName, codeFileNames) in splittedFileNames.items():
            # NOTE(review): uses the module-level `config`, not self.config —
            # works because they are the same object at runtime; confirm intent.
            print ('\t%s/%s \\' % (config.destinationFolder, unityFileName), file=mkFile)
        print ('', file=mkFile)
        print (self.unityBuildLastLine, file=mkFile)
        print ('Writing unity file names to be compiled in "%s" - %d' % (self.config.mkFileName, len(splittedFileNames)))

    def __writeUnityFiles(self, splittedFileNames):
        # Write each unity .cpp that #includes its group of sources.
        unityFileNamesWritten = []
        for (unityFileName, codeFileNames) in splittedFileNames.items():
            print ('Generating unity file: %s - %d' % (unityFileName, len(codeFileNames)))
            unityFile = open(unityFileName, 'w')
            print ('#ifdef _DEVIRTUALIZE_\n\t#include <GameSDK_devirt_defines.h>\n#endif\n', file=unityFile)
            for codeFileName in codeFileNames:
                print ('#include "../%s"' % codeFileName, file=unityFile)
            print ('\n#ifdef _DEVIRTUALIZE_\n\t#include <GameSDK_wrapper_includes.h>\n#endif', file=unityFile)
            unityFile.flush()
            unityFileNamesWritten.append(unityFileName)
        # Truncate stale unity files from earlier runs so they compile empty.
        for fileName in os.listdir('./'):
            if fileName not in unityFileNamesWritten and fileName.endswith('_uber.cpp'):
                print ('Clearing:', fileName)
                file = open(fileName, 'w')
                file.close()

    def __writeProjectFile(self):
        # Copy the template makefile, replacing the unity-build section
        # (between the two marker lines) with freshly generated content.
        mkFile = open(self.config.mkFileName, 'w')
        mkPrjFile = open(self.config.mkProjectFileName)
        copyCurrentLine = True
        for line in mkPrjFile:
            if line.startswith(self.unityBuildFirstLine):
                copyCurrentLine = False
                # NOTE(review): `splittedFileNames` here is the module-level
                # global, not a parameter — confirm this coupling is intended.
                self.__writeRemovedSourceFiles(splittedFileNames, mkFile)
                self.__writeUnityFileNames(splittedFileNames, mkFile)
            if copyCurrentLine:
                print (line.rstrip('\n'), file=mkFile)
            if line.startswith(self.unityBuildLastLine):
                copyCurrentLine = True
        mkFile.flush()

    def writeFiles(self, splittedFileNames):
        """Entry point: write the makefile, then the unity source files."""
        try:
            self.__writeProjectFile()
            self.__writeUnityFiles(splittedFileNames)
        except IOError as errorMessage:
            print ('IO error: %s' % errorMessage)

# Script body: parse the project, group its sources, emit the files.
config = Config()
parser = Parser(config)
fileNames = parser.parseFileNames()
splittedFileNames = parser.splitFileNames(fileNames)
generator = Generator(config)
generator.writeFiles(splittedFileNames)
5,351
1,942
#%%
# https://preppindata.blogspot.com/2022/01/2022-week-4-prep-school-travel-plans.html
# 2022-01-26
import pandas as pd

RAW = pd.read_csv('2022/04/inputs/travel_plans.csv')

# Known misspellings of travel methods -> canonical spelling.
MISTAKES = {
    'Scoter': 'Scooter',
    'Walkk': 'Walk',
    'Carr': 'Car',
    'Bycycle': 'Bicycle',
    'Scootr': 'Scooter',
    'Wallk': 'Walk',
    'WAlk': 'Walk',
    'Waalk': 'Walk',
    'Helicopeter': 'Helicopter'
}

# Canonical method -> sustainability category.
# Fix: the original listed 'Helicopter' twice; duplicate dict keys are
# silently collapsed, so the dead entry is removed.
SUSTAINABILITY = {
    'Car': 'Non-Sustainable',
    'Bicycle': 'Sustainable',
    'Scooter': 'Sustainable',
    'Walk': 'Sustainable',
    'Aeroplane': 'Non-Sustainable',
    'Helicopter': 'Non-Sustainable',
    'Van': 'Non-Sustainable',
    "Mum's Shoulders": 'Sustainable',
    'Hopped': 'Sustainable',
    "Dad's Shoulders": 'Sustainable',
    'Skipped': 'Sustainable',
    'Jumped': 'Sustainable'
}

# Total trips per weekday (denominator for the percentage column).
trip_counts_per_day = (RAW
    .drop(columns=['Student ID'])
    .count()
    .rename('trips_per_day')
)

# Long format -> fix spellings -> count trips per (day, method) ->
# attach daily totals, sustainability flag and share of daily trips.
output = (RAW
    .melt(id_vars='Student ID', value_name='method', var_name='day')
    .assign(method = lambda df_: (df_['method']
                                  .map(MISTAKES)
                                  .fillna(df_['method'])),  # unknown names pass through
    )
    .groupby(['day', 'method'])['Student ID']
    .count()
    .reset_index()
    .rename(columns={'Student ID': 'number_of_trips'})
    .join(trip_counts_per_day, on='day')
    .assign(
        sustainable=lambda df_: df_['method'].map(SUSTAINABILITY),
        percent_trips_per_day=lambda df_: df_['number_of_trips'] / df_['trips_per_day']
    )
    .round(2)
    .rename(columns={'sustainable': 'Sustainable?',
                     'method': 'Method of Travel',
                     'day': 'Weekday',
                     'number_of_trips': 'Number of Trips',
                     'trips_per_day': 'Trips per day',
                     'percent_trips_per_day': '% of trips per day'})
).to_csv('2022/04/outputs/output.csv',
         columns=['Sustainable?', 'Method of Travel', 'Weekday',
                  'Number of Trips', 'Trips per day', '% of trips per day'],
         index=False)
2,275
844
# -*- coding: utf-8 -*-

from mamonsu.plugins.pgsql.plugin import PgsqlPlugin as Plugin
from mamonsu.plugins.pgsql.pool import Pooler


class PgInvalidIndexes(Plugin):
    """Mamonsu plugin: report the number of invalid PostgreSQL indexes per
    database and emit the matching Zabbix discovery rule/items/triggers."""
    # Polling interval in seconds.
    Interval = 60
    DEFAULT_CONFIG = {
        'Interval': str(60),  # Default interval (1 hour = 3600 sec)
        # NOTE(review): the comment above says 1 hour but the value is 60
        # seconds — confirm which is intended.
    }
    zbx_key = "invalid_indexes_count"
    # query_agent_discovery = "SELECT json_build_object ('data',json_agg(json_build_object('{#TABLE_IDX}', '" + zbx_key + "')));"

    # Select count on invalid indexes in database.
    # Indexes mid-way through CREATE INDEX CONCURRENTLY are excluded by the
    # NOT EXISTS clause so in-progress builds are not reported as broken.
    query = """
        SELECT COUNT(*)
        FROM pg_index i
        JOIN pg_class c ON i.indexrelid = c.oid
        JOIN pg_class c2 ON i.indrelid = c2.oid
        JOIN pg_namespace n2 ON c2.relnamespace = n2.oid
        WHERE (NOT i.indisready OR NOT i.indisvalid)
        AND NOT EXISTS (SELECT 1 FROM pg_stat_activity where datname = current_database() AND query ilike '%concurrently%' AND pid <> pg_backend_pid());
    """
    AgentPluginType = 'pg'
    # Zabbix item key stem and its discovery form with a format slot.
    key_rel_part = 'pgsql.' + zbx_key
    key_rel_part_discovery = key_rel_part+'{0}'

    def run(self, zbx):
        """Query every non-template, non-service database and send one
        invalid-index count per database plus the discovery payload."""
        objects = []
        for info_dbs in Pooler.query("select datname from pg_catalog.pg_database where datistemplate = false and datname not in ('mamonsu','postgres')"):
            objects.append({'{#TABLE_IDX}': info_dbs[0]})
            # Run the count inside the target database.
            result = Pooler.query(self.query, info_dbs[0])
            zbx.send(self.key_rel_part+'[{0}]'.format(info_dbs[0]), result[0][0])
        # Discovery payload listing all databases found.
        zbx.send(self.key_rel_part+'[]', zbx.json({'data': objects}))

    def discovery_rules(self, template, dashboard=False):
        """Build the Zabbix low-level-discovery rule, per-database item
        prototype and >0 trigger for invalid indexes."""
        rule = {
            'name': 'Invalid indexes in database discovery',
            'key': self.key_rel_part_discovery.format('[{0}]'.format(self.Macros[self.Type])),
            'filter': '{#TABLE_IDX}:.*'
        }
        items = [
            {'key': self.right_type(self.key_rel_part_discovery, var_discovery="{#TABLE_IDX},"),
             'name': 'Invalid indexes in database: {#TABLE_IDX}',
             'units': Plugin.UNITS.none,
             'value_type': Plugin.VALUE_TYPE.numeric_unsigned,
             'delay': self.Interval},
        ]
        conditions = [
            {
                'condition': [
                    {'macro': '{#TABLE_IDX}', 'value': '.*', 'formulaid': 'A'}
                ]
            }
        ]
        # Fire (priority 2) whenever the last count is greater than zero.
        triggers = [{
            'name': 'PostgreSQL: In the database {#TABLE_IDX} invalid indexes on {HOSTNAME} (value={ITEM.LASTVALUE})',
            'expression': '{#TEMPLATE:'+self.right_type(self.key_rel_part_discovery, var_discovery="{#TABLE_IDX},")+'.last()}&gt;0',
            'priority': 2
        }
        ]
        return template.discovery_rule(rule=rule, conditions=conditions, items=items, triggers=triggers)
2,864
928
"""This submodule makes it possible for `phasespace` and `DecayLanguage` to work together. More generally, the `GenMultiDecay` object can also be used as a high-level interface for simulating particles that can decay in multiple different ways. """ import sys from typing import Tuple from .genmultidecay import GenMultiDecay # noqa: F401 try: import zfit # noqa: F401 import zfit_physics as zphys # noqa: F401 from particle import Particle # noqa: F401 except ModuleNotFoundError as error: raise ModuleNotFoundError( "The fromdecay functionality in phasespace requires particle and zfit-physics. " "Either install phasespace[fromdecay] or particle and zfit-physics.", file=sys.stderr, ) from error __all__ = ("GenMultiDecay",) def __dir__() -> Tuple[str, ...]: return __all__
863
277
from reporter import GenerateReport
36
9
from keras import optimizers
from keras.applications import VGG16


def create_model(img_width, img_height):
    """Return a VGG16 convolutional base pre-trained on ImageNet.

    Args:
        img_width: input image width in pixels.
        img_height: input image height in pixels.

    The classifier head is excluded (include_top=False) so a custom head
    can be trained on top of the frozen features.
    """
    conv_base = VGG16(weights='imagenet',
                      include_top=False,
                      input_shape=(img_width, img_height, 3))  # 3 = number of channels in RGB pictures
    return conv_base


def compile_model(model):
    """Compile `model` for binary classification with the Adam optimizer."""
    # Bug fix: `optimizers` was used without being imported, so this
    # function raised NameError; it is now imported at module top.
    model.compile(optimizer=optimizers.Adam(),
                  loss='binary_crossentropy',
                  metrics=['acc'])


def train_model(model, X):
    """Fit `model` on the training data `X`."""
    # Bug fix: X was accepted but never passed, so fit() ran without data.
    model.fit(X)
457
153
from .base_options import BaseOptions


class TimerOptions(BaseOptions):
    """Options for the test/retrieval phase: extends the shared BaseOptions
    with output paths, feature-extraction and nearest-neighbour settings,
    and marks the session as non-training."""

    def initialize(self):
        # Register the shared options first, then the retrieval-specific ones.
        BaseOptions.initialize(self)
        # Output locations for retrieval results and extracted features.
        self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        self.parser.add_argument('--feature_dir', type=str, default='./features/', help='saves features here.')
        # How node features are aggregated and normalized before search.
        self.parser.add_argument('--pooling', type=str, default=None, help='global_mean_pool, global_add_pool, global_max_pool, global_sort_pool')
        self.parser.add_argument('--normalize', type=int, default=0, help='0, 1, or 2')
        self.parser.add_argument('--which_layer', type=str, default='gb_pool', help='which layer to extract features?')
        # FAISS-style index type(s) used for similarity search.
        self.parser.add_argument('--search_methods', nargs='+', type=str, default='IndexFlatL2', help='IndexFlatL2, etc')
        # Which checkpoint to evaluate.
        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        self.parser.add_argument('--num_aug', type=int, default=1, help='# of augmentation files')
        # NOTE(review): help text looks copy-pasted — this is the number of
        # neighbours to retrieve, not augmentation files; confirm.
        self.parser.add_argument('--num_neigb', type=int, default=4, help='# of augmentation files')
        self.parser.add_argument('--query_index', type=int, default=0, help='test the results of an example')
        # Signals downstream code that this run is inference, not training.
        self.is_train = False
1,324
406