prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
# coding:utf-8
import logging

import numpy as np
from scipy.linalg import svd

from mla.base import BaseEstimator

np.random.seed(1000)


class PCA(BaseEstimator):
    y_required = False

    def __init__(self, n_components, solver="svd"):
        """Principal component analysis (PCA) implementation.

        Transforms a dataset of possibly correlated values into n linearly
        uncorrelated components. The components are ordered such that the
        first has the largest possible variance and each following component
        has the largest possible variance given the previous components.
        This causes the early components to contain most of the variability
        in the dataset.

        Parameters
        ----------
        n_components : int
            Number of principal components to keep.
        solver : str, default 'svd'
            {'svd', 'eigen'}
        """
        self.solver = solver
        self.n_components = n_components
        self.components = None
        self.mean = None

    def fit(self, X, y=None):
        """Store the per-feature mean and compute the principal components."""
        self.mean = np.mean(X, axis=0)
        self._decompose(X)

    def _decompose(self, X):
        # Mean centering: PCA directions are defined on centered data.
        X = X.copy()
        X -= self.mean

        if self.solver == "svd":
            # scipy returns singular values in descending order, so the
            # rows of Vh are already ranked by explained variance.
            _, s, Vh = svd(X, full_matrices=True)
        elif self.solver == "eigen":
            s, Vh = np.linalg.eig(np.cov(X.T))
            Vh = Vh.T
            # Bug fix: np.linalg.eig returns eigenpairs in no particular
            # order, so without sorting the first n_components rows were
            # not the top-variance directions. Sort by decreasing eigenvalue.
            order = np.argsort(s)[::-1]
            s = s[order]
            Vh = Vh[order]
        else:
            # Previously an unknown solver fell through to a NameError on
            # `s`; fail fast with a clear message instead.
            raise ValueError("Unknown solver: %s" % self.solver)

        s_squared = s ** 2
        variance_ratio = s_squared / s_squared.sum()
        logging.info("Explained variance ratio: %s" % (variance_ratio[0: self.n_components]))

        self.components = Vh[0: self.n_components]

    def transform(self, X):
        """Project X (centered with the fitted mean) onto the components."""
        X = X.copy()
        X -= self.mean
        return np.dot(X, self.components.T)

    def _predict(self, X=None):
        return self.transform(X)
"""
Add user screen name to whitelist if it is not to be unfollowed """ whitelist = [
]
['ne'] in PRE_MERGE_NETAG) b0_atomics['isne']=b0_atomics['ne'] in PRE_MERGE_NETAG b0_atomics['hastrace'] = len(self.A.nodes[self.cidx].incoming_traces) > 0 # prop feature b0_atomics['isarg']=self.cidx in s0_args if s0_args else NOT_ASSIGNED b0_atomics['arglabel']=s0_args[self.cidx] if b0_atomics['isarg'] else NOT_ASSIGNED b0_atomics['isprd']=self.cidx in s0_prds if s0_prds else NOT_ASSIGNED b0_atomics['prdlabel']=s0_prds[self.cidx] if b0_atomics['isprd'] else NOT_ASSIGNED if isinstance(self.cidx,int) and isinstance(self.idx,int): path,direction = GraphState.deptree.get_path(self.cidx,self.idx) if self.A.nodes[self.idx].end - self.A.nodes[self.idx].start > 1: path_pos_str = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) for i in path[1:-1] if i not in range(self.A.nodes[self.idx].start,self.A.nodes[self.idx].end)] path_x_str_pp = [('X','X') if not isprep(GraphState.sent[i]) else GraphState.sent[i]['form'] for i in path[1:-1] if i not in range(self.A.nodes[self.idx].start,self.A.nodes[self.idx].end)] else: path_pos_str = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) for i in path[1:-1]] path_x_str_pp = [('X','X') if not isprep(GraphState.sent[i]) else GraphState.sent[i]['form'] for i in path[1:-1]] path_pos_str.insert(0,GraphState.sent[path[0]]['rel']) path_pos_str.append(GraphState.sent[path[-1]]['rel']) path_x_str_pp.insert(0,GraphState.sent[path[0]]['rel']) path_x_str_pp.append(GraphState.sent[path[-1]]['rel']) b0_atomics['pathp'] = path_pos_str b0_atomics['pathprep'] = path_x_str_pp b0_atomics['pathpwd'] = str(path_pos_str) + direction b0_atomics['pathprepwd'] = str(path_x_str_pp) + direction else: b0_atomics['pathp'] = EMPTY b0_atomics['pathprep'] = EMPTY b0_atomics['pathpwd'] = EMPTY b0_atomics['pathprepwd'] = EMPTY b0_atomics['apathx'] = EMPTY b0_atomics['apathp'] = EMPTY b0_atomics['apathprep'] = EMPTY b0_atomics['apathxwd'] = EMPTY b0_atomics['apathpwd'] = EMPTY b0_atomics['apathprepwd'] = EMPTY else: b0_atomics = EMPTY if action['type'] in 
[REATTACH,REENTRANCE]: #child_to_add = action['child_to_add'] if action['type'] == REATTACH: parent_to_attach = action['parent_to_attach'] else: parent_to_attach = action['parent_to_add'] if parent_to_attach is not None: a0_atomics = GraphState.sent[parent_to_attach].copy() if isinstance(parent_to_attach,int) else ABT_TOKEN #GraphState.abt_tokens[parent_to_attach] a0_brown_repr = BROWN_CLUSTER[a0_atomics['form']] a0_atomics['brown4'] = a0_brown_repr[:4] if len(a0_brown_repr) > 3 else a0_brown_repr a0_atomics['brown6'] = a0_brown_repr[:6] if len(a0_brown_repr) > 5 else a0_brown_repr a0_atomics['brown8'] = a0_brown_repr[:8] if len(a0_brown_repr) > 7 else a0_brown_repr a0_atomics['brown10'] = a0_brown_repr[:10] if len(a0_brown_repr) > 9 else a0_brown_repr a0_atomics['brown20'] = a0_brown_repr[:20] if len(a0_brown_repr) > 19 else a0_brown_repr a0_atomics['concept'] = self.A.nodes[parent_to_attach].tag aprs2,aprs1,ap1,alsb,arsb,ar2sb = self.get_node_context(parent_to_attach) a0_atomics['p1']=ap1 a0_atomics['lsb']=alsb a0_atomics['rsb']=arsb a0_atomics['r2sb']=ar2sb a0_atomics['nswp']=self.A.nodes[parent_to_attach].num_swap a0_atomics['isne']=a0_atomics['ne'] is not 'O' itr = list(self.A.nodes[self.cidx].incoming_traces) tr = [t for r,t in itr] a0_atomics['istrace'] = parent_to_attach in tr if len(tr) > 0 else EMPTY a0_atomics['rtr'] = itr[tr.index(parent_to_attach)][0] if parent_to_attach in tr else EMPTY a0_atomics['hasnsubj'] = b0_atomics['rel'] in set(GraphState.sent[c]['rel'] for c in self.A.nodes[parent_to_attach].children if isinstance(c,int))
#a0_atomics['iscycle'] = parent_to_attach in self.A.nodes[self.cidx].children or parent_to_att
ach in self.A.nodes[self.cidx].parents # prop feature b0_prds = None b0_args = None if isinstance(self.cidx,int) and GraphState.sent[self.cidx].get('pred',{}): b0_prds = GraphState.sent[self.cidx]['pred'] if isinstance(self.cidx,int) and GraphState.sent[self.cidx].get('args',{}): b0_args = GraphState.sent[self.cidx]['args'] a0_atomics['isprd']=parent_to_attach in b0_prds if b0_prds else NOT_ASSIGNED a0_atomics['prdlabel']=b0_prds[parent_to_attach] if a0_atomics['isprd'] else NOT_ASSIGNED a0_atomics['isarg']=parent_to_attach in b0_args if b0_args else NOT_ASSIGNED a0_atomics['arglabel']=b0_args[parent_to_attach] if a0_atomics['isarg'] else NOT_ASSIGNED if isinstance(self.cidx,int) and isinstance(parent_to_attach,int): path,direction = GraphState.deptree.get_path(self.cidx,parent_to_attach) #path_x_str=[(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) for i in path[1:-1]] if self.A.nodes[parent_to_attach].end - self.A.nodes[parent_to_attach].start > 1: apath_x_str = [('X','X') for i in path[1:-1] if i not in range(self.A.nodes[parent_to_attach].start,self.A.nodes[parent_to_attach].end)] apath_pos_str = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) for i in path[1:-1] if i not in range(self.A.nodes[parent_to_attach].start,self.A.nodes[parent_to_attach].end)] apath_pos_str_pp = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) if not isprep(GraphState.sent[i]) else GraphState.sent[i]['form'] for i in path[1:-1] if i not in range(self.A.nodes[parent_to_attach].start,self.A.nodes[parent_to_attach].end)] else: apath_x_str = [('X','X') for i in path[1:-1]] apath_pos_str = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) for i in path[1:-1]] apath_pos_str_pp = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) if not isprep(GraphState.sent[i]) else GraphState.sent[i]['form'] for i in path[1:-1]] apath_x_str.insert(0,GraphState.sent[path[0]]['rel']) apath_x_str.append(GraphState.sent[path[-1]]['rel']) 
apath_pos_str.insert(0,GraphState.sent[path[0]]['rel']) apath_pos_str.append(GraphState.sent[path[-1]]['rel']) apath_pos_str_pp.insert(0,GraphState.sent[path[0]]['rel']) apath_pos_str_pp.append(GraphState.sent[path[-1]]['rel']) #path_label_str = [GraphState.sent[i]['rel'] for i in path] # dependency label #path_lemma_str.insert(0,GraphState.sent[path[0]]['rel']) #path_lemma_str.append(GraphState.sent[path[-1]]['rel']) b0_atomics['apathx'] = apath_x_str b0_atomics['apathp'] = apath_pos_str b0_atomics['apathprep'] = apath_pos_str_pp b0_atomics['apathxwd'] = str(apath_x_str) + direction b0_atomics['apathpwd'] = str(apath_pos_str) + direction b0_atomics['apathprepwd'] = str(apath_pos_str_pp) + direction #a0_atomics['pathl'] = path_label_str else:
# -*- coding: utf-8 -*- #+---------------------------------------------------------------------------+ #| 01001110 01100101 01110100 01111010 0110
1111 01100010 | #| | #| Netzob : Inferring communication protocols | #+---------------------------------------------------------------------------+ #| Copyright (C) 2011 Georges Bossert and Frédéric Guihéry | #| This program is free software: you can redistribute it and/or modify | #| it under the terms of the GNU
General Public License as published by | #| the Free Software Foundation, either version 3 of the License, or | #| (at your option) any later version. | #| | #| This program is distributed in the hope that it will be useful, | #| but WITHOUT ANY WARRANTY; without even the implied warranty of | #| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | #| GNU General Public License for more details. | #| | #| You should have received a copy of the GNU General Public License | #| along with this program. If not, see <http://www.gnu.org/licenses/>. | #+---------------------------------------------------------------------------+ #| @url : http://www.netzob.org | #| @contact : contact@netzob.org | #| @sponsors : Amossys, http://www.amossys.fr | #| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ | #+---------------------------------------------------------------------------+ #+---------------------------------------------------------------------------+ #| Standard library imports #+---------------------------------------------------------------------------+ import logging #+---------------------------------------------------------------------------+ #| Local application imports #+---------------------------------------------------------------------------+ from netzob.Common.Type.Format import Format from netzob.Common.Models.L2NetworkMessage import L2NetworkMessage from netzob.Common.Models.Factories.L3NetworkMessageFactory import L3NetworkMessageFactory from netzob.Common.Property import Property ## Remarques : # - Peut-être moins clair de parler de Layer 3 source Adress que IP Adress... 
class L3NetworkMessage(L2NetworkMessage): """Definition of a layer 3 network message""" def __init__(self, id, timestamp, data, l2Protocol, l2SourceAddress, l2DestinationAddress, l3Protocol, l3SourceAddress, l3DestinationAddress, pattern=[]): if len(pattern) == 1: pattern.insert(0, str(l3DestinationAddress)) super(L3NetworkMessage, self).__init__(id, timestamp, data, l2Protocol, l2SourceAddress, l2DestinationAddress, pattern=[]) self.type = "L3Network" self.l3Protocol = str(l3Protocol) self.l3SourceAddress = str(l3SourceAddress) self.l3DestinationAddress = str(l3DestinationAddress) def getFactory(self): return L3NetworkMessageFactory def getL3Protocol(self): return self.l3Protocol def getL3SourceAddress(self): return self.l3SourceAddress def getL3DestinationAddress(self): return self.l3DestinationAddress def getProperties(self): properties = super(L3NetworkMessage, self).getProperties() properties.append(Property('Layer 3 Protocol', Format.STRING, self.getL3Protocol())) properties.append(Property('Layer 3 Source Address', Format.IP, self.getL3SourceAddress())) properties.append(Property('Layer 3 Destination Address', Format.IP, self.getL3DestinationAddress())) return properties
def sync(self): self.googlecalendar.read() def load_preferences(self): configuration = Configuration() self.time = configuration.get('time') self.theme = configuration.get('theme') self.calendars = configuration.get('calendars') self.visible_calendars = [] for calendar in self.calendars: if calendar['visible']: self.visible_calendars.append(calendar['id']) def work(self): self.update_menu(check=True) if (time.time()-self.actualization_time) > self.time*3600: if internet_on(): self.sync() self.actualization_time = time.time() return True def create_menu(self): self.menu = Gtk.Menu() self.menu_events = [] for i in range(10): menu_event = EventMenuItem('%s'%i) menu_event.show() menu_event.set_visible(False) menu_event.connect('activate',self.on_menu_event_activate) self.menu.append(menu_event) self.menu_events.append(menu_event) add2menu(self.menu) self.menu_add_new_calendar = add2menu(self.menu, text = _('Add new calendar'), conector_event = 'activate',conector_action = self.on_menu_add_new_calendar) self.menu_add_new_event = add2menu(self.menu, text = _('Add new event'), conector_event = 'activate',conector_action = self.on_menu_add_new_event) add2menu(self.menu) self.menu_refresh = add2menu(self.menu, text = _('Sync with google calendar'),
conector_event = 'activate',conector_action = self.on_menu_refresh) self.menu_show_calendar = add2menu(self.menu, text = _('Show Calendar'), conector_event = 'activate',conector_action = self.menu_show_calendar_response) self.menu_preferences = add2menu(self.menu, text = _('Preferences'), conector_event = 'activa
te',conector_action = self.menu_preferences_response) add2menu(self.menu) menu_help = add2menu(self.menu, text =_('Help')) menu_help.set_submenu(self.get_help_menu()) add2menu(self.menu) add2menu(self.menu, text = _('Exit'), conector_event = 'activate',conector_action = self.menu_exit_response) self.menu.show() self.indicator.set_menu(self.menu) def set_menu_sensitive(self,sensitive = False): self.menu_add_new_calendar.set_sensitive(sensitive) self.menu_add_new_event.set_sensitive(sensitive) self.menu_refresh.set_sensitive(sensitive) self.menu_show_calendar.set_sensitive(sensitive) self.menu_preferences.set_sensitive(sensitive) self.menu_about.set_sensitive(sensitive) def update_menu(self,check=False): # now = datetime.datetime.now() normal_icon = os.path.join(comun.ICONDIR,'%s-%s-normal.svg'%(now.day,self.theme)) starred_icon = os.path.join(comun.ICONDIR,'%s-%s-starred.svg'%(now.day,self.theme)) # self.indicator.set_icon(normal_icon) self.indicator.set_attention_icon(starred_icon) # events2 = self.googlecalendar.getNextTenEvents(self.visible_calendars) if check and len(self.events)>0: for event in events2: if not is_event_in_events(event,self.events): msg = _('New event:')+'\n' if 'summary' in event.keys: msg += event.get_start_date_string() + ' - '+ event['summary'] else: msg += event.get_start_date_string() self.notification.update('Calendar Indicator',msg,comun.ICON_NEW_EVENT) self.notification.show() for event in self.events: if not is_event_in_events(event,events2): msg = _('Event finished:') + '\n' if 'summary' in event.keys: msg += event.get_start_date_string()+' - '+event['summary'] else: msg += event.get_start_date_string() self.notification.update('Calendar Indicator',msg,comun.ICON_FINISHED_EVENT) self.notification.show() self.events = events2 for i,event in enumerate(self.events): self.menu_events[i].set_event(event) self.menu_events[i].set_visible(True) for i in range(len(self.events),10): self.menu_events[i].set_visible(False) now = 
datetime.datetime.now() if len(self.events)>0: com = self.events[0].get_start_date() if now.year == com.year and now.month == com.month and now.day == com.day and now.hour == com.hour: self.indicator.set_status(appindicator.IndicatorStatus.ATTENTION) else: self.indicator.set_status(appindicator.IndicatorStatus.ACTIVE) else: self.indicator.set_status(appindicator.IndicatorStatus.ACTIVE) while Gtk.events_pending(): Gtk.main_iteration() def get_help_menu(self): help_menu =Gtk.Menu() # add2menu(help_menu,text = _('In Launchpad'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('https://launchpad.net/calendar-indicator')) add2menu(help_menu,text = _('Get help online...'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('https://answers.launchpad.net/calendar-indicator')) add2menu(help_menu,text = _('Translate this application...'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('https://translations.launchpad.net/calendar-indicator')) add2menu(help_menu,text = _('Report a bug...'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('https://bugs.launchpad.net/calendar-indicator')) add2menu(help_menu) web = add2menu(help_menu,text = _('Homepage'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('http://www.atareao.es/tag/calendar-indicator')) twitter = add2menu(help_menu,text = _('Follow us in Twitter'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('https://twitter.com/atareao')) googleplus = add2menu(help_menu,text = _('Follow us in Google+'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('https://plus.google.com/118214486317320563625/posts')) facebook = add2menu(help_menu,text = _('Follow us in Facebook'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('http://www.facebook.com/elatareao')) add2menu(help_menu) self.menu_about = add2menu(help_menu,text = _('About'),conector_event = 
'activate',conector_action = self.menu_about_response) # web.set_image(Gtk.Image.new_from_file(os.path.join(comun.SOCIALDIR,'web.svg'))) web.set_always_show_image(True) twitter.set_image(Gtk.Image.new_from_file(os.path.join(comun.SOCIALDIR,'twitter.svg'))) twitter.set_always_show_image(True) googleplus.set_image(Gtk.Image.new_from_file(os.path.join(comun.SOCIALDIR,'googleplus.svg'))) googleplus.set_always_show_image(True) facebook.set_image(Gtk.Image.new_from_file(os.path.join(comun.SOCIALDIR,'facebook.svg'))) facebook.set_always_show_image(True) # help_menu.show() return help_menu def on_menu_add_new_event(self,widget): ew = EventWindow(self.googlecalendar.calendars.values()) if ew.run() == Gtk.ResponseType.ACCEPT: calendar_id = ew.get_calendar_id() summary = ew.get_summary() start_date = ew.get_start_date() end_date = ew.get_end_date() description = ew.get_description() ew.destroy() new_event = self.googlecalendar.add_event(calendar_id, summary, start_date, end_date, description) if new_event is not None: self.googlecalendar.calendars[calendar_id]['events'][new_event['id']] = new_event self.update_menu(check=True) ew.destroy() def on_menu_event_activate(self,widget): ew = EventWindow(self.googlecalendar.calendars.values(),widget.get_event()) if ew.run() == Gtk.ResponseType.ACCEPT: if ew.get_operation() == 'DELETE': ew.destroy() md = Gtk.MessageDialog( parent = None, flags = Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT, type = Gtk.MessageType.ERROR, buttons = Gtk.ButtonsType.OK_CANCEL, message_format = _('Are you sure you want to revove this event?')) if md.run() == Gtk.ResponseType.OK: md.destroy() event = widget.get_event() if self.googlecalendar.remove_event(event['calendar_id'],event['id']): self.googlecalendar.calendars[event['calendar_id']]['events'].pop(event['id'],True) self.update_menu(check=True) md.destroy() elif ew.get_operation() == 'EDIT': event = widget.get_event() event_id = event['id'] calendar_id = ew.get_calendar_id() summary = 
ew.get_summary() start_date = ew.get_start_date() end_date = ew.get_end_date() description = ew.get_description() ew.de
continue if result is None: continue vectors.append( (hexlify(public_key), hexlify(private_key), hexlify(shared), result) ) return vectors def generate_ecdh(filename): vectors = [] data = load_json_testvectors(filename) if not keys_in_dict(data, {"algorithm", "testGroups"}): raise DataError() if data["algorithm"] != "ECDH": raise DataError() for test_group in data["testGroups"]: if not keys_in_dict(test_group, {"tests"}): raise DataError() for test in test_group["tests"]: if not keys_in_dict( test, {"public", "private", "shared", "result", "curve"} ): raise DataError() try: public_key = unhexlify(test["public"]) curve_name = parse_curve_name(test["curve"]) private_key = parse_signed_hex(test["private"]) shared = unhexlify(test["shared"]) result = parse_result(test["result"]) except Exception: raise DataError() try: private_key = parse_ecdh256_privkey(private_key) except ParseError: continue try: key_curve_name, public_key = parse_ec_pubkey(public_key) except NotSupported: continue except ParseError: continue if key_curve_name != curve_name: continue if result is None: continue vectors.append( ( curve_name, hexlify(public_key), hexlify(private_key), hexlify(shared), result, ) ) return vectors def generate_ecdsa(filename): vectors = [] data = load_json_testvectors(filename) if not keys_in_dict(data, {"algorithm", "testGroups"}): raise DataError() if data["algorithm"] != "ECDSA": raise DataError() for test_group in data["testGroups"]: if not keys_in_dict(test_group, {"tests", "keyDer", "sha"}): raise DataError() try: public_key = unhexlify(test_group["keyDer"]) except Exception: raise DataError() try: curve_name, public_key = parse_ec_pubkey(public_key) except NotSupported: continue except ParseError: continue try: hasher = parse_digest(test_group["sha"]) except NotSupported: continue for test in test_group["tests"]: if not keys_in_dict(test, {"sig", "msg", "result"}): raise DataError() try: signature = unhexlify(test["sig"]) message = unhexlify(test["msg"]) result = 
parse_result(test["result"]) except Exception: raise DataError() if result is None: continue try: signature = parse_ecdsa256_signature(signature) except ParseError: continue vectors.append( ( curve_name, hexlify(public_key), hasher, hexlify(message), hexlify(signature), result, ) ) return vectors def generate_eddsa(filename): vectors = [] data = load_json_testvectors(filename) if not keys_in_dict(data, {"algorithm", "testGroups"}): raise DataError() if data["algorithm"] != "EDDSA": raise DataError() for test_group in data["testGroups"]: if not keys_in_dict(test_group, {"tests", "keyDer"}): raise DataError() try: public_key = unhexlify(test_group["keyDer"]) except Exception: raise DataError() try: public_key = parse_ed_pubkey(public_key) except ParseError: continue for test in test_group["tests"]: if not keys_in_dict(test, {"sig", "msg", "result"}): raise DataError() try: signature = unhexlify(test["sig"]) message = unhexlify(test["msg"]) result = parse_result(test["result"]) except Exception: raise DataError() if result is None: continue try: signature = parse_eddsa_signature(signature) except ParseError: continue vectors.append( (hexlify(public_key), hexlify(message), hexlify(signature), result) ) return vectors dir = os.path.abspath(os.path.dirname(__file__)) lib = ctypes.cdll.LoadLibrary(os.path.join(dir, "libtrezor-crypto.so")) testvectors_directory = os.path.join(dir, "wycheproof/testvectors") context_structure_length = 1024 ecdh_vectors = generate_ecdh("ecdh_test.json") curve25519_dh_vectors = generate_curve25519_dh("x25519_test.json") eddsa_vectors = generate_eddsa("eddsa_test.json") ecdsa_vectors = ( generate_ecdsa("ecdsa_test.json") + generate_ecdsa("ecdsa_secp256k1_sha256_test.json") + generate_ecdsa("ecdsa_secp256r1_sha256_test.json") ) ecdh_vectors = ( generate_ecdh("ecdh_test.json") + generate_ecdh("ecdh_secp256k1_test.json") + generate_ecdh("ecdh_secp256r1_test.json") ) chacha_poly_vectors = generate_chacha_poly("chacha20_poly1305_test.json") 
aes_vectors = generate_aes("aes_cbc_pkcs5_test.json") @pytest.mark.parametrize("public_key, message, signature, result", eddsa_vectors) def test_eddsa(public_key, message, signature, result): public_key = unhexlify(public_key) signature = unhexlify(signature) message = unhexlify(message) computed_result = ( lib.ed25519_sign_open(message, len(message), public_key, signature) == 0 ) assert result == computed_result @pytest.mark.parametrize( "curve_name, public_key, hasher, message, signature, result", ecdsa_vectors ) def test_ecdsa(curve_name, public_key, hasher, message, signature, result): curve = get_curve_by_name(curve_name)
if curve is None: raise NotSupp
orted("Curve not supported: {}".format(curve_name)) public_key = unhexlify(public_key) signature = unhexlify(signature) message = unhexlify(message) computed_result = ( lib.ecdsa_verify(curve, hasher, public_key, signature, message, len(message)) == 0 ) assert result == computed_result @pytest.mark.parametrize( "public_key, private_key, shared, result", curve25519_dh_vectors ) def test_curve25519_dh(public_key, private_key, shared, result): public_key = unhexlify(public_key) private_key = unhexlify(private_key) shared = unhexlify(shared) computed_shared = bytes([0] * 32) lib.curve25519_scalarmult(computed_shared, private_key, public_key) computed_result = shared == computed_shared assert result == computed_result @pytest.mark.parametrize( "curve_name, public_key, private_key, shared, result", ecdh_vectors ) def test_ecdh(curve_name, public_key, private_key, shared, result): curve = get_curve_by_name(curve_name) if curve is None: raise NotSupported("Curve not supported: {}".format(curve_name)) public_key = unhexlify(public_key) private_key = unhexlify(private_key) shared = unhexlify(shared) computed_shared = bytes([0] * 2 * 32) lib.ecdh_multiply(curve, private_key, public_key, computed_shared) computed_shared = computed_shared[1:33] computed_result = shared == computed_shared assert result == computed_result @pytest.mark.parametrize( "key, iv, associated_data, plaintext, ciphertext, tag, result", chacha_poly_vectors ) def test_chacha_poly(key, iv, associated_data, plaintext, ciphertext, tag, result): key = unhexlify(key) iv = unhexlify(iv) associated_data = unhexlify(associat
# -*- coding: utf-8 -*-
from .common import *


class ReferenceDescriptorTest(TestCase):
    """Tests for the reference-descriptor list REST endpoint."""

    def setUp(self):
        # Register exactly one descriptor, bound to the test_app.Model type.
        self.reference_descriptor = ReferenceDescriptor.objects.create(
            content_type=ContentType.objects.get_for_model(Model))
        self.client = JSONClient()

    def test_reference_descriptor_list(self):
        # The endpoint should expose the single registered descriptor with
        # its dotted model name, verbose name and reference-list URL.
        url = reverse('business-logic:rest:reference-descriptor-list')
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)
        _json = response_json(response)
        self.assertIsInstance(_json, list)
        self.assertEqual(1, len(_json))
        descriptor = _json[0]
        model = 'test_app.Model'
        self.assertEqual(model, descriptor['name'])
        self.assertEqual('Test Model', descriptor['verbose_name'])
        self.assertEqual(reverse('business-logic:rest:reference-list',
                                 kwargs=dict(model=model)),
                         descriptor['url'])

    def test_unregistered_reference_list_not_found(self):
        # A model with no ReferenceDescriptor registered must return 404.
        model = 'business_logic.ReferenceDescriptor'
        url = reverse('business-logic:rest:reference-list', kwargs=dict(model=model))
        response = self.client.get(url)
        self.assertEqual(404, response.status_code)

    def test_notexists_model_not_found(self):
        # Unknown or malformed model identifiers must also return 404.
        for model in ('ooo.XXX', 'password'):
            url = reverse('business-logic:rest:reference-list', kwargs=dict(model=model))
            response = self.client.get(url)
            self.assertEqual(404, response.status_code)


class ReferenceListTest(TestCase):
    """Tests for the paginated reference-list REST endpoint."""

    def setUp(self):
        self.reference_descriptor = ReferenceDescriptor.objects.create(
            content_type=ContentType.objects.get_for_model(Model))
        self.client = JSONClient()
        model = 'test_app.Model'
        self.url = reverse('business-logic:rest:reference-list', kwargs=dict(model=model))
        # 11 rows named str_000 .. str_101010 (digit repeated three times).
        self.test_models = []
        for i in range(11):
            self.test_models.append(Model.objects.create(string_value='str_{}'.format(str(i) * 3)))

    def test_reference_list(self):
        response = self.client.get(self.url)
        self.assertEqual(200, response.status_code)
        _json = response_json(response)
        self.assertIsInstance(_json, dict)
        self.assertEqual(11, len(_json['results']))
        reference = _json['results'][0]
        self.assertEqual(self.test_models[0].id, reference['id'])
        self.assertEqual(str(self.test_models[0]), reference['name'])

    def test_reference_list_search_not_configured(self):
        # Searching without `search_fields` configured is a client error.
        response = self.client.get(self.url, dict(search='111'))
        self.assertEqual(400, response.status_code)
        _json = response_json(response)
        self.assertEqual(
            ['ReferenceDescriptor for `test_app.Model` are not configured: incorrect `search_fields` field'],
            _json)

    def test_reference_list_search(self):
        self.reference_descriptor.search_fields = 'string_value'
        self.reference_descriptor.save()
        # 'str_111' is the only row matching the search term.
        response = self.client.get(self.url, dict(search='111'))
        _json = response_json(response)
        self.assertEqual(1, len(_json['results']))

    def test_reference_list_search_related_fields(self):
        # Search may traverse a foreign key via the `__` lookup syntax.
        self.reference_descriptor.search_fields = 'foreign_value__string_value'
        self.reference_descriptor.save()
        test_model = self.test_models[2]
        test_related_model = RelatedModel.objects.create(string_value='xxx')
        test_model.foreign_value = test_related_model
        test_model.save()
        response = self.client.get(self.url, dict(search='xxx'))
        _json = response_json(response)
        self.assertEqual(1, len(_json['results']))
        reference = _json['results'][0]
        self.assertEqual(test_model.id, reference['id'])


class ReferenceViewTest(TestCase):
    """Tests for the single-reference detail REST endpoint."""

    def setUp(self):
        self.reference_descriptor = ReferenceDescriptor.objects.create(
            content_type=ContentType.objects.get_for_model(Model))
        self.client = JSONClient()
        model = 'test_app.Model'
        self.test_model = Model.objects.create(string_value='str_value')
        self.url = reverse('business-logic:rest:reference',
                           kwargs=dict(model=model, pk=self.test_model.id))

    def test_reference_view(self):
        # Default `name` is the model's str() representation.
        response = self.client.get(self.url)
        self.assertEqual(200, response.status_code)
        _json = response_json(response)
        self.assertIsInstance(_json, dict)
        self.assertEqual(self.test_model.id, _json['id'])
        self.assertEqual(str(self.test_model), _json['name'])

    def test_reference_view_name_field(self):
        # With `name_field` set, that field's value is used as the name.
        self.reference_descriptor.name_field = 'string_value'
        self.reference_descriptor.save()
        response = self.client.get(self.url)
        self.assertEqual(200, response.status_code)
        _json = response_json(response)
        self.assertIsInstance(_json, dict)
        self.assertEqual(self.test_model.id, _json['id'])
        self.assertEqual(self.test_model.string_value, _json['name'])
""" Define a few commands """ from .meeseeksbox.utils import Session, fix_issue_body, fix_comment_body from .meeseeksbox.scopes import admin, write, everyone from textwrap import dedent def _format_doc(function, name): if not function.__doc__: doc = " " else: doc = function.__doc__.splitlines() first, other = doc[0], "\n".join(doc[1:]) return "`@meeseeksdev {} {}` ({}) \n{} ".format(name, first, function.scope, other) def help_make(commands): data = "\n".join([_format_doc(v, k) for k, v in commands.items()]) @everyone def help(*, session, payload, arguments): comment_url = payload["issue"]["comments_url"] session.post_comment( comment_url, dedent( """The following commands are available:\n\n{} """.format( data ) ), ) return help @write def close(*, session, payload, arguments, local_config=None): session.ghrequest("PATCH", payload["issue"]["url"], json={"state": "closed"}) @write def open(*, session, payload, arguments, local_config=None): session.ghrequest("PATCH", payload["issue"]["url"], json={"state": "open"}) @write def migrate_issue_request( *, session: Session, payload: dict, arguments: str, local_config=None ): """[to] {org}/{repo} Need to be admin on target repo. Replicate all comments on target repo and close current on. """ """Todo: - Works through pagination of comments - Works through pagination of labels Link to non-migrated labels. """ if arguments.startswith("to "): arguments = arguments[3:] org_repo = arguments org, repo = arguments.split("/") target_session = yield org_repo if not target_session: session.post_comment( payload["issue"]["comments_url"], body="I'm afraid I can't do that. Maybe I need to be installed on target repository ?\n" "Click [here](https://github.com/integrations/meeseeksdev/installations/new) to do that.".format( botname="meeseeksdev" ), ) r
eturn issue_title = payload["issue"]["title"] issue_body = payload["issue"]["body"] original_org = payload["organization"]["login"] original_repo = payload["repository"]["name"] original_poster = payload["issue"]["user"]["login"] original_number = payload["issue"]["number"] migration_requester = payload["comment"]["
user"]["login"] request_id = payload["comment"]["id"] original_labels = [l["name"] for l in payload["issue"]["labels"]] if original_labels: available_labels = target_session.ghrequest( "GET", "https://api.github.com/repos/{org}/{repo}/labels".format( org=org, repo=repo ), None, ).json() available_labels = [l["name"] for l in available_labels] migrate_labels = [l for l in original_labels if l in available_labels] not_set_labels = [l for l in original_labels if l not in available_labels] new_response = target_session.create_issue( org, repo, issue_title, fix_issue_body( issue_body, original_poster, original_repo, original_org, original_number, migration_requester, ), labels=migrate_labels, ) new_issue = new_response.json() new_comment_url = new_issue["comments_url"] original_comments = session.ghrequest( "GET", payload["issue"]["comments_url"], None ).json() for comment in original_comments: if comment["id"] == request_id: continue body = comment["body"] op = comment["user"]["login"] url = comment["html_url"] target_session.post_comment( new_comment_url, body=fix_comment_body(body, op, url, original_org, original_repo), ) if not_set_labels: body = "I was not able to apply the following label(s): %s " % ",".join( not_set_labels ) target_session.post_comment(new_comment_url, body=body) session.post_comment( payload["issue"]["comments_url"], body="Done as {}/{}#{}.".format(org, repo, new_issue["number"]), ) session.ghrequest("PATCH", payload["issue"]["url"], json={"state": "closed"}) from .meeseeksbox.scopes import pr_author, write from .meeseeksbox.commands import tag, untag @pr_author @write def ready(*, session, payload, arguments, local_config=None): """{no arguments} Remove "waiting for author" tag, adds "need review" tag. Can also be issued if you are the current PR author even if you are not admin. 
""" tag(session, payload, "need review") untag(session, payload, "waiting for author") @write def merge(*, session, payload, arguments, method="merge", local_config=None): print("===== merging =====") if arguments: if arguments not in {"merge", "squash", "rebase"}: print("don't know how to merge with methods", arguments) return else: method = arguments prnumber = payload["issue"]["number"] org_name = payload["repository"]["owner"]["login"] repo_name = payload["repository"]["name"] # collect extended payload on the PR print("== Collecting data on Pull-request...") r = session.ghrequest( "GET", "https://api.github.com/repos/{}/{}/pulls/{}".format( org_name, repo_name, prnumber ), json=None, ) pr_data = r.json() head_sha = pr_data["head"]["sha"] mergeable = pr_data["mergeable"] repo_name = pr_data["head"]["repo"]["name"] if mergeable: resp = session.ghrequest( "PUT", "https://api.github.com/repos/{}/{}/pulls/{}/merge".format( org_name, repo_name, prnumber ), json={"sha": head_sha, "merge_method": method}, override_accept_header="application/vnd.github.polaris-preview+json", ) print("------------") print(resp.json()) print("------------") resp.raise_for_status() else: print("Not mergeable", pr_data["mergeable"]) ### # Lock and Unlock are not yet available for integration. ### # def _lock_primitive(meth,*, session, payload, arguments): # number = payload['issue']['number'] # org_name = payload['repository']['owner']['login'] # repo_name = payload['repository']['name'] # session.ghrequest('PUT', 'https://api.github.com/repos/{}/{}/issues/{}/lock'.format(org_name, repo_name, number)) # # @admin # def lock(**kwargs): # _lock_primitive('PUT', **kwargs) # # @admin # def unlock(**kwargs): # _lock_primitive('DELETE', **kwargs)
vertex set vt_list[face.index, i]=tex_count backlist[tex_count] = face.verts[i].index tex_count+=1 else: vt_list[face.index, i]=tex_list[tex_key] backlist[tex_count] = face.verts[i].index mdl.num_vertices=tex_count for this_tex in range (0, mdl.num_vertices): mdl.tex_coords.append(mdl_tex_coord()) for coord, index in tex_list.iteritems(): mdl.tex_coords[index].u=floor(coord[0]*mdl.skin_width) mdl.tex_coords[index].v=floor((1-coord[1])*mdl.skin_height) if g_fixuvs.val == 1: #shift them while mdl.tex_coords[index].u < 0: mdl.tex_coords[index].u = mdl.tex_coords[index].u + mdl.skin_width while mdl.tex_coords[index].u >= mdl.skin_width: mdl.tex_coords[index].u = mdl.tex_coords[index].u - mdl.skin_width while mdl.tex_coords[index].v < 0: mdl.tex_coords[index].v = mdl.tex_coords[index].v + mdl.skin_height while mdl.tex_coords[index].v >= mdl.skin_height: mdl.tex_coords[index].v = mdl.tex_coords[index].v - mdl.skin_height elif g_fixuvs.val == 2: #clamp them if mdl.tex_coords[index].u < 0: mdl.tex_coords[index].u = 0 if mdl.tex_coords[index].u >= mdl.skin_width:# mdl.skin_width: mdl.tex_coords[index].u = mdl.skin_width - 1 #print "vertex ", index, " clamped" if mdl.tex_coords[index].v < 0: mdl.tex_coords[index].v = 0 if mdl.tex_coords[index].v >= mdl.skin_height: mdl.tex_coords[index].v = mdl.skin_height - 1 #print "vertex ", index, " clamped" #put faces in the mdl structure #for each face in the model for this_face in range(0, mdl.num_faces): mdl.faces.append(mdl_face()) for i in range(0,3): #blender uses indexed vertexes so this works very well mdl.faces[this_face].vertex_index[i]=vt_list[mesh.faces[this_face].index, i] #get the frame list user_frame_list=get_frame_list() if user_frame_list=="default": mdl.num_frames=10 else: temp=user_frame_list[len(user_frame_list)-1] #last item mdl.num_frames=temp[2] #last frame number progress=0.5 progressIncrement=0.25/mdl.num_frames # set global scale and translation points # maybe add integer options mesh_min_x=100000.0 
mesh_max_x=-100000.0 mesh_min_y=100000.0 mesh_max_y=-100000.0 mesh_min_z=100000.0 mesh_max_z=-100000.0 for frame_counter in range(0,mdl.num_frames): Blender.Set("curframe", frame_counter+1) #set blender to the correct frame mesh.getFromObject(object.name, 1, 0) #update the mesh to make verts current for face in mesh.faces: for vert in face.verts: if mesh_min_x>vert.co[1]: mesh_min_x=vert.co[1] if mesh_max_x<vert.co[1]: mesh_max_x=vert.co[1] if mesh_min_y>vert.co[0]: mesh_min_y=vert.co[0] if mesh_max_y<vert.co[0]: mesh_max_y=vert.co[0] if mesh_min_z>vert.co[2]: mesh_min_z=vert.co[2] if mesh_max_z<vert.co[2]: mesh_max_z=vert.co[2] mesh_scale_x=(mesh_max_x-mesh_min_x)/255 mesh_scale_y=(mesh_max_y-mesh_min_y)/255 mesh_scale_z=(mesh_max_z-mesh_min_z)/255 mdl.scale[0] = mesh_scale_x mdl.scale[1] = mesh_scale_y mdl.scale[2] = mesh_scale_z mdl.translate[0] = mesh_min_x mdl.translate[1] = mesh_min_y mdl.translate[2] = mesh_min_z #fill in each frame with frame info and all the vertex data for that frame for frame_counter in range(0,mdl.num_frames): progress+=progressIncrement Blender.Window.DrawProgressBar(progress, "Calculating Frame: "+str(frame_counter+1)) #add a frame mdl.frames.append(mdl_frame()) #update the mesh objects vertex positions for the animation Blender.Set("curframe", frame_counter+1) #set blender to the correct frame mesh.getFromObject(object.name, 1, 0) #update the mesh to make verts current frame_min_x=100000 frame_max_x=-100000 frame_min_y=100000 frame_max_y=-100000 frame_min_z=100000 frame_max_z=-100000 #now for the vertices for vert_counter in range(0, mdl.num_vertices): #add a vertex to the mdl structure mdl.frames[frame_counter].vertices.append(mdl_point()) #figure out the new coords based on scale and transform #then translates the point so it's not less than 0 #then scale it so it's between 0..255 current_vertex = backlist[vert_counter] vert = mesh.verts[current_vertex] # scale # x coord needs flipping 
new_x=255-int((vert.co[1]-mesh_min_x)/mesh_scale_x) new_y=int((vert.co[0]-mesh_min_y)/mesh_scale_y) new_z=int((vert.co[2]-mesh_min_z)/mesh_scale_z) # bbox stuff if frame_min_x>new_x: frame_min_x=new_x if frame_max_x<new_x: frame_max_x=new_x if frame_min_y>new_y: frame_min_y=new_y if frame_max_y<new_y: frame_max_y=new_y if frame_min_z>new_z: frame_min_z=new_z if frame_max_z<new_z: frame_max_z=new_z #put them in the structure mdl.frames[frame_counter].vertices[vert_counter].vertices=(new_x, new_y, new_z) #need to add the lookup table check here maxdot = -999999.0; maxdotindex = -1; for j in range(0,162): x1=-mesh.verts[current_vertex].no[1] y1=mesh.verts[current_vertex].no[0] z1=mesh.verts[current_vertex].no[2] dot = (x1*MDL_NORMALS[j][0]+ y1*MDL_NORMALS[j][1]+ z1*MDL_NORMALS[j][2]); if (dot > maxdot): maxdot = dot; maxdotindex = j; mdl.frames[frame_counter].vertices[vert_counter].lightnormalindex=maxdotindex del maxdot, maxdotindex del new_x, new_y, new_z mdl.frames[frame_counter].bboxmin[0] = frame_min_x mdl.frames[frame_counter].bboxmax[0] = frame_max_x mdl.frames[frame_counter].bboxmin[1] = frame_min_y mdl.frames[frame_counter].bboxmax[1] = frame_max_y mdl.frames[frame_counter].bboxmin[2] = frame_min_z mdl.frames[frame_counter].bboxmax[2] = frame_max_z del frame_min_x,frame_max_x,frame_min_y,frame_max_y,frame_min_z,frame_max_z #output all the frame names-user_frame_list is loaded during the validation for frame_set in user_frame_list: for counter in range(frame_set[1]-1, frame_set[2]): mdl.frames[counter].name=frame_set[0]+str(counter-frame_set[1]+2) ofs = object.getLocation('worldspace') sc = object.getSize('worldspace') # Rather than warn about these things, just apply the transformations they indicate mdl.scale[0] = mdl.scale[0] * sc[1] * g_scale.val mdl.scale[1] = mdl.scale[1] * sc[0] * g_scale.val mdl.scale[2] = mdl.scale[2] * sc[2] * g_scale.val mdl.translate[0] = mdl.translate[0] + ofs[1] mdl.translate[1] = mdl.translate[1] + ofs[0] mdl.translate[2] = 
mdl.translate[2] + ofs[2] mdl.boundingradius = (mesh_max_x-mesh_min_x+mesh_max_y-mesh_min_y+mesh_max_z-mesh_min_z)/2 # a crude approximation, but when is this used? mdl.eyeposition[0] = 0 mdl.eyeposition[1] = 0 mdl.eyeposition[2] = mesh_min_z #ground plane for QMe mdl.synctype = 1 mdl.flags = g_flags.val mdl.size = 10.0 #unused? ###################################################### # Get Frame List ###################################################### def get_frame_list(): global g_frame_filename frame_list=[] if g_frame_filename.val=="default": return MDL_FRAME_NAME_LIST else: #check for file if (Blender.sys.exists(g_frame_filename.val)==1): #open fi
le and read it in file=open(g_frame_filename.val,"r") lines=file.readlines() file.close() #check header (first line) if lines[0].strip()<>"# MDL Frame Name List": print "its not a valid file" result=Blender.Draw.PupMenu("This is not a valid frame definition file-using default%t|OK") return MDL_FRAME_NAME_LIST else: #read in the data num_frames=0 for counter
in range(1, len(lines)): current_line=lines[counter].strip() if current_line[0]=="#": #found a comment pass else: data=current_line.split() frame_list.append([data[0],num_frames+1, num_frames+in
disk.split(':')[1].lstrip() elif key.strip() == "SysFS ID": devlist["sysfs_id"] = \ disk.split(':')[1].lstrip() elif key.strip() == "SysFS BusID": devlist["sysfs_busid"] = \ disk.split(':')[1].lstrip() elif key.strip() == "SysFS Device Link": devlist["sysfs_device_link"] = \ disk.split(':')[1].lstrip() elif key.strip() == "Hardware Class": devlist["hardware_class"] = \ disk.split(':')[1].lstrip() elif key.strip() == "Model": devlist["model"] = \ disk.split(':')[1].lstrip().replace('"', "") elif key.strip() == "Vendor": devlist["vendor"] = \ disk.split(':')[1].replace(" ", "").replace('"', "") elif key.strip() == "Device": devlist["device"] = \ disk.split(':')[1].replace(" ", "").replace('"', "") elif key.strip() == "Revision": devlist["rmversion"] = \ disk.split(':')[1].lstrip().replace('"', "") elif key.strip() == "Serial ID": devlist["serial_no"] = \ disk.split(':')[1].replace(" ", "").replace('"', "") elif key.strip() == "Driver": devlist["driver"] = \ disk.split(':')[1].lstrip().replace('"', "") elif key.strip() == "Driver Modules": devlist["driver_modules"] = \ disk.split(':')[1].lstrip().replace('"', "") elif key.strip() == "Device File": _name = disk.split(':')[1].lstrip() devlist["disk_name"] = \ "".join(_name.split(" ")[0]) elif key.strip() == "Device Files": devlist["device_files"] = \ disk.split(':')[1].lstrip() elif key.strip() == "Device Number": devlist["device_number"] = \ disk.split(':')[1].lstrip() elif key.strip() == "BIOS id": devlist["bios_id"] = \ disk.split(':')[1].lstrip() elif key.strip() == "Geometry (Logical)": devlist["geo_logical"] = \ disk.split(':')[1].lstrip() elif key.strip() == "Capacity": devlist["size"] = \ disk.split('(')[1].split()[0] elif key.strip() == "Geometry (BIOS EDD)": devlist["geo_bios_edd"] = \ disk.split(':')[1].lstrip() elif key.strip() == "Size (BIOS EDD)": devlist["size_bios_edd"] = \ disk.split(':')[1].lstrip() elif key.strip() == "Geometry (BIOS Legacy)": devlist["geo_bios_legacy"] = \ disk.split(':')[1].lstrip() 
elif key.strip() == "Config Status": devlist["config_status"] = \ disk.split(':')[1].lstrip() if ("virtio" in devlist["driver"] and "by-id/virtio" in devlist['device_files']): # split from: # /dev/vdc, /dev/disk/by-id/virtio-0200f64e-5892-40ee-8, # /dev/disk/by-path/virtio-pci-0000:00:08.0 for entry in devlist['device_files'].split(','): if "by-id/virtio" in entry: devlist['disk_id'] = entry.split('/')[-1] break elif "VMware" in devlist["vendor"]: devlist["disk_id"] = \ "{vendor}_{device}_{parent_id}_{hardware_id}".format(**devlist) elif (devlist["vendor"] != "" and devlist["device"] != "" and devlist["serial_no"] != ""): devlist["disk_id"] = (devlist["vendor"] + "_" + devlist["device"] + "_" + devlist[ "serial_no"]) else: devlist['disk_id'] = devlist['disk_name'] if devlist["disk_id"] in disks.keys(): # Multipath is like multiple I/O paths between # server nodes and storage arrays into a single device # If single device is connected with more than one path # then hwinfo and lsblk will give same device details with # different device names. To avoid this duplicate entry, # If multiple devices exists with same disk_id then # device_name which is lower in alphabetical order is stored. 
# It will avoid redundacy of disks and next sync it will # make sure same device detail is populated if devlist["disk_name"] < disks[ devlist['disk_id']]['disk_name']: disks[devlist["disk_id"]] = devlist disks_map[devlist['hardware_id']] = devlist["disk_id"] else: disks[devlist["disk_id"]] = devlist disks_map[devlist['hardware_id']] = devlist["disk_id"] return disks, disks_map, err def get_node_block_devices(disks_map): block_devices = dict(all=list(), free=list(), used=list()) columns = 'NAME,KNAME,PKNAME,MAJ:MIN,FSTYPE,MOUNTPOINT,LABEL,' \ 'UUID,RA,RO,RM,SIZE,STATE,OWNER,GROUP,MODE,ALIGNMENT,' \ 'MIN-IO,OPT-IO,PHY-SEC,LOG-SEC,ROTA,SCHED,RQ-SIZE,' \ 'DISC-ALN,DISC-GRAN,DISC-MAX,DISC-ZERO,TYPE' keys = columns.split(',') lsblk = ( "lsblk --all --bytes --noheadings --output='%s' --path --raw" % columns) cmd = cmd_utils.Command(lsblk) out, err, rc = cmd.run() if not err: out = unicodedata.normalize('NFKD', out).encode('utf8', 'ignore') \ if isinstance(out, unicode) \ else unicode(out, errors="ignore").encode('utf8') devlist = map( lambda line: dict(zip(keys, line.split(' '))), out.splitlines()) all_parents = [] parent_ids = [] multipath = {} for dev_info in devlist: device = dict() device['device_name'] = dev_info['NAME'] device['device_kernel_name'] = dev_info['KNAME'] device['parent_name'] = dev_info['PKNAME'] device['major_
to_minor_no'] = dev_info['MAJ:MIN'] device['fstype'] = dev_info['FSTYPE'] device['mount_point'] = dev_info['MOUNTPOINT'] device['label'] = dev_info['LABEL'] device['fsuuid'] = dev_info['UUID'] device['read_ahead'] = dev_info['RA'] if dev_info['RO'] == '0': device['read_only'] = False else: device['read_only'] = True if dev_info['RM'] == '0': device['removable_devi
ce'] = False else: device['removable_device'] = True device['size'] = dev_info['SIZE'] device['state'] = dev_info['STATE'] device['owner'] = dev_info['OWNER'] device['group'] = dev_info['GROUP'] device['mode'] = dev_info['MODE'] device['alignment'] = dev_info['ALIGNMENT'] device['min_io_size'] = dev_info['MIN-IO'] device['optimal_io_size'] = dev_info['OPT-IO'] device['phy_sector_size'] = dev_info['PHY-SEC'] device['log_sector_size'] = dev_info['LOG-SEC'] device['device_type'] = dev_info['TYPE'] device['scheduler_name'] = dev_info['SCHED'] device['req_queue_size'] = dev_info['RQ-SIZE'] device['discard_align_offset'] = dev_info['DISC-ALN'] device['discard_granularity'] = dev_info['DISC-GRAN'] device['dis
# This is your "setup.py" file.
# See the following sites for general guide to Python packaging:
# * `The Hitchhiker's Guide to Packaging <http://guide.python-distribute.org/>`_
# * `Python Project Howto <http://infinitemonkeycorps.net/docs/pph/>`_

from setuptools import setup, find_packages
import sys
import os
#from Cython.Build import cythonize
from setuptools.extension import Extension

# Resolve paths relative to this file so the build works from any CWD.
here = os.path.abspath(os.path.dirname(__file__))
# README is read as bytes and decoded explicitly to avoid locale-dependent
# default encodings; NEWS relies on the platform default encoding.
README = open(os.path.join(here, 'README.md'), "rb").read().decode("utf-8")
NEWS = open(os.path.join(here, 'NEWS.rst')).read()

version = '0.1'

install_requires = [
    # List your project dependencies here.
    # For more details, see:
    # http://packages.python.org/distribute/setuptools.html#declaring-dependencies
    # Packages with fixed versions
    # "<package1>==0.1",
    # "<package2>==0.3.0",
    # "nose", "coverage"  # Put it here.
]

tests_requires = [
    # List your project testing dependencies here.
]

dev_requires = [
    # List your project development dependencies here.\
]

dependency_links = [
    # Sources for some fixed versions packages
    #'https://github.com/<user1>/<package1>/archive/master.zip#egg=<package1>-0.1',
    #'https://github.com/<user2>/<package2>/archive/master.zip#egg=<package2>-0.3.0',
]

# Cython extension (currently disabled; see the commented cythonize() call in
# setup() below).
# TOP_DIR="/home/eugeneai/Development/codes/NLP/workprog/tmp/link-grammar"
# LG_DIR="link-grammar"
# LG_LIB_DIR=os.path.join(TOP_DIR,LG_DIR,".libs")
# LG_HEADERS=os.path.join(TOP_DIR)

ext_modules = [
    # Extension("isu.aquarium.cython_module",
    #           sources=["src/./isu.aquarium/cython_module.pyx"],
    #           libraries=["gdal"],
    #           )
]

setup(
    name='isu.aquarium',
    version=version,
    description="Document organizing WEB-system",
    long_description=README + '\n\n' + NEWS,
    # Get classifiers from http://pypi.python.org/pypi?%3Aaction=list_classifiers
    # classifiers=[c.strip() for c in """
    #     Development Status :: 4 - Beta
    #     License :: OSI Approved :: MIT License
    #     Operating System :: OS Independent
    #     Programming Language :: Python :: 2.6
    #     Programming Language :: Python :: 2.7
    #     Programming Language :: Python :: 3
    #     Topic :: Software Development :: Libraries :: Python Modules
    #     """.split('\n') if c.strip()],
    # ],
    keywords='WEB Semantics JavaScript',
    author='Evgeny Cherkashin',
    author_email='eugeneai@irnok.net',
    url='https://github.com/sergeeva-olga/decree-server',
    license='GPL>=2',
    # Package sources live under src/; 'isu' is a namespace package shared
    # with other isu.* distributions.
    packages=find_packages("src"),
    package_dir={'': "src"},
    namespace_packages=['isu'],
    include_package_data=True,
    zip_safe=False,
    install_requires=install_requires,
    dependency_links=dependency_links,
    extras_require={
        'tests': tests_requires,
        'dev': dev_requires,
    },
    test_suite='tests',
    # PasteDeploy entry point: `main` builds the WSGI application.
    entry_points="""\
[paste.app_factory]
main=isu.aquarium.server:main
""",
    #ext_modules = cythonize(ext_modules),
    #test_suite = 'nose.collector',
    # setup_requires=['nose>=1.0','Cython','coverage']
)
from testscenarios import TestWithScenarios
import unittest
from geocode.geocode import GeoCodeAccessAPI


class GeoCodeTests(TestWithScenarios, unittest.TestCase):
    """Scenario-driven tests for GeoCodeAccessAPI.

    testscenarios runs each test method once per entry in `scenarios`,
    injecting the scenario dict's keys (`address`, `latlng`, `method`)
    as instance attributes.

    NOTE(review): these tests appear to hit a live geocoding backend via
    GeoCodeAccessAPI — confirm; if so they are integration tests and may
    be flaky offline.
    """

    scenarios = [
        (
            "Scenario - 1: Get latlng from address",
            {
                'address': "Sydney NSW",
                'latlng': (-33.8674869, 151.2069902),
                'method': "geocode",
            }
        ),
        (
            "Scenario - 2: Get address from latlng",
            {
                'address': "Sydney NSW",
                'latlng': (-33.8674869, 151.2069902),
                'method': "address",
            }
        ),
    ]

    def setUp(self):
        # Fresh API client per test.
        self.api = GeoCodeAccessAPI()

    def test_geocode(self):
        # Dispatches on the scenario's `method` attribute: forward geocode
        # (address -> lat/lng) or reverse (lat/lng -> address).
        if self.method == 'geocode':
            expected_address = self.address
            expected_lat = self.latlng[0]
            expected_lng = self.latlng[1]
            geocode = self.api.get_geocode(expected_address)
            # delta=5 tolerates up to 5 degrees of difference — a very
            # loose bound, presumably to absorb backend variation.
            self.assertAlmostEqual(geocode.lat, expected_lat, delta=5)
            self.assertAlmostEqual(geocode.lng, expected_lng, delta=5)
            self.assertIn(expected_address, geocode.address)
        else:
            expected_address = self.address
            expected_lat = self.latlng[0]
            expected_lng = self.latlng[1]
            address = self.api.get_address(lat=expected_lat, lng=expected_lng)
            self.assertIn(expected_address, address)

    def tearDown(self):
        # No per-test cleanup required.
        pass


if __name__ == "__main__":
    unittest.main()
""" A simple client to query a TensorFlow Serving instance. Example: $ python client.py \ --images IMG_0932_sm.jpg \ --num_results 10 \ --model_name inception \ --host localhost \ --port 9000 \ --timeout 10 Author: Grant Van Hor
n """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import time import tfserver def parse_args(): parser = argparse.ArgumentParser(description='Command line classification client. Sorts and prints the classification results.') parser.add_argument('--images', dest='image_paths', help='Path to one or more images to classify (jpeg or png).',
type=str, nargs='+', required=True) parser.add_argument('--num_results', dest='num_results', help='The number of results to print. Set to 0 to print all classes.', required=False, type=int, default=0) parser.add_argument('--model_name', dest='model_name', help='The name of the model to query.', required=False, type=str, default='inception') parser.add_argument('--host', dest='host', help='Machine host where the TensorFlow Serving model is.', required=False, type=str, default='localhost') parser.add_argument('--port', dest='port', help='Port that the TensorFlow Server is listening on.', required=False, type=int, default=9000) parser.add_argument('--timeout', dest='timeout', help='Amount of time to wait before failing.', required=False, type=int, default=10) args = parser.parse_args() return args def main(): args = parse_args() # Read in the image bytes image_data = [] for fp in args.image_paths: with open(fp) as f: data = f.read() image_data.append(data) # Get the predictions t = time.time() predictions = tfserver.predict(image_data, model_name=args.model_name, host=args.host, port=args.port, timeout=args.timeout ) dt = time.time() - t print("Prediction call took %0.4f seconds" % (dt,)) # Process the results results = tfserver.process_classification_prediction(predictions, max_classes=args.num_results) # Print the results for i, fp in enumerate(args.image_paths): print("Results for image: %s" % (fp,)) for name, score in results[i]: print("%s: %0.3f" % (name, score)) print() if __name__ == '__main__': main()
"""A backport
of the get_terminal_size function from Python 3.3's shutil.""" __title__ = "backports.shutil_get_terminal_size" __version__ = "1.0.0" __license__ = "MIT" __author__ = "Christopher Rosell" __copyright__ = "Copyright 2014 Christopher Rosell" __all__ = ["get_terminal_size"] fro
m .get_terminal_size import get_terminal_size
from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse
from .models import UserProfile
from imagersite.tests import AuthenticatedTestCase

# Create your tests here.


class ProfileTestCase(TestCase):
    """Tests for the UserProfile model attached to a User."""

    def setUp(self):
        """Create and save a user; saving presumably triggers profile
        creation via a signal — confirm against the models module."""
        self.user = User(username='Cris', first_name='Cris')
        self.user.save()

    def test_user_has_profile(self):
        """Test User has a profile."""
        self.assertTrue(hasattr(self.user, 'profile'))

    def test_profile_username(self):
        """Test Profile exposes the owning user's username."""
        self.assertEqual(self.user.profile.user.username, 'Cris')

    # TODO: parametrize these attribute checks.
    def test_profile_has_cameratype(self):
        """Test profile has camera type attr."""
        self.assertTrue(hasattr(self.user.profile, 'camera_type'))

    def test_profile_repr(self):
        """Test repr() of the profile contains the username."""
        self.assertIn('Cris', repr(self.user.profile))

    def test_profile_active(self):
        """Test the `active` manager returns at least one profile."""
        self.assertTrue(len(UserProfile.active.all()) > 0)


class UserProfilePageTestCase(AuthenticatedTestCase):
    """Tests for the rendered /profile/ page (logged-in user)."""

    def test_profile_page(self):
        # Page is reachable once logged in.
        self.log_in()
        self.assertEqual(self.client.get('/profile/').status_code, 200)

    def test_profile_page_has_username(self):
        # Page body contains the logged-in username (bytes comparison
        # because response.content is bytes).
        self.log_in()
        self.assertIn(
            self.username.encode('utf-8'),
            self.client.get('/profile/').content
        )

    def test_profile_page_has_photo_count(self):
        self.log_in()
        self.assertIn(
            b'Photos uploaded:',
            self.client.get('/profile/').content
        )

    def test_profile_page_has_album_count(self):
        self.log_in()
        self.assertIn(b'Albums created:', self.client.get('/profile/').content)


class EditProfileTestCase(TestCase):
    """Tests for the edit_profile view (GET and POST)."""

    def setUp(self):
        """Log in a fresh user and GET the route named edit_profile."""
        self.user = User(username='test')
        self.user.save()
        self.client.force_login(self.user)
        self.response = self.client.get(reverse('edit_profile'))

    def test_status_code(self):
        """Test the status code for GETing edit_profile is 200."""
        self.assertEqual(self.response.status_code, 200)

    def test_edit_profile(self):
        """Test editing a profile stores the updated camera_type value."""
        new_camera_type = 'camera'
        data = {
            'camera_type': new_camera_type,
        }
        response = self.client.post(reverse('edit_profile'), data)
        # 302: the view redirects after a successful edit.
        self.assertEqual(response.status_code, 302)
        profile = UserProfile.objects.filter(user=self.user).first()
        self.assertEqual(profile.camera_type, new_camera_type)
""" Revision ID: 0146_add_service_callback_api Revises: 0145_add_notification_reply_to Create Date: 2017-11-28 15:13:48.730554 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql revision = '0146_add_service_callback_api' down_revision = '0145_add_notification_reply_to' def upgrade(): op.create_table('service_callback_api_history', sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False), sa.Column('service_id', postgresql.UUID(as_uuid=True), nullable=False), sa.Column('url', sa.String(), nullable=False), sa.Column('bearer_token', sa.String(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=False), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('updated_by_id', postgresql.UUID(as_uuid=True), nullable=False), sa.Column('version', sa.Integer(), autoincrement=False, nullable=False), sa.PrimaryKeyConstraint('id', 'version') ) op.create_index(op.f('ix_service_callback_api_history_service_id'), 'service_callback_api_history', ['service_id'], unique=False) op.create_index(op.f('ix_service_callback_api_history_updated_by_id'), 'service_callback_api_history', ['updated_by_id'], unique=False) op.create_table('service_callback_api', sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False), sa.Column('service_id', postgresql.UUID(as_uuid=True), nullable=False), sa.Column('url', sa.String(), nullable=False), sa.Column('bearer_token', sa.String(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=False), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('updated_by_id', postgresql.UUID(as_uuid=True), nullable=False), sa.Column('version', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['service_id'], ['services.id'], ), sa.Foreign
KeyConstraint(['updated_by_id'], ['users.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_index(op.f('ix_service_callback_api_service_id'), 'service_callback_api', ['service_id'], unique=True) op.create_index(op.f('ix_
service_callback_api_updated_by_id'), 'service_callback_api', ['updated_by_id'], unique=False) def downgrade(): op.drop_index(op.f('ix_service_callback_api_updated_by_id'), table_name='service_callback_api') op.drop_index(op.f('ix_service_callback_api_service_id'), table_name='service_callback_api') op.drop_table('service_callback_api') op.drop_index(op.f('ix_service_callback_api_history_updated_by_id'), table_name='service_callback_api_history') op.drop_index(op.f('ix_service_callback_api_history_service_id'), table_name='service_callback_api_history') op.drop_table('service_callback_api_history')
is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode'] # Breathe setup, for integrating doxygen content extensions.append('breathe') doxyxml_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../doxygen') print doxyxml_dir breathe_projects = {"libmypaint": doxyxml_dir} breathe_default_project = "libmypaint" # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'libmypaint' copyright = u'2012, MyPaint Development Team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. 
release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
#html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description fi
le will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'libmypaintdoc' # -- Options for LaTeX output -------------------------------------------------- latex
_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'libmypaint.tex', u'libmypaint Documentation', u'MyPaint Development Team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'libmypaint', u'libmypaint Documentation', [u'MyPaint Development Team'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'libmypaint', u'libmypaint Documentation', u'MyPaint Development Team', 'libmypaint', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. 
#texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # Example configuration for intersphinx: refer to the Python standard library. intersphinx_m
# -*- coding: utf-8 -*
- """Controllers for the pypollmanage pluggable application.""" from .root import RootController
# -*- coding: utf-8 -*-
# © 2016 Comunitea
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html

from odoo import api, fields, models


class GeneralLedgerReportWizard(models.TransientModel):
    """Restrict partner selection to company partners on company change."""

    _inherit = "general.ledger.report.wizard"

    @api.onchange('company_id')
    def onchange_company_id(self):
        # Let the parent wizard build its onchange result first.
        res = super(GeneralLedgerReportWizard, self).onchange_company_id()
        if self.company_id:
            # Narrow the partner picker to company-type partners only.
            company_domain = [('is_company', '=', True)]
            res['domain']['partner_ids'] = company_domain
        return res
.getLogger(__name__) configdrive_opts = [ cfg.StrOpt('config_drive_format', default='iso9660', choices=('iso9660', 'vfat'), help='Config drive format.'), # force_config_drive is a string option, to allow for future behaviors # (e.g. use config_drive based on image properties) cfg.StrOpt('force_config_drive', choices=('always', 'True', 'False'), help='Set to "always" to force injection to take place on a ' 'config drive. NOTE: The "always" will be deprecated in ' 'the Liberty release cycle.'), cfg.StrOpt('mkisofs_cmd', default='genisoimage', help='Name and optionally path of the tool used for ' 'ISO image creation') ] CONF = cfg.CONF CONF.register_opts(configdrive_opts) # Config drives are 64mb, if we can't size to the exact size of the data CONFIGDRIVESIZE_BYTES = 64 * units.Mi FS_FORMAT_VFAT = 'vfat' FS_FORMAT_ISO9660 = 'iso9660' IMAGE_TYPE_RAW = 'raw' IMAGE_TYPE_PLOOP = 'ploop' class ConfigDriveBuilder(object): """Build config drives, optionally as a context manager.""" def __init__(self, instance_md=None): if CONF.force_config_drive == 'always': LOG.warning(_LW('The setting "always" will be deprecated in the ' 'Liberty version. Please use "True" instead')) self.imagefile = None self.mdfiles = [] if instance_md is not None: self.add_instance_metadata(instance_md) def __enter__(self): return self def __exit__(self, exctype, excval, exctb): if exctype is not None: # NOTE(mikal): this means we're being cleaned up because an # exception was thrown. 
All bets are off now, and we should not # swallow the exception return False self.cleanup() def _add_file(self, basedir, path, data): filepath = os.path.join(basedir, path) dirname = os.path.dirname(filepath) fileutils.ensure_tree(dirname) with open(filepath, 'wb') as f: f.write(data) def add_instance_metadata(self, instance_md): for (path, data) in instance_md.metadata_for_config_drive(): self.mdfiles.append((path, data)) def _write_md_files(self, basedir): for data in self.mdfiles: self._add_file(basedir, data[0], data[1]) def _make_iso9660(self, path, tmpdir): publisher = "%(product)s %(version)s" % { 'product': version.product_string(), 'version': version.version_string_with_package() } utils.execute(CONF.mkisofs_cmd, '-o', path, '-ldots', '-allow-lowercase', '-allow-multidot', '-l', '-publisher', publisher, '-quiet', '-J', '-r', '-V', 'config-2', tmpdir, attempts=1, run_as_root=False) def _make_vfat(self, path, tmpdir): # NOTE(mikal): This is a little horrible, but I couldn't find an # equivalent to genisoimage for vfat filesystems. with open(path, 'wb') as f: f.truncate(CONFIGDRIVESIZE_BYTES) utils.mkfs('vfat', path, label='config-2') with utils.tempdir() as mountdir: mounted = False try: _, err = utils.trycmd( 'mount', '-o', 'loop,uid=%d,gid=%d' % (os.getuid(), os.getgid()), path, mountdir, run_as_root=True) if err: raise exception.ConfigDriveMountFailed(operation='mount', error=err) mounted = True # NOTE(mikal): I can't just use shutils.copytree here, # because the destination directory already # exists. This is annoying. for ent in os.listdir(tmpdir): shutil.copytree(os.path.join(tmpdir, ent), os.path.join(mountdir, ent)) finally: if mounted: utils.execute('umount', mountdir, run_as_root=True) def _make_ext4_ploop(self, path, tmpdir): """ploop is a disk loopback block device, that is used in Parallels(OpenVZ) containers. 
It is similiar to Linux loop device but prevents double caching of data in memory and supports snapshots and some other effeciency benefits. Adding ploop is a natural way to add disk device to VZ containers. Ploop device has its own image format. It contains specific partition table with one ext4 partition. """ os.mkdir(path) utils.execute('ploop', 'in
it', '-s', CONFIGDRIVESIZE_BYTES, '-t', 'ext4', path + '/disk.config.hds', attempts=1, run_as_root=True) with utils.tempdir() as mountdir: mounted = False try: _, err = utils.trycmd(
'ploop', 'mount', '-m', mountdir, '-t', 'ext4', path + '/DiskDescriptor.xml', run_as_root=True) if os.path.exists(mountdir): utils.execute('chown', '-R', '%(u)d:%(g)d' % {'u': os.getuid(), 'g': os.getgid()}, mountdir, run_as_root=True) mounted = True for ent in os.listdir(tmpdir): shutil.copytree(os.path.join(tmpdir, ent), os.path.join(mountdir, ent)) finally: if mounted: utils.execute('ploop', 'umount', path + '/disk.config.hds', run_as_root=True) def make_drive(self, path, image_type=IMAGE_TYPE_RAW): """Make the config drive. :param path: the path to place the config drive image at :param image_type: host side image format :raises ProcessExecuteError if a helper process has failed. """ fs_format = CONF.config_drive_format if fs_format is None: if image_type == IMAGE_TYPE_RAW: fs_format = FS_FORMAT_ISO9660 with utils.tempdir() as tmpdir: self._write_md_files(tmpdir) if image_type == IMAGE_TYPE_RAW: if fs_format not in (FS_FORMAT_VFAT, FS_FORMAT_ISO9660): raise exception.ConfigDriveUnsupportedFormat( format=fs_format, image_type=image_type, image_path=path) elif fs_format == FS_FORMAT_ISO9660: self._make_iso9660(path, tmpdir) elif fs_format == FS_FORMAT_VFAT: self._make_vfat(path, tmpdir) elif image_type == IMAGE_TYPE_PLOOP: self._make_ext4_ploop(path, tmpdir) else: raise exception.ConfigDriveUnsupportedFormat( format=fs_format, image_type=image_type, image_path=path) def cleanup(self): if self.imagefile: fileutils.delete_if_exists(self.imagefile) def __repr__(self): return "<ConfigDriveBuilder: " + str(self.mdfiles) + ">" def required_by(instance): image_prop = utils.instance_sys_meta(instance).get( utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive', 'optional') if image_prop not in ['optional', 'mandatory']: LOG.
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User, Group, Permission
from simple_history import register
from celsius.tools import register_for_permission_handling

# Track change history for the auth models.
for _model in (User, Group):
    register(_model)

# Enable permission handling for every model managed here.
for _model in (User, Group, Permission, LogEntry):
    register_for_permission_handling(_model)
from django.test import TestCase

from voice.models import Call


class CallModelTestCase(TestCase):
    """Unit tests for the Call model."""

    def setUp(self):
        # One saved call record to exercise the string representation.
        self.call = Call(
            sid="CAxxx",
            from_number="+15558675309",
            to_number="+15556667777",
        )
        self.call.save()

    def test_string_representation(self):
        expected = "{0}: from +15558675309 to +15556667777".format(
            self.call.date_created)
        self.assertEqual(str(self.call), expected)
cdss = [feature] else: cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True)) if cdss == []: return "None" res = (sum([len(cds) for cds in cdss]) / 3) - 1 if floor(res) == res: res = int(res) return str(res) def notes(record, feature): """User entered Notes""" for x in ["Note", "note", "Notes", "notes"]: for y in feature.qualifiers.keys(): if x == y: return feature.qualifiers[x][0] if useSubs: res = checkSubs(feature, ["Note", "note", "Notes", "notes"]) if res != "": return res return "None" def date_created(record, feature): """Created""" return feature.qualifiers.get("date_creation", ["None"])[0] def date_last_modified(record, feature): """Last Modified""" res = feature.qualifiers.get("date_last_modified", ["None"])[0] if res != "None": return res if useSubs: res = checkSubs(feature, ["date_last_modified"]) if res != "": return res return "None" def description(record, feature): """Description""" res = feature.qualifiers.get("description", ["None"])[0] if res != "None": return res if useSubs: res = checkSubs(feature, ["description"]) if res != "": return res return "None" def owner(record, feature): """Owner User who created the feature. In a 464 scenario this may be one of the TAs.""" for x in ["Owner", "owner"]: for y in feature.qualifiers.keys(): if x == y: return feature.qualifiers[x][0] if useSubs: res = checkSubs(feature, ["Owner", "owner"]) if res != "": return res return "None" def product(record, feature): """Product User entered product qualifier (collects "Product" and "product" entries)""" """User entered Notes""" for x in ["product", "Product"]: for y in feature.qualifiers.keys(): if x == y: return feature.qualifiers[x][0] if useSubs: res = checkSubs(feature, ["product", "Product"]) if res != "": return res return "None" def note(record, feature): """Note User entered Note qualifier(s)""" return feature.qualifiers.get("Note", []) def strand(record, feature):
"""Strand """ return "+" if feature.location.strand > 0 else "-" def sd_spacing(record, feature): """Shine-Dalgarno spacing """ rbss = get_rbs_from(gene) if len(rbss) == 0: return "None" else: resp = [] for rbs in rbss: cdss = list(genes(feature.sub_features, feature_type="CDS", sort
=True)) if len(cdss) == 0: return "No CDS" if rbs.location.strand > 0: distance = min( cdss, key=lambda x: x.location.start - rbs.location.end ) distance_val = str(distance.location.start - rbs.location.end) resp.append(distance_val) else: distance = min( cdss, key=lambda x: x.location.end - rbs.location.start ) distance_val = str(rbs.location.start - distance.location.end) resp.append(distance_val) if len(resp) == 1: return str(resp[0]) return resp def sd_seq(record, feature): """Shine-Dalgarno sequence """ rbss = get_rbs_from(gene) if len(rbss) == 0: return "None" else: resp = [] for rbs in rbss: resp.append(str(rbs.extract(record).seq)) if len(resp) == 1: return str(resp[0]) else: return resp def start_codon(record, feature): """Start Codon """ if feature.type == "CDS": cdss = [feature] else: cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True)) data = [x for x in cdss] if len(data) == 1: return str(data[0].extract(record).seq[0:3]) else: return [ "{0} ({1.location.start}..{1.location.end}:{1.location.strand})".format( x.extract(record).seq[0:3], x ) for x in data ] def stop_codon(record, feature): """Stop Codon """ return str(feature.extract(record).seq[-3:]) def dbxrefs(record, feature): """DBxrefs """ """User entered Notes""" for x in ["Dbxref", "db_xref", "DB_xref", "DBxref", "DB_Xref", "DBXref"]: for y in feature.qualifiers.keys(): if x == y: return feature.qualifiers[x][0] return "None" def upstream_feature(record, feature): """Next gene upstream""" if feature.strand > 0: upstream_features = [ x for x in sorted_features if (x.location.start < feature.location.start and x.type == "gene" and x.strand == feature.strand) ] if len(upstream_features) > 0: foundSelf = False featCheck = upstream_features[-1].sub_features for x in featCheck: if x == feature: foundSelf = True break featCheck = featCheck + x.sub_features if foundSelf: if len(upstream_features) > 1: return upstream_features[-2] return None return upstream_features[-1] else: return 
None else: upstream_features = [ x for x in sorted_features if (x.location.end > feature.location.end and x.type == "gene" and x.strand == feature.strand) ] if len(upstream_features) > 0: foundSelf = False featCheck = upstream_features[0].sub_features for x in featCheck: if x == feature: foundSelf = True break featCheck = featCheck + x.sub_features if foundSelf: if len(upstream_features) > 1: return upstream_features[1] return None return upstream_features[0] else: return None def upstream_feature__name(record, feature): """Next gene upstream""" up = upstream_feature(record, feature) if up: return str(up.id) return "None" def ig_dist(record, feature): """Distance to next upstream gene on same strand""" up = upstream_feature(record, feature) if up: dist = None if feature.strand > 0: dist = feature.location.start - up.location.end else: dist = up.location.start - feature.location.end return str(dist) else: return "None" def _main_gaf_func(record, feature, gaf_data, attr): if feature.id in gaf_data: return [x[attr] for x in gaf_data[feature.id]] return [] def gaf_annotation_extension(record, feature, gaf_data): """GAF Annotation Extension Contains cross references to other ontologies that can be used to qualify or enhance the annotation. The cross-reference is prefaced by an appropriate GO relationship; references to multiple ontologies can be entered. For example, if a gene product is localized to the mitochondria of lymphocytes, the GO ID (column 5) would be mitochondrion ; GO:0005439, and the
'flavor_ref': '1', }, 'os-scheduler-hints:scheduler_hints': 'here', } req.body = jsonutils.dumps(body) res = req.get_response(self.app) self.assertEqual(400, res.status_int) class ServersControllerCreateTest(test.TestCase): def setUp(self): """Shared implementation for tests below that create instance.""" super(ServersControllerCreateTest, self).setUp() self.flags(verbose=True, enable_instance_password=True) self.instance_cache_num = 0 self.instance_cache_by_id = {} self.instance_cache_by_uuid = {} ext_info = plugins.LoadedExtensionInfo() self.controller = servers.ServersController(extension_info=ext_info) CONF.set_override('extensions_blacklist', 'os-scheduler-hints', 'osapi_v3') self.no_scheduler_hints_controller = servers.ServersController( extension_info=ext_info) def instance_create(context, inst): inst_type = flavors.get_flavor_by_flavor_id(3) image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' def_image_ref = 'http://localhost/images/%s' % image_uuid self.instance_cache_num += 1 instance = fake_instance.fake_db_instance(**{ 'id': self.instance_cache_num, 'display_name': inst['display_name'] or 'test', 'uuid': FAKE_UUID, 'instance_type': dict(inst_type), 'access_ip_v4': '1.2.3.4', 'access_ip_v6': 'fead::1234', 'image_ref': inst.get('image_ref', def_image_ref), 'user_id': 'fake', 'project_id': 'fake', 'reservation_id': inst['reservation_id'], "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), "config_drive": None, "progress": 0, "fixed_ips": [], "task_state": "", "vm_state": "", "root_device_name": inst.get('root_device_name', 'vda'), }) self.instance_cache_by_id[instance['id']] = instance self.instance_cache_by_uuid[instance['uuid']] = instance return instance def instance_get(context, instance_id): """Stub for compute/api create() pulling in instance after scheduling """ return self.instance_cache_by_id[instance_id] def instance_update(context, uuid, values): instance = 
self.instance_cache_by_uuid[uuid] instance.update(values) return instance def server_update(context, instance_uuid, params): inst = self.instance_cache_by_uuid[instance_uuid] inst.update(params) return (inst, inst) def fake_method(*args, **kwargs): pass def project_get_networks(context, user_id): return dict(id='1', host='localhost') def queue_get_for(context, *args): return 'network_topic' fakes.stub_out_rate_limiting(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) fake.stub_out_image_service(self.stubs) fakes.stub_out_nw_api(self.stubs) self.stubs.Set(uuid, 'uuid4', fake_gen_uuid) self.stubs.Set(db, 'instance_add_security_group', return_security_group) self.stubs.Set(db, 'project_get_networks', project_get_networks) self.stubs.Set(db, 'instance_create', instance_create) self.stubs.Set(db, 'instance_system_metadata_update', fake_method) self.stubs.Set(db, 'instance_get', instance_get) self.stubs.Set(db, 'instance_update', instance_update) self.stubs.Set(rpc, 'cast', fake_method) self.stubs.Set(db, 'instance_update_and_get_original', server_update) self.stubs.Set(rpc, 'queue_get_for', queue_get_for) self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip', fake_method) def _test_create_extra(self, params, no_image=False, override_controller=None): image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2) if no_image: server.pop('image_ref', None) server.update(params) body = dict(server=server) req = fakes.HTTPRequestV3.blank('/servers') req.method = 'POST' req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json" if override_controller: server = override_controller.create(req, body).obj['server'] else: server = self.controller.create(req, body).obj['server'] def test_create_instance_with_scheduler_hints_disabled(self): hints = {'a': 'b'} params = {'scheduler_hints': hints} old_create = compute_api.API.create def create(*args
, **kwargs): self.assertNotIn('scheduler_hints', kwargs) # self.assertEqual(kwargs['scheduler_hints'], {}) return old_create(*args, **kwargs) self.stubs.Set(compute_api.API, 'create', create) self._test_create_extra(params, override_controller=self.no_scheduler_hints_controller) def test_create_instance_with_scheduler_hints_enabled(self): hints = {'a': 'b'} params = {'scheduler_hints': hints} old_create = compute_api.API.create def create(*args, **kwargs): self.assertEqual(kwargs['scheduler_hints'], hints) return old_create(*args, **kwargs) self.stubs.Set(compute_api.API, 'create', create) self._test_create_extra(params) class TestServerCreateRequestXMLDeserializer(test.TestCase): def setUp(self): super(TestServerCreateRequestXMLDeserializer, self).setUp() ext_info = plugins.LoadedExtensionInfo() controller = servers.ServersController(extension_info=ext_info) self.deserializer = servers.CreateDeserializer(controller) def test_request_with_scheduler_hints_and_alternate_namespace_prefix(self): serial_request = """ <ns2:server xmlns:ns2="http://docs.openstack.org/compute/api/v3" name="new-server-test" image_ref="1" flavor_ref="2"> <ns2:metadata><ns2:meta key="hello">world</ns2:meta></ns2:metadata> <os:scheduler_hints xmlns:os="http://docs.openstack.org/compute/ext/scheduler-hints/api/v3"> <hypervisor>xen</hypervisor> <near>eb999657-dd6b-464e-8713-95c532ac3b18</near> </os:scheduler_hints> </ns2:server> """ request = self.deserializer.deserialize(serial_request) expected = { "server": { 'os-scheduler-hints:scheduler_hints': { 'hypervisor': ['xen'], 'near': ['eb999657-dd6b-464e-8713-95c532ac3b18'] }, "name": "new-server-test", "image_ref": "1", "flavor_ref": "2", "metadata": { "hello": "world" } } } self.assertEquals(request['body'], expected) def test_request_with_scheduler_hints(self): serial_request = """ <server xmlns="http://docs.openstack.org/compute/api/v3" xmlns:os-scheduler-hints= "http://docs.openstack.org/compute/ext/scheduler-hints/api/v3" 
name="new-server-test" image_ref="1" flavor_ref="1"> <os-scheduler-hints:scheduler_hints> <different_host> 7329b667-50c7-46a6-b913-cb2a09dfeee0 </different_host> <different_host> f31efb24-34d2-43e1-8b44-316052956a39 </different_host> </os-scheduler-hints:scheduler_hints> </server>""" request = self.deserializer.deserialize(serial_request) expected = {"server": { "name": "new-server
urn generic_combine(intl_combine.mean_method(), arrays, masks=masks, dtype=dtype, out=out, zeros=zeros, scales=scales, weights=weights) def median(arrays, masks=None, dtype=None, out=None, zeros=None, scales=None, weights=None): """Combine arrays using the median, with masks. Arrays and masks are a list of array objects. All input arrays have the same shape. If present, the masks have the same shape also. The function returns an array with one more dimension than the inputs and with size (3, shape). out[0] contains the mean, out[1] the variance and out[2] the number of points used. :param arrays: a list of arrays :param masks: a list of mask arrays, True values are masked :param dtype: data type of the output :param out: optional output, with one more axis than the input arrays :return: median, variance of the median and number of points stored """ return generic_combine(intl_combine.median_method(), arrays, masks=masks, dtype=dtype, out=out, zeros=zeros, scales=scales, weights=weights) def sigmaclip(arrays, masks=None, dtype=None, out=None, zeros=None, scales=None, weights=None, low=3., high=3.): """Combine arrays using the sigma-clipping, with masks. Inputs and masks are a list of array objects. All input arrays have the same shape. If present, the masks have the same shape also. The function returns an array with one more dimension than the inputs and with size (3, shape). out[0] contains the mean, out[1] the variance and out[2] the number of points used. 
:param arrays: a list of arrays :param masks: a list of mask arrays, True values are masked :param dtype: data type of the output :param out: optional output, with one more axis than the input arrays :param low: :param high: :return: mean, variance of the mean and number of points stored """ return generic_combine(intl_combine.sigmaclip_method(low, high), arrays, masks=masks, dtype=dtype, out=out, zeros=zeros, scales=scales, weights=weights) def minmax(arrays, masks=None, dtype=None, out=None, zeros=None, scales=None, weights=None, nmin=1, nmax=1): """Combine arrays using mix max rejection, with masks. Inputs and masks are a list of array objects. All input arrays have the same shape. If present, the masks have the same shape also. The function returns an array with one more dimension than the inputs and with size (3, shape). out[0] contains the mean, out[1] the variance and out[2] the number of points used. :param arrays: a list of arrays :param masks: a list of mask arrays, True values are masked :param dtype: data type of the output :param out: optional output, with one more axis than the input arrays :param nmin: :param nmax: :return: mean, variance of the mean and number of points stored """ return generic_combine(intl_combine.minmax_method(nmin, nmax), arrays, masks=masks, dtype=dtype, out=out, zeros=zeros, scales=scales, weights=weights) def quantileclip(arrays, masks=None, dtype=None, out=None, zeros=None, scales=None, weights=None, fclip=0.10): """Combine arrays using the sigma-clipping, with masks. Inputs and masks are a list of array objects. All input arrays have the same shape. If present, the masks have the same shape also. The function returns an array with one more dimension than the inputs and with size (3, shape). out[0] contains the mean, out[1] the variance and out[2] the number of points used. 
:param arrays: a list of arrays :param masks: a list of mask arrays, True values are masked :param dtype: data type of the output :param out: optional output, with one more axis than the input arrays :param fclip: fraction of points removed on both ends. Maximum is 0.4 (80% of points rejected) :return: mean, variance of the mean and number of points stored """ return generic_combine(intl_combine.quantileclip_method(fclip), arrays, masks=masks, dtype=dtype, out=out, zeros=zeros, scales=scales, weights=weights) def flatcombine(arrays, masks=None, dtype=None, scales=None, low=3.0, high=3.0, blank=1.0): """Combine flat arrays. :param arrays: a list of arrays :param masks: a list of mask arrays, True values are masked :param dtype: data type of the output :param out: optional output, with one more axis than the input arrays :param blank: non-positive values are substituted by this on output :return: mean, variance of the mean and number of points stored """
result = sigmaclip(arrays, masks=masks, dtype=dtype, scales=scales, low=low, high=high) # Substitute values <= 0 by blank mm = result[0]
<= 0 result[0, mm] = blank # Add values to mask result[1:2, mm] = 0 return result def zerocombine(arrays, masks, dtype=None, scales=None): """Combine zero arrays. :param arrays: a list of arrays :param masks: a list of mask arrays, True values are masked :param dtype: data type of the output :param scales: :return: median, variance of the median and number of points stored """ result = median(arrays, masks=masks, dtype=dtype, scales=scales) return result def sum(arrays, masks=None, dtype=None, out=None, zeros=None, scales=None): """Combine arrays by addition, with masks and offsets. Arrays and masks are a list of array objects. All input arrays have the same shape. If present, the masks have the same shape also. The function returns an array with one more dimension than the inputs and with size (3, shape). out[0] contains the sum, out[1] the variance and out[2] the number of points used. :param arrays: a list of arrays :param masks: a list of mask arrays, True values are masked :param dtype: data type of the output :param out: optional output, with one more axis than the input arrays :return: sum, variance of the sum and number of points stored Example: >>> import numpy >>> image = numpy.array([[1., 3.], [1., -1.4]]) >>> inputs = [image, image + 1] >>> sum(inputs) array([[[ 1.5, 3.5], [ 1.5, -0.9]], <BLANKLINE> [[ 0.5, 0.5], [ 0.5, 0.5]], <BLANKLINE> [[ 2. , 2. ], [ 2. , 2. ]]]) """ return generic_combine(intl_combine.sum_method(), arrays, masks=masks, dtype=dtype, out=out, zeros=zeros, scales=scales) def generic_combine(method, arrays, masks=None, dtype=None, out=None, zeros=None, scales=None, weights=None): """Stack arrays using different methods. 
:param method: the combination method :type method: PyCObject :param arrays: a list of arrays :param masks: a list of mask arrays, True values are masked :param dtype: data type of the output :param zeros: :param scales: :param weights: :return: median, variance of the median and number of points stored """ arrays = [numpy.asarray(arr, dtype=dtype) for arr in arrays] if masks is not None: masks = [numpy.asarray(msk) for msk in masks] if out is None: # Creating out if needed # We need three numbers try: outshape = (3,) + tuple(arrays[0].shape) out = numpy.zeros(outshape, dtype) except AttributeError: raise TypeError('First element in arrays does ' 'not have .shape attribute') else: out = numpy.asanyarray(out) intl_combine.generic_combine(
""" Constants used across the ORM in general. """ # S
eparator used to split filter strings apart. LOOKUP_S
EP = '__'
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

import time
import numpy
import os
import pmt
from gnuradio import gr, gr_unittest
from gnuradio import blocks


# QA tests for blocks.multiply_matrix_ff.  Python 2 era code: uses
# xrange and the old gr_unittest entry point.
class test_multiply_matrix_ff (gr_unittest.TestCase):

    def setUp (self):
        # Fresh flowgraph for every test case.
        self.tb = gr.top_block ()
        self.multiplier = None

    def tearDown (self):
        # Drop references so the underlying blocks are freed between tests.
        self.tb = None
        self.multiplier = None

    def run_once(self, X_in, A, tpp=gr.TPP_DONT, A2=None, tags=None, msg_A=None):
        """ Run the test for given input-, output- and matrix values.
        Every row from X_in is considered an input signal on a port.
        """
        X_in = numpy.matrix(X_in)
        A_matrix = numpy.matrix(A)
        (N, M) = A_matrix.shape
        # One matrix row per input port.
        self.assertTrue(N == X_in.shape[0])
        # Calc expected
        Y_out_exp = numpy.matrix(numpy.zeros((M, X_in.shape[1])))
        self.multiplier = blocks.multiply_matrix_ff(A, tpp)
        if A2 is not None:
            # Exercise set_A(): the replacement matrix takes effect
            # before the flowgraph runs, so expectations use A2.
            self.multiplier.set_A(A2)
            A = A2
            A_matrix = numpy.matrix(A)
        for i in xrange(N):
            if tags is None:
                these_tags = ()
            else:
                these_tags = (tags[i],)
            self.tb.connect(blocks.vector_source_f(X_in[i].tolist()[0], tags=these_tags), (self.multiplier, i))
        sinks = []
        for i in xrange(M):
            sinks.append(blocks.vector_sink_f())
            self.tb.connect((self.multiplier, i), sinks[i])
        # Run and check
        self.tb.run()
        for i in xrange(X_in.shape[1]):
            # Expected output is the matrix product, one sample column at a time.
            Y_out_exp[:,i] = A_matrix * X_in[:,i]
        Y_out = [list(x.data()) for x in sinks]
        if tags is not None:
            # Keep the tags seen on each output port for callers to inspect.
            self.the_tags = []
            for i in xrange(M):
                self.the_tags.append(sinks[i].tags())
        self.assertEqual(list(Y_out), Y_out_exp.tolist())

    def test_001_t (self):
        """ Simplest possible check: N==M, unit matrix """
        X_in = (
            (1, 2, 3, 4),
            (5, 6, 7, 8),
        )
        A = (
            (1, 0),
            (0, 1),
        )
        self.run_once(X_in, A)

    def test_002_t (self):
        """ Switch check: N==M, flipped unit matrix """
        X_in = (
            (1, 2, 3, 4),
            (5, 6, 7, 8),
        )
        A = (
            (0, 1),
            (1, 0),
        )
        self.run_once(X_in, A)

    def test_003_t (self):
        """ Average """
        X_in = (
            (1, 1, 1, 1),
            (2, 2, 2, 2),
        )
        A = (
            (0.5, 0.5),
            (0.5, 0.5),
        )
        self.run_once(X_in, A)

    def test_004_t (self):
        """ Set """
        X_in = (
            (1, 2, 3, 4),
            (5, 6, 7, 8),
        )
        A1 = (
            (1, 0),
            (0, 1),
        )
        A2 = (
            (0, 1),
            (1, 0),
        )
        self.run_once(X_in, A1, A2=A2)

    def test_005_t (self):
        """ Tags """
        X_in = (
            (1, 2, 3, 4),
            (5, 6, 7, 8),
        )
        A = (
            (0, 1), # Flip them round
            (1, 0),
        )
        tag1 = gr.tag_t()
        tag1.offset = 0
        tag1.key = pmt.intern("in1")
        tag1.value = pmt.PMT_T
        tag2 = gr.tag_t()
        tag2.offset = 0
        tag2.key = pmt.intern("in2")
        tag2.value = pmt.PMT_T
        self.run_once(X_in, A, tpp=gr.TPP_ONE_TO_ONE, tags=(tag1, tag2))
        # With one-to-one propagation the input tags must surface on the
        # matching output ports.
        self.assertTrue(pmt.equal(tag1.key, self.the_tags[0][0].key))
        self.assertTrue(pmt.equal(tag2.key, self.the_tags[1][0].key))

    #def test_006_t (self):
        #""" Message passing """
        #X_in = (
            #(1, 2, 3, 4),
            #(5, 6, 7, 8),
        #)
        #A1 = (
            #(1, 0),
            #(0, 1),
        #)
        #msg_A = (
            #(0, 1),
            #(1, 0),
        #)
        #self.run_once(X_in, A1, msg_A=msg_A)


if __name__ == '__main__':
    #gr_unittest.run(test_multiply_matrix_ff, "test_multiply_matrix_ff.xml")
    gr_unittest.run(test_multiply_matrix_ff)
import pyautogui, win32api, win32con, ctypes, autoit
from PIL import ImageOps, Image, ImageGrab
from numpy import *
import os
import time
import cv2
import random
from Bot import *


def main():
    # Game-automation loop driven through AutoIt key sends (F-keys).
    # NOTE(review): this Python 2 script was recovered from a
    # whitespace-mangled source; the nesting of the branches below was
    # reconstructed and should be confirmed against the original file.
    bot = Bot()
    autoit.win_wait(bot.title, 5)
    counter = 0      # iterations spent searching for a target
    poitonUse = 0    # [sic] potion-use throttle counter
    cycle = True     # main-loop flag; cleared on death
    fullCounter = 0  # consecutive readings of a full-HP target
    while cycle:
        hpstatus = bot.checkOwnHp()
        print 'hp ' + str(hpstatus)
        if hpstatus == 0:
            # Own HP bar not found: assume death, screenshot, stop the loop.
            autoit.control_send(bot.title, '', '{F9}', 0)
            bot.sleep(0.3,0.6)
            print 'Dead'
            # NOTE(review): leftCornerx/leftCornery/x2/fullY2 are not defined
            # in this file -- presumably star-imported from Bot; verify.
            cv2.imwrite('Dead' + str(int(time.time())) + '.png',bot.getScreen(leftCornerx,leftCornery,x2,fullY2))
            cycle = False
        if hpstatus == 1:
            # Low HP: press the potion key, at most once per 6 iterations.
            if poitonUse == 0:
                autoit.control_send(bot.title, '', '{F10}', 0)
            poitonUse += 1
            if poitonUse > 5:
                poitonUse = 0
        else:
            poitonUse = 0
        res = bot.findHP();
        print 'tgs ' + str(res)
        if res == 3:
            # Target still at full HP: keep attacking and count how long.
            fullCounter += 1
            print 'fc ' + str(fullCounter)
            autoit.control_send(bot.title, '', '{F1}', 0)
        else:
            fullCounter = 0
        if fullCounter > 4:
            # Target never lost HP (unreachable?): drop it and retarget.
            autoit.control_send(bot.title, '', '{ESC}', 0)
            bot.sleep(0.3,0.6)
            autoit.control_send(bot.title, '', '{F3}', 0)
            bot.sleep(0.1,0.3)
            autoit.control_send(bot.title, '', '{F1}', 0)
            # bot.mouseRotate()
            fullCounter = 0
        if res > 0:
            # A target HP bar is visible: attack.
            autoit.control_send(bot.title, '', '{F1}', 0)
            counter = 0
            if res == 1 or res == 3:
                bot.sleep(0.3,0.6)
            if res > 1 and res < 3:
                bot.sleep(1,3)
            if res == 1:
                # Target dead: pick up loot (F3, F2) and attack again.
                autoit.control_send(bot.title, '', '{F3}', 0)
                bot.sleep(0.3,0.6)
                autoit.control_send(bot.title, '', '{F2}', 0)
                bot.sleep(0.3,0.6)
                autoit.control_send(bot.title, '', '{F1}', 0)
        else:
            # No target: search for one.
            fullCounter = 0
            if counter < 3:
                autoit.control_send(bot.title, '', '{F3}', 0)
                bot.sleep(0.5,0.8)
                autoit.control_send(bot.title, '', '{F1}', 0)
                print 'F3'
            if counter > 2:
                # bot.findTarget()
                autoit.control_send(bot.title, '', '{F7}', 0)
            # if counter > 3:
            #     autoit.control_send(bot.title, '', '{F8}', 0)
            #     counter = 0
            counter += 1
            print 'cnt ' + str(counter)
        pass


if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-

import system_tests


class TestCvePoC(metaclass=system_tests.CaseMeta):
    # Regression test for a crash on a malformed TIFF: exiv2 must fail
    # cleanly with exit status 1 and an "unknown image type" error on
    # stderr.  The class attributes below are the test specification
    # consumed by the system_tests.CaseMeta metaclass; `$name` tokens are
    # its variable substitutions.
    # NOTE(review): layout (including line breaks inside the stderr
    # template) was reconstructed from a whitespace-mangled source;
    # confirm against the upstream exiv2 test suite.

    url = "https://github.com/Exiv2/exiv2/issues/208"

    filename = "$data_path/2018-01-09-exiv2-crash-001.tiff"
    commands = ["$exiv2 " + filename]
    retval = [1]
    stdout = [""]
    stderr = [
        """$exiv2_exception_message """ + filename + """:
$filename: $kerFileContainsUnknownImageType
"""]
import subprocess
import sys
import os
import time
from collections import namedtuple

sys.path.append(os.path.join(os.getcwd(), "src"))

from utils import settings
from utils import logger

settings.initialize('watcher')

# launchd plist locations and job label for the TopPatch agent daemon.
original_plist = '/opt/TopPatch/agent/daemon/com.toppatch.agent.plist'
osx_plist = '/System/Library/LaunchDaemons/com.toppatch.agent.plist'
daemon_label = 'com.toppatch.agent'

cp_command = ['/bin/cp', original_plist, osx_plist]
list_command = ['/bin/launchctl', 'list']
load_command = ['/bin/launchctl', 'load', '-w', osx_plist]
unload_command = ['/bin/launchctl', 'unload', '-w', osx_plist]
start_command = ['/bin/launchctl', 'start', daemon_label]
stop_command = ['/bin/launchctl', 'stop', daemon_label]

# How often (seconds) the watchdog loop below re-checks the agent.
check_in_seconds = 60


def _run_command(command):
    """Run *command*, returning its (stdout, stderr) pair."""
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    return process.communicate()


def start_agent():
    """Ask launchd to start the agent job.

    Returns True when launchctl reports success (no output at all),
    False otherwise.
    """
    result = False
    try:
        raw_output, error_output = _run_command(start_command)
        if raw_output == '' and error_output == '':
            logger.log('Agent started.')
            result = True
        elif 'No such process' in error_output:
            logger.log('Agent not found.')
        else:
            logger.log('Unknown output: "%s"' % error_output)
    except Exception as e:
        logger.log("Could not start agent.", logger.LogLevel.Error)
        logger.log_exception(e)
    return result


def restart_agent():
    """Stop the agent job so launchd can bring it back up.

    NOTE(review): only ``launchctl stop`` is issued here — the restart relies
    on the job being configured to respawn; confirm the plist uses KeepAlive.
    """
    try:
        raw_output, error_output = _run_command(stop_command)
        if raw_output == '' and error_output == '':
            logger.log('Agent has restarted.')
        elif 'No such process' in error_output:
            logger.log('Agent not found. Nothing to restart.')
        else:
            logger.log('Unknown output: "%s"' % error_output)
    except Exception as e:
        # BUG FIX: previously logged "Could not start agent." (copy-paste).
        logger.log("Could not restart agent.", logger.LogLevel.Error)
        logger.log_exception(e)


def load_agent():
    """Load the agent's launchd plist (idempotent)."""
    try:
        raw_output, error_output = _run_command(load_command)
        if raw_output == '' and error_output == '':
            logger.log('Agent loaded.')
        elif 'Already loaded' in error_output:
            logger.log('Agent is already loaded.')
        else:
            logger.log('Unknown output: "%s"' % error_output)
    except Exception as e:
        logger.log("Could not load agent.", logger.LogLevel.Error)
        logger.log_exception(e)


def unload_agent():
    """Unload the agent's launchd plist."""
    try:
        raw_output, error_output = _run_command(unload_command)
        if raw_output == '' and error_output == '':
            logger.log('Agent unloaded.')
        elif 'Error unloading' in error_output:
            logger.log('Agent is not loaded/installed.')
        else:
            logger.log('Unknown output: "%s"' % error_output)
    except Exception as e:
        # BUG FIX: previously logged "Could not load agent." (copy-paste).
        logger.log("Could not unload agent.", logger.LogLevel.Error)
        logger.log_exception(e)


# BUG FIX: typename was 'AgentStats', inconsistent with the variable name.
AgentStatus = namedtuple('AgentStatus', ['loaded', 'running'])


def agent_running_stats():
    """Parse ``launchctl list`` output for the agent job.

    Returns an AgentStatus(loaded, running) tuple; ``loaded`` means the
    label appears at all, ``running`` means launchctl shows it with an
    active pid.
    """
    ps_info = []
    running = False
    loaded = False
    raw_output, error_output = _run_command(list_command)
    # launchctl list emits tab-separated "pid<TAB>status<TAB>label" lines.
    for line in raw_output.splitlines():
        pid, run, pname = line.split('\t')
        ps_info.append((pname, run, pid))
    for p in ps_info:
        if daemon_label == p[0]:
            # p[1] can either be:
            #   '0' meaning not running.
            #   '-' meaning its running.
            loaded = True
            if p[1] == '-':
                running = True
                break
            elif p[1] == '0':
                running = False
    status = AgentStatus(loaded, running)
    logger.log(str(status), logger.LogLevel.Debug)
    return status


if __name__ == '__main__':
    logger.log("Starting watcher daemon.")
    while True:
        time.sleep(check_in_seconds)
        agent_status = agent_running_stats()
        if agent_status.loaded:
            if agent_status.running:
                logger.log("Agent is running.", logger.LogLevel.Debug)
                continue
            else:
                # Loaded but not running: try a plain start, reload on failure.
                if not start_agent():
                    load_agent()
        else:
            load_agent()
# When some software has issues and we need to fix it in a
# hackish way, we put it in here. This one day will be empty.

import copy_reg

from twisted.web.client import SchemeNotSupported

from txsocksx.http import SOCKS5Agent as SOCKS5AgentOriginal


def patched_reduce_ex(self, proto):
    """
    This is a hack to overcome a bug in one of pythons core functions. It is
    located inside of copy_reg and is called _reduce_ex.

    Some background on the issue can be found here:

    http://stackoverflow.com/questions/569754/how-to-tell-for-which-object-attribute-pickle
    http://stackoverflow.com/questions/2049849/why-cant-i-pickle-this-object

    There was also an open bug on the pyyaml trac repo, but it got closed
    because they could not reproduce.

    http://pyyaml.org/ticket/190

    It turned out to be easier to patch the python core library than to
    monkey patch yaml.

    XXX see if there is a better way. sigh...
    """
    _HEAPTYPE = 1 << 9
    assert proto < 2
    # Walk the MRO looking for the first non-heap (C-level) base class;
    # the for/else falls back to object if none is found.
    for base in self.__class__.__mro__:
        if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
            break
    else:
        base = object  # not really reachable
    if base is object:
        state = None
    elif base is int:
        state = None
    else:
        if base is self.__class__:
            raise TypeError("can't pickle %s objects" % base.__name__)
        # Reconstruct the C-base portion of the instance's state.
        state = base(self)
    args = (self.__class__, base, state)
    try:
        getstate = self.__getstate__
    except AttributeError:
        if getattr(self, "__slots__", None):
            raise TypeError("a class that defines __slots__ without "
                            "defining __getstate__ cannot be pickled")
        try:
            dict = self.__dict__
        except AttributeError:
            dict = None
    else:
        dict = getstate()
    # Two- vs three-tuple mirrors the copy_reg._reduce_ex protocol.
    if dict:
        return copy_reg._reconstructor, args, dict
    else:
        return copy_reg._reconstructor, args


class SOCKS5Agent(SOCKS5AgentOriginal):
    """
    This is a quick hack to fix:
    https://github.com/habnabit/txsocksx/issues/9
    """

    def _getEndpoint(self, scheme_or_uri, host=None, port=None):
        # Supports both calling conventions: (scheme, host, port) for older
        # twisted, or a single parsed URI object for newer versions.
        if host is not None:
            scheme = scheme_or_uri
        else:
            scheme = scheme_or_uri.scheme
            host = scheme_or_uri.host
            port = scheme_or_uri.port
        if scheme not in ('http', 'https'):
            raise SchemeNotSupported('unsupported scheme', scheme)
        endpoint = self.endpointFactory(
            host, port, self.proxyEndpoint, **self.endpointArgs)
        if scheme == 'https':
            # The TLS policy API moved between twisted releases; probe for
            # whichever hook this version of the base agent provides.
            if hasattr(self, '_wrapContextFactory'):
                tlsPolicy = self._wrapContextFactory(host, port)
            elif hasattr(self, '_policyForHTTPS'):
                tlsPolicy = self._policyForHTTPS.creatorForNetloc(host, port)
            else:
                raise NotImplementedError("can't figure out how to make a context factory")
            endpoint = self._tlsWrapper(tlsPolicy, endpoint)
        return endpoint
try:
    import _pygeapi
except ImportError as e:
    # BUG FIX: the original appended to ``e.msg`` and re-raised, but
    # BaseException.__str__ renders ``args``, not ``msg`` — so the extra
    # hint never appeared in the traceback.  Raise a new ImportError that
    # carries the hint, chained to the original for full context.
    raise ImportError(
        str(e) + ' (this module can be imported only from greenev)') from e

from .evloop import event_loop
# -*- coding: ISO-8859-15 -*-

from twisted.web import client
from twisted.internet.defer import inlineCallbacks

from core.Uusipuu import UusipuuModule

import urllib, simplejson


class Module(UusipuuModule):
    """Uusipuu IRC module answering !google queries via the Google AJAX API."""

    def startup(self):
        self.log('google.py loaded')

    @inlineCallbacks
    def cmd_google(self, user, target, params):
        """Search Google for *params* and announce the first hit on channel.

        Fetches the (deprecated) AJAX search endpoint asynchronously; on an
        empty result set both logs and reports the miss.
        """
        self.log('Querying google for "%s"' % params)
        data = yield client.getPage(
            'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&%s'
            % urllib.urlencode({'q': params.strip()}))
        json = simplejson.loads(data)
        results = json['responseData']['results']
        if not results:
            # BUG FIX: the original referenced an undefined name ``keyword``
            # here, raising NameError whenever a search returned no results.
            self.log('No results found matching "%s"' % params)
            self.chanmsg('No results found matching "%s"' % params)
            return
        self.chanmsg('%s: %s' % \
            (results[0]['titleNoFormatting'].encode('utf-8'),
             results[0]['url'].encode('utf-8')))

# vim: set et sw=4:
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateCertificateAuthority
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.

# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-private-ca


# [START privateca_v1beta1_generated_CertificateAuthorityService_UpdateCertificateAuthority_async]
from google.cloud.security import privateca_v1beta1


async def sample_update_certificate_authority():
    # Create a client
    client = privateca_v1beta1.CertificateAuthorityServiceAsyncClient()

    # Initialize request argument(s)
    certificate_authority = privateca_v1beta1.CertificateAuthority()
    certificate_authority.type_ = "SUBORDINATE"
    certificate_authority.tier = "DEVOPS"
    certificate_authority.config.reusable_config.reusable_config = "reusable_config_value"
    certificate_authority.key_spec.cloud_kms_key_version = "cloud_kms_key_version_value"

    request = privateca_v1beta1.UpdateCertificateAuthorityRequest(
        certificate_authority=certificate_authority,
    )

    # Make the request
    # NOTE(review): methods on the *Async* client are coroutines — this call
    # presumably needs an ``await``; confirm against the installed
    # google-cloud-private-ca version before relying on this snippet.
    operation = client.update_certificate_authority(request=request)

    print("Waiting for operation to complete...")

    response = await operation.result()

    # Handle the response
    print(response)

# [END privateca_v1beta1_generated_CertificateAuthorityService_UpdateCertificateAuthority_async]
# -*- encoding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
#    Releasing children from poverty in Jesus' name
#    @author: Emanuel Cino <ecino@compassion.ch>
#
#    The licence is in the file __openerp__.py
#
##############################################################################
from openerp import models, fields


class MailMessage(models.Model):
    """ Add relation to communication configuration to track generated
    e-mails. """
    # NOTE: despite the class name, this extends the outgoing-mail model
    # 'mail.mail' (not 'mail.message').
    _inherit = 'mail.mail'

    ##########################################################################
    #                                 FIELDS                                 #
    ##########################################################################
    # Link back to the partner communication configuration that generated
    # this e-mail; empty for e-mails created elsewhere.
    communication_config_id = fields.Many2one('partner.communication.config')
__author__ = 'Ralph'

from ui.app import Application


def _run_example():
    """Create the demo wx frame and enter the GUI event loop."""
    from ui.app import Example
    import wx

    app = wx.App()
    Example(None, title='Example')
    app.MainLoop()


if __name__ == '__main__':
    # Alternative (currently disabled) entry point: Application().run(),
    # driving an ImportARFF -> SelectAttributes -> SVM -> ApplyModel
    # pipeline; see the node/Connection API in ui.app for details.
    _run_example()
#!/usr/bin/env python
#-*- coding: utf-8 -*-

import sys, urllib2


def main():
    """Shorten the URL given on the command line via the t34.me API.

    Returns a process exit status: 0 on success, 1 on usage error.
    """
    if len(sys.argv) < 2:
        print("Error, usage: {0} <your url>".format(sys.argv[0]))
        return 1
    url = sys.argv[1]
    # The API replies with the shortened URL as the plain response body.
    print(urllib2.urlopen('http://t34.me/api/?u=' + url).read())
    return 0


if __name__ == '__main__':
    # BUG FIX: main()'s return value was previously discarded, so shell
    # callers could not detect the usage error; propagate it as exit status.
    sys.exit(main())
######################################################
from __future__ import absolute_import

LICENSE = """\
This file is part of pagekite.py.
Copyright 2010-2020, the Beanstalks Project ehf. and Bjarni Runar Einarsson

This program is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.

This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see: <http://www.gnu.org/licenses/>
"""
##############################################################################
import six

import re
import time

import pagekite.logging as logging
from pagekite.compat import *


class TunnelFilter:
  """Base class for watchers/filters for data going in/out of Tunnels."""

  FILTERS = ('connected', 'data_in', 'data_out')

  # Per-session state is dropped after this many seconds without traffic.
  IDLE_TIMEOUT = 1800

  def __init__(self, ui):
    self.sid = {}
    self.ui = ui

  def clean_idle_sids(self, now=None):
    # Garbage-collect session records idle longer than IDLE_TIMEOUT.
    now = now or time.time()
    for sid in list(six.iterkeys(self.sid)):
      if self.sid[sid]['_ts'] < now - self.IDLE_TIMEOUT:
        del self.sid[sid]

  def filter_set_sid(self, sid, info):
    # Merge new metadata into the session record and refresh its timestamp.
    now = time.time()
    if sid not in self.sid:
      self.sid[sid] = {}
    self.sid[sid].update(info)
    self.sid[sid]['_ts'] = now
    self.clean_idle_sids(now=now)

  def filter_connected(self, tunnel, sid, data):
    # Default implementations below just refresh the session timestamp and
    # pass the data through unchanged; subclasses override as needed.
    if sid not in self.sid:
      self.sid[sid] = {}
    self.sid[sid]['_ts'] = time.time()
    return data

  def filter_data_in(self, tunnel, sid, data):
    if sid not in self.sid:
      self.sid[sid] = {}
    self.sid[sid]['_ts'] = time.time()
    return data

  def filter_data_out(self, tunnel, sid, data):
    if sid not in self.sid:
      self.sid[sid] = {}
    self.sid[sid]['_ts'] = time.time()
    return data


class TunnelWatcher(TunnelFilter):
  """Base class for watchers/filters for data going in/out of Tunnels."""

  FILTERS = ('data_in', 'data_out')

  def __init__(self, ui, watch_level=0):
    # NOTE(review): watch_level defaults to an int here, but the filter
    # methods below index it as watch_level[0] — callers presumably pass a
    # one-element list; confirm before changing.
    TunnelFilter.__init__(self, ui)
    self.watch_level = watch_level

  def format_data(self, data, level):
    # Render raw tunnel data for display: split on the header/body boundary,
    # escape text, and hex-dump binary payloads at high watch levels.
    if '\r\n\r\n' in data:
      head, tail = data.split('\r\n\r\n', 1)
      output = self.format_data(head, level)
      output[-1] += '\\r\\n'
      output.append('\\r\\n')
      if tail:
        output.extend(self.format_data(tail, level))
      return output
    else:
      output = data.encode('string_escape').replace('\\n', '\\n\n')
      # Heuristic: lots of escapes means the payload is probably binary.
      if output.count('\\') > 0.15*len(output):
        if level > 2:
          output = [['', '']]
          count = 0
          for d in data:
            output[-1][0] += '%2.2x' % ord(d)
            output[-1][1] += '%c' % ((ord(d) > 31 and ord(d) < 127) and d or '.')
            count += 1
            if (count % 2) == 0:
              output[-1][0] += ' '
            if (count % 20) == 0:
              output.append(['', ''])
          return ['%-50s %s' % (l[0], l[1]) for l in output]
        else:
          return ['<< Binary bytes: %d >>' % len(data)]
      else:
        return output.strip().splitlines()

  def now(self):
    # Human-friendly timestamp with 0.1s resolution.
    return ts_to_iso(int(10*time.time())/10.0
                     ).replace('T', ' ').replace('00000', '')

  def filter_data_in(self, tunnel, sid, data):
    if data and self.watch_level[0] > 0:
      self.ui.Notify('===[ INCOMING @ %s / %s ]===' % (self.now(), sid),
                     color=self.ui.WHITE, prefix=' __')
      for line in self.format_data(data, self.watch_level[0]):
        self.ui.Notify(line, prefix=' <=', now=-1, color=self.ui.GREEN)
    return TunnelFilter.filter_data_in(self, tunnel, sid, data)

  def filter_data_out(self, tunnel, sid, data):
    if data and self.watch_level[0] > 1:
      self.ui.Notify('===[ OUTGOING @ %s / %s ]===' % (self.now(), sid),
                     color=self.ui.WHITE, prefix=' __')
      for line in self.format_data(data, self.watch_level[0]):
        self.ui.Notify(line, prefix=' =>', now=-1, color=self.ui.BLUE)
    return TunnelFilter.filter_data_out(self, tunnel, sid, data)


class HaproxyProtocolFilter(TunnelFilter):
  """Filter prefixes the HAProxy PROXY protocol info to requests."""

  # NOTE(review): ('connected') is a plain string, not a 1-tuple — membership
  # tests against FILTERS get substring semantics; confirm this is intended.
  FILTERS = ('connected')
  ENABLE = 'proxyproto'

  def filter_connected(self, tunnel, sid, data):
    info = self.sid.get(sid)
    if info:
      if not info.get(self.ENABLE, False):
        pass
      elif info[self.ENABLE] in ("1", True):
        remote_ip = info['remote_ip']
        if '.' in remote_ip:
          # Presumably strips an IPv6-mapped prefix (::ffff:1.2.3.4).
          remote_ip = remote_ip.rsplit(':', 1)[1]
        data = 'PROXY TCP%s %s 0.0.0.0 %s %s\r\n%s' % (
          '4' if ('.' in remote_ip) else '6',
          remote_ip, info['remote_port'], info['port'], data or '')
      else:
        logging.LogError(
          'FIXME: Unimplemented PROXY protocol v%s\n' % info[self.ENABLE])
    return TunnelFilter.filter_connected(self, tunnel, sid, data)


class HttpHeaderFilter(TunnelFilter):
  """Filter that adds X-Forwarded-For and X-Forwarded-Proto to requests."""

  # NOTE(review): ('data_in') is a plain string, not a 1-tuple (see above).
  FILTERS = ('data_in')
  HTTP_HEADER = re.compile('(?ism)^(([A-Z]+) ([^\n]+) HTTP/\d+\.\d+\s*)$')
  DISABLE = 'rawheaders'

  def filter_data_in(self, tunnel, sid, data):
    info = self.sid.get(sid)
    if (info and
        info.get('proto') in ('http', 'http2', 'http3', 'websocket') and
        not info.get(self.DISABLE, False)):
      # FIXME: Check content-length and skip bodies entirely
      http_hdr = self.HTTP_HEADER.search(data)
      if http_hdr:
        data = self.filter_header_data_in(http_hdr, data, info)
    return TunnelFilter.filter_data_in(self, tunnel, sid, data)

  def filter_header_data_in(self, http_hdr, data, info):
    # Rename any pre-existing forwarding headers (to X-Old-*) and inject
    # fresh ones describing the real client connection.
    clean_headers = [
      r'(?mi)^(X-(PageKite|Forwarded)-(For|Proto|Port):)'
    ]
    add_headers = [
      'X-Forwarded-For: %s' % info.get('remote_ip', 'unknown'),
      'X-Forwarded-Proto: %s' % (info.get('using_tls') and 'https' or 'http'),
      'X-PageKite-Port: %s' % info.get('port', 0)
    ]

    if info.get('rewritehost', False):
      add_headers.append('Host: %s' % info.get('rewritehost'))
      clean_headers.append(r'(?mi)^(Host:)')

    # NOTE(review): group(1) is the whole request line; the HTTP method is
    # group(2) — this condition can presumably never match as written.
    if http_hdr.group(1).upper() in ('POST', 'PUT'):
      # FIXME: This is a bit ugly
      add_headers.append('Connection: close')
      clean_headers.append(r'(?mi)^(Connection|Keep-Alive):')
      info['rawheaders'] = True

    for hdr_re in clean_headers:
      data = re.sub(hdr_re, 'X-Old-\\1', data)

    return re.sub(self.HTTP_HEADER,
                  '\\1\n%s\r' % '\r\n'.join(add_headers),
                  data)


class HttpSecurityFilter(HttpHeaderFilter):
  """Filter that blocks known-to-be-dangerous requests."""

  DISABLE = 'trusted'
  HTTP_DANGER = re.compile('(?ism)^((get|post|put|patch|delete) '
                           # xampp paths, anything starting with /adm*
                           '((?:/+(?:xampp/|security/|licenses/|webalizer/|server-(?:status|info)|adm)'
                            '|[^\n]*/'
                             # WordPress admin pages
                             '(?:wp-admin/(?!admin-ajax|css/)|wp-config\.php'
                             # Hackzor tricks
                             '|system32/|\.\.|\.ht(?:access|pass)'
                             # phpMyAdmin and similar tools
                             '|(?:php|sql)?my(?:sql)?(?:adm|manager)'
                             # Setup pages for common PHP tools
                             '|(?:adm[^\n]*|install[^\n]*|setup)\.php)'
                           ')[^\n]*)'
                           ' HTTP/\d+\.\d+\s*)$')
  REJECT = 'PAGEKITE_REJECT_'

  def filter_header_data_in(self, http_hdr, data, info):
    danger = self.HTTP_DANGER.search(data)
    if danger:
      self.ui.Notify('BLOCKED: %s %s' % (danger.group(2), danger.group(3)),
                     color=self.ui.RED, prefix='***')
      self.ui.Notify('See https://pagekite.net/support
from .allow_origin import allow_origin_tween_factory # noqa from .api_headers import api_headers_
tween_factory # noqa from .basic_auth import basic_auth_tween
_factory # noqa
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================

# ============= standard library imports ========================
from itertools import groupby

import six

# ============= enthought library imports =======================
from pyface.qt.QtGui import QTextEdit, QWidget, QHBoxLayout, QTextFormat, QColor, QPainter, QFrame, \
    QSizePolicy, QPainterPath
from traits.trait_errors import TraitError
from traitsui.basic_editor_factory import BasicEditorFactory
from traitsui.qt4.editor import Editor

# ============= local library imports ==========================
from pychron.git_archive.diff_util import extract_line_numbers


def get_ranges(data):
    # Group consecutive line numbers into runs, e.g. [1,2,3,7,8] -> [[1,2,3],[7,8]].
    return [[gi[0] for gi in g] for k, g in
            groupby(enumerate(data), lambda i_x: i_x[0] - i_x[1])]


class QDiffConnector(QFrame):
    """Narrow frame drawn between the two text panes; paints filled
    polygons connecting corresponding changed ranges on each side."""

    # Vertical scroll offsets of the left/right panes, updated by
    # LinkedTextEdit.scrollContentsBy.
    _left_y = 0
    _right_y = 0

    def __init__(self):
        super(QDiffConnector, self).__init__()
        self.color = QColor(0, 100, 0, 100)
        self.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Ignored))
        self.setFixedWidth(30)

    def paintEvent(self, event):
        qp = QPainter()
        qp.begin(self)
        qp.setRenderHint(QPainter.Antialiasing)
        qp.setBrush(self.color)
        qp.setPen(self.color)
        rect = event.rect()
        x = rect.x()
        w = rect.width()
        # NOTE(review): assumes a fixed 16px line height — TODO confirm this
        # matches the font metrics of the text panes.
        lineheight = 16
        print('-------------------')
        print('lefts', self.lefts)
        print('rights', self.rights)
        print('-------------------')
        ly = self._left_y + 5
        ry = self._right_y + 5

        rs=self.rights[:]
        # offset=1
        # Pair each left-side range with the right-side range at the same
        # index; unpaired leftovers are handled by the second loop below.
        for i, l in enumerate(self.lefts):
            path = QPainterPath()
            sl, el = l[0], l[-1]
            try:
                r=rs[i]
                sr, er = r[0], r[-1]
                rs.pop(i)
                # offset+=1
            except IndexError:
                sr, er = l[-1], l[-1]-1

            y = ly + lineheight * sl
            y2 = ry + lineheight * sr
            path.moveTo(x, y)
            path.lineTo(x, y + lineheight * (el - sl + 1))
            path.lineTo(x + w, y2 + lineheight * (er - sr + 1))
            path.lineTo(x + w, y2)
            qp.drawPath(path)

        # Remaining right-side ranges with no left-side partner.
        for i, r in enumerate(rs):
            path = QPainterPath()
            sr, er = r[0], r[-1]
            # try:
            l=self.lefts[i]
            sl, el = r[-1], r[-1]-1
            # except IndexError:
            #     sl, el = l[-1]+2, l[-1]+1
            #     print sl, el
            y = ly + lineheight * (sl)
            y2 = ry + lineheight * (sr)
            path.moveTo(x, y)
            path.lineTo(x, y + lineheight * (el - sl + 1))
            path.lineTo(x + w, y2 + lineheight * (er - sr + 1))
            path.lineTo(x + w, y2)
            qp.drawPath(path)
        qp.end()

    def set_left_y(self, y):
        self._left_y += y

    def set_right_y(self, y):
        self._right_y += y


class LinkedTextEdit(QTextEdit):
    """Text edit that mirrors its vertical scrolling to a partner widget
    and notifies the shared connector so it can repaint."""

    linked_widget = None
    connector = None
    orientation = 'left'
    # Guard flag preventing infinite mutual-scroll recursion.
    no_update = False

    def scrollContentsBy(self, x, y):
        if self.linked_widget and not self.no_update:
            sb = self.linked_widget.verticalScrollBar()
            v = sb.value() - y
            self.linked_widget.no_update = True
            sb.setSliderPosition(v)
            self.linked_widget.no_update = False

        if self.connector:
            if self.orientation == 'left':
                self.connector.set_left_y(y)
            else:
                self.connector.set_right_y(y)
            self.connector.update()

        super(LinkedTextEdit, self).scrollContentsBy(x, y)


class QDiffEdit(QWidget):
    """Side-by-side diff viewer: two linked read-only panes joined by a
    QDiffConnector that visualizes changed-line correspondence."""

    def __init__(self, parent, *args, **kw):
        super(QDiffEdit, self).__init__(*args, **kw)
        self.left = LinkedTextEdit()
        self.left.orientation = 'left'
        self.left.setReadOnly(True)
        self.right = LinkedTextEdit()
        self.right.orientation = 'right'
        self.right.setReadOnly(True)
        self.connector = QDiffConnector()

        # Cross-wire the panes so scrolling either one drags the other.
        self.left.linked_widget = self.right
        self.right.linked_widget = self.left
        self.left.connector = self.connector
        self.right.connector = self.connector

        layout = QHBoxLayout()
        layout.setSpacing(0)
        layout.addWidget(self.left)
        layout.addWidget(self.connector)
        layout.addWidget(self.right)
        self.setLayout(layout)

    def set_left_text(self, txt):
        self.left.setText(txt)

    def set_right_text(self, txt):
        self.right.setText(txt)

    def highlight(self, ctrl, lineno):
        # Add a full-width green selection on the given line.
        selection = QTextEdit.ExtraSelection()
        selection.cursor = ctrl.textCursor()
        selection.format.setBackground(QColor(100, 200, 100))
        selection.format.setProperty(
            QTextFormat.FullWidthSelection, True)

        doc = ctrl.document()
        block = doc.findBlockByLineNumber(lineno)
        selection.cursor.setPosition(block.position())
        ss = ctrl.extraSelections()
        ss.append(selection)
        ctrl.setExtraSelections(ss)
        selection.cursor.clearSelection()

    def _clear_selection(self):
        for ctrl in (self.left, self.right):
            ctrl.setExtraSelections([])

    def set_diff(self):
        # Recompute changed lines for both panes and refresh highlights.
        self._clear_selection()
        ls, rs = extract_line_numbers(self.left.toPlainText(),
                                      self.right.toPlainText())
        for li in ls:
            self.highlight(self.left, li)
        for ri in rs:
            self.highlight(self.right, ri)

        self._set_connectors(ls, rs)

    def _set_connectors(self, ls, rs):
        self.connector.lefts = get_ranges(ls)
        self.connector.rights = get_ranges(rs)
        self.connector.update()


class _DiffEditor(Editor):
    _no_update = False

    def init(self, parent):
        self.control = self._create_control(parent)

    def _create_control(self, parent):
        control = QDiffEdit(parent)
        # QtCore.QObject.connect(ctrl.left,
        #                        QtCore.SIGNAL('textChanged()'), self.update_left_object)
        # QtCore.QObject.connect(ctrl.right,
        #                        QtCore.SIGNAL('textChanged()'), self.update_right_object)
        control.left.textChanged.connect(self.update_left_object)
        control.right.textChanged.connect(self.update_right_object)
        return control

    def update_editor(self):
        if self.value:
            self.control.set_left_text(self.value.left_text)
            self.control.set_right_text(self.value.right_text)
            self.control.set_diff()

    def update_right_object(self):
        """ Handles the user entering input data in the edit control.
        """
        self._update_object('right')

    def update_left_object(self):
        """ Handles the user entering input data in the edit control.
        """
        self._update_object('left')

    def _get_user_left_value(self):
        return self._get_user_value('left')

    def _get_user_right_value(self):
        # NOTE(review): this returns the *left* widget's value — presumably a
        # copy/paste bug; confirm whether 'right' was intended.
        return self._get_user_value('left')

    def _update_object(self, attr):
        if (not self._no_update) and (self.control is not None):
            try:
                setattr(self.value, '{}_text'.format(attr),
                        getattr(self, '_get_user_{}_value'.format(attr))())
                self.control.set_diff()
                if self._error is not None:
                    self._error = None
                    self.ui.errors -= 1

                self.set_error_state(False)
            except TraitError as excp:
                pass

    def _get_user_value(self, attr):
        # Fetch the pane's text (QTextEdit has no text(), hence the fallback).
        ctrl = getattr(self.control, attr)
        try:
            value = ctrl.text()
        except AttributeError:
            value = ctrl.toPlainText()
        value = six.text_type(value)

        try:
            value = self.evaluate(value)
        except:
            pass

        try:
            ret = self.f
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .resource import Resource


class RouteTable(Resource):
    """Route table resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict
    :param routes: Collection of routes contained within a route table.
    :type routes: list of :class:`Route
     <azure.mgmt.network.v2017_03_01.models.Route>`
    :ivar subnets: A collection of references to subnets.
    :vartype subnets: list of :class:`Subnet
     <azure.mgmt.network.v2017_03_01.models.Subnet>`
    :param provisioning_state: The provisioning state of the resource.
     Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param etag: Gets a unique read-only string that changes whenever the
     resource is updated.
    :type etag: str
    """

    # Server-populated fields that must never be sent in a request body.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'subnets': {'readonly': True},
    }

    # Maps Python attribute names to their REST wire paths/types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'routes': {'key': 'properties.routes', 'type': '[Route]'},
        'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, id=None, location=None, tags=None, routes=None, provisioning_state=None, etag=None):
        super(RouteTable, self).__init__(id=id, location=location, tags=tags)
        self.routes = routes
        # Read-only on the server side; always initialized to None locally.
        self.subnets = None
        self.provisioning_state = provisioning_state
        self.etag = etag
#!/usr/bin/env python3

# Minimal TCP echo client: connect to a local server, send one greeting,
# print whatever the server echoes back.

import socket

HOST = '127.0.0.1'  # The server's hostname or IP address
PORT = 65432        # The port used by the server

# The ``with`` block closes the socket automatically on exit.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    s.sendall(b'Hello, world')
    # Read up to 1 KiB of reply; assumes the echo fits in one recv().
    data = s.recv(1024)

print('Received', repr(data))
import transform_util from lingvo.tasks.car.waymo import waymo_ap_metric from lingvo.tasks.car.waymo import waymo_metadata import numpy as np class WaymoOpenDatasetDecoder(base_decoder.BaseDecoder): """A decoder to use for decoding a detector model on Waymo.""" @classmethod def Params(cls): p = super().Params() p.Define( 'draw_visualizations', False, 'Boolean for whether to draw ' 'visualizations. This is independent of laser_sampling_rate.') p.ap_metric = waymo_ap_metric.WaymoAPMetrics.Params( waymo_metadata.WaymoMetadata()) p.Define( 'extra_ap_metrics', {}, 'Dictionary of extra AP metrics to run in the decoder. The key' 'is the name of the metric and the value is a sub-class of ' 'APMetric') p.Define( 'save_residuals', False, 'If True, this expects the residuals and ground-truth to be available ' 'in the decoder output dictionary, and it will save it to the decoder ' 'output file. See decode_include_residuals in PointDetectorBase ' 'for details.') return p def CreateDecoderMetrics(self): """Decoder metrics for WaymoOpenDataset.""" p = self.params waymo_metric_p = p.ap_metric.Copy().Set(cls=waymo_ap_metric.WaymoAPMetrics) waymo_metrics = waymo_metric_p.Instantiate() class_names = waymo_metrics.metadata.ClassNames() # TODO(bencaine,vrv): There's some code smell with this ap_metrics params # usage. We create local copies of the params to then instantiate them. # Failing to do this risks users editing the params after construction of # the object, making each object method call have the potential for side # effects. # Create a new dictionary with copies of the params converted to objects # so we can then add these to the decoder metrics. extra_ap_metrics = {} for k, metric_p in p.extra_ap_metrics.items(): extra_ap_metrics[k] = metric_p.Instantiate() waymo_metric_bev_p = waymo_metric_p.Copy() waymo_metric_bev_p.box_type = '2d' waymo_metrics_bev = waymo_metric_bev_p.Instantiate() # Convert the list of class names to a dictionary mapping class_id -> name. 
class_id_to_name = dict(enumerate(class_names)) # TODO(vrv): This uses the same top down transform as for KITTI; # re-visit these settings since detections can happen all around # the car. top_down_transform = transform_util.MakeCarToImageTransform( pixels_per_meter=32., image_ref_x=512., image_ref_y=1408., flip_axes=True) decoder_metrics = py_utils.NestedMap({ 'top_down_visualization': (detection_3d_metrics.TopDownVisualizationMetric( top_down_transform, image_height=1536, image_width=1024, class_id_to_name=class_id_to_name)), 'num_samples_in_batch': metrics.AverageMetric(), 'waymo_metrics': waymo_metrics, 'waymo_metrics_bev': waymo_metrics_bev, }) self._update_metrics_class_keys = ['waymo_metrics_bev', 'waymo_metrics'] for k, metric in extra_ap_metrics.items(): decoder_metrics[k] = metric self._update_metrics_class_keys.append(k) decoder_metrics.mesh = detection_3d_metrics.WorldViewer() return decoder_metrics def ProcessOutputs(self, input_batch, model_outputs): """Produce additional decoder outputs for WaymoOpenDataset. Args: input_batch: A .NestedMap of the inputs to the model. model_outputs: A .NestedMap of the outputs of the model, including:: - per_class_predicted_bboxes: [batch, num_classes, num_boxes, 7] float Tensor with per class 3D (7 DOF) bounding boxes. - per_class_predicted_bbox_scores: [batch, num_classes, num_boxes] float Tensor with per class, per box scores. - per_class_valid_mask: [batch, num_classes, num_boxes] masking Tensor indicating which boxes were still kept after NMS for each class. Returns: A NestedMap of additional decoder outputs needed for PostProcessDecodeOut. """ del model_outputs p = self.params input_labels = input_batch.labels input_metadata = input_batch.metadata source_ids = tf.strings.join([ input_metadata.run_segment, tf.as_string(input_metadata.run_start_offset) ], separator='_') ret = py_utils.NestedMap({ 'num_points_in_bboxes': input_batch.labels.bboxes_3d_num_points, # Ground truth. 
'bboxes_3d': input_labels.bboxes_3d, 'bbox
es_3d_mask': input_labels.bboxes_3d_mask, 'labels': input_labels.labels, 'label_ids': input_labels.label_ids, 'speed': input_labels.speed, 'acceleration': input_labels.acceleration, # Fill the following in. 'source_ids': source_ids, 'difficulti
es': input_labels.single_frame_detection_difficulties, 'unfiltered_bboxes_3d_mask': input_labels.unfiltered_bboxes_3d_mask, 'run_segment': input_metadata.run_segment, 'run_start_offset': input_metadata.run_start_offset, 'pose': input_metadata.pose, }) if p.draw_visualizations: laser_sample = self._SampleLaserForVisualization( input_batch.lasers.points_xyz, input_batch.lasers.points_padding) ret.update(laser_sample) return ret def PostProcessDecodeOut(self, dec_out_dict, dec_metrics_dict): """Post-processes the decoder outputs.""" p = self.params # Update num_samples_in_batch. batch_size, num_classes, num_boxes, _ = ( dec_out_dict.per_class_predicted_bboxes.shape) dec_metrics_dict.num_samples_in_batch.Update(batch_size) # Update decoder output by removing z-coordinate, thus reshaping the bboxes # to [batch, num_bboxes, 5] to be compatible with # TopDownVisualizationMetric. # Indices corresponding to the 2D bbox parameters (x, y, dx, dy, phi). bbox_2d_idx = np.asarray([1, 1, 0, 1, 1, 0, 1], dtype=np.bool) bboxes_2d = dec_out_dict.bboxes_3d[..., bbox_2d_idx] predicted_bboxes = dec_out_dict.per_class_predicted_bboxes[..., bbox_2d_idx] if p.draw_visualizations and dec_out_dict.points_sampled: tf.logging.info('Updating sample for top down visualization') dec_metrics_dict.mesh.Update( py_utils.NestedMap({ 'points_xyz': dec_out_dict.points_xyz, 'points_padding': dec_out_dict.points_padding, })) # Flatten our predictions/scores to match the API of the visualization # The last dimension of flattened_bboxes is 5 due to the mask # above using bbox_2d_idx. flattened_bboxes = np.reshape(predicted_bboxes, [batch_size, num_classes * num_boxes, 5]) flattened_visualization_weights = np.reshape( dec_out_dict.visualization_weights, [batch_size, num_classes * num_boxes]) # Create a label id mask for now to maintain compatibility. # TODO(bencaine): Refactor visualizations to reflect new structure. 
flattened_visualization_labels = np.tile( np.arange(0, num_classes)[np.newaxis, :, np.newaxis], [batch_size, 1, num_boxes]) flattened_visualization_labels = np.reshape( flattened_visualization_labels, [batch_size, num_classes * num_boxes]) dec_metrics_dict.top_down_visualization.Update( py_utils.NestedMap({ 'visualization_labels': flattened_visualization_labels, 'predicted_bboxes': flattened_bboxes, 'visualization_weights': flattened_visualization_weights, 'points_xyz': dec_out_dict.points_xyz, 'points_padding': dec_out_dict.points_padding, 'gt_bboxes_2d': bboxes_2d, 'gt_bboxes_2d_weights': dec_out_dict.bboxes_3d_mask, 'labels': dec_out_dict.labels, 'difficulties': dec_out_dict.difficulties, 'source_ids': dec_out_dict.source_ids, })) # Update AP metrics. # Skip zeroth step decoding. if dec_out_dict.global_step == 0: return None
WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. # # from . import base class API_general_hosts(base.API_base): """ General Host Information TR64 Object-Oriented API. Can be instantiated via ``session.getOOAPI("general_hosts")`` or ``session.getOOAPI("urn:dslforum-org:service:Hosts:1")``\ . Same parameters and attributes as :py:class:`fritzctl.ooapi.base.API_base()`\ . """ def getHostByIndex(self,index,ext=True): """ Returns the Host associated with the given Index. :param int index: The Index of the Host :param bool ext: Optional Flag if information from the AVM Extension should be integrated, defaults to True :return: Host Information Object :rtype: Host :raises AssertionError: if the index is invalid, e.g. not an integer or lower than 0 :raises ValueError: if the index is out-of-bounds """ assert isinstance(index,int) and index>=0 d = self.dynapi.GetGenericHostEntry(NewIndex=index) if ext: d.update(self.dynapi.callAPI("X_AVM-DE_GetGenericHostEntryExt",NewIndex=index)) d["_ext"]=True else: d["_ext"]=False return Host(self,index,d) def getHostByMAC(self,mac,ext=True): """ Returns the Host associated with the given MAC Address. :param str mac: MAC Address of the Host :param bool ext: Optional Flag if information from the AVM Extension should be integrated, defaults to True :return: Host Information Object :rtype: Host :raises AssertionError: if the MAC Address is invalid, e.g. 
not a string :raises ValueError: if the MAC Address is unknown """ assert isinstance(mac,str) d = self.dynapi.GetSpecificHostEntry(NewMACAdress=mac) d["NewMACAddress"]=mac if ext: d.update(self.dynapi.callAPI("X_AVM-DE_GetSpecificHostEntryExt",NewMACAddress=mac)) d["_ext"]=True else: d["_ext"]=False return Host(self,-1,d) def getHostListLength(self): """ Returns the length of the List of all known Hosts. :return: Number of Entries in the host list. :rtype: int """ return int(self.dynapi.GetHostNumberOfEntries()["NewHostNumberOfEntries"]) def getHostList(self,ext=True): """ Returns a list of all hosts. :param bool ext: Optional Flag if information from the AVM Extension should be integrated, defaults to True :return: List of Hosts :rtype: List of :py:class:`Host()` """ out = [] for i in range(self.getHostListLength()): out.append(self.getHostByIndex(i,ext=ext)) return out def getMacByIndex(self,index): """ Returns the MAC Address of the device associated with the given index. :param int index: Index of the Device to return :return: MAC Address :rtype: str """ return self.dynapi.GetGenericHostEntry(NewIndex=index)["NewMACAddress"] def
getChangeCounter(self): """ Returns the current change counter. :return: The current change counter :rtype: int """ return int(self.dynapi.callAPI("X_AVM-DE_GetC
hangeCounter")["NewX_AVM-DE_GetChangeCounter"]) def wakeUp(self,mac): """ Sends a WakeOnLAN request to the specified Host. :param str mac: MAC Address to wake up :raises AssertionError: if the MAC Address is invalid, e.g. not a string :raises ValueError: if the MAC Address is unknown """ assert isinstance(mac,str) self.dynapi.callAPI("X_AVM-DE_WakeOnLANByMACAddress",NewMACAddress=mac) class Host(object): """ Host Information and Configuration Class. :param API_avm_homeauto api: API object to use when querying for data :param int index: Index this device had when requested via ``GetGenericHostEntry()``\ , may be -1 if unknown :param dict info: Dictionary containing the TR64 Response with all the data about the device; automatically passed to :py:meth:`loadData()` :ivar API_avm_homeauto api: stores the supplied API object :ivar int index: stores the supplied index :ivar dict info: stores the data in a dictionary :py:attr:`info` stores a flag if extension data is available in the ``_ext`` key. 
:ivar str mac: MAC Address of this Host :ivar str ip: IP Address of this Host :ivar str address_source: Source of the Address :ivar int lease_remaining: Time in second until the DHCP Lease expires :ivar str interface_type: Type of the interface this Host is connected with :ivar bool active: Flag if this host is active :ivar str hostname: Property for reading and writing hostname, see :py:attr:`hostname` Extension Variables: :ivar int ethport: Which ethernet port the host is connected with, from 1-4 or 0 if not via LAN :ivar float speed: Current Connection Speed :ivar bool updateAvailable: Flag if an update is available, where applicable :ivar bool updateSuccessful: Flag if the last update was successful, where applicable :ivar str infourl: URL for getting Information :ivar str model: Model of the Host :ivar str url: URL of the Host """ def __init__(self,api,index,info): self.api = api self.index = index self.info = info self.loadData(self.info) def loadData(self,data): """ Populates instance variables with the supplied TR64 response. This method is automatically called upon construction with the supplied info dict. Note that the ``_ext`` key must be set to a boolean flag indicating if extension information is contained in the response. """ self.mac = data["NewMACAddress"] self.ip = data["NewIPAddress"] self.address_source = data["NewAddressSource"] self.lease_remaining = int(data["NewLeaseTimeRemaining"]) self.interface_type = data["NewInterfaceType"] self.active = data["NewActive"]=="1" self._hostname = data["NewHostName"] if data["_ext"]: self.ethport = int(data["NewX_AVM-DE_Port"]) self.speed = float(data["NewX_AVM-DE_Speed"]) self.updateAvailable = data["NewX_AVM-DE_UpdateAvailable"]=="1" self.updateSuccessful = data["NewX_AVM-DE_UpdateSuccessful"]=="succeeded" self.infourl = data["NewX_AVM-DE_InfoURL"] self.model = data["NewX_AVM-DE_Model"] self.url = data["NewX_AVM-DE_URL"] def reloadData(self): """ Reloads the data from the server. 
Note that this method will only request extension data if the key ``_ext`` is set to ``True``\ . """ d = self.api.dynapi.GetSpecificHostEntry(NewMACAddress=self.mac) if self.info["_ext"]: d.update(self.api.dynapi.callAPI("X_AVM-DE_GetSpecificHostEntryExt",NewMACAddress=self.mac)) d["_ext"]=self.info["_ext"] d["NewMACAddress"]=self.mac self.info = d def doUpdate(self): """ Requests that the host does an update. Note that this may not work on every host """ self.checkForUpdates() self.api.dynapi.callAPI("X_AVM-DE_HostDoUpdate",NewMACAddress=self.mac) def checkForUpdates(self): """ Checks for Updates. Note that this method does not return anything as the underlying API call gives no variables in return. This method automatically reloads the data to update any update flags that may have changed. """ self.api.d
-------------------------------------------------------- def current_state(self): return self.lexstate # ------------------------------------------------------------ # skip() - Skip ahead n characters # ------------------------------------------------------------ def skip(self,n): self.lexpos += n # ------------------------------------------------------------ # opttoken() - Return the next token from the Lexer # # Note: This function has been carefully implemented to be as fast # as possible. Don't make changes unless you really know what # you are doing # ------------------------------------------------------------ def token(self): # Make local copies of frequently referenced attributes lexpos = self.lexpos lexlen = self.lexlen lexignore = self.lexignore lexdata = self.lexdata while lexpos < lexlen: # This code provides some short-circuit code for whitespace, tabs, and other ignored characters if lexdata[lexpos] in lexignore: lexpos += 1 continue # Look for a regular expression match for lexre,lexindexfunc in self.lexre: m = lexre.match(lexdata,lexpos) if not m: continue # Create a token for return tok = LexToken() tok.value = m.group() tok.lineno = self.lineno tok.lexpos = lexpos i = m.lastindex func,tok.type = lexindexfunc[i] if not func: # If no token type was set, it's an ignored token if tok.type: self.lexpos = m.end() return tok else: lexpos = m.end() break lexpos = m.end() # If token is processed by a function, call it tok.lexer = self # Set additional attributes useful in token rules self.lexmatch = m self.lexpos = lexpos newtok = func(tok) # Every function must return a token, if nothing, we just move to next token if not newtok: lexpos = self.l
expos # This is here in case user has updated lexpos. lexignore = self.lexignore # This is here in case there was a state change break # Verify type of the token. If not in the token m
ap, raise an error if not self.lexoptimize: if not newtok.type in self.lextokens: raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % ( func_code(func).co_filename, func_code(func).co_firstlineno, func.__name__, newtok.type),lexdata[lexpos:]) return newtok else: # No match, see if in literals if lexdata[lexpos] in self.lexliterals: tok = LexToken() tok.value = lexdata[lexpos] tok.lineno = self.lineno tok.type = tok.value tok.lexpos = lexpos self.lexpos = lexpos + 1 return tok # No match. Call t_error() if defined. if self.lexerrorf: tok = LexToken() tok.value = self.lexdata[lexpos:] tok.lineno = self.lineno tok.type = "error" tok.lexer = self tok.lexpos = lexpos self.lexpos = lexpos newtok = self.lexerrorf(tok) if lexpos == self.lexpos: # Error method didn't change text position at all. This is an error. raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:]) lexpos = self.lexpos if not newtok: continue return newtok self.lexpos = lexpos raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:]) self.lexpos = lexpos + 1 if self.lexdata is None: raise RuntimeError("No input string given with input()") return None # Iterator interface def __iter__(self): return self def next(self): t = self.token() if t is None: raise StopIteration return t __next__ = next # ----------------------------------------------------------------------------- # ==== Lex Builder === # # The functions and classes below are used to collect lexing information # and build a Lexer object from it. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # get_caller_module_dict() # # This function returns a dictionary containing all of the symbols defined within # a caller further down the call stack. This is used to get the environment # associated with the yacc() call if none was provided. 
# ----------------------------------------------------------------------------- def get_caller_module_dict(levels): try: raise RuntimeError except RuntimeError: e,b,t = sys.exc_info() f = t.tb_frame while levels > 0: f = f.f_back levels -= 1 ldict = f.f_globals.copy() if f.f_globals != f.f_locals: ldict.update(f.f_locals) return ldict # ----------------------------------------------------------------------------- # _funcs_to_names() # # Given a list of regular expression functions, this converts it to a list # suitable for output to a table file # ----------------------------------------------------------------------------- def _funcs_to_names(funclist,namelist): result = [] for f,name in zip(funclist,namelist): if f and f[0]: result.append((name, f[1])) else: result.append(f) return result # ----------------------------------------------------------------------------- # _names_to_funcs() # # Given a list of regular expression function names, this converts it back to # functions. # ----------------------------------------------------------------------------- def _names_to_funcs(namelist,fdict): result = [] for n in namelist: if n and n[0]: result.append((fdict[n[0]],n[1])) else: result.append(n) return result # ----------------------------------------------------------------------------- # _form_master_re() # # This function takes a list of all of the regex components and attempts to # form the master regular expression. Given limitations in the Python re # module, it may be necessary to break the master regex into separate expressions. 
# ----------------------------------------------------------------------------- def _form_master_re(relist,reflags,ldict,toknames): if not relist: return [] regex = "|".join(relist) try: lexre = re.compile(regex,re.VERBOSE | reflags) # Build the index to function map for the matching engine lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1) lexindexnames = lexindexfunc[:] for f,i in lexre.groupindex.items(): handle = ldict.get(f,None) if type(handle) in (types.FunctionType, types.MethodType): lexindexfunc[i] = (handle,toknames[f]) lexindexnames[i] = f elif handle is not None: lexindexnames[i] = f if f.find("ignore_") > 0: lexindexfunc[i] = (None,None) else: lexindexfunc[i] = (None, toknames[f]) return [(lexre,lexinde
iated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import unicode_literals import platform import warnings try: from .StringMatcher import StringMatcher as SequenceMatcher except ImportError: #if platform.python_implementation() != "PyPy": # warnings.warn('Using slow pure-pyt
hon SequenceMatcher. Install python-Levenshtein to remove this warning') from difflib import SequenceMatcher from . import utils ########################### # Basic Scoring Functions # ########################### @utils.check_for_none @utils.check_empty_string def ratio(s1, s2): s1, s2 = utils.make_type_consistent(s1, s2) m = SequenceMatcher(None, s1, s2) return utils.intr(100 * m.ratio()) @utils.check_for_none @utils.check_empty_string def partial_ratio(s1, s2): """"Return the r
atio of the most similar substring as a number between 0 and 100.""" s1, s2 = utils.make_type_consistent(s1, s2) if len(s1) <= len(s2): shorter = s1 longer = s2 else: shorter = s2 longer = s1 m = SequenceMatcher(None, shorter, longer) blocks = m.get_matching_blocks() # each block represents a sequence of matching characters in a string # of the form (idx_1, idx_2, len) # the best partial match will block align with at least one of those blocks # e.g. shorter = "abcd", longer = XXXbcdeEEE # block = (1,3,3) # best score === ratio("abcd", "Xbcd") scores = [] for block in blocks: long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0 long_end = long_start + len(shorter) long_substr = longer[long_start:long_end] m2 = SequenceMatcher(None, shorter, long_substr) r = m2.ratio() if r > .995: return 100 else: scores.append(r) return utils.intr(100 * max(scores)) ############################## # Advanced Scoring Functions # ############################## def _process_and_sort(s, force_ascii, full_process=True): """Return a cleaned string with token sorted.""" # pull tokens ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s tokens = ts.split() # sort tokens and join sorted_string = u" ".join(sorted(tokens)) return sorted_string.strip() # Sorted Token # find all alphanumeric tokens in the string # sort those tokens and take ratio of resulting joined strings # controls for unordered string elements @utils.check_for_none def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True): sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process) sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process) if partial: return partial_ratio(sorted1, sorted2) else: return ratio(sorted1, sorted2) def token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return a measure of the sequences' similarity between 0 and 100 but sorting the token before comparing. 
""" return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return the ratio of the most similar substring as a number between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) @utils.check_for_none def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True): """Find all alphanumeric tokens in each string... - treat them as a set - construct two strings of the form: <sorted_intersection><sorted_remainder> - take ratios of those two strings - controls for unordered partial matches""" p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1 p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # pull tokens tokens1 = set(p1.split()) tokens2 = set(p2.split()) intersection = tokens1.intersection(tokens2) diff1to2 = tokens1.difference(tokens2) diff2to1 = tokens2.difference(tokens1) sorted_sect = " ".join(sorted(intersection)) sorted_1to2 = " ".join(sorted(diff1to2)) sorted_2to1 = " ".join(sorted(diff2to1)) combined_1to2 = sorted_sect + " " + sorted_1to2 combined_2to1 = sorted_sect + " " + sorted_2to1 # strip sorted_sect = sorted_sect.strip() combined_1to2 = combined_1to2.strip() combined_2to1 = combined_2to1.strip() if partial: ratio_func = partial_ratio else: ratio_func = ratio pairwise = [ ratio_func(sorted_sect, combined_1to2), ratio_func(sorted_sect, combined_2to1), ratio_func(combined_1to2, combined_2to1) ] return max(pairwise) def token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=True, 
force_ascii=force_ascii, full_process=full_process) ################### # Combination API # ################### # q is for quick def QRatio(s1, s2, force_ascii=True): p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 return ratio(p1, p2) def UQRatio(s1, s2): return QRatio(s1, s2, force_ascii=False) # w is for weighted def WRatio(s1, s2, force_ascii=True): """Return a measure of the sequences' similarity between 0 and 100, using different algorithms. """ p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # should we look at partials? try_partial = True unbase_scale = .95 partial_scale = .90 base = ratio(p1, p2) len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2)) # if strings are similar length, don't use partials if len_ratio < 1.5: try_partial = False # if one string is much much shorter than the other if len_ratio > 8: partial_scale = .6 if try_partial: partial = partial_ratio(p1, p2) * partial_scale ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale ptser = partial_token_set_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale return utils.intr(max(base, partial, ptsor, ptser)) else: tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale return utils.intr(max(base, tsor, tser)) def UWRatio(s1, s2): """Return a measure of the sequences' similarity between 0 and 100, using different algorithms. Same as WRatio but preserving unicode.
estObjectCountingGuiMultiImage, cls).setupClass() if hasattr(cls, 'SAMPLE_DATA'): cls.using_random_data = False else: cls.using_random_data = True cls.SAMPLE_DATA = [] cls.SAMPLE_DATA.append(os.path.split(__file__)[0] + '/random_data1.npy') cls.SAMPLE_DATA.append(os.path.split(__file__)[0] + '/random_data2.npy') data1 = numpy.random.random((1,200,200,1,1)) data1 *= 256 data2 = numpy.random.random((1,50,100,1,1)) data2 *= 256 numpy.save(cls.SAMPLE_DATA[0], data1.astype(numpy.uint8)) numpy.save(cls.SAMPLE_DATA[1], data2.astype(numpy.uint8)) @classmethod def teardownClass(cls): # Call our base class so the app quits! super(TestObjectCountingGuiMultiImage, cls).teardownClass() # Clea
n up: Delete any test files we generated removeFiles = [ TestObjectCountingGuiMultiImage.PROJECT_FILE ] if cls.using_random_data:
removeFiles += TestObjectCountingGuiMultiImage.SAMPLE_DATA for f in removeFiles: try: os.remove(f) except: pass def test_1_NewProject(self): """ Create a blank project, manipulate few couple settings, and save it. """ def impl(): projFilePath = self.PROJECT_FILE shell = self.shell # New project shell.createAndLoadNewProject(projFilePath, self.workflowClass()) workflow = shell.projectManager.workflow from ilastik.applets.dataSelection.opDataSelection import DatasetInfo opDataSelection = workflow.dataSelectionApplet.topLevelOperator for i, dataFile in enumerate(self.SAMPLE_DATA): # Add a file info = DatasetInfo() info.filePath = dataFile opDataSelection.DatasetGroup.resize(i+1) opDataSelection.DatasetGroup[i][0].setValue(info) # Set some features opFeatures = workflow.featureSelectionApplet.topLevelOperator opFeatures.FeatureIds.setValue( OpPixelFeaturesPresmoothed.DefaultFeatureIds ) opFeatures.Scales.setValue( [0.3, 0.7, 1, 1.6, 3.5, 5.0, 10.0] ) # sigma: 0.3 0.7 1.0 1.6 3.5 5.0 10.0 selections = numpy.array( [[True, False, False, False, False, False, False], [True, False, False, False, False, False, False], [True, False, False, False, False, False, False], [False, False, False, False, False, False, False], [False, False, False, False, False, False, False], [False, False, False, False, False, False, False]] ) opFeatures.SelectionMatrix.setValue(selections) # Save and close shell.projectManager.saveProject() shell.ensureNoCurrentProject(assertClean=True) # Run this test from within the shell event loop self.exec_in_shell(impl) def test_2_ClosedState(self): """ Check the state of various shell and gui members when no project is currently loaded. 
""" def impl(): assert self.shell.projectManager is None assert self.shell.appletBar.count() == 0 # Run this test from within the shell event loop self.exec_in_shell(impl) def test_3_OpenProject(self): def impl(): self.shell.openProjectFile(self.PROJECT_FILE) assert self.shell.projectManager.currentProjectFile is not None # Run this test from within the shell event loop self.exec_in_shell(impl) # These points are relative to the CENTER of the view def test_4_AddDotsAndBackground(self): """ Add labels and draw them in the volume editor. """ def impl(): imageId = 0 workflow = self.shell.projectManager.workflow countingClassApplet = workflow.countingApplet self.shell.imageSelectionCombo.setCurrentIndex(imageId) gui = countingClassApplet.getMultiLaneGui() self.waitForViews(gui.currentGui().editor.imageViews) opPix = countingClassApplet.topLevelOperator # Select the labeling drawer self.shell.setSelectedAppletDrawer(3) # Turn off the huds and so we can capture the raw image viewMenu = gui.currentGui().menus()[0] viewMenu.actionToggleAllHuds.trigger() # Select the labeling drawer self.shell.setSelectedAppletDrawer(3) # Turn off the huds and so we can capture the raw image viewMenu = gui.currentGui().menus()[0] viewMenu.actionToggleAllHuds.trigger() ## Turn off the slicing position lines ## FIXME: This disables the lines without unchecking the position ## box in the VolumeEditorWidget, making the checkbox out-of-sync #gui.currentGui().editor.navCtrl.indicateSliceIntersection = False # Do our tests at position 0,0,0 gui.currentGui().editor.posModel.slicingPos = (0,0,0) assert gui.currentGui()._labelControlUi.liveUpdateButton.isChecked() == False assert gui.currentGui()._labelControlUi.labelListModel.rowCount() == 2, "Got {} rows".format(gui.currentGui()._labelControlUi.labelListModel.rowCount()) # Select the brush gui.currentGui()._labelControlUi.paintToolButton.click() # Let the GUI catch up: Process all events QApplication.processEvents() # Draw some arbitrary labels in 
the view using mouse events. # Set the brush size gui.currentGui()._labelControlUi.brushSizeComboBox.setCurrentIndex(1) gui.currentGui()._labelControlUi.labelListModel.select(0) imgView = gui.currentGui().editor.imageViews[2] dot_start_list = [(-14,-20),(6,-8),(10,4), (20,21)] dot_stop_list = [(-20,-11),(9,-12),(15,-3), (20,21)] LABEL_START = (-14,-20) LABEL_STOP = (-14,-21) LABEL_ERASE_START = (6,-8) LABEL_ERASE_STOP = (9,-8) #draw foreground dots for start,stop in zip(dot_start_list,dot_stop_list): self.strokeMouseFromCenter( imgView, start,stop ) labelData = opPix.LabelImages[imageId][:].wait() assert numpy.sum(labelData[labelData==1]) == 4, "Number of foreground dots was {}".format( numpy.sum(labelData[labelData==1]) ) center = (numpy.array(labelData.shape[:-1]))/2 + 1 true_idx = numpy.array([center + dot for dot in dot_start_list]) idx = numpy.where(labelData) test_idx = numpy.array((idx[0],idx[1])).transpose() # This test doesn't require *exact* pixel locations to match due to rounding differences in mouse strokes. # Instead, we just require them to be close. # FIXME: This should be fixable by ensuring that the image is properly zoomed to 1-1 scale before the test. assert numpy.abs(test_idx - true_idx).max() <= 1 # Set the brush size # Draw background gui.currentGui()._labelControlUi.labelListModel.select(1) gui.currentGui()._labelControlUi.brushSizeComboBox.setCurrentIndex(0) self.strokeMouseFromCenter( imgView, LABEL_START,LABEL_STOP) #The background in this configuration should override the dots labelData = opPix.LabelImages[imageId][:].wait() assert labelData.max() == 2, "Max label value was {}".format( labelData.max() ) assert numpy.sum(labelData[labelData==1]) == 3, "Number of foreground dots was {}".format( numpy.sum(labelData[labelData==1]) ) #Now select er
port test_functions as funcs from .common import Benchmark from .lsq_problems import extract_lsq_problems class _BenchOptimizers(Benchmark): """a framework for benchmarking the optimizer Parameters ---------- function_name : string fun : callable der : callable function that returns the derivative (jacobian, gradient) of fun hess : callable function that returns the hessian of fun minimizer_kwargs : kwargs additional keywords passed to the minimizer. e.g. tol, maxiter """ def __init__(self, function_name, fun, der=None, hess=None, **minimizer_kwargs): self.function_name = function_name self.fun = fun self.der = der self.hess = hess self.minimizer_kwargs = minimizer_kwargs if "tol" not in minimizer_kwargs: minimizer_kwargs["tol"] = 1e-4 self.results = [] def reset(self): self.results = [] def add_result(self, result, t, name): """add a result to the list""" result.time = t result.name = name if not hasattr(result, "njev"): result.njev = 0 if not hasattr(result, "nhev"): result.nhev = 0 self.results.append(result) def print_results(self): """print the current list of results""" results = self.average_results() results = sorted(results, key=lambda x: (x.nfail, x.mean_time)) if not results: return print("") print("=========================================================") print("Optimizer benchmark: %s" % (self.function_name)) print("dimensions: %d, extra kwargs: %s" % (results[0].ndim, str(self.minimizer_kwargs))) print("averaged over %d starting configurations" % (results[0].ntrials)) print(" Optimizer nfail nfev njev nhev time") print("---------------------------------------------------------") for res in results: print("%11s | %4d | %4d | %4d | %4d | %.6g" % (res.name, res.nfail, res.mean_nfev, res.mean_njev, res.mean_nhev, res.mean_time)) def average_results(self): """group the results by minimizer and average over the runs""" grouped_results = defaultdict(list) for res in self.results: grouped_results[res.name].append(res) averaged_results = dict()
for name, result_list in grouped_results.items(): newres = scipy.optimize.OptimizeResult() newres.name = name newres.mean_nfev = np.mean([r.nfev for r in result_list]) newres.mean_njev = np.mean([r.njev for r in result_list]) newres.mean_nhev = np.mean([r.nhev for r in result_list]) newres.mean_time = np
.mean([r.time for r in result_list]) newres.ntrials = len(result_list) newres.nfail = len([r for r in result_list if not r.success]) try: newres.ndim = len(result_list[0].x) except TypeError: newres.ndim = 1 averaged_results[name] = newres return averaged_results.values() def bench_run(self, x0, methods=None, **minimizer_kwargs): """do an optimization test starting at x0 for all the optimizers""" kwargs = self.minimizer_kwargs if methods is None: methods = ["COBYLA", 'Powell', 'L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP', "Newton-CG", 'dogleg', 'trust-ncg'] fonly_methods = ["COBYLA", 'Powell'] for method in fonly_methods: if method not in methods: continue t0 = time.time() res = scipy.optimize.minimize(self.fun, x0, method=method, **kwargs) t1 = time.time() self.add_result(res, t1-t0, method) gradient_methods = ['L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP'] if self.der is not None: for method in gradient_methods: if method not in methods: continue t0 = time.time() res = scipy.optimize.minimize(self.fun, x0, method=method, jac=self.der, **kwargs) t1 = time.time() self.add_result(res, t1-t0, method) hessian_methods = ["Newton-CG", 'dogleg', 'trust-ncg'] if self.hess is not None: for method in hessian_methods: if method not in methods: continue t0 = time.time() res = scipy.optimize.minimize(self.fun, x0, method=method, jac=self.der, hess=self.hess, **kwargs) t1 = time.time() self.add_result(res, t1-t0, method) class BenchSmoothUnbounded(Benchmark): """Benchmark the optimizers with smooth, unbounded, functions""" params = [ ['rosenbrock', 'rosenbrock_tight', 'simple_quadratic', 'asymmetric_quadratic', 'sin_1d', 'booth', 'beale', 'LJ'], ["COBYLA", 'Powell', 'L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP', "Newton-CG", 'dogleg', 'trust-ncg'], ["mean_nfev", "mean_time"] ] param_names = ["test function", "solver", "result type"] def setup(self, func_name, method_name, ret_val): b = getattr(self, 'run_' + func_name)(methods=[method_name]) results = b.average_results() result = None for r 
in results: if r.name == method_name: result = getattr(r, ret_val) break if result is None: raise NotImplementedError() self.result = result def track_all(self, func_name, method_name, ret_val): return self.result def run_rosenbrock(self, methods=None): b = _BenchOptimizers("Rosenbrock function", fun=rosen, der=rosen_der, hess=rosen_hess) for i in range(10): b.bench_run(np.random.uniform(-3, 3, 3), methods=methods) return b def run_rosenbrock_tight(self, methods=None): b = _BenchOptimizers("Rosenbrock function", fun=rosen, der=rosen_der, hess=rosen_hess, tol=1e-8) for i in range(10): b.bench_run(np.random.uniform(-3, 3, 3), methods=methods) return b def run_simple_quadratic(self, methods=None): s = funcs.SimpleQuadratic() # print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3])) b = _BenchOptimizers("simple quadratic function", fun=s.fun, der=s.der, hess=s.hess) for i in range(10): b.bench_run(np.random.uniform(-2, 2, 3), methods=methods) return b def run_asymmetric_quadratic(self, methods=None): s = funcs.AsymmetricQuadratic() # print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3])) b = _BenchOptimizers("function sum(x**2) + x[0]", fun=s.fun, der=s.der, hess=s.hess) for i in range(10): b.bench_run(np.random.uniform(-2, 2, 3), methods=methods) return b def run_sin_1d(self, methods=None): fun = lambda x: np.sin(x[0]) der = lambda x: np.array([np.cos(x[0])]) b = _BenchOptimizers("1d sin function", fun=fun, der=der, hess=None) for i in range(10): b.bench_run(np.random.uniform(-2, 2, 1), methods=methods) return b def run_booth(self, methods=None): s = funcs.Booth() # print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3])) b = _BenchOptimizers("Booth's function", fun=s.fun, der=s.der, hess=None) for i in range(10): b.bench_run(np.random.uniform(0, 10, 2), methods=methods) return b
# IDE completion-test fixture.
# NOTE(review): `<caret>` marks the editor cursor position and is NOT valid
# Python -- this file is consumed by a code-completion test harness, never
# executed. Do not "fix" the syntax error.
def foo(x):
    pass


x = 42
y = 42
z = 42
foo(x, y, <caret>)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
#  Copyright (C) 2009-2014:
#     Jean Gabes, naparuba@gmail.com
#     Hartmut Goebel, h.goebel@goebel-consult.de
#     Gregory Starck, g.starck@gmail.com
#     Sebastien Coavoux, s.coavoux@free.fr

#  This file is part of Shinken.
#
#  Shinken is free software: you can redistribute it and/or modify
#  it under the terms of the GNU Affero General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  Shinken is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#  GNU Affero General Public License for more details.
#
#  You should have received a copy of the GNU Affero General Public License
#  along with Shinken. If not, see <http://www.gnu.org/licenses/>.

#
# This file is used to test reading and processing of config files
#

from alignak_test import *


class TestStarInGroups(AlignakTest):
    """Regression test for '*' hostgroup expansion.

    The historical bug: expanding a '*' hostgroup matched every object with
    a non-empty host_name, instead of only hosts with register != 0.
    This test loads a config that attaches services to hostgroup '*' and
    checks they end up on the real (registered) hosts.
    """

    def setUp(self):
        # Merely reaching a clean scheduler start proves the '*' expansion
        # did not pull in template (register 0) hosts.
        self.setup_with_file('etc/alignak_star_in_hostgroups.cfg')

    # If we reach a good start, we are ok :)
    # the bug was that an * hostgroup expand get all host_name != ''
    # without looking at register 0 or not
    def test_star_in_groups(self):
        """Services declared on hostgroup '*' must exist on registered hosts."""
        #
        # Config is not correct because of a wrong relative path
        # in the main config file
        #
        # NOTE: Python 2 print statement -- this file targets Python 2 only.
        print "Get the hosts and services"
        now = time.time()
        host = self.sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        router = self.sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = []  # ignore the router

        # Both services were declared against hostgroup '*'; they must have
        # been expanded onto the concrete host test_host_0.
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST")
        self.assertIsNot(svc, None)
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST_HNAME_STAR")
        self.assertIsNot(svc, None)


if __name__ == '__main__':
    unittest.main()
## This file is part of PyGaze - the open-source toolbox for eye tracking
##
##    PyGaze is a Python module for easily creating gaze contingent experiments
##    or other software (as well as non-gaze contingent experiments/software)
##    Copyright (C) 2012-2013  Edwin S. Dalmaijer
##
##    This program is free software: you can redistribute it and/or modify
##    it under the terms of the GNU General Public License as published by
##    the Free Software Foundation, either version 3 of the License, or
##    (at your option) any later version.
##
##    This program is distributed in the hope that it will be useful,
##    but WITHOUT ANY WARRANTY; without even the implied warranty of
##    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
##    GNU General Public License for more details.
##
##    You should have received a copy of the GNU General Public License
##    along with this program.  If not, see <http://www.gnu.org/licenses/>
#
# version: 0.4 (25-03-2013)
#
# Experiment-wide constants module: imported by the PyGaze libraries below.
# Values may be adjusted per experiment; constant NAMES must not change.

# MAIN
DUMMYMODE = True  # False for gaze contingent display, True for dummy mode (using mouse or joystick)
LOGFILENAME = 'default'  # logfilename, without path
LOGFILE = LOGFILENAME[:]  # .txt; adding path before logfilename is optional; logs responses (NOT eye movements, these are stored in an EDF file!)
TRIALS = 5

# DISPLAY
# used in libscreen, for the *_display functions. The values may be adjusted,
# but not the constant's names
SCREENNR = 0  # number of the screen used for displaying experiment
DISPTYPE = 'pygame'  # either 'psychopy' or 'pygame'
DISPSIZE = (1920, 1080)  # resolution
SCREENSIZE = (34.5, 19.7)  # physical display size in cm
MOUSEVISIBLE = False  # mouse visibility
BGC = (125,125,125)  # backgroundcolour
FGC = (0,0,0)  # foregroundcolour
FULLSCREEN = False

# SOUND
# defaults used in libsound. The values may be adjusted, but not the constants'
# names
SOUNDOSCILLATOR = 'sine'  # 'sine', 'saw', 'square' or 'whitenoise'
SOUNDFREQUENCY = 440  # Hertz
SOUNDLENGTH = 100  # milliseconds (duration)
SOUNDATTACK = 0  # milliseconds (fade-in)
SOUNDDECAY = 5  # milliseconds (fade-out)
SOUNDBUFFERSIZE = 1024  # increase if playback is choppy
SOUNDSAMPLINGFREQUENCY = 48000  # samples per second
SOUNDSAMPLESIZE = -16  # determines bit depth (negative is signed)
SOUNDCHANNELS = 2  # 1 = mono, 2 = stereo

# INPUT
# used in libinput. The values may be adjusted, but not the constant names.
MOUSEBUTTONLIST = None  # None for all mouse buttons; list of numbers for buttons of choice (e.g. [1,3] for buttons 1 and 3)
MOUSETIMEOUT = None  # None for no timeout, or a value in milliseconds
KEYLIST = None  # None for all keys; list of keynames for keys of choice (e.g. ['space','9',':'] for space, 9 and ; keys)
KEYTIMEOUT = 1  # None for no timeout, or a value in milliseconds
JOYBUTTONLIST = None  # None for all joystick buttons; list of button numbers (start counting at 0) for buttons of choice (e.g. [0,3] for buttons 0 and 3 - may be reffered to as 1 and 4 in other programs)
JOYTIMEOUT = None  # None for no timeout, or a value in milliseconds

# EYETRACKER
# general
TRACKERTYPE = 'smi'  # either 'smi', 'eyelink' or 'dummy' (NB: if DUMMYMODE is True, trackertype will be set to dummy automatically)
SACCVELTHRESH = 35  # degrees per second, saccade velocity threshold
SACCACCTHRESH = 9500  # degrees per second, saccade acceleration threshold
# EyeLink only
# SMI only
SMIIP = '127.0.0.1'
SMISENDPORT = 4444
SMIRECEIVEPORT = 5555

# FRL
# Used in libgazecon.FRL. The values may be adjusted, but not the constant names.
FRLSIZE = 200  # pixels, FRL-size
FRLDIST = 125  # distance between fixation point and FRL
FRLTYPE = 'gauss'  # 'circle', 'gauss', 'ramp' or 'raisedCosine'
FRLPOS = 'center'  # 'center', 'top', 'topright', 'right', 'bottomright', 'bottom', 'bottomleft', 'left', or 'topleft'

# CURSOR
# Used in libgazecon.Cursor. The values may be adjusted, but not the constants' names
CURSORTYPE = 'cross'  # 'rectangle', 'ellipse', 'plus' (+), 'cross' (X), 'arrow'
CURSORSIZE = 20  # pixels, either an integer value or a tuple for width and height (w,h)
CURSORCOLOUR = 'pink'  # colour name (e.g. 'red'), a tuple RGB-triplet (e.g. (255, 255, 255) for white or (0,0,0) for black), or a RGBA-value (e.g. (255,0,0,255) for red)
CURSORFILL = True  # True for filled cursor, False for non filled cursor
CURSORPENWIDTH = 3  # cursor edge width in pixels (only if cursor is not filled)
def send_simple_message():
    """Send a one-off test email through the Mailgun sandbox domain.

    Returns the ``requests.Response`` object from the Mailgun messages API
    (HTTP 200 on success).
    """
    # SECURITY NOTE(review): the API key and sandbox domain are hard-coded
    # below. Anyone with this key can send mail on this account -- move the
    # key to an environment variable / config file before shipping.
    return requests.post(
        "https://api.mailgun.net/v3/sandbox049ff464a4d54974bb0143935f9577ef.mailgun.org/messages",
        auth=("api", "key-679dc79b890e700f11f001a6bf86f4a1"),
        data={"from": "Mailgun Sandbox <postmaster@sandbox049ff464a4d54974bb0143935f9577ef.mailgun.org>",
              "to": "nick <nicorellius@gmail.com>",
              "subject": "Hello nick",
              "text": "Congratulations nick, you just sent an email with Mailgun! You are truly awesome! You can see a record of this email in your logs: https://mailgun.com/cp/log . You can send up to 300 emails/day from this sandbox server. Next, you should add your own domain so you can send 10,000 emails/month for free."})


# cURL command to send mail with API key
# curl -s --user 'api:key-679dc79b890e700f11f001a6bf86f4a1' \
#     https://api.mailgun.net/v3/mail.pdxpixel.com/messages \
#     -F from='Excited User <mailgun@pdxpixel.com>' \
#     -F to=nick@pdxpixel.com \
#     -F subject='Hello' \
#     -F text='Testing some Mailgun awesomness!'
import os

from pyramid.config import Configurator
from pyramid.renderers import JSONP
from pyramid.settings import aslist

from citedby import controller
from citedby.controller import cache_region as controller_cache_region


def main(global_config, **settings):
    """Build and return the citedby Pyramid WSGI application.

    Configuration precedence: environment variables override the paste
    ``settings`` dict, which overrides the hard-coded defaults.
    """
    config = Configurator(settings=settings)
    config.add_renderer('jsonp', JSONP(param_name='callback', indent=4))

    def add_controller(request):
        # Lazily build an Elasticsearch-backed controller per request
        # (reify=True below caches it on the request object).
        es = os.environ.get(
            'ELASTICSEARCH_HOST',
            settings.get('elasticsearch_host', '127.0.0.1:9200')
        )
        es_index = os.environ.get(
            'ELASTICSEARCH_INDEX',
            settings.get('elasticsearch_index', 'citations')
        )
        return controller.controller(
            aslist(es),
            sniff_on_connection_fail=True,
            timeout=600
        ).set_base_index(es_index)

    config.add_route('index', '/')
    config.add_route('status', '/_status/')
    config.add_route('citedby_pid', '/api/v1/pid/')
    config.add_route('citedby_doi', '/api/v1/doi/')
    config.add_route('citedby_meta', '/api/v1/meta/')
    config.add_request_method(add_controller, 'controller', reify=True)

    # Cache Settings Config
    memcached_host = os.environ.get(
        'MEMCACHED_HOST',
        settings.get('memcached_host', None)
    )
    memcached_expiration_time = os.environ.get(
        'MEMCACHED_EXPIRATION_TIME',
        settings.get('memcached_expiration_time', 2592000)  # a month cache
    )

    # BUG FIX: this previously read ``if 'memcached_host' is not None:`` --
    # comparing the *string literal* to None, which is always true. The
    # null-cache fallback below was therefore unreachable and pylibmc was
    # configured with url=None whenever no memcached host was set.
    if memcached_host is not None:
        cache_config = {}
        cache_config['expiration_time'] = int(memcached_expiration_time)
        cache_config['arguments'] = {'url': memcached_host, 'binary': True}
        controller_cache_region.configure('dogpile.cache.pylibmc', **cache_config)
    else:
        # No memcached configured: disable caching entirely.
        controller_cache_region.configure('dogpile.cache.null')

    config.scan()

    return config.make_wsgi_app()
# Ridiculously simple test of the os.startfile function for Windows.
#
# empty.vbs is an empty file (except for a comment), which does
# nothing when run with cscript or wscript.
#
# A possible improvement would be to have empty.vbs do something that
# we can detect here, to make sure that not only the os.startfile()
# call succeeded, but also the script actually has run.
import os
import sys
import unittest
from os import path
from test import support

# Skips the whole module on platforms without os.startfile (non-Windows).
startfile = support.get_attribute(os, 'startfile')


class TestCase(unittest.TestCase):

    def test_nonexisting(self):
        # startfile must refuse a path that does not exist.
        with self.assertRaises(OSError):
            startfile("nonexisting.vbs")

    def test_empty(self):
        # We need to make sure the child process starts in a directory
        # we're not about to delete. If we're running under -j, that
        # means the test harness provided directory isn't a safe option.
        # See http://bugs.python.org/issue15526 for more details
        safe_dir = path.dirname(sys.executable)
        with support.change_cwd(safe_dir):
            empty = path.join(path.dirname(__file__), "empty.vbs")
            # Launch once with the default verb and once with "open".
            for args in ((empty,), (empty, "open")):
                startfile(*args)


if __name__ == "__main__":
    unittest.main()
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shutil

from profile_creators import profile_generator
from profile_creators import small_profile_extender
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story


class Typical25ProfileSharedState(shared_page_state.SharedDesktopPageState):
  """Shared state associated with a profile generated from 25 navigations.

  Generates a shared profile on initialization.
  """

  def __init__(self, test, finder_options, story_set):
    super(Typical25ProfileSharedState, self).__init__(
        test, finder_options, story_set)
    generator = profile_generator.ProfileGenerator(
        small_profile_extender.SmallProfileExtender, 'small_profile')
    # Run() returns (profile directory, whether we own/must delete it).
    self._out_dir, self._owns_out_dir = generator.Run(finder_options)
    if self._out_dir:
      finder_options.browser_options.profile_dir = self._out_dir
    else:
      # Generation failed or was skipped: keep whatever profile exists.
      finder_options.browser_options.dont_override_profile = True

  def TearDownState(self):
    """Clean up generated profile directory."""
    super(Typical25ProfileSharedState, self).TearDownState()
    if self._owns_out_dir:
      # Only remove the directory if the generator created it for us.
      shutil.rmtree(self._out_dir)


class Typical25Page(page_module.Page):
  """One story in the typical-25 set; optionally scrolls the page."""

  def __init__(self, url, page_set, run_no_page_interactions,
               shared_page_state_class=shared_page_state.SharedDesktopPageState):
    super(Typical25Page, self).__init__(
        url=url, page_set=page_set,
        shared_page_state_class=shared_page_state_class)
    # When True, RunPageInteractions is a no-op (load-only measurement).
    self._run_no_page_interactions = run_no_page_interactions

  def RunPageInteractions(self, action_runner):
    if self._run_no_page_interactions:
      return
    with action_runner.CreateGestureInteraction('ScrollAction'):
      action_runner.ScrollPage()


class Typical25PageSet(story.StorySet):
  """ Pages designed to represent the median, not highly optimized web """

  def __init__(self, run_no_page_interactions=False,
               page_class=Typical25Page):
    super(Typical25PageSet, self).__init__(
        archive_data_file='data/typical_25.json',
        cloud_storage_bucket=story.PARTNER_BUCKET)

    urls_list = [
      # Why: Alexa games #48
      'http://www.nick.com/games',
      # Why: Alexa sports #45
      'http://www.rei.com/',
      # Why: Alexa sports #50
      'http://www.fifa.com/',
      # Why: Alexa shopping #41
      'http://www.gamestop.com/ps3',
      # Why: Alexa shopping #25
      'http://www.barnesandnoble.com/u/books-bestselling-books/379003057/',
      # Why: Alexa news #55
      ('http://www.economist.com/news/science-and-technology/21573529-small-'
       'models-cosmic-phenomena-are-shedding-light-real-thing-how-build'),
      # Why: Alexa news #67
      'http://www.theonion.com',
      'http://arstechnica.com/',
      # Why: Alexa home #10
      'http://allrecipes.com/Recipe/Pull-Apart-Hot-Cross-Buns/Detail.aspx',
      'http://www.html5rocks.com/en/',
      'http://www.mlb.com/',
      # pylint: disable=line-too-long
      'http://gawker.com/5939683/based-on-a-true-story-is-a-rotten-lie-i-hope-you-never-believe',
      'http://www.imdb.com/title/tt0910970/',
      'http://www.flickr.com/search/?q=monkeys&f=hp',
      'http://money.cnn.com/',
      'http://www.nationalgeographic.com/',
      'http://premierleague.com',
      'http://www.osubeavers.com/',
      'http://walgreens.com',
      'http://colorado.edu',
      ('http://www.ticketmaster.com/JAY-Z-and-Justin-Timberlake-tickets/artist/'
       '1837448?brand=none&tm_link=tm_homeA_rc_name2'),
      # pylint: disable=line-too-long
      'http://www.theverge.com/2013/3/5/4061684/inside-ted-the-smartest-bubble-in-the-world',
      'http://www.airbnb.com/',
      'http://www.ign.com/',
      # Why: Alexa health #25
      'http://www.fda.gov',
    ]

    for url in urls_list:
      self.AddStory(
        page_class(url, self, run_no_page_interactions))
import os
from unittest import TestCase

from click.testing import CliRunner

from regparser.commands.clear import clear
from regparser.index import entry


class CommandsClearTests(TestCase):
    """Tests for the ``clear`` CLI command (index / HTTP-cache deletion)."""

    def setUp(self):
        self.cli = CliRunner()

    def test_no_errors_when_clear(self):
        """Should raise no errors when no cached files are present"""
        with self.cli.isolated_filesystem():
            self.cli.invoke(clear)

    def test_deletes_fr_cache(self):
        # The sqlite HTTP cache is only removed when --http-cache is passed.
        with self.cli.isolated_filesystem():
            open('fr_cache.sqlite', 'w').close()
            self.assertTrue(os.path.exists('fr_cache.sqlite'))
            # flag must be present
            self.cli.invoke(clear)
            self.assertTrue(os.path.exists('fr_cache.sqlite'))
            self.cli.invoke(clear, ['--http-cache'])
            self.assertFalse(os.path.exists('fr_cache.sqlite'))

    def test_deletes_index(self):
        # With no args, clear wipes every index entry.
        with self.cli.isolated_filesystem():
            entry.Entry('aaa', 'bbb').write('ccc')
            entry.Entry('bbb', 'ccc').write('ddd')
            self.assertEqual(1, len(entry.Entry("aaa")))
            self.assertEqual(1, len(entry.Entry("bbb")))
            self.cli.invoke(clear)
            self.assertEqual(0, len(entry.Entry("aaa")))
            self.assertEqual(0, len(entry.Entry("bbb")))

    def test_deletes_can_be_focused(self):
        """If params are provided to delete certain directories, only those
        directories should get removed"""
        # NOTE(review): assertItemsEqual exists only on Python 2's unittest
        # (renamed assertCountEqual in Python 3) -- this suite targets py2.
        with self.cli.isolated_filesystem():
            to_delete = ['delroot/aaa/bbb', 'delroot/aaa/ccc',
                         'root/delsub/aaa', 'root/delsub/bbb']
            to_keep = ['root/othersub/aaa', 'root/aaa',
                       'top-level-file', 'other-root/aaa']
            for path in to_delete + to_keep:
                entry.Entry(*path.split('/')).write('')
            self.cli.invoke(clear, ['delroot', 'root/delsub'])
            self.assertItemsEqual(['top-level-file', 'root', 'other-root'],
                                  list(entry.Entry()))
            self.assertItemsEqual(['othersub', 'aaa'],
                                  list(entry.Entry('root')))
            self.assertItemsEqual(['aaa'], list(entry.Entry('other-root')))
from util import app

import hashlib
import os

# URL prefix for the phase-2 challenge app; token comes from the environment.
phase2_url = '/phase2-%s/' % os.environ.get('PHASE2_TOKEN')
admin_password = u'adminpass'
admin_hash = hashlib.sha1(admin_password.encode('utf-8')).hexdigest()
session_key = 'sessionkey'
admin_session_key = 'adminsessionkey'


def init_data(redis):
    """Seed the redis fixture with users, todo items and live sessions.

    NOTE(review): returns the module-level ``app`` fixture, not anything
    derived from ``redis`` -- callers below ignore the return value.
    """
    redis.set('user:test:password', hashlib.sha1(b'test').hexdigest())
    redis.set('user:admin:password', admin_hash)
    redis.set('user:test:1', 'Buy groceries')
    redis.set('user:test:2', 'Clean the patio')
    redis.set('user:test:3', 'Take over the world')
    redis.rpush('items:test', 1, 2, 3)
    redis.set('session:%s' % session_key, 'test')
    redis.set('session:%s' % admin_session_key, 'admin')
    return app


def test_home(app):
    # Landing page shows the sign-in form.
    rv = app.get(phase2_url)
    assert b'Sign In' in rv.data
    assert rv.status_code == 200


def test_404(app):
    rv = app.get(phase2_url + 'asdf')
    assert rv.status_code == 404


def test_get_405(app):
    # login/ only accepts POST.
    rv = app.get(phase2_url + 'login/')
    assert rv.status_code == 405


def test_403s(app):
    """These should return 403 instead of 404."""
    for url in ('dashboard/', 'dashboard/test/1/', 'dashboard/abc/def/'):
        rv = app.get(phase2_url + url)
        assert rv.status_code == 403
        # An invalid session cookie must also be rejected with 403.
        rv = app.get(phase2_url + url, headers={'Cookie': 'session=asdf'})
        assert rv.status_code == 403


def test_post_405(app):
    """Be sure this returns 405, instead of 404 or 403."""
    for url in ('', 'dashboard/', 'dashboard/test/1/', 'dashboard/abc/def/'):
        rv = app.post(phase2_url + url)
        assert rv.status_code == 405


def test_bad_login(app):
    # Every failed login redirects (303) somewhere other than the dashboard.
    url = phase2_url + 'login/'
    init_data(app.application.redis)
    rv = app.post(url)
    assert 'dashboard' not in rv.headers.get('Location')
    assert rv.status_code == 303
    rv = app.post(url, data={'username': 'abcdef', 'password': 'abcdef'})
    assert 'dashboard' not in rv.headers.get('Location')
    assert rv.status_code == 303
    rv = app.post(url, data={'username': 'test'})
    assert 'dashboard' not in rv.headers.get('Location')
    assert rv.status_code == 303
    rv = app.post(url, data={'username': 'test', 'password': 'abcdef'})
    assert 'dashboard' not in rv.headers.get('Location')
    assert rv.status_code == 303


def test_good_login(app):
    # Successful login sets a session cookie and redirects to the dashboard.
    url = phase2_url + 'login/'
    init_data(app.application.redis)
    rv = app.post(url, data={'username': 'test', 'password': 'test'})
    assert rv.status_code == 303
    assert 'session=' in rv.headers.get('Set-Cookie')
    assert 'dashboard' in rv.headers.get('Location')
    rv = app.post(url, data={'username': 'admin', 'password': admin_password})
    assert rv.status_code == 303
    assert 'session=' in rv.headers.get('Set-Cookie')
    assert 'dashboard' in rv.headers.get('Location')


def test_dashboard(app):
    url = phase2_url + 'dashboard/'
    init_data(app.application.redis)
    rv = app.get(url, headers={'Cookie': 'session=%s' % session_key})
    assert b'Buy groceries' in rv.data
    assert b'Take over the world' in rv.data
    assert rv.status_code == 200


def test_item_404(app):
    url = phase2_url + 'dashboard/'
    init_data(app.application.redis)
    rv = app.get(url + 'abcdef/0/', headers={
        'Cookie': 'session=%s' % session_key})
    assert rv.status_code == 404
    rv = app.get(url + 'test/0/', headers={
        'Cookie': 'session=%s' % session_key})
    assert rv.status_code == 404
    rv = app.get(url + 'admin/1/', headers={
        'Cookie': 'session=%s' % session_key})
    assert rv.status_code == 404


def test_solution(app):
    # The intended vulnerability: a normal user can read the admin
    # password hash through the dashboard item endpoint.
    url = phase2_url + 'dashboard/admin/password/'
    init_data(app.application.redis)
    rv = app.get(url, headers={'Cookie': 'session=%s' % session_key})
    assert admin_hash.encode('utf-8') in rv.data
    assert rv.status_code == 200


def test_admin_dashboard(app):
    url = phase2_url + 'dashboard/'
    init_data(app.application.redis)
    rv = app.get(url, headers={'Cookie': 'session=%s' % admin_session_key})
    assert b'Challenge complete!' in rv.data
    assert rv.status_code == 200
import unittest

from mock import patch, Mock

from the_ark import rhino_client

__author__ = 'chaley'

# NOTE(review): unused module-level variable; name looks like a typo of
# "rhino_client_obj". Kept for byte-compatibility.
rhino_client_ojb = None


class UtilsTestCase(unittest.TestCase):
    """Unit tests for RhinoClient's get/post/put/send_test lifecycle."""

    def setUp(self):
        self.rhino_client_obj = rhino_client.RhinoClient('test_name', 'url',
                                                         'brand', 'branch',
                                                         'build_id', 'user',
                                                         'rhino_client_url')

    def test_set_log(self):
        # set_log stores the artifact URL and its link text in test_data.
        self.rhino_client_obj.set_log("file_path", "link_text")
        self.assertEqual('file_path',
                         self.rhino_client_obj.test_data['result_url'])
        self.assertEqual('link_text',
                         self.rhino_client_obj.test_data['result_text'])

    @patch('requests.get')
    def test_get(self, requests_get):
        r = Mock()
        r.json.return_value = {"stuff": "stuff"}
        requests_get.return_value = r
        response = self.rhino_client_obj.get('test_id')
        self.assertEqual({"stuff": "stuff"}, response)

    @patch('requests.post')
    def test_post(self, requests_post):
        # A 201 response marks the client as posted.
        request_json = Mock()
        request_json.status_code = 201
        requests_post.return_value = request_json
        self.rhino_client_obj.post()
        self.assertEqual(True, self.rhino_client_obj.posted)

    @patch('requests.post')
    def test_post_fail(self, requests_post):
        # Non-2xx status must raise.
        request_json = Mock()
        request_json.status_code = 400
        requests_post.return_value = request_json
        self.assertRaises(Exception, self.rhino_client_obj.post)

    @patch('requests.put')
    def test_put(self, requests_put):
        self.rhino_client_obj.test_data['test_id'] = 156465465
        self.rhino_client_obj.posted = True
        request_json = Mock()
        request_json.status_code = 201
        request_json.json.return_value = {"stuff": "stuff"}
        requests_put.return_value = request_json
        self.rhino_client_obj.put()
        self.assertEqual(True, self.rhino_client_obj.posted)

    def test_put_posted_false(self):
        # put() before a successful post() is an error.
        self.assertRaises(Exception, self.rhino_client_obj.put)

    @patch('requests.put')
    def test_put_status_false(self, requests_put):
        self.rhino_client_obj.test_data['test_id'] = 156465465
        self.rhino_client_obj.posted = True
        request_json = Mock()
        request_json.status_code = 500
        requests_put.return_value = request_json
        self.assertRaises(rhino_client.RhinoClientException,
                          self.rhino_client_obj.put)

    @patch('requests.post')
    def test_send_test_post(self, requests_post):
        # send_test falls through to post() when nothing was posted yet.
        request_json = Mock()
        request_json.status_code = 201
        requests_post.return_value = request_json
        self.rhino_client_obj.send_test("status")
        self.assertEqual(True, self.rhino_client_obj.posted)

    @patch('requests.put')
    def test_send_test_put(self, requests_put):
        # send_test updates via put() once a test_id exists and posted is set.
        self.rhino_client_obj.test_data['test_id'] = 156465465
        self.rhino_client_obj.posted = True
        request_json = Mock()
        request_json.status_code = 201
        requests_put.return_value = request_json
        self.rhino_client_obj.send_test("status")
        self.assertEqual(True, self.rhino_client_obj.posted)


if __name__ == '__main__':
    unittest.main()
import zipfile
import imghdr

from django import forms

from .models import Image, ImageBatchUpload, Album


class AlbumAdminForm(forms.ModelForm):
    """Admin form for Album: 'all users' and an explicit user list are
    mutually exclusive -- picking users switches all_users off."""

    class Meta:
        model = Album
        fields = '__all__'

    def clean(self):
        cleaned_data = self.cleaned_data
        authorized = cleaned_data.get('authorized_users')
        # An explicit, non-empty set of authorized users overrides the
        # all_users flag.
        if authorized is not None:
            if cleaned_data.get('all_users') and authorized.count() != 0:
                cleaned_data['all_users'] = False
        return cleaned_data


class ImageAdminForm(forms.ModelForm):
    """Admin form for Image: uploaded file must actually be an image."""

    class Meta:
        model = Image
        fields = ('public', 'title', 'image', 'albums', 'user')

    def clean_image(self):
        image = self.cleaned_data['image']
        # Missing file: let field-level validation deal with it.
        if image is not None and not imghdr.what(image):
            raise forms.ValidationError(u"The file is not an image file")
        return image


class ImageBatchUploadAdminForm(forms.ModelForm):
    """Admin form for batch uploads: file must be a real zip archive."""

    class Meta:
        model = ImageBatchUpload
        fields = ('public', 'title', 'zip_file', 'albums', 'user')

    def clean_zip_file(self):
        image_zip = self.cleaned_data['zip_file']
        if image_zip is not None and not zipfile.is_zipfile(image_zip):
            raise forms.ValidationError(u"The file is not a zip file")
        return image_zip
import CatalogItem
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer
from direct.interval.IntervalGlobal import *
from direct.gui.DirectGui import *


class CatalogNametagItem(CatalogItem.CatalogItem):
    """Catalog item that sells a nametag font/style for an avatar.

    FIX: removed two blocks of unreachable code that sat after `return`
    statements in getName() and getBasePrice() (dead branches left over
    from an earlier implementation).
    """

    sequenceNumber = 0

    def makeNewItem(self, nametagStyle):
        # nametagStyle indexes TTLocalizer.NametagFontNames; 100 is the
        # special "unpaid" style (see getName/getPicture below).
        self.nametagStyle = nametagStyle
        CatalogItem.CatalogItem.makeNewItem(self)

    def getPurchaseLimit(self):
        # An avatar can only ever own one of each nametag style.
        return 1

    def reachedPurchaseLimit(self, avatar):
        # Already owned, ordered, gifted, or currently equipped all count
        # as having reached the limit.
        if self in avatar.onOrder or self in avatar.mailboxContents or self in avatar.onGiftOrder or self in avatar.awardMailboxContents or self in avatar.onAwardOrder:
            return 1
        if avatar.nametagStyle == self.nametagStyle:
            return 1
        return 0

    def getAcceptItemErrorText(self, retcode):
        if retcode == ToontownGlobals.P_ItemAvailable:
            return TTLocalizer.CatalogAcceptNametag
        return CatalogItem.CatalogItem.getAcceptItemErrorText(self, retcode)

    def saveHistory(self):
        return 1

    def getTypeName(self):
        return TTLocalizer.NametagTypeName

    def getName(self):
        """Return the localized display name for this nametag style."""
        if self.nametagStyle == 100:
            name = TTLocalizer.UnpaidNameTag
        else:
            name = TTLocalizer.NametagFontNames[self.nametagStyle]
        # Some locales put the "Nametag" label before the font name.
        if TTLocalizer.NametagReverse:
            name = TTLocalizer.NametagLabel + name
        else:
            name = name + TTLocalizer.NametagLabel
        return name

    def recordPurchase(self, avatar, optional):
        if avatar:
            avatar.b_setNametagStyle(self.nametagStyle)
        return ToontownGlobals.P_ItemAvailable

    def getPicture(self, avatar):
        """Build the catalog thumbnail: the local avatar's name in this font."""
        frame = self.makeFrame()
        if self.nametagStyle == 100:
            inFont = ToontownGlobals.getToonFont()
        else:
            inFont = ToontownGlobals.getNametagFont(self.nametagStyle)
        # Parented to frame purely for its visual side effect.
        nameTagDemo = DirectLabel(parent=frame, relief=None, pos=(0, 0, 0.24), scale=0.5, text=base.localAvatar.getName(), text_fg=(1.0, 1.0, 1.0, 1), text_shadow=(0, 0, 0, 1), text_font=inFont, text_wordwrap=9)
        self.hasPicture = True
        return (frame, None)

    def output(self, store = -1):
        return 'CatalogNametagItem(%s%s)' % (self.nametagStyle, self.formatOptionalData(store))

    def compareTo(self, other):
        return self.nametagStyle - other.nametagStyle

    def getHashContents(self):
        return self.nametagStyle

    def getBasePrice(self):
        # Flat price for every style. (A dead per-style price table that
        # followed this return has been removed.)
        return 500

    def decodeDatagram(self, di, versionNumber, store):
        CatalogItem.CatalogItem.decodeDatagram(self, di, versionNumber, store)
        self.nametagStyle = di.getUint16()

    def encodeDatagram(self, dg, store):
        CatalogItem.CatalogItem.encodeDatagram(self, dg, store)
        dg.addUint16(self.nametagStyle)

    def isGift(self):
        return 0

    def getBackSticky(self):
        itemType = 1
        numSticky = 4
        return (itemType, numSticky)
# org/licenses/>.
import time


@VOLT.Command(
    bundles = VOLT.AdminBundle(),
    description = 'Pause the VoltDB cluster and switch it to admin mode.',
    options = (
        VOLT.BooleanOption('-w', '--wait', 'waiting',
                           'wait for all DR and Export transactions to be externally processed',
                           default = False)
    )
)
def pause(runner):
    """Pause the cluster; with --wait, block until DR/export queues drain."""
    # Check the STATUS column. runner.call_proc() detects and aborts on errors.
    status = runner.call_proc('@Pause', [], []).table(0).tuple(0).column_integer(0)
    if status != 0:
        runner.error('The cluster has failed to pause with status: %d' % status)
        return
    runner.info('The cluster is paused.')
    if runner.opts.waiting:
        status = runner.call_proc('@Quiesce', [], []).table(0).tuple(0).column_integer(0)
        if status != 0:
            runner.error('The cluster has failed to quiesce with status: %d' % status)
            return
        runner.info('The cluster is quiesced.')
        # check the dr stats
        partition_min_host = dict()
        partition_min = dict()
        partition_max = dict()
        check_dr(runner, partition_min_host, partition_min, partition_max)
        # check the export stats twice because they are periodic
        export_tables_with_data = dict()
        check_dr(runner, partition_min_host, partition_min, partition_max)
        last_table_stat_time = 0
        last_table_stat_time = check_export(runner, export_tables_with_data, last_table_stat_time)
        if not partition_min and last_table_stat_time == 1:
            # there are no outstanding export or dr transactions
            runner.info('All export and DR transactions have been processed.')
            return
        # after 10 seconds notify admin of what transactions have not drained
        notifyInterval = 10
        # have to get two samples of table stats because the cached value
        # could be from before Quiesce
        while True:
            time.sleep(1)
            if partition_min:
                check_dr(runner, partition_min_host, partition_min, partition_max)
            if last_table_stat_time > 1:
                curr_table_stat_time = check_export(runner, export_tables_with_data, last_table_stat_time)
            if last_table_stat_time == 1 or curr_table_stat_time > last_table_stat_time:
                # have a new sample from table stat cache or there are no tables
                if not export_tables_with_data and not partition_min:
                    runner.info('All export and DR transactions have been processed.')
                    return
            notifyInterval -= 1
            if notifyInterval == 0:
                notifyInterval = 10
                if last_table_stat_time > 1 and export_tables_with_data:
                    print_export_pending(runner, export_tables_with_data)
                if partition_min:
                    print_dr_pending(runner, partition_min_host, partition_min, partition_max)


def get_stats(runner, component):
    """Fetch @Statistics for *component*, retrying up to 5 times on timeouts.

    FIXES: the retry path called bare sleep(1) (NameError: only the `time`
    module is imported) and the error message referenced undefined `resp`
    instead of `response`.
    """
    retry = 5
    while True:
        response = runner.call_proc('@Statistics',
                                    [VOLT.FastSerializer.VOLTTYPE_STRING, VOLT.FastSerializer.VOLTTYPE_INTEGER],
                                    [component, 0])
        status = response.status()
        if status != 1 and "timeout" in response.statusString:
            if retry == 0:
                runner.error('Unable to collect DR or export statistics from the cluster')
            else:
                time.sleep(1)  # was: sleep(1) -- NameError
                retry -= 1
                continue
        if status != 1:
            runner.error("Unexpected response to @Statistics %s: %s" % (component, response))  # was: resp -- NameError
        return response


def check_dr(runner, partition_min_host, partition_min, partition_max):
    """Refresh per-partition DR backlog state from DRPRODUCER statistics.

    partition_min maps pid -> lowest acked drId among replicas (farthest
    behind); partition_min_host maps pid -> hostnames at that minimum;
    partition_max maps pid -> highest drId seen (upper bound to drain to).
    All three dicts are updated in place.
    """
    resp = get_stats(runner, 'DRPRODUCER')
    partition_data = resp.table(0)
    for pid in partition_min:
        # reset all min values to find the new min
        if pid in partition_max:
            partition_min[pid] = partition_max[pid]
    for r in partition_data.tuples():
        pid = r[3]
        hostname = str(r[2])
        if str(r[8]) == 'None':
            last_queued = -1
        else:
            last_queued = r[8]
        if str(r[9]) == 'None':
            last_acked = -1
        else:
            last_acked = r[9]
        # check TOTALBYTES
        if r[5] > 0:
            # track the highest seen drId for each partition
            # use last queued to get the upper bound
            if pid in partition_max:
                partition_max[pid] = max(last_queued, partition_max[pid])
            else:
                partition_max[pid] = last_queued
            if pid in partition_min:
                if last_acked < partition_min[pid]:
                    # this replica is farther behind
                    partition_min[pid] = last_acked
            else:
                partition_min_host[pid] = set()
                partition_min[pid] = last_acked
            partition_min_host[pid].add(hostname)
        else:
            # this hostname's partition has an empty InvocationBufferQueue
            if pid in partition_min:
                # it was not empty on a previous call
                partition_min_host[pid].discard(hostname)
                if not partition_min_host[pid]:
                    del partition_min_host[pid]
                    del partition_min[pid]
            if pid in partition_max:
                if partition_max[pid] > last_acked:
                    runner.warning("DR Producer reports no data for partition %i on host %s but last acked drId (%i) does not match other hosts last acked drId (%s)" % (pid, hostname, last_acked, partition_max[pid]))
                partition_max[pid] = max(last_acked, partition_max[pid])
            else:
                partition_max[pid] = last_acked


def print_dr_pending(runner, partition_min_host, partition_min, partition_max):
    """Report the drId ranges still awaiting consumer acknowledgement."""
    runner.info('The following partitions have pending DR transactions that the consumer cluster has not processed:')
    summaryline = " Partition %i needs acknowledgements for drIds %i to %i on hosts: %s."
    for pid in partition_min_host:
        runner.info(summaryline % (pid, partition_min[pid] + 1, partition_max[pid], ', '.join(partition_min_host[pid])))


def check_export(runner, export_tables_with_data, last_collection_time):
    """Refresh export backlog state from TABLE statistics.

    Returns 1 when there are no tables at all, the previous collection time
    when the stats sample is the same cached set, or the new collection
    time otherwise. export_tables_with_data (table -> host -> {pids with
    pending bytes}) is updated in place.
    """
    resp = get_stats(runner, 'TABLE')
    collection_time = 0
    if not resp.table_count() > 0:
        # this is an empty database and we don't need to wait for export to drain
        return 1
    else:
        tablestats = resp.table(0)
        firsttuple = tablestats.tuple(0)
        if firsttuple.column(0) == last_collection_time:
            # this statistic is the same cached set as the last call
            return last_collection_time
        else:
            collection_time = firsttuple.column(0)
    for r in tablestats.tuples():
        # first look for streaming (export) tables
        if str(r[6]) == 'StreamedTable':
            pendingData = r[8]
            tablename = str(r[5])
            pid = r[4]
            hostname = str(r[2])
            if pendingData > 0:
                if not tablename in export_tables_with_data:
                    export_tables_with_data[tablename] = dict()
                tabledata = export_tables_with_data[tablename]
                if not hostname in tabledata:
                    tabledata[hostname] = set()
                tabledata[hostname].add(pid)
            else:
                if tablename in export_tables_with_data:
                    tabledata = export_tables_with_data[tablename]
                    if hostname in tabledata:
                        tabledata[hostname].discard(pid)
                        if not tabledata[hostname]:
                            del tabledata[hostname]
                    if not export_tables_with_data[tablename]:
                        del export_tables_with_data[tablename]
    return collection_time


def print_export_pending(runner, export_tables_with_data):
    """Report export tables that still hold unacknowledged transactions."""
    runner.info('The following export tables have unacknowledged transactions:')
    # NOTE(review): the source chunk is truncated mid-definition here; the
    # remainder of this function (the summary line format and the loop that
    # prints it) is not visible and must be restored from the original file.
    summaryline = " %s needs ack"
#!/usr/bin/env python3
# Coverage reporter: scans libgit2-glib sources with libclang and reports
# which libgit2 API functions are (not) called.
import os, sys, glob, pickle, subprocess

# Make the bundled clang python bindings importable, then restore sys.path.
sys.path.insert(0, os.path.dirname(__file__))
from clang import cindex
sys.path = sys.path[1:]


def configure_libclang():
    """Point the cindex bindings at a libclang shared library, if one is found."""
    llvm_libdirs = ['/usr/lib/llvm-3.2/lib', '/usr/lib64/llvm']
    try:
        libdir = subprocess.check_output(['llvm-config', '--libdir']).decode('utf-8').strip()
        llvm_libdirs.insert(0, libdir)
    except OSError:
        # llvm-config not installed; fall back to the hard-coded candidates.
        pass
    for d in llvm_libdirs:
        if not os.path.exists(d):
            continue
        files = glob.glob(os.path.join(d, 'libclang.so*'))
        if len(files) != 0:
            cindex.Config.set_library_file(files[0])
            return


class Call:
    """A call site (or function reference) with its source extent."""

    def __init__(self, cursor, decl):
        # NOTE(review): .decode() on cursor attributes implies these bundled
        # bindings return bytes, unlike upstream python-clang -- confirm.
        self.ident = cursor.displayname.decode('utf-8')
        self.filename = cursor.location.file.name.decode('utf-8')
        ex = cursor.extent
        self.start_line = ex.start.line
        self.start_column = ex.start.column
        self.end_line = ex.end.line
        self.end_column = ex.end.column
        self.decl_filename = decl.location.file.name.decode('utf-8')


class Definition:
    """A function declaration found in the libgit2 headers."""

    def __init__(self, cursor):
        self.ident = cursor.spelling.decode('utf-8')
        self.display = cursor.displayname.decode('utf-8')
        self.filename = cursor.location.file.name.decode('utf-8')
        ex = cursor.extent
        self.start_line = ex.start.line
        self.start_column = ex.start.column
        self.end_line = ex.end.line
        self.end_column = ex.end.column


def process_diagnostics(tu):
    """Print all diagnostics for a translation unit; exit(1) on real errors."""
    diagnostics = tu.diagnostics
    haserr = False
    for d in diagnostics:
        sys.stderr.write('{0}\n'.format(d.format.decode('utf-8')))
        if d.severity > cindex.Diagnostic.Warning:
            haserr = True
    if haserr:
        sys.exit(1)


def walk_cursors(tu, files):
    """Yield cursors located in *files*, descending only into matching ones."""
    proc = list(tu.cursor.get_children())
    while len(proc) > 0:
        cursor = proc[0]
        proc = proc[1:]
        if cursor.location.file is None:
            continue
        fname = cursor.location.file.name.decode('utf-8')
        if fname in files:
            yield cursor
            proc += list(cursor.get_children())


def newer(a, b):
    """True when *a* is newer than *b* or either cannot be stat'd.

    FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    only missing/unreadable files should force a rebuild.
    """
    try:
        return os.stat(a).st_mtime > os.stat(b).st_mtime
    except OSError:
        return True


def scan_libgit2_glib(cflags, files, git2dir):
    """Collect calls into libgit2 from *files*, with a per-file pickle cache.

    FIX: the `cdecl is None` guard previously ran AFTER `cdecl.kind` was
    dereferenced, so it could never take effect; the checks are reordered.
    """
    files = [os.path.abspath(f) for f in files]
    dname = os.path.dirname(__file__)
    allcalls = {}
    l = 0
    if not os.getenv('SILENT'):
        sys.stderr.write('\n')
    i = 0
    for f in files:
        if not os.getenv('SILENT'):
            name = os.path.basename(f)
            if len(name) > l:
                l = len(name)
            perc = int((i / len(files)) * 100)
            sys.stderr.write('[{0: >3}%] Processing ... {1}{2}\r'.format(perc, name, ' ' * (l - len(name))))
        i += 1
        astf = os.path.join(dname, '.' + os.path.basename(f) + '.cache')
        if not newer(f, astf):
            # Cache hit: reuse the pickled calls from a previous run.
            with open(astf, 'rb') as fo:
                calls = pickle.load(fo)
        else:
            tu = cindex.TranslationUnit.from_source(f, cflags)
            process_diagnostics(tu)
            calls = {}
            for cursor in walk_cursors(tu, files):
                if cursor.kind == cindex.CursorKind.CALL_EXPR or \
                   cursor.kind == cindex.CursorKind.DECL_REF_EXPR:
                    cdecl = cursor.get_referenced()
                    # Guard BEFORE touching cdecl.kind (see docstring).
                    if cdecl is None or cdecl.location.file is None:
                        continue
                    if cdecl.kind != cindex.CursorKind.FUNCTION_DECL:
                        continue
                    fdefname = cdecl.location.file.name.decode('utf-8')
                    if fdefname.startswith(git2dir):
                        call = Call(cursor, cdecl)
                        if call.ident in calls:
                            calls[call.ident].append(call)
                        else:
                            calls[call.ident] = [call]
            with open(astf, 'wb') as fo:
                pickle.dump(calls, fo)
        for k in calls:
            if k in allcalls:
                allcalls[k] += calls[k]
            else:
                allcalls[k] = list(calls[k])
    if not os.getenv('SILENT'):
        sys.stderr.write('\r[100%] Processing ... done{0}\n'.format(' ' * (l - 4)))
    return allcalls


def scan_libgit2(cflags, git2dir):
    """Collect the public libgit2 function declarations, minus object boilerplate."""
    tu = cindex.TranslationUnit.from_source(git2dir + '.h', cflags)
    process_diagnostics(tu)
    headers = glob.glob(os.path.join(git2dir, '*.h'))
    defs = {}
    # Per-object-type boilerplate (git_commit_free, git_tree_lookup, ...)
    # is excluded from the coverage report.
    objapi = ['lookup', 'lookup_prefix', 'free', 'id', 'owner']
    objderiv = ['commit', 'tree', 'tag', 'blob']
    ignore = set()
    for deriv in objderiv:
        for api in objapi:
            ignore.add('git_' + deriv + '_' + api)
    for cursor in walk_cursors(tu, headers):
        if cursor.kind == cindex.CursorKind.FUNCTION_DECL:
            deff = Definition(cursor)
            if not deff.ident in ignore:
                defs[deff.ident] = deff
    return defs


configure_libclang()

# Usage: script CFLAGS... -- FILES...
pos = sys.argv.index('--')
cflags = sys.argv[1:pos]
files = sys.argv[pos + 1:]
incdir = os.getenv('LIBGIT2_INCLUDE_DIR')

defs = scan_libgit2(cflags, incdir)
calls = scan_libgit2_glib(cflags, files, incdir)

notused = {}
perfile = {}
nperfile = {}

for d in defs:
    o = defs[d]
    if not d in calls:
        notused[d] = defs[d]
        if not o.filename in nperfile:
            nperfile[o.filename] = [o]
        else:
            nperfile[o.filename].append(o)
    if not o.filename in perfile:
        perfile[o.filename] = [o]
    else:
        perfile[o.filename].append(o)

ss = [notused[f] for f in notused]
ss.sort(key=lambda x: '{0} {1}'.format(os.path.basename(x.filename), x.ident))

lastf = None

keys = list(perfile.keys())
keys.sort()

for filename in keys:
    b = os.path.basename(filename)
    f = perfile[filename]
    n_perfile = len(f)
    if filename in nperfile:
        n_nperfile = len(nperfile[filename])
    else:
        n_nperfile = 0
    perc = int(((n_perfile - n_nperfile) / n_perfile) * 100)
    print('\n File {0}, coverage {1}% ({2} out of {3}):'.format(b, perc, n_perfile - n_nperfile, n_perfile))
    cp = list(f)
    # Covered functions sort first within each file.
    cp.sort(key=lambda x: "{0} {1}".format(not x.ident in calls, x.ident))
    for d in cp:
        if d.ident in calls:
            print(' \033[32m✓ {0}\033[0m'.format(d.display))
        else:
            print(' \033[31m✗ {0}\033[0m'.format(d.display))

perc = int(((len(defs) - len(notused)) / len(defs)) * 100)
print('\nTotal coverage: {0}% ({1} functions out of {2} are being called)\n'.format(perc, len(defs) - len(notused), len(defs)))
# vi:ts=4:et
# generated from catkin/cmake/template/pkg.context.pc.in
# Machine-generated pkg-config context for the rosserial_mbed package --
# do not edit by hand; values are substituted by catkin at build time.
CATKIN_PACKAGE_PREFIX = ""
# Include dirs come in as a ';'-separated CMake list; empty string means none.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/bjornl/ros/workspaces/bjorn_ws/install/include".split(';') if "/home/bjornl/ros/workspaces/bjorn_ws/install/include" != "" else []
# Run-time catkin dependencies, space-separated for pkg-config.
PROJECT_CATKIN_DEPENDS = "message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosserial_mbed"
PROJECT_SPACE_DIR = "/home/bjornl/ros/workspaces/bjorn_ws/install"
PROJECT_VERSION = "0.7.6"
# -*- coding: utf-8 -*-

'''
    Funimation|Now Add-on
    Copyright (C) 2016 Funimation|Now

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''

import logging
import re
import xbmc
import os
import xbmcgui

from resources.lib.modules import utils

logger = logging.getLogger('funimationnow')

# Navigation result codes understood by the landing page loop.
EXIT_CODE = 2
SUCCESS_CODE = 3
EXPIRE_CODE = 4
HOME_SCREEN_CODE = 5
BACK_CODE = 6
LOGOUT_CODE = 7
REST_CODE = 8

# Window control ids for each top-level screen.
SEARCH_WINDOW = 100100
HOME_WINDOW = 110101
QUEUE_WINDOW = 110102
ALL_WINDOW = 110103
SIMALCAST_WINDOW = 110104
GENRE_WINDOW = 110105
SETTINGS_WINDOW = 110106
HELP_WINDOW = 110107
LOGOUT_WINDOW = 110108

# Maps a window id to the module-level handler function of the same name.
func = {
    SEARCH_WINDOW: 'search',
    HOME_WINDOW: 'home',
    QUEUE_WINDOW: 'queue',
    ALL_WINDOW: 'all',
    SIMALCAST_WINDOW: 'simalcast',
    GENRE_WINDOW: 'genres',
    SETTINGS_WINDOW: 'settings',
    HELP_WINDOW: 'help',
    LOGOUT_WINDOW: 'logout',
}


def chooser(landing_page, parent, child, controlID):
    """Dispatch a window control id to its handler and record the result."""
    result = EXIT_CODE

    logger.debug(controlID)
    logger.debug(child)
    logger.debug(func.get(controlID))

    try:
        result = globals()[func.get(controlID)](landing_page, parent, child, controlID)
    except Exception as inst:
        logger.error(inst)

    landing_page.result_code = result
    return result


def home(landing_page, parent, child, controlID):
    # Already home: stay put; otherwise ask the loop to rebuild the home screen.
    if child == HOME_WINDOW:
        return REST_CODE
    return HOME_SCREEN_CODE


def search(landing_page, parent, child, controlID):
    if child != SEARCH_WINDOW:
        try:
            from resources.lib.gui.searchgui import search
            search(landing_page)
        except Exception as inst:
            logger.error(inst)
    return REST_CODE


def queue(landing_page, parent, child, controlID):
    if child != QUEUE_WINDOW:
        try:
            from resources.lib.gui.watchlistgui import watchlist
            mnavset = {
                'width': 95,
                'title': 'MY QUEUE',
                'params': 'id=myqueue&title=My Queue',
                'target': 'longlist',
                'path': 'longlist/myqueue/',
            }
            watchlist(landing_page, mnavset)
        except Exception as inst:
            logger.error(inst)
    return REST_CODE


def all(landing_page, parent, child, controlID):
    if child != ALL_WINDOW:
        try:
            from resources.lib.gui.genreselectgui import genreselect
            mnavset = {
                'width': 140,
                'title': 'RECENTLY ADDED',
                'params': 'id=shows&title=All Shows&showGenres=true',
                'target': 'longlist',
                'path': 'longlist/content/',
                'offset': 0,
                'limit': 144,
            }
            genreselect(landing_page, mnavset)
        except Exception as inst:
            logger.error(inst)
    return REST_CODE


def simalcast(landing_page, parent, child, controlID):
    if child != SIMALCAST_WINDOW:
        try:
            from resources.lib.gui.audioselectgui import audioselect
            mnavset = {
                'width': 108,
                'title': 'SIMULDUBS',
                #'params': 'id=simulcasts&title=Simulcasts',
                'params': 'id=broadcast-dubs&title=Broadcast Dubs',
                'target': 'longlist',
                'path': 'longlist/content/',
            }
            audioselect(landing_page, mnavset)
        except Exception as inst:
            logger.error(inst)
    return REST_CODE


def genres(landing_page, parent, child, controlID):
    if child != GENRE_WINDOW:
        try:
            from resources.lib.gui.genreshowsgui import genreshows
            mnavset = {
                'width': 140,
                'title': 'RECENTLY ADDED',
                'params': 'id=genres&title=Genres&role=b',
                'target': 'longlist',
                'path': 'longlist/genres/',
                'offset': 0,
                'limit': 144,
            }
            genreshows(landing_page, mnavset)
        except Exception as inst:
            logger.error(inst)
    return REST_CODE


def settings(landing_page, parent, child, controlID):
    RESULT_CODE = REST_CODE

    try:
        #xbmc.executebuiltin('Addon.OpenSettings(%s)' % utils.getAddonInfo('id'))
        utils.addon.openSettings()
        utils.lock()
        utils.sleep(2000)
        utils.unlock()

        # Settings changes may have cleared the stored credentials; if the
        # token store is gone, force a logout.
        addon_data = xbmc.translatePath(utils.getAddonInfo('profile')).decode('utf-8')
        tokens = xbmc.translatePath(os.path.join(addon_data, 'tokens.db'))

        if not os.path.exists(tokens):
            RESULT_CODE = LOGOUT_CODE
    except Exception as inst:
        logger.error(inst)

    return RESULT_CODE


def help(landing_page, parent, child, controlID):
    try:
        from resources.lib.gui.helpmenugui import helpmenu
        helpmenu()
    except Exception as inst:
        logger.error(inst)
    return REST_CODE


def logout(landing_page, parent, child, controlID):
    from resources.lib.modules import cleardata

    logger.debug('Running Cleanup Script')

    # Best-effort cleanup; logging out proceeds even if it fails.
    try:
        cleardata.cleanup()
    except:
        pass

    return LOGOUT_CODE
import AnimatedProp
from direct.actor import Actor
from direct.interval.IntervalGlobal import *
from toontown.effects.Splash import *
from toontown.effects.Ripples import *
import random


class FishAnimatedProp(AnimatedProp.AnimatedProp):
    """Ambient scenery prop: a fish that periodically leaps out of the water
    at a random position, with ripple/splash effects and a splash sound."""

    def __init__(self, node):
        AnimatedProp.AnimatedProp.__init__(self, node)
        parent = node.getParent()
        # Replace the static node with an Actor built from it, keeping the
        # original transform on the Actor instead of the node.
        self.fish = Actor.Actor(node, copy=0)
        self.fish.reparentTo(parent)
        self.fish.setTransform(node.getTransform())
        node.clearMat()
        self.fish.loadAnims({'jump': 'phase_4/models/props/SZ_fish-jump', 'swim': 'phase_4/models/props/SZ_fish-swim'})
        self.splashSfxList = (loader.loadSfx('phase_4/audio/sfx/TT_splash1.ogg'), loader.loadSfx('phase_4/audio/sfx/TT_splash2.ogg'))
        self.node = self.fish
        self.geom = self.fish.getGeomNode()
        # Ripples shown where the fish exits the water surface.
        self.exitRipples = Ripples(self.geom)
        self.exitRipples.setBin('fixed', 25, 1)
        self.exitRipples.setPosHprScale(-0.3, 0.0, 1.24, 0.0, 0.0, 0.0, 0.7, 0.7, 0.7)
        self.splash = Splash(self.geom, wantParticles=0)
        self.splash.setPosHprScale(-1, 0.0, 1.23, 0.0, 0.0, 0.0, 0.7, 0.7, 0.7)
        # NOTE: the splash sound is chosen once at construction, not per jump.
        randomSplash = random.choice(self.splashSfxList)
        # One full cycle: move to a random spot, show the fish, play the jump
        # with timed ripple/splash effects, hide it, then idle 4-14 seconds.
        self.track = Sequence(FunctionInterval(self.randomizePosition), Func(self.node.unstash), Parallel(self.fish.actorInterval('jump'), Sequence(Wait(0.25), Func(self.exitRipples.play, 0.75)), Sequence(Wait(1.14), Func(self.splash.play), SoundInterval(randomSplash, volume=0.8, node=self.node))), Wait(1), Func(self.node.stash), Wait(4 + 10 * random.random()), name=self.uniqueName('Fish'))

    def delete(self):
        # Tear down effects, the interval track, and the actor; drop all refs.
        self.exitRipples.destroy()
        del self.exitRipples
        self.splash.destroy()
        del self.splash
        del self.track
        self.fish.removeNode()
        del self.fish
        del self.node
        del self.geom

    def randomizePosition(self):
        # Random spot in a 5x5 area with a random heading.
        x = 5 * (random.random() - 0.5)
        y = 5 * (random.random() - 0.5)
        h = 360 * random.random()
        self.geom.setPos(x, y, 0)
        self.geom.setHpr(h, 0, 0)

    def enter(self):
        # Start looping the jump cycle when the prop becomes active.
        AnimatedProp.AnimatedProp.enter(self)
        self.track.loop()

    def exit(self):
        # Stop the cycle and any effects still playing mid-jump.
        AnimatedProp.AnimatedProp.exit(self)
        self.track.finish()
        self.splash.stop()
        self.exitRipples.stop()
""" viscount.task.models Task models """ from ..core import db from ..utils import JSONSerializer class TaskInputFile(JSONSerializer, db.Model): __tablename__ = 'tasks_input_files' id = db.Column(db.Integer, primary_key=True) task_id = db.Column(db.Integer, db.ForeignKey('tasks.id'), nullable=False) file_type_id = db.Column(db.Integer, db.ForeignKey('file_types.id'), nullable=False) name = db.Column(db.String(255), nullable=False, primary_key=True) description = db.Column(db.Text, nullable=False) class TaskOutputFile(JSONSerializer, db.Model): __tablename__ = 'tasks_output_files' id = db.Column(db.Integer, primary_key=True)
task_id = db.Column(db.Integer, db.ForeignKey('tasks.id'), nullable=False) file_type_id = db.Column(db.Integer, db.ForeignKey('file_types.id'), nullable=False) name = db.Column(db.String(255), nullable=False, primary_key=True) description = db.Column(db.Text, nullable=False) class TaskJSONSerializer(JSONSerializer): __json_modifiers__ = { 'events': lambda events, _: [dict(id=event.id) for event in events], 'inputs': lambda inputs, _: [dict(id=i
nput.id) for input in inputs], 'outputs': lambda outputs, _: [dict(id=output.id) for output in outputs], 'task_instances': lambda task_instances, _: [dict(id=task_instance.id) for task_instance in task_instances], } class Task(TaskJSONSerializer, db.Model): __tablename__ = 'tasks' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(32), unique=True) owner_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False) description = db.Column(db.Text, index=False, unique=False, nullable=False) source_file = db.Column(db.Integer, db.ForeignKey('files.id')) events = db.relationship('Event', backref='task', lazy='dynamic') inputs = db.relationship('TaskInputFile', backref='task', lazy='dynamic') outputs = db.relationship('TaskOutputFile', backref='task', lazy='dynamic') task_instances = db.relationship('WorkflowTaskInstance', backref='task', lazy='dynamic') def __repr__(self): return '<Task %r>' % (self.name)
from __future__ import unicode_literals

from django.utils.encoding import python_2_unicode_compatible
from django.conf import settings
from django.db import models
from django.core.urlresolvers import reverse
from django.db.models.signals import pre_save
from django.utils import timezone
from django.utils.text import slugify


class CareerManager(models.Manager):
    """Manager exposing only live (non-draft, already-published) postings."""

    def active(self, *args, **kwargs):
        return super(CareerManager, self).filter(draft=False).filter(published_at__lte=timezone.now())


@python_2_unicode_compatible
class Career(models.Model):
    """A job posting."""

    FULLTIME = 'Full-time'
    PARTTIME = 'Part-time'
    INTERNSHIP = 'Internship'
    RESEARCH = 'Research'
    ROLE_CATEGORY_CHOICES = (
        (FULLTIME, 'Full-time'),
        (PARTTIME, 'Part-time'),
        (INTERNSHIP, 'Internship'),
        (RESEARCH, 'Research'),
    )
    role_category = models.CharField(
        max_length=12,
        choices=ROLE_CATEGORY_CHOICES,
        default=FULLTIME,
    )

    # Role
    role = models.CharField(max_length=120)

    # Location
    city = models.CharField(max_length=255)

    # Plain text and urlify slug
    career_slug = models.SlugField(unique=True)
    career_offer_title = models.CharField(max_length=255, default="")
    career_offer_description = models.TextField(default="")
    career_experience = models.TextField(default="")
    career_terms = models.TextField(default="")

    # Time and meta staff
    draft = models.BooleanField(default=False)
    published_at = models.DateField(auto_now=False, auto_now_add=False)
    updated = models.DateTimeField(auto_now=True, auto_now_add=False)
    timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)

    objects = CareerManager()

    def __unicode__(self):
        return self.role

    def __str__(self):
        return self.role

    def get_absolute_url(self):
        return reverse('careers:detail', kwargs={'slug': self.career_slug})

    class Meta:
        ordering = ["-timestamp", "-updated"]


def create_slug(instance, new_slug=None):
    """Return a slug for *instance*, suffixing the newest clash's id until unique.

    FIXES:
    - was `slugify(instance.title)`: Career has no `title` field (the
      display field is `role`), which raised AttributeError on save.
    - the recursive call used keyword `slug=`, which is not a parameter of
      this function, raising TypeError on the first collision.
    """
    career_slug = slugify(instance.role)
    if new_slug is not None:
        career_slug = new_slug
    qs = Career.objects.filter(career_slug=career_slug).order_by("-id")
    if qs.exists():
        new_slug = "%s-%s" % (career_slug, qs.first().id)
        return create_slug(instance, new_slug=new_slug)
    return career_slug


def pre_save_post_receiver(sender, instance, *args, **kwargs):
    # Only fill the slug when empty so manually-set slugs survive edits.
    if not instance.career_slug:
        instance.career_slug = create_slug(instance)


pre_save.connect(pre_save_post_receiver, sender=Career)
def task_hello():
    """hello py """

    def python_hello(times, text, targets):
        # Append `text` repeated `times` to the first target file.
        with open(targets[0], "a") as output_file:
            output_file.write(text * times)

    task_spec = {
        'actions': [(python_hello, [3, "py!\n"])],
        'targets': ["hello.txt"],
    }
    return task_spec
from numpy import *
import numpy as np  # BUG FIX: `np` was referenced below but never imported
from matplotlib.pyplot import *
import scipy.constants as sc
import copy
import scipy.integrate as integ


# test sun/earth with hw5(1.989e30,5.972e24,149.6e6,0.0167,1000)
def hw5(m1, m2, a, e, tmax, tstep=0.001, tplot=0.025, method='leapfrog'):
    """Simulate a two-body orbit and plot both positions.

    Parameters
    ----------
    m1, m2 : float   masses of the two bodies (kg)
    a : float        semi-major axis; e : float  eccentricity
    tmax : float     run time, in units of orbital periods
    tstep : float    integration step, in units of orbital periods
    tplot : float    plotting interval, in units of orbital periods
    method : str     'leapfrog' or 'odeint'

    BUG FIXES vs. original:
    - ``np.linalg.norm`` raised NameError (only ``from numpy import *``
      was present); ``import numpy as np`` added.
    - the odeint branch sliced with a float step; cast to int.
    """
    if method != 'leapfrog' and method != 'odeint':
        print("That's not a method")
        return()

    # initialize commonly used variables
    period = sqrt((4*(pi**2)*(a**3)) / (sc.G*(m1 + m2)))
    dt = period*tstep

    # initialize objects at time 0 (barycentric positions/velocities)
    q = m1 / m2
    r0 = (1-e)*a/(1+q)
    v0 = (1/(1+q))*sqrt((1+e)/(1-e))*sqrt(sc.G*(m1+m2)/a)
    rv = array([r0, 0, 0, v0, -q*r0, 0, 0, -q*v0])

    # set up figure
    figure(1)
    gca().set_aspect('equal')
    xlim([-2*a, 2*a])
    ylim([-2*a, 2*a])

    rv_list = []
    if method == 'leapfrog':
        timeCounter = 0
        frameCounter = 0
        while timeCounter < tmax:
            # record positions if tplot time has passed
            if frameCounter >= tplot:
                frameCounter = 0
                rv_list.append(copy.deepcopy(rv))

            # calc positions
            rv[0] = rv[0] + rv[2]*dt
            rv[1] = rv[1] + rv[3]*dt
            rv[4] = rv[4] + rv[6]*dt
            rv[5] = rv[5] + rv[7]*dt

            # calc acceleration from the inverse-square force
            r = array([rv[0] - rv[4], rv[1] - rv[5]])
            force = ((sc.G*m1*m2)/(np.linalg.norm(r)**2))*(r/np.linalg.norm(r))

            # calc velocity
            rv[2] = rv[2] - (force[0]/m1)*dt
            rv[3] = rv[3] - (force[1]/m1)*dt
            rv[6] = rv[6] + (force[0]/m2)*dt
            rv[7] = rv[7] + (force[1]/m2)*dt

            # increment counters
            timeCounter += tstep
            frameCounter += tstep

        # record final position
        rv_list.append(copy.deepcopy(rv))
        rv_list_plot = rv_list
    else:  # odeint
        rv_list = integ.odeint(deriv, rv, arange(0, tmax*period, dt), (m1, m2))
        # We integrated at tstep resolution but plot at tplot resolution.
        # BUG FIX: slice steps must be ints in Python 3.
        t_interval = int(round(tplot / tstep))
        rv_list_plot = rv_list[::t_interval]

    # plot
    for i in range(len(rv_list_plot)):
        plot(rv_list_plot[i][0], rv_list_plot[i][1], 'bo')
        plot(rv_list_plot[i][4], rv_list_plot[i][5], 'go')
    draw()


def deriv(rv, dt, m1, m2):
    """Derivative of the state vector [x1,y1,vx1,vy1,x2,y2,vx2,vy2] for odeint."""
    # calc position deriv
    rv_copy = zeros(8)
    rv_copy[0] = rv[2]
    rv_copy[1] = rv[3]
    rv_copy[4] = rv[6]
    rv_copy[5] = rv[7]

    # calc velocity deriv from the pairwise gravitational force
    r = array([rv[0] - rv[4], rv[1] - rv[5]])
    force = ((sc.G*m1*m2)/(np.linalg.norm(r)**2))*(r/np.linalg.norm(r))
    rv_copy[2] = - (force[0]/m1)
    rv_copy[3] = - (force[1]/m1)
    rv_copy[6] = + (force[0]/m2)
    rv_copy[7] = + (force[1]/m2)
    return rv_copy
heme as csp # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.mathjax', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'FetchGNX design notes' copyright = '2015, Stephen Leach' author = 'Stephen Leach' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.8.3' # The full version, including alpha/beta/rc tags. release = '0.8.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # dire
ctories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_f
unction_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'cloud' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = {"defaultcollapsed":True} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = [csp.get_theme_dir()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. 
# Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'FetchGNXdesignnotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'FetchGNXdesignnotes.tex', 'FetchGNX design notes Documentation', 'Stephen Leach', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'fetchgnxdesignnotes', 'FetchGNX design notes Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-09 23:22
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):
    """Rename ``Player.username`` to ``Player.email``.

    Pure column rename: existing row data is preserved, no data
    transformation is performed.
    """

    # Must run after the previous auto-generated player migration.
    dependencies = [
        ('player', '0002_auto_20160505_0350'),
    ]

    operations = [
        migrations.RenameField(
            model_name='player',
            old_name='username',
            new_name='email',
        ),
    ]
#!/usr/bin/env python
import sys
import textwrap

try:
    import virtualenv  # @UnresolvedImport
except ImportError:  # BUG FIX: was a bare except that hid unrelated errors
    from .lib import virtualenv  # @Reimport

from . import snippits

__version__ = "0.9.1"


def file_search_dirs():
    """Return virtualenv's search dirs, excluding vootstrap-related ones."""
    dirs = []
    for d in virtualenv.file_search_dirs():
        if "vootstrap" not in d:
            dirs.append(d)
    return dirs


def make_parser():
    """Build the option parser; mirrors virtualenv's own options plus
    vootstrap-specific ones (--install-requirements, --path)."""
    parser = virtualenv.ConfigOptionParser(
        usage="usage: %prog [OPTIONS] OUTFILE",
        version=__version__,
        formatter=virtualenv.UpdatingDefaultsHelpFormatter())
    parser.add_option(
        "-v", "--verbose", action="count", dest="verbose", default=0,
        help="Increase verbosity")
    parser.add_option(
        "-q", "--quiet", action="count", dest="quiet", default=0,
        help="Decrease verbosity")
    parser.add_option(
        "-p", "--python", dest="python", metavar="PYTHON_EXE",
        help="The Python interpreter to use, e.g., --python=python2.5 will "
        "use the python2.5 interpreter to create the new environment. The "
        "default is the interpreter that virtualenv was installed with (%s)"
        % sys.executable)
    parser.add_option(
        "--clear", dest="clear", action="store_true",
        help="Clear out the non-root install and start from scratch")
    parser.add_option(
        "--no-site-packages", dest="no_site_packages", action="store_true",
        help="Don't give access to the global site-packages dir to the "
        "virtual environment (default; deprecated)")
    parser.add_option(
        "--system-site-packages", dest="system_site_packages",
        action="store_true",
        help="Give access to the global site-packages dir to the "
        "virtual environment")
    parser.add_option(
        "--unzip-setuptools", dest="unzip_setuptools", action="store_true",
        help="Unzip Setuptools or Distribute when installing it")
    parser.add_option(
        "--relocatable", dest="relocatable", action="store_true",
        help="Make an EXISTING virtualenv environment relocatable. "
        "This fixes up scripts and makes all .pth files relative")
    parser.add_option(
        "--distribute", "--use-distribute", dest="use_distribute",
        action="store_true",
        help="Use Distribute instead of Setuptools. Set environ variable "
        "VIRTUALENV_DISTRIBUTE to make it the default ")
    parser.add_option(
        "--extra-search-dir", dest="search_dirs", action="append",
        default=['.'],
        help="Directory to look for setuptools/distribute/pip distributions "
        "in. You can add any number of additional --extra-search-dir paths.")
    parser.add_option(
        "--never-download", dest="never_download", action="store_true",
        help="Never download anything from the network. Instead, virtualenv "
        "will fail if local distributions of setuptools/distribute/pip are "
        "not present.")
    parser.add_option(
        "--prompt", dest="prompt",
        help="Provides an alternative prompt prefix for this environment")
    parser.add_option(
        "--install-requirements", default=False, action="store_true",
        dest="install_requirements",
        help="Install requirements.txt after vootstrapping")
    parser.add_option(
        "--path", action="append", dest="path",
        help="Directory to add to vootstrapped sys.path. You can add any "
        "number of additional --path paths. Relative directories are relative "
        "to the vootstrapped directory")
    return parser


def adjust_options(options):
    """Render the adjust_options() function embedded in the generated script."""
    out_str = "def adjust_options(options, args):\n"
    opts = [
        "verbose", "quiet", "python", "clear", "no_site_packages",
        "system_site_packages", "unzip_setuptools", "relocatable",
        "use_distribute", "search_dirs", "never_download", "prompt"
    ]
    for opt in opts:
        # BUG FIX: use %r so string values become valid Python literals.
        # %s emitted bare tokens (options.python = python2.5), producing a
        # SyntaxError in the generated bootstrap script.
        out_str += "    options.%s = %r\n" % (opt, getattr(options, opt))
    out_str += snippits.ADJUST_OPTIONS_ARGS
    return textwrap.dedent(out_str)


def after_install(options):
    """Render the after_install() hook, or '' when nothing is requested."""
    if not (options.install_requirements or options.path):
        return ""
    out_str = snippits.AFTER_INSTALL_PREFIX
    if options.path:
        out_str += snippits.AFTER_INSTALL_PATH(options.path)
    if options.install_requirements:
        out_str += snippits.AFTER_INSTALL_REQUIREMENTS
    return textwrap.dedent(out_str)


def vootify(options):
    """Return the full text of the generated bootstrap script."""
    return virtualenv.create_bootstrap_script(
        adjust_options(options) + after_install(options)
    )


def main():
    """CLI entry point; returns a process exit code."""
    parser = make_parser()
    (options, args) = parser.parse_args()
    if not len(args):
        parser.print_help()
        return 1
    with open(args[0], "w") as outfile:
        outfile.write(vootify(options))
    return 0


if __name__ == "__main__":
    exit_code = main()
    # BUG FIX: exit_code is an int; the original `if exit_code():` raised
    # TypeError whenever main() returned non-zero.
    if exit_code:
        sys.exit(exit_code)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.

# Import sile objects (Sile, sile_fh_open, sile_raise_write, add_sile, ...)
from .sile import *

from sisl._internal import set_module
from sisl import Geometry

__all__ = ['moldenSile']


@set_module("sisl.io")
class moldenSile(Sile):
    """ Molden file object """

    @sile_fh_open()
    def write_supercell(self, sc):
        """ Writes the supercell to the contained file

        Only writes the Molden format header; the cell vectors in ``sc``
        are not emitted because the Molden format does not read them.
        """
        # Check that we can write to the file
        sile_raise_write(self)

        # Write the number of atoms in the geometry
        self._write('[Molden Format]\n')

        # Sadly, MOLDEN does not read this information...

    @sile_fh_open()
    def write_geometry(self, geometry, fmt='.8f'):
        """ Writes the geometry to the contained file

        Emits the ``[Atoms] Angs`` section: one line per atom with
        symbol, index, atomic number and Cartesian coordinates formatted
        with ``fmt``.
        """
        # Check that we can write to the file
        sile_raise_write(self)

        # Be sure to write the supercell
        self.write_supercell(geometry.sc)

        # Write in ATOM mode
        self._write('[Atoms] Angs\n')

        # Write out the cell information in the comment field
        # This contains the cell vectors in a single vector (3 + 3 + 3)
        # quantities, plus the number of supercells (3 ints)
        # NOTE(review): ``ia`` comes straight from iter_species(), so the
        # emitted atom index is 0-based — confirm Molden readers accept this.
        fmt_str = '{{0:2s}} {{1:4d}} {{2:4d}} {{3:{0}}} {{4:{0}}} {{5:{0}}}\n'.format(fmt)
        for ia, a, _ in geometry.iter_species():
            self._write(fmt_str.format(a.symbol, ia, a.Z, *geometry.xyz[ia, :]))

    def ArgumentParser(self, p=None, *args, **kwargs):
        """ Returns the arguments that is available for this Sile """
        # Delegate to the Geometry read from this file, with the
        # single-geometry defaults merged under the caller's kwargs.
        newkw = Geometry._ArgumentParser_args_single()
        newkw.update(kwargs)
        return self.read_geometry().ArgumentParser(p, *args, **newkw)


# Register under extension .molf (case-insensitive, gzip-transparent).
add_sile('molf', moldenSile, case=False, gzip=True)
#!/usr/bin/env python from __future__ import print_function import boto3 import time from botocore.exceptions import ClientError from datetime import datetime def get_unix_timestamp(): """ Generate a Unix timestamp string. """ d = datetime.now() t = time.mktime(d.timetuple()) return str(int(t)) def lambda_handler(event, context): """ Create EBS AMI for instances identified by the filter. """ if not 'DryRun' in event: event['DryRun'] = False if not 'Filters' in event: event['Filters'] = [{ 'Name': 'tag-key', 'Values': ['ops:snapshot'] }] ec2 = boto3.resource('ec2') # Iterate through instances identified by the filter. for instance in ec2.instances.filter(Filters=event['Filters']): instance_name = instance.instance_id instance_tags = [] # If a Name tag is available, use it to identify the instance # instead of the instance_id. for tag in instance.tags: if tag['Key'] == 'Name' and tag['Value'] != '': instance_name = tag['Val
ue'] else: instance_tags.append(tag) try: # Create the AMI image_name = instance_name + '-' + get_unix_timestamp() i
mage = instance.create_image( Name=image_name, NoReboot=True, DryRun=event['DryRun'] ) print('Started image creation: ' + image_name) image_tags = [{'Key': 'ops:retention', 'Value': '30'}] + instance_tags image.create_tags( Tags=image_tags, DryRun=event['DryRun'] ) except ClientError as e: if e.response['Error']['Code'] == 'DryRunOperation': pass
#!/usr/bin/env python from __future__ import print_function import argparse import numpy as N import os import sys def parse_args(args): p = argparse.ArgumentPar
ser() p.add_argument('-i', '--input-files', default=[sys.stdin], nargs="+", type=argparse.FileType('rt'), help='input file or empty (stdin)') p.add_argument(
'-d', '--decorate',default=False,action='store_true' ,help='put the stat name before the value (e.g mean:1)') g = p.add_mutually_exclusive_group() g.add_argument('-A','--all-stats',action='store_true',default=False) h = p.add_argument_group('stat') h.add_argument('-a', '--mean', action='store_true', default=False) h.add_argument('-D', '--median', action='store_true', default=False) h.add_argument('-s', '--standard_deviation',action='store_true',default=False) h.add_argument('-v', '--variance', action='store_true', default=False) h.add_argument('-m', '--min', action='store_true', default=False) h.add_argument('-M', '--max', action='store_true', default=False) if not args: p.print_help() sys.exit(0) return p.parse_args(args) def main(): args = parse_args(sys.argv[1:]) for input_file in args.input_files: vals = [float(x) for x in input_file.read().split(os.linesep) if x] a = N.array(vals) s = [] for (name,value,f) in [('mean', args.mean, N.mean) , ('median', args.median, N.median) , ('standard_deviation', args.standard_deviation , N.std) , ('variance', args.variance, N.var) , ('min', args.min, N.amin) , ('max', args.max, N.amax)]: if not args.all_stats and not value: continue r = f(a) if args.decorate: s.append('{}:{}'.format(name,r)) else: s.append('{}'.format(r)) print(' '.join(s)) if __name__=='__main__': main()
""" This module implements atom/bond/structure-wise descriptor calculated from pretrained megnet model """ import os from typing import Dict, Union import numpy as np from tensorflow.keras.models import Model from megnet.models import GraphModel, MEGNetModel from megnet.utils.typing import StructureOrMolecule DEFAULT_MODEL = os.path.join(os.path.dirname(__file__), "../../mvl_models/mp-2019.4.1/formation_energy.hdf5") class MEGNetDescriptor: """ MEGNet descriptors. This class takes a trained model and then compute the intermediate outputs as structure features """ def __init__(self, model_name: Union[str, GraphModel, MEGNetModel] = DEFAULT_MODEL, use_cache: bool = True): """ Args: model_name (str or MEGNetModel): trained model. If it is str, then only models in mvl_models are used. use_cache (bool): whether to use cache for structure graph calculations """ if isinstance(model_name, str): model = MEGNetModel.from_file(model_name) elif isinstance(model_name, GraphModel): model = model_name else: raise ValueError("model_name only support str or GraphModel object") layers = model.layers important_prefix = ["meg", "set", "concatenate"] all_names = [i.name for i in layers if any(i.name.startswith(j) for j in important_prefix)] if any(i.startswith("megnet") for i in all_names): self.version = "v2" else: self.version = "v1" valid_outputs = [i.output for i in layers if any(i.name.startswith(j) for j in important_prefix)] outputs = [] valid_names = [] for i, j in zip(all_names, valid_outputs): if isinstance(j, list): for k, l in enumerate(j): valid_names.append(i + f"_{k}") outputs.append(l) else: valid_names.append(i) outputs.append(j) full_model = Model(inputs=model.inputs, outputs=outputs) model.model = full_model self.model = model self.valid_names = valid_names self._cache: Dict[str, float] = {} self.use_cache = use_cache def _predict_structure(self, structure: StructureOrMolecule) -> np.ndarray: graph = self.model.graph_converter.convert(structure) inp = 
self.model.graph_converter.graph_to_input(graph) return self.model.predict(inp) def _predict_feature(self, structure: StructureOrMolecule) -> np.ndarray: if not self.use_cache: return self._predict_structure(structure) s = str(structure) if s in self._cache: return self._cache[s] result = se
lf._predict_structure(structure) self._cache[s] = result return result def _get_features(self, structure: StructureOrM
olecule, prefix: str, level: int, index: int = None) -> np.ndarray: name = prefix if level is not None: name = f"{prefix}_{level}" if index is not None: name += f"_{index}" if name not in self.valid_names: raise ValueError(f"{name} not in original megnet model") ind = self.valid_names.index(name) out_all = self._predict_feature(structure) return out_all[ind][0] def _get_updated_prefix_level(self, prefix: str, level: int): mapping = { "meg_net_layer": ["megnet", level - 1], "set2_set": ["set2set_atom" if level == 1 else "set2set_bond", None], "concatenate": ["concatenate", None], } if self.version == "v2": return mapping[prefix][0], mapping[prefix][1] # type: ignore return prefix, level def get_atom_features(self, structure: StructureOrMolecule, level: int = 3) -> np.ndarray: """ Get megnet atom features from structure Args: structure: pymatgen structure or molecule level: int, indicating the block number of megnet, starting from 1 Returns: nxm atomic feature matrix """ prefix, level = self._get_updated_prefix_level("meg_net_layer", level) return self._get_features(structure, prefix=prefix, level=level, index=0) def get_bond_features(self, structure: StructureOrMolecule, level: int = 3) -> np.ndarray: """ Get bond features at megnet block level Args: structure: pymatgen structure level: int Returns: n_bond x m bond feature matrix """ prefix, level = self._get_updated_prefix_level("meg_net_layer", level) return self._get_features(structure, prefix=prefix, level=level, index=1) def get_global_features(self, structure: StructureOrMolecule, level: int = 2) -> np.ndarray: """ Get state features at megnet block level Args: structure: pymatgen structure or molecule level: int Returns: 1 x m_g global feature vector """ prefix, level = self._get_updated_prefix_level("meg_net_layer", level) return self._get_features(structure, prefix=prefix, level=level, index=2) def get_set2set(self, structure: StructureOrMolecule, ftype: str = "atom") -> np.ndarray: """ Get set2set output 
as features Args: structure (StructureOrMolecule): pymatgen structure or molecule ftype (str): atom or bond Returns: feature matrix, each row is a vector for an atom or bond """ mapping = {"atom": 1, "bond": 2} prefix, level = self._get_updated_prefix_level("set2_set", level=mapping[ftype]) return self._get_features(structure, prefix=prefix, level=level) def get_structure_features(self, structure: StructureOrMolecule) -> np.ndarray: """ Get structure level feature vector Args: structure (StructureOrMolecule): pymatgen structure or molecule Returns: one feature vector for the structure """ prefix, level = self._get_updated_prefix_level("concatenate", level=1) return self._get_features(structure, prefix=prefix, level=level)
#!/usr/bin/env python
import sys
import json
import logging
from logging import warning, error, info
from math import pi, degrees

from PyQt4 import Qt, QtCore, QtGui

from connection import Connection

# Unit-scale arrow polygon (pointing +y); scaled per symbol at draw time.
arrow_points = (
    Qt.QPoint(-1, -4),
    Qt.QPoint(1, -4),
    Qt.QPoint(1, 4),
    Qt.QPoint(4, 4),
    Qt.QPoint(0, 12),
    Qt.QPoint(-4, 4),
    Qt.QPoint(-1, 4)
)


class PlotGroup:
    """One plotted series: a color, a symbol style and a list of data points."""

    def __init__(self, color=Qt.Qt.black, symbol='cross'):
        self.color = color
        self.symbol = symbol    # one of 'arrow', 'cross', 'model'
        self.data = []          # point tuples; layout depends on symbol

class XYPlot(Qt.QWidget):
    """A pannable/zoomable 2-D plot widget drawn with QPainter."""

    def __init__(self):
        Qt.QWidget.__init__(self)
        # little dance to make the background white.
        p = self.palette()
        p.setColor(self.backgroundRole(), Qt.Qt.white)
        self.setPalette(p)
        self.setAutoFillBackground(True)
        # map scale
        self._scale = 1.0
        self.symbol_size = 5.0
        # symbols are drawn in map coordinates, so their scale is kept
        # inversely proportional to the map scale to stay constant on screen
        self._symbol_scale = self.symbol_size/self._scale
        self._offset_x = 400
        self._offset_y = 300
        self.messages = []
        self.groups = []

    def translate(self, x, y):
        """Pan the view by (x, y) screen pixels and repaint."""
        self._offset_x += x
        self._offset_y += y
        self.update()

    def scale(self, s):
        """Multiply the zoom factor by *s* and repaint."""
        self._scale *= s
        self._symbol_scale = self.symbol_size/self._scale
        self.update()

    def drawArrow(self, qp, x, y, angle):
        """Draw an oriented arrow symbol at map position (x, y)."""
        qp.save()
        qp.translate(x, y)
        qp.rotate(angle)
        qp.scale(self._symbol_scale*0.5, self._symbol_scale*0.5)
        qp.drawPolygon(*arrow_points)
        qp.restore()

    def drawCross(self, qp, x, y):
        """Draw an X-shaped marker at map position (x, y)."""
        qp.save()
        qp.translate(x, y)
        qp.scale(self._symbol_scale, self._symbol_scale)
        qp.drawLine(-1, -1, 1, 1)
        qp.drawLine(-1, 1, 1, -1)
        qp.restore()

    def drawPlus(self, qp, x, y):
        """Draw a +-shaped marker at map position (x, y)."""
        qp.save()
        qp.translate(x, y)
        qp.scale(self._symbol_scale, self._symbol_scale)
        qp.drawLine(-1, 0, 1, 0)
        qp.drawLine(0, -1, 0, 1)
        qp.restore()

    def drawModel(self, qp, x, y, angle, steer):
        """Draw a top-down vehicle glyph (chassis, axles, steerable wheels)."""
        # all the units are x10 because there is some rounding(?)
        # issue where lines don't joint correctly when using
        # the meter units directly.
        # there is a scale(0.1,0.1) further down to put things
        # back to the correct size.
        Lf = 16 # length of chass from middle to front axle
        Lb = 23 # length of chassis from middle to back axle
        Wa = 13 # half axle length
        Lw = 10 # wheel length
        qp.save()
        qp.translate(x,y)
        qp.rotate(angle)
        #qp.scale(self._symbol_scale, self._symbol_scale)
        qp.scale(0.1, 0.1)
        qp.drawLine(0, -Lb, 0, Lf) # main body
        qp.save() # begin rear end
        qp.translate(0.0, -Lb)
        qp.drawLine(-Wa, 0.0, Wa, 0.0) # rear axle
        qp.drawLine(-Wa,-Lw, -Wa, Lw) #left wheel
        qp.drawLine(Wa, -Lw, Wa, Lw) # right wheel
        qp.restore()
        qp.translate(0.0, Lf) # begin front end
        qp.drawLine(-Wa, 0.0, Wa, 0.0) # front axle
        qp.save() # begin left wheel
        qp.translate(-Wa, 0.0)
        qp.rotate(-steer)
        qp.drawLine(0.0, -Lw, 0.0, Lw)
        qp.restore()
        qp.save() # begine right wheel
        qp.translate(Wa, 0.0)
        qp.rotate(-steer)
        qp.drawLine(0.0, -Lw, 0.0, Lw)
        qp.restore()
        qp.restore()

    def paintGrid(self, qp):
        # Placeholder: grid drawing not implemented.
        pass

    def paintEvent(self, e):
        """Repaint: text messages in screen space, then all groups in map space."""
        #print self.offset_x, self.offset_y, self.s
        qp = QtGui.QPainter()
        qp.begin(self)
        qp.setRenderHint(QtGui.QPainter.Antialiasing, True)
        # status messages are drawn before the map transform is applied
        line_y = 20
        for line in self.messages:
            qp.drawText(20, line_y, line)
            line_y += 20
        # map transform: pan, then zoom with y flipped so +y is up
        qp.translate(self._offset_x, self._offset_y)
        qp.scale(self._scale, -self._scale)
        #qp.translate(200, 200)
        qp.setBrush(Qt.Qt.black)
        qp.setPen(Qt.Qt.black)
        self.drawCross(qp, 0, 0)
        for group in self.groups:
            if group.symbol == 'arrow':
                qp.setBrush(group.color)
                qp.setPen(Qt.Qt.NoPen)
                for v in group.data:
                    self.drawArrow(qp, v[0], v[1], v[2])
            elif group.symbol == 'cross':
                qp.setBrush(Qt.Qt.NoBrush)
                qp.setPen(group.color)
                for v in group.data:
                    self.drawCross(qp, v[0], v[1])
            elif group.symbol == 'model':
                pen = Qt.QPen()
                pen.setWidth(self._symbol_scale)
                pen.setColor(group.color)
                qp.setBrush(group.color)
                qp.setPen(pen)
                for v in group.data:
                    #print("Draw model %0.2f %0.2f %0.2f %0.2f" % (v[0:4]))
                    self.drawModel(qp, v[0], v[1], v[2], v[3])
        qp.end()

    def add_plot_group(self, g):
        self.groups.append(g)

    #def update(self):

class MapPlot(XYPlot):
    """XYPlot specialized for vehicle telemetry: pose glyph + waypoints."""

    def __init__(self):
        XYPlot.__init__(self)
        self.current_pos = PlotGroup(color=Qt.Qt.blue, symbol='model')
        self.add_plot_group(self.current_pos)
        self.waypoint_group = PlotGroup(color=Qt.Qt.black, symbol='cross')
        self.add_plot_group(self.waypoint_group)
        self.scale(12)

    def on_msg(self, msg):
        """Update pose and waypoints from a telemetry dict and repaint.

        Expects msg['state'] with 'x'/'y'/'yaw' (radians),
        msg['controls']['steer'] (radians) and
        msg['waypoint_control']['points'].
        """
        try:
            #t = msg[u'state'][u'time']
            current = (msg[u'state'][u'x'],
                       msg[u'state'][u'y'],
                       degrees(msg[u'state'][u'yaw']),
                       degrees(msg[u'controls'][u'steer']))
            waypoints = msg[u'waypoint_control'][u'points']
        except KeyError:
            logging.error("Invalid message.")
        else:
            self.current_pos.data = [current]
            self.waypoint_group.data = waypoints
            self.update()

class MainWindow(Qt.QWidget):
    """Top-level window: a MapPlot fed by a network Connection, with
    keyboard pan/zoom (arrows, A/Z) and Escape to close."""

    def __init__(self):
        Qt.QWidget.__init__(self)
        self.grid = Qt.QGridLayout()
        self.setLayout(self.grid)
        self.plot = MapPlot()
        self.grid.addWidget(self.plot, 0, 0)
        # NOTE(review): Connection presumably invokes self.update(msg) per
        # received message from its own thread/loop — confirm thread-safety.
        self.connection = Connection('localhost', 60212, self.update)

    def update(self, msg):
        self.plot.on_msg(msg)

    def keyPressEvent(self, e):
        if e.key() == Qt.Qt.Key_Escape:
            self.close()
        elif e.key() == Qt.Qt.Key_A:
            self.plot.scale(2)
        elif e.key() == Qt.Qt.Key_Z:
            self.plot.scale(0.5)
        elif e.key() == Qt.Qt.Key_Up:
            self.plot.translate(0, 10)
        elif e.key() == Qt.Qt.Key_Down:
            self.plot.translate(0, -10)
        elif e.key() == Qt.Qt.Key_Left:
            self.plot.translate(10, 0)
        elif e.key() == Qt.Qt.Key_Right:
            self.plot.translate(-10, 0)

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    app = Qt.QApplication([])
    demo = MainWindow()
    demo.resize(800, 600)
    demo.show()
    sys.exit(app.exec_())
""" Example of reasoning about the approximate node completeness. """ from tulip import * from tulipgui import * import tulippaths as tp # Load graph graphFile = '../data/514_4hops.tlp' graph = tlp.loadGraph(graphFile) # Compute completeness for each node label completeness = tp.utils.g
etApproximateAnnotationCompleteness(graph) # Tally completeness numComplete = 0 numAlmostComplete = 0 numIncomplete = 0 for node in graph.getNodes(): currCompl
eteness = completeness[node] if currCompleteness <= 1.0 and currCompleteness > 0.75: numComplete += 1 elif currCompleteness <= 0.75 and currCompleteness > 0.25: numAlmostComplete += 1 else: graph.delNode(node) numIncomplete += 1 print('num complete, num almost complete, num incomplete') print((str(numComplete) + ', ' + str(numAlmostComplete) + ', ' + str(numIncomplete))) nodeLinkView = tlpgui.createNodeLinkDiagramView(graph)
) class RHAbstractIntCommentBase(RHTrackAbstractBase): def _checkParams(self,params): RHTrackAbstractBase._checkParams(self,params) id=params.get("intCommentId","") if id=="": raise MaKaCError( _("the internal comment identifier hasn't been specified")) self._comment=self._abstract.getIntCommentById(id) class RHAbstractIntCommentRem(RHAbstractIntCommentBase): def _process(self): self._abstract.removeIntComment(self._comment) self._redirect(urlHandlers.UHTrackAbstractModIntComments.getURL(self._track,self._abstract)) class RHAbstractIntCommentEdit(RHAbstractIntCommentBase): def _checkParams(self,params): RHAbstractIntCommentBase._checkParams(self,params) self._action="" if params.has_key("OK"): self._action="UPDATE" self._content=params.get("content","") elif params.has_key("CANCEL"): self._action="CANCEL" def _process(self): if self._action=="UPDATE": self._comment.setContent(self._content) self._redirect(urlHandlers.UHTrackAbstractModIntComments.getURL(self._track,self._abstract)) return elif self._action=="CANCEL": self._redirect(urlHandlers.UHTrackAbstractModIntComments.getURL(self._track,self._abstract)) return p=tracks.WPModAbstractIntCommentEdit(self,self._track,self._comment) return p.display() class RHAbstractsParticipantList(RHTrackAbstractsBase): def _checkParams( self, params ): RHTrackAbstractsBase._checkParams( self, params ) self._abstractIds = self._normaliseListParam( params.get("abstracts", []) ) self._displayedGroups = params.get("displayedGroups", []) if type(self._displayedGroups) != list: self._displayedGroups = [self._displayedGroups] self._clickedGroup = params.get("clickedGroup","") def _setGroupsToDisplay(self): if self._clickedGroup in self._displayedGroups: self._displayedGroups.remove(self._clickedGroup) else: self._displayedGroups.append(self._clickedGroup) def _process( self ): if not self._abstractIds: return "<table align=\"center\" width=\"100%%\"><tr><td>There are no abstracts</td></tr></table>" submitters = OOBTree() 
primaryAuthors = OOBTree() coAuthors = OOBTree() submitterEmails = set() primaryAuthorEmails = set() coAuthorEmails = set() self._setGroupsToDisplay() abMgr = self._conf.getAbstractMgr() for abstId in self._abstractIds: abst = abMgr.getAbstractById(abstId) #Submitters subm = abst.getSubmitter() keySB = "%s-%s-%s"%(subm.getSurName().lower(), subm.getFirstName().lower(), subm.getEmail().lower()) submitters[keySB] = subm submitterEmails.add(subm.getEmail()) #Primary authors for pAut in abst.getPrimaryAuthorList(): keyPA = "%s-%s-%s"%(pAut.getSurName().lower(), pAut.getFirstName().lower(), pAut.getEmail().lower()) primaryAuthors[keyPA] = pAut primaryAuthorEmails.add(pAut.getEmail()) #Co-authors for coAut in abst.getCoAuthorList(): keyCA = "%s-%s-%s"%(coAut.getSurName().lower(), coAut.getFirstName().lower(), coAut.getEmail().lower()) coAuthors[keyCA] = coAut coAuthorEmails.add(coAut.getEmail()) emailList = {"submitters":{},"primaryAuthors":{},"coAuthors":{}} emailList["submitters"]["tree"] = submitters emailList["primaryAuthors"]["tree"] = primaryAuthors emailList["coAuthors"]["tree"] = coAuthors emailList["submitters"]["emails"] = submitterEmails emailList["primaryAuthors"]["emails"] = primaryAuthorEmails emailList["coAuthors"]["emails"] = coAuthorEmails p = conferences.WPConfParticipantList(self, self._target.getConference(), emailList, self._displayedGroups, self._abstractIds ) return p.display() class ContribFilterCrit(filters.FilterCriteria): _availableFields = { \ contribFilters.TypeFilterField.getId():contribFilters.TypeFilterField, \ contribFilters.StatusFilterField.getId():contribFilters.StatusFilterField, \ contribFilters.AuthorFilterField.getId():contribFilters.AuthorFilterField, \ contribFilters.SessionFilterField.getId():contribFilters.SessionFilterField } class ContribSortingCrit(filters.SortingCriteria): _availableFields={\ contribFilters.NumberSF.getId():contribFilters.NumberSF, contribFilters.DateSF.getId():contribFilters.DateSF, 
contribFilters.ContribTypeSF.getId():contribFilters.ContribTypeSF, contribFilters.TrackSF.getId():contribFilters.TrackSF, contribFilters.SpeakerSF.getId():contribFilters.SpeakerSF,
contribFilters.BoardNumberSF.getId():contribFilters.BoardNumberSF, contribFilters.SessionSF.getId():contribFilters.SessionSF, contribFilters.TitleSF.getId():contribFilters.TitleSF } class RHContribList(RHTrackAbstractsBase): def _checkProtection(self): RHTrackAbstractsBase._checkProtection(self, False) def _checkParams( self, params ): RHTrackAbstractsBase._checkParams(self,params) self._conf=self._track.getConferen
ce() filterUsed=params.has_key("OK") #sorting self._sortingCrit=ContribSortingCrit([params.get("sortBy","number").strip()]) self._order = params.get("order","down") #filtering filter = {"author":params.get("authSearch","")} ltypes = [] if not filterUsed: for type in self._conf.getContribTypeList(): ltypes.append(type.getId()) else: for id in self._normaliseListParam(params.get("types",[])): ltypes.append(id) filter["type"]=ltypes lsessions= [] if not filterUsed: for session in self._conf.getSessionList(): lsessions.append( session.getId() ) filter["session"]=self._normaliseListParam(params.get("sessions",lsessions)) lstatus=[] if not filterUsed: for status in ContribStatusList().getList(): lstatus.append(ContribStatusList().getId(status)) filter["status"]=self._normaliseListParam(params.get("status",lstatus)) self._filterCrit=ContribFilterCrit(self._conf,filter) typeShowNoValue,sessionShowNoValue=True,True if filterUsed: typeShowNoValue = params.has_key("typeShowNoValue") sessionShowNoValue = params.has_key("sessionShowNoValue") self._filterCrit.getField("type").setShowNoValue(typeShowNoValue) self._filterCrit.getField("session").setShowNoValue(sessionShowNoValue) def _process( self ): p = tracks.WPModContribList(self,self._track) return p.display( filterCrit= self._filterCrit, sortingCrit=self._sortingCrit, order=self._order ) class RHContribsActions: """ class to select the action to do with the selected contributions """ def process(self, params): if 'PDF' in params: return RHContribsToPDF().process(params) elif 'AUTH' in params: return RHContribsParticipantList().process(params) return "no action to do" class RHContribsToPDF(RHTrackAbstractsBase): def _checkProtection(self): RHTrackAbstractsBase._checkProtection(self, False) def _checkParams( self, params ): RHTrackAbstractsBase._checkParams( self, params ) self._contribIds = self._normaliseListParam( params.get("contributions", []) ) self._contribs = [] for id in self._contribIds: 
self._contribs.append(self._conf.getContributionById(id)) def _process(self): tz = self._conf.getTimezone() if not self._contribs: return "No contributions to print" pdf = ContribsToPDF(self._conf, self._contribs, tz) return send_file('Contributions.pdf', p
# Unit tests for pcs.lib.env.LibraryEnvironment: constructor properties,
# ghost-file detection, command-runner creation and wait-timeout validation.
import logging
from functools import partial
from unittest import (
    TestCase,
    mock,
)

from pcs.common import file_type_codes
from pcs.common.reports import ReportItemSeverity as severity
from pcs.common.reports import codes as report_codes
from pcs.lib.env import LibraryEnvironment

from pcs_test.tools.assertions import assert_raise_library_error
from pcs_test.tools.custom_mock import MockLibraryReportProcessor
from pcs_test.tools.misc import create_patcher

# Patch helpers bound to the module / class under test.
patch_env = create_patcher("pcs.lib.env")
patch_env_object = partial(mock.patch.object, LibraryEnvironment)


class LibraryEnvironmentTest(TestCase):
    """Constructor arguments are exposed unchanged via properties."""

    def setUp(self):
        self.mock_logger = mock.MagicMock(logging.Logger)
        self.mock_reporter = MockLibraryReportProcessor()

    def test_logger(self):
        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
        self.assertEqual(self.mock_logger, env.logger)

    def test_report_processor(self):
        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
        self.assertEqual(self.mock_reporter, env.report_processor)

    def test_user_set(self):
        user = "testuser"
        env = LibraryEnvironment(
            self.mock_logger, self.mock_reporter, user_login=user
        )
        self.assertEqual(user, env.user_login)

    def test_user_not_set(self):
        # user_login defaults to None when not given.
        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
        self.assertEqual(None, env.user_login)

    def test_usergroups_set(self):
        groups = ["some", "group"]
        env = LibraryEnvironment(
            self.mock_logger, self.mock_reporter, user_groups=groups
        )
        self.assertEqual(groups, env.user_groups)

    def test_usergroups_not_set(self):
        # user_groups defaults to an empty list when not given.
        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
        self.assertEqual([], env.user_groups)


class GhostFileCodes(TestCase):
    """ghost_file_codes lists the file types supplied as in-memory data."""

    def setUp(self):
        self.mock_logger = mock.MagicMock(logging.Logger)
        self.mock_reporter = MockLibraryReportProcessor()

    def _fixture_get_env(self, cib_data=None, corosync_conf_data=None):
        # Environment with optional ghost (in-memory) CIB / corosync.conf.
        return LibraryEnvironment(
            self.mock_logger,
            self.mock_reporter,
            cib_data=cib_data,
            corosync_conf_data=corosync_conf_data,
        )

    def test_nothing(self):
        self.assertEqual(self._fixture_get_env().ghost_file_codes, [])

    def test_corosync(self):
        self.assertEqual(
            self._fixture_get_env(corosync_conf_data="x").ghost_file_codes,
            [file_type_codes.COROSYNC_CONF],
        )

    def test_cib(self):
        self.assertEqual(
            self._fixture_get_env(cib_data="x").ghost_file_codes,
            [file_type_codes.CIB],
        )

    def test_all(self):
        # With both ghost files present the codes come back sorted.
        self.assertEqual(
            self._fixture_get_env(
                cib_data="x",
                corosync_conf_data="x",
            ).ghost_file_codes,
            sorted([file_type_codes.COROSYNC_CONF, file_type_codes.CIB]),
        )


@patch_env("CommandRunner")
class CmdRunner(TestCase):
    """cmd_runner() builds a CommandRunner with the expected environment."""

    def setUp(self):
        self.mock_logger = mock.MagicMock(logging.Logger)
        self.mock_reporter = MockLibraryReportProcessor()

    def test_no_options(self, mock_runner):
        expected_runner = mock.MagicMock()
        mock_runner.return_value = expected_runner
        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
        runner = env.cmd_runner()
        self.assertEqual(expected_runner, runner)
        # Only the locale is forced by default.
        mock_runner.assert_called_once_with(
            self.mock_logger,
            self.mock_reporter,
            {
                "LC_ALL": "C",
            },
        )

    def test_user(self, mock_runner):
        expected_runner = mock.MagicMock()
        mock_runner.return_value = expected_runner
        user = "testuser"
        env = LibraryEnvironment(
            self.mock_logger, self.mock_reporter, user_login=user
        )
        runner = env.cmd_runner()
        self.assertEqual(expected_runner, runner)
        # A configured user login is exported as CIB_user.
        mock_runner.assert_called_once_with(
            self.mock_logger,
            self.mock_reporter,
            {
                "CIB_user": user,
                "LC_ALL": "C",
            },
        )

    @patch_env("create_tmp_cib")
    def test_dump_cib_file(self, mock_tmpfile, mock_runner):
        # Ghost CIB data is dumped to a temp file and exported as CIB_file.
        tmp_file_name = "a file"
        expected_runner = mock.MagicMock()
        mock_runner.return_value = expected_runner
        mock_instance = mock.MagicMock()
        mock_instance.name = tmp_file_name
        mock_tmpfile.return_value = mock_instance
        env = LibraryEnvironment(
            self.mock_logger, self.mock_reporter, cib_data="<cib />"
        )
        runner = env.cmd_runner()
        self.assertEqual(expected_runner, runner)
        mock_runner.assert_called_once_with(
            self.mock_logger,
            self.mock_reporter,
            {
                "LC_ALL": "C",
                "CIB_file": tmp_file_name,
            },
        )
        mock_tmpfile.assert_called_once_with(self.mock_reporter, "<cib />")


@patch_env_object("cmd_runner", lambda self: "runner")
class EnsureValidWait(TestCase):
    """ensure_wait_satisfiable: waiting is only allowed on a live cluster."""

    def setUp(self):
        # Factory preloaded with fresh logger / reporter mocks.
        self.create_env = partial(
            LibraryEnvironment,
            mock.MagicMock(logging.Logger),
            MockLibraryReportProcessor(),
        )

    @property
    def env_live(self):
        # Environment backed by the live cluster (no ghost CIB data).
        return self.create_env()

    @property
    def env_fake(self):
        # Environment with an in-memory ("ghost") CIB.
        return self.create_env(cib_data="<cib/>")

    def test_not_raises_if_waiting_false_no_matter_if_env_is_live(self):
        self.env_live.ensure_wait_satisfiable(False)
        self.env_fake.ensure_wait_satisfiable(False)

    def test_raises_when_is_not_live(self):
        env = self.env_fake
        assert_raise_library_error(
            lambda: env.ensure_wait_satisfiable(10),
            (
                severity.ERROR,
                report_codes.WAIT_FOR_IDLE_NOT_LIVE_CLUSTER,
                {},
                None,
            ),
        )

    @patch_env("get_valid_timeout_seconds")
    def test_do_checks(self, get_valid_timeout):
        # The timeout must be passed through the validation helper.
        timeout = 10
        env = self.env_live
        get_valid_timeout.return_value = timeout
        env.ensure_wait_satisfiable(timeout)
        get_valid_timeout.assert_called_once_with(timeout)
addressLine1Union = "union(){" for rowIndex in range(len(addressLine1Dots)): row = addressLine1Dots[rowIndex] for colIndex in range(len(row)): if row[colIndex] == '1': translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth addressLine1Union += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight)) addressLine1Union += "}" addressLine1Final = "translate([(8.2/17)*length,(5/11)*width,0]){resize([0,(3/55)*width,0],auto=[true,true,false]){addressLine1Union}}\n\n".replace('length',str(walletLength)).rep
lace('width',str(walletWidth)).replace('addressLine1Union',addressLine1Union) addressParts.append(addressLine1Final) # Create the second line of the address addressLine2Union = "union(){" for rowIndex in range(len(addressLine2Dots)): row = addressLine2Dots[rowIndex] fo
r colIndex in range(len(row)): if row[colIndex] == '1': translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth addressLine2Union += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight)) addressLine2Union += "}" addressLine2Final = "translate([(8.2/17)*length,(4.1/11)*width,0]){resize([0,(3/55)*width,0],auto=[true,true,false]){addressLine2Union}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('addressLine2Union',addressLine2Union) addressParts.append(addressLine2Final) # Create the QR code addressQRUnion = "union(){" for rowIndex in range(len(data["addressQR"])): row = data["addressQR"][rowIndex] for colIndex in range(len(row)): if row[colIndex] == 0: translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth addressQRUnion += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight)) addressQRUnion += "}" addressQRFinal = "translate([(0.6/17)*length,(0.6/11)*width,0]){resize([0,(8/12)*width,0],auto=[true,true,false]){addressQRUnion}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('addressQRUnion',addressQRUnion) addressParts.append(addressQRFinal) finalParts.extend(addressParts) # Draw all the things having to do with the private key if args.layoutStyle == 1 or args.layoutStyle == 2: privkeyParts = [] # Create the privkey title union and size/move it privkeyTitleUnion = "union(){" for rowIndex in range(len(privkeyTitle)): row = privkeyTitle[rowIndex] for colIndex in range(len(row)): if row[colIndex] == '1': translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth privkeyTitleUnion += 
"translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight)) privkeyTitleUnion += "}" privkeyTitleFinal = "translate([(8.7/17)*length,(7/11)*width,0]){resize([0,(4/55)*width,0],auto=[true,true,false]){privkeyTitleUnion}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('privkeyTitleUnion',privkeyTitleUnion) privkeyParts.append(privkeyTitleFinal) # Create the first line of the privkey privkeyLine1Union = "union(){" for rowIndex in range(len(privkeyLine1Dots)): row = privkeyLine1Dots[rowIndex] for colIndex in range(len(row)): if row[colIndex] == '1': translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth privkeyLine1Union += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight)) privkeyLine1Union += "}" privkeyLine1Final = "translate([(8.2/17)*length,(6/11)*width,0]){resize([0,(3/55)*width,0],auto=[true,true,false]){privkeyLine1Union}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('privkeyLine1Union',privkeyLine1Union) privkeyParts.append(privkeyLine1Final) # Create the second line of the privkey privkeyLine2Union = "union(){" for rowIndex in range(len(privkeyLine2Dots)): row = privkeyLine2Dots[rowIndex] for colIndex in range(len(row)): if row[colIndex] == '1': translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth privkeyLine2Union += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight)) privkeyLine2Union += "}" privkeyLine2Final = 
"translate([(8.2/17)*length,(5.1/11)*width,0]){resize([0,(3/55)*width,0],auto=[true,true,false]){privkeyLine2Union}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('privkeyLine2Union',privkeyLine2Union) privkeyParts.append(privkeyLine2Final) # Create the third line of the privkey privkeyLine3Union = "union(){" for rowIndex in range(len(privkeyLine3Dots)): row = privkeyLine3Dots[rowIndex] for colIndex in range(len(row)): if row[colIndex] == '1': translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth privkeyLine3Union += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight)) privkeyLine3Union += "}" privkeyLine3Final = "translate([(8.2/17)*length,(4.2/11)*width,0]){resize([0,(3/55)*width,0],auto=[true,true,false]){privkeyLine3Union}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('privkeyLine3Union',privkeyLine3Union) privkeyParts.append(privkeyLine3Final) # Create the QR code privkeyQRUnion = "union(){" for rowIndex in range(len(data["wifQR"])): row = data["wifQR"][rowIndex] for colIndex in range(len(row)): if row[colIndex] == 0: translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth privkeyQRUnion += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight)) privkeyQRUnion += "}" privkeyQRFinal = "translate([(0.6/17)*length,(0.6/11)*width,0]){resize([0,(8/12)*width,0],auto=[true,true,false]){privkeyQRUnion}}\n\n".replace('length',str(walletLength)).replace('widt
def test(options, buildout): from subprocess import Popen, PIPE import os import sys python = options['python'] if not os.path.exists(python): raise IOError("There is no file at %s" % python) if sys.platform == 'darwin': output = Popen([python, "-c", "import platform; print (platform.mac_ver())"], stdout=PIPE).communicate()[0] if not output.startswith("('10."): raise IOError("Your python at %s doesn't return proper data f
or platform.mac_ver(), got: %s" % (pytho
n, output)) elif sys.platform == 'linux2' and (2, 4) <= sys.version_info < (2, 5): output = Popen([python, "-c", "import socket; print (hasattr(socket, 'ssl'))"], stdout=PIPE).communicate()[0] if not output.startswith("True"): raise IOError("Your python at %s doesn't have ssl support, got: %s" % (python, output))
t61.fits 1293120 2021-04-09 08:04:28 pcadf126695890N007_adat71.fits 1293120 2021-04-09 08:04:28 >>> get_archive_file_list(obsid=400, detector='acis', level=2, filetype='evt2') <Table length=1> Filename Filesize Timestamp str24 int64 str19 ------------------------ -------- ------------------- acisf00400N007_evt2.fits 4619520 2011-07-08 13:52:57 :param obsid: int, str Observation ID :param detector: str Detector name (e.g. 'pcad', 'acis') :param level: int, float, str Level name (e.g. 0, 0.5, 1, 1.5, 2, 3) :param dataset: str Dataset name (default='flight') :param **params: dict Additional parameters to filter query (subdetector, filetype, obi, filename) :return: astropy Table Table of archive files """ params['dataset'] = dataset params['detector'] = detector params['level'] = level params['obsid'] = obsid text = _get_cda_service_text('archive_file_list', **params) dat = Table.read(text.splitlines(), format='ascii.basic', delimiter='\t', guess=False) # Original Filesize has commas for the thousands like 11,233,456 filesize = [int(x.replace(',', '')) for x in dat['Filesize']] dat['Filesize'] = filesize return dat def get_proposal_abstract(obsid=None, propnum=None, timeout=60): """Get a proposal abstract from the CDA services. One of ``obsid`` or ``propnum`` must be provided. :param obsid: int, str Observation ID :param propnum: str Proposal number, including leading zeros e.g. '08900073' :param timeout: int, float Timeout in seconds for the request :returns: dict Dictionary of proposal abstract """ params = {} if obsid is not None: params['obsid'] = obsid if propnum is not None: params['propNum'] = propnum if not params: raise ValueError('must provide obsid or propnum') html = _get_cda_service_text('prop_abstract', timeout=timeout, **params) text = html_to_text(html) # Return value is a text string with these section header lines. Use them # to split the text into sections. 
delims = ['Proposal Title', 'Proposal Number', 'Principal Investigator', 'Abstract', ''] out = {} for delim0, delim1 in zip(delims[:-1], delims[1:]): name = '_'.join(word.lower() for word in delim0.split()) if match := re.search(rf'{delim0}:(.+){delim1}:', text, re.DOTALL): out[name] = clean_text(match.group(1)) else: warnings.warn(f'failed to find {delim0} in result') return out def _update_params_from_kwargs(params, obsid, target_name, resolve_name, ra, dec, radius): """Update params dict for CDA Ocat queries from specified keyword args. """ if obsid is not None: params['obsid'] = obsid if ra is not None: params['ra'] = ra if dec is not None: params['dec'] = dec if target_name is not None: if resolve_name: coord = SkyCoord.from_name(target_name) params['ra'] = coord.ra.deg params['dec'] = coord.dec.deg
else: # SR services API us
es "target" to select substrings of target_name params['target'] = target_name # For any positional search include the radius if 'ra' in params and 'dec' in params: params['radius'] = radius return params def get_ocat_web(obsid=None, *, summary=False, target_name=None, resolve_name=False, ra=None, dec=None, radius=1.0, return_type='auto', timeout=60, **params): """ Get the Ocat target table data from Chandra Data Archive web services. {RETURN_TYPE_DOCS} {CDA_PARAM_DOCS} :param obsid: int, str Observation ID or string with ObsId range or list of ObsIds :param summary: bool Return summary data (26 columns) instead of full data (124 columns) {COMMON_PARAM_DOCS} :param timeout: int, float Timeout in seconds for the request (default=60) :param return_type: str Return type (default='auto' => Table or dict) :param **params: dict Parameters passed to CDA web service :return: astropy Table or dict of the observation details """ # These special params change the returned data and should always be a table if set(['acisWindows', 'rollReqs', 'timeReqs']) & set(params): return_type = 'table' if return_type not in ('auto', 'table'): raise ValueError(f"invalid return_type {return_type!r}, must be 'auto' or 'table'") _update_params_from_kwargs(params, obsid, target_name, resolve_name, ra, dec, radius) params['format'] = 'text' # Force RA, Dec in sexagesimal because decimal returns only 3 decimal digits # which is insufficient. params['outputCoordUnits'] = 'sexagesimal' service = 'ocat_summary' if summary else 'ocat_details' text = _get_cda_service_text(service, timeout=timeout, **params) dat = _get_table_or_dict_from_cda_rdb_text(text, return_type, params.get('obsid')) if dat is None: # Query returned no rows. If a single obsid was specified with return_type # of 'auto' then we would have expected to return a dict, but instead # raise a ValueError. Otherwise we return an empty table with the right # column names. 
if return_type == 'auto' and _is_int(params.get('obsid')): raise ValueError(f"failed to find obsid {params['obsid']}") else: dat = get_ocat_web(summary=summary, return_type='table', obsid=8000)[0:0] # Change RA, Dec to decimal if those columns exist try: ra, dec = dat['ra'], dat['dec'] except KeyError: pass else: sc = SkyCoord(ra, dec, unit='hr,deg') dat['ra'] = sc.ra.deg dat['dec'] = sc.dec.deg return dat get_ocat_web.__doc__ = get_ocat_web.__doc__.format( RETURN_TYPE_DOCS=RETURN_TYPE_DOCS, CDA_PARAM_DOCS=CDA_PARAM_DOCS, COMMON_PARAM_DOCS=COMMON_PARAM_DOCS) def _get_cda_service_text(service, timeout=60, **params): """ Fetch all observation details from one of the CDA SRService pages :param service: str Name of the service ('prop_abstract', 'ocat_summary', 'ocat_details', 'archive_file_list') :param timeout: int, float Timeout in seconds for the request :param **params: dict Additional parameters to pass to the service :return: str Returned text from the service """ if service not in CDA_SERVICES: raise ValueError(f'unknown service {service!r}, must be one of {list(CDA_SERVICES)}') # Query the service and check for errors url = f'{URL_CDA_SERVICES}/{CDA_SERVICES[service]}.do' verbose = params.pop('verbose', False) resp = requests.get(url, timeout=timeout, params=params) if verbose: print(f'GET {resp.url}') if not resp.ok: raise RuntimeError(f'got error {resp.status_code} for {resp.url}\n' f'{html_to_text(resp.text)}') return resp.text def _get_table_or_dict_from_cda_rdb_text(text, return_type, obsid): """Get astropy Table or dict from the quasi-RDB text returned by the CDA services. :param text: str Text returned by the CDA services for a format='text' query :param return_type: str Return type (default='auto' => Table or dict) :param obsid: int, str, None Observation ID if provided :return: astropy Table, dict, None Table of the returned data, or dict if just one obsid selected, or None if the query returned no data. 
""" lines = text.splitlines() # Convert the type line to standard RDB # First find the line that begins the column descriptions for i, line in enumerate(lines): if not line.startswith('#'):
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES

from swgpy.object import *


def create(kernel):
    """Build and return the Creature template for the mynock mobile.

    `kernel` is the engine kernel passed by the template loader; it is not
    read here (presumably kept for the uniform create() signature — confirm
    against the loader).
    """
    result = Creature()

    # Shared object template and attribute set for this mobile.
    result.template = "object/mobile/shared_mynock.iff"
    result.attribute_template_id = 9
    # STF (string table) reference: file "monster_name", key "mynock".
    result.stfName("monster_name","mynock")

    #### BEGIN MODIFICATIONS ####

    #### END MODIFICATIONS ####

    return result
#!/usr/bin/env python

# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Read a 3D image, rescale its intensities to [0, 255] and write each
# z-slice out as a separate numbered 2D image file.

import sys
import itk

itk.auto_progress(2)

if len(sys.argv) < 3:
    print("Usage: " + sys.argv[0] + " <InputFileName> <OutputFileName> [Extension]")
    sys.exit(1)

inputFileName = sys.argv[1]
outputFileName = sys.argv[2]

# Optional third argument selects the output format; default is PNG.
if len(sys.argv) > 3:
    extension = sys.argv[3]
else:
    extension = ".png"

# printf-style pattern: one output file per slice index.
fileNameFormat = outputFileName + "-%d" + extension

Dimension = 3

PixelType = itk.UC
InputImageType = itk.Image[PixelType, Dimension]

ReaderType = itk.ImageFileReader[InputImageType]
reader = ReaderType.New()
reader.SetFileName(inputFileName)

OutputPixelType = itk.UC
RescaleImageType = itk.Image[OutputPixelType, Dimension]

# Map the full input intensity range onto [0, 255].
RescaleFilterType = itk.RescaleIntensityImageFilter[InputImageType, RescaleImageType]
rescale = RescaleFilterType.New()
rescale.SetInput(reader.GetOutput())
rescale.SetOutputMinimum(0)
rescale.SetOutputMaximum(255)
# Run the pipeline now so the reader's region information is available below.
rescale.UpdateLargestPossibleRegion()

region = reader.GetOutput().GetLargestPossibleRegion()
size = region.GetSize()

# Generate one numbered file name per slice along the z axis.
fnames = itk.NumericSeriesFileNames.New()
fnames.SetStartIndex(0)
fnames.SetEndIndex(size[2] - 1)
fnames.SetIncrementIndex(1)
fnames.SetSeriesFormat(fileNameFormat)

# The series writer slices the 3D volume into 2D images.
OutputImageType = itk.Image[OutputPixelType, 2]
WriterType = itk.ImageSeriesWriter[RescaleImageType, OutputImageType]
writer = WriterType.New()
writer.SetInput(rescale.GetOutput())
writer.SetFileNames(fnames.GetFileNames())
writer.Update()
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-03-21 13:53
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):
    # Drops the ArticleAbstract model: the FK to Article is removed first,
    # then the model itself is deleted.

    dependencies = [
        ('erudit', '0086_auto_20180321_0717'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='articleabstract',
            name='article',
        ),
        migrations.DeleteModel(
            name='ArticleAbstract',
        ),
    ]
# OpenERP/Odoo addon manifest for the web view editor module.
{
    'name': 'View Editor',
    'category': 'Hidden',  # not listed in the Apps menu
    'description': """
OpenERP Web to edit views.
==========================

""",
    'version': '2.0',
    'depends': ['web'],  # requires the base web client module
    # XML data files loaded on install/update.
    'data' : [
        'views/web_view_editor.xml',
    ],
    # Client-side QWeb templates.
    'qweb': ['static/src/xml/view_editor.xml'],
    # Installed automatically once all dependencies are installed.
    'auto_install': True,
}
gh=None, get_smpar=False): """Load the initial values for Wilson coefficients from a wcxf.WC instance. Parameters: - `scale_high`: since Wilson coefficients are dimensionless in smeftrunner but not in WCxf, the high scale in GeV has to be provided. If this parameter is None (default), either a previously defined value will be used, or the scale attribute of the WC instance will be used. - `get_smpar`: boolean, optional, defaults to True. If True, an attempt is made to determine the SM parameters from the requirement of reproducing the correct SM masses and mixings at the electroweak scale. As approximations are involved, the result might or might not be reliable, depending on the size of the Wilson coefficients affecting the SM masses and mixings. If False, Standard Model parameters have to be provided separately and are assumed to be in the weak basis used for the Warsaw basis as defined in WCxf, i.e. in the basis where the down-type and charged lepton mass matrices are diagonal. """ import wcxf if wc.eft != 'SMEFT': raise ValueError("Wilson coefficients use wrong EFT.") if wc.basis != 'Warsaw': raise ValueError("Wilson coefficients use wrong basis.") if scale_high is not None: self.scale_high = scale_high elif self.scale_high is None: self.scale_high = wc.scale C = wcxf.translators.smeft.wcxf2arrays(wc.dict) keys_dim5 = ['llphiphi'] keys_dim6 = list(set(definitions.WC_keys_0f + definitions.WC_keys_2f + definitions.WC_keys_4f) - set(keys_dim5)) self.scale_in = wc.scale for k in keys_dim5: if k in C: C[k] = C[k]*self.scale_high for k in keys_dim6: if k in C: C[k] = C[k]*self.scale_high**2 C = definitions.symmetrize(C) # fill in zeros for missing WCs for k, s in definitions.C_keys_shape.items(): if k not in C and k not in definitions.SM_keys: if s == 1: C[k] = 0 else: C[k] = np.zeros(s) if self.C_in is None: self.C_in = C else: self.C_in.update(C) if get_smpar: self.C_in.update(self._get_sm_scale_in()) def load_wcxf(self, stream, get_smpar=True): """Load the initial 
values for Wilson coefficients from a file-like object or a
string in WCxf format. Note that Standard Model parameters have to be provided separately and are assumed to be in the weak basis used for the Warsaw basis as defined in WCxf, i.e. in the basis where the down-type and charged lepton mass matrices are diagonal.""" import wcxf wc = wcxf.WC.load(stream) self.set_initial_wcxf(wc, get_smpar=get_smpar) def dump(self, C_out, scale_out=None, stream
=None, fmt='lha', skip_redundant=True): """Return a string representation of the parameters and Wilson coefficients `C_out` in DSixTools output format. If `stream` is specified, export it to a file. `fmt` defaults to `lha` (the SLHA-like DSixTools format), but can also be `json` or `yaml` (see the pylha documentation).""" C = OrderedDict() if scale_out is not None: C['SCALES'] = {'values': [[1, self.scale_high], [2, scale_out]]} else: C['SCALES'] = {'values': [[1, self.scale_high]]} sm = io.sm_dict2lha(C_out)['BLOCK'] C.update(sm) wc = io.wc_dict2lha(C_out, skip_redundant=skip_redundant)['BLOCK'] C.update(wc) return pylha.dump({'BLOCK': C}, fmt=fmt, stream=stream) def get_wcxf(self, C_out, scale_out): """Return the Wilson coefficients `C_out` as a wcxf.WC instance. Note that the Wilson coefficients are rotated into the Warsaw basis as defined in WCxf, i.e. to the basis where the down-type and charged lepton mass matrices are diagonal.""" import wcxf C = self.rotate_defaultbasis(C_out) d = wcxf.translators.smeft.arrays2wcxf(C) basis = wcxf.Basis['SMEFT', 'Warsaw'] d = {k: v for k, v in d.items() if k in basis.all_wcs and v != 0} keys_dim5 = ['llphiphi'] keys_dim6 = list(set(definitions.WC_keys_0f + definitions.WC_keys_2f + definitions.WC_keys_4f) - set(keys_dim5)) for k in d: if k.split('_')[0] in keys_dim5: d[k] = d[k] / self.scale_high for k in d: if k.split('_')[0] in keys_dim6: d[k] = d[k] / self.scale_high**2 d = wcxf.WC.dict2values(d) wc = wcxf.WC('SMEFT', 'Warsaw', scale_out, d) return wc def dump_wcxf(self, C_out, scale_out, fmt='yaml', stream=None, **kwargs): """Return a string representation of the Wilson coefficients `C_out` in WCxf format. If `stream` is specified, export it to a file. `fmt` defaults to `yaml`, but can also be `json`. Note that the Wilson coefficients are rotated into the Warsaw basis as defined in WCxf, i.e. 
to the basis where the down-type and charged lepton mass matrices are diagonal.""" wc = self.get_wcxf(C_out, scale_out) return wc.dump(fmt=fmt, stream=stream, **kwargs) def rgevolve(self, scale_out, **kwargs): """Solve the SMEFT RGEs from the initial scale to `scale_out`. Returns a dictionary with parameters and Wilson coefficients at `scale_out`. Additional keyword arguments will be passed to the ODE solver `scipy.integrate.odeint`.""" self._check_initial() return rge.smeft_evolve(C_in=self.C_in, scale_high=self.scale_high, scale_in=self.scale_in, scale_out=scale_out, **kwargs) def rgevolve_leadinglog(self, scale_out): """Compute the leading logarithmix approximation to the solution of the SMEFT RGEs from the initial scale to `scale_out`. Returns a dictionary with parameters and Wilson coefficients. Much faster but less precise that `rgevolve`. """ self._check_initial() return rge.smeft_evolve_leadinglog(C_in=self.C_in, scale_high=self.scale_high, scale_in=self.scale_in, scale_out=scale_out) def _check_initial(self): """Check if initial values and scale as well as the new physics scale have been set.""" if self.C_in is None: raise Exception("You have to specify the initial conditions first.") if self.scale_in is None: raise Exception("You have to specify the initial scale first.") if self.scale_high is None: raise Exception("You have to specify the high scale first.") def rotate_defaultbasis(self, C): """Rotate all parameters to the basis where the running down-type quark and charged lepton mass matrices are diagonal and where the running up-type quark mass matrix has the form V.S, with V unitary and S real diagonal, and where the CKM and PMNS matrices have the standard phase convention.""" v = sqrt(2*C['m2'].real/C['Lambda'].real) Mep = v/sqrt(2) * (C['Ge'] - C['ephi'] * v**2/self.scale_high**2/2) Mup = v/sqrt(2) * (C['Gu'] - C['uphi'] * v**2/self.scale_high**2/2) Mdp = v/sqrt(2) * (C['Gd'] - C['dphi'] * v**2/self.scale_high**2/2) Mnup = -v**2 * C['llphiphi'] 
UeL, Me, UeR = ckmutil.diag.msvd(Mep) UuL, Mu, UuR = ckmutil.diag.msvd(Mup) UdL, Md, UdR = ckmutil.diag.msvd(Mdp) Unu, Mnu = ckmutil.diag.mtakfac(Mnup) UuL, UdL, UuR, UdR = ckmutil.phases.rephase_standard(UuL, UdL, UuR, UdR) Unu, UeL, UeR = ckmutil.phases.rephase_pmns_standard(Unu, UeL, UeR)
t_queue() self.SetBackgroundColour("WHITE") self.SetCursor(wx.StockCursor(wx.CURSOR_ARROW)) # Mouse buttons and motion wx.EVT_LEFT_DOWN(self, self.OnLeftDown) wx.EVT_LEFT_UP(self, self.OnLeftUp) wx.EVT_MOTION(self, self.OnMotion) wx.EVT_PAINT(self, self.OnPaint) wx.EVT_IDLE(self, self.OnIdle) self.SetMode("Select") # Register network events callback DispatchEvent. # See net_view.DispatchEvent() for details. model.Bind(net_model.ADD_NODE, self.DispatchEvent, self.add_node) model.Bind(net_model.REMOVE_NODE, self.DispatchEvent, self.del_node) model.Bind(net_model.ADD_LINK, self.DispatchEvent, self.add_radio_link) model.Bind(net_model.REMOVE_LINK, self.DispatchEvent, self.del_radio_link) model.Bind(net_model.NET_CHANGED, self.DispatchEvent, self.new_network) model.Bind(net_model.FORWARD_PACKET, self.DispatchEvent, self.forward_radio_packet) def DispatchEvent(self, callback, *args): """"Queue a net event to be handled on the GUI thread. Many wxPython functions do not work when invoked from a thread other than the main GUI thread. This is a problem for network events, because they occur during the listen thread that was spawned by simClient.py. The solution is to register a meta-callback, this method, with the network model. When DispatchEvent is invoked by the network model, it puts the original GUI callback, along with the arguments, on self.queue and then calls wx.WakeUpIdle(). This causes OnIdle to be invoked on the main GUI thread, which in turn invokes every callback that is on the queue, and these callbacks can invoke wxPython functions without fear of being on the wrong thread. This greatly simplifies the implementation of the callbacks (trust me).""" self.queue.put((callback, args)) # Cause an idle event to occur, which will invoke our idle handler. wx.WakeUpIdle() def FindNode(self, point): "Return the node
that contains the point." for n in self.node_dict.itervalues(): if n.HitTest(point): return n
return None def OnLeftDown(self, evt): node = self.FindNode(evt.GetPosition()) if node: self.dragNode = node self.dragStartPos = evt.GetPosition() def OnLeftUp(self, evt): if not self.dragImage or not self.dragNode: self.dragImage = None self.dragNode = None return # Hide the image, end dragging, and nuke out the drag image. self.dragImage.Hide() self.dragImage.EndDrag() self.dragImage = None dc = wx.ClientDC(self) # reposition and draw the shape self.dragNode.model.pos = ( self.dragNode.model.pos[0] + evt.GetPosition()[0] - self.dragStartPos[0], self.dragNode.model.pos[1] + evt.GetPosition()[1] - self.dragStartPos[1] ) self.dragNode.dragging = False self.dragNode.Draw(dc) # Update the network model. self.model.MoveNode(self.dragNode.model.id, self.dragNode.model.pos[0], self.dragNode.model.pos[1]) self.dragNode = None def OnRightDown(self, event): pass def OnRightUp(self, event): pass def OnMotion(self, evt): # Ignore mouse movement if we're not dragging. if not self.dragNode or not evt.Dragging() or not evt.LeftIsDown(): return # if we have a node, but haven't started dragging yet if self.dragNode and not self.dragImage: # only start the drag after having moved a couple pixels tolerance = 2 pt = evt.GetPosition() dx = abs(pt.x - self.dragStartPos.x) dy = abs(pt.y - self.dragStartPos.y) if dx <= tolerance and dy <= tolerance: return # Create a DragImage to draw this node while it is moving # (The drag image will update even as the bitmap is updating. Magical!) 
self.dragImage = wx.DragImage(self.dragNode.bmp, wx.StockCursor(wx.CURSOR_HAND)) hotspot = self.dragStartPos - self.dragNode.model.pos + [self.dragNode.node_radius, self.dragNode.node_radius] self.dragImage.BeginDrag(hotspot, self, False) self.dragImage.Move(pt) # erase the node since it will be drawn by the DragImage now dc = wx.ClientDC(self) for link in self.dragNode.model.incoming.itervalues(): if link not in self.link_dict: continue l = self.link_dict[link] l.Erase(dc) l.src.Draw(dc) for link in self.dragNode.model.outgoing.itervalues(): if link not in self.link_dict: continue l = self.link_dict[link] l.Erase(dc) l.dst.Draw(dc) self.dragNode.Erase(dc) self.dragNode.dragging = True self.dragImage.Show() # if we have node and image then move it elif self.dragNode and self.dragImage: self.dragImage.Move(evt.GetPosition()) def OnSize(self, event): pass def OnIdle(self, event): """Handle queued network events. See net_view.DispatchEvent().""" for callback, args in self.queue.get(): callback(*args) def OnPaint(self, event): """ Window expose events come here to refresh. 
""" dc = wx.PaintDC(self) self.Draw(dc) def Draw(self, dc): dc.BeginDrawing() # for Windows compatibility # Since we are a scrolling window we need to prepare the DC self.PrepareDC(dc) dc.SetBackground(wx.Brush(self.GetBackgroundColour())) dc.Clear() for link in self.link_dict.itervalues(): link.Draw(dc) for node in self.node_dict.itervalues(): node.Draw(dc) dc.EndDrawing() def SetMode(self, mode): self.mode = mode if self.mode == "Select": self.SetCursor(wx.StockCursor(wx.CURSOR_ARROW)) else: self.SetCursor(wx.StockCursor(wx.STANDARD_CURSOR)) # TODO do something about this color parm def add_node(self, nodemodel, color = 'BLUE'): n = node_view(nodemodel, color) self.node_dict[nodemodel] = n nodemodel.Bind(net_model.LED_CHANGED, self.DispatchEvent, self.node_state_changed) n.Update() dc = wx.ClientDC(self) n.Draw(dc) def del_node(self, node): if self.node_dict.has_key(node): dc = wx.ClientDC(self) self.node_dict[node].Erase(dc) del self.node_dict[node] def node_state_changed(self, node): if self.node_dict.has_key(node): n = self.node_dict[node] n.Update() dc = wx.ClientDC(self) n.Draw(dc) def add_radio_link(self, link): if self.node_dict.has_key(link.src) and self.node_dict.has_key(link.dst): src = self.node_dict[link.src] dst = self.node_dict[link.dst] l = link_view(src, dst) self.link_dict[link] = l dc = wx.ClientDC(self) l.Draw(dc) l.src.Draw(dc) l.dst.Draw(dc) def del_radio_link(self, link): if self.link_dict.has_key(link): l = self.link_dict[link] dc = wx.ClientDC(self) l.Erase(dc) l.src.Draw(dc) l.dst.Draw(dc) del self.link_dict[link] def new_network(self, model): self.node_dict.clear() self.link_dict.clear() self.dragNode = None self.dragImage = None dummy = self.queue.get() # empties the list for nodemodel in model.IterNodes(): n = node_view(nodemodel, 'BLUE') self.node_dict[nodemod
from typing import Any
from typing import List
from typing import Optional
from typing import Union

import bpy

import compas_blender
from compas.artists import PrimitiveArtist
from compas.geometry import Line
from compas.colors import Color
from compas_blender.artists import BlenderArtist


class LineArtist(BlenderArtist, PrimitiveArtist):
    """Artist for drawing lines in Blender.

    Parameters
    ----------
    line : :class:`~compas.geometry.Line`
        A COMPAS line.
    collection : str | :blender:`bpy.types.Collection`
        The Blender scene collection that will own the objects created by this artist.
    **kwargs : dict, optional
        Additional keyword arguments.
        For more info, see :class:`~compas_blender.artists.BlenderArtist` and :class:`~compas.artists.PrimitiveArtist`.

    Examples
    --------
    Use the Blender artist explicitly.

    .. code-block:: python

        from compas.geometry import Line
        from compas_blender.artists import LineArtist

        line = Line([0, 0, 0], [1, 1, 1])

        artist = LineArtist(line)
        artist.draw()

    Or, use the artist through the plugin mechanism.

    .. code-block:: python

        from compas.geometry import Line
        from compas.artists import Artist

        line = Line([0, 0, 0], [1, 1, 1])

        artist = Artist(line)
        artist.draw()

    """

    def __init__(self,
                 line: Line,
                 collection: Optional[Union[str, bpy.types.Collection]] = None,
                 **kwargs: Any
                 ):
        # When no collection is given, fall back to a collection named after the line.
        super().__init__(primitive=line, collection=collection or line.name, **kwargs)

    def draw(self, color: Optional[Color] = None, show_points: bool = False) -> List[bpy.types.Object]:
        """Draw the line.

        Parameters
        ----------
        color : tuple[int, int, int] | tuple[float, float, float] | :class:`~compas.colors.Color`, optional
            The RGB color of the line.
            The default color is :attr:`compas.artists.PrimitiveArtist.color`.
        show_points : bool, optional
            If True, draw the start and end point in addition to the line.

        Returns
        -------
        list[:blender:`bpy.types.Object`]
            The Blender objects created (points first, if requested, then the line).

        """
        rgb = Color.coerce(color) or self.color
        a = self.primitive.start
        b = self.primitive.end

        created: List[bpy.types.Object] = []
        if show_points:
            point_specs = [
                {'pos': a, 'name': f"{self.primitive.name}.start", 'color': rgb, 'radius': 0.01},
                {'pos': b, 'name': f"{self.primitive.name}.end", 'color': rgb, 'radius': 0.01},
            ]
            created.extend(compas_blender.draw_points(point_specs, collection=self.collection))

        line_spec = {'start': a, 'end': b, 'color': rgb, 'name': f"{self.primitive.name}"}
        created.extend(compas_blender.draw_lines([line_spec], collection=self.collection))
        return created
def find_cavities(grid):
    """Return the (row, col) positions of all cavities in *grid*.

    A cavity is an interior cell (not on the border) whose depth is strictly
    greater than the depth of each of its four orthogonal neighbours.

    Parameters:
        grid: square list of lists of ints.

    Returns:
        List of (i, j) tuples in row-major order.
    """
    n = len(grid)
    return [
        (i, j)
        for i in range(1, n - 1)
        for j in range(1, n - 1)
        if all(grid[i][j] > nb
               for nb in (grid[i - 1][j], grid[i + 1][j],
                          grid[i][j - 1], grid[i][j + 1]))
    ]


def mark_cavities(grid):
    """Render *grid* as newline-joined rows with each cavity replaced by 'X'.

    The input grid is not mutated (the original version overwrote int cells
    with the string 'X' in place, mixing types in the grid).
    """
    rows = [[str(v) for v in row] for row in grid]
    for i, j in find_cavities(grid):
        rows[i][j] = 'X'
    return '\n'.join(''.join(row) for row in rows)


def main():
    """Read an n x n depth map from stdin and print it with cavities marked."""
    n = int(input())
    grid = [[int(c) for c in input()] for _ in range(n)]
    print(mark_cavities(grid))


if __name__ == '__main__':
    main()
import tkinter

# Top-level window: fixed size, always on top of other windows.
tk = tkinter.Tk()
tk.title("Bounce")
tk.resizable(0, 0)
tk.wm_attributes("-topmost", 1)

# Drawing surface. bd=0 removes the border (reportedly a no-op on Linux,
# useful on Mac); highlightthickness=0 makes the 0/0 edge lines visible.
canvas = tkinter.Canvas(tk, width=500, height=400, bd=0, highlightthickness=0)
canvas.pack()

# A small red ball that will be animated across the canvas.
ball = canvas.create_oval(10, 10, 25, 25, fill='red')


def handle_timer_event():
    """Move the ball 10px to the right, then reschedule in 100 ms."""
    canvas.move(ball, 10, 0)
    tk.after(100, handle_timer_event)


handle_timer_event()
tk.mainloop()
hat on windows this DLL is automatically provided for you discord.opus.load_opus('opus') class VoiceEntry: def __init__(self, message, player): self.requester = message.author self.channel = message.channel self.player = player def __str__(self): fmt = '*{0.title}* uploaded by {0.uploader} and requested by {1.display_name}' duration = self.player.duration if duration: fmt = fmt + ' [length: {0[0]}m {0[1]}s]'.format(divmod(duration, 60)) return fmt.format(self.player, self.requester) class VoiceState: def __init__(self, bot): self.current = None self.voice = None self.bot = bot self.play_next_song = asyncio.Event() self.songs = asyncio.Queue() self.skip_votes = set() # a set of user_ids that voted self.audio_player = self.bot.loop.create_task(self.audio_player_task()) def is_playing(self): if self.voice is None or self.current is None: return False player = self.current.player return not player.is_done() @property def player(self): return self.current.player def skip(self): self.skip_votes.clear() if self.is_playing(): self.player.stop() def toggle_next(self): self.bot.loop.call_soon_threadsafe(self.play_next_song.set) async def audio_player_task(self): while True: self.play_next_song.clear() self.current = await self.songs.get() await self.bot.send_message(self.current.channel, 'Now playing ' + str(self.current)) self.current.player.start() await self.play_next_song.wait() class Music: """Voice related commands. Works in multiple servers at once. """ def __init__(self, bot): self.bot = bot self.voice_states = {} def get_voice_state(self, server): state = self.voice_states.get(server.id) if state is None: state = VoiceState(self.bot) self.voice_states[server.id] = state return state async def create_voice_client(self, channel): voice = await self.bot.join_voice_channel(channel) state = self.get_voice_state(channel.serv
er) state.voice = voice def __unload(self): for state in self.voice_states.values(): try: state.audio_player.cancel()
if state.voice: self.bot.loop.create_task(state.voice.disconnect()) except: pass @commands.command(pass_context=True, no_pm=True) async def join(self, ctx, *, channel : discord.Channel): """Joins a voice channel.""" try: await self.create_voice_client(channel) except discord.ClientException: await self.bot.say('Already in a voice channel...') except discord.InvalidArgument: await self.bot.say('This is not a voice channel...') else: await self.bot.say('Ready to play audio in ' + channel.name) @commands.command(pass_context=True, no_pm=True) async def summon(self, ctx): """Summons the bot to join your voice channel.""" summoned_channel = ctx.message.author.voice_channel if summoned_channel is None: await self.bot.say('You are not in a voice channel.') return False state = self.get_voice_state(ctx.message.server) if state.voice is None: state.voice = await self.bot.join_voice_channel(summoned_channel) else: await state.voice.move_to(summoned_channel) return True @commands.command(pass_context=True, no_pm=True) async def play(self, ctx, *, song : str): """Plays a song. If there is a song currently in the queue, then it is queued until the next song is done playing. This command automatically searches as well from YouTube. 
The list of supported sites can be found here: https://rg3.github.io/youtube-dl/supportedsites.html """ state = self.get_voice_state(ctx.message.server) opts = { 'default_search': 'auto', 'quiet': True, } if state.voice is None: success = await ctx.invoke(self.summon) if not success: return try: player = await state.voice.create_ytdl_player(song, ytdl_options=opts, after=state.toggle_next) except Exception as e: fmt = 'An error occurred while processing this request: ```py\n{}: {}\n```' await self.bot.send_message(ctx.message.channel, fmt.format(type(e).__name__, e)) else: player.volume = 0.6 entry = VoiceEntry(ctx.message, player) await self.bot.say('Enqueued ' + str(entry)) await state.songs.put(entry) @commands.command(pass_context=True, no_pm=True) async def volume(self, ctx, value : int): """Sets the volume of the currently playing song.""" state = self.get_voice_state(ctx.message.server) if state.is_playing(): player = state.player player.volume = value / 100 await self.bot.say('Set the volume to {:.0%}'.format(player.volume)) @commands.command(pass_context=True, no_pm=True) async def pause(self, ctx): """Pauses the currently played song.""" state = self.get_voice_state(ctx.message.server) if state.is_playing(): player = state.player player.pause() @commands.command(pass_context=True, no_pm=True) async def resume(self, ctx): """Resumes the currently played song.""" state = self.get_voice_state(ctx.message.server) if state.is_playing(): player = state.player player.resume() @commands.command(pass_context=True, no_pm=True) async def stop(self, ctx): """Stops playing audio and leaves the voice channel. This also clears the queue. """ server = ctx.message.server state = self.get_voice_state(server) if state.is_playing(): player = state.player player.stop() try: state.audio_player.cancel() del self.voice_states[server.id] await state.voice.disconnect() except: pass @commands.command(pass_context=True, no_pm=True) async def skip(self, ctx): """Vote to skip a song. 
The song requester can automatically skip. 3 skip votes are needed for the song to be skipped. """ state = self.get_voice_state(ctx.message.server) if not state.is_playing(): await self.bot.say('Not playing any music right now...') return voter = ctx.message.author if voter == state.current.requester: await self.bot.say('Requester requested skipping song...') state.skip() elif voter.id not in state.skip_votes: state.skip_votes.add(voter.id) total_votes = len(state.skip_votes) if total_votes >= 3: await self.bot.say('Skip vote passed, skipping song...') state.skip() else: await self.bot.say('Skip vote added, currently at [{}/3]'.format(total_votes)) else: await self.bot.say('You have already voted to skip this song.') @commands.command(pass_context=True, no_pm=True) async def playing(self, ctx): """Shows info about the currently played song.""" state = self.get_voice_state(ctx.message.server) if state.current is None: await self.bot.say('Not playing anything.') else: skip_count = len(state.skip_votes) await self.bot.say('Now playing {} [skips: {}/3]'.format(state.current, skip_count)) bot = commands.Bot(command_prefix=commands.when_mentioned_or('$'), description='A playlist example for discord.py') bot.add_cog(Music(bot)) @bot.event async def on_ready(): print('Logged in a
import csv
import os
import tempfile

from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from django.db import models
from django.db import transaction

from converter.exceptions import UploadException
from .models import SystemSource, Reference, ReferenceKeyValue


class TimeStampedModel(models.Model):
    """Abstract base model with self-updating ``created`` and ``modified`` fields."""
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True


class LoginRequiredMixin:
    """View mixin that redirects anonymous users to the "auth:login" page."""

    @method_decorator(login_required(login_url=reverse_lazy("auth:login")))
    def dispatch(self, request, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)


def ss_handle_uploaded_file(f):
    """Persist an uploaded CSV to a scratch file and import its rows.

    Each row's first column names the record type:
      - ``system``:    creates a SystemSource
      - ``reference``: creates a Reference attached to the last SystemSource
      - ``content``:   creates a ReferenceKeyValue attached to the last Reference

    All inserts run in a single transaction; the scratch file is removed after
    a successful import.

    Raises:
        UploadException: on an unknown record type, or when a ``reference``/
            ``content`` row appears before its parent row (the original code
            raised an unrelated NameError in that case).
    """
    # Use the system temp dir instead of a hard-coded per-developer path
    # ('C:/Users/nmorozov/...' in the original, '/home/niko/' commented out).
    filepath = os.path.join(tempfile.gettempdir(), f.name)
    with open(filepath, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)

    ss = None         # last SystemSource seen; parent of `reference` rows
    reference = None  # last Reference seen; parent of `content` rows
    with open(filepath, newline='') as csvfile:
        iterator = csv.reader(csvfile, delimiter=',', quotechar='|')
        with transaction.atomic():
            for obj in iterator:
                record_type = safe_get(obj, 0)
                if record_type == "system":
                    ss = SystemSource(code=safe_get(obj, 1),
                                      fullname=safe_get(obj, 2))
                    ss.save()
                elif record_type == "reference":
                    if ss is None:
                        # A reference row before any system row is malformed.
                        raise UploadException("Parse error")
                    reference = Reference(code=safe_get(obj, 1),
                                          fullname=safe_get(obj, 2),
                                          table_name=safe_get(obj, 3),
                                          table_charset=safe_get(obj, 4),
                                          jdbc_source=safe_get(obj, 5),
                                          replication_sql=safe_get(obj, 6),
                                          master_id=ss)
                    reference.save()
                elif record_type == "content":
                    if reference is None:
                        # A content row before any reference row is malformed.
                        raise UploadException("Parse error")
                    content = ReferenceKeyValue(key=safe_get(obj, 1),
                                                value=safe_get(obj, 2),
                                                reference_id=reference)
                    content.save()
                else:
                    raise UploadException("Parse error")
    os.remove(filepath)


def safe_get(_list, _index, _default=""):
    """Return ``_list[_index]``, or ``_default`` when the index is out of range."""
    try:
        return _list[_index]
    except IndexError:
        return _default
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ImportTaxonomies
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.

# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-datacatalog


# [START datacatalog_generated_datacatalog_v1_PolicyTagManagerSerialization_ImportTaxonomies_sync]
from google.cloud import datacatalog_v1


def sample_import_taxonomies():
    """Import taxonomies into a parent resource from an inline source (synchronous)."""
    # Create a client
    client = datacatalog_v1.PolicyTagManagerSerializationClient()

    # Initialize request argument(s)
    inline_source = datacatalog_v1.InlineSource()
    # NOTE(review): `taxonomies` is presumably a repeated message field; assigning
    # to `.display_name` on it relies on proto-plus auto-append behavior — confirm.
    inline_source.taxonomies.display_name = "display_name_value"

    request = datacatalog_v1.ImportTaxonomiesRequest(
        inline_source=inline_source,
        parent="parent_value",
    )

    # Make the request
    response = client.import_taxonomies(request=request)

    # Handle the response
    print(response)

# [END datacatalog_generated_datacatalog_v1_PolicyTagManagerSerialization_ImportTaxonomies_sync]
import multiprocessing
import sys

import Library.interfaz
import Library.config
# Fix: Library.Encriptacion is used below but was never imported, which would
# fail at runtime with an AttributeError on the Library package.
import Library.Encriptacion
import handler
import server

# Load configuration up front; nothing else can start without it.
try:
    config = Library.config.read()
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
    print("FAILED TO OPEN CONFIG FILE, EXITING")
    sys.exit()

# Manager-backed flag shared with the worker processes to signal shutdown.
man = multiprocessing.Manager()
adios = man.Value(bool, False)

interfaz = Library.interfaz.Interfaz(lang=config["lang"])
hand = handler.Handler(interfaz, adios)
hand.pantalla("INIT", prompt=False)
input("")

key_bits = int(config["key_length"])
hand.pantalla("GENERATING_KEY", args=(key_bits,), prompt=False)

# Named `srv` so the instance no longer shadows the `server` module
# (the original rebound the module name to the instance).
srv = server.Server(adios, hand, Library.Encriptacion.genera(key_bits),
                    ip=config["host"], port=int(config["port"]))

g = multiprocessing.Process(target=srv.listen)
p = multiprocessing.Process(target=srv.server_handler)
# NOTE(review): p2 was created but never started in the original; the handler
# loop runs on the main process via hand.listen() below. Kept as-is to
# preserve behavior — confirm whether p2 should be started or removed.
p2 = multiprocessing.Process(target=hand.listen, args=(srv, ))
p.start()
g.start()
hand.listen(srv)
adios.value = True
p.join()
g.join()
srv.handler.exit()
# -*- coding: utf-8 -*-
#
# Viper documentation build configuration file, created by
# sphinx-quickstart on Mon May 5 18:24:15 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# (None are enabled for this project.)
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Viper'
copyright = u'2014, Claudio Guarnieri'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1'
# The full version, including alpha/beta/rc tags.
release = '1.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Viperdoc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Viper.tex', u'Viper Documentation',
   u'Claudio Guarnieri', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'viper', u'Viper Documentation',
     [u'Claudio Guarnieri'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'Viper', u'Viper Documentation',
   u'Claudio Guarnieri', 'Viper', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# coding: utf-8

"""
    EVE Swagger Interface

    An OpenAPI for EVE Online

    OpenAPI spec version: 0.4.6

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from pprint import pformat


class GetOpportunitiesTasksTaskIdOk(object):
    """
    Swagger model for the 200 response of the opportunities task endpoint.

    NOTE: This class was auto generated by the swagger code generator
    program and then hand-tidied: the `six.iteritems` dependency was
    replaced by plain ``dict.items()`` (equivalent on Python 2 and 3)
    and the unused ``re`` import was dropped.
    """

    def __init__(self, description=None, name=None, notification=None,
                 task_id=None):
        """
        GetOpportunitiesTasksTaskIdOk - a model defined in Swagger.

        :param description: description string
        :param name: name string
        :param notification: notification string
        :param task_id: task_id integer

        The constructor assigns the private fields directly, so the
        setters' None-validation deliberately does not run here (all
        parameters default to None, matching the generated code).
        """
        # The key is attribute name and the value is attribute type.
        self.swagger_types = {
            'description': 'str',
            'name': 'str',
            'notification': 'str',
            'task_id': 'int'
        }

        # The key is attribute name and the value is json key in definition.
        self.attribute_map = {
            'description': 'description',
            'name': 'name',
            'notification': 'notification',
            'task_id': 'task_id'
        }

        self._description = description
        self._name = name
        self._notification = notification
        self._task_id = task_id

    @property
    def description(self):
        """
        Gets the description of this GetOpportunitiesTasksTaskIdOk.
        description string

        :return: The description of this GetOpportunitiesTasksTaskIdOk.
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """
        Sets the description of this GetOpportunitiesTasksTaskIdOk.
        description string

        :param description: The description of this GetOpportunitiesTasksTaskIdOk.
        :type: str
        :raises ValueError: if description is None
        """
        if description is None:
            raise ValueError("Invalid value for `description`, must not be `None`")
        self._description = description

    @property
    def name(self):
        """
        Gets the name of this GetOpportunitiesTasksTaskIdOk.
        name string

        :return: The name of this GetOpportunitiesTasksTaskIdOk.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this GetOpportunitiesTasksTaskIdOk.
        name string

        :param name: The name of this GetOpportunitiesTasksTaskIdOk.
        :type: str
        :raises ValueError: if name is None
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._name = name

    @property
    def notification(self):
        """
        Gets the notification of this GetOpportunitiesTasksTaskIdOk.
        notification string

        :return: The notification of this GetOpportunitiesTasksTaskIdOk.
        :rtype: str
        """
        return self._notification

    @notification.setter
    def notification(self, notification):
        """
        Sets the notification of this GetOpportunitiesTasksTaskIdOk.
        notification string

        :param notification: The notification of this GetOpportunitiesTasksTaskIdOk.
        :type: str
        :raises ValueError: if notification is None
        """
        if notification is None:
            raise ValueError("Invalid value for `notification`, must not be `None`")
        self._notification = notification

    @property
    def task_id(self):
        """
        Gets the task_id of this GetOpportunitiesTasksTaskIdOk.
        task_id integer

        :return: The task_id of this GetOpportunitiesTasksTaskIdOk.
        :rtype: int
        """
        return self._task_id

    @task_id.setter
    def task_id(self, task_id):
        """
        Sets the task_id of this GetOpportunitiesTasksTaskIdOk.
        task_id integer

        :param task_id: The task_id of this GetOpportunitiesTasksTaskIdOk.
        :type: int
        :raises ValueError: if task_id is None
        """
        if task_id is None:
            raise ValueError("Invalid value for `task_id`, must not be `None`")
        self._task_id = task_id

    def to_dict(self):
        """
        Returns the model properties as a dict, recursively converting
        nested models (anything with a ``to_dict``), lists and dicts.
        """
        result = {}

        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, GetOpportunitiesTasksTaskIdOk):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
# -*- coding: utf-8 -*-
"""MISP expansion/hover module that looks up an IOC on ThreatFox (abuse.ch)."""
import json

import requests

misperrors = {'error': 'Error'}
mispattributes = {'input': ['md5', 'sha1', 'sha256', 'domain', 'url', 'email-src', 'ip-dst|port', 'ip-src|port'],
                  'output': ['text']}
moduleinfo = {'version': '0.1', 'author': 'Corsin Camichel',
              'description': 'Module to search for an IOC on ThreatFox by abuse.ch.',
              'module-type': ['hover', 'expansion']}
moduleconfig = []

API_URL = "https://threatfox-api.abuse.ch/api/v1/"


# copied from
# https://github.com/marjatech/threatfox2misp/blob/main/threatfox2misp.py
def confidence_level_to_tag(level: int) -> str:
    """Map a ThreatFox confidence level (0-100) to a MISP confidence tag.

    Returns the tag of the highest threshold that ``level`` reaches,
    or an empty string for levels below 0.
    """
    confidence_tagging = {
        0: 'misp:confidence-level="unconfident"',
        10: 'misp:confidence-level="rarely-confident"',
        37: 'misp:confidence-level="fairly-confident"',
        63: 'misp:confidence-level="usually-confident"',
        90: 'misp:confidence-level="completely-confident"',
    }
    confidence_tag = ""
    # Thresholds are in ascending insertion order, so the last match wins.
    for tag_minvalue, tag in confidence_tagging.items():
        if level >= tag_minvalue:
            confidence_tag = tag
    return confidence_tag


def handler(q=False):
    """MISP module entry point: query ThreatFox for the submitted attribute.

    :param q: JSON-encoded MISP request, or False.
    :return: a MISP results dict on success, ``misperrors`` on failure.

    Fixes over the original: on HTTP errors or a miss the function used to
    return an empty string (``ret_val = ""``), which MISP cannot interpret —
    it now returns the ``misperrors`` dict; the request carries a timeout so
    a stalled API cannot hang the worker; and ``result["data"]`` is checked
    before indexing to avoid an IndexError on empty result sets.
    """
    if q is False:
        return False
    request = json.loads(q)

    # Find the first supported attribute type present in the request.
    for input_type in mispattributes['input']:
        if input_type in request:
            to_query = request[input_type]
            break
    else:
        misperrors['error'] = "Unsupported attributes type:"
        return misperrors

    data = {"query": "search_ioc", "search_term": f"{to_query}"}
    try:
        response = requests.post(API_URL, data=json.dumps(data), timeout=30)
    except requests.RequestException as ex:
        misperrors['error'] = f"ThreatFox API request failed: {ex}"
        return misperrors

    if response.status_code != 200:
        misperrors['error'] = f"ThreatFox API returned HTTP status {response.status_code}"
        return misperrors

    result = json.loads(response.text)
    if result["query_status"] != "ok" or not result.get("data"):
        misperrors['error'] = "No result found on ThreatFox"
        return misperrors

    # Only the first (most relevant) entry is reported, as before.
    entry = result["data"][0]
    confidence_tag = confidence_level_to_tag(entry["confidence_level"])
    return {'results': [{'types': mispattributes['output'],
                         'values': [entry["threat_type_desc"]],
                         'tags': [entry["malware"], entry["malware_printable"], confidence_tag]}]}


def introspection():
    """Tell MISP which attribute types this module consumes and produces."""
    return mispattributes


def version():
    """Return module metadata, including the (empty) config option list."""
    moduleinfo['config'] = moduleconfig
    return moduleinfo
# -*- coding: utf-8 -*-
from py3Des.pyDes import triple_des, ECB, PAD_PKCS5


class TripleDES:
    """Static wrapper around one shared triple-DES (ECB, PKCS5-padded) cipher.

    ``init()`` must be called once before ``encrypt``/``decrypt`` are used;
    until then the shared cipher is None and the helpers will fail.
    """

    # Shared cipher instance; None until init() has been called.
    __triple_des = None

    @staticmethod
    def init(key='1234567812345678'):
        """Initialise the shared cipher.

        :param key: 16-byte triple-DES key. Defaults to the previously
            hard-coded key so existing ``init()`` callers are unaffected.
            NOTE(review): the hard-coded default key is insecure — callers
            should pass their own secret key.
        """
        # ECB mode ignores the IV; it is kept only to preserve the original
        # constructor call. Padding is handled by padmode (PKCS5), so the
        # fixed pad character is None.
        TripleDES.__triple_des = triple_des(key, mode=ECB,
                                            IV='\0\0\0\0\0\0\0\0',
                                            pad=None, padmode=PAD_PKCS5)

    @staticmethod
    def encrypt(data):
        """Encrypt ``data`` with the shared cipher (requires prior init())."""
        return TripleDES.__triple_des.encrypt(data)

    @staticmethod
    def decrypt(data):
        """Decrypt ``data`` with the shared cipher (requires prior init())."""
        return TripleDES.__triple_des.decrypt(data)