#!/usr/bin/env python3
"""
URL: http://docs.graphene-python.org/en/latest/types/enums/
"""
import graphene


class Episode(graphene.Enum):
    NEWHOPE = 4
    EMPIRE = 5
    JEDI = 6

    @property
    def description(self):
        if self == Episode.NEWHOPE:
            return 'New Hope Episode'
        return 'Other episode'
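
# Usage sketch -- an assumption based on the graphene docs linked above, not
# part of the original snippet: enum members behave like Python enum values,
# so the `description` property can be read directly from a member.
if __name__ == '__main__':
    print(Episode.NEWHOPE.description)  # 'New Hope Episode'
    print(Episode.EMPIRE.description)   # 'Other episode'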
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
"""
Simulate a Lennard-Jones liquid with charges. The P3M method is used to
calculate electrostatic interactions.
"""
import argparse

import numpy as np

import espressomd

required_features = ["P3M", "WCA"]
espressomd.assert_features(required_features)

from espressomd import electrostatics

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("--cpu", action="store_const", dest="mode",
                   const="cpu", help="P3M on CPU", default="cpu")
group.add_argument("--gpu", action="store_const", dest="mode",
                   const="gpu", help="P3M on GPU")
args = parser.parse_args()

print("""
=======================================================
=                      p3m.py                         =
=======================================================
""")

# System parameters
#############################################################

box_l = 10
density = 0.3

# Interaction parameters (repulsive Lennard-Jones)
#############################################################

wca_eps = 10.0
wca_sig = 1.0

# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l] * 3)
np.random.seed(seed=42)

system.time_step = 0.01
system.cell_system.skin = 0.4

# warmup integration (steepest descent)
warm_steps = 20
warm_n_times = 30
# convergence criterion (particles are separated by at least 90% sigma)
min_dist = 0.9 * wca_sig

# integration
int_steps = 1000
int_n_times = 10

#############################################################
#  Setup System                                             #
#############################################################

# Interaction setup
#############################################################

system.non_bonded_inter[0, 0].wca.set_params(epsilon=wca_eps, sigma=wca_sig)

print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].wca.get_params())

# Particle setup
#############################################################

volume = box_l**3
n_part = int(volume * density)

for i in range(n_part):
    system.part.add(id=i, pos=np.random.random(3) * system.box_l)

print("Simulate {} particles in a cubic box {} at density {}."
      .format(n_part, box_l, density).strip())
print("Interactions:\n")
act_min_dist = system.analysis.min_dist()
print("Start with minimal distance {}".format(act_min_dist))

# Assign charges to particles
for i in range(n_part // 2 - 1):
    system.part[2 * i].q = -1.0
    system.part[2 * i + 1].q = 1.0

# P3M setup after charge assignment
#############################################################

print("\nSCRIPT--->Create p3m\n")
if args.mode == "gpu":
    p3m = electrostatics.P3MGPU(prefactor=2.0, accuracy=1e-2)
else:
    p3m = electrostatics.P3M(prefactor=1.0, accuracy=1e-2)

print("\nSCRIPT--->Add actor\n")
system.actors.add(p3m)

print("\nSCRIPT--->P3M parameter:\n")
p3m_params = p3m.get_params()
for key in list(p3m_params.keys()):
    print("{} = {}".format(key, p3m_params[key]))

print("\nSCRIPT--->Explicit tune call\n")
p3m.tune(accuracy=1e3)

print("\nSCRIPT--->P3M parameter:\n")
p3m_params = p3m.get_params()
for key in list(p3m_params.keys()):
    print("{} = {}".format(key, p3m_params[key]))

print(system.actors)

#############################################################
#  Warmup Integration                                       #
#############################################################

print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))

# minimize energy using min_dist as the convergence criterion
system.integrator.set_steepest_descent(f_max=0, gamma=1e-3,
                                       max_displacement=wca_sig / 100)
i = 0
while i < warm_n_times and system.analysis.min_dist() < min_dist:
    print("minimization: {:+.2e}".format(system.analysis.energy()["total"]))
    system.integrator.run(warm_steps)
    i += 1

print("minimization: {:+.2e}".format(system.analysis.energy()["total"]))
print()
system.integrator.set_vv()

# activate thermostat
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)

# Just to see what else we may get from the C++ core
import pprint
pprint.pprint(system.cell_system.get_state(), width=1)
# pprint.pprint(system.part.__getstate__(), width=1)
pprint.pprint(system.__getstate__())

#############################################################
#      Integration                                          #
#############################################################
print("\nStart integration: run {} times {} steps"
      .format(int_n_times, int_steps))

for i in range(int_n_times):
    print("run {} at time={:.2f}".format(i, system.time))
    system.integrator.run(int_steps)
    energies = system.analysis.energy()
    print(energies['total'])

# terminate program
print("\nFinished.")
"""autogenerated by genpy from hrl_lib/Pose3DOF.msg. Do not edit.""" import sys python3 = True if sys.hexversion > 0x03000000 else False import genpy import struct import std_msgs.msg class Pose3DOF(genpy.Message): _md5sum = "646ead44a0e6fecf4e14ca116f12b08b" _type = "hrl_lib/Pose3DOF" _has_header = True #flag to mark the presence of a Header object _full_text = """Header header float64 x float64 y float64 theta float64 dt ================================================================================ MSG: std_msgs/Header # Standard metadata for higher-level stamped data types. # This is generally used to communicate timestamped data # in a particular coordinate frame. # # sequence ID: consecutively increasing ID uint32 seq #Two-integer timestamp that is expressed as: # * stamp.secs: seconds (stamp_secs) since epoch # * stamp.nsecs: nanoseconds since stamp_secs # time-handling sugar is provided by the client library time stamp #Frame this data is associated with # 0: no frame # 1: global frame string frame_id """ __slots__ = ['header','x','y','theta','dt'] _slot_types = ['std_msgs/Header','float64','float64','float64','float64'] def __init__(self, *args, **kwds): """ Constructor. Any message fields that are implicitly/explicitly set to None will be assigned a default value. The recommend use is keyword arguments as this is more robust to future message changes. You cannot mix in-order arguments and keyword arguments. The available fields are: header,x,y,theta,dt :param args: complete set of field values, in .msg order :param kwds: use keyword arguments corresponding to message field names to set specific fields. """ if
args or kwds: super(Pose3DOF, self).__init__(*args, **kwds) #message fields cannot be None, assign default values for those that are if self.header is None: self.header = std_msgs.msg.Header() if self.x is None: self.x = 0. if self.y is None: self.y = 0. if self.theta is None: self.theta = 0. if self.dt is None: self.dt = 0. else:
self.header = std_msgs.msg.Header() self.x = 0. self.y = 0. self.theta = 0. self.dt = 0. def _get_types(self): """ internal API method """ return self._slot_types def serialize(self, buff): """ serialize message into buffer :param buff: buffer, ``StringIO`` """ try: _x = self buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs)) _x = self.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self buff.write(_struct_4d.pack(_x.x, _x.y, _x.theta, _x.dt)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x)))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x)))) def deserialize(self, str): """ unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str`` """ try: if self.header is None: self.header = std_msgs.msg.Header() end = 0 _x = self start = end end += 12 (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.header.frame_id = str[start:end].decode('utf-8') else: self.header.frame_id = str[start:end] _x = self start = end end += 32 (_x.x, _x.y, _x.theta, _x.dt,) = _struct_4d.unpack(str[start:end]) return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill def serialize_numpy(self, buff, numpy): """ serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module """ try: _x = self buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs)) _x = self.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self buff.write(_struct_4d.pack(_x.x, _x.y, _x.theta, _x.dt)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x)))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x)))) def deserialize_numpy(self, str, numpy): """ unpack serialized message in str into this message instance using numpy for array types :param str: byte array of serialized message, ``str`` :param numpy: numpy python module """ try: if self.header is None: self.header = std_msgs.msg.Header() end = 0 _x = self start = end end += 12 (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.header.frame_id = str[start:end].decode('utf-8') else: self.header.frame_id = str[start:end] _x = self start = end end += 32 (_x.x, _x.y, _x.theta, _x.dt,) = _struct_4d.unpack(str[start:end]) return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill _struct_I = genpy.struct_I _struct_3I = struct.Struct("<3I") _struct_4d = struct.Struct("<4d")
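
# Usage sketch (not part of the generated file): genpy messages serialize
# into any writable binary buffer, so io.BytesIO can exercise a
# serialize()/deserialize() round trip. The field values are illustrative.
if __name__ == '__main__':
    from io import BytesIO
    msg = Pose3DOF(x=1.0, y=2.0, theta=0.5, dt=0.1)
    buff = BytesIO()
    msg.serialize(buff)
    copy = Pose3DOF()
    copy.deserialize(buff.getvalue())
    assert (copy.x, copy.y, copy.theta, copy.dt) == (1.0, 2.0, 0.5, 0.1)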
3'}
    assert data[1] == {'Column1': 'data4', 'Column_2': 'data5', 'Column_3': 'data6'}
    assert data[2] == {'Column1': 'data 7', 'Column_2': '', 'Column_3': 'data 9'}
    assert data[3] == {'Column1': 'data10', 'Column_2': '', 'Column_3': ''}

    # Test that if we search for trailing data that is always found, then we
    # should get the whole thing parsed as a table from the header line
    data = parse_fixed_table(
        ['foo' + line for line in FIXED_CONTENT_4.splitlines()],
        heading_ignore=['fooColumn1 '],
        header_substitute=[('fooColumn1', 'Column1'), ('Column 2', 'Column_2'), ('Column 3', 'Column_3')],
        trailing_ignore=['foo']
    )
    assert len(data) == 6
    assert data[4] == {'Column1': 'fooTrailing', 'Column_2': 'non-data li', 'Column_3': 'ne'}
    assert data[5] == {'Column1': 'foo Another', 'Column_2': 'trailing no', 'Column_3': 'n-data line'}

    data = parse_fixed_table(FIXED_CONTENT_DUP_HEADER_PREFIXES.splitlines())
    assert data[0] == {'NAMESPACE': 'default', 'NAME': 'foo', 'LABELS': 'app=superawesome'}

    data = parse_fixed_table(FIXED_CONTENT_5.splitlines())
    assert len(data) == 3


def test_parse_fixed_table_empty_exception():
    with pytest.raises(ParseException) as pe:
        parse_fixed_table(FIXED_CONTENT_1B.splitlines(), empty_exception=True)
    assert "Incorrect line:" in str(pe.value)


def test_optlist_standard():
    d = optlist_to_dict('key1,key2=val2,key1=val1,key3')
    assert sorted(d.keys()) == sorted(['key1', 'key2', 'key3'])
    assert d['key1'] == 'val1'
    assert d['key2'] == 'val2'
    assert d['key3'] is True


def test_optlist_no_vals():
    d = optlist_to_dict('key1,key2=val2,key1=val1,key3', kv_sep=None)
    assert sorted(d.keys()) == sorted(['key1', 'key1=val1', 'key2=val2', 'key3'])
    assert d['key1'] is True
    assert d['key1=val1'] is True
    assert d['key2=val2'] is True
    assert d['key3'] is True


def test_optlist_strip_quotes():
    d = optlist_to_dict(
        '''key1="foo",key2='bar',key3="mismatched quotes',key4="inner'quotes"''',
        strip_quotes=True
    )
    assert sorted(d.keys()) == sorted(['key1', 'key2', 'key3', 'key4'])
    assert d['key1'] == 'foo'
    assert d['key2'] == 'bar'
    assert d['key3'] == '"mismatched quotes\''
    assert d['key4'] == "inner'quotes"


def test_optlist_with_spaces():
    d = optlist_to_dict(
        '''key1=foo, key2=bar'''
    )
    assert 'key1' in d
    assert 'key2' in d


PS_AUX_TEST = """
USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root         1  0.0  0.0  19356  1544 ?        Ss   May31   0:01 /sbin/init
root      1821  0.0  0.0      0     0 ?        S    May31   0:25 [kondemand/0]
root      1864  0.0  0.0  18244   668 ?        Ss   May31   0:05 irqbalance --pid=/var/run/irqbalance.pid
user1    20160  0.0  0.0 108472  1896 pts/3    Ss   10:09   0:00 bash
root     20357  0.0  0.0   9120   760 ?        Ss   10:09   0:00 /sbin/dhclient -1 -q -lf /var/lib/dhclient/dhclient-extbr0.leases -pf /var/run/dhclient-extbr0.pid extbr0
qemu     22673  0.8 10.2 1618556 805636 ?      Sl   11:38   1:07 /usr/libexec/qemu-kvm -name rhel7 -S -M rhel6.5.0 -enable-kvm -m 1024 -smp 2,sockets=2,cores=1,threads=1 -uuid 13798ffc-bc1e-d437-4f3f-2e0fa6c923ad
"""

MISSING_DATA_TEST = """
WARNING: Locking disabled. Be careful! This could corrupt your metadata.
LVM2_PV_FMT|LVM2_PV_UUID|LVM2_DEV_SIZE|LVM2_PV_NAME|LVM2_PV_MAJOR|LVM2_PV_MINOR|LVM2_PV_MDA_FREE|LVM2_PV_MDA_SIZE|LVM2_PV_EXT_VSN|LVM2_PE_START|LVM2_PV_SIZE|LVM2_PV_FREE|LVM2_PV_USED|LVM2_PV_ATTR|LVM2_PV_ALLOCATABLE|LVM2_PV_EXPORTED|LVM2_PV_MISSING|LVM2_PV_PE_COUNT|LVM2_PV_PE_ALLOC_COUNT|LVM2_PV_TAGS|LVM2_PV_MDA_COUNT|LVM2_PV_MDA_USED_COUNT|LVM2_PV_BA_START|LVM2_PV_BA_SIZE|LVM2_PV_IN_USE|LVM2_PV_DUPLICATE|LVM2_VG_NAME
WARNING: Locking disabled. Be careful! This could corrupt your metadata.
"""

SUBSTITUTE_HEADERS_TEST = """
address,port,state,read-only
0.0.0.0,3000,LISTEN,N
10.76.19.184,37500,ESTAB,Y
""".strip()

POSTGRESQL_LOG = """
schema | table | rows
public | rhnsnapshotpackage | 47428950
public | rhnpackagefile | 32174333
public | rhnpackagecapability | 12934215
public | rhnpackagechangelogrec | 11269933
public | rhnchecksum | 10129746
public | rhnactionconfigrevision | 2894957
public | rhnpackageprovides | 2712442
public | rhnpackagerequires | 2532861
public | rhn_command_target | 1009152
public | rhnconfigfilename | 0
public | rhnxccdfidentsystem | 0
public | rhndistchannelmap | 0
public | rhnactionvirtshutdown | 0
public | rhnpublicchannelfamily | 0
(402 rows)
""".strip()

# Normally has a --- separator line, which is ignored using get_active_lines
TABLE1 = """
THIS IS A HEADER
 this is some content_with_blank_prefix
This is more content
""".strip()

TABLE2 = [
    "SID Nr Instance SAPLOCALHOST Version DIR_EXECUTABLE",
    "HA2| 16| D16| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D16/exe",
    "HA2| 22| D22| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D22/exe"
]

TABLE3 = """
THIS | IS | A | HEADER
this ^ is ^ some ^ content
This ^ is ^ more ^ content
""".strip()


def test_parse_delimited_table():
    # No content? No table.
    assert parse_delimited_table([]) == []

    # Test maximum splits and header 'ignore', which should actually be
    # called 'header_startswith'
    tbl = parse_delimited_table(
        PS_AUX_TEST.splitlines(), max_splits=10, heading_ignore=['USER']
    )
    assert tbl
    assert isinstance(tbl, list)
    assert len(tbl) == 6
    assert isinstance(tbl[0], dict)
    assert tbl[0] == {
        '%MEM': '0.0', 'TTY': '?', 'VSZ': '19356', 'PID': '1', '%CPU': '0.0',
        'START': 'May31', 'COMMAND': '/sbin/init', 'USER': 'root',
        'STAT': 'Ss', 'TIME': '0:01', 'RSS': '1544'
    }
    assert tbl[5]['COMMAND'] == \
        '/usr/libexec/qemu-kvm -name rhel7 -S -M rhel6.5.0 -enable-kvm -m 1024 -smp 2,sockets=2,cores=1,threads=1 -uuid 13798ffc-bc1e-d437-4f3f-2e0fa6c923ad'

    # Test trailing ignore not found
    tbl = parse_delimited_table(
        MISSING_DATA_TEST.splitlines(),
        delim='|',
        heading_ignore=['LVM2_PV_FMT'],
        trailing_ignore=['WARNING', 'ERROR', 'Cannot get lock']
    )
    assert isinstance(tbl, list)
    assert len(tbl) == 0

    # Header substitution
    tbl = parse_delimited_table(
        SUBSTITUTE_HEADERS_TEST.splitlines(), delim=',', strip=False,
        header_substitute=[('read-only', 'read_only')]
    )
    assert tbl
    assert isinstance(tbl, list)
    assert len(tbl) == 2
    assert isinstance(tbl[1], dict)
    assert tbl[1] == {
        'address': '10.76.19.184', 'port': '37500', 'state': 'ESTAB', 'read_only': 'Y'
    }

    # Test change of delimiter and trailing_ignore
    tbl = parse_delimited_table(POSTGRESQL_LOG.splitlines(), delim='|', trailing_ignore=['('])
    assert isinstance(tbl, list)
    assert len(tbl) == 14
    assert isinstance(tbl[0], dict)
    assert tbl[0] == {
        'schema': 'public', 'table': 'rhnsnapshotpackage', 'rows': '47428950'
    }

    # Test using different header delimiter
    result = parse_delimited_table(TABLE3.splitlines(), delim="^", header_delim="|")
    assert isinstance(result, list)
    assert len(result) == 2
    assert isinstance(result[0], dict)
    expected = [{"THIS": "this", "IS": "is", "A": "some", "HEADER": "content"},
                {"THIS": "This", "IS": "is", "A": "more", "HEADER": "content"}]
    assert expected == result

    # Test explicit None as header delimiter, different from content delimiter
    result = parse_delimited_table(TABLE2, delim='|', header_delim=None)
    assert isinstance(result, list)
    assert len(result) == 2
    assert isinstance(result[0], dict)
    expected = [{"SID": "HA2", "Nr": "16", "Instance": "D16", "SAPLOCALHOST": "lu0417",
                 "Version": "749, patch
"""Screen database.""" import redis_client import control import re from twisted.internet import defer class ScreenDB(object): """A screen database.""" def __init__(self): """Default constructor.""" pass def set_mode(self, screen, mode): redis_client.connection.set('screen:{0}:mode'.format(screen), mode) redis_client.connection.publish('screen:update', 'update') def set_override(self, screen, override): if override is not None: redis_client.connection.set('screen:{0}:override'.format(screen), override) else: redis_client.connection.delete('screen:{0}:override'.format(screen)) redis_client.connection.publish('screen:update', 'update') @defer.inlineCallbacks def list(self): screens = yield redis_client.connection.keys('screen:*:mode') entries = {} for screen in screens: screenID = screen.split(':')[1] mode = yield redis_client
.connection.get('screen:{0}:mode'.format(screenID)) host = yield redis_client.connection.get('screen:{0}:host'.format(screenID)) entries[screenID] = {'mode': mode, 'host'
: host} defer.returnValue(entries) screens = ScreenDB() @control.handler('screen-list') @defer.inlineCallbacks def perform_screen_list(responder, options): screen_list = yield screens.list() for screen, settings in screen_list.iteritems(): if settings['host'] is None: online_string = 'offline' else: online_string = 'online from {0} port {1}'.format(*settings['host'].split(' ')) responder('{0} - {1} ({2})'.format(screen, settings['mode'], online_string)) @control.handler('screen-set-mode') def perform_screen_set_mode(responder, options): screens.set_mode(options['<id>'], options['<mode>']) responder('Mode set.') @control.handler('screen-override') def perform_screen_override(responder, options): screens.set_override(options['<id>'], options['<message>']) responder('Override set.') @control.handler('screen-clear-override') def perform_screen_clear_override(responder, options): screens.set_override(options['<id>'], None) responder('Override cleared.') def got_screen(name): control.broadcast('Screen connected: {0}'.format(name)) redis_client.add_subscribe('screen:connect', got_screen)
from django.core.urlresolvers import reverse, reverse_lazy
from django.shortcuts import render, redirect
from django.views import generic

from common.models import Discipline, Performance


# renders index / home page
def index(request):
    return redirect(reverse('tournaments.main'))


# renders todo page
def process(request):
    return render(request, 'gymnastics/process.html', None)


def performances_index(request):
    context = {
        'performances': Performance.objects.all()
            .select_related('athlete').select_related('discipline')
    }
    return render(request, 'gymnastics/performances/index.html', context)


class PerformanceCreateView(generic.CreateView):
    model = Performance
    fields = ['athlete', 'discipline', 'value', 'value_final']
    template_name = 'gymnastics/performances/new.html'
    success_url = reverse_lazy('performances.index')


class PerformanceDetailView(generic.DetailView):
    model = Performance
    template_name = 'gymnastics/performances/detail.html'


class PerformanceUpdateView(generic.UpdateView):
    model = Performance
    fields = ['athlete', 'discipline', 'value', 'value_final']
    template_name = 'gymnastics/performances/edit.html'

    def get_success_url(self):
        return reverse('performances.detail', kwargs={'pk': self.kwargs['pk']})


class PerformanceDeleteView(generic.DeleteView):
    model = Performance
    template_name = 'gymnastics/performances/delete.html'
    success_url = reverse_lazy('performances.index')


def disciplines_index(request):
    context = {'disciplines': Discipline.objects.all()}
    return render(request, 'gymnastics/disciplines/index.html', context)


def discipline_detail(request, id, slug):
    discipline = Discipline.objects.get(id=id)
    streams = discipline.stream_set.all()
    performances = discipline.performance_set.all().select_related('athlete')

    context = {
        'discipline': discipline,
        'streams': streams,
        'performances': performances,
    }
    return render(request, 'gymnastics/disciplines/detail.html', context)


class DisciplineCreateView(generic.CreateView):
    model = Discipline
    fields = ['name']
    template_name = 'gymnastics/disciplines/new.html'


class DisciplineUpdateView(generic.UpdateView):
    model = Discipline
    fields = ['name']
    template_name = 'gymnastics/disciplines/edit.html'


class DisciplineDeleteView(generic.DeleteView):
    model = Discipline
    template_name = 'gymnastics/disciplines/delete.html'
    success_url = reverse_lazy('disciplines.index')
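
# URLconf sketch (hypothetical, not part of this file): the views above
# reverse names such as 'performances.index' and 'performances.detail',
# so the app's urls.py presumably maps them along these lines (the exact
# URL patterns are an assumption):
#
# from django.conf.urls import url
# from . import views
#
# urlpatterns = [
#     url(r'^performances/$', views.performances_index, name='performances.index'),
#     url(r'^performances/(?P<pk>\d+)/$',
#         views.PerformanceDetailView.as_view(), name='performances.detail'),
# ]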
"""Import an Irish NaPTAN XML file, obtainable from https://data.dublinked.ie/dataset/national-public-transport-nodes/resource/6d997756-4dba-40d8-8526-7385735dc345 """ import warnings import zipfile import xml.etree.cElementTree as ET from django.contrib.gis.geos import Point from django.core.management.base import BaseCommand from ...models import Locality, AdminArea, StopPoint class Command(BaseCommand): ns = {'naptan': 'http://www.naptan.org.uk/'} @staticmethod def add_arguments(parser): parser.add_argument('filenames', nargs='+', type=str) def handle_stop(self, element): stop = StopPoint( atco_code=element.find('naptan:AtcoCode', self.ns).text, locality_centre=element.find('naptan:Place/naptan:LocalityCentre', self.ns).text == 'true', active=element.get('Status') == 'active', ) for subelement in element.find('naptan:Descriptor', self.ns): tag = subelement.tag[27:] if tag == 'CommonName': stop.common_name = subelement.text elif tag == 'Street': stop.street = subelement.text elif tag == 'Indicator': stop.indicator = subelement.text.lower() else: warnings.warn('Stop {} has an unexpected property: {}'.format(stop.atco_code, tag)) stop_classification_element = element.find('naptan:StopClassification', self.ns) stop_type = stop_classification_element.find('naptan:StopType', self.ns).text if stop_type != 'class_undefined': stop.stop_type = stop_type bus_element = stop_classification_element.find('naptan:OnStreet/naptan:Bus', self.ns) if bus_element is not None: stop.bus_stop_type = bus_element.find('naptan:BusStopType', self.ns).text stop.timing_status = bus_element.find('naptan:TimingStatus', self.ns).text compass_point_element = bus_element.find( 'naptan:MarkedPoint/naptan:Bearing/naptan:CompassPoint', self.ns ) if compass_point_element is not None: stop.bearing = compass_point_element.text if stop.bus_stop_type == 'type_undefined': stop.bus_stop_type = '' place_element = element.find('naptan:Place', self.ns) location_element = place_element.find('naptan:Location', self.ns) longitude_element = location_element.find('naptan:Longitude', self.ns) latitude_element = location_element.find('naptan:Latitude', self.ns) if longitude_element is None: warnings.warn('Stop {} has no location'.format(stop.atco_code)) else: stop.latlong = Point(float(longitude_element.text), float(latitude_element.text)) admin_area_id = element.find('naptan:AdministrativeAreaRef', self.ns).text if not AdminArea.objects.filter(atco_code=admin_area_id).exists(): AdminArea.objects.create(id=admin_area_id, atco_code=admin_area_id, region_id='NI') stop.admin_area_id = admin_area_id locality_element = place_element.find('naptan:NptgLocalityRef', self.ns) if locality_element is not None: if not Locality.objects.filter(i
d=locality_element.text).exists(): Locality.objects.create(id=locality_element.text, admin_area_id=admin_area_id) stop.locality_id = locality_element.text stop.save() def handle_file(self, archive, filename): with archive.open(filename) as open_file: iterator = ET.iterparse(open_file)
for _, element in iterator: tag = element.tag[27:] if tag == 'StopPoint': self.handle_stop(element) element.clear() def handle(self, *args, **options): for filename in options['filenames']: with zipfile.ZipFile(filename) as archive: for filename in archive.namelist(): self.handle_file(archive, filename)
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.

from indico.core.db import db
from indico.modules.events.contributions.models.fields import ContributionFieldValueBase
from indico.util.string import format_repr, text_to_repr


class AbstractFieldValue(ContributionFieldValueBase):
    """Store field values related to abstracts."""

    __tablename__ = 'abstract_field_values'
    __table_args__ = {'schema': 'event_abstracts'}
    contribution_field_backref_name = 'abstract_values'

    abstract_id = db.Column(
        db.Integer,
        db.ForeignKey('event_abstracts.abstracts.id'),
        index=True,
        nullable=False,
        primary_key=True
    )

    # relationship backrefs:
    # - abstract (Abstract.field_values)

    def __repr__(self):
        text = text_to_repr(self.data) if isinstance(self.data, str) else self.data
        return format_repr(self, 'abstract_id', 'contribution_field_id', _text=text)
""" Infobip Client API Libraries OpenAPI Specification OpenAPI specification containing public endpoints supported in client API libraries. # noqa: E501 The version of the OpenAPI document: 1.0.172 Contact: support@infobip.com Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from infobip_api_client.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) class SmsDestination(ModelNormal): """ Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = {} validations = {} additional_properties_type = None _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name
and the value is attribute type. """ return { "to": (str,), # noqa: E501 "message_id": (str,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { "
to": "to", # noqa: E501 "message_id": "messageId", # noqa: E501 } _composed_schemas = {} required_properties = set( [ "_data_store", "_check_type", "_spec_property_naming", "_path_to_item", "_configuration", "_visited_composed_classes", ] ) @convert_js_args_to_python_args def __init__(self, to, *args, **kwargs): # noqa: E501 """SmsDestination - a model defined in OpenAPI Args: to (str): Message destination address. Addresses must be in international format (Example: `41793026727`). Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) message_id (str): The ID that uniquely identifies the message sent.. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) _spec_property_naming = kwargs.pop("_spec_property_naming", False) _path_to_item = kwargs.pop("_path_to_item", ()) _configuration = kwargs.pop("_configuration", None) _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.to = to for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map and self._configuration is not None and self._configuration.discard_unknown_keys and self.additional_properties_type is None ): # discard variable. continue setattr(self, var_name, var_value)
    def done():
        db.session.remove()
        db.drop_all()

    request.addfinalizer(done)

    # set token storage
    blueprint.storage = SQLAlchemyStorage(OAuth, db.session)

    # make users and OAuth tokens for several people
    alice = User(name="Alice")
    alice_token = {"access_token": "alice123", "token_type": "bearer"}
    alice_oauth = OAuth(user=alice, token=alice_token, provider="test-service")
    bob = User(name="Bob")
    bob_token = {"access_token": "bob456", "token_type": "bearer"}
    bob_oauth = OAuth(user=bob, token=bob_token, provider="test-service")
    sue = User(name="Sue")
    sue_token = {"access_token": "sue789", "token_type": "bearer"}
    sue_oauth = OAuth(user=sue, token=sue_token, provider="test-service")
    db.session.add_all([alice, bob, sue, alice_oauth, bob_oauth, sue_oauth])
    db.session.commit()

    # by default, we should not have a token for anyone
    sess = blueprint.session
    assert not sess.token
    assert not blueprint.token

    # load token for various users
    blueprint.config["user"] = alice
    assert sess.token == alice_token
    assert blueprint.token == alice_token

    blueprint.config["user"] = bob
    assert sess.token == bob_token
    assert blueprint.token == bob_token

    blueprint.config["user"] = alice
    assert sess.token == alice_token
    assert blueprint.token == alice_token

    blueprint.config["user"] = sue
    assert sess.token == sue_token
    assert blueprint.token == sue_token

    # load for user ID as well
    del blueprint.config["user"]
    blueprint.config["user_id"] = bob.id
    assert sess.token == bob_token
    assert blueprint.token == bob_token

    # try deleting user tokens
    del blueprint.token
    assert sess.token is None
    assert blueprint.token is None

    # shouldn't affect alice's token
    blueprint.config["user_id"] = alice.id
    assert sess.token == alice_token
    assert blueprint.token == alice_token


def test_sqla_flask_login(app, db, blueprint, request):
    login_manager = LoginManager(app)

    class User(db.Model, UserMixin):
        id = db.Column(db.Integer, primary_key=True)
        name = db.Column(db.String(80))

    class OAuth(OAuthConsumerMixin, db.Model):
        user_id = db.Column(db.Integer, db.ForeignKey(User.id))
        user = db.relationship(User)

    blueprint.storage = SQLAlchemyStorage(OAuth, db.session, user=current_user)

    db.create_all()

    def done():
        db.session.remove()
        db.drop_all()

    request.addfinalizer(done)

    # create some users
    u1 = User(name="Alice")
    u2 = User(name="Bob")
    u3 = User(name="Chuck")
    db.session.add_all([u1, u2, u3])
    db.session.commit()

    # configure login manager
    @login_manager.user_loader
    def load_user(userid):
        return User.query.get(userid)

    with record_queries(db.engine) as queries:
        with app.test_client() as client:
            # reset the session before the request
            with client.session_transaction() as sess:
                sess["test-service_oauth_state"] = "random-string"
                # set alice as the logged in user
                sess["_user_id"] = u1.id
            # make the request
            resp = client.get(
                "/login/test-service/authorized?code=secret-code&state=random-string",
                base_url="https://a.b.c",
            )
            # check that we redirected the client
            assert resp.status_code == 302
            assert resp.headers["Location"] == "https://a.b.c/oauth_done"

    assert len(queries) == 4

    # lets do it again, with Bob as the logged in user -- he gets a different token
    responses.reset()
    responses.add(
        responses.POST,
        "https://example.com/oauth/access_token",
        body='{"access_token":"abcdef","token_type":"bearer","scope":"bob"}',
    )

    with record_queries(db.engine) as queries:
        with app.test_client() as client:
            # reset the session before the request
            with client.session_transaction() as sess:
                sess["test-service_oauth_state"] = "random-string"
                # set bob as the logged in user
                sess["_user_id"] = u2.id
            # make the request
            resp = client.get(
                "/login/test-service/authorized?code=secret-code&state=random-string",
                base_url="https://a.b.c",
            )
            # check that we redirected the client
            assert resp.status_code == 302
            assert resp.headers["Location"] == "https://a.b.c/oauth_done"

    assert len(queries) == 4

    # check the database
    authorizations = OAuth.query.all()
    assert len(authorizations) == 2
    u1_oauth = OAuth.query.filter_by(user=u1).one()
    assert u1_oauth.provider == "test-service"
    assert u1_oauth.token == {
        "access_token": "foobar",
        "token_type": "bearer",
        "scope": [""],
    }
    u2_oauth = OAuth.query.filter_by(user=u2).one()
    assert u2_oauth.provider == "test-service"
    assert u2_oauth.token == {
        "access_token": "abcdef",
        "token_type": "bearer",
        "scope": ["bob"],
    }
    u3_oauth = OAuth.query.filter_by(user=u3).all()
    assert len(u3_oauth) == 0


@requires_blinker
def test_sqla_flask_login_misconfigured(app, db, blueprint, request):
    login_manager = LoginManager(app)

    class User(db.Model, UserMixin):
        id = db.Column(db.Integer, primary_key=True)
        name = db.Column(db.String(80))

    class OAuth(OAuthConsumerMixin, db.Model):
        user_id = db.Column(db.Integer, db.ForeignKey(User.id))
        user = db.relationship(User)

    blueprint.storage = SQLAlchemyStorage(OAuth, db.session, user=current_user)

    db.create_all()

    def done():
        db.session.remove()
        db.drop_all()

    request.addfinalizer(done)

    # configure login manager
    @login_manager.user_loader
    def load_user(userid):
        return User.query.get(userid)

    calls = []

    def callback(*args, **kwargs):
        calls.append((args, kwargs))

    oauth_error.connect(callback)
    request.addfinalizer(lambda: oauth_error.disconnect(callback))

    with app.test_client() as client:
        # reset the session before the request
        with client.session_transaction() as sess:
            sess["test-service_oauth_state"] = "random-string"
        # make the request
        resp = client.get(
            "/login/test-service/authorized?code=secret-code&state=random-string",
            base_url="https://a.b.c",
        )
        # check that we redirected the client
        assert resp.status_code == 302
        assert resp.headers["Location"] == "https://a.b.c/oauth_done"

    assert len(calls) == 1
    assert calls[0][0] == (blueprint,)
    error = calls[0][1]["error"]
    assert isinstance(error, ValueError)
    assert str(error) == "Cannot set OAuth token without an associated user"


@requires_blinker
def test_sqla_flask_login_anon_to_authed(app, db, blueprint, request):
    login_manager = LoginManager(app)

    class User(db.Model, UserMixin):
        id = db.Column(db.Integer, primary_key=True)
        name = db.Column(db.String(80))

    class OAuth(OAuthConsumerMixin, db.Model):
        user_id = db.Column(db.Integer, db.ForeignKey(User.id))
        user = db.relationship(User)

    blueprint.storage = SQLAlchemyStorage(OAuth, db.session, user=current_user)

    db.create_all()

    def done():
        db.session.remove()
        db.drop_all()

    request.addfinalizer(done)

    # configure login manager
    @login_manager.user_loader
    def load_user(userid):
        return User.query.get(userid)

    # create a user object when OAuth succeeds
    def logged_in(sender, token):
        assert token
        assert blueprint == sender
        resp = sender.session.get("/user")
        user = User(name=resp.json()["name"])
        login_user(user)
        db.session.add(user)
        db.session.commit()
        flask.flash("Signed in successfully")

    oauth_authorized.connect(logged_in, blueprint)
    request.addfinalizer(lambda: oauth_authorized.disconnect(logged_in, blueprint))

    # mock out th
ssed and which can then be used by the program as it sees fit.
    '''

    import argparse     # This module gives powerful argument parsing abilities along with auto-generation of --help output.

    # Specify the various arguments that the program expects and validate them. Additional arguments can be added as required.
    parser = argparse.ArgumentParser(
        description="A python script which simultaneously polls multiple IMAP accounts to display the subjects of all or only unseen messages in the specified folder (INBOX by default) without downloading complete messages.\n For further details please read the man page."
    )

    parser.add_argument(
        "-c", "--config",
        help="Specify the name and path to the configuration file. If not specified the program will use the default configuration file in $HOME/.fetchheaders/fetchheaders.conf. Note: The configuration specification file (fetchheaders.conf.spec) should not be altered casually and the program will only look for it in $HOME/.fetchheaders/"
    )

    # For --accounts and --exclude, which we wish to be mutually exclusive optional arguments,
    # we create a mutually exclusive group within the parser to hold them.
    group = parser.add_mutually_exclusive_group()

    group.add_argument(
        "-a", "--accounts",
        help="Specify the names of IMAP accounts to be polled as a comma-separated list. e.g. -a Gmail,Hotmail. Only accounts specified in the configuration file are allowed."
    )

    group.add_argument(
        "-x", "--exclude",
        help="Specify the names of the IMAP accounts which are NOT to be polled, as a comma-separated list. e.g. -x Gmail,Hotmail. Only accounts specified in the configuration file are allowed to be excluded."
    )

    parser.add_argument(
        "-n", "--numsonly",
        help="Flag: Only show the number of unseen and total number of messages for the specified folder for each account.",
        action="store_true"
    )

    parser.add_argument(
        "--noColor",
        help="Flag: Do NOT allow colored output. Useful for shells that don't allow colored text or when the output needs to be piped to another application since colored text is implemented by encapsulating the text in xterm color escape codes.",
        action="store_true"
    )

    parser.add_argument(
        "--oldestFirst",
        help="Flag: Show oldest email first i.e. chronological order.",
        action="store_true"
    )

    parser.add_argument(
        "-A", "--showAll",
        help="Flag: Show all emails in specified folder, not just unseen ones.",
        action="store_true"
    )

    parser.add_argument(
        "--showFlags",
        help="Flag: Show mutt-style flags (in square brackets) to indicate new/unseen and deleted emails when ALL emails are displayed (i.e. -A is issued).",
        action="store_true"
    )

    parser.add_argument(
        "-t", "--threads",
        help="Specify the maximum number of parallel threads the program will use to simultaneously access IMAP servers. Set to 1 for serial (non-parallel) behaviour.",
        type=int
    )

    parser.add_argument(
        "-T", "--terminal",
        help="Flag: Show results in the terminal. Do NOT use urwid.",
        action="store_true"
    )

    # Begin reading in arguments and validate them:
    args = parser.parse_args()  # args contains the values of arguments passed. If incorrect arguments are passed the program will be stopped here and argparse will display the appropriate error and help message.

    return args


def applyArgs(args, servers, globalSettings):
    '''
    This function accepts both the arguments read by the script and the 'servers' object
    (dictionary) created by setOptions(). It will apply the arguments sent via command-line
    to the 'servers' and 'globalSettings' objects to create and return a modified version
    reflecting these changes.
    '''

    # This function is where we carry out all operations necessary to implement the settings specified by command-line arguments.

    # -a, --accounts. Limit accounts to the specified ones:
    if args.accounts:   # True if -a or --accounts has been specified
        # We must perform some error checking on the arguments passed to the --accounts optional argument
        newServers = {}     # Create a new dictionary we will populate ONLY with the specified accounts

        for item in args.accounts.split(','):   # We are expecting a comma-separated list
            # We create a list of servers the START of whose names (lowercase) matches the item
            # in the argument list currently under consideration
            matching_servers = [x for x in servers.keys() if re.match('^' + item.lower(), x.lower())]

            if matching_servers:    # A match has occurred
                for server in matching_servers:     # All matching servers are added to the list displayed
                    newServers[server] = servers[server]
            else:   # No match has occurred. This is an error.
                print('\nError: ' + item + ' is not the beginning of a valid IMAP account name specified in the configuration file.')
                import sys
                sys.exit(1)

        servers = newServers

    # -x, --exclude. Does NOT poll the accounts specified with this argument:
    if args.exclude:    # True if -x or --exclude has been specified
        # We must perform some error checking on the arguments passed to the --exclude optional argument
        excludedAccounts = []   # Empty list which we will populate with the excluded accounts
        newServers = {}     # Empty dictionary with which we will construct the new 'servers' dictionary without the excluded accounts

        for item in args.exclude.split(','):    # We are expecting a comma-separated list
            if not item in servers.keys():  # If this item in the comma-separated list is NOT an account specified in the configuration file
                print('\nError: ' + item + ' is not a valid IMAP account name specified in the configuration file.')
                import sys
                sys.exit(1)
            else:
                excludedAccounts.append(item)

        # Now we remove the excluded accounts when we create the new 'servers' dictionary:
        for account in servers.keys():
            if not account in excludedAccounts:     # The current account is not in the excluded list and so can be added to the servers dictionary:
                newServers[account] = servers[account]

        # Place the newly constructed dictionary (with accounts excluded) in to the original 'servers' dictionary:
        servers = newServers

    # -n, --numsonly. If specified only the total and unseen number of messages is to be displayed. Similar to 'fetchmail -c'.
    if args.numsonly:
        for account in servers.keys():
            servers[account]['showOnlyNums'] = True

    # -T, --terminal. If specified the output is displayed on the terminal (stdout) and 'urwid' is NOT used.
    if args.terminal:
        globalSettings['terminal'] = True
    else:
        globalSettings['terminal'] = False

    # --noColor. If specified the output of the program should NOT be colored.
    if args.noColor:
        globalSettings['color'] = False

    # -A, --showAll. Show all emails, not just unseen ones.
    if args.showAll:
        for account in servers.keys():
            servers[account]['showUnseen'] = False

        globalSettings['showFlags'] = True  # Flags are shown by default whenever ALL emails are viewed whether --showFlags is passed or not.

    # --oldestFirst. Show oldest email first i.e. in chronological order.
    if args.oldestFirst:
        for account in servers.keys():
            servers[account]['latestEmailFirst'] = False

    # --showFlags. Show mutt-style flags (in square brackets) when all emails are being displayed.
    if args.showFlags:
        globalSettings['showFlags'] = True

    # -t, --threads. Set max. number of parallel threads.
    if args.threads:
        globalSettings['maxThreads'] = args.threads

    return servers, globalSettings


def applyGlobalSettings(globalSettings):
    '''
    This function applies the global settings defined in the dictionary 'globalSettings'
    (created using the configuration file and command-l
# coding=UTF-8
from django.shortcuts import redirect
from bellum.common.alliance import isAllied
from bellum.common.session.login import must_be_logged
from bellum.common.session import getAccount, getRace
from bellum.common.session.mother import getCurrentMother
from djangomako.shortcuts import render_to_response, render_to_string
from bellum.space.models import Planet
from bellum.common.gui import PrimaryGUIObject
from bellum.common.fixtures.province_build import getCosts
from bellum.common.session import getRace, getAccount, getResourceIndex
from bellum.meta import MPBI
from bellum.province.models import Province, Reinforcement
from django.core.exceptions import ObjectDoesNotExist
from bellum.orders.models import LandarmyProvintionalStrikeOrder
from bellum.orders.models import LandarmyPlanetaryStrikeOrder, LandarmyProvintionalStrikeOrder, LandarmyMotherPickupOrder
from bellum.space.ajax.pinfo import dx_html
from bellum.common.fixtures.relocation import getRelocationTime


@must_be_logged
def process_onlyprovince(request, province_id):
    try:    # check planet
        province_id = int(province_id)
        province = Province.objects.get(id=province_id)
    except:
        return redirect('/')

    return process(request, province.planet.id, province_id=province_id)


@must_be_logged
def process(request, planet_id, province_id=None):
    try:    # check planet
        planet = Planet.objects.get(id=planet_id)
    except:
        return redirect('/')

    provinces = Province.objects.filter(planet=planet)
    provinces_postprocessed = {}
    prov = None

    try:    # facilitates GET getting province to zoom onto
        if province_id != None:
            provgrabber = province_id
        else:
            provgrabber = int(request.GET['province'])
    except:
        provgrabber = None

    for province in provinces:
        # 0 - MINE, 1 - ENEMY, 2 - ALLIED, 3 - NOBODYS
        try:
            province.provintionalpresence
        except:
            pv = 'gray'
        else:
            if province.provintionalpresence.owner == getAccount(request):
                pv = 'green'
            elif isAllied(getAccount(request), province.provintionalpresence.owner):
                pv = 'blue'
            else:
                pv = 'red'

        provinces_postprocessed[province.id] = [pv, False]

        if province.id == provgrabber:
            prov = province

        try:
            if province.provintionalpresence.owner == getAccount(request):
                if prov == None:
                    prov = province
        except:
            pass

    if prov == None:
        prov = provinces[0]

    provinces_postprocessed[prov.id][1] = True

    mum = getCurrentMother(request)

    sfx = dx_html(request, prov, mum).decode('utf8')

    # can relocate?
    can_relocate = False
    relocation_time = None
    if (planet != mum.duePosition()):   # Different position than current
        can_relocate = mum.canRelocate()
        if can_relocate:
            relocation_time = getRelocationTime(mum, getRace(request), mum.orbiting, planet)

    # can scan?
    can_scan = False
    if getRace(request) == 1:
        if mum.isRelocating() == False:
            if mum.orbiting == planet:
                can_scan = True

    return render_to_response('space/planetview/planetview.html',
                              {'htmldata': sfx,
                               'race': getRace(request),
                               'planet': planet,
                               'postprocessed': provinces_postprocessed,
                               'can_scan': can_scan,
                               'firstprovince': prov,
                               'can_relocate': can_relocate,
                               'relocation_time': relocation_time,
                               'wctg': lambda x: int((x + 100.0) * (345.0 / 200.0)),
                               'pgo': PrimaryGUIObject(request)})
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
This module provides the Plugin class for document generator plugins.
"""
from . import Plugin
from .docgen import TextDoc, DrawDoc


class DocGenPlugin(Plugin):
    """
    This class represents a plugin for generating documents from Gramps
    """
    def __init__(self, name, description, basedoc, paper, style,
                 extension, docoptclass, basedocname):
        """
        :param name: A friendly name to call this plugin.
            Example: "Plain Text"
        :type name: string
        :param description: A short description of the plugin.
            Example: "This plugin will generate text documents in plain text."
        :type description: string
        :param basedoc: A class that implements the BaseDoc interface.
        :type basedoc: BaseDoc
        :param paper: Indicates whether the plugin uses paper or not.
            True = use paper; False = do not use paper
        :type paper: bool
        :param style: Indicates whether the plugin uses styles or not.
            True = use styles; False = do not use styles
        :type style: bool
        :param extension: The extension for the output file.
            Example: "txt"
        :type extension: str
        :param docoptclass: either None or a subclass of DocOptions
        :type docoptclass: either None or a DocOptions subclass
        :param basedocname: The BaseDoc name of this plugin.
            Example: "AsciiDoc"
        :type basedocname: string
        :return: nothing
        """
        Plugin.__init__(self, name, description, basedoc.__module__)
        self.__basedoc = basedoc
        self.__paper = paper
        self.__style = style
        self.__extension = extension
        self.__docoptclass = docoptclass
        self.__basedocname = basedocname

    def get_basedoc(self):
        """
        Get the :class:`.BaseDoc` class for this plugin.

        :return: the :class:`.BaseDoc` class passed into :meth:`__init__`
        """
        return self.__basedoc

    def get_paper_used(self):
        """
        Get the paper flag for this plugin.

        :return: bool - True = use paper; False = do not use paper
        """
        return self.__paper

    def get_style_support(self):
        """
        Get the style flag for this plugin.

        :return: bool - True = use styles; False = do not use styles
        """
        return self.__style

    def get_extension(self):
        """
        Get the file extension for the output file.

        :return: str
        """
        return self.__extension

    def get_doc_option_class(self):
        """
        Get the :class:`.DocOptions` subclass for this plugin, if any

        :return: the :class:`.DocOptions` subclass passed into :meth:`__init__`
        """
        return self.__docoptclass

    def get_basedocname(self):
        """
        Get the :class:`.BaseDoc` name for this plugin.

        :return: the :class:`.BaseDoc` name passed into :meth:`__init__`
        """
        return self.__basedocname

    def get_text_support(self):
        """
        Check if the plugin supports the :class:`.TextDoc` interface.

        :return: bool: True if :class:`.TextDoc` is supported; False if
            :class:`.TextDoc` is not supported.
        """
        return bool(issubclass(self.__basedoc, TextDoc))

    def get_draw_support(self):
        """
        Check if the plugin supports the :class:`.DrawDoc` interface.

        :return: bool: True if :class:`.DrawDoc` is supported; False if
            :class:`.DrawDoc` is not supported.
        """
        return bool(issubclass(self.__basedoc, DrawDoc))
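
# Usage sketch (hypothetical, not part of gPrime): registering a plugin for
# a plain-text backend, where `AsciiDoc` is assumed to be a class that
# implements the TextDoc interface.
#
# plugin = DocGenPlugin(name="Plain Text",
#                       description="Generates plain text documents.",
#                       basedoc=AsciiDoc, paper=False, style=True,
#                       extension="txt", docoptclass=None,
#                       basedocname="AsciiDoc")
# assert plugin.get_text_support()       # AsciiDoc subclasses TextDoc
# assert not plugin.get_draw_support()   # but not DrawDoc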
import traceback

from PyQt5 import QtCore

import androguard.session as session
from androguard.core import androconf

import logging
log = logging.getLogger("androguard.gui")


class FileLoadingThread(QtCore.QThread):
    file_loaded = QtCore.pyqtSignal(bool)

    def __init__(self, parent=None):
        QtCore.QThread.__init__(self, parent)
        self.parent = parent
        self.file_path = None
        self.incoming_file = ()

    def load(self, file_path):
        self.file_path = file_path
        if file_path.endswith(".ag"):
            self.incoming_file = (file_path, 'SESSION')
        else:
            file_type = androconf.is_android(file_path)
            self.incoming_file = (file_path, file_type)
        self.start(QtCore.QThread.LowestPriority)

    def run(self):
        if self.incoming_file:
            try:
                file_path, file_type = self.incoming_file
                if file_type in ["APK", "DEX", "DEY"]:
                    ret = self.parent.session.add(file_path, open(file_path, 'rb').read())
                    self.file_loaded.emit(ret)
                elif file_type == "SESSION":
                    self.parent.session = session.Load(file_path)
                    self.file_loaded.emit(True)
                else:
                    self.file_loaded.emit(False)
            except Exception as e:
                log.debug(e)
                log.debug(traceback.format_exc())
                self.file_loaded.emit(False)

            self.incoming_file = ()
        else:
            self.file_loaded.emit(False)
from django.contrib import admin

from puzzle_captcha.models import Puzzle, PuzzlePiece


class PuzzlePieceInline(admin.StackedInline):
    model = PuzzlePiece
    readonly_fields = ('key', 'image', 'order')
    can_delete = False
    extra = 0


class PuzzleAdmin(admin.ModelAdmin):
    list_display = ('key', 'rows', 'cols')
    readonly_fields = ('key', 'rows', 'cols')
    inlines = [
        PuzzlePieceInline,
    ]

    class Meta:
        model = Puzzle


admin.site.register(Puzzle, PuzzleAdmin)
""" 02-read-from-disk-2.py - Catching the `end-of-file` signal from the SfPlayer object. This example demonstrates how to use the `end-of-file` signal
of the SfPlayer object to trigger another playback (possibly with another sound, another speed, etc.). When a SfPlayer reaches the end of the file, it sends a trigger (more on trigger later) that the user can retrieve with the syntax : variable_name["trig"] """ from pyo import * import random s = Server().boot() # Sound bank folder = "../
snds/" sounds = ["alum1.wav", "alum2.wav", "alum3.wav", "alum4.wav"] # Creates the left and right players sfL = SfPlayer(folder + sounds[0], speed=1, mul=0.5).out() sfR = SfPlayer(folder + sounds[0], speed=1, mul=0.5).out(1) # Function to choose a new sound and a new speed for the left player def newL(): sfL.path = folder + sounds[random.randint(0, 3)] sfL.speed = random.uniform(0.75, 1.5) sfL.out() # The "end-of-file" signal triggers the function "newL" tfL = TrigFunc(sfL["trig"], newL) # Function to choose a new sound and a new speed for the right player def newR(): sfR.path = folder + sounds[random.randint(0, 3)] sfR.speed = random.uniform(0.75, 1.5) sfR.out(1) # The "end-of-file" signal triggers the function "newR" tfR = TrigFunc(sfR["trig"], newR) s.gui(locals())
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from common_openstack import OpenStackTest


class ServerTest(OpenStackTest):

    def test_server_query(self):
        factory = self.replay_flight_data()
        p = self.load_policy({
            'name': 'all-servers',
            'resource': 'openstack.server'},
            session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 2)

    def test_server_filter_name(self):
        factory = self.replay_flight_data()
        policy = {
            'name': 'get-server-c7n-test-1',
            'resource': 'openstack.server',
            'filters': [
                {
                    "type": "value",
                    "key": "name",
                    "value": "c7n-test-1",
                },
            ],
        }
        p = self.load_policy(policy, session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0].name, "c7n-test-1")

    def test_server_filter_flavor(self):
        factory = self.replay_flight_data()
        policy = {
            'name': 'get-server-c7n-test-1',
            'resource': 'openstack.server',
            'filters': [
                {
                    "type": "flavor",
                    "flavor_name": "m1.tiny",
                },
            ],
        }
        p = self.load_policy(policy, session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0].name, "c7n-test-1")

    def test_server_filter_tags(self):
        factory = self.replay_flight_data()
        policy = {
            'name': 'get-server-c7n-test-1',
            'resource': 'openstack.server',
            'filters': [
                {
                    "type": "tags",
                    "tags": [
                        {"key": "a", "value": "a"},
                        {"key": "b", "value": "b"},
                    ],
                    "op": "all",
                },
            ],
        }
        p = self.load_policy(policy, session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0].name, "c7n-test-2")
# This file is NOT licensed under the GPLv3, which is the license for the re
st # of YouCompleteMe. # # Here's the license text for this file: # # This is free and unencumbered software released into the public domain. # # Anyone is free to copy, modify, publish, use, compile, sell, or # distribute this software, either in source code form or as a compiled # binary, for any purpose, commercial or non-commercial, and by any # means. # # In jurisdictions that recognize copyright laws, the author or authors # of this software dedicate any and all copyright inte
rest in the # software to the public domain. We make this dedication for the benefit # of the public at large and to the detriment of our heirs and # successors. We intend this dedication to be an overt act of # relinquishment in perpetuity of all present and future rights to this # software under copyright law. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # # For more information, please refer to <http://unlicense.org/> import os import ycm_core # These are the compilation flags that will be used in case there's no # compilation database set (by default, one is not set). # CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR. flags = [ '-Wall', '-Wextra', '-Werror', '-Wc++98-compat', '-Wno-long-long', '-Wno-variadic-macros', '-fexceptions', '-DNDEBUG', # You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM # source code needs it. #'-DUSE_CLANG_COMPLETER', # THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which # language to use when compiling headers. So it will guess. Badly. So C++ # headers will be compiled as C headers. You don't want that so ALWAYS specify # a "-std=<something>". # For a C project, you would set this to something like 'c99' instead of # 'c++11'. '-std=c99', # ...and the same thing goes for the magic -x option which specifies the # language that the files to be compiled are written in. This is mostly # relevant for c++ headers. # For a C project, you would set this to 'c' instead of 'c++'. '-x', 'c', '-isystem', '../BoostParts', '-isystem', # This path will only work on OS X, but extra paths that don't exist are not # harmful '/System/Library/Frameworks/Python.framework/Headers', '-isystem', '../llvm/include', '-isystem', '../llvm/tools/clang/include', '-I', '.', '-I', './flibs/final_libraries/include', '-I', './src', '-I', './http-parser', ] # Set this to the absolute path to the folder (NOT the file!) containing the # compile_commands.json file to use that instead of 'flags'. See here for # more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html # # Most projects will NOT need to set this to anything; you can just change the # 'flags' list of compilation flags. Notice that YCM itself uses that approach. 
compilation_database_folder = '' if os.path.exists( compilation_database_folder ): database = ycm_core.CompilationDatabase( compilation_database_folder ) else: database = None SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ] def DirectoryOfThisScript(): return os.path.dirname( os.path.abspath( __file__ ) ) def MakeRelativePathsInFlagsAbsolute( flags, working_directory ): if not working_directory: return list( flags ) new_flags = [] make_next_absolute = False path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ] for flag in flags: new_flag = flag if make_next_absolute: make_next_absolute = False if not flag.startswith( '/' ): new_flag = os.path.join( working_directory, flag ) for path_flag in path_flags: if flag == path_flag: make_next_absolute = True break if flag.startswith( path_flag ): path = flag[ len( path_flag ): ] new_flag = path_flag + os.path.join( working_directory, path ) break if new_flag: new_flags.append( new_flag ) return new_flags def IsHeaderFile( filename ): extension = os.path.splitext( filename )[ 1 ] return extension in [ '.h', '.hxx', '.hpp', '.hh' ] def GetCompilationInfoForFile( filename ): # The compilation_commands.json file generated by CMake does not have entries # for header files. So we do our best by asking the db for flags for a # corresponding source file, if any. If one exists, the flags for that file # should be good enough. if IsHeaderFile( filename ): basename = os.path.splitext( filename )[ 0 ] for extension in SOURCE_EXTENSIONS: replacement_file = basename + extension if os.path.exists( replacement_file ): compilation_info = database.GetCompilationInfoForFile( replacement_file ) if compilation_info.compiler_flags_: return compilation_info return None return database.GetCompilationInfoForFile( filename ) def FlagsForFile( filename, **kwargs ): if database: # Bear in mind that compilation_info.compiler_flags_ does NOT return a # python list, but a "list-like" StringVec object compilation_info = GetCompilationInfoForFile( filename ) if not compilation_info: return None final_flags = MakeRelativePathsInFlagsAbsolute( compilation_info.compiler_flags_, compilation_info.compiler_working_dir_ ) # NOTE: This is just for YouCompleteMe; it's highly likely that your project # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR # ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT. #try: # final_flags.remove( '-stdlib=libc++' ) #except ValueError: # pass else: relative_to = DirectoryOfThisScript() final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to ) return { 'flags': final_flags, 'do_cache': True }
#!/usr/bin/python2.7 -u import os.path if os.path.isdir('/dev/null'): print
'/dev/null' if os.path.isdir('/dev'): print
'/dev'
import ast import logging import time import unittest from malcolm.profiler import Profiler # https://github.com/bdarnell/plop/blob/master/plop/test/collector_test.py class ProfilerTest(unittest.TestCase): def filter_stacks(self, results): # Kind of hacky, but this is the simplest way to keep the tests # working after the internals of the collector changed to support # multiple formatters. stack_counts = ast.literal_eval(results) counts = {} for stack, count in stack_counts.items(): filtered_stack = [ frame[2] for frame in stack if frame[0].endswith("test_profiler.py") ] if filtered_stack: counts[tuple(filtered_stack)] = count return counts def check_counts(self, counts, expected): failed = False output = [] for stack, count in expected.items(): # every expected frame should appear in the data, but # the inverse is not true if the signal catches us between # calls. self.assertTrue(stack in counts) ratio = float(counts[stack]) / float(count) output.append( "%s: expected %s, got %s (%s)" % (stack, count, counts[stack], ratio) ) if not (0.70 <= ratio <= 1.25): failed = True if failed: for line in output: logging.warning(line) for key in set(counts.keys()) - set(expected.keys()): logging.warning("unexpected key: %s: got %s" % (key, counts[key])) self.
fail("collected data did not meet expectations") def test_collector(self): start = time.time() def a(end): while time.time() < end: pass c(time.time() + 0.1) def b(end): while time.time() < end: pass c(time.time() + 0.1) def c(end): while time.time() < end: pass profiler = Profiler("/tmp") profiler.start(interval=0.01) a(tim
e.time() + 0.1) b(time.time() + 0.2) c(time.time() + 0.3) end = time.time() profiler.stop("profiler_test.plop") elapsed = end - start self.assertTrue(0.8 < elapsed < 0.9, elapsed) with open("/tmp/profiler_test.plop") as f: results = f.read() counts = self.filter_stacks(results) expected = { ("a", "test_collector"): 10, ("c", "a", "test_collector"): 10, ("b", "test_collector"): 20, ("c", "b", "test_collector"): 10, ("c", "test_collector"): 30, } self.check_counts(counts, expected)
#!/usr/bin/python
# coding=utf-8
import matplotlib.pyplot as plt
import numpy as np
import mlpy
import sys
import math

from config import loc_store_path, times_day


class LocTrain(object):
    """Per-weekday linear regression of speeds, using ridge or OLS."""

    def __init__(self, file_name, flag):
        if flag == "ridge":
            self.lrs = [mlpy.Ridge() for i in range(7)]
        else:
            self.lrs = [mlpy.OLS() for i in range(7)]
        self.file_name = file_name
        self.x_weekday = dict()
        self.y_weekday = dict()
        #self.x ,self.y =
def train(self): self.get_input(self.file_name) for weekday in range(7): self.lrs[weekday].learn(self.x_weekday[weekday], self.y_weekday[weekday]) def predict(self,weekday,speeds): pre_speed = self.lrs
[weekday].pred(speeds) return pre_speed def test(self): pass def get_input(self, filename): fin = open(filename) x = [] y = [] for each in fin: each = each[:each.find('\n')] l = each.split(' ') each_x = [] each_x.append(1) each_x.append(float(l[0])) each_x.append(float(l[1])) each_x.append(float(l[2])) each_x.append(float(l[3])) weekday = int(l[5]) if weekday not in self.x_weekday: self.x_weekday[weekday] = [] self.y_weekday[weekday] = [] self.x_weekday[weekday].append(each_x) self.y_weekday[weekday].append(float(l[4])) def main(): tmp = LocTrain('../data/train/3860_data',"ridge") tmp.train() print tmp.predict(1,[1,10,10,20,10]) pass if __name__ == '__main__': main()
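# For reference, a made-up example of one input line consumed by
# get_input above: four speed features, then the target speed, then
# the weekday index:
#
#   10.0 10.0 20.0 10.0 15.0 1
#
# This yields x = [1, 10.0, 10.0, 20.0, 10.0] (bias term prepended)
# and y = 15.0, filed under weekday 1.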
# -*- coding: utf-8
-*- # # Copyright 2013 Red Hat, Inc. # # This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied
, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. from katello.client.api.base import KatelloAPI class PackageAPI(KatelloAPI): """ Connection class to access package calls """ def package(self, packageId, repoId): path = "/api/repositories/%s/packages/%s" % (repoId, packageId) pack = self.server.GET(path)[1] return pack def packages_by_repo(self, repoId): path = "/api/repositories/%s/packages" % repoId pack_list = self.server.GET(path)[1] return pack_list def search(self, query, repoId): path = "/api/repositories/%s/packages/search" % repoId pack_list = self.server.GET(path, {"search": query})[1] return pack_list
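# Illustrative usage only: the client configures the shared server
# connection elsewhere before any API class is instantiated, and
# "repo_1" is a made-up repository id.
#
#   api = PackageAPI()
#   rpms = api.packages_by_repo("repo_1")
#   hits = api.search("name:kernel*", "repo_1")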
g: utf-8 """ Onshape REST API The Onshape REST API consumed by all clients. # noqa: E501 The version of the OpenAPI document: 1.113 Contact: api-support@onshape.zendesk.com Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import re # noqa: F401 import sys # noqa: F401 import six # noqa: F401 import nulltype # noqa: F401 from onshape_client.oas.model_utils import ( # noqa: F401 ModelComposed, ModelNormal, ModelSimple, date, datetime, file_type, int, none_type, str, validate_get_composed_info, ) try: from onshape_client.oas.models import btp_expression9 except ImportError: btp_expression9 = sys.modules["onshape_client.oas.models.btp_expression9"] try: from onshape_client.oas.models import btp_expression_operator244_all_of except ImportError: btp_expression_operator244_all_of = sys.modules[ "onshape_client.oas.models.btp_expression_operator244_all_of" ] try: from onshape_client.oas.models import btp_identifier8 except ImportError: btp_identifier8 = sys.modules["onshape_client.oas.models.btp_identifier8"] try: from onshape_client.oas.models import btp_space10 except ImportError: btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"] class BTPExpressionOperator244(ModelComposed): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepte
d as additional properties values. """ allowed_values = { ("operator",): { "NONE": "NONE", "PLUS": "PLUS", "MINUS": "MINUS", "TIMES": "TIMES", "DIVIDE": "DIVIDE", "MODULUS": "MODULUS", "POWER": "POWER", "NEGATE": "NEGATE", "OR": "OR", "AND": "AND", "NOT": "NOT", "EQUAL_TO": "EQUAL_TO", "NOT_EQUA
L_TO": "NOT_EQUAL_TO", "GREATER": "GREATER", "LESS": "LESS", "GREATER_OR_EQUAL": "GREATER_OR_EQUAL", "LESS_OR_EQUAL": "LESS_OR_EQUAL", "CONCATENATE": "CONCATENATE", "CONDITIONAL": "CONDITIONAL", }, ("documentation_type",): { "FUNCTION": "FUNCTION", "PREDICATE": "PREDICATE", "CONSTANT": "CONSTANT", "ENUM": "ENUM", "USER_TYPE": "USER_TYPE", "FEATURE_DEFINITION": "FEATURE_DEFINITION", "FILE_HEADER": "FILE_HEADER", "UNDOCUMENTABLE": "UNDOCUMENTABLE", "UNKNOWN": "UNKNOWN", }, } validations = {} additional_properties_type = None @staticmethod def openapi_types(): """ This must be a class method so a model may have properties that are of type self, this ensures that we don't create a cyclic import Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ return { "bt_type": (str,), # noqa: E501 "for_export": (bool,), # noqa: E501 "global_namespace": (bool,), # noqa: E501 "import_microversion": (str,), # noqa: E501 "namespace": ([btp_identifier8.BTPIdentifier8],), # noqa: E501 "operand1": (btp_expression9.BTPExpression9,), # noqa: E501 "operand2": (btp_expression9.BTPExpression9,), # noqa: E501 "operand3": (btp_expression9.BTPExpression9,), # noqa: E501 "operator": (str,), # noqa: E501 "space_after_namespace": (btp_space10.BTPSpace10,), # noqa: E501 "space_after_operator": (btp_space10.BTPSpace10,), # noqa: E501 "space_before_operator": (btp_space10.BTPSpace10,), # noqa: E501 "written_as_function_call": (bool,), # noqa: E501 "atomic": (bool,), # noqa: E501 "documentation_type": (str,), # noqa: E501 "end_source_location": (int,), # noqa: E501 "node_id": (str,), # noqa: E501 "short_descriptor": (str,), # noqa: E501 "space_after": (btp_space10.BTPSpace10,), # noqa: E501 "space_before": (btp_space10.BTPSpace10,), # noqa: E501 "space_default": (bool,), # noqa: E501 "start_source_location": (int,), # noqa: E501 } @staticmethod def discriminator(): return None attribute_map = { "bt_type": "btType", # noqa: E501 "for_export": "forExport", # noqa: E501 "global_namespace": "globalNamespace", # noqa: E501 "import_microversion": "importMicroversion", # noqa: E501 "namespace": "namespace", # noqa: E501 "operand1": "operand1", # noqa: E501 "operand2": "operand2", # noqa: E501 "operand3": "operand3", # noqa: E501 "operator": "operator", # noqa: E501 "space_after_namespace": "spaceAfterNamespace", # noqa: E501 "space_after_operator": "spaceAfterOperator", # noqa: E501 "space_before_operator": "spaceBeforeOperator", # noqa: E501 "written_as_function_call": "writtenAsFunctionCall", # noqa: E501 "atomic": "atomic", # noqa: E501 "documentation_type": "documentationType", # noqa: E501 "end_source_location": "endSourceLocation", # noqa: E501 "node_id": "nodeId", # noqa: E501 "short_descriptor": "shortDescriptor", # noqa: E501 "space_after": "spaceAfter", # noqa: E501 "space_before": "spaceBefore", # noqa: E501 "space_default": "spaceDefault", # noqa: E501 "start_source_location": "startSourceLocation", # noqa: E501 } required_properties = set( [ "_data_store", "_check_type", "_from_server", "_path_to_item", "_configuration", "_composed_instances", "_var_name_to_model_instances", "_additional_properties_model_instances", ] ) def __init__( self, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, **kwargs ): # noqa: E501 """btp_expression_operator244.BTPExpressionOperator244 - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. 
Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _from_server (bool): True if the data is from the server False if the data is from the client (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion
from _
_future__ import absolute_import, unicode_literals from .api.psd_image import PSDImage from .composer import c
ompose __all__ = ['PSDImage', 'compose']
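# A minimal usage sketch, assuming the usual psd-tools entry points
# ('example.psd' is a made-up file name):
#
#   psd = PSDImage.open('example.psd')
#   image = compose(psd)  # flatten the layer tree into a PIL image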
# Copyright 2009-2010 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). __metaclass__ = type __all__ = [ 'BranchRevision', ] from storm.locals import ( Int, Reference, Storm, ) from zope.interface import implements from lp.code.interfaces.branchrevision import IBranchRevision class BranchRevision(Storm): """See `IBranchRevision`.""" __storm_table__ = 'BranchRevision' __storm_primary__ = ("branch_id", "revision_id") implements(IBranchRevision) branch_id = Int(name='branch', allow_none=False) branch = Reference(branch_id, 'Branch.id') revision_id = Int(name='revision', allow_none=False) revision = Reference(revision_id, 'Revision.id') sequence = Int(name='sequence', allow_none=True)
def __init__(self, branch, revision, sequence=None): self.branch = branch self.revision = revision self.sequence
= sequence
#!/usr/bin/python import os import sys import getopt import shutil import string # Globals quiet = 0 test = 0 comments = 0 sysfsprefix = "/sys/devices/system/rttest/rttest" statusfile = "/status" commandfile = "/command" # Command opcodes cmd_opcodes = { "schedother" : "1", "schedfifo" : "2", "lock" : "3", "locknowait" : "4", "lockint" : "5", "lockintnowait" : "6", "lockcont" : "7", "unlock" : "8", "lockbkl" : "9", "unlockbkl" : "10", "signal" : "11", "resetevent" : "98", "reset" : "99", } test_opcodes = { "prioeq" : ["P" , "eq" , None], "priolt" : ["P" , "lt" , None], "priogt" : ["P" , "gt" , None], "nprioeq" : ["N" , "eq" , None], "npriolt" : ["N" , "lt" , None], "npriogt" : ["N" , "gt" , None], "unlocked" : ["M" , "eq" , 0], "trylock" : ["M" , "eq" , 1], "blocked" : ["M" , "eq" , 2], "blockedwake" : ["M" , "eq" , 3], "locked" : ["M" , "eq" , 4], "opcodeeq" : ["O" , "eq" , None], "opcodelt" : ["O" , "lt" , None], "opcodegt" : ["O" , "gt" , None], "eventeq" : ["E" , "eq" , None], "eventlt" : ["E" , "lt" , None], "eventgt" : ["E" , "gt" , None], } # Print usage information def usage(): print "rt-tester.py <-c -h -q -t> <testfile>" print " -c display comments after first command" print " -h help" print " -q quiet mode" print " -t test mode (syntax check)" print " testfile: read test specification from testfile" print " otherwise from stdin" return # Print progress when not in quiet mode def progress(str): if not quiet: print str # Analyse a status value def analyse(val, top, arg): intval = int(val) if top[0] == "M": intval = intval / (10 ** int(arg)) intval = intval % 10 argval = top[2] elif top[0] == "O": argval = int(cmd_opcodes.get(arg, arg)) else: argval = int(arg) # progress("%d %s %d" %(intval, top[1], argval)) if top[1] == "eq" and intval == argval: return 1 if top[1] == "lt" and intval < argval: return 1 if top[1] == "gt" and intval > argval: return 1 return 0 # Parse the commandline try: (options, arguments) = getopt.getopt(sys.argv[1:],'chqt') except getopt.GetoptError, ex: usage() sys.exit(1) # Parse commandline options for option, value in options: if option == "-c": comments = 1 elif option == "-q": quiet = 1 elif option == "-t": test = 1 elif option == '-h': usage() sys.exit(0) # Select the input source if arguments: try: fd = open(arguments[0]) except Exception,ex: sys.stderr.write("File not found %s\n" %(arguments[0])) sys.exit(1) else: fd = sys.stdin linenr = 0 # Read the test patterns while 1: linenr = linenr + 1 line = fd.readline() if not len(line): break line = line.strip() parts = line.split(":") if not parts or len(parts) < 1: continue if len(parts[0]) == 0: continue if parts[0].startswith("#"): if comments > 1: progress(line) continue if comments == 1: comments = 2 progress(line) cmd = parts[0].strip().lower() opc = parts[1].strip().lower() tid = parts[2].strip() dat = parts[3].strip() try: # Test or wait for a status value if cmd == "t" or cmd == "w": testop = test_opcodes[opc] fname = "%s%s%s" %(sysfsprefix, tid, statusfile) if test: print fname continue while 1: query = 1 fsta = open(fname, 'r') status = fsta.readline().strip() fsta.close() stat = status.split(",") for s in stat: s = s.strip() if s.startswith(testop[0
]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break

                progress("   " + status)

            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)

        # Issue a command to the tester
        elif cmd == "c":
cmdnr = cmd_opcodes[opc] # Build command string and sys filename cmdstr = "%s:%s" %(cmdnr, dat) fname = "%s%s%s" %(sysfsprefix, tid, commandfile) if test: print fname continue fcmd = open(fname, 'w') fcmd.write(cmdstr) fcmd.close() except Exception,ex: sys.stderr.write(str(ex)) sys.stderr.write("\nSyntax error in line %d\n" %(linenr)) if not test: fd.close() sys.exit(1) # Normal exit pass print "Pass" sys.exit(0)
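# For reference, an illustrative test specification in the
# "command: opcode: threadid: data" format parsed above (colons
# separate the four fields; surrounding whitespace is stripped):
#
#   C: schedfifo:  0: 80
#   C: lock:       0: 0
#   T: locked:     0: 0
#   C: unlock:     0: 0
#   C: reset:      0: 0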
# -*- coding: utf-
8; -*- # # @file urls.py # @brief collgate # @author Frédéric SCHERMA (INRA UMR1095) # @date 2018-09-20 # @copyright Copyright (c) 2018 INRA/CIRAD # @license MIT (see LICENSE file) # @details coll-
gate printer module url entry point. from django.conf.urls import include, url urlpatterns = [ ]
from base import Cho
icesEnum from _version import
__version__
ype, codename='test') user.user_permissions.add(perm) user.save() # reloading user to purge the _perm_cache user = User.objects.get(username='test') self.assertEqual(user.get_all_permissions() == set([u'auth.test']), True) self.assertEqual(user.get_group_permissions(), set([])) self.assertEqual(user.has_module_perms('Group'), False) self.assertEqual(user.has_module_perms('auth'), True) perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2') user.user_permissions.add(perm) user.save() perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3') user.user_permissions.add(perm) user.save() user = User.objects.get(username='test') self.assertEqual(user.get_all_permissions(), set([u'auth.test2', u'auth.test', u'auth.test3'])) self.assertEqual(user.has_perm('test'), False) self.assertEqual(user.has_perm('auth.test'), True) self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True) perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group') group = Group.objects.create(name='test_group') group.permissions.add(perm) group.save() user.groups.add(group) user = User.objects.get(username='test') exp = set([u'auth.test2', u'auth.test', u'auth.test3', u'auth.test_group']) self.assertEqual(user.get_all_permissions(), exp) self.assertEqual(user.get_group_permissions(), set([u'auth.test_group'])) self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True) user = AnonymousUser() self.assertEqual(user.has_perm('test'), False) self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False) def test_has_no_object_perm(self): """Regressiontest for #12462""" user = User.objects.get(username='test') content_type=ContentType.objects.get_for_model(Group) perm = Permission.objects.create(name='test', content_type=content_type, codename='test') user.user_permissions.add(perm) user.save() self.assertEqual(user.has_perm('auth.test', 'object'), False) self.assertEqual(user.get_all_permissions('object'), set([])) self.assertEqual(user.has_perm('auth.test'), True) self.assertEqual(user.get_all_permissions(), set(['auth.test'])) class TestObj(object): pass class SimpleRowlevelBackend(object): supports_object_permissions =
True # This class also supports tests for anonymous user permissions, # via subclasses which just set the 'supports_anonymous_user' attribute. def has_perm(self, user, perm, obj=None): if not obj: return # We only support row level perms
if isinstance(obj, TestObj): if user.username == 'test2': return True elif user.is_anonymous() and perm == 'anon': # not reached due to supports_anonymous_user = False return True return False def has_module_perms(self, user, app_label): return app_label == "app1" def get_all_permissions(self, user, obj=None): if not obj: return [] # We only support row level perms if not isinstance(obj, TestObj): return ['none'] if user.is_anonymous(): return ['anon'] if user.username == 'test2': return ['simple', 'advanced'] else: return ['simple'] def get_group_permissions(self, user, obj=None): if not obj: return # We only support row level perms if not isinstance(obj, TestObj): return ['none'] if 'test_group' in [group.name for group in user.groups.all()]: return ['group_perm'] else: return ['none'] class RowlevelBackendTest(TestCase): """ Tests for auth backend that supports object level permissions """ backend = 'django.contrib.auth.tests.auth_backends.SimpleRowlevelBackend' def setUp(self): self.curr_auth = settings.AUTHENTICATION_BACKENDS settings.AUTHENTICATION_BACKENDS = tuple(self.curr_auth) + (self.backend,) self.user1 = User.objects.create_user('test', 'test@example.com', 'test') self.user2 = User.objects.create_user('test2', 'test2@example.com', 'test') self.user3 = User.objects.create_user('test3', 'test3@example.com', 'test') self.save_warnings_state() warnings.filterwarnings('ignore', category=DeprecationWarning, module='django.contrib.auth') def tearDown(self): settings.AUTHENTICATION_BACKENDS = self.curr_auth self.restore_warnings_state() def test_has_perm(self): self.assertEqual(self.user1.has_perm('perm', TestObj()), False) self.assertEqual(self.user2.has_perm('perm', TestObj()), True) self.assertEqual(self.user2.has_perm('perm'), False) self.assertEqual(self.user2.has_perms(['simple', 'advanced'], TestObj()), True) self.assertEqual(self.user3.has_perm('perm', TestObj()), False) self.assertEqual(self.user3.has_perm('anon', TestObj()), False) self.assertEqual(self.user3.has_perms(['simple', 'advanced'], TestObj()), False) def test_get_all_permissions(self): self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['simple'])) self.assertEqual(self.user2.get_all_permissions(TestObj()), set(['simple', 'advanced'])) self.assertEqual(self.user2.get_all_permissions(), set([])) def test_get_group_permissions(self): content_type=ContentType.objects.get_for_model(Group) group = Group.objects.create(name='test_group') self.user3.groups.add(group) self.assertEqual(self.user3.get_group_permissions(TestObj()), set(['group_perm'])) class AnonymousUserBackend(SimpleRowlevelBackend): supports_anonymous_user = True class NoAnonymousUserBackend(SimpleRowlevelBackend): supports_anonymous_user = False class AnonymousUserBackendTest(TestCase): """ Tests for AnonymousUser delegating to backend if it has 'supports_anonymous_user' = True """ backend = 'django.contrib.auth.tests.auth_backends.AnonymousUserBackend' def setUp(self): self.curr_auth = settings.AUTHENTICATION_BACKENDS settings.AUTHENTICATION_BACKENDS = (self.backend,) self.user1 = AnonymousUser() def tearDown(self): settings.AUTHENTICATION_BACKENDS = self.curr_auth def test_has_perm(self): self.assertEqual(self.user1.has_perm('perm', TestObj()), False) self.assertEqual(self.user1.has_perm('anon', TestObj()), True) def test_has_perms(self): self.assertEqual(self.user1.has_perms(['anon'], TestObj()), True) self.assertEqual(self.user1.has_perms(['anon', 'perm'], TestObj()), False) def test_has_module_perms(self): 
self.assertEqual(self.user1.has_module_perms("app1"), True) self.assertEqual(self.user1.has_module_perms("app2"), False) def test_get_all_permissions(self): self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['anon'])) class NoAnonymousUserBackendTest(TestCase): """ Tests that AnonymousUser does not delegate to backend if it has 'supports_anonymous_user' = False """ backend = 'django.contrib.auth.tests.auth_backends.NoAnonymousUserBackend' def setUp(self): self.curr_auth = settings.AUTHENTICATION_BACKENDS settings.AUTHENTICATION_BACKENDS = tuple(self.curr_auth) + (self.backend,) self.user1 = AnonymousUser() def tearDown(self): settings.AUTHENTICATION_BACKENDS = self.curr_auth def test_has_perm(self): self.assertEqual(self.user1.has_perm('perm', TestObj()), False) self.assertEqual(self.user1.has_perm('anon', TestObj()), False) def test_has_perms(self): self.assertEqual(self.user1.has_perms(['anon'], TestObj()), False) def test_has_m
"""Interfaces with iAlarm control panels.""" from homeassistant.components.alarm_control_panel import AlarmControlPanelEntity from homeassistant.components.alarm_control_panel.const import ( SUPPORT_ALARM_ARM_AWAY, SUPPORT_ALARM_ARM_HOME, ) from homeassistant.helpers.entity import DeviceInfo from homeassistant.helpers.update_coordinator import CoordinatorEntity from .const import DATA_COORDINATOR, DOMAIN async def async_setup_entry(hass, entry, async_add_entities) -> None: """Set up a iAlarm alarm control panel based on a config entry.""" coordinator = hass.data[DOMAIN][entry.entry_id][DATA_COORDINATOR] async_add_entities([IAlarmPanel(coordinator)], False) class IAlarmPanel(CoordinatorEntity, AlarmControlPanelEntity): """Representation of an iAlarm device.""" @property def device_info(self) -> DeviceInfo: """Return device info for this device.""" return DeviceInfo( identifiers={(DOMAIN, self.unique_id)}, m
anufacturer="Antifurto365 - Meian", name=self.name, )
@property def unique_id(self): """Return a unique id.""" return self.coordinator.mac @property def name(self): """Return the name.""" return "iAlarm" @property def state(self): """Return the state of the device.""" return self.coordinator.state @property def supported_features(self) -> int: """Return the list of supported features.""" return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY def alarm_disarm(self, code=None): """Send disarm command.""" self.coordinator.ialarm.disarm() def alarm_arm_home(self, code=None): """Send arm home command.""" self.coordinator.ialarm.arm_stay() def alarm_arm_away(self, code=None): """Send arm away command.""" self.coordinator.ialarm.arm_away()
elf.host, self.port = host, port self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def connect(self): try: self.sock.connect((self.host, self.port)) log.debug("%s connected to DataNode" % self) return True except Exception: log.debug("%s connection to DataNode failed" % self) return False def _close_socket(self): self.sock.close() def _read_bytes(self, n, depth=0): if depth > self.MAX_READ_ATTEMPTS: raise Exception("Tried to read %d more bytes, but failed after %d attempts" % (n, self.MAX_READ_ATTEMPTS)) bytes = self.sock.recv(n) if len(bytes) < n: left = n - len(bytes) depth += 1 bytes += self._read_bytes(left, depth) return bytes def write(self, data): if log.getEffectiveLevel() == logging.DEBUG: log.debug("Sending: %s", format_bytes(data)) self.sock.send(data) def write_delimited(self, data): self.write(encoder._VarintBytes(len(data))) self.write(data) def readBlock(self, length, pool_id, block_id, generation_stamp, offset, block_token, check_crc): '''Send a read request to given block. If we receive a successful response, we start reading packets. Send read request: +---------------------------------------------------------------------+ | Data Transfer Protocol Version, 2 bytes | +---------------------------------------------------------------------+ | Op code, 1 byte (READ_BLOCK = 81) | +---------------------------------------------------------------------+ | Delimited serialized OpReadBlockProto (varint len + request) | +---------------------------------------------------------------------+ Receive response: +---------------------------------------------------------------------+ | Delimited BlockOpResponseProto (varint len + response) | +---------------------------------------------------------------------+ Start reading packets. Each packet has the following structure: +---------------------------------------------------------------------+ | Packet length (4 bytes/32 bit int) | +---------------------------------------------------------------------+ | Serialized size of header, 2 bytes | +---------------------------------------------------------------------+ | Packet Header Proto | +---------------------------------------------------------------------+ | x checksums, 4 bytes each | +---------------------------------------------------------------------+ | x chunks of payload data | +---------------------------------------------------------------------+ ''' log.debug("%s sending readBlock request" % self) # Send version and opcode self.sock.send(struct.pack('>h', 28)) self.sock.send(struct.pack('b', self.READ_BLOCK)) length = length - offset # Create and send OpReadBlockProto message request = OpReadBlockProto() request.offset = offset request.len = length header = request.header header.clientName = "snakebite" base_header = header.baseHeader # TokenProto token = base_header.token token.identifier = block_token.identifier token.password = block_token.password token.kind = block_token.kind token.service = block_token.service # ExtendedBlockProto block = base_header.block block.poolId = pool_id block.blockId = block_id block.generationStamp = generation_stamp s_request = request.SerializeToString() log_protobuf_message("OpReadBlockProto:", request) self.write_delimited(s_request) byte_stream = RpcBufferedReader(self.sock) block_op_response_bytes = get_delimited_message_bytes(byte_stream)[1] block_op_response = BlockOpResponseProto() block_op_response.ParseFromString(block_op_response_bytes) log_protobuf_message("BlockOpResponseProto", block_op_response) checksum_type = 
block_op_response.readOpChecksumInfo.checksum.type bytes_per_chunk = block_op_response.readOpChecksumInfo.checksum.bytesPerChecksum log.debug("Checksum type: %s, bytesPerChecksum: %s" % (checksum_type, bytes_per_chunk)) if checksum_type in [self.CHECKSUM_CRC32C, self.CHECKSUM_CRC32]: checksum_len = 4 else: raise Exception("Checksum type %s not implemented" % checksum_type) total_read = 0 if block_op_response.status == 0: # datatransfer_proto.Status.Value('SUCCESS') while total_read < length: log.debug("== Reading next packet") packet_len = struct.unpack("!I", byte_stream.read(4))[0] log.debug("Packet length: %s", packet_len) serialized_size = struct.unpack("!H", byte_stream.read(2))[0] log.debug("Serialized size: %s", serialized_size) packet_header_bytes = byte_stream.read(serialized_size) packet_header = PacketHeaderProto() packet_header.ParseFromString(packet_header_bytes) log_protobuf_message("PacketHeaderProto", packet_header) data_len = packet_header.dataLen chunks_per_packet = int((data_len + bytes_per_chunk - 1) / bytes_per_chunk)
log.debug("Nr of chunks: %d", chunks_per_packet) data_len = packet_len - 4 - chunks_per_packet * checksum_len log.debug("Payload len: %d", data_len) byte_stream.reset() # Collect checksums if check_crc: checksums = [] for _ in xrange(0, chunks_per_packet): checksum = self._read_bytes(checksum_len)
checksum = struct.unpack("!I", checksum)[0] checksums.append(checksum) else: self._read_bytes(checksum_len * chunks_per_packet) # We use a fixed size buffer (a "load") to read only a couple of chunks at once. bytes_per_load = self.LOAD_SIZE - (self.LOAD_SIZE % bytes_per_chunk) chunks_per_load = int(bytes_per_load / bytes_per_chunk) loads_per_packet = int(math.ceil(bytes_per_chunk * chunks_per_packet / bytes_per_load)) read_on_packet = 0 for i in range(loads_per_packet): load = '' for j in range(chunks_per_load): log.debug("Reading chunk %s in load %s:", j, i) bytes_to_read = min(bytes_per_chunk, data_len - read_on_packet) chunk = self._read_bytes(bytes_to_read) if check_crc: checksum_index = i * chunks_per_load + j if checksum_index < len(checksums) and crc(chunk) != checksums[checksum_index]: raise Exception("Checksum doesn't match") load += chunk total_read += len(chunk) read_on_packet += len(chunk) yield load # Send ClientReadStatusProto message confirming successful read request = ClientReadStatusProto() request.status = 0 # SUCCESS log_protobuf_message("ClientReadStatusProto:", request) s_request = request.SerializeToString() self.write_delimited(s_request) self._close_socket() def __repr__(self): return "D
    shuffle: Whether or not to shuffle dataset.
    seed: tf.int64 scalar tf.Tensor (or None). Used as shuffle seed for
      tf.data.
    vocabulary: unused argument, maintains compatibility with other dataset_fns
    num_inference_examples: maximum number of examples per task to do inference
      on. If None or less than 0, use all examples.
    use_cached: bool, whether to load the cached version of this dataset.
    priming_sequence_length: If the Task only has "targets", select the first
      `priming_sequence_length` tokens from each target sequence to use as
      "inputs". This is useful for decoder-only language models where you
      would like to use a portion of the targets as a priming sequence for
      generation.

  Returns:
    A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples.
  """
  del vocabulary
  mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)

  def _split_targets_for_primed_inference(ex):
    ex["inputs"] = ex["targets"][:priming_sequence_length]
    ex["targets"] = ex["targets"][priming_sequence_length:]
    ex["inputs"] = tf.pad(
        ex["inputs"],
        [[0, priming_sequence_length - tf.shape(ex["inputs"])[0]]],
        "CONSTANT")
    ex["inputs"] = tf.reshape(ex["inputs"], shape=(priming_sequence_length,))
    return ex

  def _prepare_for_unprimed_inference(ex):
    ex["inputs"] = tf.constant([], dtype=tf.int64)
    return ex

  def _get_dataset_for_single_task(task, sequence_length):
    """Get a tensorflow.data.Dataset for the provided task."""
    ds = task.get_dataset(
        sequence_length, split=dataset_split, use_cached=use_cached,
        shuffle=shuffle, seed=seed)
    if "inputs" not in ds.element_spec:
      if not priming_sequence_length or priming_sequence_length <= 0:
        logging.warning("Priming sequence length not specified so priming "
                        "with the empty string.")
        ds = ds.map(_prepare_for_unprimed_inference)
      else:
        logging.info("Using the first %d tokens of each target as input.",
                     priming_sequence_length)
        ds = ds.map(_split_targets_for_primed_inference)
    elif priming_sequence_length is
not None: raise ValueError( "Setting a priming sequence length only makes sense for decoder-only " "Tasks, which have `targets` but no `inputs`.") eos_keys = set( k for k, f i
n mixture_or_task.output_features.items() if f.add_eos)
    logging.info(
        "Padding '%s' with sequence lengths: %s", task.name, sequence_length)
    ds = transformer_dataset.pack_or_pad(
        ds,
        sequence_length,
        pack=False,
        feature_keys=tuple(task.output_features),
        ensure_eos=eos_keys)
    if num_inference_examples is not None and num_inference_examples >= 0:
      ds = ds.take(num_inference_examples)
    return ds

  outputs = []
  for task in t5.data.get_subtasks(mixture_or_task):
    if dataset_split not in task.splits:
      logging.info("Task %s has no '%s' split, skipping inference.",
                   task.name, dataset_split)
      continue
    outputs.append(
        transformer_dataset.EvalDataset(
            task.name,
            functools.partial(
                _get_dataset_for_single_task,
                task=task,
                sequence_length=sequence_length),
            task.postprocess_fn,
            task.metric_fns,
        )
    )
  if not outputs:
    logging.warning("No %s data found for %s.",
                    dataset_split, mixture_or_task_name)
  return outputs


@gin.configurable()
def mesh_eval_dataset_fn(
    mixture_or_task_name,
    sequence_length,
    dataset_split,
    vocabulary=None,
    num_eval_examples=-1,
    use_cached=False,
    pack=False,
    shuffle_eval_examples=False,
    seed=None):
  """Returns all tf.data.Datasets for evaluation on a given mixture.

  This uses the format required for utils.run's `eval_dataset_fn` argument in
  the Mesh TF transformer standalone.

  Args:
    mixture_or_task_name: string, an identifier for a Mixture or Task in the
      appropriate registry. Must be specified via gin.
    sequence_length: dict mapping feature key to the int length for that
      feature, i.e. the max sequence length. If set to None, packing and
      padding will be disabled.
    dataset_split: string, which split of the dataset to load.
    vocabulary: unused argument, maintains compatibility with other dataset_fns
    num_eval_examples: maximum number of examples per task to use for
      continuous eval. If None or less than 0, use all examples.
    use_cached: bool, whether to load the cached version of this dataset.
    pack: a boolean, whether to pack examples. This is useful for perplexity
      evals but should not be used for iterative decoding.
    shuffle_eval_examples: boolean, whether to shuffle eval examples, applied
      only when num_eval_examples is not None. Intended to be able to eval on
      a different eval slice at every iteration.
    seed: tf.int64 scalar tf.Tensor (or None). Used for both the global seed
      and shuffle seed for tf.data

  Returns:
    A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples.
  """
  del vocabulary
  mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)

  def _get_dataset_for_single_task(task, sequence_length):
    """Get a tensorflow.data.Dataset for the provided task."""
    if shuffle_eval_examples and seed is None:
      logging.warning("shuffle_eval_examples is True but no seed was "
                      "provided. Using a random seed.")
    ds = task.get_dataset(
        sequence_length,
        split=dataset_split,
        use_cached=use_cached,
        shuffle=shuffle_eval_examples,
        seed=seed,
    )
    eos_keys = set(
        k for k, f in mixture_or_task.output_features.items() if f.add_eos)
    if sequence_length is None:
      logging.info(
          "Skipping packing/padding for '%s' since sequence length is None.",
          task.name)
    else:
      logging.info(
          "%sing '%s' with sequence lengths: %s",
          "Pack" if pack else "Padd", task.name, sequence_length)
      ds = transformer_dataset.pack_or_pad(
          ds,
          sequence_length,
          pack=pack,
          feature_keys=tuple(task.output_features),
          ensure_eos=eos_keys)
    if num_eval_examples is not None and num_eval_examples >= 0:
      ds = ds.take(num_eval_examples)
    return ds

  outputs = []
  for task in t5.data.get_subtasks(mixture_or_task):
    if dataset_split not in task.splits:
      logging.info(
          "Task %s has no '%s' split, skipping eval.", task.name, dataset_split
      )
      continue
    outputs.append(
        transformer_dataset.EvalDataset(
            task.name,
            functools.partial(
                _get_dataset_for_single_task,
                task=task,
                sequence_length=sequence_length),
            task.postprocess_fn,
            task.metric_fns,
        )
    )
  if not outputs:
    logging.warning("No %s data found for %s.",
                    dataset_split, mixture_or_task_name)
  return outputs


@gin.configurable()
def tsv_dataset_fn(
    filename,
    sequence_length,
    dataset_split,
    vocabulary,
    shuffle_buffer_size=10000):
  r"""Returns a dataset based on a TSV file formatted as `<input>\t<target>`."""
  # Currently `tf.gfile.glob` is broken on GCS, so we only read a file or
  # list of files.
  return transformer_dataset.packed_parallel_tsv_dataset(
      dataset=tf.data.TextLineDataset(filename).shuffle(shuffle_buffer_size),
      sequence_length=sequence_length,
      vocabulary=vocabulary,
      dataset_split=dataset_split,
      append_eos=True,
      eos_id=1)


@gin.configurable()
def get_vocabulary(mixture_or_task_name=None):
  """Get the appropriate value for the utils.run.vocabulary argument.

  Args:
    mixture_or_task_name: string, an identifier for a Mixture or Task in the
      appropriate registry. Must be specified via gin.

  Returns:
    Either a single t5.data.vocabularies.Vocabulary or a tuple of
    t5.data.vocabularies.Vocabulary for inputs and targets.
""" This module contains the global configurations
of the framework
"""

#: This name is supposed to be used for general meta decoration of
#: functions, methods or even classes
meta_field = '_pyrsmeta'
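# A sketch of the convention this constant enables. The decorator below
# is hypothetical (not part of the framework); it only illustrates
# stashing metadata on a callable under the attribute name held in
# meta_field, so framework code can discover it later with getattr().

def _annotate(**meta):
    def decorator(func):
        setattr(func, meta_field, meta)
        return func
    return decorator

@_annotate(status=200)
def _handler():
    pass

assert getattr(_handler, meta_field) == {'status': 200}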
import feedparser import logging from rss import sources from util import date, dict_tool, tags log = logging.getLogger('app') def parse_feed_by_name(name): feed_params = sources.get_source(name) if not feed_params: raise ValueError('There is no feed with name %s' % name) source_name = feed_params['name'] feed = feedparser.parse(feed_params['url']) data = [] for entry in feed['entries']: data.append( create_doc( source_name, feed, entry, feed_params.get('tags', ()), feed_params.get('author_name'), feed_params.get('author_link'), feed_params.get('dressing_params'), ) ) log.info('%s: got %d documents', source_name, len(data)) return data def create_doc(source_name, feed, entry, additional_tags, default_author_name, default_author_link, dressing_params): link = dict_tool.get_alternative(entry, 'fee
dburner_origlink', 'link', assert_val=True) published = date.utc_format( dict_tool.get_alternative(entry, 'published', 'updated', assert_val=True) ) description = dict_tool.get_alternati
ve(entry, 'summary', 'description', 'title', assert_val=True) picture = dict_tool.get_deep(entry, 'gd_image', 'src') text = dict_tool.get_deep(entry, 'content', 0, 'value') author_name = handle_default_param( entry, dict_tool.get_deep(entry, 'authors', 0, 'name'), default_author_name ) author_link = handle_default_param( entry, dict_tool.get_deep(entry, 'authors', 0, 'href'), default_author_link ) entry_tags = [] for tag in entry.get('tags', []): tag_text = dict_tool.get_alternative(tag, 'term', 'label') if tag_text: entry_tags.append(tag_text.lower()) additional_tags += tuple(entry_tags) comments_count = entry.get('slash_comments') if comments_count is not None: comments_count = int(comments_count) return { 'link': link, 'title': entry['title'], 'published': published, 'picture': picture, 'author_name': author_name, 'author_link': author_link, 'description': description, 'text': text, 'source_name': source_name, 'source_type': 'rss', 'source_title': feed['feed']['title'], 'source_link': feed['feed']['link'], 'comments_count': comments_count, 'tags': tags.create_tags_list(*additional_tags), '__dressing_params': dressing_params, } def handle_default_param(entry, val, default_val): if callable(default_val): return default_val(entry, val) return val or default_val
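# Illustrative call (the feed name is hypothetical and must be defined
# in rss.sources for this to work):
#
#   docs = parse_feed_by_name('example_feed')
#   for doc in docs:
#       print(doc['title'], doc['link'])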
#!/usr/bin/python3 """ Merges raw data from geneteka into larger json files. """ from collections import defaultdict import html import json import os import re INPUT_DIR = 'data_raw' OUTPUT_DIR = 'data' def extractNotes(value): match = re.search(r'i.png" title="([^"]*)"', value) if match: return (value.split('<', 1)[0].strip(), match.group(1)) return (value.strip(), None) def convertPersonRecord(record): # Unparsed column with various information. stuff = record[9] lastName, lastNameNotes = extractNotes(record[3]) motherLastName, motherLastNameNotes = extractNotes(record[6]) output = { 'year': record[0].strip(), 'record_number': record[1].strip(), 'first_name': record[2].strip(), 'last_name': lastName, 'father_first_name': record[4].strip(), 'mother_first_name': record[5].strip(), 'mother_last_name': motherLastName, 'parish': record[7].strip(), 'place': record[8].strip(), 'stuff': stuff, } # Last name notes. if lastNameNotes: output['last_name_notes'] = lastNameNotes if motherLastNameNotes: output['mother_last_name_notes'] = motherLastNameNotes # List of notes. match = re.search(r'i.png" title="([^"]*)"', stuff) if match: output['notes'] = html.unescape(match.group(1)).strip().split('\r') # Where archives are kept. match = re.search(r'z.png" title="([^"]*)"', stuff) if match: output['archives'] = html.unescape(match.group(1)).strip() # URL to the place the archives are kept. match = re.search(r'href="([^"]*)" target', stuff) if match: output['archives_url'] = match.group
(1) # URL to metryki.genealodzy.pl where scans can be found. match = re.search(r'href="([^"]*)">[^>]*s.png', stuff) if match: output['metryki_url'] = html.unescape(match.group(1)) # User that entered this record to the database. match = re.search(r'uname=([^"]*)"', stuff) if match: output['user_enter
ed'] = match.group(1) return output def convertMarriageRecord(record): # Unparsed column with various information. stuff = record[9] husbandLastName, husbandLastNameNotes = extractNotes(record[3]) wifeLastName, wifeLastNameNotes = extractNotes(record[6]) output = { 'year': record[0].strip(), 'record_number': record[1].strip(), 'husband_first_name': record[2].strip(), 'husband_last_name': husbandLastName, 'husband_parents': record[4].strip(), 'wife_first_name': record[5].strip(), 'wife_last_name': wifeLastName, 'wife_parents': record[7].strip(), 'parish': record[8].strip(), 'stuff': stuff, } # Last name notes. if husbandLastNameNotes: output['nazwisko_meza_uwagi'] = husbandLastNameNotes if wifeLastNameNotes: output['nazwisko_zony_uwagi'] = wifeLastNameNotes # List of notes. match = re.search(r'i.png" title="([^"]*)"', stuff) if match: output['notes'] = html.unescape(match.group(1)).strip().split('\r') # Where archives are kept. match = re.search(r'z.png" title="([^"]*)"', stuff) if match: output['archives'] = html.unescape(match.group(1)).strip() # URL to the place the archives are kept. match = re.search(r'href="([^"]*)" target', stuff) if match: output['archives_url'] = match.group(1) # URL to metryki.genealodzy.pl where scans can be found. match = re.search(r'href="([^"]*)">[^>]*s.png', stuff) if match: output['metryki_url'] = match.group(1) # User that entered this record to the database. match = re.search(r'uname=([^"]*)"', stuff) if match: output['user_entered'] = match.group(1) return output def main(): # Map from prefix to list of records. data = defaultdict(list) # Read all files from INPUT_DIR. for fileName in os.listdir(INPUT_DIR): prefix = re.search('[^_]+_._[^_]+', fileName).group(0) with open(os.path.join(INPUT_DIR, fileName)) as file: content = json.load(file) data[prefix] += content['data'] if not os.path.exists(OUTPUT_DIR): os.makedirs(OUTPUT_DIR) # Parse records and write one parish per file. for key, value in data.items(): voivodeship, recordType, parishId = key.split('_') if recordType == 'S': converter = convertMarriageRecord else: converter = convertPersonRecord value[:] = [converter(x) for x in value] print("Writing %s" % key) metadata = { 'voivodeship': voivodeship, 'record_type': recordType, 'parish_id': parishId, } outputFile = os.path.join(OUTPUT_DIR, key + '.json') with open(outputFile, 'w') as file: outputData = { 'data': value, 'metadata': metadata, } json.dump(outputData, file) if __name__ == "__main__": main()
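# Quick illustration of extractNotes on a made-up cell value: the
# visible text is split off from the title attribute of the info icon.
#
#   extractNotes('Kowalski <img src="i.png" title="nota">')
#   -> ('Kowalski', 'nota')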
#----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from pygments.lexers import PythonLex
er, BashLexer from pygments.lexer import bygroups, using from pygments.token import Keyword, Operator, Name, Text #----------------------------------------------------------------------------- # Classes #---------------------------------------------------------
--------------------

class IPythonLexer(PythonLexer):
    name = 'IPython'
    aliases = ['ip', 'ipython']
    filenames = ['*.ipy']
    tokens = PythonLexer.tokens.copy()
    tokens['root'] = [
        # Magics with arguments: hand the argument text to the Bash
        # lexer so shell-flavoured magics highlight sensibly.
        (r'(\%+)(\w+)\s+(.*)(\n)', bygroups(Operator, Keyword,
                                            using(BashLexer), Text)),
        # Bare magics: %magic
        (r'(\%+)(\w+)\b', bygroups(Operator, Keyword)),
        # Shell escapes: !cmd
        (r'^(!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
    ] + tokens['root']
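# A minimal usage sketch (assumes Pygments is installed; the sample
# source string is made up):

if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import HtmlFormatter

    sample = '%timeit x = range(10)\n!ls -l\n'
    print(highlight(sample, IPythonLexer(), HtmlFormatter()))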
# j4cDAC test code # # Copyright 2011 Jacob Potter # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 3. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License fo
r more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http:/
/www.gnu.org/licenses/>. import socket import time import struct def pack_point(x, y, r, g, b, i = -1, u1 = 0, u2 = 0, flags = 0): """Pack some color values into a struct dac_point. Values must be specified for x, y, r, g, and b. If a value is not passed in for the other fields, i will default to max(r, g, b); the rest default to zero. """ if i < 0: i = max(r, g, b) return struct.pack("<HhhHHHHHH", flags, x, y, r, g, b, i, u1, u2) class ProtocolError(Exception): """Exception used when a protocol error is detected.""" pass class Status(object): """Represents a status response from the DAC.""" def __init__(self, data): """Initialize from a chunk of data.""" self.protocol_version, self.le_state, self.playback_state, \ self.source, self.le_flags, self.playback_flags, \ self.source_flags, self.fullness, self.point_rate, \ self.point_count = \ struct.unpack("<BBBBHHHHII", data) def dump(self, prefix = " - "): """Dump to a string.""" lines = [ "Light engine: state %d, flags 0x%x" % (self.le_state, self.le_flags), "Playback: state %d, flags 0x%x" % (self.playback_state, self.playback_flags), "Buffer: %d points" % (self.fullness, ), "Playback: %d kpps, %d points played" % (self.point_rate, self.point_count), "Source: %d, flags 0x%x" % (self.source, self.source_flags) ] for l in lines: print prefix + l class BroadcastPacket(object): """Represents a broadcast packet from the DAC.""" def __init__(self, st): """Initialize from a chunk of data.""" self.mac = st[:6] self.hw_rev, self.sw_rev, self.buffer_capacity, \ self.max_point_rate = struct.unpack("<HHHI", st[6:16]) self.status = Status(st[16:36]) def dump(self, prefix = " - "): """Dump to a string.""" lines = [ "MAC: " + ":".join( "%02x" % (ord(o), ) for o in self.mac), "HW %d, SW %d" % (self.hw_rev, self.sw_rev), "Capabilities: max %d points, %d kpps" % (self.buffer_capacity, self.max_point_rate) ] for l in lines: print prefix + l #self.status.dump(prefix) class DAC(object): """A connection to a DAC.""" def read(self, l): """Read exactly length bytes from the connection.""" while l > len(self.buf): self.buf += self.conn.recv(4096) obuf = self.buf self.buf = obuf[l:] return obuf[:l] def readresp(self, cmd): """Read a response from the DAC.""" data = self.read(22) response = data[0] cmdR = data[1] status = Status(data[2:]) # status.dump() if cmdR != cmd: raise ProtocolError("expected resp for %r, got %r" % (cmd, cmdR)) if response != "a": raise ProtocolError("expected ACK, got %r" % (response, )) self.last_status = status return status def __init__(self, host, port = 7765): """Connect to the DAC over TCP.""" conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM) conn.connect((host, port)) #print "Connected to %s:%s" % (host, port) self.conn = conn self.buf = "" # Read the "hello" message first_status = self.readresp("?") #first_status.dump() def begin(self, lwm, rate): cmd = struct.pack("<cHI", "b", lwm, rate) self.conn.sendall(cmd) return self.readresp("b") def update(self, lwm, rate): cmd = struct.pack("<cHI", "u", lwm, rate) self.conn.sendall(cmd) return self.readresp("u") def encode_point(self, point): try: return pack_point(*point) except Exception as e: ##print "Exception" #print point raise e def write(self, points): epoints = map(self.encode_point, points) cmd = struct.pack("<cH", "d", len(epoints)) self.conn.sendall(cmd + "".join(epoints)) return self.readresp("d") def prepare(self): self.conn.sendall("p") return self.readresp("p") def stop(self): self.conn.sendall("s") return self.readresp("s") def estop(self): self.conn.sendall("\xFF") 
return self.readresp("\xFF") def clear_estop(self): self.conn.sendall("c") return self.readresp("c") def ping(self): self.conn.sendall("?") return self.readresp("?") def play_stream(self, stream): # First, prepare the stream if self.last_status.playback_state == 2: raise Exception("already playing?!") elif self.last_status.playback_state == 0: self.prepare() started = 0 while True: # How much room? cap = 1799 - self.last_status.fullness points = stream.read(cap) if cap < 100: time.sleep(0.005) cap += 150 # print "Writing %d points" % (cap, ) t0 = time.time() self.write(points) t1 = time.time() # print "Took %f" % (t1 - t0, ) if not started: self.begin(0, 30000) started = 1 def find_dac(): """Listen for broadcast packets.""" s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.bind(("0.0.0.0", 7654)) while True: data, addr = s.recvfrom(1024) bp = BroadcastPacket(data) #print "Packet from %s: " % (addr, ) #bp.dump() def find_first_dac(): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.bind(("0.0.0.0", 7654)) data, addr = s.recvfrom(1024) bp = BroadcastPacket(data) #print "Packet from %s: " % (addr, ) return addr[0]
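# Quick sanity check of pack_point's defaults (illustrative): with no
# intensity passed, i falls back to max(r, g, b), and the point packs
# to a fixed-size 18-byte struct.
#
#   pt = pack_point(100, -100, 65535, 0, 0)
#   assert len(pt) == struct.calcsize("<HhhHHHHHH")  # 18 bytes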
from django.conf import settings from django.http import HttpRequest, HttpResponse from django.shortcuts import redirect from django.utils.translation import gettext as _ from zerver.decorator import require_realm_admin from zerver.lib.actions import do_change_icon_source from zerver.lib.exceptions import JsonableError from zerver.lib.realm_icon import realm_icon_url from zerver.lib.response import json_success from zerver.lib.upload import upload_icon_image from zerver.lib.url_encoding import append_url_query_string from zerver.models import UserProfile @require_realm_admin def upload_icon(request: HttpRequest, user_profile: UserProfile) -
> HttpResponse: if len(request.FILES) != 1: raise JsonableError(_("You must upload exactly one
icon.")) icon_file = list(request.FILES.values())[0] if (settings.MAX_ICON_FILE_SIZE_MIB * 1024 * 1024) < icon_file.size: raise JsonableError( _("Uploaded file is larger than the allowed limit of {} MiB").format( settings.MAX_ICON_FILE_SIZE_MIB, ) ) upload_icon_image(icon_file, user_profile) do_change_icon_source( user_profile.realm, user_profile.realm.ICON_UPLOADED, acting_user=user_profile ) icon_url = realm_icon_url(user_profile.realm) json_result = dict( icon_url=icon_url, ) return json_success(json_result) @require_realm_admin def delete_icon_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse: # We don't actually delete the icon because it might still # be needed if the URL was cached and it is rewritten # in any case after next update. do_change_icon_source( user_profile.realm, user_profile.realm.ICON_FROM_GRAVATAR, acting_user=user_profile ) gravatar_url = realm_icon_url(user_profile.realm) json_result = dict( icon_url=gravatar_url, ) return json_success(json_result) def get_icon_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse: url = realm_icon_url(user_profile.realm) # We can rely on the URL already having query parameters. Because # our templates depend on being able to use the ampersand to # add query parameters to our url, get_icon_url does '?version=version_number' # hacks to prevent us from having to jump through decode/encode hoops. url = append_url_query_string(url, request.META["QUERY_STRING"]) return redirect(url)
nvalid." USER_LINK_EXPIRED_ERROR = "The link you used to create an account may have expired." def has_validation_errors(data, field_name): document = html.fromstring(data) form_field = document.xpath('//input[@name="{}"]'.format(field_name)) return 'invalid' in form_field[0].classes or 'invalid' in form_field[0].getparent().classes class TestLogin(BaseApplicationTest): def setup(self): super(TestLogin, self).setup() data_api_client_config = {'authenticate_user.return_value': self.user( 123, "email@email.com", 1234, 'name', 'name' )} self._data_api_client = mock.patch( 'app.main.views.login.data_api_client', **data_api_client_config ) self.data_api_client_mock = self._data_api_client.start() def teardown(self): self._data_api_client.stop() def test_should_show_login_page(self): res = self.client.get(self.expand_path('/login')) assert res.status_code == 200 assert 'private' in res.headers['Cache-Control'] assert "Sign in to the Marketplace" in res.get_data(as_text=True) @mock.patch('app.main.views.login.data_api_client') def test_redirect_on_buyer_login(self, data_api_client): with self.app.app_context(): data_api_client.authenticate_user.return_value = self.user(123, "email@email.com", None, None, 'Name') data_api_client.get_user.return_value = self.user(123, "email@email.com", None, None, 'Name') res = self.client.post(self.url_for('main.process_login'), data={ 'email_address': 'valid@email.com', 'password': '1234567890', 'csrf_token': FakeCsrf.valid_token, }) assert res.status_code == 302 assert res.location == 'http://localhost/2/buyer-dashboard' assert 'Secure;' in res.headers['Set-Cookie'] @mock.patch('app.main.views.login.data_api_client') def test_redirect_on_supplier_login(self, data_api_client): with self.app.app_context(): data_api_client.authenticate_user.return_value = self.user( 123, 'email@email.com', None, None, 'Name', role='supplier' ) data_api_client.get_user.return_value = self.user( 123, 'email@email.com', None, None, 'Name', role='supplier' ) res = self.client.post(self.url_for('main.process_login'), data={ 'email_address': 'valid@email.com', 'password': '1234567890', 'csrf_token': FakeCsrf.valid_token, }) assert res.status_code == 302 assert res.location == 'http://localhost' + \ self.expand_path('/2/seller-dashboard') assert 'Secure;' in res.headers['Set-Cookie'] def test_should_redirect_logged_in_buyer(self): self.login_as_buyer() res = self.client.get(self.url_for('main.render_login')) assert res.status_code == 302 assert res.location == 'http://localhost/2/buyer-dashboard' def test_should_strip_whitespace_surrounding_login_email_address_field(self): self.client.post(self.expand_path('/login'), data={ 'email_address': ' valid@email.com ', 'password': '1234567890', 'csrf_token': FakeCsrf.valid_token, }) self.data_api_client_mock.authenticate_user.assert_called_with('valid@email.com', '1234567890') def test_should_not_strip_whitespace_surrounding_login_password_field(self): self.client.post(self.expand_path('/login'), data={ 'email_address': 'valid@email.com', 'password': ' 1234567890 ', 'csrf_token': FakeCsrf.valid_token, }) self.data_api_client_mock.authenticate_user.assert_called_with( 'valid@email.com', ' 1234567890 ') @mock.patch('app.main.views.login.data_api_client') def test_ok_next_url_redirects_buyer_on_login(self, data_api_client): with self.app.app_context(): data_api_client.authenticate_user.return_value = self.user(123, "email@email.com", None, None, 'Name') data_api_client.get_user.return_value = self.user(123, "email@email.com", None, None, 'Name') 
data = { 'email_address': 'valid@email.com', 'password': '1234567890', 'csrf_token': FakeCsrf.valid_token, } res = self.client.post(self.expand_path('/login?next={}'.format(self.expand_path('/bar-foo'))), data=data) assert res.status_code == 302 assert res.location == 'http://localhost' + self.expand_path('/bar-foo') @mock.patch('app.main.views.login.data_api_client') def test_bad_next_url_redirects_user(self, data_api_client): with self.app.app_context(): data_api_client.authenticate_user.return_value = self.user(123, "email@email.com", None, None, 'Name') data_api_client.get_user.return_value = self.user(123, "email@email.com", None, None, 'Name') data = { 'email_address': 'valid@email.com', 'password': '1234567890', 'csrf_token': FakeCsrf.valid_token, } res = self.client.post(self.expand_path('/login?next=http://badness.com'), data=data) assert res.status_code == 302 assert res.location == 'http://localhost/2/buyer-dashboard' def test_should_have_cookie_on_redirect(self): with self.app.app_context(): self.app.config['SESSION_COOKIE_DOMAIN'] = '127.0.0.1' self.app.config['SESSION_COOKIE_SECURE'] = True res = self.client.post(self.expand_path('/login'), data={ 'email_address': 'valid@email.com', 'password': '1234567890', 'csrf_token': FakeCsrf.valid_token, }) cookie_value = self.get_cookie_by_name(res, 'dm_session') assert cookie_value['dm_session'] is not None assert cookie_value["Domain"] == "127.0.0.1" def test_should_redirect_to_login_on_logout(self): res = self.client.get(self.expand_path('/logout')) assert res.status_code == 302 assert res.location == 'http://localhost/2/login' @mock.patch('app.main.views.login.data_api_client') def test_should_return_a_403_for_invalid_login(self, data_api_client): data_api_client.authenticate_user.return_v
alue = None data_api_client.get_user.return_value = None res = self.client.post(self.expand_path('/login'), data={ 'email_address': 'valid@email.com', 'password': '1234567890', 'csrf_token': FakeCsrf.valid_token, }) assert self.strip_all_whitespace("Make sure you've entered the right email address and password") \ in self.
strip_all_whitespace(res.get_data(as_text=True)) assert res.status_code == 403 def test_should_be_validation_error_if_no_email_or_password(self): res = self.client.post(self.expand_path('/login'), data={'csrf_token': FakeCsrf.valid_token}) data = res.get_data(as_text=True) assert res.status_code == 400 assert has_validation_errors(data, 'email_address') assert has_validation_errors(data, 'password') def test_should_be_validation_error_if_invalid_email(self): res = self.client.post(self.expand_path('/login'), data={ 'email_address': 'invalid', 'password': '1234567890', 'csrf_token': FakeCsrf.valid_token, }) data = res.get_data(as_text=True) assert res.status_code == 400 assert has_validation_errors(data, 'email_address') assert not has_validation_errors(data, 'password') def test_valid_email_formats(self): cases = [ 'good@example.com', 'good-email@example.com', 'good-email+plus@exam
''' AxelProxy XBMC Addon Copyright (C) 2013 Eldorado This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
. '''

import axelcommon
import axelproxy

# This is the XBMC-linked entry point.
# TODO: read the settings here and send them to the proxy (port etc.)
# TODO: check whether the "start at launch" setting is configured!

# Address and port for the proxy to listen on
HOST_NAME = '127.0.0.1'
#HOST_NAME = 'localhost'
PORT_NUMBER = 45550  ## move this somewhere that could be configured from the UI

if __name__ == '__main__':
    # replace this line if you want to be specific about the download folder
    file_dest = axelcommon.profile_path
    print file_dest
    axelproxy.ProxyManager().start_proxy(port=PORT_NUMBER, host_name=HOST_NAME,
                                         download_folder=file_dest)  # more params to come
on(StockController): def __init__(self, *args, **kwargs): super(StockReconciliation, self).__init__(*args, **kwargs) self.head_row = ["Item Code", "Warehouse", "Quantity", "Valuation Rate"] def validate(self): if not self.expense_account: self.expense_account = frappe.get_cached_value('Company', self.company, "stock_adjustment_account") if not self.cost_center: self.cost_center = frappe.get_cached_value('Company', self.company, "cost_center") self.validate_posting_time() self.remove_items_with_no_change() self.validate_data() self.validate_expense_account() self.set_total_qty_and_amount() def on_submit(self): self.update_stock_ledger() self.make_gl_entries() def on_cancel(self): self.delete_and_repost_sle() self.make_gl_entries_on_cancel() def remove_items_with_no_change(self): """Remove items if qty or rate is not changed""" self.difference_amount = 0.0 def _changed(item): qty, rate = get_stock_balance(item.item_code, item.warehouse, self.posting_date, self.posting_time, with_valuation_rate=True) if (item.qty==None or item.qty==qty) and (item.valuation_rate==None or item.valuation_rate==rate): return False else: # set default as current rates if item.qty==None: item.qty = qty if item.valuation_rate==None: item.valuation_rate = rate item.current_qty = qty item.current_valuation_rate = rate self.difference_amount += (flt(item.qty, item.precision("qty")) * \ flt(item.valuation_rate or rate, item.precision("valuation_rate")) \ - flt(qty, item.precision("qty")) * flt(rate, item.precision("valuation_rate"))) return True items = list(filter(lambda d: _changed(d), self.items)) if not items: frappe.throw(_("None of the items have any change in quantity or value."), EmptyStockReconciliationItemsError) elif len(items) != len(self.items): self.items = items for i, item in enumerate(self.items): item.idx = i + 1 frappe.msgprint(_("Removed items with no change in quantity or value.")) def validate_data(self): def _get_msg(row_num, msg): return _("Row # {0}: ").format(row_num+1) + msg self.validation_messages = [] item_warehouse_combinations = [] default_currency = frappe.db.get_default("currency") for row_num, row in enumerate(self.items): # find duplicates if [row.item_code, row.warehouse] in item_warehouse_combinations: self.validation_messages.append(_get_msg(row_num, _("Duplicate entry"))) else: item_warehouse_combinations.append([row.item_code, row.warehouse]) self.validate_item(row.item_code, row_num+1) # validate warehouse if not frappe.db.get_value("Warehouse", row.warehouse): self.validation_messages.append(_get_msg(row_num, _("Warehouse not found in the system"))) # if both not specified if row.qty in ["", None] and row.valuation_rate in ["", None]: self.validation_messages.append(_get_msg(row_num, _("Please specify either Quantity or Valuation Rate or both"))) # do not allow negative quantity if flt(row.qty) < 0: self.validation_messages.append(_get_msg(row_
num, _("Negative Quantity is not allowed"))) # do not allow negative valuation if flt(row.valuation_rate) < 0: self.validation_messages.append(_get_msg(row_num, _("Negative Valuation Rate is not allowed"))) if row.qty and row.valuation_rate in ["",
None]: row.valuation_rate = get_stock_balance(row.item_code, row.warehouse, self.posting_date, self.posting_time, with_valuation_rate=True)[1] if not row.valuation_rate: # try if there is a buying price list in default currency buying_rate = frappe.db.get_value("Item Price", {"item_code": row.item_code, "buying": 1, "currency": default_currency}, "price_list_rate") if buying_rate: row.valuation_rate = buying_rate else: # get valuation rate from Item row.valuation_rate = frappe.get_value('Item', row.item_code, 'valuation_rate') # throw all validation messages if self.validation_messages: for msg in self.validation_messages: msgprint(msg) raise frappe.ValidationError(self.validation_messages) def validate_item(self, item_code, row_num): from erpnext.stock.doctype.item.item import validate_end_of_life, \ validate_is_stock_item, validate_cancelled_item # using try except to catch all validation msgs and display together try: item = frappe.get_doc("Item", item_code) # end of life and stock item validate_end_of_life(item_code, item.end_of_life, item.disabled, verbose=0) validate_is_stock_item(item_code, item.is_stock_item, verbose=0) # item should not be serialized if item.has_serial_no == 1: raise frappe.ValidationError(_("Serialized Item {0} cannot be updated using Stock Reconciliation, please use Stock Entry").format(item_code)) # item managed batch-wise not allowed if item.has_batch_no == 1: raise frappe.ValidationError(_("Batched Item {0} cannot be updated using Stock Reconciliation, instead use Stock Entry").format(item_code)) # docstatus should be < 2 validate_cancelled_item(item_code, item.docstatus, verbose=0) except Exception as e: self.validation_messages.append(_("Row # ") + ("%d: " % (row_num)) + cstr(e)) def update_stock_ledger(self): """ find difference between current and expected entries and create stock ledger entries based on the difference""" from erpnext.stock.stock_ledger import get_previous_sle for row in self.items: previous_sle = get_previous_sle({ "item_code": row.item_code, "warehouse": row.warehouse, "posting_date": self.posting_date, "posting_time": self.posting_time }) if previous_sle: if row.qty in ("", None): row.qty = previous_sle.get("qty_after_transaction", 0) if row.valuation_rate in ("", None): row.valuation_rate = previous_sle.get("valuation_rate", 0) if row.qty and not row.valuation_rate: frappe.throw(_("Valuation Rate required for Item in row {0}").format(row.idx)) if ((previous_sle and row.qty == previous_sle.get("qty_after_transaction") and row.valuation_rate == previous_sle.get("valuation_rate")) or (not previous_sle and not row.qty)): continue self.insert_entries(row) def insert_entries(self, row): """Insert Stock Ledger Entries""" args = frappe._dict({ "doctype": "Stock Ledger Entry", "item_code": row.item_code, "warehouse": row.warehouse, "posting_date": self.posting_date, "posting_time": self.posting_time, "voucher_type": self.doctype, "voucher_no": self.name, "company": self.company, "stock_uom": frappe.db.get_value("Item", row.item_code, "stock_uom"), "is_cancelled": "No", "qty_after_transaction": flt(row.qty, row.precision("qty")), "valuation_rate": flt(row.valuation_rate, row.precision("valuation_rate")) }) self.make_sl_entries([args]) def delete_and_repost_sle(self): """ Delete Stock Ledger Entries related to this voucher and repost future Stock Ledger Entries""" existing_entries = frappe.db.sql("""select distinct item_code, warehouse from `tabStock Ledger Entry` where voucher_type=%s and voucher_no=%s""", (self.doctype, self.name), as_dict=1) 
# delete entries frappe.db.sql("""delete from `tabStock Ledger Entry` where voucher_type=%s and voucher_no=%s""", (self.doctype, self.name)) # repost future entries for selected item_code, warehouse for entries in existing_entries: update_entries_after({ "item_code": entries.item_code, "warehouse": entries.warehouse, "posting_date": self.posting_date, "posting_time": self.posting_time }) def get_gl_entries(self, warehouse_account=None): if not self.cost_center: msgprint(_("Please enter Cost Center"), raise_exception=1) return super(StockReconciliation, self).get_gl_entries(warehouse_account, self.expense_account, self.cost_center) def validate_expense_account(self): if not cint(erpnext.is_perpetual_inventory_enabled(self.company)): return if not self.expense_account: msgprint(_("Please enter Expense Account"), raise_exception=1) elif not frappe.db.sql("""selec
"""Imports for Python API. This file is MACHINE GENERATED! Do not edit. Generated by: tensorflow/tools/api/generator/create_python_api.py script. """ from tensorflow.tools.api.generator.api.keras.preprocessing import image from tensorflow.tools.api.generator.api.keras.preprocessing import sequence
from tensorflow.tools.api.generator.api.keras.preprocessing imp
ort text
''' Swap counting Status: Accepted ''' ############################################################################### def inversions(constants, variables): """Number of swaps""" if variables: pow2 = pow(2, variables - 1, 1_000_000_007) return pow2 * (constants * 2 + variables) return constants #################################################################
############## def main(): """Read input and print output""" zeroes, qmarks, swaps = 0, 0, 0 for glyph in r
eversed(input()):
        if glyph == '0':
            zeroes += 1
        elif glyph == '1':
            swaps += inversions(zeroes, qmarks)
        elif glyph == '?':
            swaps += inversions(zeroes, qmarks) + swaps
            qmarks += 1
        swaps %= 1_000_000_007

    print(swaps)

###############################################################################

if __name__ == '__main__':
    main()
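
# Spot checks of inversions() above; these just evaluate the closed form
# pow(2, v - 1) * (2 * c + v) mod 1_000_000_007 and do not re-derive it.
assert inversions(3, 0) == 3   # no '?': only the zero count remains
assert inversions(3, 1) == 7   # 2**0 * (2*3 + 1)
assert inversions(3, 2) == 16  # 2**1 * (2*3 + 2)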
tract_interface, address=to_checksum_address(contract_address) ) def get_transaction_receipt(self, tx_hash: bytes): return self.web3.eth.getTransactionReceipt(encode_hex(tx_hash)) def deploy_solidity_contract( self, # pylint: disable=too-many-locals contract_name: str, all_contracts: Dict[str, ABI], libraries: Dict[str, str] = None, constructor_parameters: Tuple[Any] = None, contract_path: str = None, ): """ Deploy a solidity contract. Args: contract_name: The name of the contract to compile. all_contracts: The json dictionary containing the result of compiling a file. libraries: A list of libraries to use in deployment. constructor_parameters: A tuple of arguments to pass to the constructor. contract_path: If we are dealing with solc >= v0.4.9 then the path to the contract is a required argument to extract the contract data from the `all_contracts` dict. """ if libraries: libraries = dict(libraries) else: libraries = dict() ctor_parameters = constructor_parameters or () all_contracts = copy.deepcopy(all_contracts) if contract_name in all_contracts: contract_key = contract_name elif contract_path is not None: contract_key = os.path.basename(contract_path) + ":" + contract_name if contract_key not in all_contracts: raise ValueError("Unknown contract {}".format(contract_name)) else: raise ValueError( "Unknown contract {} and no contract_path given".format(contract_name) ) contract = all_contracts[contract_key] contract_interface = contract["abi"] symbols = solidity_unresolved_symbols(contract["bin"]) if symbols: available_symbols = list(map(solidity_library_symbol, all_contracts.keys())) unknown_symbols = set(symbols) - set(available_symbols) if unknown_symbols: msg = "Cannot deploy contract, known symbols {}, unresolved symbols {}.".format( available_symbols, unknown_symbols ) raise Exception(msg) dependencies = deploy_dependencies_symbols(all_contracts) deployment_order = dependencies_order_of_build(contract_key, dependencies) deployment_order.pop() # remove `contract_name` from the list log.debug( "Deploying dependencies: {}".format(str(deployment_order)), node=pex(self.address) ) for deploy_contract in deployment_order: dependency_contract = all_contracts[deploy_contract] hex_bytecode = solidity_resolve_symbols(dependency_contract["bin"], libraries) bytecode = decode_hex(hex_bytecode) dependency_contract["bin"] = bytecode gas_limit = self.web3.eth.getBlock("latest")["gasLimit"] * 8 // 10 transaction_hash = self.send_transaction( to=Address(b""), startgas=gas_limit, data=bytecode ) self.poll(transaction_hash) receipt = self.get_transaction_receipt(transaction_hash) contract_address = receipt["contractAddress"] # remove the hexadecimal prefix 0x from the address contract_address = remove_0x_prefix(contract_address) libraries[deploy_contract] = contract_address deployed_code = self.web3.eth.getCode(to_checksum_address(contract_address)) if not deployed_code: raise RuntimeError("Contract address has no code, check gas usage.") hex_bytecode = solidity_resolve_symbols(contract["bin"], libraries) bytecode = decode_hex(hex_bytecode) contract["bin"] = bytecode if isinstance(contract["bin"], str): contract["bin"] = decode_hex(contract["bin"]) contract_object = self.web3.eth.contract(abi=contract["abi"], bytecode=contract["bin"]) contract_transaction = contract_object.constructor(*ctor_parameters).buildTransaction() transaction_hash = self.send_transaction( to=Address(b""), data=contract_transaction["data"], startgas=self._gas_estimate_correction(contract_transaction["gas"]), ) 
self.poll(transaction_hash) receipt = self.get_transaction_receipt(transaction_hash) contract_address = receipt["contractAddress"] deployed_code = self.web3.eth.getCode(to_checksum_address(contract_address)) if not deployed_code: raise RuntimeError( "Deployment of {} failed. Contract address has no code, check gas usage.".format( contract_name ) ) return self.new_contract_proxy(contract_interface, contract_address), receipt def send_transaction( self, to: Address, startgas: int, value: int = 0, data: bytes = b"" ) -> bytes: """ Helper to send signed messages. This method will use the `privkey` provided in the constructor to locally sign the transaction. This requires an extended server implementation that accepts the variables v, r, and s. """ if to == to_canonical_address(constants.NULL_ADDRESS): warnings.warn("For contract creation the empty string must be used.") with self._nonce_lock: nonce = self._available_nonce gas_price = self.gas_price() transaction = { "data": data, "gas": startgas, "nonce": nonce, "value": value, "gasPrice": gas_price, } node_gas_price = self.web3.eth.gasPrice log.debug( "Calculated gas price for transaction", node=pex(self.address), calculated_gas_price=gas_price, node_gas_price=node_gas_price, ) # add the to address if not deploying a contract if to != b"": transaction["to"] = to_checksum_address(to) signed_txn = self.web3.eth.account.signTransaction(transaction, self.privkey) log_details = { "node": pex(self.address), "nonce": transaction["nonce"], "gasLimit": transaction["gas"], "gasPrice": transaction["gasPrice"], } log.debug
("send_raw_transaction called", **log_details) tx_hash = self.web3.eth.sendRawTransaction(signed_txn.rawTransaction) self._available_nonce += 1 log.debug("send_raw_transaction returned", tx_hash=encode_hex(tx_hash), **log_details) return tx_
hash def poll(self, transaction_hash: bytes): """ Wait until the `transaction_hash` is applied or rejected. Args: transaction_hash: Transaction hash that we are waiting for. """ if len(transaction_hash) != 32: raise ValueError("transaction_hash must be a 32 byte hash") transaction_hash = encode_hex(transaction_hash) # used to check if the transaction was removed, this could happen # if gas price is too low: # # > Transaction (acbca3d6) below gas price (tx=1 Wei ask=18 # > Shannon). All sequential txs from this address(7d0eae79) # > will be ignored # last_result = None while True: # Could return None for a short period of time, until the # transaction is added to the pool transaction = self.web3.eth.getTransaction(transaction_hash) # if the transaction was added to the pool and then removed if transaction is None and last_result is not None: raise Exception("invalid transaction, check gas
t_function, unicode_literals import itertools import os import sys try: # Work around a traceback on Python < 2.7.4 and < 3.3.1 # http://bugs.python.org/issue15881#msg170215 import multiprocessing except ImportError: pass # pyflakes workaround __unused__ = (multiprocessing, ) PYTHON_VERSION = sys.version_info[:3] PY2 = (PYTHON_VERSION[0] == 2) PY26 = (PYTHON_VERSION < (2, 7)) versions_required_message = """ Pywikibot not available on: %s Pywikibot is only supported under Python 2.6.5+, 2.7.2+ or 3.3+ """ def python_is_supported(): """Check that Python is supported.""" # Any change to this must be copied to pwb.py return (PYTHON_VERSION >= (3, 3, 0) or (PY2 and PYTHON_VERSION >= (2, 7, 2)) or (PY26 and PYTHON_VERSION >= (2, 6, 5))) if not python_is_supported(): raise RuntimeError(versions_required_message % sys.version) test_deps = ['bz2file', 'mock'] dependencies = ['requests!=2.18.2'] # the irc module has no Python 2.6 support since 10.0 irc_dep = 'irc==8.9' if sys.version_info < (2, 7) else 'irc' csv_dep = 'unicodecsv!=0.14.0' if PYTHON_VERSION < (2, 7) else 'unicodecsv' extra_deps = { # Core library dependencies 'eventstreams': ['sseclient'], 'isbn': ['python-stdnum'], 'Graphviz': ['pydot>=1.
0.28'], 'Google': ['google>=1.7'], 'IRC': [irc_dep], 'mwparserfromhell': ['mwparserfromhell>=0.3.3'], 'Tkinter': ['Pillow<3.5.0' if PY26 else 'Pillow'], # 0.6.1 supports socket.io 1.0, but WMF is using 0.9 (T91393 and T85716) 'rcst
ream': ['socketIO-client<0.6.1'],
    'security': ['requests[security]', 'pycparser!=2.14'],
    'mwoauth': ['mwoauth>=0.2.4,!=0.3.1'],
    'html': ['BeautifulSoup4'],
}

if PY2:
    # Additional core library dependencies which are only available on Python 2
    extra_deps.update({
        'csv': [csv_dep],
        'MySQL': ['oursql'],
        'unicode7': ['unicodedata2>=7.0.0-2'],
    })

script_deps = {
    'flickrripper.py': ['Pillow<3.5.0' if PY26 else 'Pillow'],
    'states_redirect.py': ['pycountry'],
    'weblinkchecker.py': ['memento_client>=0.5.1,!=0.6.0'],
    'patrol.py': ['mwparserfromhell>=0.3.3'],
}

# flickrapi 1.4.4 installs a root logger in verbose mode; 1.4.5 fixes this.
# The problem doesn't exist in flickrapi 2.x.
# pywikibot accepts flickrapi 1.4.5+ on Python 2, as it has been stable for a
# long time, and only depends on python-requests 1.x, whereas flickrapi 2.x
# depends on python-requests 2.x, which is first packaged in Ubuntu 14.04
# and will be first packaged for Fedora Core 21.
# flickrapi 1.4.x does not run on Python 3, and setuptools can only
# select flickrapi 2.x for Python 3 installs.
script_deps['flickrripper.py'].append(
    'flickrapi>=1.4.5,<2' if PY26 else 'flickrapi')

# lunatic-python is only available for Linux
if sys.platform.startswith('linux'):
    script_deps['script_wui.py'] = [irc_dep, 'lunatic-python', 'crontab']

# The main pywin32 repository contains a Python 2 only setup.py with a small
# wrapper setup3.py for Python 3.
# http://pywin32.hg.sourceforge.net:8000/hgroot/pywin32/pywin32
# The main pywinauto repository doesn't support Python 3.
# The repositories used below have a Python 3 compliant setup.py
dependency_links = [
    'git+https://github.com/AlereDevices/lunatic-python.git#egg=lunatic-python',
    'hg+https://bitbucket.org/TJG/pywin32#egg=pywin32',
    'git+https://github.com/vasily-v-ryabov/pywinauto-64#egg=pywinauto',
    'git+https://github.com/nlhepler/pydot#egg=pydot-1.0.29',
]

if PYTHON_VERSION < (2, 7, 3):
    # work around distutils hardcoded unittest dependency
    # work around T106512
    import unittest
    __unused__ += (unittest, )
    if 'test' in sys.argv:
        import unittest2
        sys.modules['unittest'] = unittest2

if sys.version_info[0] == 2:
    if PY26:
        script_deps['replicate_wiki.py'] = ['argparse']
        dependencies.append('future>=0.15.0')  # provides collections backports
        dependencies += extra_deps['unicode7']  # T102461 workaround

    # tools.ip does not have a hard dependency on an IP address module,
    # as it falls back to using regexes if one is not available.
    # The functional backport of py3 ipaddress is acceptable:
    # https://pypi.python.org/pypi/ipaddress
    # However the Debian package python-ipaddr is also supported:
    # https://pypi.python.org/pypi/ipaddr
    # Other backports are likely broken.
    # ipaddr 2.1.10+ is distributed with Debian and Fedora. See T105443.
    dependencies.append('ipaddr>=2.1.10')

    if sys.version_info < (2, 7, 9):
        # Python versions before 2.7.9 will cause urllib3 to trigger
        # InsecurePlatformWarning warnings for all HTTPS requests. By
        # installing with security extras, requests will automatically set
        # them up and the warnings will stop. See
        # <https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning>
        # for more details.
        dependencies += extra_deps['security']

    script_deps['data_ingestion.py'] = extra_deps['csv']

try:
    import bz2
    __unused__ += (bz2, )
except ImportError:
    # Use bz2file if Python is not compiled with bz2 support.
    dependencies.append('bz2file')

# Some of the ui_tests depend on accessing the console window's menu
# to set the console font and copy and paste, achieved using pywinauto
# which depends on pywin32.
# These tests may be disabled because pywin32 depends on VC++, is time
# consuming to build, and the console window can't be accessed during appveyor
# builds.
# Microsoft makes available a compiler for Python 2.7
# http://www.microsoft.com/en-au/download/details.aspx?id=44266
# If you set up your own compiler for Python 3, on 3.3 two demo files
# packaged with pywin32 may fail. Remove com/win32com/demos/ie*.py
if os.name == 'nt' and os.environ.get('PYSETUP_TEST_NO_UI', '0') != '1':
    # FIXME: tests/ui_tests.py suggests pywinauto 0.4.2
    # which isn't provided on pypi.
    test_deps += ['pywin32', 'pywinauto>=0.4.0']

extra_deps.update(script_deps)

# Add all dependencies as test dependencies,
# so all scripts can be compiled for script_tests, etc.
if 'PYSETUP_TEST_EXTRAS' in os.environ:
    test_deps += list(itertools.chain(*(extra_deps.values())))

    if 'oursql' in test_deps and os.name == 'nt':
        test_deps.remove('oursql')  # depends on Cython

    if 'requests[security]' in test_deps:
        # Bug T105767 on Python 2.7 release 9+
        if sys.version_info[:2] == (2, 7) and sys.version_info[2] >= 9:
            test_deps.remove('requests[security]')

# These extra dependencies are needed, otherwise unittest fails to load tests.
if sys.version_info[0] == 2:
    test_deps += extra_deps['csv'] + ['mock']
else:
    test_deps += ['six']

from setuptools import setup, find_packages

name = 'pywikibot'
version = '3.0'

try:
    import subprocess
    date = subprocess.check_output(['git', 'log', '-1', '--format=%ci']).strip()
    date = date.decode().split(' ')[0].replace('-', '')
    version = version + "." + date
except Exception as e:
    print(e)
    version = version + "-dev"

github_url = 'https://github.com/wikimedia/pywikibot-core'

setup(
    name=name,
    version=version,
    description='Python MediaWiki Bot Framework',
    long_description=open('pypi_description.rst').read(),
    keywords=('pywikibot', 'python', 'mediawiki', 'bot', 'wiki', 'framework',
              'wikimedia', 'wikipedia', 'pwb', 'pywikipedia', 'API'),
    maintainer='The Pywikibot team',
    maintainer_email='pywikibot@lists.wikimedia.org',
    license='MIT License',
    packages=[str(name)] + [package
                            for package in find_packages()
                            if package.startswith('pywikibot.')],
    install_requires=dependencies,
    dependency_links=dependency_links,
    extras_require=extra_deps,
    url='https://www.mediawiki.org/wiki/Pywikibot',
    test_suite="tests.collector",
    tests_require=test_deps,
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Development Status :: 4 - Beta',
        'Operating System :: OS Independent',
        'Intended Audience :: Developer
__author__ = 'http://www.python-course.eu/python3_inheritance.php' class Person: def __init__(self, first,
last): self.firstname = first self.lastname = last def Name(self): return self.firstname + " " + s
elf.lastname class Employee(Person): def __init__(self, first, last, staffnum): Person.__init__(self,first, last) self.staffnumber = staffnum def GetEmployee(self): return self.Name() + ", " + self.staffnumber x = Person("Marge", "Simpson") y = Employee("Homer", "Simpson", "1007") print(x.Name()) print(y.GetEmployee())
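
# The same subclass written with super(); a sketch only (Employee2 is not
# part of the original example), behavior is identical to the explicit
# Person.__init__ call above.
class Employee2(Person):
    def __init__(self, first, last, staffnum):
        super().__init__(first, last)  # Python 3 zero-argument form
        self.staffnumber = staffnum

    def GetEmployee(self):
        return self.Name() + ", " + self.staffnumber

print(Employee2("Homer", "Simpson", "1007").GetEmployee())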
y( payload: Dict[str, Any], action: Dict[str, Any], entity: str ) -> str: pull_request_action: Dict[str, Any] = get_action_with_primary_id(payload) kwargs = { "name_template": STORY_NAME_TEMPLATE.format(**action), "name": pull_request_action.get("number") if entity == "pull-request" or entity == "pull-request-comment" else pull_request_action.get("name"), "url": pull_request_action["url"], "workflow_state_template": "", } # Sometimes the workflow state of the story will not be changed when linking to a PR. if action["changes"].get("workflow_state_id") is not None: new_state_id = action["changes"]["workflow_state_id"]["new"] old_state_id = action["changes"]["workflow_state_id"]["old"] new_state = get_reference_by_id(payload, new_state_id)["name"] old_state = get_reference_by_id(payload, old_state_id)["name"] kwargs["workflow_state_template"] = TRAILING_WORKFLOW_STATE_CHANGE_TEMPLATE.format( new=new_state, old=old_state ) if entity == "pull-request": template = STORY_GITHUB_PR_TEMPLATE elif entity == "pull-request-comment": template = STORY_GITHUB_COMMENT_PR_TEMPLATE else: template = STORY_GITHUB_BRANCH_TEMPLATE return template.format(**kwargs) def get_story_update_attachment_body( payload: Dict[str, Any], action: Dict[str, Any] ) -> Optional[str]: kwargs = { "name_template": STORY_NAME_TEMPLATE.format( name=action["name"], app_url=action["app_url"], ), } file_ids_added = action["changes"]["file_ids"].get("adds") # If this is a payload for when an attachment is removed, ignore it if not file_ids_added: return None file_id = file_ids_added[0] for ref in payload["references"]: if ref["id"] == file_id: kwargs.update( type=ref["entity_type"], file_name=ref["name"], ) return FILE_ATTACHMENT_TEMPLATE.format(**kwargs) def get_story_joined_label_list( payload: Dict[str, Any], action: Dict[str, Any], label_ids_added: List[int] ) -> str: labels = [] for label_id in label_ids_added: label_name = "" for action in payload["actions"]: if action.get("id") == label_id: label_name = action.get("name", "") if label_name == "": label_name = get_reference_by_id(payload, label_id).get("name", "") labels.append(LABEL_TEMPLATE.format(name=label_name)) return ", ".join(labels) def get_story_label_body(payload: Dict[str, Any], action: Dict[str, Any]) -> Optional[str]: kwargs = { "name_template": STORY_NAME_TEMPLATE.format( name=action["name"], app_url=action["app_url"], ), } label_ids_added = action["changes"]["label_ids"].get("adds") # If this is a payload for when no label is added, ignore it if not label_ids_added: return None kwargs.update(labels=get_story_joined_label_list(payload, action, label_ids_added)) return ( STORY_LABEL_TEMPLATE.format(**kwargs) if len(label_ids_added) == 1 else STORY_LABEL_PLURAL_TEMPLATE.format(**kwargs) ) def get_story_update_project_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str: kwargs = { "name_template": STORY_NAME_TEMPLATE.format( name=action["name"], app_url=action["app_url"], ), } new_project_id = action["changes"]["project_id"]["new"] old_project_id = action["changes"]["project_id"]["old"] for ref in payload["references"]: if ref["id"] == new_project_id: kwargs.update(new=ref["name"]) if ref["id"] == old_project_id: kwargs.update(old=ref["name"]) return STORY_UPDATE_PROJECT_TEMPLATE.format(**kwargs) def get_story_update_type_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str: kwargs = { "name_template": STORY_NAME_TEMPLATE.format( name=action["name"], app_url=action["app_url"], ), "new_type": action["changes"]["story_type"]["new"], "old_type": 
action["changes"]["story_type"]["old"], } return STORY_UPDATE_TYPE_TEMPLATE.format(**kwargs) def get_story_update_owner_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str: kwargs = { "name_template": STORY_NAME_TEMPLATE.format( name=action["name"], app_url=action["app_url"], ), } return STORY_UPDATE_OWNER_TEMPLATE.format(**kwargs) def get_story_update_batch_body(payload: Dict[str, Any], action: Dict[str, Any]) -> Optional[str]: # When the user selects one or more stories with the checkbox, they can perform # a batch update on multiple stories while changing multiple attribtues at the # same time. changes = action["changes"] kwargs = { "name_template": STORY_NAME_TEMPLATE.format( name=action["name"], app_url=action["app_url"], ), "workflow_state_template": "", } templates = [] last_change = "other" move_sub_templates = [] if "epic_id" in changes: last_change = "epic" move_sub_templates.append( STORY_UPDATE_BATCH_CHANGED_SUB_TEMPLATE.format( entity_type="Epic", old=get_reference_by_id(payload, changes["epic_id"].get("old")).get("name"), new=get_reference_by_id(payload, changes["epic_id"].get("new")).get("name"), ) ) if "project_id" in changes: last_change = "project" move_sub_templates.append( STORY_UPDATE_BATCH_CHANGED_SUB_TEMPLATE.format( entity_type="Project", old=get_reference_by_id(payload, changes["project_id"].get("old")).get("name"), new=get_reference_by_id(payload, changes["project_id"].get("new")).get("name"), ) ) if len(move_sub_templates) > 0: templates.append( STORY_UPDATE_BATCH_CHANGED_TEMPLATE.format( operation="was moved", sub_templates=", ".join(move_sub_templates), ) ) if "story_type" in changes: last_change = "type" templates.append( STORY_UPDATE_BATCH_CHANGED_TEMPLATE.format( operation="{} changed".format("was" if len(templates) == 0 else "and"), sub_templates=STORY_UPDATE_BATCH_CHANGED_SUB_TEMPLATE.format( entity_type="type", old=changes["story_type"].get("old"), new=changes["story_type"].get("new"), ), ) ) if "label_ids" in changes: label_ids_added = changes["label_ids"].get("adds") # If this is a payload for when no label is added, ignore it if label_ids_added is not None: last_change = "label" labels = get_story_joined_label_list(payload, action, label_ids_added) templates.append( STORY_UPDATE_BATCH_ADD_REMOVE_TEMPLATE.format( operation="{} added".format("was" if len(templates) == 0 else "and"), entity="the new label{plural} {labels}".format( plural="s" if len(changes["label_ids"]) > 1 else "", labels=labels ), ) ) if "workflow_state_id" in changes: last_change = "state" kwargs.update( workflow_state_template=TRAILING_WORKFLOW_STATE_CHANGE_TEMPLATE.format( old=get_reference_by_id(payload, changes["workflow_state_id"].get("old")).get( "name" ), new=get_reference_by_id(payload, changes["workflow_state_id"].get("new")).get( "name" ), ) ) # Use the default template for state change i
f it is the
only one change. if len(templates) <= 1 or (len(templates) == 0 and last_change == "state"): event: st
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[6, 3])

axins1 = inset_axes(ax1,
                    width="50%",  # width: 50% of parent axes' bbox
                    height="5%",  # height: 5% of parent axes' bbox
                    loc=1)

im1 = ax1.imshow([[1, 2], [2, 3]])
plt.colorbar(im1, cax=axins1, orientation="horizontal", ticks=[1, 2, 3])
axins1.xaxis.set_ticks_position("bottom")

axins = inset_axes(ax2,
                   width="5%",    # width: 5% of parent axes' bbox
                   height="50%",  # height: 50% of parent axes' bbox
                   loc=3,
                   bbox_to_anchor=(1.05, 0., 1, 1),
                   bbox_transform=ax2.transAxes,
                   borderpad=0,
                   )

# Controlling the placement of the inset axes is basically the same as that
# of the legend: you may want to play with the borderpad value and
# the bbox_to_anchor coordinate.
im = ax2.imshow([[1, 2], [2, 3]])
plt.colorbar(im, cax=axins, ticks=[1, 2, 3])

plt.draw()
plt.show()
import os import string import random import logging from thug.ActiveX.modules import WScriptShell from thug.ActiveX.modules import TextStream from thug.ActiveX.modules
import File from thug.ActiveX.modules import Folder from thug.OS.Windows import win32_files from thug.OS.Windows import win32_folders log = logging.getLogger("Thug") def BuildPath(self, arg0, arg1): # pylint:disable=unused-argument log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] BuildPath("{arg0}", "{arg1}")') return f"{arg0}\\{arg1}" def CopyFile(self, source, destination, overwritefiles = False): # pylint:disable=unused-argument log.ThugLogging.add_behavior_warn
(f'[Scripting.FileSystemObject ActiveX] CopyFile("{source}", "{destination}")') log.TextFiles[destination] = log.TextFiles[source] def DeleteFile(self, filespec, force = False): # pylint:disable=unused-argument log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] DeleteFile("{filespec}", {force})') def CreateTextFile(self, filename, overwrite = False, _unicode = False): # pylint:disable=unused-argument log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] CreateTextFile("{filename}", ' f'"{overwrite}", ' f'"{_unicode}")') stream = TextStream.TextStream() stream._filename = filename return stream def CreateFolder(self, path): # pylint:disable=unused-argument log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] CreateFolder("{path}")') return Folder.Folder(path) def FileExists(self, filespec): # pylint:disable=unused-argument log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] FileExists("{filespec}")') if not filespec: return True if filespec.lower() in win32_files: return True if getattr(log, "TextFiles", None) and filespec in log.TextFiles: return True return False def FolderExists(self, folder): # pylint:disable=unused-argument log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] FolderExists("{folder}")') return str(folder).lower() in win32_folders def GetExtensionName(self, path): # pylint:disable=unused-argument log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] GetExtensionName("{path}")') ext = os.path.splitext(path)[1] return ext if ext else "" def GetFile(self, filespec): # pylint:disable=unused-argument log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] GetFile("{filespec}")') return File.File(filespec) def GetSpecialFolder(self, arg): log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] GetSpecialFolder("{arg}")') arg = int(arg) folder = '' if arg == 0: folder = WScriptShell.ExpandEnvironmentStrings(self, "%windir%") elif arg == 1: folder = WScriptShell.ExpandEnvironmentStrings(self, "%SystemRoot%\\system32") elif arg == 2: folder = WScriptShell.ExpandEnvironmentStrings(self, "%TEMP%") log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] Returning {folder} for GetSpecialFolder("{arg}")') return folder def GetTempName(self): # pylint:disable=unused-argument log.ThugLogging.add_behavior_warn('[Scripting.FileSystemObject ActiveX] GetTempName()') return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8)) def MoveFile(self, source, destination): # pylint:disable=unused-argument log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] MoveFile("{source}", "{destination}")') log.TextFiles[destination] = log.TextFiles[source] del log.TextFiles[source] def OpenTextFile(self, sFilePathAndName, ForWriting = True, flag = True): log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] OpenTextFile("{sFilePathAndName}", ' f'"{ForWriting}" ,' f'"{flag}")') log.ThugLogging.log_exploit_event(self._window.url, "Scripting.FileSystemObject ActiveX", "OpenTextFile", data = { "filename" : sFilePathAndName, "ForWriting": ForWriting, "flag" : flag }, forward = False) if getattr(log, 'TextFiles', None) is None: log.TextFiles = {} if sFilePathAndName in log.TextFiles: return log.TextFiles[sFilePathAndName] stream = TextStream.TextStream() stream._filename = sFilePathAndName if log.ThugOpts.local and sFilePathAndName in (log.ThugLogging.url, ): # pragma: no cover 
with open(sFilePathAndName, encoding = 'utf-8', mode = 'r') as fd: data = fd.read() stream.Write(data) log.TextFiles[sFilePathAndName] = stream return stream
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import, division, print_function, \
    with_statement  # use the features of python 3

import collections
import logging
import time

# this LRUCache is optimized for concurrency, not QPS
# n: concurrency, keys stored in the cache
# m: visits not timed out, proportional to QPS * timeout
# get & set is O(1), not O(n). thus we can support very large n
# TODO: if timeout or QPS is too large, then this cache is not very efficient,
#       as sweep() causes a long pause


class LRUCache(collections.MutableMapping):  # ABCs for read-only and mutable mappings.
    """This class is not thread safe"""

    def __init__(self, timeout=60, close_callback=None, *args, **kwargs):
        self.timeout = timeout  # the cache expiry time
        self.close_callback = close_callback  # called when a value is about to be swept from the cache
        self._store = {}  # dict<key, value>: stores the cached key-value data
        self._time_to_keys = collections.defaultdict(list)  # defaultdict<time, list<key>>
        # defaultdict: dict subclass that calls a factory function to supply missing values
        self._keys_to_last_time = {}  # dict<key, time>: the last time each key was visited
        self._last_visits = collections.deque()  # deque<time>: every time at which some key was visited
        self.update(dict(*args, **kwargs))  # use the free update to set keys

    def __getitem__(self, key):
        # O(1)
        t = time.time()
        self._keys_to_last_time[key] = t
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)
        return self._store[key]

    def __setitem__(self, key, value):
        # O(1)
        t = time.time()
        self._keys_to_last_time[key] = t
        self._store[key] = value
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)

    def __delitem__(self, key):
        # O(1)
        del self._store[key]
        del self._keys_to_last_time[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    def sweep(self):
        # O(m)
        now = time.time()
        c = 0  # used to log how many keys have been swept
        while len(self._last_visits) > 0:
            least = self._last_visits[0]  # fetch the oldest visit time
            if now - least <= self.timeout:  # the oldest visit hasn't expired yet
                break
            if self.close_callback is not None:  # a callback function has been set
                for key in self._time_to_keys[least]:  # each key visited at the oldest time
                    if key in self._store:  # the key is still cached
                        if now - self._keys_to_last_time[key] > self.timeout:
                            # the key's most recent visit has expired too
                            value = self._store[key]
                            self.close_callback(value)  # call the callback with the value
            for key in self._time_to_keys[least]:
                # use popleft to remove the oldest time point from the visit log
                self._last_visits.popleft()
                if key in self._store:
                    if now - se
lf._keys_to_last_time[key] > self.timeout: del self._store[key] del self._keys_to_last_time[key] c += 1 del self._time_to_keys[least] if c
: logging.debug('%d keys swept' % c) def test(): c = LRUCache(timeout=0.3) c['a'] = 1 assert c['a'] == 1 time.sleep(0.5) c.sweep() assert 'a' not in c c['a'] = 2 c['b'] = 3 time.sleep(0.2) c.sweep() assert c['a'] == 2 assert c['b'] == 3 time.sleep(0.2) c.sweep() c['b'] time.sleep(0.2) c.sweep() assert 'a' not in c assert c['b'] == 3 time.sleep(0.5) c.sweep() assert 'a' not in c assert 'b' not in c if __name__ == '__main__': test()
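
# test() above never exercises close_callback; a minimal sketch of that path
# (demo_close_callback is illustrative and not part of the original module).
def demo_close_callback():
    closed = []
    c = LRUCache(timeout=0.1, close_callback=closed.append)
    c['conn'] = 'fake-socket'
    time.sleep(0.2)
    c.sweep()  # the entry has timed out, so the callback fires before removal
    assert closed == ['fake-socket']
    assert 'conn' not in c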
# -*- coding: utf-8 -*- from __future__ import unic
ode_literals

from blinker import Namespace

namespace = Namespace()

#: Triggered when a dataset is published
on_dataset_published = namespace.signal('on-dataset-published')
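
# A sketch of how such a signal is typically consumed; the receiver below is
# illustrative and not part of this module. blinker invokes receivers as
# receiver(sender, **kwargs).
@on_dataset_published.connect
def _log_publication(sender, **kwargs):
    print('dataset published:', sender)

# Publishing code would then emit the signal with the dataset as sender:
# on_dataset_published.send(dataset)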
# AnalogClock's font selector for setup dialog
# E. A. Tacao <e.a.tacao |at| estadao.com.br>
# http://j.domaindlx.com/elements28/wxpython/
# 15 Feb 2006, 22:00 GMT-03:00
# Distributed under the wxWidgets license.

import wx
from wx.lib.newevent import NewEvent
from wx.lib.buttons import GenButton

#----------------------------------------------------------------------------

(FontSelectEvent, EVT_FONTSELECT) = NewEvent()

#----------------------------------------------------------------------------

class FontSelect(GenButton):
    def __init__(self, parent, size=(75, 21), value=None):
        GenButton.__init__(self, parent, wx.ID_ANY, label="Select...",
                           size=size)
        self.SetBezelWidth(1)
        self.parent = parent
        self.SetValue(value)
        self.parent.Bind(wx.EVT_BUTTON, self.OnClick, self)

    def GetValue(self):
        return self.value

    def SetValue(self, value):
        if value is None:
            value = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
        self.value = value

    def OnClick(self, event):
        data = wx.FontData()
        data.EnableEffects(False)
        font = self.value
        font.SetPointSize(10)
        data.SetInitialFont(font)

        dlg = wx.FontDialog(self, data)
        changed = dlg.ShowModal() == wx.ID_OK
if changed: data = dlg.GetFontData() self.value = data.GetChosenFont() self.Refresh() dlg.Destroy() if changed: nevt = FontSelectEvent(id=self.GetId(), obj=s
elf, val=self.value) wx.PostEvent(self.parent, nevt) # ## ### eof
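
# A usage sketch: host the FontSelect button on a panel and listen for the
# custom event it posts after the dialog is confirmed (the Demo frame is
# illustrative only).
class Demo(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, None, title="FontSelect demo")
        panel = wx.Panel(self)
        self.selector = FontSelect(panel)
        panel.Bind(EVT_FONTSELECT, self.OnFontSelect)

    def OnFontSelect(self, evt):
        # evt.val carries the wx.Font chosen in the dialog
        print("chosen font: %s" % evt.val.GetNativeFontInfoDesc())

if __name__ == "__main__":
    app = wx.App(False)
    Demo().Show()
    app.MainLoop()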
import binascii
from cryptography.hazmat.primitives import hmac from cryptography.hazmat.primitives.hashes import SHA1 from cryptography.hazmat.primitives.constant_time import bytes_eq import cryptography.hazmat.backends from django.conf import settings from django.core.mail import send_mail from django.http im
port HttpResponse
from django.views.decorators.csrf import csrf_exempt

crypto_backend = cryptography.hazmat.backends.default_backend()


def verify_signature(request, request_body):
    hmac_hmac = hmac.HMAC(settings.GITHUB_WEBHOOK_SECRET, SHA1(), crypto_backend)
    hmac_hmac.update(request_body)
    signature = b'sha1=' + binascii.hexlify(hmac_hmac.finalize())
    # The header arrives as text; encode it so bytes_eq compares bytes to bytes.
    header = request.META.get('HTTP_X_HUB_SIGNATURE', '').encode('ascii')
    return bytes_eq(signature, header)


@csrf_exempt
def receive_hook(request):
    # Refuse payloads whose signature does not verify.
    if not verify_signature(request, request.body):
        return HttpResponse(status=403)
    send_mail('Hook from github', request.body,
              settings.SERVER_EMAIL,
              [t[-1] for t in settings.ADMINS])
    return HttpResponse(status=200)
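
# For tests, a matching header can be generated the same way on the sending
# side; sign_payload is a hypothetical helper, and the secret must be the
# same bytes object as settings.GITHUB_WEBHOOK_SECRET.
def sign_payload(secret, body):
    mac = hmac.HMAC(secret, SHA1(), crypto_backend)
    mac.update(body)
    return 'sha1=' + binascii.hexlify(mac.finalize()).decode('ascii')

# e.g. in a Django test:
# client.post('/hook/', data=body, content_type='application/json',
#             HTTP_X_HUB_SIGNATURE=sign_payload(b'secret', body))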
# # spyne - Copyright (C) Spyne contributors. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 # """The typed dict module""" from itertools import chain class tdict(dict): def __init__(self, kt=None, vt=None, data=None): """This is a typed dict implementation that optionally enforces given types on contained values on assignment.""" self._kt = kt self._vt = vt if kt is None and vt is None: self.check = self._check_noop elif kt is None: self.check = self._check_v elif vt is None: self.check = self._check_k else: self.check = self._check_kv if data is not None: self.update(data) def _check_noop(self, *_): pass def _check_k(self, key, _): if not isinstance(key, self._kt): raise TypeError(repr(key)) def _check_v(self, _, value): if not isinstance(value, self._vt):
raise TypeError(repr(value)) def _check_kv(self, key, value): if not isinstance(key, self._kt): raise TypeError(repr(key)) if not isinstance(value, self._vt): raise TypeError(repr(value)) def __setitem__(self, k
ey, value):
        self.check(key, value)

        super(tdict, self).__setitem__(key, value)

    def update(self, E=None, **F):
        if E is None:
            it = F.items()
        else:
            try:
                it = chain(E.items(), F.items())
            except AttributeError:
                # E is a sequence of (key, value) pairs
                it = chain(E, F.items())

        for k, v in it:
            self[k] = v

    def setdefault(self, k, d=None):
        # Run each check only when the corresponding type was actually given.
        if self._kt is not None:
            self._check_k(k, d)
        if self._vt is not None:
            self._check_v(k, d)

        return super(tdict, self).setdefault(k, d)

    @classmethod
    def fromkeys(cls, S, v=None):
        kt = vt = None

        if len(S) > 0:
            kt, = set((type(s) for s in S))

        if v is not None:
            vt = type(v)

        retval = tdict(kt, vt)

        for s in S:
            retval[s] = v

        return retval

    def __repr__(self):
        return "tdict(kt=%s, vt=%s, data=%s)" % \
                                (self._kt, self._vt, super(tdict, self).__repr__())
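
# A short usage sketch (not part of the original module): a tdict keyed by
# str with int values accepts conforming pairs and raises TypeError otherwise.
def _tdict_demo():
    d = tdict(str, int)
    d['one'] = 1          # passes both checks
    d.update({'two': 2})  # update() funnels through __setitem__, so it checks too
    try:
        d['three'] = '3'  # wrong value type
    except TypeError as exc:
        print('rejected: %r' % (exc,))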
governing permissions and # limitations under the License. # # Author: mwu@google.com (Mingyu Wu) """Unittest for baserunner module.""" __author__ = 'mwu@google.com (Mingyu Wu)' import os import shutil import sys import tempfile import time import unittest from lib import baserunner from lib import filesystemhandlerextend from lib import mock_emailmessage from lib import mock_reporter from lib import mock_scanscripts from lib import pyreringconfig from lib import pyreringutil global_settings = pyreringconfig.GlobalPyreRingConfig.settings class BaseRunnerTest(unittest.TestCase): """Unit test cases for BaseRunner class.""" def setUp(self): # I should config global_settings here instead of read it from file system. self.tempdir = tempfile.mkdtemp() root_dir = os.path.abspath(os.path.join(os.path.split(sys.argv[0])[0], '../')) global_settings.update( {'report_dir': os.path.join(self.tempdir, 'report'), 'email_recipients': os.getenv('LOGNAME'), 'host_name': 'test.host', 'log_file': 'pyrering.log', 'file_errors': False, 'project_name': 'pyrering_unittest', 'root_dir': root_dir, 'sendmail': False, 'runner': 'baserunner', 'source_dir': os.path.join(root_dir, 'test'), 'tester': os.getenv('LOGNAME'), 'FATAL_STRING': 'Fatal:', 'header_file': 'header_info.txt', 'time': time.strftime('%Y%m%d%H%M'), 'skip_setup': False, }) # get a default config and mocks self.one_config = pyreringutil.PRConfigParser().Default() self.scanner = mock_scanscripts.MockScanScripts() self.emailmessage = mock_emailmessage.MockEmailMessage() self.reporter = mock_reporter.MockTxtReporter() self.runner = baserunner.BaseRunner( name='test', scanner=self.scanner, email_message=self.emailmessage, filesystem=filesystemhandlerextend.FileSystemHandlerExtend(), reporter=self.reporter) self.runner.Prepare() if not os.path.isdir(global_settings['report_dir']): os.makedirs(global_settings['report_dir']) # I don't want the unit test to mess with the original log file. global_settings['log_file'] += '.unittest' def tearDown(self): self.runner.CleanUp() self.runner = '' pyreringconfig.Reset() self.scanner.CleanConfig() shutil.rmtree(self.tempdir) def testFindHeaderInfoFile(self): global_settings['header_file'] = os.path.join(self.tempdir, 'header.txt') fh = open(global_settings['header_file'], 'w') fh.write('test info') fh.close() self.one_config['TEST_SCRIPT'] = 'echo 1' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testFindHeaderInfoFile'], False) self.assertEqual(self.reporter.header, 'test info') self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 1) # Positive Test Cases: def testOneCommand(self): """A simple sleep command takes some time to finish.""" # prepare the test script here self.one_config['TEST_SCRIPT'] = 'sleep 3' # set the mock scanscript to return this thing. self.scanner.SetConfig([self.one_config]) # now run the test and return should be expected. 
result = self.runner.Run(['testOneCommand'], False) self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 1) def testEchoCommand(self): """A simple command has output on stdout.""" self.one_config['TEST_SCRIPT'] = 'echo testEchoCommand' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testEchoCommand'], False) self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 1) #TODO(mwu): need to check the log file has this hello line def testEchoToSTDERRCommand(self): """A simple command has output redirect to stderr.""" self.one_config['TEST_SCRIPT'] = 'echo testEchoToSTDERRCommand >&2' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testEchoSTDERRCommand'], False) self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 1) #TODO(mwu): need to check the log file has this hello line def testRunScript(self): """A real script to run.""" self.one_config['TEST_SCRIPT'] = os.path.join(global_settings['root_dir'], 'test/test1_echo.sh') self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testRunScript'], False) self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 1) #TODO(mwu): need to check the log file has the echo output def testRunScripts(self): """2 scripts to be run.""" self.one_config['
TEST_SCRIPT'] = 'echo testRunScripts1' config2 = pyreringutil.PRConfigParser().Default() config2['TE
ST_SCRIPT'] = 'echo testRunScripts2' self.scanner.SetConfig([self.one_config, config2]) result = self.runner.Run(['testRunScripts'], False) self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 2) # TODO(mwu): verify both scripts run fine def testEmailSend(self): """Test Email should be send.""" self.one_config['TEST_SCRIPT'] = 'echo send_email_test;exit 1' self.scanner.SetConfig([self.one_config]) try: self.runner.Run(['testEmailSend'], True) except self.emailmessage.EmailCalledError: self.assertTrue(True) else: self.fail(msg='Send email was not called') def testEmailNotSendIfTestPass(self): """Test email should not go if all tests pass.""" self.one_config['TEST_SCRIPT'] = 'echo send_email_test;exit 0' self.scanner.SetConfig([self.one_config]) try: self.runner.Run(['testEmailSend'], True) except self.emailmessage.EmailCalledError: self.fail() # Negative Test Cases def testTimeoutCommand(self): """A command times out.""" self.one_config['TEST_SCRIPT'] = 'echo timeouttest; sleep 8' self.one_config['TIMEOUT'] = 2 self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testTimeoutCommand'], False) self.assertEqual(result, 1) self.assertEqual(self.runner.timeout, 1) def testNonExistCommand(self): """Test a wrong system command.""" self.one_config['TEST_SCRIPT'] = 'nonexist_command' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testNonExistCommand'], False) self.assertEqual(result, 1) self.assertEqual(self.runner.failed, 1) def testNonExistScript(self): """Test a nonexist script.""" self.one_config['TEST_SCRIPT'] = '/tmp/nonexist_script.sh' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testNonExistScript'], False) self.assertEqual(result, 1) self.assertEqual(self.runner.failed, 1) def testPermissionDenied(self): """Test something without permission.""" self.one_config['TEST_SCRIPT'] = 'touch /pyrering.txt' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testPermissionDenied'], False) self.assertEqual(result, 1) self.assertEqual(self.runner.failed, 1) def testCatchWarningMessage(self): """Test a command has warning output.""" self.one_config['TEST_SCRIPT'] = 'echo warn message' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testCatchWarningMessage'], False) self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 1) def testCatchFatalMessage(self): """Test a command has fatal error message even exit code still 0.""" self.one_config['TEST_SCRIPT'] = 'echo Fatal:;echo anotherline' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testCatchFatalMessage'], False) self.assertEqual(result, 1) self.assertEqual(self.runner.failed, 1) def testOutputLargeMessage(self): """Test a test can have large screen output. As default the stdout only has a 4k buffer limit, so the code should clean up the buffer while running the test, otherwise the writing to buffer will be blocked when the buffer is full. """ self.one_config['TEST_SCRIPT'] =
_grad} if self.momentum > 0: kwargs['momentum'] = self.momentum if self.clip_gradient: kwargs['clip_gradient'] = self.clip_gradient if aggregate: if not multi_precision: if self.momentum > 0: multi_sgd_mom_update(*_flatten_list(zip(weights, grads, states)), out=weights, num_weights=len(weights), lrs=lrs, wds=wds, **kwargs) else: multi_sgd_update(*_flatten_list(zip(weights, grads)), out=weights, num_weights=len(weights), lrs=lrs, wds=wds, **kwargs) else: if self.momentum > 0: multi_mp_sgd_mom_update(*_flatten_list(zip(weights, grads, *zip(*states))), out=weights, num_weights=len(weights), lrs=lrs, wds=wds, **kwargs) else: multi_mp_sgd_update(*_flatten_list(zip(weights, grads, list(zip(*states))[1])), out=weights, num_weights=len(weights), lrs=lrs, wds=wds, **kwargs) else: for weight, grad, state, lr, wd in zip(weights, grads, states, lrs, wds): if not multi_precision: if state is not None: sgd_mom_update(weight, grad, state, out=weight, lazy_update=self.lazy_update, lr=lr, wd=wd, **kwargs) else: sgd_update(weight, grad, out=weight, lazy_update=self.lazy_update, lr=lr, wd=wd, **kwargs) else: if state[0] is not None: mp_sgd_mom_update(weight, grad, state[0], state[1], out=weight, lr=lr, wd=wd, **kwargs) else: mp_sgd_update(weight, grad, state[1], out=weight, lr=lr, wd=wd, **kwargs) def update(self, index, weight, grad, state): self._update_impl(index, weight, grad, state, multi_precision=False) def update_multi_precision(self, index, weight, grad, state): if not isinstance(index, (tuple, list)): use_multi_precision = self.multi_precision and weight.dtype == numpy.float16 else: use_multi_precision = self.multi_precision and weight[0].dtype == numpy.float16 self._update_impl(index, weight, grad, state, multi_precision=use_multi_precision) @register class Signum(Optimizer): r"""The Signum optimizer that takes the sign of gradient or momentum. The optimizer updates the weight by:: rescaled_grad = rescale_grad * clip(grad, clip_gradient) + wd * weight state = momentum * state + (1-momentum)*rescaled_grad weight = (1 - lr * wd_lh) * weight - lr * sign(state) References ---------- Jeremy Bernstein, Yu-Xiang Wang, Kamyar Azizzadenesheli & Anima Anandkumar. (2018). signSGD: Compressed Optimisation for Non-Convex Problems. In ICML'18. See: https://arxiv.org/abs/1802.04434 For details of the update algorithm see :class:`~mxnet.ndarray.signsgd_update` and :class:`~mxnet.ndarray.signum_update`. This optimizer accepts the following parameters in addition to those accepted by :class:`.Optimizer`. Parameters ---------- momentum : float, optional The momentum value. 
wd_lh : float, optional The amount of decoupled weight decay regularization, see details in the original paper at:\ https://arxiv.org/abs/1711.05101 """ def __init__(self, learning_rate=0.01, momentum=0.9, wd_lh=0.0, **kwargs): super(Signum, self).__init__(learning_rate=learning_rate, **kwargs) self.momentum = momentum self.wd_lh = wd_lh def create_state(self, index, weight): momentum = None if self.momentum != 0.0: momentum = zeros(weight.shape, weight.context, dtype=weight.dtype, stype=weight.stype) return momentum def _update_impl(self, index, weight, grad, state): assert(isinstance(weight, NDArray)) assert(isinstance(grad, NDArray)) self._update_count(index) lr = self._get_lr(index) wd = self._get_wd(index) kwargs = {'rescale_grad': self.rescale_grad} if self.momentum > 0: kwargs['momentum'] = self.momentum if self.clip_gradient: kwargs['clip_gradient'] = self.clip_gradient if self.wd_lh: kwargs['wd_lh'] = self.wd_lh if state is not None: signum_update(weight, grad, state, out=weight, lr=lr, wd=wd, **kwargs) else: signsgd_update(weight, grad, out=weight, lr=lr, wd=wd, **kwargs) def update(self, index, weight, grad, state): self._update_impl(index, weight, grad, state) @register class FTML(Optimizer): """The FTML optimizer. This class implements the optimiz
er described in *FTML - Follow the Moving Leader in Deep Learning*, available at http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf. Denote time step by t. The optimizer updates the weight by:: rescaled_grad = clip(grad * rescale_grad + wd * weight, clip_gradient) v = beta2 * v + (1 - beta2) * square(rescaled_grad)
        d_t = (1 - power(beta1, t)) / lr * \
              (square_root(v / (1 - power(beta2, t))) + epsilon)
        z = beta1 * z + (1 - beta1) * rescaled_grad - (d_t - beta1 * d_(t-1)) * weight
        weight = - z / d_t

    For details of the update algorithm, see :class:`~mxnet.ndarray.ftml_update`.

    This optimizer accepts the following parameters in addition to
    those accepted by :class:`.Optimizer`.

    Parameters
    ----------
    beta1 : float, optional
        0 < beta1 < 1. Generally close to 0.5.
    beta2 : float, optional
        0 < beta2 < 1. Generally close to 1.
    epsilon : float, optional
        Small value to avoid division by 0.
    """
    def __init__(self, beta1=0.6, beta2=0.999, epsilon=1e-8, **kwargs):
        super(FTML, self).__init__(**kwargs)
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon

    def create_state(self, index, weight):
        return (zeros(weight.shape, weight.context, dtype=weight.dtype),  # d_0
                zeros(weight.shape, weight.context, dtype=weight.dtype),  # v_0
                zeros(weight.shape, weight.context, dtype=weight.dtype))  # z_0

    def update(self, index, weight, grad, state):
        assert(isinstance(weight, NDArray))
        assert(isinstance(grad, NDArray))
        self._update_count(index)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        t = self._index_update_count[index]

        kwargs = {'beta1': self.beta1, 'beta2': self.beta2, 'epsilon': self.epsilon,
                  'rescale_grad': self.rescale_grad, 't': t}
        if self.clip_gradient:
            kwargs['clip_grad'] = self.clip_gradient

        prev_d, prev_v, prev_z = state
        ftml_update(weight, grad, prev_d, prev_v, prev_z, out=weight,
                    lr=lr, wd=wd, **kwargs)

@register
class LBSGD(Optimizer):
    """The Large Batch SGD optimizer with momentum and weight decay.

    The optimizer updates the weight by::

        state = momentum * state + lr * rescale_grad * clip(grad, clip_gradient) + wd * weight
        weight = weight - state

    For details of the update algorithm see :class:`~mxnet.ndarray.sgd_update`
    and :class:`~mxnet.ndarray.sgd_mom_update`.
    In addition to the SGD updates the LBSGD optimizer uses the LARS, Layer-wise
    Adaptive Rate Scaling, algorithm to have a separate learning rate for each
    layer of the network, which leads to better stability over large batch sizes.

    This optimizer accepts the following parameters in addition to those accepted
# -*- coding: utf-8 -*- # Form implementation generated from reading ui fil
e 'qhangups/qhangupsbrowser.ui' # # Created by: PyQt5 UI code generator 5.7 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_QHangupsBrowser(object): def setupUi(self, QHangups
Browser): QHangupsBrowser.setObjectName("QHangupsBrowser") QHangupsBrowser.resize(600, 450) self.verticalLayout = QtWidgets.QVBoxLayout(QHangupsBrowser) self.verticalLayout.setObjectName("verticalLayout") self.browserWebView = QtWebKitWidgets.QWebView(QHangupsBrowser) self.browserWebView.setUrl(QtCore.QUrl("about:blank")) self.browserWebView.setObjectName("browserWebView") self.verticalLayout.addWidget(self.browserWebView) self.retranslateUi(QHangupsBrowser) QtCore.QMetaObject.connectSlotsByName(QHangupsBrowser) def retranslateUi(self, QHangupsBrowser): _translate = QtCore.QCoreApplication.translate QHangupsBrowser.setWindowTitle(_translate("QHangupsBrowser", "QHangups - Browser")) from PyQt5 import QtWebKitWidgets
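# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated file above): pyuic5-generated
# classes are typically driven by instantiating a plain QWidget, calling
# setupUi() on it, and showing it. This assumes a PyQt5 build that still
# ships QtWebKit; the URL is purely illustrative.
if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QWidget()
    ui = Ui_QHangupsBrowser()
    ui.setupUi(window)                   # builds the layout and the QWebView
    ui.browserWebView.setUrl(QtCore.QUrl('https://example.com/'))
    window.show()
    sys.exit(app.exec_())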
'''Test cases for QLayout handling of child widgets references''' import unittest from sys import getrefcount from PySide.QtGui import QHBoxLayout, QVBoxLayout, QGridLayout, QWidget from PySide.QtGui import QStackedLayout, QFormLayout from PySide.QtGui import QApplication, QPushButton, QLabel from helper import UsesQApplication class SaveReference(UsesQApplication): '''Test case to check if QLayout-derived classes increment the refcount of widgets passed to addWidget()''' # Adding here as nose can't see the qapplication attrib we inherit qapplication = True def setUp(self): #Acquire resources super(SaveReference, self).setUp() self.widget1 = QPushButton('click me') self.widget2 = QLabel('aaa') def tearDown(self): #Release resources del self.widget2 del self.widget1 super(SaveReference, self).tearDown() def checkLayoutReference(self, layout): #Checks the reference cound handling of layout.addWidget self.assertEqual(getrefcount(self.widget1), 2) layout.addWidget(self.widget1) self.assertEqual(getrefcount(self.widget1), 3) self.assertEqual(getrefcount(self.widget2), 2) layout.addWidget(self.widget2) self.assertEqual(getrefcount(self.widget2), 3) # Check if doesn't mess around with previous widget refcount self.assertEqual(getrefcount(self.widget1), 3) def testMoveLayout(self): l = QHBoxLayout() self.assertEqual(getrefcount(self.widget1), 2) l.addWidget(self.widget1) self.assertEqual(getrefcount(self.widget1), 3) w = QWidget() w.setLayout(l) self.assertEqual(getrefcount(self.widget1), 3) def testHBoxReference(self): #QHBoxLayout.addWidget reference count w = QWidget() self.checkLayoutReference(QHBoxLayout(w)) def testVBoxReference(self): #QVBoxLayout.addWidget reference count w = QWidget() self.checkLayoutReference(QVBoxLayout(w)) def testGridReference(self): #QGridLayout.addWidget reference count w = QWidget() self.checkLayoutReference(QGridLayout(w)) def testFormReference(self): #QFormLayout.addWidget reference count w = QWidget() self.checkLayoutReference(QFormLayout(w)) def testStackedReference(self): #QStackedLayout.addWidget reference count w = QWidget() self.checkLayoutReference(QStackedLayout(w)) class MultipleAdd(UsesQApplication): '''Test case to check if refcount is incremented only once when multiple calls to addWidget are made with the same widget''' qapplication = True def setUp(self): #Acquire resources super(MultipleAdd, self).setUp() self.widget = QPushButton('click me') self.win = QWidget() self.layout = QHBoxLayout(self.win) def tearDown(self): #Release resources del self.widget del self.layout del self.win super(MultipleAdd, self).tearDown() def testRefCount(self): #Multiple QLayout.addWidget calls on the same widget self.assertEqual(getrefcount(self.widget), 2) self.layout.addWidget(self.widget) self.assertEqual(getrefcount(self.widget), 3) self.layout.addWidget(self.widget) self.assertEqual(getrefcount(self.widget), 3) self.layout.addWidget(self.widget) self.assertEqual(getrefcount(self.widget), 3) class InternalAdd(UsesQApplication): def testInternalRef(
self):
        mw = QWidget()
        w = QWidget()
        ow = QWidget()

        topLayout = QGridLayout()

        # unique reference
        self.assertEqual(getrefcount(w), 2)
        self.assertEqual(getrefcount(ow), 2)

        topLayout.addWidget(w, 0, 0)
        topLayout.addWidget(ow, 1, 0)

        # the layout keeps the reference
        self.assertEqual(getrefcount(w), 3)
        self.assertEqual(getrefcount(ow), 3)

        mainLayout = QGridLayout()
        mainLayout.addLayout(topLayout, 1, 0, 1, 4)

        # the same reference
        self.assertEqual(getrefcount(w), 3)
        self.assertEqual(getrefcount(ow), 3)

        mw.setLayout(mainLayout)

        # now transfer the ownership to mw
        self.assertEqual(getrefcount(w), 3)
        self.assertEqual(getrefcount(ow), 3)

        del mw

        # remove the ref and invalidate the widget
        self.assertEqual(getrefcount(w), 2)
        self.assertEqual(getrefcount(ow), 2)

if __name__ == '__main__':
    unittest.main()
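# ---------------------------------------------------------------------------
# Standalone sketch (not part of the test suite above) of the invariant the
# tests assert: QLayout.addWidget() bumps the widget's CPython refcount
# exactly once, and re-adding the same widget does not bump it again. Uses
# only names already imported at the top of this file.
def _refcount_demo():
    app = QApplication([])
    button = QPushButton('click me')
    layout = QHBoxLayout()
    base = getrefcount(button)
    layout.addWidget(button)
    assert getrefcount(button) == base + 1   # the layout holds one reference
    layout.addWidget(button)
    assert getrefcount(button) == base + 1   # no second reference on re-add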
# Copyright (c) by its authors.
# Some rights reserved. See LICENSE, AUTHORS.

from peer import *
from viewer import Viewer
from editor import Editor

class SearchDocument(Peer):
    # Receiving pillows:
    Search = Pillow.In

    Sending = [
        Viewer.In.Document
    ]

    Routings = [
        (Editor.Out.FieldChanged, Viewer.In.Refresh)
    ]
def __init__(self, searchRoom, resultRoom=None, document=None, identifier=None, appendWildcard=False): Peer.__init__(self, searchRoom) if not resultRoom: resultRoom = searchRoom self._resultRoom = resultRoom self._identifier = identifier self._appendWildcard = appendWildcard if document:
self._document = document else: from wallaby.common.queryDocument import QueryDocument self._document = QueryDocument() self._document.set('identifier', self._identifier) self._document.set('query', '*') self._catch(SearchDocument.In.Search, self._search) def initialize(self): # set search field editable self._throw(Viewer.In.Document, self._document) self._throw(Editor.In.Enable, True) def _search(self, pillow, feathers): query = self._document.get('query') if self._appendWildcard and (len(query) == 0 or query[-1] != "*"): self._document.set('query', query + "*") from abstractQuery import AbstractQuery self._throw(self._resultRoom+":"+AbstractQuery.In.Query, self._document)
def countup(n):
    if n >= 10:
        print "Blastoff!"
    else:
        print n
        countup(n + 1)

def main():
    countup(1)

main()

def countdown_from_to(start, stop):
    if start == stop:
        print "Blastoff!"
    elif start <= stop:
        print "Invalid pair"
    else:
        print start
        countdown_from_to(start - 1, stop)

def main():
    countdown_from_to(89, 53)

main()

def adder(sum_):
    number = raw_input("Next Number")
    if number == "":
        print "The Sum Is {}".format(sum_)
    else:
        # float() raises ValueError for non-numeric input; the original
        # `number == float` comparison (string vs. type) could never be true.
        try:
            sum_ += float(number)
            print "Running total: {}".format(sum_)
        except ValueError:
            print "Not a number: {}".format(number)
        adder(sum_)

def main():
    sum_ = 0
    adder(sum_)

main()
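# ---------------------------------------------------------------------------
# Hedged sketch, not part of the exercises above: the same running-total
# logic as adder(), written iteratively so the accumulator pattern is
# explicit (Python 2, like the rest of this file).
def adder_loop():
    sum_ = 0.0
    while True:
        number = raw_input("Next Number")
        if number == "":
            print "The Sum Is {}".format(sum_)
            return
        try:
            sum_ += float(number)
            print "Running total: {}".format(sum_)
        except ValueError:
            print "Not a number: {}".format(number)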
#!/usr/bin/env python """zip source directory tree""" import argparse import fnmatch import logging import os import re import subprocess import zipfile def get_version(): command = ['git', 'describe', '--tags', '--dirty', '--always'] return subprocess.check_output(command).decode('utf-8') def source_walk(root): root = os.path.abspath(root) regex = re.compile(fnmatch.translate('*.py[co]')) for path, _, files in os.walk(root): files[:] = [f for f in files if rege
x.match(f) is None] for filename in files: fullpath = os.path.join(path, filename) yield fullpath, os.path.relpath(fullpath, root) def setup(): argparser = argparse.ArgumentParser(description=__doc__) argparser.add_argument( '-d', '--debug', action='store_true', help='print debug information'
) argparser.add_argument( '-o', metavar='zipfile', dest='output', help='output file name') argparser.add_argument( 'source', help='source directory') args = argparser.parse_args() loglevel = logging.DEBUG if args.debug else logging.WARNING logging.basicConfig(format='%(levelname)s: %(message)s', level=loglevel) if not os.path.isdir(args.source): logging.critical('"%s" is not a directory', args.source) return if args.output is None: args.output = args.source + '.zip' with zipfile.ZipFile(args.output, 'w', zipfile.ZIP_DEFLATED) as fzip: fzip.writestr('version.txt', get_version()) for path, relpath in source_walk(args.source): fzip.write(path, relpath) if __name__ == '__main__': setup()
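# ---------------------------------------------------------------------------
# Hedged usage sketch, not called by the script: shows what source_walk()
# yields. 'myproj' is a hypothetical directory; each item is (absolute path,
# archive-relative path), with *.pyc / *.pyo files already filtered out.
def _demo_walk(root='myproj'):
    for fullpath, relpath in source_walk(root):
        print(relpath)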
# -*- coding: utf-8 -*- # File: enemy.py # Author: Casey Jones # # Created on July 20, 2009, 4:48 PM # # This file is part of Alpha Beta Gamma (abg). # # ABG is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ABG is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ABG. If not, see <http://www.gnu.org/licenses/>. #class to handle all enemies on screen import sys, pygame, frametime, properties, random from enemy import Enemy class Enemies: enemies = [] blackSurface = pygame.Surface([Enemy.enemy.get_width(), Enemy.enemy.get_height()]) blackSurface.fill([0,0,0]) screen = None def set_screen(self, screen): self.screen = screen def create(self): #range that the current player ship can shoot where_spawn = random.randint(1, properties.width - Enemy.enemy.get_width()) lenemy = Enemy(where_spawn) self.enemies.append(lenemy) def move(self, bullet): to_update = [] if frametime.can_create_enemy(): self.create() to_delete = [] to_update += [x.enemyrect for x in self.enemies] if len(self.enemies) > 0: for i in range(len(self.enemies)): self.enemies[i].update(bullet) self.screen.blit(self.blackSurface, self.enemies[i].enemyrect) self.screen.blit(Enemy.enemy, self.enemies[i].enemyrect) #If enemy goes off the bottom of the screen if self.enemies[i].enemyrect.top > 800: to_delete.append(i) for x in to_delete: self.remove(x) to_update += [x.enemyrect for x in self.enemies] return to_update def getEnemies(self): return self.enemies def
remove(self, index): try: to_update = self.enemies[index].enemyrect self.screen.blit(self.blackSurface, self.enemies[index].enemyrect) del self.enemies[index] return to_update except IndexError: print("IndexError for enemy {0} of {1}".format(index, len(self.enemies))) def game_over(self): for i in range(len(self.enemies)): self.screen.blit(self.blackSurface, sel
f.enemies[i].enemyrect) del self.enemies[:]
## # Copyright 2011-2019 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), # Flemish Research Foundation (FWO) (http://www.fwo.be/en) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # https://github.com/easybuilders/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. ## """ Module naming scheme API. :author: Jens Timmerman (Ghent University) :author: Kenneth Hoste (Ghent University) """ import re from easybuild.base import fancylogger from easybuild.tools.build_log import EasyBuildError from easybuild.tools.config import Singleton from easybuild.tools.py2vs3 import create_base_metaclass DEVEL_MODULE_SUFFIX = '-easybuild-devel' # singleton metaclass: only one instance is created BaseModuleNamingScheme = create_base_metaclass('BaseModuleNamingScheme', Singleton, object) class ModuleNamingScheme(BaseModuleNamingScheme): """Abstract class for a module naming scheme implementation.""" REQUIRED_KEYS = None def __init__(self, *args, **kwargs): """Initialize logger.""" self.log = fancylogger.getLogger(self.__class__.__name__, fname=False) def is_sufficient(self, keys): """Determine whether specified list of easyconfig parameters is sufficient for this module naming scheme.""" if self.REQUIRED_KEYS is not None: return set(keys).issuperset(set(self.REQUIRED_KEYS)) else: raise EasyBuildError("Constant REQUIRED_KEYS is not defined, " "should specify required easyconfig parameters.") def requires_toolchain_details(self): """ Determine whether toolchain details are required by this module naming scheme, e.g. whether one of det_toolchain_* functions are relied upon. """ return False def det_full_module_name(self, ec): """ Determine full module name, relative to the top of the module path. :param ec: dict-like object with easyconfig parameter values; for now only the 'name', 'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available :return: string with full module name, e.g.: '<compiler>/<mpi_lib>/<name>/<version>' """ raise NotImplementedError def det_short_module_name(self, ec): """ Determine short module name, i.e. the name under which modules will be exposed to users. :param ec: dict-like object with easyconfig parameter values; for now only the 'name', 'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available :return: string with module name, e.g. '<name>/<version>' """ # by default: full module name doesn't include a $MODULEPATH subdir return self.det_full_module_name(ec) def det_install_subdir(self, ec): """ Determine name of software installation subdirectory of install path. 
:param ec: dict-like object with easyconfig parameter values; for now only the 'name', 'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available :return: string with name of subdirectory, e.g.: '<compiler>/<mpi_lib>/<name>/<version>' """ # by default: use full module name as name for install subdir return self.det_full_module_name(ec) def det_module_subdir(self, ec): """ Determine subdirectory for module file in $MODULEPATH. This determines the separation between module names exposed to users, and what's part of the $MODULEPATH. :param ec: dict-like object with easyconfig parameter values; for now only the 'name', 'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available :return: string with subdir path (relative to $MODULEPATH), e.g. '<compiler>/<mpi_lib>' """ # by default: no subdirectory return '' def det_module_symlink_paths(self, ec): """ Determine list of paths in which symlinks to module files must be created. """ # by default: make a symlink from moduleclass subdirectory of $MODULEPATH return [ec['moduleclass']] def det_modpath_extensions(self, ec): """ Determine list of subdirectories for which to extend $MODULEPATH with when this module is loaded (if any). :param ec: dict-like object with easyconfig parameter values; for now only the 'name', 'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available :return: A list of $MODULEPATH subdirectories. """ # by default: an empty list of subdirectories to extend $MODULEPATH with return [] def det_user_modpath_extensions(self, ec): """ Determine list of subdirectories relative to the user-specific modules directory for which to extend $MODULEPATH with when this module is loaded (if any). :param ec: dict-like object with easyconfig parameter values; for now only the 'name', 'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available :return: A list of $MODULEPATH subdirectories. """ # by default: use "system" module path extensions of naming scheme return self.det_modpath_extensions(ec) def det_init_modulepaths(self, ec): """
Determine initial module paths, where the modules that are top of the hierarchy (if any) live. """ return [] def expand_toolchain_load(self, ec=None): """ Determine whether load statements for a toolchain should be expanded to load statements for its depende
ncies.
        This is useful when toolchains are not exposed to users.
        """
        # by default: just include a load statement for the toolchain
        return False

    def is_short_modname_for(self, short_modname, name):
        """
        Determine whether the specified (short) module name is a module for software with the specified name.

        Default implementation checks via a strict regex pattern, and assumes short module names
        are of the form: <name>/<version>[-<toolchain>]
        """
        # raw string avoids an invalid escape sequence warning for \S
        modname_regex = re.compile(r'^%s(/\S+)?$' % re.escape(name))
        res = bool(modname_regex.match(short_modname))
        self.log.debug("Checking whether '%s' is a module name for software with name '%s' via regex %s: %s",
                       short_modname, name, modname_regex.pattern, res)
        return res

    def det_make_devel_module(self):
        """
        Determine if a devel module should be generated.
        Can be used to create a separate set of modules with a different naming scheme.
        Software is already installed beforehand with one naming scheme, including development module.
        """
        return True
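# ---------------------------------------------------------------------------
# Hedged example subclass (not part of EasyBuild): a minimal sketch of how
# the API above is meant to be implemented. It produces flat
# '<name>/<version><versionsuffix>' module names, ignores the toolchain, and
# advertises the parameters it needs via REQUIRED_KEYS so that
# is_sufficient() works.
class FlatModuleNamingScheme(ModuleNamingScheme):
    """Example naming scheme: flat module names, no $MODULEPATH subdirs."""

    REQUIRED_KEYS = ['name', 'version', 'versionsuffix']

    def det_full_module_name(self, ec):
        """Flat '<name>/<version><versionsuffix>' module name."""
        return '%s/%s%s' % (ec['name'], ec['version'], ec['versionsuffix'])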
#!/usr/bin/env python # Copyright 2018, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for the gtest_json_output module.""" import json import os from googletest.test import gtest_json_test_utils from googletest.test import gtest_test_utils GTEST_OUTPUT_SUBDIR = 'json_outfiles' GTEST_OUTPUT_1_TEST = 'gtest_xml_outfile1_test_' GTEST_OUTPUT_2_TEST = 'gtest_xml_outfile2_test_' EXPECTED_1 = { u'tests': 1, u'failures': 0, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'name': u'AllTests', u'testsuites': [{ u'name': u'PropertyOne', u'tests': 1, u'failures': 0, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'TestSomeProperties', u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'PropertyOne', u'SetUpProp': u'1', u'TestSomeProperty': u'1', u'TearDownProp'
: u'1', }], }], } EXPECTED_2 = { u'tests': 1, u'failures': 0, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'name': u'AllTests', u'testsuites': [{ u'name': u'PropertyTwo', u'tests': 1, u'failures': 0, u'disabled': 0, u'errors': 0,
u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'TestSomeProperties', u'status': u'RUN', u'result': u'COMPLETED', u'timestamp': u'*', u'time': u'*', u'classname': u'PropertyTwo', u'SetUpProp': u'2', u'TestSomeProperty': u'2', u'TearDownProp': u'2', }], }], } class GTestJsonOutFilesTest(gtest_test_utils.TestCase): """Unit test for Google Test's JSON output functionality.""" def setUp(self): # We want the trailing '/' that the last "" provides in os.path.join, for # telling Google Test to create an output directory instead of a single file # for xml output. self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(), GTEST_OUTPUT_SUBDIR, '') self.DeleteFilesAndDir() def tearDown(self): self.DeleteFilesAndDir() def DeleteFilesAndDir(self): try: os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + '.json')) except os.error: pass try: os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + '.json')) except os.error: pass try: os.rmdir(self.output_dir_) except os.error: pass def testOutfile1(self): self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_1) def testOutfile2(self): self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_2) def _TestOutFile(self, test_name, expected): gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name) command = [gtest_prog_path, '--gtest_output=json:%s' % self.output_dir_] p = gtest_test_utils.Subprocess(command, working_dir=gtest_test_utils.GetTempDir()) self.assert_(p.exited) self.assertEquals(0, p.exit_code) output_file_name1 = test_name + '.json' output_file1 = os.path.join(self.output_dir_, output_file_name1) output_file_name2 = 'lt-' + output_file_name1 output_file2 = os.path.join(self.output_dir_, output_file_name2) self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2), output_file1) if os.path.isfile(output_file1): with open(output_file1) as f: actual = json.load(f) else: with open(output_file2) as f: actual = json.load(f) self.assertEqual(expected, gtest_json_test_utils.normalize(actual)) if __name__ == '__main__': os.environ['GTEST_STACK_TRACE_DEPTH'] = '0' gtest_test_utils.Main()
###
##################################### # Automatically generated, do not edit. ######################################## from pyvisdk.thirdparty import Enum ProfileNumericComparator = Enum( 'equal', 'greaterThan', 'greaterThanEqual', 'lessTh
an', 'lessThanEqual', 'notEqual', )
__author__ = 'saeedamen'

#
# Copyright 2015 Thalesians Ltd. - http://www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#

"""
marketliquidity_examples

Shows how to calculate market liquidity using bid/ask data and tick counts.

"""

# TODO
# NamoInstaller ActiveX Control 1.x - 3.x # CVE-NOMATCH import logging log = logging.getLogger("Thug") def Install(self, arg): if len(arg) > 1024: log.ThugLogging.log_exploit_event(self._window.url, "NamoInstaller ActiveX", "Overflow in Install method") log.DFT.check_shellcode(arg) if str([arg]).find('http') > -1: log.ThugLogging.add_behavior_warn('[NamoInstaller ActiveX] Insecure download from URL %s' % (arg, )) log.ThugLogging.log_exploit_event(self._window.url, "NamoInsta
ller ActiveX", "Insecure download from URL", forward = False, data = {
"url": arg } ) try: self._window._navigator.fetch(arg, redirect_type = "NamoInstaller Exploit") except Exception: log.ThugLogging.add_behavior_warn('[NamoInstaller ActiveX] Fetch failed')
#!/usr/bin/env python #Alberto from __future__ import print_function, division import os import glob import argparse from subprocess import Popen, PIPE from argparse import RawTextHelpFormatter import sys import re from textwrap import dedent from subprocess import call def warn(*objs): print(*objs, file=sys.stderr) class FileLineWrapper(object): """File reader with line numbers and readRequiredLine""" def __init__(self, f): self.f = f self.line = 0 def close(self): return self.f.close() def readline(self): self.line += 1 return self.f.readline() def readRequiredLine(self): line = self.readline() if not line: raise IOError("Unexpected end of file found: %d" % self.line) return line def __enter__(self): self.f.__enter__() return self def __exit__(self, tp, value, traceback): self.f.__exit__(tp, value, traceback) class GJob: """Class representing a gaussian job""" COMMANDPat = re.compile("\s*#[PTN]* ") SKIPGeomPat = re.compile("geom=\S*check", re.IGNORECASE) LINKPat = re.compile("\s*--Link1--") OPTPat = re.compile("\sopt([=\(]+([^\s\)]+)?\)?)", re.IGNORECASE) FREQPat = re.compile("\sfreq[\s=\(]", re.IGNORECASE) MAXCylPat = re.compile("MaxCycles=\d+", re.IGNORECASE) CALCFCPat = re.compile("readFC|calcfc|calchffc|rcfc", re.IGNORECASE) GEOMPat = re.compile("\s*geom(=\S+)", re.IGNORECASE) GUESSPat = re.compile("\s*guess(=\S+)", re.IGNORECASE) def __init__(self, start, command, middle, coords, end): self.start = start # % directives self.command = command # gaussian command line self.middle = middle # comment, charge and multiplicity self.coords = coords self.end = end # anything after the coordinates def __str__(self): return ''.join( [self.start, self.command, self.middle, self.coords, self.end]) def isOpt(self): return GJob.OPTPat.search(self.command) def isFreq(self): return GJob.FREQPat.search(self.command) def execute(self, outName): com = dedent(""" date>>%s;gaussian.csh >>%s<<'gJOBComs' %s'gJOBComs'""") % (outName,outName, str(self)) #warn(com) status = call(["/bin/csh", "-fc", com]) if status > 0: raise IOError("Gaussian returned error code=%d" % status) p = Popen("tail -n 10 "+outName, shell=True, bufsize=2048, stdin=PIPE, stdout=PIPE, close_fds=True) stdin,stdout= p.stdin, p.stdout #stdin,stdout = os.popen2("tail -n 10 "+outName) stdin.close() lines = stdout.read() stdout.close() return b" Normal termination of Gaussian" in lines def copy(self, chkGeom=False, optSteps='', optCalcFC=False, optReadFC=False): newCom = self.command newMiddle = self.middle newCoords = self.coords ma = GJob.OPTPat.search(newCom) if (optSteps or optCalcFC or optReadFC) and not ma: raise Exception("Not an optimization:" + str(self)) elif optSteps or optCalcFC or optReadFC: optArgs= ma.group(2) if optSteps: optArgs= GJob.MAXCylPat.sub("",optArgs) if optArgs: optArgs += "," optArgs += "MaxCycles="+str(optSteps) if optCalcFC: optArgs = GJob.CALCFCPat.sub("",optArgs) if optArgs: optArgs += "," optArgs += "CalcFC" if optReadFC: optArgs = GJob.CALCFCPat.sub("",optArgs) if optArgs: optArgs += "," optArgs += "ReadFC" optArgs = optArgs.replace(",,",",") if optArgs.startswith(",") : optArgs = optArgs[1:] newCom = GJob.OPTPat.sub(" opt=(%s)"%optArgs,newCom) if chkGeom: newCom = GJob.GEOMPat.sub("",newCom) newCom = GJob.GUESSPat.sub("",newCom) newCom = newCom.rstrip() + " Geom=AllCheck Guess=TCheck\n" newMiddle = "" newCoords = "" return GJob(self.start, newCom, newMiddle, newCoords, self.end) @staticmethod def readNext(inFile): start = "" command = "" middle = "" coords = "" end = "" line = inFile.readline(
) if not line: return None while not GJob.COMMANDPat.match(line): start += line line = inFile.readRequiredLine() while line.strip(): command += line line = inFile.readRequiredLine() if not GJob.SKIPGeomPat.search(command): middle = "\n" line = inFile.readRequiredLine() # read comment lines while line.strip(): middle += line line = inFile.readRequiredLine() middle += line
            # read charge and multiplicity
            middle += inFile.readRequiredLine()

            line = inFile.readRequiredLine()
            while line.strip():
                coords += line
                line = inFile.readRequiredLine()

        while line and not GJob.LINKPat.match(line):
            end += line
            line = inFile.readline()

        return GJob(start, command, middle, coords, end)


desc = """Run a Gaussian optimization job.

Your gInFile may contain multiple jobs. Whenever an optimization job is found
it will be executed in multiple subjobs with MaxCycle=optSteps.
If the optimization does not complete, a frequency calculation is done with
the final geometry.
If the (n-1)'th step was a freq job, its parameters will be retained; if not,
the "CalcFC" option will be added to the opt keyword.
Note that gOpt will modify your Gaussian options somewhat.

Example:
   set n=myName.g ; set nCPU=4 ;
   mysub.py -q medium -jobName $n:r -nCPU $nCPU -totalMem 10 -- gOpt.py -in $n"""

parser = argparse.ArgumentParser(description=desc,
                                 formatter_class=RawTextHelpFormatter)
parser.add_argument('-in', dest='gInFileName', required=True,
                    help='Gaussian command file, out will be name.out')
parser.add_argument('-optSteps', dest='optSteps', required=False, default=8,
                    help='Number of optimization steps to execute before recalculating freq (def=%d)' % 8)
parser.add_argument('-restartJob', metavar="<n>", type=int, required=False, default=0,
                    help='restart this computation with job number <n>. Only for opt jobs.')

args = parser.parse_args()
gInFileName = args.gInFileName
gOutFileName, dummy = os.path.splitext(gInFileName)
gOutFileName += ".out"
restartJob = args.restartJob
optSteps = args.__dict__.get('optSteps', 8)

gJobs = []
with FileLineWrapper(open(gInFileName)) as gInFile:
    gJob = GJob.readNext(gInFile)
    while gJob:
        gJobs.append(gJob)
        gJob = GJob.readNext(gInFile)

lastGJob = None
for gJob in gJobs:
    restartJob -= 1
    if restartJob > 0:
        continue

    if gJob.isOpt():  # and lastGJob != None:
        newGJob = gJob.copy(optSteps=optSteps)
        success = newGJob.execute(gOutFileName)

        while not success:
            if lastGJob and lastGJob.isFreq():
                newGJob = lastGJob.copy(chkGeom=True)
                if not newGJob.execute(gOutFileName):
                    raise IOError("Freq calculation did not complete!")

                newGJob = gJob.copy(optSteps=optSteps, optReadFC=True)
                success = newGJob.execute(gOutFileName)
            else:
                newGJob = gJob.copy(chkGeom=True, optSteps=optSteps, optCalcFC=True)
                success = newGJob.execute(gOutFileName)
    else:
        gJob.execute(gOutFileName)

    lastGJob = gJob
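# ---------------------------------------------------------------------------
# Hedged usage sketch of FileLineWrapper ('input.g' is hypothetical): the
# wrapper counts lines as they are read, and readRequiredLine() raises
# IOError at end of file instead of returning ''.
def _demo_reader(path='input.g'):
    with FileLineWrapper(open(path)) as f:
        first = f.readRequiredLine()     # IOError if the file is empty
        print("line %d: %s" % (f.line, first.rstrip()))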
import math
import sys
import itertools

# Local modules
import uncertainties.core as uncert_core
from uncertainties.core import (to_affine_scalar, AffineScalarFunc,
                                LinearCombination)

###############################################################################

# We wrap the functions from the math module so that they keep track of
# uncertainties by returning an AffineScalarFunc object.

# Some functions from the math module cannot be adapted in a standard
# way so as to work with AffineScalarFunc objects (either as their result
# or as their arguments):

# (1) Some functions return a result of a type whose value and
# variations (uncertainties) cannot be represented by AffineScalarFunc
# (e.g., math.frexp, which returns a tuple). The exception raised
# when not wrapping them with wrap() is more obvious than the
# one obtained when wrapping them (in fact, the wrapped functions
# attempt operations that are not supported, such as calculating a
# subtraction on a result of type tuple).

# (2) Some functions don't take continuous scalar arguments (which can
# be varied during differentiation): math.fsum, math.factorial...
# Such functions can either be:

# - wrapped in a special way.

# - excluded from standard wrapping by adding their name to
# no_std_wrapping

# Math functions that have a standard interface: they take
# one or more float arguments, and return a scalar:
many_scalars_to_scalar_funcs = []

# Some functions require a specific treatment and must therefore be
# excluded from standard wrapping:
# no_std_wrapping = ['modf', 'frexp', 'ldexp', 'fsum', 'factorial']

# Functions with numerical derivatives:
#
# !! Python2.7+: {..., ...}
num_deriv_funcs = set(['fmod', 'gamma', 'lgamma'])

# Functions are by definition l
ocally constant (on real # numbers): their value does not depend o
n the uncertainty (because # this uncertainty is supposed to lead to a good linear approximation # of the function in the uncertainty region). The type of their output # for floats is preserved, as users should not care about deviations # in their value: their value is locally constant due to the nature of # the function (0 derivative). This situation is similar to that of # comparisons (==, >, etc.). # # !! Python 2.7+: {..., ...} locally_cst_funcs = set(['ceil', 'floor', 'isinf', 'isnan', 'trunc']) # Functions that do not belong in many_scalars_to_scalar_funcs, but # that have a version that handles uncertainties. These functions are # also not in numpy (see unumpy/core.py). non_std_wrapped_funcs = [] # Function that copies the relevant attributes from generalized # functions from the math module: # This is a copy&paste job from the functools module, changing # the default arugment for assigned def wraps(wrapper, wrapped, assigned=('__doc__',), updated=('__dict__',)): """Update a wrapper function to look like the wrapped function. wrapper -- function to be updated wrapped -- original function assigned -- tuple naming the attributes assigned directly from the wrapped function to the wrapper function updated -- tuple naming the attributes of the wrapper that are updated with the corresponding attribute from the wrapped function. """ for attr in assigned: setattr(wrapper, attr, getattr(wrapped, attr)) for attr in updated: getattr(wrapper, attr).update(getattr(wrapped, attr, {})) # Return the wrapper so this can be used as a decorator via partial() return wrapper ######################################## # Wrapping of math functions: # Fixed formulas for the derivatives of some functions from the math # module (some functions might not be present in all version of # Python). Singular points are not taken into account. The user # should never give "large" uncertainties: problems could only appear # if this assumption does not hold. # Functions not mentioned in _fixed_derivatives have their derivatives # calculated numerically. # Functions that have singularities (possibly at infinity) benefit # from analytical calculations (instead of the default numerical # calculation) because their derivatives generally change very fast. # Even slowly varying functions (e.g., abs()) yield more precise # results when differentiated analytically, because of the loss of # precision in numerical calculations. #def log_1arg_der(x): # """ # Derivative of log(x) (1-argument form). # """ # return 1/x def log_der0(*args): """ Derivative of math.log() with respect to its first argument. Works whether 1 or 2 arguments are given. """ if len(args) == 1: return 1/args[0] else: return 1/args[0]/math.log(args[1]) # 2-argument form # The following version goes about as fast: ## A 'try' is used for the most common case because it is fast when no ## exception is raised: #try: # return log_1arg_der(*args) # Argument number check #except TypeError: # return 1/args[0]/math.log(args[1]) # 2-argument form def _deriv_copysign(x,y): if x >= 0: return math.copysign(1, y) else: return -math.copysign(1, y) def _deriv_fabs(x): if x >= 0: return 1 else: return -1 def _deriv_pow_0(x, y): if y == 0: return 0. elif x != 0 or y % 1 == 0: return y*math.pow(x, y-1) else: return float('nan') def _deriv_pow_1(x, y): if x == 0 and y > 0: return 0. 
else: return math.log(x) * math.pow(x, y) erf_coef = 2/math.sqrt(math.pi) # Optimization for erf() fixed_derivatives = { # In alphabetical order, here: 'acos': [lambda x: -1/math.sqrt(1-x**2)], 'acosh': [lambda x: 1/math.sqrt(x**2-1)], 'asin': [lambda x: 1/math.sqrt(1-x**2)], 'asinh': [lambda x: 1/math.sqrt(1+x**2)], 'atan': [lambda x: 1/(1+x**2)], 'atan2': [lambda y, x: x/(x**2+y**2), # Correct for x == 0 lambda y, x: -y/(x**2+y**2)], # Correct for x == 0 'atanh': [lambda x: 1/(1-x**2)], 'copysign': [_deriv_copysign, lambda x, y: 0], 'cos': [lambda x: -math.sin(x)], 'cosh': [math.sinh], 'degrees': [lambda x: math.degrees(1)], 'erf': [lambda x: math.exp(-x**2)*erf_coef], 'erfc': [lambda x: -math.exp(-x**2)*erf_coef], 'exp': [math.exp], 'expm1': [math.exp], 'fabs': [_deriv_fabs], 'hypot': [lambda x, y: x/math.hypot(x, y), lambda x, y: y/math.hypot(x, y)], 'log': [log_der0, lambda x, y: -math.log(x, y)/y/math.log(y)], 'log10': [lambda x: 1/x/math.log(10)], 'log1p': [lambda x: 1/(1+x)], 'pow': [_deriv_pow_0, _deriv_pow_1], 'radians': [lambda x: math.radians(1)], 'sin': [math.cos], 'sinh': [math.cosh], 'sqrt': [lambda x: 0.5/math.sqrt(x)], 'tan': [lambda x: 1+math.tan(x)**2], 'tanh': [lambda x: 1-math.tanh(x)**2] } # Many built-in functions in the math module are wrapped with a # version which is uncertainty aware: this_module = sys.modules[__name__] def wrap_locally_cst_func(func): ''' Return a function that returns the same arguments as func, but after converting any AffineScalarFunc object to its nominal value. This function is useful for wrapping functions that are locally constant: the uncertainties should have no role in the result (since they are supposed to keep the function linear and hence, here, constant). ''' def wrapped_func(*args, **kwargs): args_float = list(map(uncert_core.nominal_value, args)) # !! In Python 2.7+, dictionary comprehension: {argname:...} kwargs_float = dict( (arg_name, uncert_core.nominal_value(value)) for (arg_name, value) in kwargs.items()) return func(*args_float, **kwargs_float) return wrapped_func # for (name, attr) in vars(math).items(): for name in dir(math): if name in fixed_derivatives: # Priority to functions in fixed_derivatives derivatives = fixed_derivatives[name] elif name in num_deriv_funcs: # Functions whose derivatives are calculated numerically by # this module fall here
#forces : A fast customized optimization solver. # #Copyright (C) 2013-2016 EMBOTECH GMBH [info@embotech.com]. All rights reserved. # # #This software is intended for simulation and testing purposes only. #Use of this software for any commercial purpose is prohibited. # #This program is distributed in the hope that it will be useful. #EMBOTECH makes NO WARRANTIES with respect to the use of the software #without even the implied warranty of MERCHANTABILITY or FITNESS FOR A #PARTICULAR PURPOSE. # #EMBOTECH shall not have any liability for any damage arising from the use #of the software. # #This Agreement shall exclusively be governed by and interpreted in #accordance with the laws of Switzerland, excluding its principles #of conflict of laws. The Courts of Zurich-City shall have exclusive #jurisdiction in case of any dispute. # from distutils.ccompiler import new_compiler c = new_compiler() #from
numpy.distutils.intelccom
piler import IntelCCompiler #c = IntelCCompiler() import os import sys import distutils # determine source file sourcefile = os.path.join(os.getcwd(),"forces","src","forces"+".c") # determine lib file if sys.platform.startswith('win'): libfile = os.path.join(os.getcwd(),"forces","lib","forces"+".lib") else: libfile = os.path.join(os.getcwd(),"forces","lib","forces"+".so") # create lib dir if it does not exist yet if not os.path.exists(os.path.join(os.getcwd(),"forces","lib")): os.makedirs(os.path.join(os.getcwd(),"forces","lib")) # compile into object file objdir = os.path.join(os.getcwd(),"forces","obj") if isinstance(c,distutils.unixccompiler.UnixCCompiler): objects = c.compile([sourcefile], output_dir=objdir, extra_preargs=['-O3','-fPIC','-fopenmp','-mavx']) if sys.platform.startswith('linux'): c.set_libraries(['rt','gomp']) else: objects = c.compile([sourcefile], output_dir=objdir) # create libraries libdir = os.path.join(os.getcwd(),"forces","lib") exportsymbols = ["%s_solve" % "forces"] c.create_static_lib(objects, "forces", output_dir=libdir) c.link_shared_lib(objects, "forces", output_dir=libdir, export_symbols=exportsymbols)
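# ---------------------------------------------------------------------------
# Hedged follow-up sketch, not part of the build script: once the shared
# library has been linked, the exported symbol can be resolved with ctypes
# (Linux path shown; the argument and return types of forces_solve are not
# known here and would have to be taken from the generated header).
import ctypes

def _load_solver():
    lib = ctypes.CDLL(os.path.join(os.getcwd(), "forces", "lib", "forces.so"))
    return lib.forces_solve   # exported via export_symbols above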
def __repr__(self): module = self.__class__.__module__ class_name = self.__class__.__name__ return '<{0}.{1}: {2}>'.format(module, class_name, self.to_id()) class Token(namedtuple('Token', ['token', 'expires'])): """ Wrapper around access-token. """ class EPOClient: """ Client to call EPO-OPS REST-API using `requests`. Features auto-throttling based on OPS throttling headers and automatic retries on server-side error codes. Parameters ---------- accept_type : str Http accept type. key : str, optional EPO OPS user key. secret : str, optional EPO OPS user secret. cache : bool If True, try to use `requests_cache` for caching. Default False. cache_kwargs : dict, optional. Passed to :py:func:`requests_cache.install_cache` as keyword arguments if provided. max_retries : int Number of allowed retries at 500-responses. retry_timeout : float, int Timeout in seconds between calls when retrying at 500-responses. Attributes ---------- secret : str key : str token : Token or None quota_per_hour_used : int quota_per_week_used : int """ HAS_FULLTEXT = {'EP'} def __init__(self, accept_type='xml', key=None, secret=None, cache=False, cache_kwargs=None, max_retries=1, retry_timeout=10): try: _check_epoclient_input(accept_type, key, secret, cache, cache_kwargs, max_retries, retry_timeout) except AssertionError as e: raise ValueError(str(e)) if accept_type.startswith('application/'): self.accept_type = accept_type else: self.accept_type = 'application/{}'.format(accept_type) if cache and _HAS_CACHE: logging.info('Installs cache.') requests_cache.install_cache(**(cache_kwargs or dict())) elif cache: raise ValueError('cache is set to True but requests_cache ' 'is not available.') self.secret = secret self.key = key self.max_retries = max_retries self.retry_timeout = retry_timeout self.quota_per_hour_used = 0 self.quota_per_week_used = 0 if all([secret, key]): logging.debug('Auth provided.') self.token = self.authenticate() else: logging.debug('Auth not provided') self.token = None self._last_call = { 'search': None, 'retrieval': None, 'inpadoc': None, 'images': None, 'other': None } self._next_call = self._last_call.copy() def fetch(self, service, ref_type, api_input, endpoint='', options=None, extra_headers=None): """ Generic function to fetch data from the EPO-OPS API. Parameters ---------- service : epo_utils.ops.Services OPS-service to fetch from. ref_type : epo_utils.ops.ReferenceType OPS-reference type of data to fetch. api_input : APIInput, list[APIInput] Input to API-call. endpoint : str API-endpoint to call. options : list, optional API-call constitents. extra_headers : dict, optional Additional or custom headers to be used. use_post : bool If True, POST will be used for request. 
Returns ------- requests.Response """ if not isinstance(ref_type, ReferenceType): raise ValueError('invalid ref_type: {}'.format(ref_type)) if not isinstance(service, Services): raise ValueError('invalid service: {}'.format(service)) if endpoint not in VALID_ENDPOINTS: raise ValueError('invalid endpoint: {}'.format(endpoint)) try: input_text = ','.join(i.to_id() for i in api_input) except TypeError: input_text = api_input.to_id() id_types = {api_input.id_type} else: id_types = {i.id_type for i in api_input} if len(id_types) > 1: raise ValueError('non-matching id-types') options = options or list() url = build_ops_url(service, ref_type, id_types.pop(), endpoint, options) headers = self._make_headers(extra_headers) logging.debug('Makes request to: {}\nheaders: {}'.format(url, headers)) logging.info('fetches {}'.format(input_text)) try: response = self.post('retrieval', url, input_text, headers=headers) except requests.HTTPError as e: if e.response.status_code == requests.codes.not_found: logging.error('{} not found'.format(input_text)) raise FetchFailed(input_text) else: raise logging.info('Fetch succeeded.') return response def search(self, query,
fetch_range, service=Services.PublishedSearch, endpoint='', extra_headers=None): """ Post a GET-search query. Parameters ---------- query : str Query string. fetch_range : tuple[int, int] Get entries `fetch_range[0]` to `fetch_range[1]`. service : Services Which service to use for search. endpoint : str, list[str] Endpoint(s) to search. extra_headers :
dict, optional
            Additional or custom headers to be used.

        Returns
        -------
        requests.Response
        """
        if not isinstance(service, Services):
            raise ValueError('invalid service: {}'.format(service))

        if not isinstance(endpoint, (list, tuple)):
            endpoint = [endpoint]
        if not all(e in VALID_ENDPOINTS for e in endpoint):
            invalid = filter(lambda e: e not in VALID_ENDPOINTS, endpoint)
            raise ValueError('invalid endpoint: {}'.format(next(invalid)))

        # Parenthesized so both the length and the element types are checked;
        # the original precedence raised only when the length was wrong and
        # every element was still an int.
        if not (len(fetch_range) == 2
                and all(isinstance(i, int) for i in fetch_range)):
            raise ValueError('invalid fetch_range: {}'.format(fetch_range))

        headers = self._make_headers(
            {'Accept': 'application/exchange+xml',
             'X-OPS-Range': '{}-{}'.format(*fetch_range)}
        )
        headers.update(extra_headers or dict())
        url = build_ops_url(service, options=endpoint)

        logging.info('Sends query: {}'.format(query))
        response = self.post('search', url, headers=headers,
                             data={'q': query})
        logging.info('Query successful.')
        return response

    def authenticate(self):
        """ If EPO-OPS customer key and secret is available, get access-token.

        Returns
        -------
        token : Token
            Token and expiration time.
        """
        if not all([self.secret, self.key]):
            return None

        logging.info('Attempts to authenticate.')

        # Post base64-encoded credentials to get access-token.
        credentials = '{0}:{1}'.format(self.key, self.secret)
        encoded_creds = b64encode(credentials.encode('ascii')).decode('ascii')
        headers = {'Authorization': 'Basic {}'.format(encoded_creds)}
        payload = {'grant_type': 'client_credentials'}
        response = requests.post(AUTH_URL, headers=headers, data=payload)
        response.raise_for_status()

        logging.info('Authentication succeeded.')

        # Parse response.
        content = response.json()
        token = content['access_token']
        expires_in = int(content['expires_in'])
        expires = datetime.now() + timedelta(seconds=expires_in)
        token = Token(token, expires)
        return token

    def post(self, service, *args, **kwargs):
        """ Makes an auto-throttled POST to the OPS-API.

        Parameters
        ----------
        service : str
            OPS-system called.
        *args
            Positional arguments passed to :py:`requests.post`
        **kwargs
            Keyword arguments
import os import re import sys import json import shlex import logging import inspect import functools import importlib from pprint import pformat from collections import namedtuple from traceback import format_tb from requests.exceptions import RequestException import strutil from cachely.loader import Loader from .lib import library, interpreter_library, DataProxy from . import utils from . import core from . import exceptions logger = logging.getLogger(__name__) BASE_LIBS = ['snagit.lib.text', 'snagit.lib.lines', 'snagit.lib.soup'] ReType = type(re.compile('')) class Instruction(namedtuple('Instruction', 'cmd args kws line lineno')): ''' ``Instruction``'s take the form:: command [arg [arg ...]] [key=arg [key=arg ...]] Where ``arg`` can be one of: single quoted string, double quoted string, digit, True, False, None, or a simple, unquoted string. ''' values_pat = r''' [rj]?'(?:(\'|[^'])*?)' | [r]?"(?:(\"|[^"])*?)" | (\d+) | (True|False|None) | ([^\s,]+) ''' args_re = re.compile( r'''^( (?P<kwd>\w[\w\d-]*)=(?P<val>{0}) | (?P<arg>{0}|([\s,]+)) )\s*'''.format(values_pat), re.VERBOSE ) value_dict = {'True': True, 'False': False, 'None': None} def __str__(self): def _repr(w): if isinstance(w, ReType): return 'r"{}"'.format(str(w.pattern)) return repr(w) return '{}{}{}'.format( self.cmd.upper(), ' {}'.format( ' '.join([_repr(c) for c in self.args]) if self.args else '' ), ' {}'.format(' '.join( '{}={}'.format(k, _repr(v)) for k, v in self.kws.items() ) if self.kws else '') ) @classmethod def get_value(cls, s): if s.isdigit(): return int(s) elif s in cls.value_dict: return cls.value_dict[s] elif s.startswith(('r"', "r'")): return re.compile(utils.escaped(s[2:-1])) elif s.startswith("j'"): return json.loads(utils.escaped(s[2:-1])) elif s.startswith(('"', "'")): return utils.escaped(s[1:-1]) else: return s.strip() @classmethod def parse(cls, line, lineno): args = [] kws = {} cmd, text = strutil.splitter(line, expected=2, strip=True) cmd = cmd.lower() while text: m = cls.args_re.search(text) if not m: break gdict = m.groupdict() kwd = gdict.get('kwd') if kwd: kws[kwd] = cls.get_value(gdict.get('val', '')) else: arg = gdict.get('arg', '').strip() if arg != ',': args.append(cls.get_value(arg)) text = text[len(m.group()):] if text: raise SyntaxError( 'Syntax error: "{}" (line {})'.format(text, lineno) ) return cls(cmd, args, kws, line, lineno) def lexer(code, lineno=0): ''' Takes the script source code, scans it, and lexes it into ``Instructions`` ''' for chars in code.splitlines(): lineno += 1 line = chars.rstrip() if not line or line.lstrip().startswith('#'): c
ontinue
        logger.debug('Lexed {} byte(s) from line: {}'.format(len(line), chars))
        yield Instruction.parse(line, lineno)


def load_libraries(extensions=None):
    if isin
stance(extensions, str): extensions = [extensions] libs = BASE_LIBS + (extensions or []) for lib in libs: importlib.import_module(lib) class Interpreter: def __init__( self, contents=None, loader=None, use_cache=False, do_pm=False, extensions=None ): self.use_cache = use_cache self.loader = loader if loader else Loader(use_cache=use_cache) self.contents = Contents(contents) self.do_debug = False self.do_pm = do_pm self.instructions = [] load_libraries(extensions) def load_sources(self, sources, use_cache=None): use_cache = self.use_cache if use_cache is None else bool(use_cache) contents = self.loader.load_sources(sources) self.contents.update([ ct.decode() if isinstance(ct, bytes) else ct for ct in contents ]) def listing(self, linenos=False): items = [] for instr in self.instructions: items.append('{}{}'.format( '{} '.format(instr.lineno) if linenos else '', instr.line )) return items def lex(self, code): lineno = self.instructions[-1].lineno if self.instructions else 0 instructions = list(lexer(code, lineno)) self.instructions.extend(instructions) return instructions def execute(self, code): for instr in self.lex(code): try: self._execute_instruction(instr) except exceptions.ProgramWarning as why: print(why) return self.contents def _load_handler(self, instr): if instr.cmd in library.registry: func = library.registry[instr.cmd] return self.contents, (func, instr.args, instr.kws) elif instr.cmd in interpreter_library.registry: func = interpreter_library.registry[instr.cmd] return func, (self, instr.args, instr.kws) raise exceptions.ProgramWarning( 'Unknown instruction (line {}): {}'.format(instr.lineno, instr.cmd) ) def _execute_instruction(self, instr): logger.debug('Executing {}'.format(instr.cmd)) handler, args = self._load_handler(instr) do_debug, self.do_debug = self.do_debug, False if do_debug: utils.pdb.set_trace() try: handler(*args) except Exception: exc, value, tb = sys.exc_info() if self.do_pm: logger.error( 'Script exception, line {}: {} (Entering post_mortem)'.format( # noqa instr.lineno, value ) ) utils.pdb.post_mortem(tb) else: raise def execute_script(filename, contents=''): code = utils.read_file(filename) return execute_code(code, contents) def execute_code(code, contents=''): intrep = Interpreter(contents) return str(intrep.execute(code)) class Contents: def __init__(self, contents=None): self.stack = [] self.set_contents(contents) def __iter__(self): return iter(self.contents) def __len__(self): return len(self.contents) def __str__(self): return '\n'.join(str(c) for c in self) # def __getitem__(self, index): # return self.contents[index] def pop(self): if self.stack: self.contents = self.stack.pop() def __call__(self, func, args, kws): contents = [] for data in self: result = func(data, args, kws) contents.append(result) self.update(contents) def merge(self): if self.contents: first = self.contents[0] data = first.merge(self.contents) self.update([data]) def update(self, contents): if self.contents: self.stack.append(self.contents) self.set_contents(contents) def set_contents(self, contents): self.contents = [] if isinstance(contents, (str, bytes)): contents = [contents] contents = contents or [] for ct in contents: if isinstance(ct, (str, bytes)): ct = DataProxy(ct) self.contents.append(ct)
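
# A rough, self-contained approximation of the instruction grammar documented
# on the Instruction class above (command, positional args, key=value pairs).
# The real parser uses the args_re regex and supports r'...'/j'...' literals;
# this shlex-based sketch only illustrates the shape of the result.
import shlex


def parse_instruction(line):
    cmd, *tokens = shlex.split(line)
    args, kws = [], {}
    for tok in tokens:
        if '=' in tok:
            key, _, val = tok.partition('=')
            kws[key] = val
        else:
            args.append(tok)
    return cmd.lower(), args, kws


# e.g. parse_instruction('FETCH "http://example.com" retries=3')
# -> ('fetch', ['http://example.com'], {'retries': '3'})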
import unittest import os from sqltxt.table import Table from sqltxt.column import Column, ColumnName, AmbiguousColumnNameError from sqltxt.expression import Expression class TableTest(unittest.TestCase): def setUp(self): self.data_path = os.path.join(os.path.dirname(__file__), '../data') table_header = ["col_a", "col_b"] table_contents = """1,1 2,3 3,2""" self.table_a = Table.from_cmd( name = 'table_a', cmd = 'echo -e "{0}"'.format(table_contents), columns = table_header ) table_header = ["col_a", "col_b"] table_contents = """1,w 2,x 2,y 5,z""" self.table_b = Table.from_cmd( name = 'table_b', cmd = 'echo -e "{0}"'.format(table_contents), columns = table_header ) def test_subset_rows(self): conditions = [ [Expression('col_b', '==', '1'), 'or', Expression('col_a', '==', '2')] ] self.table_a.subset_rows(conditions) cmds_actual = self.table_a.cmds cmds_expected = [ 'echo -e "1,1\n2,3\n3,2"', "awk -F',' 'OFS=\",\" { if (($2 == 1 || $1 == 2)) { print $1,$2 } }'"] self.assertEqual(cmds_actual, cmds_expected) def test_order_columns(self): col_name_order = [ColumnName('col_b'), ColumnName('col_a')] self.table_a.order_columns(col_name_order) cmds_actual = self.table_a.cmds cmds_expected = ['echo -e "1,1\n2,3\n3,2"', "awk -F',' 'OFS=\",\" { print $2,$1 }'"] self.assertEqual(cmds_actual, cmds_expected) def test_sort(self): sort_by_col_names = [ColumnName('col_a'), ColumnName('col_b')] self.table_a.sort(sort_by_col_names) cmds_actual = self.table_a.cmds cmds_expected = ['echo -e "1,1\n2,3\n3,2"', "sort -t, -k 1,1 -k 2,2"] self.assertEqual(cmds_actual, cmds_expected) sort_by_cols = [self.table_a.get_column_for_name(cn) for cn in sort_by_col_names] self.assertEqual(self.table_a.sorted_by, sort_by_cols) def test_is_sorted_by(self): table_from_cmd = Table.from_cmd( name = 'table_a', cmd = 'echo -e ""', columns = ['col_a', 'col_b']) table_from_cmd.sorted_by = [Column('table_a.col_a'), Column('table_a.col_b')] self.assertTrue(table_from_cmd.is_sorted_by([0])) self.assertFalse(table_from_cmd.is_sorted_by([1])) self.assertTrue(table_from_cmd.is_sorted_by([0,1])) def test_get_column_for_name_raises_on_ambiguity(self): table_from_cmd = Table.from_cmd( name = 'table_a', cmd = 'echo -e ""', columns = ['col_a', 'col_a']) with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'): table_f
rom_cmd.get_column_for_name(ColumnName('col_a')) table_from_cmd = Table.from_cmd( name = 'table_a', cmd = 'echo -e ""', columns = ['ta.col_a', 'tb.col_a']) with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'): table_from_
cmd.get_column_for_name(ColumnName('col_a')) first_column = Column('ta.col_a') first_column.add_name('col_alpha') second_column = Column('tb.col_a') table_from_cmd = Table.from_cmd( name = 'table_a', cmd = 'echo -e ""', columns = [first_column, second_column]) with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'): table_from_cmd.get_column_for_name(ColumnName('col_a')) def test_sample_rows(self): self.table_a.sample_rows(1) cmds_actual = self.table_a.cmds cmds_expected = ['echo -e "1,1\n2,3\n3,2"', """awk -v seed=$RANDOM -v n={0} ' BEGIN {{ srand(seed) }} NR <= n {{ reservoir[NR] = $0 }} NR > n {{ M = int(rand() * NR) + 1; if (M <= n) {{ reservoir[M] = $0 }}}} END {{ for (key in reservoir) {{ print reservoir[key] }}}}'""".format(1) ] self.assertEqual(cmds_actual, cmds_expected) def test_get_cmd_str(self): table_from_file = Table.from_file_path(os.path.join(self.data_path, 'table_a.txt')) # output from a file-backed Table to STDOUT cmd_actual = table_from_file.get_cmd_str() cmd_expected = 'tail -n+2 {}/table_a.txt'.format(self.data_path) self.assertEqual(cmd_actual, cmd_expected) table_from_cmd = Table.from_cmd( 'table_a', cmd = 'echo -e "1,2,3,4"', columns = ['col_a', 'col_b', 'col_c', 'col_d']) # output from a command-backed Table to STDOUT cmd_actual = table_from_cmd.get_cmd_str() cmd_expected = 'echo -e "1,2,3,4"' self.assertEqual(cmd_actual, cmd_expected) # add a command, then output table_from_cmd.cmds += ['sort'] # to STDOUT cmd_actual = table_from_cmd.get_cmd_str() cmd_expected = 'echo -e "1,2,3,4" | sort' self.assertEqual(cmd_actual, cmd_expected)
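
# The awk one-liner asserted in test_sample_rows above is reservoir sampling:
# keep the first n rows, then replace a kept row with decreasing probability.
# A plain-Python rendering of the same algorithm, for reference only:
import random


def reservoir_sample(rows, n, seed=None):
    rng = random.Random(seed)
    reservoir = []
    for i, row in enumerate(rows, start=1):
        if i <= n:
            reservoir.append(row)
        else:
            # Row i survives with probability n/i, mirroring the awk script's
            # "M = int(rand() * NR) + 1; if (M <= n) reservoir[M] = $0".
            m = rng.randrange(i)  # 0 .. i-1
            if m < n:
                reservoir[m] = row
    return reservoir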
alty='l1') self.neutral_train = None self.vct_neutral = None
self.neutral_labels = np.array([]) self.vcn = vcn
self._kvalues = [10, 25, 50, 75, 100] self.subpool = subpool self.lambda_value = lambda_value def pick_next(self, pool=None, step_size=1): list_pool = list(pool.remaining) indices = self.randgen.permutation(len(pool.remaining)) remaining = [list_pool[index] for index in indices] uncertainty = [] if self.subpool is None: self.subpool = len(pool.remaining) for i in remaining[:self.subpool]: # data_point = candidates[i] utility, k, unc = self.x_utility(pool.data[i], pool.text[i]) if show_utilitly: print "%s\t %s \t %.3f" % (i, k, utility) uncertainty.append([utility, k, unc]) uncertainty = np.array(uncertainty) unc_copy = uncertainty[:, 0] sorted_ind = np.argsort(unc_copy, axis=0)[::-1] chosen = [[remaining[x], uncertainty[x, 1]] for x in sorted_ind[:int(step_size)]] util = [uncertainty[x] for x in sorted_ind[:int(step_size)]] # print util ## chosen returns the chosen and the k value associated with it return chosen, util def x_utility(self, instance, instance_text): prob = self.model.predict_proba(instance) unc = 1 - prob.max() utility = np.array([[self.obj_fn_p2(unc, xik, k), k] for k, xik in self.getk(instance_text)]) order = np.argsort(utility[:, 0], axis=0)[::-1] ## descending order utility_sorted = utility[order, :] # print format_list(utility_sorted) if show_utilitly: print "\t{0:.5f}".format(unc), return utility_sorted[0, 0], utility_sorted[0, 1], unc ## return the max def obj_fn_p2(self, uncertainty, instance_k, k): ## change the text to use the vectorizer # xik = self.vct_neutral.transform([instance_k]) xik = self.vcn.transform([instance_k]) # neu = self.neutral_model.predict_proba(xik) if self.neutral_model is not None else [1] neu = 1 if self.neutral_model is not None: neu = self.neutral_model.predict_proba(xik)[0, 1] # probability of being not-neutral costk = self.predict_cost(k) utility = (uncertainty * neu) - (self.lambda_value *costk) ## u(x) * N(xk) / C(xk) # print utility if show_utilitly: print "\t{0:.3f}".format(neu), print "\t{0:.3f}".format(costk), return utility def getk(self, doc_text): ''' Return a set of subinstance of k words in classifier format :param doc_text: :return: set of subinstances of doc_text of fixk size ''' qk = [] analize = self.vcn.build_tokenizer() for k in self._kvalues: qk.append(" ".join(analize(doc_text)[0:k])) return zip(self._kvalues, qk) def update_neutral(self, train_data, train_labels): ## add the neutral instance to the neutral set ## TODO: create the neutral dataset ## recompute neutral data ## eliminate features using the student model # try: # coef = self.model.coef_[0] # names = self.vcn.get_feature_names() # # vocab = [names[j] for j in np.argsort(coef)[::-1] if coef[j] != 0] # # self.vct_neutral = CountVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=False, ngram_range=(1, 3), # token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer(), vocabulary=vocab) # # train_x = self.vct_neutral.fit_transform(train_data) if not isinstance(train_data, list): clf = copy.copy(self.base_neutral) clf.fit(train_data, train_labels) else: clf = None # except ValueError: # clf = None return clf def train_all(self, train_data=None, train_labels=None, neu_train=None, neu_labels=None): ''' Return a new copy of a retrain classifier based on paramters. 
If no data, return an un-trained classifier :param train_data: training data :param train_labels: target values :return: trained classifier @param neu_train: @param neu_labels: ''' clf = super(AnytimeLearnerDiff, self).train(train_data=train_data, train_labels=train_labels) self.model = clf self.neutral_model = self.update_neutral(neu_train, neu_labels) return clf def __str__(self): string = "{0}(model={1}, neutral-model={2}, subpool={3}, lambda={4})".format(self.__class__.__name__,self.model,self.neutral_model, self.subpool, self.lambda_value) return string def __repr__(self): string = "{0}(model={1}, neutral-model={2}, subpool={3}, lambda={4})".format(self.__class__.__name__,self.model,self.neutral_model, self.subpool, self.lambda_value) return string class AnytimeLearnerV2(BaseLearner): def __init__(self, model=None, accuracy_model=None, budget=None, seed=None, vcn=None, subpool=None, cost_model=None): super(AnytimeLearnerV2, self).__init__(model=model, accuracy_model=accuracy_model, budget=budget, seed=seed, cost_model=cost_model) self.model = model self.neutral_model = LogisticRegression(penalty='l1') self.base_neutral = LogisticRegression(penalty='l1') self.neutral_train = None self._kvalues = [10, 25, 50, 75, 100] self.subpool = subpool def pick_next(self, pool=None, alldata=None, step_size=1): list_pool = list(pool.remaining) indices = self.randgen.permutation(len(pool.remaining)) remaining = [list_pool[index] for index in indices] uncertainty = [] if self.subpool is None: self.subpool = len(pool.remaining) for i in remaining[:self.subpool]: utility, k = self.x_utility(pool.data[i], i, alldata) uncertainty.append([utility, k]) uncertainty = np.array(uncertainty) unc_copy = uncertainty[:, 0] sorted_ind = np.argsort(unc_copy, axis=0)[::-1] chosen = [[remaining[x], uncertainty[x, 1]] for x in sorted_ind[:int(step_size)]] ## chosen returns the chosen and the k value associated with it return chosen def x_utility(self, instance, instance_index, alldata): prob = self.model.predict_proba(instance) unc = 1 - prob.max() utility = np.array([[self.obj_fn_p2(xik, k) * unc, k] for k, xik in self.getk(instance_index, alldata)]) order = np.argsort(utility[:, 0], axis=0)[::-1] ## descending order utility_sorted = utility[order, :] # print utility_sorted return utility_sorted[0, 0], utility_sorted[0, 1] ## return the max def obj_fn_p2(self, instance_k, k): ## change the text to use the vectorizer xik = self.vct_neutral.transform([instance_k]) # neu = self.neutral_model.predict_proba(xik) if self.neutral_model is not None else [1] neu = 1 if self.neutral_model is not None: neu = self.neutral_model.predict_proba(xik)[0, 1] # probability of being not-neutral costk = self.predict_cost(k) utility = neu / costk ## u(x) * N(xk) / C(xk) # print utility return utility def getk(self, doc_index, alldata): ''' Return a set of subinstance of k words in classifier format :param doc_text: :return: set of subinstances of doc_text of fixk size ''' qk = [] for k in self._kvalues: qk.append(alldata[k].bow.tocsr()[doc_index]) return zip(self._kvalues, qk) def update_neutral(self, train_data, train_labels): ## add the neutral instance to the neutral set ## TODO: create the neutral dataset ## recompute neutral data ## eliminate features using the student model try: coef = self.model.coef_[0] vocab = [j for j in np.argsort(coef)[::-1] if coef[j] !=
payload.""" payload = test_support.decode_task_payload(task) return model.MapreduceSpec.from_json_str(payload["mapreduce_spec"]) def validate_map_started(self, mapreduce_id, queue_name=None): """Tests that the map has been started.""" queue_name = queue_name or self.QUEUE_NAME self.assertTrue(mapreduce_id) # Note: only a kickoff job is pending at this stage, shards come later. tasks = self.taskqueue.GetTasks(queue_name) self.assertEquals(1, len(tasks)) # Checks that tasks are scheduled into the future. task = tasks[0] self.assertEqual("/mapreduce_base_path/kickoffjob_callback/" + mapreduce_id, task["url"]) handler = test_support.execute_task(task) self.assertEqual(mapreduce_id, handler.request.get("mapreduce_id")) state = model.MapreduceState.get_by_job_id(mapreduce_id) params = map_job.JobConfig._get_default_mr_params() params.update({"foo": "bar", "base_path": "/mapreduce_base_path", "queue_name": queue_name}) self.assertEqual(state.mapreduce_spec.params, params) return task["eta"] def testStartMap(self): """Test start_map function. Most of start_map functionality is already tested by handlers_test. Just a smoke test is enough. """ TestEntity().put() shard_count = 4 mapreduce_id = control.start_map( "test_map", __name__ + ".test_handler", "mapreduce.input_readers.DatastoreInputReader", { "entity_kind": __name__ + "." + TestEntity.__name__, }, shard_count, mapreduce_parameters={"foo": "bar"}, base_path="/mapreduce_base_path", queue_name=self.QUEUE_NAME) self.validate_map_started(mapreduce_id) def testStartMap_Countdown(self): """Test that MR can be scheduled into the future. Most of start_map functionality is already tested by handlers_test. Just a smoke test is enough. """ TestEntity().put() # MR should be scheduled into the future. now_sec = long(time.time()) shard_count = 4 mapreduce_id = control.start_map( "test_map", __name__ + ".test_handler", "mapreduce.input_readers.DatastoreInputReader", { "entity_kind": __name__ + "." + TestEntity.__name__, }, shard_count, mapreduce_parameters={"foo": "bar"}, base_path="/mapreduce_base_path", queue_name=self.QUEUE_NAME, countdown=1000) task_eta = sel
f.validate_map_started(mapreduce_id) eta_sec = time.mktime(time.strptime(task_eta, "%Y/%m/%d %H:%M:%S")) self.assertTrue(now_sec + 1000 <= eta_sec) def testStartMap_Eta(self): """Test that MR can be scheduled into the future. Most of start_map functionality is already tested by handlers_test. Just a smoke test is enough. """ TestEntity().put() # MR should be scheduled into the future. eta =
datetime.datetime.utcnow() + datetime.timedelta(hours=1) shard_count = 4 mapreduce_id = control.start_map( "test_map", __name__ + ".test_handler", "mapreduce.input_readers.DatastoreInputReader", { "entity_kind": __name__ + "." + TestEntity.__name__, }, shard_count, mapreduce_parameters={"foo": "bar"}, base_path="/mapreduce_base_path", queue_name=self.QUEUE_NAME, eta=eta) task_eta = self.validate_map_started(mapreduce_id) self.assertEquals(eta.strftime("%Y/%m/%d %H:%M:%S"), task_eta) def testStartMap_QueueEnvironment(self): """Test that the start_map inherits its queue from the enviornment.""" TestEntity().put() shard_count = 4 os.environ["HTTP_X_APPENGINE_QUEUENAME"] = self.QUEUE_NAME try: mapreduce_id = control.start_map( "test_map", __name__ + ".test_handler", "mapreduce.input_readers.DatastoreInputReader", { "entity_kind": __name__ + "." + TestEntity.__name__, }, shard_count, mapreduce_parameters={"foo": "bar"}, base_path="/mapreduce_base_path") finally: del os.environ["HTTP_X_APPENGINE_QUEUENAME"] self.validate_map_started(mapreduce_id) def testStartMap_Hooks(self): """Tests that MR can be scheduled with a hook class installed. Most of start_map functionality is already tested by handlers_test. Just a smoke test is enough. """ TestEntity().put() shard_count = 4 mapreduce_id = control.start_map( "test_map", __name__ + ".test_handler", "mapreduce.input_readers.DatastoreInputReader", { "entity_kind": __name__ + "." + TestEntity.__name__, }, shard_count, mapreduce_parameters={"foo": "bar"}, base_path="/mapreduce_base_path", queue_name="crazy-queue", hooks_class_name=__name__+"."+TestHooks.__name__) self.assertTrue(mapreduce_id) task, queue_name = TestHooks.enqueue_kickoff_task_calls[0] self.assertEqual("/mapreduce_base_path/kickoffjob_callback/" + mapreduce_id, task.url) self.assertEqual("crazy-queue", queue_name) def testStartMap_RaisingHooks(self): """Tests that MR can be scheduled with a dummy hook class installed. The dummy hook class raises NotImplementedError for all method calls so the default scheduling logic should be used. Most of start_map functionality is already tested by handlers_test. Just a smoke test is enough. """ TestEntity().put() shard_count = 4 mapreduce_id = control.start_map( "test_map", __name__ + ".test_handler", "mapreduce.input_readers.DatastoreInputReader", { "entity_kind": __name__ + "." + TestEntity.__name__, }, shard_count, mapreduce_parameters={"foo": "bar"}, base_path="/mapreduce_base_path", queue_name="crazy-queue", hooks_class_name=hooks.__name__+"."+hooks.Hooks.__name__) self.validate_map_started(mapreduce_id) def testStartMap_HugePayload(self): """Test start_map function. Most of start_map functionality is already tested by handlers_test. Just a smoke test is enough. """ TestEntity().put() shard_count = 4 mapreduce_id = "" mapreduce_id = control.start_map( "test_map", __name__ + ".test_handler", "mapreduce.input_readers.DatastoreInputReader", { "entity_kind": __name__ + "." + TestEntity.__name__, "huge_parameter": random_string(900000) }, shard_count, mapreduce_parameters={"foo": "bar"}, base_path="/mapreduce_base_path", queue_name=self.QUEUE_NAME) self.validate_map_started(mapreduce_id) def testStartMapTransactional(self): """Test start_map function. Most of start_map functionality is already tested by handlers_test. Just a smoke test is enough. 
""" TestEntity().put() shard_count = 4 mapreduce_id = "" @db.transactional(xg=True) def tx(): some_entity = TestEntity() some_entity.put() return control.start_map( "test_map", __name__ + ".test_handler", "mapreduce.input_readers.DatastoreInputReader", { "entity_kind": __name__ + "." + TestEntity.__name__, }, shard_count, mapreduce_parameters={"foo": "bar"}, base_path="/mapreduce_base_path", queue_name=self.QUEUE_NAME, in_xg_transaction=True) mapreduce_id = tx() self.validate_map_started(mapreduce_id) def testStartMapTransactional_HugePayload(self): """Test start_map function. Most of start_map functionality is already tested by handlers_test. Just a smoke test is enough. """ TestEntity().put() shard_count = 4 mapreduce_id = "" @db.transactional(xg=True) def tx(): some_entity = TestEntity() some_entity.put() return control.start_map( "test_map", __name__ + ".test_handler", "mapreduce.input_readers.D
# -*- coding: utf-8 -*- # # This file is part of Linux Show Player # # Copyright 2012-2016 Francesco Ceruti <ceppofrancy@gmail.com> # # Linux Show Player is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Linux Show Player is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. S
ee the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Linux Show Player. If not, see <http://www.gnu.org/licenses/>. import mido from lisp.modules.midi.midi_common import MIDICommon from lisp.modules.midi.midi_utils import mido_backend, mido_port_name class MIDIOutput(MIDICommon): def __init__(self, port_na
me='AppDefault'): super().__init__(port_name=port_name) def send_from_str(self, str_message): self.send(mido.parse_string(str_message)) def send(self, message): self._port.send(message) def open(self): port_name = mido_port_name(self._port_name, 'O') self._port = mido_backend().open_output(port_name)
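
# A standalone illustration of the mido calls wrapped by MIDIOutput above:
# parse a message from its string form and send it through an output port.
# This opens the backend's default port instead of resolving 'AppDefault'
# via mido_port_name(), so it is a sketch rather than the module's own flow.
import mido

msg = mido.parse_string('note_on channel=0 note=60 velocity=64 time=0')
with mido.open_output() as port:  # default output port
    port.send(msg)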
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author = lucas.wang
@create_time = 2018-01-12
"""
from xml.etree import ElementTree as ET
import fnmatch


class change_xml(object):
    """
    xml main function
    """
    names = ['iConn.CreateXmlTools.vshost.exe',
             'AutoUpdater.dll',
             'NewTonsoft.Json.dll',
             'Oracle.ManagedDataAccess.dll',
             'Renci.SshNet.dll',
             'Renci.SshNet.xml',
             'zxing.dll',
             'Images/ARX_HK.png']

    old_path_name = r"http://172.16.1.81:8081/UpdateClient/"

    def __init__(self, path_name, file_path, file_name="AutoupdateService.xml"):
        """
        Init file path and name.
        :param path_name: new base URL written into each entry's "url" attribute
        :param file_path: directory containing the xml file
        :param file_name: name of the xml file
        """
        self.file_path = file_path
        self.file_name = file_name
        self.tree = ET.parse(file_path + file_name)
        self.path_name = path_name

    def read_xml(self):
        """
        Read the xml file, rewrite each "url" attribute and drop entries
        listed in `names`.
        :return:
        """
        root = self.tree.getroot()
        # Iterate over a copy of the children so removal is safe.
        for item in list(root):
            item.set("url", item.get('url').replace(self.old_path_name, self.path_name))
            # Drop file entries whose path matches one of the names above.
            if fnmatch.filter(self.names, item.get('path')):
                root.remove(item)
        self.write_xml()

    def write_xml(self):
        self.tree.write(self.file_path + self.file_name)
        print("Updated xml file successfully. file: " + self.file_path + self.file_name)


if __name__ == '__main__':
    """
    Test use
    """
    read = change_xml(r'http://172.16.1.81:8081/UpdateClient/',
                      r'D:\\CodeWorkspace\\iConnAll\\Client-dev\\iConn.CreateXmlTools\\bin\\Release\\')
    read.read_xml()
te"] ] for r in reverse_relations: if r.remote_field.model.objects.filter( **{r.field.name: self} ).exists(): return False return True @classmethod def check(cls, **kwargs): errors = super().check(**kwargs) errors.extend(cls._check_doc_versions(**kwargs)) return errors @classmethod def _check_doc_versions(cls, **kwargs): try: if len( cls.objects.filter(doc_version__lt=str(FW_DOCUMENT_VERSION)) ): return [ checks.Warning( "Document templates need to be upgraded. Please " "navigate to /admin/document/document/maintenance/ " "with a browser as a superuser and upgrade all " "document templates on this server.", obj=cls, ) ] else: return [] except (ProgrammingError, OperationalError): # Database has not yet been initialized, so don't throw any error. return [] class Document(models.Model): title = models.CharField(max_length=255, default="", blank=True) path = models.TextField(default="", blank=True) content = models.JSONField(default=dict) doc_version = models.DecimalField( max_digits=3, decimal_places=1, default=FW_DOCUMENT_VERSION ) # The doc_version is the version of the data format in the content field. # We upgrade the content field in JavaScript and not migrations so that # the same code can be used for migrations and for importing old fidus # files that are being uploaded. This field is only used for upgrading data # and is therefore not handed to the editor or document overview page. version = models.PositiveIntegerField(default=0) diffs = models.JSONField(default=list, blank=True) # The last few diffs that were received and approved. The number of stored # diffs should always be equivalent to or more than all the diffs since the # last full save of the document. owner = models.ForeignKey( settings.AUTH_USER_MODEL, related_name="owner", on_delete=models.deletion.CASCADE, ) added = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) comments = models.JSONField(default=dict, blank=True) bibliography = models.JSONField(default=dict, blank=True) # Whether or not document is listed on document overview list page. # True by default and for all normal documents. Can be set to False when # documents are added in plugins that list these documents somewhere else. listed = models.BooleanField(default=True) template = models.ForeignKey( DocumentTemplate, on_delete=models.deletion.CASCADE ) def __str__(self): if len(self.title) > 0: return "%(title)s (%(id)s)" % {"title": self.title, "id": self.id} else: return str(self.id) class Meta(object): ordering = ["-id"] def clean(self, *args, **kwargs): if self.comments is None: self.comments = "{}" if self.bibliography is None: self.bibliography = "{}" def save(self, *args, **kwargs): self.clean() super().save(*args, **kwargs) def get_absolute_url(self): return "/document/%i/" % self.id def is_deletable(self): reverse_relations = [ f for f in self._meta.model._meta.get_fields() if (f.one_to_many or f.one_to_one) and f.auto_created and not f.concrete and f.name not in [ "accessright", "accessrightinvite", "documentrevision", "documentimage", ] ] for r in reverse_relations: if r.remote_field.model.objects.filter( **{r.field.name: self} ).exists(): return False return True @classmethod def check(cls, **kwargs): errors = super().check(**kwargs) errors.extend(cls._check_doc_versions(**kwargs)) return errors @classmethod def _check_doc_versions(cls, **kwargs): try: if len( cls.objects.filter(doc_version__lt=str(FW_DOCUMENT_VERSION)) ): return [ checks.Warning( "Documents need to be upgraded. 
Please navigate to " "/admin/document/document/maintenance/ with a browser " "as a superuser and upgrade all documents on this " "server.", obj=cls, ) ] else: return [] except (ProgrammingError, OperationalError): # Database has not yet been initialized, so don't throw any error. return [] RIGHTS_CHOICES = ( ("write", "Writer"), # Can write content and can read+write comments. # Can chat with collaborators. # Has read access to revisions. ("write-tracked", "Write with tracked changes"), # Can write tracked content and can read/write comments. # Cannot turn off tracked changes. # Can chat with collaborators. # Has read access to revisions. ("comment", "Commentator"), # Can read content and can read+write comments. # Can chat with collaborators. # Has read access to revisions. ("review-tracked", "Reviewer who can write with tracked changes"), # Can write tracked content and can read/write his own comments. # Cannot turn off tracked changes. # Cannot chat with collaborators. # Has no access to revisions. ("review", "Reviewer"), # Can read the content and can read/write his own comments. # Comments by users with this access right only show the user's # numeric ID, not their username. # Cannot chat with collaborators nor see that they are connected. # Has no access to revisions. ("read", "Reader"), # Can read content, including comments # Can chat with collaborators. # Has read access to revisions. ("read-without-comments", "Reader without comment access"), # Can read content, but not the comments. # Cannot chat with collaborators. # Has no access to revisions. ) # Editor and Reviewer can only comment and not edit document COMMENT_ONLY = ("review", "comment") CAN_UPDATE_
DOCUMENT = [ "write", "write-tracked", "review", "review-tracked", "comment", ] # Whether the collaborator is allowed to know about other collaborators # and communicate with them. CAN_COMMUNICATE = ["read", "write", "comment", "
write-tracked"] class AccessRight(models.Model): document = models.ForeignKey(Document, on_delete=models.deletion.CASCADE) path = models.TextField(default="", blank=True) holder_choices = models.Q(app_label="user", model="user") | models.Q( app_label="user", model="userinvite" ) holder_type = models.ForeignKey( ContentType, on_delete=models.CASCADE, limit_choices_to=holder_choices ) holder_id = models.PositiveIntegerField() holder_obj = GenericForeignKey("holder_type", "holder_id") rights = models.CharField( max_length=21, choices=RIGHTS_CHOICES, blank=False ) class Meta(object): unique_together = (("document", "holder_type", "holder_id"),) def __str__(self): return "%(name)s %(rights)s on %(doc_id)d" % { "name": self.holder_obj.readable_name, "rights": self.rights, "doc_id": self.document.id, } def revision_filename(instance, filename): return "document-revisions/{id}.fidus".format(id=instance.pk) class DocumentRevision(models.Model): document = models.ForeignKey(Document, on_delete=models.deletion.CASCADE) doc_version = models.DecimalField( max_digits=3, decimal_places=1, default=FW_DOCUMENT_VERSION
continuous scale version (P-Values, expression values, ...) and a thresholded version (0 and 1 for genelist membership). 2. The pipeline builds a matrix of gene list annotations to test against. To this end, it collects: ENSEMBL GO annotations KEGG Pathways User supplied pathways GSEA database signatures 3. The pipeline performs various gene set enrichment analyses. These are: 1. Hypergeometric GO analysis 2. Gene set enrichment analysis 4. The pipeline creates various QC metrics. To this end it looks for biases in any of the gene lists supplied. Biases the pipeline looks at are: 1. Gene length 2. Nucleotide composition 3. Gene intron/exon structure 4. User supplied table with biases. Usage ===== See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general information how to use CGAT pipelines. Configuration ------------- The pipeline requires a configured :file:`pipeline.ini` file. The sphinxreport report requires a :file:`conf.py` and :file:`sphinxreport.ini` file (see :ref:`PipelineReporting`). To start with, use the files supplied with the Example_ data. Input ----- Optional inputs +++++++++++++++ Requirements ------------ The pipeline requires the results from :doc:`pipeline_annotations`. Set the configuration variable :py:data:`annotations_database` and :py:data:`annotations_dir`. On top of the default CGAT setup, the pipeline requires the following software to be in the path: +----------+-----------+---------------------------+ |*Program* |*Version* |*Purpose* | +----------+-----------+---------------------------+ | | | | +----------+-----------+---------------------------+ Pipeline output =============== The major output is in the database file :file:`csvdb`. Glossary ======== .. glossary:: Code ==== """ from ruffus import * import sys import os import sqlite3 import pandas import CGAT.Experiment as E import CGAT.IOTools as IOTools import CGAT.Database as Database import CGAT.SetTools as SetTools import CGATPipelines.PipelineGO as PipelineGO ################################################### ################################################### ################################################### # Pipeline configuration ################################################### # load options from the config file import CGATPipelines.Pipeline as P PARAMS = P.getParameters( ["%s/pipeline.ini" % os.path.splitext(__file__)[0], "../pipeline.ini", "pipeline.ini"]) PARAMS.update(P.peekParameters( PARAMS["annotations_dir"], "pipeline_annotations.py", prefix="annotations_", update_interface=True)) # Update the PARAMS dictionary in any PipelineModules # e.g.: # import CGATPipelines.PipelineGeneset as PipelineGeneset # PipelineGeneset.PARAMS = PARAMS def connect(): '''connect to database. Use this method to connect to additional databases. Returns a database connection. ''' dbh = sqlite3.connect(PARAMS["database_name"]) statement = '''ATTACH DATABASE '%s' as annotations''' % ( PARAMS["annotations_database"]) cc = dbh.cursor() cc.execute(statement) cc.close() return dbh @transform('genelists.dir/*.tsv.gz', suffix(".tsv.gz"), ".load") def loadGeneLists(infile, outfile): '''load gene list data into database.''' P.load(infile, outfile, tablename="genelist_%s" % P.toTable(outfile)) @merge('genelists.dir/*.tsv.gz', 'genelists.tsv.gz') def buildGeneListMatrix(infiles, outfile): '''build a gene list matrix for simple pathway analysis based on hypergeometric test. A gene list is derived from a gene set by applying thresholds to the input data set. The thresholds are defined in the configuration file. 
''' genesets = [] backgrounds = [] headers = [] for infile in infiles: genelist = pandas.read_csv( IOTools.openFile(infile), index_col=0, sep='\t') track = P.snip(os.path.basename(infile), ".tsv.gz") headers.append(track) field = PARAMS[P.matchParameter("%s_foreground_field" % track)] min_threshold = PARAMS[P.matchParameter( "%s_foreground_min_threshold" % track)] max_threshold = PARAMS[P.matchParameter( "%s_foreground_max_threshold" % track)] genesets.append(set(genelist[ (genelist[field] >= min_threshold) & (genelist[field] <= max_threshold)].index)) E.info('%s: foreground: %f <= %s <= %f' % (track,
min_threshold, field, max_threshold)) field = PARAMS[P.matchParameter("%s_background_field" % track)] min_threshold = PARAMS[P.matchParameter( "%s_background_min_thresho
ld" % track)] max_threshold = PARAMS[P.matchParameter( "%s_background_max_threshold" % track)] E.info('%s: background: %f <= %s <= %f' % (track, min_threshold, field, max_threshold)) backgrounds.append(set(genelist[ (genelist[field] >= min_threshold) & (genelist[field] <= max_threshold)].index)) E.info("%s: fg=%i, bg=%i" % (track, len(genesets[-1]), len(backgrounds[-1]))) E.info("writing gene list matrix") with IOTools.openFile(outfile, "w") as outf: SetTools.writeSets(outf, genesets, labels=headers) with IOTools.openFile(outfile + ".bg.tsv.gz", "w") as outf: SetTools.writeSets(outf, backgrounds, labels=headers) E.info("writing intersection/union matrix") # build set intersection matrix matrix = SetTools.unionIntersectionMatrix(genesets) with IOTools.openFile(outfile + ".matrix.gz", "w") as outf: IOTools.writeMatrix(outf, matrix, headers, headers) matrix = SetTools.unionIntersectionMatrix(backgrounds) with IOTools.openFile(outfile + ".bg.matrix.gz", "w") as outf: IOTools.writeMatrix(outf, matrix, headers, headers) @transform(buildGeneListMatrix, suffix(".tsv.gz"), ".load") def loadGeneListMatrix(infile, outfile): '''load fgene list matrix into table.''' track = P.snip(infile, ".tsv.gz") P.load(infile, outfile, tablename="%s_foreground" % track) P.load(infile + ".bg.tsv.gz", outfile, tablename="%s_background" % track) @transform('pathways.dir/*.tsv.gz', regex('.*/(.*).tsv.gz'), r"pathways_\1.load") def loadPathways(infile, outfile): '''load pathway information into database.''' P.load(infile, outfile, "--add-index=gene_id --add-index=go_id") @follows(mkdir('hypergeometric.dir')) @transform('pathways.dir/*.tsv.gz', regex('.*/(.*).tsv.gz'), add_inputs(buildGeneListMatrix), r'hypergeometric.dir/\1.tsv') def runHypergeometricAnalysis(infiles, outfile): '''run pathway analysis on pathway files in the directory pathways.dir. ''' infile_pathways, infile_genelist = infiles infile_background = infile_genelist + ".bg.tsv.gz" # TODO: # gene annotations # category annotations # # os.path.join( # PARAMS["annotations_dir"], # PARAMS_ANNOTATIONS["interface_go_obo"]), PipelineGO.runGOFromFiles( outfile=outfile, outdir=outfile + ".dir", fg_file=infile_genelist, bg_file=infile_background, go_file=infile_pathways, ontology_file=None, minimum_counts=PARAMS["hypergeometric_minimum_counts"], pairs=False, gene2name=None) def computePathwayBiases(infile, outfile): pass @transform(runHypergeometricAnalysis, suffix(".tsv"), r"\1.load") def loadHypergeometricAnalysis(infile, outfile): '''load GO results.''' track = P.toTable(outfil
import dns import requests import socket from recursortests import RecursorTest class RootNXTrustRecursorTest(RecursorTest): def getOutgoingQueriesCount(self): headers = {'x-api-key': self._apiKey} url = 'http://127.0.0.1:' + str(self._wsPort) + '/api/v1/servers/localhost/statistics' r = requests.get(url, headers=headers, timeout=self._wsTimeout) self.assertTrue(r) self.assertEquals(r.status_code, 200) self.assertTrue(r.json()) content = r.json() for entry in content: if entry['name'] == 'all-outqueries': return int(entry['value']) return 0 class testRootNXTrustDisabled(RootNXTrustRecursorTest): _confdir = 'RootNXTrustDisabled' _wsPort = 8042 _wsTimeout = 2 _wsPassword = 'secretpassword' _apiKey = 'secretapikey' _config_template = """ root-nx-trust=no qname-minimization=no webserver=yes webserver-port=%d webserver-address=127.0.0.1 webserver-password=%s api-key=%s """ % (_wsPort, _wsPassword, _apiKey) def testRootNXTrust(self): """ Check that, with root-nx-trust disabled, we still query the root for www2.nx-example. after receiving a NXD from "." for nx-example. as an answer for www.nx-example. """ # first query nx.example. before = self.getOutgoingQueriesCount()
query = dns.message.make_query('www.nx-example.', 'A') res = self.sendUDPQuery(query) self.assertRcodeEqual(res, dns.rcode.NXDOMAIN) print(res) self.assertAuthorityHasSOA(res) # check that we sent one query to the root after = self.getOutgoingQueriesCount() self.assertEqual(after, before + 1) #
then query nx2.example. before = after query = dns.message.make_query('www2.nx-example.', 'A') res = self.sendUDPQuery(query) self.assertRcodeEqual(res, dns.rcode.NXDOMAIN) self.assertAuthorityHasSOA(res) after = self.getOutgoingQueriesCount() self.assertEqual(after, before + 1) class testRootNXTrustEnabled(RootNXTrustRecursorTest): _confdir = 'RootNXTrustEnabled' _wsPort = 8042 _wsTimeout = 2 _wsPassword = 'secretpassword' _apiKey = 'secretapikey' _config_template = """ root-nx-trust=yes webserver=yes webserver-port=%d webserver-address=127.0.0.1 webserver-password=%s api-key=%s """ % (_wsPort, _wsPassword, _apiKey) def testRootNXTrust(self): """ Check that, with root-nx-trust enabled, we don't query the root for www2.nx-example. after receiving a NXD from "." for nx-example. as an answer for www.nx-example. """ # first query nx.example. before = self.getOutgoingQueriesCount() query = dns.message.make_query('www.nx-example.', 'A') res = self.sendUDPQuery(query) self.assertRcodeEqual(res, dns.rcode.NXDOMAIN) print(res) self.assertAuthorityHasSOA(res) # check that we sent one query to the root after = self.getOutgoingQueriesCount() self.assertEqual(after, before + 1) # then query nx2.example. before = after query = dns.message.make_query('www2.nx-example.', 'A') res = self.sendUDPQuery(query) self.assertRcodeEqual(res, dns.rcode.NXDOMAIN) self.assertAuthorityHasSOA(res) after = self.getOutgoingQueriesCount() self.assertEqual(after, before)
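
# The tests above hinge on counting 'all-outqueries' before and after a query.
# A standalone dnspython query against a local recursor looks like this; the
# address and port are placeholders for whatever the test harness listens on.
import dns.message
import dns.query
import dns.rcode

query = dns.message.make_query('www.nx-example.', 'A')
response = dns.query.udp(query, '127.0.0.1', port=5300, timeout=2.0)
assert response.rcode() == dns.rcode.NXDOMAIN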
# -*- coding: utf-8 -*- import json def json_pre_process_hook(action, request, *args, **kwargs): json_data = request.body if not json_data: action.ret('002').msg('json_params_required') return False try: param_dict = json.loads(json_data) except ValueError: action.ret('003').msg('json_params_invalid') return False for key, value in param_dict.items(): setattr(action, key, value) return True def query_pre_process_hook(action, request, *args, **kwargs): params_dict = request.GET if not params_dict: return True for key, value in params_dict.items(): setattr(action, key, value) return True def form_pre_process_hook(action, request, *args, **kwargs):
param_dict = request.POST
    if not param_dict:
        action.ret('004').msg('form_params_required')
        return False

    for key, value in param_dict.items():
        setattr(action, key, value)
    return True


def jsonp_post_render_hook(action):
    if action.jsonp_callback:
        action.resp_data_json = (
            action.jsonp_callback + '(' + action.resp_data_json + ')'
        )
    else:
        action.ret('005').msg('jsonp_callback_required')
        if action._data:
            del action._data
        action.render()
        return False
    return True
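
# All of the hooks above follow one contract: mutate `action`, then return
# True to continue processing or False to short-circuit. A minimal stub
# showing how json_pre_process_hook populates an action from a request body;
# the real Action class and its ret()/msg() API live elsewhere, so the stubs
# below are illustrative placeholders only.
class _StubAction(object):
    def ret(self, code):
        self.code = code
        return self

    def msg(self, text):
        self.text = text
        return self


class _StubRequest(object):
    body = b'{"user_id": 42}'


action = _StubAction()
if json_pre_process_hook(action, _StubRequest()):
    print(action.user_id)  # -> 42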
import * from dlab.meta_lib import * from dlab.actions_lib import * import os import argparse parser = argparse.ArgumentParser() parser.add_argument('--uuid', type=str, default='') args = parser.parse_args() if __name__ == "__main__": local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id']) local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s', level=logging.DEBUG, filename=local_log_filepath) notebook_config = dict() try: notebook_config['exploratory_name'] = os.environ['exploratory_name'] except: notebook_config['exploratory_name'] = '' notebook_config['service_base_name'] = os.environ['conf_service_base_name'] notebook_config['instance_type'] = os.environ['aws_notebook_instance_type'] notebook_config['key_name'] = os.environ['conf_key_name'] notebook_config['user_keyname'] = os.environ['edge_user_name'] notebook_config['instance_name'] = '{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'], os.environ['edge_user_name'], notebook_config['exploratory_name'], args.uuid) notebook_config['expected_image_name'] = '{}-{}-notebook-image'.format(notebook_config['service_base_name'], os.environ['application']) notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name')) notebook_config['role_profile_name'] = '{}-{}-nb-de-Profile' \ .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['edge_user_name']) notebook_config['security_group_name'] = '{}-{}-nb-SG'.format(notebook_config['service_base_name'], os.environ['edge_user_name']) notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name']) notebook_config['dlab_ssh_user'] = os.environ['conf_os_user'] notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled'] # generating variables regarding EDGE proxy on Notebook instance instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name']) edge_instance_name = os.environ['conf_service_base_name'] + "-" + os.environ['edge_user_name'] + '-edge' edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name) edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public') keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']) try: if os.environ['conf_os_family'] == 'debian': initial_user = 'ubuntu' sudo_group = 'sudo' if os.environ['conf_os_family'] == 'redhat': initial_user = 'ec2-user' sudo_group = 'wheel' logging.info('[CREATING DLAB SSH USER]') print('[CREATING DLAB SSH USER]') params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\ (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user, notebook_config['dlab_ssh_
user'], sudo_group) try: local("~/scripts/{}.py {}".format('create_ssh_user', params)) except: traceback.print_exc() raise Exception except Exception as err: append_result("Failed creating ssh user 'dlab'.", str(err)) remove_ec2(notebook_config['tag_name'], notebook_config['instance_name']) sys.exit(1) # configuring proxy on Notebook instance try:
logging.info('[CONFIGURE PROXY ON JUPYTER INSTANCE]') print('[CONFIGURE PROXY ON JUPYTER INSTANCE]') additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"} params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\ .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user']) try: local("~/scripts/{}.py {}".format('common_configure_proxy', params)) except: traceback.print_exc() raise Exception except Exception as err: append_result("Failed to configure proxy.", str(err)) remove_ec2(notebook_config['tag_name'], notebook_config['instance_name']) sys.exit(1) # updating repositories & installing python packages try: logging.info('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]') print('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]') params = "--hostname {} --keyfile {} --user {} --region {}".\ format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['aws_region']) try: local("~/scripts/{}.py {}".format('install_prerequisites', params)) except: traceback.print_exc() raise Exception except Exception as err: append_result("Failed installing apps: apt & pip.", str(err)) remove_ec2(notebook_config['tag_name'], notebook_config['instance_name']) sys.exit(1) # installing and configuring jupiter and all dependencies try: logging.info('[CONFIGURE JUPYTER NOTEBOOK INSTANCE]') print('[CONFIGURE JUPYTER NOTEBOOK INSTANCE]') params = "--hostname {} " \ "--keyfile {} " \ "--region {} " \ "--spark_version {} " \ "--hadoop_version {} " \ "--os_user {} " \ "--scala_version {} " \ "--r_mirror {} " \ "--exploratory_name {}".\ format(instance_hostname, keyfile_name, os.environ['aws_region'], os.environ['notebook_spark_version'], os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'], os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], notebook_config['exploratory_name']) try: local("~/scripts/{}.py {}".format('configure_jupyter_node', params)) except: traceback.print_exc() raise Exception except Exception as err: append_result("Failed to configure jupyter.", str(err)) remove_ec2(notebook_config['tag_name'], notebook_config['instance_name']) sys.exit(1) try: print('[INSTALLING USERs KEY]') logging.info('[INSTALLING USERs KEY]') additional_config = {"user_keyname": notebook_config['user_keyname'], "user_keydir": os.environ['conf_key_dir']} params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format( instance_hostname, keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user']) try: local("~/scripts/{}.py {}".format('install_user_key', params)) except: append_result("Failed installing users key") raise Exception except Exception as err: append_result("Failed installing users key.", str(err)) remove_ec2(notebook_config['tag_name'], notebook_config['instance_name']) sys.exit(1) try: print('[SETUP USER GIT CREDENTIALS]') logging.info('[SETUP USER GIT CREDENTIALS]') params = '--os_user {} --notebook_ip {} --keyfile "{}"' \ .format(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name) try: local("~/scripts/{}.py {}".format('common_download_git_certfile', params)) local("~/scripts/{}.p
#
-*- coding:utf8 -*- # Author: shizhenyu96@gamil.com # github: https://github.com/imndszy from flask import Blueprint admin = Blueprint('admin', __name__) from . import views
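
# How a blueprint like `admin` above is typically mounted on an application.
# The application factory and url_prefix here are illustrative, not taken
# from this project:
from flask import Blueprint, Flask

demo = Blueprint('demo', __name__)

app = Flask(__name__)
app.register_blueprint(demo, url_prefix='/demo')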
#!/usr/bin/env
python
import sys
from setuptools import setup

if sys.hexversion < 0x030200a1:
    print("LightSweeper requires Python 3.2 or higher.")
    print("Exiting...")
    sys.exit(1)

setup(name='LightSweeper',
      version='0.6b',
      description='The LightSweeper API',
      author='The LightSweeper Team',
      author_email='codewizards@lightsweeper.org',
      url='http://www.lightsweeper.org',
      packages=['lightsweeper'],
      package_data={"lightsweeper": ["sounds/*.wav"]},
      include_package_data=True
      )
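
# The guard above compares sys.hexversion against 0x030200a1, which encodes
# version 3.2.0a1. The more readable equivalent uses sys.version_info:
import sys

if sys.version_info < (3, 2):
    sys.exit('LightSweeper requires Python 3.2 or higher.')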
from megaera import local, json from oauth import signed_url from google.appengine.api import urlfetch __TWITTER_API__ = "http://api.twitter.com/1" def tweet(status, **credentials): if not credentials: # shortcut for no-credentials case credentials = local.config_get('twitter') if not credentials: return update_url = "%s/statuses/update.json" % __TWITTER_API__ fetch_url = signed_url(url=update_url, method='POST', status=status, **credentials) response = urlfetch.fetch(fetch_url, method=urlfetch.POST) try: content = json.read(response.content) return content.get('id') except json.ReadException: pass def untweet(status_id, **credentials): if not credentials: # shortcut for no-credentials case credentials = loc
al.config_get('twitter') if not credentials: return destroy_url = "%s/statuses/destroy.json" % __TWITTER_API__ fetch_url = signed_url(url=destroy_url, method='POST', id=status_id, **credentials) response = urlfetch.fetch(fetch_url, method=urlfetc
h.POST) try: content = json.read(response.content) return content.get('id') except json.ReadException: pass
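
# tweet()/untweet() above take OAuth credentials as keyword arguments and
# fall back to local.config_get('twitter') when none are given. The exact
# credential key names depend on oauth.signed_url(), which is not shown
# here, so the names below are hypothetical placeholders:
status_id = tweet(
    'hello from megaera',
    consumer_key='...',        # hypothetical key names; see oauth.signed_url
    consumer_secret='...',
    token='...',
    token_secret='...',
)
if status_id:
    untweet(status_id, consumer_key='...', consumer_secret='...',
            token='...', token_secret='...')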
# -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-08-06 11:13 from __future__ import unic
ode_literals from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='NewsItem', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.Char
Field(max_length=256)), ('content', models.TextField()), ], ), ]
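
# For reference, the model definition this auto-generated migration
# corresponds to; the field types and options are read straight off the
# CreateModel operation above (the implicit AutoField pk needs no declaration):
from django.db import models


class NewsItem(models.Model):
    title = models.CharField(max_length=256)
    content = models.TextField()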
import unittest import logging logging.getLogger().setLevel(logging.DEBUG) from ...auction.worker import Worker from ...database import Database from ...rc import sql class TestCase(unittest.TestCase): def setUp(self): self.db
= Database.pymysql(**sql)
self.ob = Worker(self.db, fail=True) def test_init(self): pass
ipient.PERSONAL) # Users on the different realms can not PM each other with assert_disallowed(): self.send_message(user1_email, user2_email, Recipient.PERSONAL) # Users on non-zulip realms can't PM "ordinary" Zulip users with assert_disallowed(): self.send_message(user1_email, random_zulip_email, Recipient.PERSONAL) # Users on three different realms can not PM each other with assert_disallowed(): self.send_message(user1_email, [user2_email, user3_email], Recipient.PERSONAL) class PersonalMessagesTest(ZulipTestCase): def test_auto_subbed_to_personals(self): # type: () -> None """ Newly created users are auto-subbed to the ability to receive personals. """ self.register("test", "test") user_profile = get_user_profile_by_email('test@zulip.com') old_messages_count = message_stream_count(user_profile) self.send_message("test@zulip.com", "test@zulip.com", Recipient.PERSONAL) new_messages_count = message_stream_count(user_profile) self.assertEqual(new_messages_count, old_messages_count + 1) recipient = Recipient.objects.get(type_id=user_profile.id, type=Recipient.PERSONAL) message = most_recent_message(user_profile) self.assertEqual(message.recipient, recipient) with mock.patch('zerver.models.get_display_recipient', return_value='recip'): self.assertEqual(str(message), u'<Message: recip / / ' '<UserProfile: test@zulip.com <Realm: zulip.com 1>>>') user_message = most_recent_usermessage(user_profile) self.assertEqual(str(user_message), u'<UserMessage: recip / test@zulip.com ([])>' ) @slow("checks several profiles") def test_personal_to_self(self): # type: () -> None """ If you send a personal to yourself, only you see it. """ old_user_profiles = list(UserProfile.objects.all()) self.register("test1", "test1") old_messages = [] for user_profile in old_user_profiles: old_messages.append(message_stream_count(user_profile)) self.send_message("test1@zulip.com", "test1@zulip.com", Recipient.PERSONAL) new_messages = [] for user_profile in old_user_profiles: new_messages.append(message_stream_count(user_profile)) self.assertEqual(old_messages, new_messages) user_profile = get_user_profile_by_email("test1@zulip.com") recipient = Recipient.objects.get(type_id=user_profile.id, type=Recipient.PERSONAL) self.assertEqual(most_recent_message(user_profile).recipient, recipient) def assert_personal(self, sender_email, receiver_email, content="test content"): # type: (Text, Text, Text) -> None """ Send a private message from `sender_email` to `receiver_email` and check that only those two parties actually received the message. """ sender = get_user_profile_by_email(sender_email) receiver = get_user_profile_by_email(receiver_email) sender_messages = message_stream_count(sender) receiver_messages = message_stream_count(receiver) other_user_profiles = UserProfile.objects.filter(~Q(email=sender_email) & ~Q(email=receiver_email)) old_other_messages = [] for user_profile in other_user_profiles: old_other_messages.append(message_stream_count(user_profile)) self.send_message(sender_email, receiver_email, Recipient.PERSONAL, content) # Users outside the conversation don't get the message. new_other_messages = [] for user_profile in other_user_profiles: new_other_messages.append(message_stream_count(user_profile)) self.assertEqual(old_other_messages, new_other_messages) # The personal message is in the streams of both the sender and receiver. 
self.assertEqual(message_stream_count(sender), sender_messages + 1) self.assertEqual(message_stream_count(receiver), receiver_messages + 1) recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL) self.assertEqual(most_recent_message(sender).recipient, recipient) self.assertEqual(most_recent_message(receiver).recipient, recipient) @slow("assert_personal checks several profiles") def test_personal(self): # type: () -> None """ If you send a personal, only you and the recipient see it. """ self.login("hamlet@zulip.com") self.assert_personal("hamlet@zulip.com", "othello@zulip.com") @slow("assert_personal checks several profiles") def test_non_ascii_personal(self): # type: () -> None ""
" Send
ing a PM containing non-ASCII characters succeeds. """ self.login("hamlet@zulip.com") self.assert_personal("hamlet@zulip.com", "othello@zulip.com", u"hümbüǵ") class StreamMessagesTest(ZulipTestCase): def assert_stream_message(self, stream_name, subject="test subject", content="test content"): # type: (Text, Text, Text) -> None """ Check that messages sent to a stream reach all subscribers to that stream. """ realm = get_realm_by_string_id('zulip') subscribers = self.users_subscribed_to_stream(stream_name, realm) old_subscriber_messages = [] for subscriber in subscribers: old_subscriber_messages.append(message_stream_count(subscriber)) non_subscribers = [user_profile for user_profile in UserProfile.objects.all() if user_profile not in subscribers] old_non_subscriber_messages = [] for non_subscriber in non_subscribers: old_non_subscriber_messages.append(message_stream_count(non_subscriber)) non_bot_subscribers = [user_profile for user_profile in subscribers if not user_profile.is_bot] a_subscriber_email = non_bot_subscribers[0].email self.login(a_subscriber_email) self.send_message(a_subscriber_email, stream_name, Recipient.STREAM, subject, content) # Did all of the subscribers get the message? new_subscriber_messages = [] for subscriber in subscribers: new_subscriber_messages.append(message_stream_count(subscriber)) # Did non-subscribers not get the message? new_non_subscriber_messages = [] for non_subscriber in non_subscribers: new_non_subscriber_messages.append(message_stream_count(non_subscriber)) self.assertEqual(old_non_subscriber_messages, new_non_subscriber_messages) self.assertEqual(new_subscriber_messages, [elt + 1 for elt in old_subscriber_messages]) def test_not_too_many_queries(self): # type: () -> None recipient_list = ['hamlet@zulip.com', 'iago@zulip.com', 'cordelia@zulip.com', 'othello@zulip.com'] for email in recipient_list: self.subscribe_to_stream(email, "Denmark") sender_email = 'hamlet@zulip.com' sender = get_user_profile_by_email(sender_email) message_type_name = "stream" sending_client = make_client(name="test suite") stream = 'Denmark' subject = 'foo' content = 'whatever' realm = sender.realm def send_message(): # type: () -> None check_send_message(sender, sending_client, message_type_name, [stream], subject, content, forwarder_user_profile=sender, realm=realm) send_message() # prime the caches with queries_captured() as queries: send_message() self.assert_max_length(queries, 8) def test_stream_message_unicode(self): # type: () -> None
# # Copyright (c) 2009--2010 Red Hat, Inc. #
# This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR
A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

import os
import sys
from optparse import Option, OptionParser

from spacewalk.common import rhnTB
from spacewalk.common.rhnConfig import CFG, initCFG
from spacewalk.server import rhnSQL

import satCerts

DEFAULT_TRUSTED_CERT = 'RHN-ORG-TRUSTED-SSL-CERT'


def fetchTraceback(method=None, req=None, extra=None):
    """ a cheat for snagging just the string value of a Traceback

        NOTE: this tool may be needed for RHN Satellite 3.2 as well, which
              doesn't have a fetchTraceback. So... this is for compatibility.
    """
    from cStringIO import StringIO
    exc = StringIO()
    rhnTB.Traceback(method=method, req=req, mail=0, ostream=exc,
                    extra=extra, severity=None)
    return exc.getvalue()


def processCommandline():
    initCFG('server.satellite')

    options = [
        Option('--ca-cert', action='store', default=DEFAULT_TRUSTED_CERT,
               type="string",
               help='public CA certificate, default is %s' % DEFAULT_TRUSTED_CERT),
        Option('--label', action='store', default='RHN-ORG-TRUSTED-SSL-CERT',
               type="string",
               help='FOR TESTING ONLY - alternative database label for this CA certificate, '
                    'default is "RHN-ORG-TRUSTED-SSL-CERT"'),
        Option('-v', '--verbose', action='count',
               help='be verbose (accumulable: -vvv means "be *really* verbose").'),
    ]

    values, args = OptionParser(option_list=options).parse_args()

    # we take no extra commandline arguments that are not linked to an option
    if args:
        msg = ("ERROR: these arguments make no sense in this context (try "
               "--help): %s\n" % repr(args))
        raise ValueError(msg)

    if not os.path.exists(values.ca_cert):
        sys.stderr.write("ERROR: can't find CA certificate at this location: "
                         "%s\n" % values.ca_cert)
        sys.exit(10)

    try:
        db_backend = CFG.DB_BACKEND
        db_host = CFG.DB_HOST
        db_port = CFG.DB_PORT
        db_user = CFG.DB_USER
        db_password = CFG.DB_PASSWORD
        db_name = CFG.DB_NAME
        rhnSQL.initDB(backend=db_backend, host=db_host, port=db_port,
                      username=db_user, password=db_password, database=db_name)
    except:
        sys.stderr.write("""\
ERROR: there was a problem trying to initialize the database: %s\n""" % fetchTraceback())
        sys.exit(11)

    if values.verbose:
        print 'Public CA SSL certificate: %s' % values.ca_cert

    return values

#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -


def main():
    """ main routine

        10  CA certificate not found
        11  DB initialization failure
        12  no Organization ID. Something very bad is going on.
        13  Couldn't insert the certificate for whatever reason.
    """
    values = processCommandline()

    def writeError(e):
        sys.stderr.write('\nERROR: %s\n' % e)

    try:
        satCerts.store_rhnCryptoKey(values.label, values.ca_cert,
                                    verbosity=values.verbose)
    except satCerts.NoOrgIdError, e:
        writeError("no organization ID!?!\n\n%s\n" % fetchTraceback())
        sys.exit(12)
    except satCerts.CaCertInsertionError, e:
        writeError("couldn't insert the CA certificate!\n\n%s\n" % fetchTraceback())
        sys.exit(13)

    return 0

#-------------------------------------------------------------------------------
if __name__ == "__main__":
    sys.stderr.write('\nWARNING: intended to be wrapped by another executable\n'
                     '         calling program.\n')
    sys.exit(main() or 0)
#===============================================================================
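# A hedged usage sketch (not part of the original module). As the warning
# above notes, this script is meant to be driven by a wrapping executable,
# but an equivalent direct invocation built from the options defined in
# processCommandline() would look like this (the script file name below is
# hypothetical):
#
#   python store-ca-cert.py --ca-cert /path/to/RHN-ORG-TRUSTED-SSL-CERT -vv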
# -*- coding: utf-8 -*- # +---------------------------------------------------------------------------+ # | 01001110 01100101 01110100 01111010 01101111 01100010 | # | | # | Netzob : Inferring communication protocols | # +---------------------------------------------------------------------------+ # | Copyright (C) 2011-2014 Georges Bossert and Frédéric Guihéry | # | This program is free software: you can redistribute it and/or modify | # | it under the terms of the GNU General Public License as published by | # | the Free Software Foundation, either version 3 of the License, or | # | (at your option) any later version. | # | | # | This program is distributed in the hope that it will be useful, | # | but WITHOUT ANY WARRANTY; without even the implied warranty of | # | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | # | GNU General Public License for more details. | # | | # | You should have received a copy of the GNU General Public License | # | along with this program. If not, see <http://www.gnu.org/licenses/>. | # +---------------------------------------------------------------------------+ # | @url : http://www.netzob.org | # | @contact : contact@netzob.org | # | @sponsors : Amossys, http://www.amossys.fr | # | Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ | # +---------------------------------------------------------------------------+ # +---------------------------------------------------------------------------+ # | File contributors : | # | - Georges Bossert <georges.bossert (a) supelec.fr> | # | - Frédéric Guihéry <frederic.guihery (a) amossys.fr> | # +---------------------------------------------------------------------------+ # +---------------------------------------------------------------------------+ # | Standard library imports | # +---------------------------------------------------------------------------+ import random import os from bitarray import bitarray # +---------------------------------------------------------------------------+ # | Related third party imports | # +---------------------------------------------------------------------------+ # +---------------------------------------------------------------------------+ # | Local application imports | # +---------------------------------------------------------------------------+ from netzob.Common.Models.Types.AbstractType import AbstractType class Raw(AbstractType): """Raw netzob data type expressed in bytes. 
For instance, we can use this type to parse any raw field of 2 bytes:

    >>> from netzob.all import *
    >>> f = Field(Raw(nbBytes=2))

    or with a specific value (default is little endianness)

    >>> f = Field(Raw('\x01\x02\x03'))
    >>> print f.domain.dataType
    Raw='\\x01\\x02\\x03' ((0, 24))

    >>> f.domain.dataType.endianness = AbstractType.ENDIAN_BIG
    >>> print f.domain.dataType
    Raw='\\x01\\x02\\x03' ((0, 24))

    """

    def __init__(self, value=None, nbBytes=None, unitSize=AbstractType.defaultUnitSize(), endianness=AbstractType.defaultEndianness(), sign=AbstractType.defaultSign()):
        if value is not None and not isinstance(value, bitarray):
            from netzob.Common.Models.Types.TypeConverter import TypeConverter
            from netzob.Common.Models.Types.BitArray import BitArray
            value = TypeConverter.convert(value, Raw, BitArray)

        nbBits = self._convertNbBytesinNbBits(nbBytes)

        super(Raw, self).__init__(self.__class__.__name__, value, nbBits, unitSize=unitSize, endianness=endianness, sign=sign)

    def __str__(self):
        if self.value is not None:
            from netzob.Common.Models.Types.TypeConverter import TypeConverter
            from netzob.Common.Models.Types.BitArray import BitArray
            return "{0}={1} ({2})".format(self.typeName, repr(TypeConverter.convert(self.value, BitArray, Raw)), self.size)
        else:
            return "{0}={1} ({2})".format(self.typeName, self.value, self.size)

    def __repr__(self):
        if self.value is not None:
            from netzob.Common.Models.Types.TypeConverter import TypeConverter
            from netzob.Common.Models.Types.BitArray import BitArray
            return str(TypeConverter.convert(self.value, BitArray, self.__class__))
        else:
            return str(self.value)

    def _convertNbBytesinNbBits(self, nbBytes):
        """Convert the requested size in bytes (an int for a fixed size, a
        (min, max) tuple for a range, or None) into the equivalent
        (min, max) size in bits."""
        nbMinBit = None
        nbMaxBit = None
        if nbBytes is not None:
            if isinstance(nbBytes, int):
                nbMinBit = nbBytes * 8
                nbMaxBit = nbMinBit
            else:
                if nbBytes[0] is not None:
                    nbMinBit = nbBytes[0] * 8
                if nbBytes[1] is not None:
                    nbMaxBit = nbBytes[1] * 8
        return (nbMinBit, nbMaxBit)

    def generate(self, generationStrategy=None):
        """Generates a random Raw that respects the requested size
        (the lengths below are expressed in bits).

        >>> from netzob.all import *
        >>> a = Raw(nbBytes=10)
        >>> gen = a.generate()
        >>> print len(gen)
        80

        >>> from netzob.all import *
        >>> a = Raw(nbBytes=(10, 20))
        >>> gen = a.generate()
        >>> print 80 <= len(gen) and len(gen) <= 160
        True
        """
        from netzob.Common.Models.Types.TypeConverter import TypeConverter
        from netzob.Common.Models.Types.BitArray import BitArray

        minSize, maxSize = self.size
        if maxSize is None:
            maxSize = AbstractType.MAXIMUM_GENERATED_DATA_SIZE
        if minSize is None:
            minSize = 0

        generatedSize = random.randint(minSize, maxSize)
        return TypeConverter.convert(os.urandom(generatedSize / 8), Raw, BitArray)

    @staticmethod
    def decode(data, unitSize=AbstractType.defaultUnitSize(), endianness=AbstractType.defaultEndianness(), sign=AbstractType.defaultSign()):
        return data

    @staticmethod
    def encode(data, unitSize=AbstractType.defaultUnitSize(), endianness=AbstractType.defaultEndianness(), sign=AbstractType.defaultSign()):
        return data

    @staticmethod
    def canParse(data):
        """Computes if specified data can be parsed as raw, which is always
        the case as long as the data contains at least one bit and is aligned
        on a byte boundary.

        >>> from netzob.all import *
        >>> Raw.canParse(TypeConverter.convert("hello netzob", ASCII, BitArray))
        True

        Unlike ASCII (whose table is defined from 0 to 127), any byte value
        can be parsed as Raw:

        >>> Raw.canParse(TypeConverter.convert(128, Decimal, BitArray, src_sign=AbstractType.SIGN_UNSIGNED))
        True

        :param data: the data to check
        :type data: python raw
        :return: True if data can be parsed as a Raw, which is always the case (if len(data)>0)
        :rtype: bool
        :raise: TypeError if the data is None
        """
        if data is None:
            raise TypeError("data cannot be None")

        if len(data) == 0:
            return False

        if len(data) % 8 != 0:
            return False

        return True
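# A minimal sketch (not part of the Netzob sources) of the nbBytes ->
# (min bits, max bits) conversion performed by _convertNbBytesinNbBits()
# above, assuming AbstractType exposes the computed bounds as `.size`, as
# generate() does:
#
# >>> Raw(nbBytes=2).size
# (16, 16)
# >>> Raw(nbBytes=(1, 4)).size
# (8, 32)
# >>> Raw(nbBytes=(None, 4)).size
# (None, 32)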
import unittest imp
ort maya.cmds as mc import vrayformayaUtils as vfm class TestMeshAttributes(unittest.TestCase): """ This is a generic TestCase for most v-ray mesh attributes. Note that it doesn't test every single case of changes, but it should capture overall changes of the code. """ def setUp(self): self.mesh = mc.polyCube()[0] def test_subdivision(self): transform = self.mesh shapes = mc.listRelatives(transform, children=True,
shapes=True) vfm.attributes.vray_subdivision(self.mesh, state=True, smartConvert=True) for shape in shapes: self.assertTrue(mc.objExists("{0}.vraySubdivEnable".format(shape))) vfm.attributes.vray_subdivision(self.mesh, state=True, smartConvert=True, vraySubdivEnable=False) for shape in shapes: self.assertEqual(mc.getAttr("{0}.vraySubdivEnable".format(shape)), False) vfm.attributes.vray_subdivision(self.mesh, state=True, smartConvert=True, vraySubdivEnable=True) for shape in shapes: self.assertEqual(mc.getAttr("{0}.vraySubdivEnable".format(shape)), True) vfm.attributes.vray_subdivision(self.mesh, state=False) for shape in shapes: self.assertFalse(mc.objExists("{0}.vraySubdivEnable".format(shape))) # Apply to transform without smart convert (should not work) vfm.attributes.vray_subdivision(self.mesh, state=True, smartConvert=False) for shape in shapes: self.assertFalse(mc.objExists("{0}.vraySubdivEnable".format(shape))) for shape in shapes: vfm.attributes.vray_subdivision(shape, state=True, smartConvert=False) self.assertTrue(mc.objExists("{0}.vraySubdivEnable".format(shape))) vfm.attributes.vray_subdivision(shape, state=False, smartConvert=False) self.assertFalse(mc.objExists("{0}.vraySubdivEnable".format(shape))) def test_vray_subquality(self): transform = self.mesh shapes = mc.listRelatives(transform, children=True, shapes=True) # should run without errors: vfm.attributes.vray_subquality(transform, vrayEdgeLength=1, vrayMaxSubdivs=1, vrayOverrideGlobalSubQual=1, vrayViewDep=1) vfm.attributes.vray_subquality(transform, vrayEdgeLength=0, vrayMaxSubdivs=0, vrayOverrideGlobalSubQual=0, vrayViewDep=0) for shape in shapes: self.assertTrue(mc.objExists("{0}.vrayEdgeLength".format(shape))) self.assertTrue(mc.objExists("{0}.vrayMaxSubdivs".format(shape))) self.assertTrue(mc.objExists("{0}.vrayOverrideGlobalSubQual".format(shape))) self.assertTrue(mc.objExists("{0}.vrayViewDep".format(shape))) vfm.attributes.vray_subquality(shapes, smartConvert=False, state=False) for shape in shapes: self.assertFalse(mc.objExists("{0}.vrayEdgeLength".format(shape))) self.assertFalse(mc.objExists("{0}.vrayMaxSubdivs".format(shape))) self.assertFalse(mc.objExists("{0}.vrayOverrideGlobalSubQual".format(shape))) self.assertFalse(mc.objExists("{0}.vrayViewDep".format(shape))) def test_vray_user_attributes(self): transform = self.mesh shapes = mc.listRelatives(transform, children=True, shapes=True) value = "Testing this attribute" vfm.attributes.vray_user_attributes(transform, vrayUserAttributes=value) for shape in shapes: self.assertEqual(mc.getAttr("{0}.vrayUserAttributes".format(shape)), value) value2 = "Aaaaaaap" for shape in shapes: self.assertNotEqual(mc.getAttr("{0}.vrayUserAttributes".format(shape)), value2) vfm.attributes.vray_user_attributes(transform, vrayUserAttributes=value2) for shape in shapes: self.assertEqual(mc.getAttr("{0}.vrayUserAttributes".format(shape)), value2) vfm.attributes.vray_user_attributes(shapes, state=False) for shape in shapes: self.assertFalse(mc.objExists("{0}.vrayUserAttributes".format(shape))) def test_vray_displacement(self): transform = self.mesh shapes = mc.listRelatives(transform, children=True, shapes=True) vfm.attributes.vray_displacement(transform, vrayDisplacementAmount=10) vfm.attributes.vray_displacement(transform, vrayDisplacementShift=5) vfm.attributes.vray_displacement(transform, vrayDisplacementType=2, vrayDisplacementUseBounds=True, vrayEnableWaterLevel=True, vrayWaterLevel=2.0) for shape in shapes: 
self.assertTrue(mc.objExists("{0}.vrayDisplacementNone".format(shape))) vfm.attributes.vray_displacement(shapes, state=False) for shape in shapes: self.assertFalse(mc.objExists("{0}.vrayDisplacementNone".format(shape))) vfm.attributes.vray_displacement(shapes, state=0) for shape in shapes: self.assertFalse(mc.objExists("{0}.vrayDisplacementNone".format(shape))) def tearDown(self): mc.delete(self.mesh) #import unittest #import vrayformayaUtils_tests.attributes_tests as attrTest #reload(attrTest) #suite = unittest.TestLoader().loadTestsFromTestCase(attrTest.TestMeshAttributes) #unittest.TextTestRunner(verbosity=2).run(suite) if __name__ == "__main__": unittest.main()
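# A hedged usage sketch (not part of the test module): applying V-Ray
# subdivision to the current selection, the way the helpers exercised above
# are typically driven outside of tests. The selection handling is an
# assumption; only the vray_subdivision() call mirrors the tested API.
#
# import maya.cmds as mc
# import vrayformayaUtils as vfm
#
# for node in mc.ls(selection=True, long=True):
#     vfm.attributes.vray_subdivision(node, state=True, smartConvert=True)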
# -*- coding: utf-8 -*- ############################################################################## # # Post-installation configuration helpers # Copyright (C) 2015 OpusVL (<http://opusvl.com/>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## """Common code for scripting installation of a chart of accounts into a company. The function you probably want to use is setup_company_accounts() """ from datetime import date import logging from . import confutil _logger = logging.getLogger(__name__) def setup_company_accounts(cr, registry, uid, company, chart_template, code_digits=None, context=None): """This sets up accounts, fiscal year and periods for the given company. company: A res.company object chart_template: An account.chart.template object code_digits: The number of digits (the default is usually 6) context: e.g. {'lang': 'en_GB', 'tz': False, 'uid': openerp.SUPERUSER_ID} A financial year is set up starting this year on 1st Jan and ending this year on 31st Dec. """ unconfigured_companies = unconfigured_company_ids(cr, registry, uid, context=context) if company.id in unconfigured_companies: setup_chart_of_accounts(cr, registry, uid, company_id=company.id, chart_template_id=chart_template.id, code_digits=code_digits, context=context, ) today = date.today() fy_name = today.strftime('%Y') fy_code = 'FY' + fy_name account_start = today.strftime('%Y-01-01') account_end = today.strftime('%Y-12-31') create_fiscal_year(cr, registry, uid, company_id=company.id, name=fy_name, code=fy_code, start_date=account_start, end_date=account_end, context=context, ) confutil.set_account_settings(cr, registry, uid, company=company, changes={ 'date_start': account_start, 'date_stop': account_end, 'period': 'month', }, context=context, ) def unconfigured_company_ids(cr, registry, uid, context=None): """Return list of ids of companies without a chart of accounts. """ account_installer = registry['account.installer'] return account_installer.get_unconfigured_cmp(cr, uid, context=context) def setup_chart_of_accounts(cr, registry, uid, company_id, chart_template_id, code_digits=None, context=None): chart_wizard = registry['wizard.multi.charts.accounts'] defaults = chart_wizard.default_get(cr, uid, ['bank_accounts_id', 'currency_id'], context=context) bank_accounts_spec = defaults.pop('bank_accounts_id') bank_accounts_id = [(0, False, i) for i in bank_accounts_spec] data = defaults.copy() data.update({ "chart_template_id":
chart_template_id, 'company_id': company_id, 'bank_accounts_id': bank_accounts_id, }) onchange = chart_wizard.onchange_chart_template_id(cr, uid, [], data['chart_template_id'], context=context) data.update(onchange['value']) if code_digits: data.update({'code_digits': code_digits}) conf_id = chart_wizard.create(cr, uid, data, context=context) chart_wizard.execute(cr, uid, [conf_id], context=context) def create_fiscal_year(cr, regist
ry, uid, company_id, name, code, start_date, end_date, context=None): fy_model = registry['account.fiscalyear'] fy_data = fy_model.default_get(cr, uid, ['state', 'company_id'], context=context).copy() fy_data.update({ 'company_id': company_id, 'name': name, 'code': code, 'date_start': start_date, 'date_stop': end_date, }) fy_id = fy_model.create(cr, uid, fy_data, context=context) fy_model.create_period(cr, uid, [fy_id], context=context) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
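# A hedged sketch (not part of the original module) of how
# setup_company_accounts() is meant to be driven. The browse calls assume the
# standard OpenERP 7 ORM API; the helper name and the ids are hypothetical
# placeholders supplied by the caller.
def configure_new_company(cr, registry, uid, company_id, chart_template_id,
                          context=None):
    company = registry['res.company'].browse(cr, uid, company_id,
                                             context=context)
    chart_template = registry['account.chart.template'].browse(
        cr, uid, chart_template_id, context=context)
    setup_company_accounts(cr, registry, uid, company, chart_template,
                           code_digits=6, context=context)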
import cherrypy

from ring_api.server.api import user


class Root(object):

    def __init__(self, dring):
        self.dring = dring
        self.user = user.User(dring)

    @cherrypy.expose
    def index(self):
return 'todo' @cherrypy.expose def r
outes(self): return 'todo'
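# A minimal sketch (not in the original module) of how this handler tree
# could be served with CherryPy's builtin server; `dring` is assumed to be an
# initialised Ring daemon handle supplied by the caller.
def serve(dring, script_name='/'):
    cherrypy.quickstart(Root(dring), script_name)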
import os import shutil usage = {} usage['import'] = """catmap import <mkm-file> Open a *.mkm project file and work with it interactively. """ def get_options(args=None, get_parser=False): import optparse import os from glob import glob import catmap parser = optparse.OptionParser( 'Usage: %prog [help] (' + '|'.join(sorted(usage.keys())) + ') [options]', version=catmap.__version__) if args is not None: options, args = parser.parse_args(args.split()) else: options, args = parser.parse_args() if len(args) < 1: parser.error('Command expected') if get_parser: return options, args, parser else: return options, args def match_keys(arg, usage, parser): """Try to match part of a command against the set of commands from usage. Throws an error if not successful. """ possible_args = [key for key in usage if key.startswith(arg)] if len(possible_args) == 0: parser.error('Command "%s" not understood.' % arg) elif len(possible_args) > 1: parser.error(('Command "%s" ambiguous.\n' 'Could be one of %s\n\n') % (arg, possible_args)) else: return possible_args[0] def main(args=None): """The CLI main entry point function. The optional argument args, can be used to directly supply command line argument like $ catmap <args> otherwise args will be taken from STDIN. """ from glob import glob options, args, parser = get_options(args, get_parser=True) if not args[0] in usage.keys(): args[0] = match_keys(args
[0], usage, parser)

    if args[0] == 'import':
        if len(args) < 2:
            parser.error('mkm filename expected.')

        from catmap import ReactionModel
        mkm_file = args[1]
        global model
        model = ReactionModel(setup_file=mkm_file)
        sh(banner='Note: model = catmap.ReactionModel(setup_file=\'%s\')\n'
                  '# do model.run() for a fully initialized model.' % args[1])


def sh(banner):
    """Wrapper around interactive ipython shell
    that factors out ipython version dependencies.
    """
    import IPython
    if hasattr(IPython, 'release'):
        try:
            from IPython.terminal.embed import InteractiveShellEmbed
            InteractiveShellEmbed(banner1=banner)()
        except ImportError:
            try:
                from IPython.frontend.terminal.embed \
                    import InteractiveShellEmbed
                InteractiveShellEmbed(banner1=banner)()
            except ImportError:
                from IPython.Shell import IPShellEmbed
                IPShellEmbed(banner=banner)()
    else:
        from IPython.Shell import IPShellEmbed
        IPShellEmbed(banner=banner)()
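# A short usage sketch (the .mkm file name is hypothetical): calling the CLI
# entry point directly from Python is equivalent to running
# `catmap import project.mkm` from a shell, since get_options() splits the
# string the same way; note that this drops into an interactive IPython
# shell.
#
#   main('import project.mkm')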
s) def assert_no_local_failure(contacted): assert not contacted['local'].get('failed') def assert_local_failure(contacted): assert contacted['local'].get('failed') class FakeAnsibleModuleBailout(BaseException): def __init__(self, success, params): super(FakeAnsibleModuleBailout, self).__init__(success, params) self.success = success self.params = params class FakeAnsibleModule(object): def __init__(self, params, check_mode): self.params = params self.check_mode = check_mode def __call__(self, argument_spec, supports_check_mode): self.argument_spec = argument_spec for name, spec in self.argument_spec.iteritems(): if name not in self.params: self.params[name] = spec.get('default') return self def exit_json(self, **params): raise FakeAnsibleModuleBailout(success=True, params=params) def fail_json(self, **params): raise FakeAnsibleModuleBailout(success=False, params=params) def setup_change_checker(params): must_change = params.pop('_must_change', False) must_not_change = params.pop('_must_not_change', False) if must_change and must_not_change: raise ValueError('invalid request: must change and must not change') if must_change: def check(changed): assert changed elif must_not_change: def check(changed): assert not changed else: check = None return check @pytest.fixture(params=['real', 'fake']) def runit_sv(request, idempotency_state): if request.param == 'real': ansible_module = request.getfuncargvalue('ansible_module') def do(**params): should_fail = params.pop('_should_fail', False) params['_runner_kwargs'] = { 'check': params.pop('_check', False), } check_change = setup_change_checker(params) contacted = ansible_module.runit_sv(**params) if should_fail: assert_local_failure(contacted) else: assert_no_local_failure(contacted) if check_change is not None: check_change(contacted['local']['changed']) elif request.param == 'fake': def do(**params): should_fail = params.pop('_should_fail', False) check = params.pop('_check', False) check_change = setup_change_checker(params) module = FakeAnsibleModule(params, check) with pytest.raises(FakeAnsibleModuleBailout) as excinfo: _runit_sv_module.main(module) assert excinfo.value.success != should_fail if check_change is not None: check_change(excinfo.value.params['changed']) else: raise ValueError('unknown param', request.param) if idempotency_state == 'checked': _do = do def do(**params): _do(_must_change=True, **params) _do(_must_not_change=True, **params) return do @pytest.fixture def basedir(tmpdir): tmpdir.join('sv').mkdir() tmpdir.join('service').mkdir() tmpdir.join('init.d').mkdir() return tmpdir def base_directories(basedir, **overrides): ret = {'sv_directory': [basedir.join('sv').strpath], 'service_directory': [basedir.join('service').strpath], 'init_d_directory': [basedir.join('init.d').strpath]} ret.update(overrides) return ret def settable_mode(path): return path.stat().mode & SETTABLE_MASK def assert_file(path, contents, mode): assert path.read() == contents and settable_mode(path) == mode @idempotent def test_basic_runscript(runit_sv, basedir): """ A basic invocation with name and runscript creates the sv directory containing just the runscript, links the service directory, and links an LSB service. """ runit_sv( name='testsv', runscript='spam eggs', **base_directories(basedir)) sv = basedir.join('sv', 'testsv') assert len(sv.listdir()) == 1 assert_file(sv.join('run'), contents='spam eggs', mode=0o755) assert basedir.join('service', 'testsv').readlink() == sv.strpath assert basedir.join('init.d', 'testsv').readlink() == '/usr/bin/sv' @ide
mpotent def test_lo
g_runscript(runit_sv, basedir): """ Adding a log_runscript creates a log/run script as well. """ runit_sv( name='testsv', runscript='spam eggs', log_runscript='eggs spam', **base_directories(basedir)) sv_log = basedir.join('sv', 'testsv', 'log') assert len(sv_log.listdir()) == 1 assert_file(sv_log.join('run'), contents='eggs spam', mode=0o755) @idempotent def test_supervise_link(runit_sv, basedir): """ The supervise_link option will create a link to some arbitrary location. """ runit_sv( name='testsv', runscript='spam eggs', supervise_link='/spam/eggs', **base_directories(basedir)) sv = basedir.join('sv', 'testsv') assert len(sv.listdir()) == 2 assert sv.join('supervise').readlink() == '/spam/eggs' @idempotent def test_log_supervise_link(runit_sv, basedir): """ The log_supervise_link option will also create a link to some arbitrary location. """ runit_sv( name='testsv', runscript='spam eggs', log_runscript='eggs spam', log_supervise_link='/eggs/spam', **base_directories(basedir)) sv_log = basedir.join('sv', 'testsv', 'log') assert len(sv_log.listdir()) == 2 assert sv_log.join('supervise').readlink() == '/eggs/spam' @idempotent def test_extra_files(runit_sv, basedir): """ Adding extra_files will copy additional files into the sv directory. """ runit_sv( name='testsv', runscript='spam eggs', extra_files={ 'spam': 'eggs', 'eggs': 'spam', }, **base_directories(basedir)) sv = basedir.join('sv', 'testsv') assert len(sv.listdir()) == 3 assert_file(sv.join('spam'), contents='eggs', mode=0o644) assert_file(sv.join('eggs'), contents='spam', mode=0o644) @idempotent def test_extra_scripts(runit_sv, basedir): """ Adding extra_scripts will copy additional scripts into the sv directory. """ runit_sv( name='testsv', runscript='spam eggs', extra_scripts={ 'spam': 'eggs', 'eggs': 'spam', }, **base_directories(basedir)) sv = basedir.join('sv', 'testsv') assert len(sv.listdir()) == 3 assert_file(sv.join('spam'), contents='eggs', mode=0o755) assert_file(sv.join('eggs'), contents='spam', mode=0o755) @idempotent def test_extra_files_and_scripts(runit_sv, basedir): """ Adding extra_files and extra_scripts both will create both additional files and additional scripts. """ runit_sv( name='testsv', runscript='spam eggs', extra_files={ 'spam': 'eggs', 'eggs': 'spam', }, extra_scripts={ 'spams': 'eggs', 'eggss': 'spam', }, **base_directories(basedir)) sv = basedir.join('sv', 'testsv') assert len(sv.listdir()) == 5 assert_file(sv.join('spam'), contents='eggs', mode=0o644) assert_file(sv.join('eggs'), contents='spam', mode=0o644) assert_file(sv.join('spams'), contents='eggs', mode=0o755) assert_file(sv.join('eggss'), contents='spam', mode=0o755) def test_no_overlapping_extra_files_and_scripts(runit_sv, basedir): """ If extra_files and extra_scripts both touch the same path, there's an immediate failure. """ runit_sv( _should_fail=True, name='testsv', runscript='spam eggs', extra_files={ 'spam': 'eggs', }, extra_scripts={ 'spam': 'eggs', }, **base_directories(basedir)) def test_no_overlapping_extra_scripts_with_runscripts(runit_sv, basedir): """ Similarly if extra_scripts specifies the name of a runscript there's an immediate failure. """ runit_sv( _s
""" Parse string to create Regex object. TODO: - Support \: \001, \x00, \0, \ \[, \(, \{, etc. - Support Python extensions: (?:...), (?P<name>...), etc. - Support \<, \>, \s, \S, \w, \W, \Z <=> $, \d, \D, \A <=> ^, \b, \B, [[:space:]], etc. """ from hachoir_regex import (RegexString, RegexEmpty, RegexRepeat, RegexDot, RegexWord, RegexStart, RegexEnd, RegexRange, RegexRangeItem, RegexRangeCharacter) import re REGEX_COMMAND_CHARACTERS = '.^$[](){}|+?*\\' def parseRange(text, start): r""" >>> parseRange('[a]b', 1) (<RegexRange '[a]'>, 3) >>> parseRange('[a-z]b', 1) (<RegexRange '[a-z]'>, 5) >>> parseRange('[^a-z-]b', 1) (<RegexRange '[^a-z-]'>, 7) >>> parseRange('[^]-]b', 1) (<RegexRange '[^]-]'>, 5) >>> parseRange(r'[\]abc]', 1) (<RegexRange '[]a-c]'>, 7) >>> parseRange(r'[a\-x]', 1) (<RegexRange '[ax-]'>, 6) """ index = start char_range = [] exclude = False if text[index] == '^': exclude = True index += 1 if text[index] == ']': char_range.append(RegexRangeCharacter(']')) index += 1 while index < len(text) and text[index] != ']': if index+1 < len(text) \ and text[index] == '\\': char_range.append(RegexRangeCharacter(text[index+1])) index += 2 elif index+1 < len(text) \ and text[index] == '-' and text[index+1] == ']': break elif index+3 < len(text) \ and text[index+1] == '-' \ and text[index+2] != ']': char_range.append(RegexRangeItem(ord(text[index]), ord(text[index+2]))) index += 3 else: char_range.append(RegexRangeCharacter(text[index])) index += 1 if index < len(text) and text[index] == '-': char_range.append(RegexRangeCharacter('-')) index += 1 if index == len(text) or text[index] != ']': raise SyntaxError('Invalid range: %s' % text[start-1:index]) return RegexRange(char_range, exclude), index+1 def parseOr(text, start): """ >>> parseOr('(a)', 1) (<RegexString 'a'>, 3) >>> parseOr('(a|c)', 1) (<RegexRange '[ac]'>, 5) >>> parseOr(' (a|[bc]|d)', 2) (<RegexRange '[a-d]'>, 11) """ index = start # (?:...): Skip Python prefix '?:' if text[index:index+2] == '?:': index += 2 if text[index] == '?': raise NotImplementedError("Doesn't support Python extension (?...)") regex = None while True: new_regex, index = _parse(text, index, "|)") if regex: regex = regex | new_regex else: regex = new_regex if len(text) <= index: raise SyntaxError('Missing closing parenthesis') if text[index] == ')': break index += 1 index += 1 if regex is None: regex = RegexEmpty() return regex, index REPEAT_REGEX = re.compile("([0-9]+)(,[0-9]*)?}") def parseRepeat(text, start): """ >>> parseRepeat('a{0,1}b', 2) (0, 1, 6) >>> parseRepeat('a{12}', 2) (12, 12, 5) """ match = REPEAT_REGEX.match(text, start) if not match: raise SyntaxError('Unable to parse repetition '+text[start:]) rmin = int(match.group(1)) if
match.group(2): text = match.group(2)[1:] if text: rmax = int(text) else: rmax = None else: rmax = rmin return (rmin, rmax, match.end(0)) CHAR_TO_FUNC =
{'[': parseRange, '(': parseOr}
CHAR_TO_CLASS = {'.': RegexDot, '^': RegexStart, '$': RegexEnd}
CHAR_TO_REPEAT = {'*': (0, None), '?': (0, 1), '+': (1, None)}

def _parse(text, start=0, until=None):
    if len(text) == start:
        # Nothing left to consume: return an empty regex and the current
        # position (returning 0 here would make parseOr() loop forever on an
        # unclosed parenthesis instead of reporting it).
        return RegexEmpty(), start
    index = start
    regex = RegexEmpty()
    last = None
    done = False
    while index < len(text):
        char = text[index]
        if until and char in until:
            done = True
            break
        if char in REGEX_COMMAND_CHARACTERS:
            if char in CHAR_TO_FUNC:
                new_regex, index = CHAR_TO_FUNC[char](text, index+1)
            elif char in CHAR_TO_CLASS:
                new_regex = CHAR_TO_CLASS[char]()
                index += 1
            elif char == '{':
                rmin, rmax, index = parseRepeat(text, index+1)
                new_regex = RegexRepeat(last, rmin, rmax)
                last = None
            elif char in CHAR_TO_REPEAT:
                rmin, rmax = CHAR_TO_REPEAT[char]
                if last is None:
                    raise SyntaxError('Repetition character (%s) without previous expression' % text[index])
                new_regex = RegexRepeat(last, rmin, rmax)
                last = None
                index += 1
            elif char == "\\":
                index += 1
                if index == len(text):
                    raise SyntaxError("Backslash (\\) without escaped character")
                char = text[index]
                if char == 'b':
                    new_regex = RegexWord()
                else:
                    if not(char in REGEX_COMMAND_CHARACTERS or char in " '"):
                        raise SyntaxError("Operator '\\%s' is not supported" % char)
                    new_regex = RegexString(char)
                index += 1
            else:
                raise NotImplementedError("Operator '%s' is not supported" % char)
            if last:
                regex = regex + last
            last = new_regex
        else:
            subtext = text[index]
            index += 1
            if last:
                regex = regex + last
            last = RegexString(subtext)
    if last:
        regex = regex + last
    return regex, index

def parse(text):
    r"""
    >>> parse('')
    <RegexEmpty ''>
    >>> parse('abc')
    <RegexString 'abc'>
    >>> parse("chats?")
    <RegexAnd 'chats?'>
    >>> parse('[bc]d')
    <RegexAnd '[bc]d'>
    >>> parse("\\.")
    <RegexString '\.'>
    """
    regex, index = _parse(text)
    assert index == len(text)
    return regex

if __name__ == "__main__":
    import doctest
    doctest.testmod()
from gpu import *

LAMP_TYPES = [
    GPU_DYNAMIC_LAMP_DYNVEC,
    GPU_DYNAMIC_LAMP_DYNCO,
    GPU_DYNAMIC_LAMP_DYNIMAT,
    GPU_DYNAMIC_LAMP_DYNPERSMAT,
    GPU_DYNAMIC_LAMP_DYNENERGY,
    GPU_DYNAMIC_LAMP_DYNCOL,
    GPU_DYNAMIC_LAMP_DISTANCE,
    GPU_DYNAMIC_LAMP_ATT1,
    GPU_DYNAMIC_LAMP_ATT2,
    GPU_DYNAMIC_LAMP_SPOTSIZE,
    GPU_DYNAMIC_LAMP_SPOTBLEND,
]

MIST_TYPES = [
    GPU_DYNAMIC_MIST_ENABLE,
    GPU_DYNAMIC_MIST_START,
    GPU_DYNAMIC_MIST_DISTANCE,
    GPU_DYNAMIC_MIST_INTENSITY,
    GPU_DYNAMIC_MIST_TYPE,
    GPU_DYNAMIC_MIST_COLOR,
]

WORLD_TYPES = [
    GPU_DYNAMIC_HORIZON_COLOR,
    GPU_DYNAMIC_AMBIENT_COLOR,
]

MATERIAL_TYPES = [
    GPU_DYNAMIC_MAT_DIFFRGB,
    GPU_DYNAMIC_MAT_REF,
    GPU_DYNAMIC_MAT_SPECRGB,
    GPU_DYNAMIC_MAT_SPEC,
    GPU_DYNAMIC_MAT_HARD,
    GPU_DYNAMIC_MAT_EMIT,
    GPU_DYNAMIC_MAT_AMB,
    GPU_DYNAMIC_MAT_ALPHA,
]

TYPE_TO_NAME = {
    GPU_DYNAMIC_OBJECT_VIEWMAT : 'view_mat',
    GPU_DYNAMIC_OBJECT_MAT : 'model_mat',
    GPU_DYNAMIC_OBJECT_VIEWIMAT : 'inv_view_mat',
    GPU_DYNAMIC_OBJECT_IMAT : 'inv_model_mat',
    GPU_DYNAMIC_OBJECT_COLOR : 'color',
    GPU_DYNAMIC_OBJECT_AUTOBUMPSCALE : 'auto_bump_scale',

    GPU_DYNAMIC_MIST_ENABLE : 'use_mist',
    GPU_DYNAMIC_MIST_START : 'start',
    GPU_DYNAMIC_MIST_DISTANCE : 'depth',
    GPU_DYNAMIC_MIST_INTENSITY : 'intensity',
    GPU_DYNAMIC_MIST_
TYPE : 'falloff', GPU_DYNAMIC_MIST_COLOR : 'color', GPU_DYNAMIC_HORIZON_COLOR : 'horizon_color', GPU
_DYNAMIC_AMBIENT_COLOR : 'ambient_color', GPU_DYNAMIC_LAMP_DYNVEC : 'dynvec', GPU_DYNAMIC_LAMP_DYNCO : 'dynco', GPU_DYNAMIC_LAMP_DYNIMAT : 'dynimat', GPU_DYNAMIC_LAMP_DYNPERSMAT : 'dynpersmat', GPU_DYNAMIC_LAMP_DYNENERGY : 'energy', GPU_DYNAMIC_LAMP_DYNCOL : 'color', GPU_DYNAMIC_LAMP_DISTANCE : 'distance', GPU_DYNAMIC_LAMP_ATT1 : 'linear_attenuation', GPU_DYNAMIC_LAMP_ATT2 : 'quadratic_attenuation', GPU_DYNAMIC_LAMP_SPOTSIZE : 'spot_size', GPU_DYNAMIC_LAMP_SPOTBLEND : 'spot_blend', GPU_DYNAMIC_MAT_DIFFRGB : 'diffuse_color', GPU_DYNAMIC_MAT_REF : 'diffuse_intensity', GPU_DYNAMIC_MAT_SPECRGB : 'specular_color', GPU_DYNAMIC_MAT_SPEC : 'specular_intensity', GPU_DYNAMIC_MAT_HARD : 'specular_hardness', GPU_DYNAMIC_MAT_EMIT : 'emit', GPU_DYNAMIC_MAT_AMB : 'ambient', GPU_DYNAMIC_MAT_ALPHA : 'alpha', } TYPE_TO_SEMANTIC = { GPU_DYNAMIC_LAMP_DYNVEC : 'BL_DYNVEC', GPU_DYNAMIC_LAMP_DYNCO : 'BL_DYNCO', GPU_DYNAMIC_LAMP_DYNIMAT : 'BL_DYNIMAT', GPU_DYNAMIC_LAMP_DYNPERSMAT : 'BL_DYNPERSMAT', CD_ORCO: 'POSITION', -1: 'NORMAL' # Hack until the gpu module has something for normals } DATATYPE_TO_CONVERTER = { GPU_DATA_1I : lambda x : x, GPU_DATA_1F : lambda x : x, GPU_DATA_2F : lambda x : list(x), GPU_DATA_3F : lambda x : list(x), GPU_DATA_4F : lambda x : list(x), } DATATYPE_TO_GLTF_TYPE = { GPU_DATA_1I : 5124, # INT GPU_DATA_1F : 5126, # FLOAT GPU_DATA_2F : 35664, # FLOAT_VEC2 GPU_DATA_3F : 35665, # FLOAT_VEC3 GPU_DATA_4F : 35666, # FLOAT_VEC4 }
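# A hypothetical helper (not part of the original module) showing how the
# tables above are meant to compose: given a uniform record in the shape
# produced by Blender's gpu.export_shader() (with 'type', 'datatype' and
# 'value' keys -- an assumption here), look up its glTF parameter name and
# type enum and convert its default value.
def uniform_to_gltf(uniform):
    return {
        'name': TYPE_TO_NAME.get(uniform['type']),
        'type': DATATYPE_TO_GLTF_TYPE[uniform['datatype']],
        'value': DATATYPE_TO_CONVERTER[uniform['datatype']](uniform['value']),
    }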
import os.path from tornado import ioloop, httpserver, web,
websocket, template from config import GameConfig OS = os.path.dirname(__file__) def server_path(uri): return os
.path.join(OS, uri) def static_path(uri): return { "path": server_path("static/" + uri) } level_1 = GameConfig() class TarmHandler(web.RequestHandler): def get(self): self.render(server_path("html/game.html"), config = level_1) def write_error(self, code, **kwargs): self.render(server_path("html/error.html")) class TarmSocket(websocket.WebSocketHandler): def open(self, *args): self.stream.set_nodelay(True) print("Socket opened.") def on_message(self, message): print("Message from browser:", message) if "load-config" in message: self.write_message(template.Loader('html').load('config.html').generate(config=level_1)) elif "load-about" in message: self.write_message(template.Loader('html').load('about.html').generate()) elif "load-audio" in message: self.write_message(template.Loader('html').load('audio.html').generate()) def start_server(): tarm_app = web.Application(handlers=[ (r"/", TarmHandler), (r"/socket", TarmSocket), (r"/images/(.*)", web.StaticFileHandler, static_path("images")), (r"/textures/(.*)", web.StaticFileHandler, static_path("textures")), (r"/music/(.*)", web.StaticFileHandler, static_path("audio")) ], debug=True, gzip=True, static_path=server_path("static")) httpserver.HTTPServer(tarm_app).listen(8000) print("Starting server.") ioloop.IOLoop.instance().start() if __name__ == "__main__": start_server()
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from ._policy_tracked_resources_operations import PolicyTrackedResourcesOperations from ._remediations_operations
import RemediationsOperations from ._policy_events_operations import Polic
yEventsOperations from ._policy_states_operations import PolicyStatesOperations from ._operations import Operations from ._policy_metadata_operations import PolicyMetadataOperations from ._policy_restrictions_operations import PolicyRestrictionsOperations from ._attestations_operations import AttestationsOperations __all__ = [ 'PolicyTrackedResourcesOperations', 'RemediationsOperations', 'PolicyEventsOperations', 'PolicyStatesOperations', 'Operations', 'PolicyMetadataOperations', 'PolicyRestrictionsOperations', 'AttestationsOperations', ]
import hashlib import mock import uuid from django.test import TestCase from ..models import Commander class CommanderTestCase(TestCase): def test_generate_token(self):
with mock.patch.object(uuid, 'uuid4', return_value='a_test'): cmdr = Commander( name='Branch' ) self.assertEqual( cmdr.generate_token(), hashlib.md5('a_test').hexdigest() ) def test_save(self): # We need to ensure tokens get auto-populated here. cmdr = Commander.objects.create( name='Branch' ) self.as
sertTrue(len(cmdr.api_token) > 0)
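# A hedged reconstruction (not part of this test module) of the Commander
# behaviour the two tests above rely on: generate_token() hashes a fresh
# uuid4 and save() auto-populates api_token when it is empty. The field
# definitions below are placeholders.
#
# class Commander(models.Model):
#     name = models.CharField(max_length=255)
#     api_token = models.CharField(max_length=32, blank=True)
#
#     def generate_token(self):
#         return hashlib.md5(str(uuid.uuid4())).hexdigest()
#
#     def save(self, *args, **kwargs):
#         if not self.api_token:
#             self.api_token = self.generate_token()
#         super(Commander, self).save(*args, **kwargs)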